diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 35767aadde2..eb422e039dd 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -123,6 +123,8 @@ jobs: - uses: actions/checkout@v1 - name: Lint code for quality and style with Clippy run: make lint + - name: Certify Cargo.lock freshness + run: git diff --exit-code Cargo.lock arbitrary-check: name: arbitrary-check runs-on: ubuntu-latest diff --git a/.gitignore b/.gitignore index d5b3f72922c..570bb6cdf3a 100644 --- a/.gitignore +++ b/.gitignore @@ -6,3 +6,4 @@ target/ flamegraph.svg perf.data* *.tar.gz +bin/ diff --git a/Cargo.lock b/Cargo.lock index 29eee0eeb60..2620678c1b1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,7 +2,7 @@ # It is not intended for manual editing. [[package]] name = "account_manager" -version = "0.2.0" +version = "0.2.8" dependencies = [ "account_utils", "bls", @@ -77,7 +77,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7fc95d1bdb8e6666b2b217308eeeb09f2d6728d104be3e31916cc74d15420331" dependencies = [ - "generic-array 0.14.3", + "generic-array 0.14.4", ] [[package]] @@ -182,9 +182,9 @@ checksum = "6b602bfe940d21c130f3895acd65221e8a61270debe89d628b9cb4e3ccb8569b" [[package]] name = "arbitrary" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cb544f1057eaaff4b34f8c4dcf56fc3cd04debd291998405d135017a7c3c0f4" +checksum = "0922a3e746b5a44e111e5603feb6704e5cc959116f66737f50bb5cbd264e9d87" dependencies = [ "derive_arbitrary", ] @@ -275,9 +275,9 @@ checksum = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2" [[package]] name = "autocfg" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" +checksum = 
"cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "backtrace" @@ -309,12 +309,6 @@ dependencies = [ "safemem", ] -[[package]] -name = "base64" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" - [[package]] name = "base64" version = "0.12.3" @@ -346,12 +340,15 @@ dependencies = [ "lighthouse_metrics", "log 0.4.11", "lru", + "maplit", "merkle_proof", "operation_pool", "parking_lot 0.11.0", "proto_array", "rand 0.7.3", + "rand_core 0.5.1", "rayon", + "regex", "safe_arith", "serde", "serde_derive", @@ -361,7 +358,7 @@ dependencies = [ "slog-term", "sloggers", "slot_clock", - "smallvec 1.4.1", + "smallvec 1.4.2", "state_processing", "store", "tempfile", @@ -373,7 +370,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "0.2.0" +version = "0.2.8" dependencies = [ "beacon_chain", "clap", @@ -466,7 +463,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" dependencies = [ - "block-padding", + "block-padding 0.1.5", "byte-tools", "byteorder", "generic-array 0.12.3", @@ -478,7 +475,8 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "generic-array 0.14.3", + "block-padding 0.2.1", + "generic-array 0.14.4", ] [[package]] @@ -487,7 +485,7 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa136449e765dc7faa244561ccae839c394048667929af599b5d931ebe7b7f10" dependencies = [ - "generic-array 0.14.3", + "generic-array 0.14.4", ] [[package]] @@ -499,6 +497,12 @@ dependencies = [ "byte-tools", ] +[[package]] +name = "block-padding" +version = "0.2.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" + [[package]] name = "bls" version = "0.2.0" @@ -521,7 +525,7 @@ dependencies = [ [[package]] name = "blst" version = "0.1.1" -source = "git+https://github.com/sigp/blst.git?rev=22bfb91721af125d1cb08aa201a18477665a45fe#22bfb91721af125d1cb08aa201a18477665a45fe" +source = "git+https://github.com/sigp/blst.git?rev=284f7059642851c760a09fb1708bcb59c7ca323c#284f7059642851c760a09fb1708bcb59c7ca323c" dependencies = [ "cc", "glob", @@ -530,8 +534,9 @@ dependencies = [ [[package]] name = "boot_node" -version = "0.1.0" +version = "0.2.8" dependencies = [ + "beacon_node", "clap", "discv5", "eth2_libp2p", @@ -635,7 +640,7 @@ dependencies = [ "ethereum-types", "quickcheck", "quickcheck_macros", - "smallvec 1.4.1", + "smallvec 1.4.2", "tree_hash", ] @@ -685,20 +690,20 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.13" +version = "0.4.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c74d84029116787153e02106bf53e66828452a4b325cc8652b788b5967c0a0b6" +checksum = "942f72db697d8767c22d46a598e01f2d3b475501ea43d0db4f16d90259182d0b" dependencies = [ "num-integer", "num-traits", - "time 0.1.43", + "time 0.1.44", ] [[package]] name = "clap" -version = "2.33.2" +version = "2.33.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10040cdf04294b565d9e0319955430099ec3813a64c952b86a41200ad714ae48" +checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002" dependencies = [ "ansi_term", "atty", @@ -751,7 +756,7 @@ dependencies = [ "sloggers", "slot_clock", "store", - "time 0.2.16", + "time 0.2.17", "timer", "tokio 0.2.22", "toml", @@ -844,6 +849,12 @@ dependencies = [ "proc-macro-hack", ] +[[package]] +name = "const_fn" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ce90df4c658c62f12d78f7508cf92f9173e5184a539c10bfe54a3107b3ffd0f2" + [[package]] name = "constant_time_eq" version = "0.1.5" @@ -933,12 +944,12 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09ee0cc8804d5393478d743b035099520087a5186f3b93fa58cec08fa62407b6" +checksum = "b153fe7cbef478c567df0f972e02e6d736db11affe43dfc9c56a9374d1adfb87" dependencies = [ - "cfg-if", "crossbeam-utils", + "maybe-uninit", ] [[package]] @@ -958,7 +969,7 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" dependencies = [ - "autocfg 1.0.0", + "autocfg 1.0.1", "cfg-if", "crossbeam-utils", "lazy_static", @@ -984,7 +995,7 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" dependencies = [ - "autocfg 1.0.0", + "autocfg 1.0.1", "cfg-if", "lazy_static", ] @@ -1011,7 +1022,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" dependencies = [ - "generic-array 0.14.3", + "generic-array 0.14.4", "subtle 2.2.3", ] @@ -1069,6 +1080,19 @@ dependencies = [ "zeroize", ] +[[package]] +name = "curve25519-dalek" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8492de420e9e60bc9a1d66e2dbb91825390b738a388606600663fc529b4b307" +dependencies = [ + "byteorder", + "digest 0.9.0", + "rand_core 0.5.1", + "subtle 2.2.3", + "zeroize", +] + [[package]] name = "darwin-libproc" version = "0.1.2" @@ -1091,9 +1115,9 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.2.1" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "72aa14c04dfae8dd7d8a2b1cb7ca2152618cd01336dbfe704b8dcbf8d41dbd69" +checksum = "d4d0e2d24e5ee3b23a01de38eefdcd978907890701f08ffffd4cb457ca4ee8d6" [[package]] name = "db-key" @@ -1128,9 +1152,9 @@ dependencies = [ [[package]] name = "derive_arbitrary" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02b43185d3e7ce7dcd44a23ca761ec026359753ebf480283a571e6463853d2ef" +checksum = "d0f7c6c81276b6b8702074defbdb1938933ddf98c7f7e0dca8d9e9214dd6c730" dependencies = [ "proc-macro2", "quote", @@ -1163,7 +1187,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ - "generic-array 0.14.3", + "generic-array 0.14.4", ] [[package]] @@ -1195,10 +1219,11 @@ checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" [[package]] name = "discv5" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7083584562c5b36f929dbe621437a911eef4f8ac73bab1b1c64c8f556212943" +checksum = "b4cba1b485c16864edc11ccbf3abf5fbf1c26ce759ab36c32ee8e12638d50b0d" dependencies = [ + "aes-gcm", "arrayvec", "digest 0.8.1", "enr", @@ -1213,12 +1238,11 @@ dependencies = [ "lru_time_cache", "multihash", "net2", - "openssl", "parking_lot 0.11.0", "rand 0.7.3", "rlp", "sha2 0.8.2", - "smallvec 1.4.1", + "smallvec 1.4.2", "tokio 0.2.22", "uint", "zeroize", @@ -1247,15 +1271,15 @@ dependencies = [ [[package]] name = "ed25519-dalek" -version = "1.0.0-pre.4" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21a8a37f4e8b35af971e6db5e3897e7a6344caa3f92f6544f88125a1f5f0035a" +checksum = "53d2e93f837d749c16d118e7ddf7a4dfd0ac8f452cf51e46e9348824e5ef6851" dependencies = [ - "curve25519-dalek", + "curve25519-dalek 3.0.0", "ed25519", "rand 0.7.3", 
"serde", - "sha2 0.8.2", + "sha2 0.9.1", "zeroize", ] @@ -1284,24 +1308,24 @@ dependencies = [ [[package]] name = "either" -version = "1.5.3" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb1f6b1ce1c140482ea30ddd3335fc0024ac7ee112895426e0a629a6c20adfe3" +checksum = "cd56b59865bce947ac5958779cfa508f6c3b9497cc762b7e24a12d11ccde2c4f" [[package]] name = "encoding_rs" -version = "0.8.23" +version = "0.8.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8ac63f94732332f44fe654443c46f6375d1939684c17b0afb6cb56b0456e171" +checksum = "a51b8cf747471cb9499b6d59e59b0444f4c90eba8968c4e44874e92b5b64ace2" dependencies = [ "cfg-if", ] [[package]] name = "enr" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c78d64a14865c080072c05ffb2c11aab15963d5e763ca4dbc02865dc1b615ee" +checksum = "a3137b4854534673ea350751670c6fe53920394a328ba9ce4d9acabd4f60a586" dependencies = [ "base64 0.12.3", "bs58", @@ -1504,7 +1528,7 @@ dependencies = [ "slog-async", "slog-stdlog", "slog-term", - "smallvec 1.4.1", + "smallvec 1.4.2", "snap", "tempdir", "tiny-keccak 2.0.2", @@ -1512,7 +1536,7 @@ dependencies = [ "tokio-io-timeout", "tokio-util", "types", - "unsigned-varint 0.3.3 (git+https://github.com/sigp/unsigned-varint?branch=latest-codecs)", + "unsigned-varint 0.3.3", "void", ] @@ -1522,7 +1546,7 @@ version = "0.1.2" dependencies = [ "eth2_ssz_derive", "ethereum-types", - "smallvec 1.4.1", + "smallvec 1.4.2", ] [[package]] @@ -1699,9 +1723,9 @@ checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d" [[package]] name = "flate2" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68c90b0fc46cf89d227cc78b40e494ff81287a92dd07631e5af0d06fe3cf885e" +checksum = "766d0e77a2c1502169d4a93ff3b8c15a71fd946cd0126309752104e5f3c46d94" dependencies = 
[ "cfg-if", "crc32fast", @@ -1916,9 +1940,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.3" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60fb4bb6bba52f78a471264d9a3b7d026cc0af47b22cd2cffbc0b787ca003e63" +checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" dependencies = [ "typenum", "version_check 0.9.2", @@ -1978,7 +2002,7 @@ checksum = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" dependencies = [ "cfg-if", "libc", - "wasi", + "wasi 0.9.0+wasi-snapshot-preview1", ] [[package]] @@ -2069,14 +2093,14 @@ checksum = "d36fab90f82edc3c747f9d438e06cf0a491055896f2a279638bb5beed6c40177" [[package]] name = "handlebars" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86dbc8a0746b08f363d2e00da48e6c9ceb75c198ac692d2715fcbb5bee74c87d" +checksum = "5deefd4816fb852b1ff3cb48f6c41da67be2d0e1d20b26a7a3b076da11f064b1" dependencies = [ "log 0.4.11", "pest", "pest_derive", - "quick-error", + "quick-error 2.0.0", "serde", "serde_json", ] @@ -2093,12 +2117,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.8.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34f595585f103464d8d2f6e9864682d74c1601fed5e07d62b1c9058dba8246fb" -dependencies = [ - "autocfg 1.0.0", -] +checksum = "00d63df3d41950fb462ed38308eea019113ad1508da725bbedcd0fa5a85ef5f7" [[package]] name = "hashset_delay" @@ -2241,7 +2262,7 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f" dependencies = [ - "quick-error", + "quick-error 1.2.3", ] [[package]] @@ -2256,7 +2277,7 @@ dependencies = [ "log 0.3.9", "mime 0.2.6", "num_cpus", - "time 0.1.43", + "time 0.1.44", "traitobject", "typeable", "unicase 1.4.2", @@ -2281,7 +2302,7 @@ 
dependencies = [ "log 0.4.11", "net2", "rustc_version", - "time 0.1.43", + "time 0.1.44", "tokio 0.1.22", "tokio-buf", "tokio-executor", @@ -2310,7 +2331,7 @@ dependencies = [ "itoa", "pin-project", "socket2", - "time 0.1.43", + "time 0.1.44", "tokio 0.2.22", "tower-service", "tracing", @@ -2394,12 +2415,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.5.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b45e59b16c76b11bf9738fd5d38879d3bd28ad292d7b313608becb17ae2df9" +checksum = "55e2e4c765aa53a0424761bf9f41aa7a6ac1efa87238f59560640e27fca028f2" dependencies = [ - "autocfg 1.0.0", - "hashbrown 0.8.1", + "autocfg 1.0.1", + "hashbrown 0.9.0", ] [[package]] @@ -2517,13 +2538,13 @@ dependencies = [ [[package]] name = "lazycell" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b294d6fa9ee409a054354afc4352b0b9ef7ca222c69b8812cbea9e7d2bf3783f" +checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "0.2.1" +version = "0.2.8" dependencies = [ "bls", "clap", @@ -2576,9 +2597,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.74" +version = "0.2.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2f02823cf78b754822df5f7f268fb59822e7296276d3e069d8e8cb26a14bd10" +checksum = "755456fae044e6fa1ebbbd1b3e902ae19e73097ed4ed87bb79934a867c007bc3" [[package]] name = "libflate" @@ -2606,8 +2627,8 @@ checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" [[package]] name = "libp2p" -version = "0.23.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=3096cb6b89b2883a79ce5ffcb03d41778a09b695#3096cb6b89b2883a79ce5ffcb03d41778a09b695" +version = "0.25.0" +source = "git+https://github.com/sigp/rust-libp2p?rev=03f998022ce2f566a6c6e6c4206bc0ce4d45109f#03f998022ce2f566a6c6e6c4206bc0ce4d45109f" 
dependencies = [ "atomic", "bytes 0.5.6", @@ -2623,12 +2644,11 @@ dependencies = [ "libp2p-swarm", "libp2p-tcp", "libp2p-websocket", - "libp2p-yamux", "multihash", - "parity-multiaddr 0.9.1 (git+https://github.com/sigp/rust-libp2p?rev=3096cb6b89b2883a79ce5ffcb03d41778a09b695)", + "parity-multiaddr 0.9.1", "parking_lot 0.10.2", "pin-project", - "smallvec 1.4.1", + "smallvec 1.4.2", "wasm-timer", ] @@ -2650,7 +2670,7 @@ dependencies = [ "log 0.4.11", "multihash", "multistream-select 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-multiaddr 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-multiaddr 0.9.2", "parking_lot 0.10.2", "pin-project", "prost", @@ -2659,7 +2679,7 @@ dependencies = [ "ring", "rw-stream-sink", "sha2 0.8.2", - "smallvec 1.4.1", + "smallvec 1.4.2", "thiserror", "unsigned-varint 0.4.0", "void", @@ -2669,7 +2689,7 @@ dependencies = [ [[package]] name = "libp2p-core" version = "0.21.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=3096cb6b89b2883a79ce5ffcb03d41778a09b695#3096cb6b89b2883a79ce5ffcb03d41778a09b695" +source = "git+https://github.com/sigp/rust-libp2p?rev=03f998022ce2f566a6c6e6c4206bc0ce4d45109f#03f998022ce2f566a6c6e6c4206bc0ce4d45109f" dependencies = [ "asn1_der", "bs58", @@ -2682,8 +2702,8 @@ dependencies = [ "libsecp256k1", "log 0.4.11", "multihash", - "multistream-select 0.8.2 (git+https://github.com/sigp/rust-libp2p?rev=3096cb6b89b2883a79ce5ffcb03d41778a09b695)", - "parity-multiaddr 0.9.1 (git+https://github.com/sigp/rust-libp2p?rev=3096cb6b89b2883a79ce5ffcb03d41778a09b695)", + "multistream-select 0.8.2 (git+https://github.com/sigp/rust-libp2p?rev=03f998022ce2f566a6c6e6c4206bc0ce4d45109f)", + "parity-multiaddr 0.9.1", "parking_lot 0.10.2", "pin-project", "prost", @@ -2692,7 +2712,7 @@ dependencies = [ "ring", "rw-stream-sink", "sha2 0.8.2", - "smallvec 1.4.1", + "smallvec 1.4.2", "thiserror", "unsigned-varint 0.4.0", 
"void", @@ -2702,7 +2722,7 @@ dependencies = [ [[package]] name = "libp2p-core-derive" version = "0.20.2" -source = "git+https://github.com/sigp/rust-libp2p?rev=3096cb6b89b2883a79ce5ffcb03d41778a09b695#3096cb6b89b2883a79ce5ffcb03d41778a09b695" +source = "git+https://github.com/sigp/rust-libp2p?rev=03f998022ce2f566a6c6e6c4206bc0ce4d45109f#03f998022ce2f566a6c6e6c4206bc0ce4d45109f" dependencies = [ "quote", "syn", @@ -2711,7 +2731,7 @@ dependencies = [ [[package]] name = "libp2p-dns" version = "0.21.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=3096cb6b89b2883a79ce5ffcb03d41778a09b695#3096cb6b89b2883a79ce5ffcb03d41778a09b695" +source = "git+https://github.com/sigp/rust-libp2p?rev=03f998022ce2f566a6c6e6c4206bc0ce4d45109f#03f998022ce2f566a6c6e6c4206bc0ce4d45109f" dependencies = [ "futures 0.3.5", "libp2p-core 0.21.0", @@ -2720,10 +2740,10 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" -version = "0.21.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=3096cb6b89b2883a79ce5ffcb03d41778a09b695#3096cb6b89b2883a79ce5ffcb03d41778a09b695" +version = "0.22.0" +source = "git+https://github.com/sigp/rust-libp2p?rev=03f998022ce2f566a6c6e6c4206bc0ce4d45109f#03f998022ce2f566a6c6e6c4206bc0ce4d45109f" dependencies = [ - "base64 0.11.0", + "base64 0.12.3", "byteorder", "bytes 0.5.6", "fnv", @@ -2736,16 +2756,16 @@ dependencies = [ "prost", "prost-build", "rand 0.7.3", - "sha2 0.8.2", - "smallvec 1.4.1", - "unsigned-varint 0.4.0", + "sha2 0.9.1", + "smallvec 1.4.2", + "unsigned-varint 0.5.0", "wasm-timer", ] [[package]] name = "libp2p-identify" -version = "0.21.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=3096cb6b89b2883a79ce5ffcb03d41778a09b695#3096cb6b89b2883a79ce5ffcb03d41778a09b695" +version = "0.22.0" +source = "git+https://github.com/sigp/rust-libp2p?rev=03f998022ce2f566a6c6e6c4206bc0ce4d45109f#03f998022ce2f566a6c6e6c4206bc0ce4d45109f" dependencies = [ "futures 0.3.5", "libp2p-core 
0.21.0", @@ -2753,14 +2773,14 @@ dependencies = [ "log 0.4.11", "prost", "prost-build", - "smallvec 1.4.1", + "smallvec 1.4.2", "wasm-timer", ] [[package]] name = "libp2p-mplex" version = "0.21.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=3096cb6b89b2883a79ce5ffcb03d41778a09b695#3096cb6b89b2883a79ce5ffcb03d41778a09b695" +source = "git+https://github.com/sigp/rust-libp2p?rev=03f998022ce2f566a6c6e6c4206bc0ce4d45109f#03f998022ce2f566a6c6e6c4206bc0ce4d45109f" dependencies = [ "bytes 0.5.6", "fnv", @@ -2775,10 +2795,10 @@ dependencies = [ [[package]] name = "libp2p-noise" version = "0.23.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=3096cb6b89b2883a79ce5ffcb03d41778a09b695#3096cb6b89b2883a79ce5ffcb03d41778a09b695" +source = "git+https://github.com/sigp/rust-libp2p?rev=03f998022ce2f566a6c6e6c4206bc0ce4d45109f#03f998022ce2f566a6c6e6c4206bc0ce4d45109f" dependencies = [ "bytes 0.5.6", - "curve25519-dalek", + "curve25519-dalek 2.1.0", "futures 0.3.5", "lazy_static", "libp2p-core 0.21.0", @@ -2795,14 +2815,15 @@ dependencies = [ [[package]] name = "libp2p-swarm" -version = "0.21.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=3096cb6b89b2883a79ce5ffcb03d41778a09b695#3096cb6b89b2883a79ce5ffcb03d41778a09b695" +version = "0.22.0" +source = "git+https://github.com/sigp/rust-libp2p?rev=03f998022ce2f566a6c6e6c4206bc0ce4d45109f#03f998022ce2f566a6c6e6c4206bc0ce4d45109f" dependencies = [ + "either", "futures 0.3.5", "libp2p-core 0.21.0", "log 0.4.11", "rand 0.7.3", - "smallvec 1.4.1", + "smallvec 1.4.2", "void", "wasm-timer", ] @@ -2810,7 +2831,7 @@ dependencies = [ [[package]] name = "libp2p-tcp" version = "0.21.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=3096cb6b89b2883a79ce5ffcb03d41778a09b695#3096cb6b89b2883a79ce5ffcb03d41778a09b695" +source = "git+https://github.com/sigp/rust-libp2p?rev=03f998022ce2f566a6c6e6c4206bc0ce4d45109f#03f998022ce2f566a6c6e6c4206bc0ce4d45109f" 
dependencies = [ "futures 0.3.5", "futures-timer", @@ -2825,7 +2846,7 @@ dependencies = [ [[package]] name = "libp2p-websocket" version = "0.22.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=3096cb6b89b2883a79ce5ffcb03d41778a09b695#3096cb6b89b2883a79ce5ffcb03d41778a09b695" +source = "git+https://github.com/sigp/rust-libp2p?rev=03f998022ce2f566a6c6e6c4206bc0ce4d45109f#03f998022ce2f566a6c6e6c4206bc0ce4d45109f" dependencies = [ "async-tls", "either", @@ -2841,18 +2862,6 @@ dependencies = [ "webpki-roots 0.18.0", ] -[[package]] -name = "libp2p-yamux" -version = "0.21.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=3096cb6b89b2883a79ce5ffcb03d41778a09b695#3096cb6b89b2883a79ce5ffcb03d41778a09b695" -dependencies = [ - "futures 0.3.5", - "libp2p-core 0.21.0", - "parking_lot 0.10.2", - "thiserror", - "yamux", -] - [[package]] name = "libsecp256k1" version = "0.3.5" @@ -2882,19 +2891,18 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.0.25" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eb5e43362e38e2bca2fd5f5134c4d4564a23a5c28e9b95411652021a8675ebe" +checksum = "602113192b08db8f38796c4e85c39e960c145965140e918018bcde1952429655" dependencies = [ "cc", - "libc", "pkg-config", "vcpkg", ] [[package]] name = "lighthouse" -version = "0.2.1" +version = "0.2.8" dependencies = [ "account_manager", "account_utils", @@ -3006,6 +3014,13 @@ dependencies = [ "linked-hash-map", ] +[[package]] +name = "lru_cache" +version = "0.1.0" +dependencies = [ + "fnv", +] + [[package]] name = "lru_time_cache" version = "0.10.0" @@ -3051,7 +3066,7 @@ version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c198b026e1bbf08a937e94c6c60f9ec4a2267f5b0d2eec9c1b21b061ce2be55f" dependencies = [ - "autocfg 1.0.0", + "autocfg 1.0.1", ] [[package]] @@ -3105,9 +3120,9 @@ dependencies = [ [[package]] name = "miniz_oxide" -version = "0.4.0" +version = 
"0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be0f75932c1f6cfae3c04000e40114adf955636e19040f9c0a2c380702aa1c7f" +checksum = "4d7559a8a40d0f97e1edea3220f698f78b1c5ab67532e49f68fde3910323b722" dependencies = [ "adler", ] @@ -3190,35 +3205,35 @@ dependencies = [ [[package]] name = "multihash" -version = "0.11.2" +version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f75db05d738947aa5389863aadafbcf2e509d7ba099dc2ddcdf4fc66bf7a9e03" +checksum = "567122ab6492f49b59def14ecc36e13e64dca4188196dd0cd41f9f3f979f3df6" dependencies = [ "blake2b_simd", "blake2s_simd", - "digest 0.8.1", - "sha-1", - "sha2 0.8.2", + "digest 0.9.0", + "sha-1 0.9.1", + "sha2 0.9.1", "sha3", - "unsigned-varint 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "unsigned-varint 0.5.0", ] [[package]] name = "multimap" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8883adfde9756c1d30b0f519c9b8c502a94b41ac62f696453c37c7fc0a958ce" +checksum = "1255076139a83bb467426e7f8d0134968a8118844faa755985e077cf31850333" [[package]] name = "multistream-select" version = "0.8.2" -source = "git+https://github.com/sigp/rust-libp2p?rev=3096cb6b89b2883a79ce5ffcb03d41778a09b695#3096cb6b89b2883a79ce5ffcb03d41778a09b695" +source = "git+https://github.com/sigp/rust-libp2p?rev=03f998022ce2f566a6c6e6c4206bc0ce4d45109f#03f998022ce2f566a6c6e6c4206bc0ce4d45109f" dependencies = [ "bytes 0.5.6", "futures 0.3.5", "log 0.4.11", "pin-project", - "smallvec 1.4.1", + "smallvec 1.4.2", "unsigned-varint 0.4.0", ] @@ -3232,7 +3247,7 @@ dependencies = [ "futures 0.3.5", "log 0.4.11", "pin-project", - "smallvec 1.4.1", + "smallvec 1.4.2", "unsigned-varint 0.4.0", ] @@ -3284,7 +3299,9 @@ dependencies = [ "itertools 0.9.0", "lazy_static", "lighthouse_metrics", + "lru_cache", "matches", + "num_cpus", "parking_lot 0.11.0", "rand 0.7.3", 
"rest_types", @@ -3292,7 +3309,7 @@ dependencies = [ "slog", "sloggers", "slot_clock", - "smallvec 1.4.1", + "smallvec 1.4.2", "state_processing", "store", "tempfile", @@ -3333,12 +3350,6 @@ dependencies = [ "validator_dir", ] -[[package]] -name = "nohash-hasher" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" - [[package]] name = "nom" version = "2.2.1" @@ -3351,7 +3362,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b7f3fc75e3697059fb1bc465e3d8cca6cf92f56854f201158b3f9c77d5a3cfa0" dependencies = [ - "autocfg 1.0.0", + "autocfg 1.0.1", "num-integer", "num-traits", ] @@ -3371,7 +3382,7 @@ dependencies = [ "num-traits", "rand 0.7.3", "serde", - "smallvec 1.4.1", + "smallvec 1.4.2", "zeroize", ] @@ -3381,7 +3392,7 @@ version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d59457e662d541ba17869cf51cf177c0b5f0cbf476c66bdc90bf1edac4f875b" dependencies = [ - "autocfg 1.0.0", + "autocfg 1.0.1", "num-traits", ] @@ -3391,7 +3402,7 @@ version = "0.1.41" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a6e6b7c748f995c4c29c5f5ae0248536e04a5739927c74ec0fa564805094b9f" dependencies = [ - "autocfg 1.0.0", + "autocfg 1.0.1", "num-integer", "num-traits", ] @@ -3402,7 +3413,7 @@ version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac267bcc07f48ee5f8935ab0d24f316fb722d7a1292e2913f0cc196b29ffd611" dependencies = [ - "autocfg 1.0.0", + "autocfg 1.0.1", ] [[package]] @@ -3423,11 +3434,11 @@ checksum = "1ab52be62400ca80aa00285d25253d7f7c437b7375c4de678f5405d3afe82ca5" [[package]] name = "once_cell" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0b631f7e854af39a1739f401cf34a8a013dfe09eac4fa4dba91e9768bd28168d" +checksum = "260e51e7efe62b592207e9e13a68e43692a7a279171d6ba57abd208bf23645ad" dependencies = [ - "parking_lot 0.10.2", + "parking_lot 0.11.0", ] [[package]] @@ -3468,15 +3479,25 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" +[[package]] +name = "openssl-src" +version = "111.10.2+1.1.1g" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a287fdb22e32b5b60624d4a5a7a02dbe82777f730ec0dbc42a0554326fef5a70" +dependencies = [ + "cc", +] + [[package]] name = "openssl-sys" version = "0.9.58" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a842db4709b604f0fe5d1170ae3565899be2ad3d9cbc72dedc789ac0511f78de" dependencies = [ - "autocfg 1.0.0", + "autocfg 1.0.1", "cc", "libc", + "openssl-src", "pkg-config", "vcpkg", ] @@ -3500,7 +3521,7 @@ dependencies = [ [[package]] name = "parity-multiaddr" version = "0.9.1" -source = "git+https://github.com/sigp/rust-libp2p?rev=3096cb6b89b2883a79ce5ffcb03d41778a09b695#3096cb6b89b2883a79ce5ffcb03d41778a09b695" +source = "git+https://github.com/sigp/rust-libp2p?rev=03f998022ce2f566a6c6e6c4206bc0ce4d45109f#03f998022ce2f566a6c6e6c4206bc0ce4d45109f" dependencies = [ "arrayref", "bs58", @@ -3516,9 +3537,9 @@ dependencies = [ [[package]] name = "parity-multiaddr" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc20af3143a62c16e7c9e92ea5c6ae49f7d271d97d4d8fe73afc28f0514a3d0f" +checksum = "2165a93382a93de55868dcbfa11e4a8f99676a9164eee6a2b4a9479ad319c257" dependencies = [ "arrayref", "bs58", @@ -3601,7 +3622,7 @@ dependencies = [ "cloudabi 0.0.3", "libc", "redox_syscall", - "smallvec 1.4.1", + "smallvec 1.4.2", "winapi 0.3.9", ] @@ -3616,7 +3637,7 @@ dependencies = [ "instant", "libc", "redox_syscall", - 
"smallvec 1.4.1", + "smallvec 1.4.2", "winapi 0.3.9", ] @@ -3691,7 +3712,7 @@ checksum = "54be6e404f5317079812fc8f9f5279de376d8856929e21c184ecf6bbd692a11d" dependencies = [ "maplit", "pest", - "sha-1", + "sha-1 0.8.2", ] [[package]] @@ -3781,9 +3802,9 @@ dependencies = [ [[package]] name = "ppv-lite86" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "237a5ed80e274dbc66f86bd59c1e25edc039660be53194b5fe0a482e0f2612ea" +checksum = "c36fa947111f5c62a733b652544dd0016a43ce89619538a8ef92724a6f501a20" [[package]] name = "primitive-types" @@ -3812,9 +3833,9 @@ checksum = "eba180dafb9038b050a4c280019bbedf9f2467b61e5d892dcad585bb57aadc5a" [[package]] name = "proc-macro2" -version = "1.0.19" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04f5f085b5d71e2188cb8271e5da0161ad52c3f227a661a3c135fdf28e258b12" +checksum = "175c513d55719db99da20232b06cda8bab6b83ec2d04e3283edf0213c37c1a29" dependencies = [ "unicode-xid", ] @@ -3910,9 +3931,9 @@ dependencies = [ [[package]] name = "protobuf" -version = "2.16.2" +version = "2.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d883f78645c21b7281d21305181aa1f4dd9e9363e7cf2566c93121552cff003e" +checksum = "cb14183cc7f213ee2410067e1ceeadba2a7478a59432ff0747a335202798b1e2" [[package]] name = "psutil" @@ -3939,6 +3960,12 @@ version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" +[[package]] +name = "quick-error" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ac73b1112776fc109b2e61909bc46c7e1bf0d7f690ffb1676553acce16d5cda" + [[package]] name = "quickcheck" version = "0.9.2" @@ -4102,11 +4129,11 @@ dependencies = [ [[package]] name = "rayon" -version = "1.3.1" +version = "1.4.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "62f02856753d04e03e26929f820d0a0a337ebe71f849801eea335d464b349080" +checksum = "cfd016f0c045ad38b5251be2c9c0ab806917f82da4d36b2a327e5166adad9270" dependencies = [ - "autocfg 1.0.0", + "autocfg 1.0.1", "crossbeam-deque", "either", "rayon-core", @@ -4114,12 +4141,12 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.7.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e92e15d89083484e11353891f1af602cc661426deb9564c298b270c726973280" +checksum = "91739a34c4355b5434ce54c9086c5895604a9c278586d1f1aa95e04f66b525a0" dependencies = [ + "crossbeam-channel", "crossbeam-deque", - "crossbeam-queue", "crossbeam-utils", "lazy_static", "num_cpus", @@ -4142,9 +4169,9 @@ checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" [[package]] name = "redox_users" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09b23093265f8d200fa7b4c2c76297f47e681c655f6f1285a8780d6a022f7431" +checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" dependencies = [ "getrandom", "redox_syscall", @@ -4207,9 +4234,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.10.7" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12427a5577082c24419c9c417db35cfeb65962efc7675bb6b0d5f1f9d315bfe6" +checksum = "e9eaa17ac5d7b838b7503d118fa16ad88f440498bf9ffe5424e621f93190d61e" dependencies = [ "base64 0.12.3", "bytes 0.5.6", @@ -4266,7 +4293,6 @@ dependencies = [ "node_test_rig", "operation_pool", "parking_lot 0.11.0", - "rayon", "remote_beacon_node", "rest_types", "serde", @@ -4289,15 +4315,22 @@ dependencies = [ name = "rest_types" version = "0.2.0" dependencies = [ + "beacon_chain", "bls", + "environment", "eth2_hashing", "eth2_ssz", "eth2_ssz_derive", + "hyper 0.13.7", "procinfo", "psutil", 
"rayon", "serde", + "serde_json", + "serde_yaml", "state_processing", + "store", + "tokio 0.2.22", "tree_hash", "types", ] @@ -4354,17 +4387,17 @@ dependencies = [ "libsqlite3-sys", "lru-cache", "memchr", - "smallvec 1.4.1", - "time 0.1.43", + "smallvec 1.4.2", + "time 0.1.44", ] [[package]] name = "rust-argon2" -version = "0.7.0" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bc8af4bda8e1ff4932523b94d3dd20ee30a87232323eda55903ffd71d2fb017" +checksum = "9dab61250775933275e84053ac235621dfb739556d5c54a2f2e9313b7cf43a19" dependencies = [ - "base64 0.11.0", + "base64 0.12.3", "blake2b_simd", "constant_time_eq", "crossbeam-utils", @@ -4399,9 +4432,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.18.0" +version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cac94b333ee2aac3284c5b8a1b7fb4dd11cba88c244e3fe33cdbd047af0eb693" +checksum = "5d1126dcf58e93cee7d098dbda643b5f92ed724f1f6a63007c1116eed6700c81" dependencies = [ "base64 0.12.3", "log 0.4.11", @@ -4560,17 +4593,11 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" -[[package]] -name = "send_wrapper" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0eddf2e8f50ced781f288c19f18621fa72a3779e3cb58dbf23b07469b0abeb4" - [[package]] name = "serde" -version = "1.0.114" +version = "1.0.115" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5317f7588f0a5078ee60ef675ef96735a1442132dc645eb1d12c018620ed8cd3" +checksum = "e54c9a88f2da7238af84b5101443f0c0d0a3bbdc455e34a5c9497b1903ed55d5" dependencies = [ "serde_derive", ] @@ -4587,9 +4614,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.114" +version = "1.0.115" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "2a0be94b04690fbaed37cddffc5c134bf537c8e3329d53e982fe04c374978f8e" +checksum = "609feed1d0a73cc36a0182a840a9b37b4a82f0b1150369f0536a9e3f2a31dc48" dependencies = [ "proc-macro2", "quote", @@ -4638,6 +4665,15 @@ dependencies = [ "url 2.1.1", ] +[[package]] +name = "serde_utils" +version = "0.1.0" +dependencies = [ + "serde", + "serde_derive", + "serde_json", +] + [[package]] name = "serde_yaml" version = "0.8.13" @@ -4662,6 +4698,19 @@ dependencies = [ "opaque-debug 0.2.3", ] +[[package]] +name = "sha-1" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "170a36ea86c864a3f16dd2687712dd6646f7019f301e57537c7f4dc9f5916770" +dependencies = [ + "block-buffer 0.9.0", + "cfg-if", + "cpuid-bool", + "digest 0.9.0", + "opaque-debug 0.3.0", +] + [[package]] name = "sha1" version = "0.6.0" @@ -4695,15 +4744,14 @@ dependencies = [ [[package]] name = "sha3" -version = "0.8.2" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd26bc0e7a2e3a7c959bc494caf58b72ee0c71d67704e9520f736ca7e4853ecf" +checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" dependencies = [ - "block-buffer 0.7.3", - "byte-tools", - "digest 0.8.1", + "block-buffer 0.9.0", + "digest 0.9.0", "keccak", - "opaque-debug 0.2.3", + "opaque-debug 0.3.0", ] [[package]] @@ -4724,9 +4772,9 @@ checksum = "29f060a7d147e33490ec10da418795238fd7545bba241504d6b31a409f2e6210" [[package]] name = "simple_logger" -version = "1.6.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fea0c4611f32f4c2bac73754f22dca1f57e6c1945e0590dae4e5f2a077b92367" +checksum = "13a53ed2efd04911c8280f2da7bf9abd350c931b86bc7f9f2386fbafbf525ff9" dependencies = [ "atty", "chrono", @@ -4741,6 +4789,7 @@ version = "0.2.0" dependencies = [ "clap", "env_logger", + "eth1", "eth1_test_rig", "futures 0.3.5", "node_test_rig", @@ -4893,9 +4942,9 @@ dependencies = [ 
[[package]] name = "smallvec" -version = "1.4.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3757cb9d89161a2f24e1cf78efa0c1fcff485d18e3f55e0aa3480824ddaa0f3f" +checksum = "fbee7696b84bbf3d89a1c2eccff0850e3047ed46bfcd2e92c29a2d074d57e252" [[package]] name = "snafu" @@ -4956,9 +5005,9 @@ dependencies = [ [[package]] name = "soketto" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85457366ae0c6ce56bf05a958aef14cd38513c236568618edbcd9a8c52cb80b0" +checksum = "b5c71ed3d54db0a699f4948e1bb3e45b450fa31fe602621dee6680361d569c88" dependencies = [ "base64 0.12.3", "bytes 0.5.6", @@ -4967,7 +5016,7 @@ dependencies = [ "httparse", "log 0.4.11", "rand 0.7.3", - "sha-1", + "sha-1 0.9.1", ] [[package]] @@ -4978,9 +5027,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "standback" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0437cfb83762844799a60e1e3b489d5ceb6a650fbacb86437badc1b6d87b246" +checksum = "33a71ea1ea5f8747d1af1979bfb7e65c3a025a70609f04ceb78425bc5adad8e6" dependencies = [ "version_check 0.9.2", ] @@ -5108,7 +5157,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09f8ed9974042b8c3672ff3030a69fcc03b74c47c3d1ecb7755e8a3626011e88" dependencies = [ "block-cipher", - "generic-array 0.14.3", + "generic-array 0.14.4", ] [[package]] @@ -5149,9 +5198,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.38" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e69abc24912995b3038597a7a593be5053eb0fb44f3cc5beec0deb421790c1f4" +checksum = "963f7d3cc59b59b9325165add223142bbf1df27655d07789f109896d353d8350" dependencies = [ "proc-macro2", "quote", @@ -5282,21 +5331,22 @@ dependencies = [ [[package]] name = "time" -version = 
"0.1.43" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" +checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" dependencies = [ "libc", + "wasi 0.10.0+wasi-snapshot-preview1", "winapi 0.3.9", ] [[package]] name = "time" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a51cadc5b1eec673a685ff7c33192ff7b7603d0b75446fb354939ee615acb15" +checksum = "ca7ec98a72285d12e0febb26f0847b12d54be24577618719df654c66cadab55d" dependencies = [ - "cfg-if", + "const_fn", "libc", "standback", "stdweb", @@ -5388,9 +5438,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53953d2d3a5ad81d9f844a32f14ebb121f50b650cd59d0ee2a07cf13c617efed" +checksum = "238ce071d267c5710f9d31451efec16c5ee22de34df17cc05e56cbc92e967117" [[package]] name = "tokio" @@ -5729,9 +5779,9 @@ checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" [[package]] name = "tracing" -version = "0.1.18" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0aae59226cf195d8e74d4b34beae1859257efb4e5fed3f147d2dc2c7d372178" +checksum = "6d79ca061b032d6ce30c660fded31189ca0b9922bf483cd70759f13a2d86786c" dependencies = [ "cfg-if", "log 0.4.11", @@ -5740,9 +5790,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.13" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d593f98af59ebc017c0648f0117525db358745a8894a8d684e185ba3f45954f9" +checksum = "4f0e00789804e99b20f12bc7003ca416309d28a6f495d6af58d1e2c2842461b5" dependencies = [ "lazy_static", ] @@ -5781,7 +5831,7 @@ dependencies = [ "ethereum-types", "lazy_static", "rand 0.7.3", - "smallvec 1.4.1", + 
"smallvec 1.4.2", "tree_hash_derive", "types", ] @@ -5842,6 +5892,7 @@ dependencies = [ "serde", "serde_derive", "serde_json", + "serde_utils", "serde_yaml", "slog", "swap_or_not_shuffle", @@ -5865,9 +5916,9 @@ checksum = "c6ff93345ba2206230b1bb1aa3ece1a63dd9443b7531024575d16a0680a59444" [[package]] name = "uint" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "429ffcad8c8c15f874578c7337d156a3727eb4a1c2374c0ae937ad9a9b748c80" +checksum = "9db035e67dfaf7edd9aebfe8676afcd63eed53c8a4044fed514c8cccf1835177" dependencies = [ "arbitrary", "byteorder", @@ -5942,7 +5993,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8326b2c654932e3e4f9196e69d08fdf7cfd718e1dc6f66b347e6024a0c961402" dependencies = [ - "generic-array 0.14.3", + "generic-array 0.14.4", "subtle 2.2.3", ] @@ -5957,15 +6008,19 @@ dependencies = [ [[package]] name = "unsigned-varint" -version = "0.3.3" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f67332660eb59a6f1eb24ff1220c9e8d01738a8503c6002e30bcfe4bd9f2b4a9" +checksum = "669d776983b692a906c881fcd0cfb34271a48e197e4d6cb8df32b05bfc3d3fa5" +dependencies = [ + "bytes 0.5.6", + "futures_codec", +] [[package]] name = "unsigned-varint" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "669d776983b692a906c881fcd0cfb34271a48e197e4d6cb8df32b05bfc3d3fa5" +checksum = "a98e44fc6af1e18c3a06666d829b4fd8d2714fb2dbffe8ab99d5dc7ea6baa628" dependencies = [ "bytes 0.5.6", "futures_codec", @@ -6011,7 +6066,7 @@ dependencies = [ [[package]] name = "validator_client" -version = "0.2.0" +version = "0.2.8" dependencies = [ "account_utils", "bls", @@ -6134,6 +6189,12 @@ version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" +[[package]] +name = "wasi" +version = "0.10.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" + [[package]] name = "wasm-bindgen" version = "0.2.67" @@ -6228,15 +6289,14 @@ dependencies = [ [[package]] name = "wasm-timer" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "324c5e65a08699c9c4334ba136597ab22b85dccd4b65dd1e36ccf8f723a95b54" +checksum = "be0ecb0db480561e9a7642b5d3e4187c128914e58aa84330b9493e3eb68c5e7f" dependencies = [ "futures 0.3.5", "js-sys", - "parking_lot 0.9.0", + "parking_lot 0.11.0", "pin-utils", - "send_wrapper", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -6424,7 +6484,7 @@ dependencies = [ "mio", "mio-extras", "rand 0.7.3", - "sha-1", + "sha-1 0.8.2", "slab 0.4.2", "url 2.1.1", ] @@ -6445,7 +6505,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "637ff90c9540fa3073bb577e65033069e4bae7c79d49d74aa3ffdf5342a53217" dependencies = [ - "curve25519-dalek", + "curve25519-dalek 2.1.0", "rand_core 0.5.1", "zeroize", ] @@ -6459,20 +6519,6 @@ dependencies = [ "linked-hash-map", ] -[[package]] -name = "yamux" -version = "0.4.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd37e58a1256a0b328ce9c67d8b62ecdd02f4803ba443df478835cb1a41a637c" -dependencies = [ - "futures 0.3.5", - "log 0.4.11", - "nohash-hasher", - "parking_lot 0.10.2", - "rand 0.7.3", - "static_assertions", -] - [[package]] name = "zeroize" version = "1.1.0" diff --git a/Cargo.toml b/Cargo.toml index 35fd77d4492..fec48a1349c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,6 +28,7 @@ members = [ "common/lighthouse_metrics", "common/lighthouse_version", "common/logging", + "common/lru_cache", "common/remote_beacon_node", 
"common/rest_types", "common/slot_clock", @@ -43,6 +44,7 @@ members = [ "consensus/ssz_derive", "consensus/ssz_types", "consensus/serde_hex", + "consensus/serde_utils", "consensus/state_processing", "consensus/swap_or_not_shuffle", "consensus/tree_hash", diff --git a/Cross.toml b/Cross.toml new file mode 100644 index 00000000000..050f2bdbd75 --- /dev/null +++ b/Cross.toml @@ -0,0 +1,4 @@ +[build.env] +passthrough = [ + "RUSTFLAGS", +] diff --git a/Makefile b/Makefile index 618f6c30e29..6fadf222f0a 100644 --- a/Makefile +++ b/Makefile @@ -2,6 +2,13 @@ EF_TESTS = "testing/ef_tests" STATE_TRANSITION_VECTORS = "testing/state_transition_vectors" +GIT_TAG := $(shell git describe --tags --candidates 1) +BIN_DIR = "bin" + +X86_64_TAG = "x86_64-unknown-linux-gnu" +BUILD_PATH_X86_64 = "target/$(X86_64_TAG)/release" +AARCH64_TAG = "aarch64-unknown-linux-gnu" +BUILD_PATH_AARCH64 = "target/$(AARCH64_TAG)/release" # Builds the Lighthouse binary in release (optimized). # @@ -21,6 +28,53 @@ else cargo install --path lcli --force --locked endif +# The following commands use `cross` to build a cross-compile. +# +# These commands require that: +# +# - `cross` is installed (`cargo install cross`). +# - Docker is running. +# - The current user is in the `docker` group. +# +# The resulting binaries will be created in the `target/` directory. +# +# The *-portable options compile the blst library *without* the use of some +# optimized CPU functions that may not be available on some systems. This +# results in a more portable binary with ~20% slower BLS verification. 
+build-x86_64: + cross build --release --manifest-path lighthouse/Cargo.toml --target x86_64-unknown-linux-gnu +build-x86_64-portable: + cross build --release --manifest-path lighthouse/Cargo.toml --target x86_64-unknown-linux-gnu --features portable +build-aarch64: + cross build --release --manifest-path lighthouse/Cargo.toml --target aarch64-unknown-linux-gnu +build-aarch64-portable: + cross build --release --manifest-path lighthouse/Cargo.toml --target aarch64-unknown-linux-gnu --features portable + +# Create a `.tar.gz` containing a binary for a specific target. +define tarball_release_binary + cp $(1)/lighthouse $(BIN_DIR)/lighthouse + cd $(BIN_DIR) && \ + tar -czf lighthouse-$(GIT_TAG)-$(2)$(3).tar.gz lighthouse && \ + rm lighthouse +endef + +# Create a series of `.tar.gz` files in the BIN_DIR directory, each containing +# a `lighthouse` binary for a different target. +# +# The current git tag will be used as the version in the output file names. You +# will likely need to use `git tag` and create a semver tag (e.g., `v0.2.3`). +build-release-tarballs: + [ -d $(BIN_DIR) ] || mkdir -p $(BIN_DIR) + $(MAKE) build-x86_64 + $(call tarball_release_binary,$(BUILD_PATH_X86_64),$(X86_64_TAG),"") + $(MAKE) build-x86_64-portable + $(call tarball_release_binary,$(BUILD_PATH_X86_64),$(X86_64_TAG),"-portable") + $(MAKE) build-aarch64 + $(call tarball_release_binary,$(BUILD_PATH_AARCH64),$(AARCH64_TAG),"") + $(MAKE) build-aarch64-portable + $(call tarball_release_binary,$(BUILD_PATH_AARCH64),$(AARCH64_TAG),"-portable") + + # Runs the full workspace tests in **release**, without downloading any additional # test vectors. 
test-release: diff --git a/account_manager/Cargo.toml b/account_manager/Cargo.toml index 95889b0980a..daca6bd80f7 100644 --- a/account_manager/Cargo.toml +++ b/account_manager/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "account_manager" -version = "0.2.0" +version = "0.2.8" authors = ["Paul Hauner ", "Luke Anderson "] edition = "2018" diff --git a/account_manager/src/validator/create.rs b/account_manager/src/validator/create.rs index 0781e911a95..c708e83961f 100644 --- a/account_manager/src/validator/create.rs +++ b/account_manager/src/validator/create.rs @@ -13,7 +13,7 @@ use validator_dir::Builder as ValidatorDirBuilder; pub const CMD: &str = "create"; pub const BASE_DIR_FLAG: &str = "base-dir"; pub const WALLET_NAME_FLAG: &str = "wallet-name"; -pub const WALLET_PASSPHRASE_FLAG: &str = "wallet-passphrase"; +pub const WALLET_PASSWORD_FLAG: &str = "wallet-password"; pub const DEPOSIT_GWEI_FLAG: &str = "deposit-gwei"; pub const STORE_WITHDRAW_FLAG: &str = "store-withdrawal-keystore"; pub const COUNT_FLAG: &str = "count"; @@ -34,8 +34,8 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .required(true), ) .arg( - Arg::with_name(WALLET_PASSPHRASE_FLAG) - .long(WALLET_PASSPHRASE_FLAG) + Arg::with_name(WALLET_PASSWORD_FLAG) + .long(WALLET_PASSWORD_FLAG) .value_name("WALLET_PASSWORD_PATH") .help("A path to a file containing the password which will unlock the wallet.") .takes_value(true) @@ -109,8 +109,7 @@ pub fn cli_run( let spec = env.core_context().eth2_config.spec; let name: String = clap_utils::parse_required(matches, WALLET_NAME_FLAG)?; - let wallet_password_path: PathBuf = - clap_utils::parse_required(matches, WALLET_PASSPHRASE_FLAG)?; + let wallet_password_path: PathBuf = clap_utils::parse_required(matches, WALLET_PASSWORD_FLAG)?; let validator_dir = clap_utils::parse_path_with_default_in_home_dir( matches, VALIDATOR_DIR_FLAG, diff --git a/account_manager/src/validator/import.rs b/account_manager/src/validator/import.rs index adc08dce20b..4f780179f9b 100644 --- 
a/account_manager/src/validator/import.rs +++ b/account_manager/src/validator/import.rs @@ -6,6 +6,7 @@ use account_utils::{ recursively_find_voting_keystores, ValidatorDefinition, ValidatorDefinitions, CONFIG_FILENAME, }, + ZeroizeString, }; use clap::{App, Arg, ArgMatches}; use std::fs; @@ -17,6 +18,7 @@ pub const CMD: &str = "import"; pub const KEYSTORE_FLAG: &str = "keystore"; pub const DIR_FLAG: &str = "directory"; pub const STDIN_PASSWORD_FLAG: &str = "stdin-passwords"; +pub const REUSE_PASSWORD_FLAG: &str = "reuse-password"; pub const PASSWORD_PROMPT: &str = "Enter the keystore password, or press enter to omit it:"; pub const KEYSTORE_REUSE_WARNING: &str = "DO NOT USE THE ORIGINAL KEYSTORES TO VALIDATE WITH \ @@ -68,6 +70,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .long(STDIN_PASSWORD_FLAG) .help("If present, read passwords from stdin instead of tty."), ) + .arg( + Arg::with_name(REUSE_PASSWORD_FLAG) + .long(REUSE_PASSWORD_FLAG) + .help("If present, the same password will be used for all imported keystores."), + ) } pub fn cli_run(matches: &ArgMatches) -> Result<(), String> { @@ -79,6 +86,7 @@ pub fn cli_run(matches: &ArgMatches) -> Result<(), String> { PathBuf::new().join(".lighthouse").join("validators"), )?; let stdin_password = matches.is_present(STDIN_PASSWORD_FLAG); + let reuse_password = matches.is_present(REUSE_PASSWORD_FLAG); ensure_dir_exists(&validator_dir)?; @@ -118,7 +126,9 @@ pub fn cli_run(matches: &ArgMatches) -> Result<(), String> { // - Add the keystore to the validator definitions file. // // Skip keystores that already exist, but exit early if any operation fails. + // Reuses the same password for all keystores if the `REUSE_PASSWORD_FLAG` flag is set. 
let mut num_imported_keystores = 0; + let mut previous_password: Option = None; for src_keystore in &keystore_paths { let keystore = Keystore::from_json_file(src_keystore) .map_err(|e| format!("Unable to read keystore JSON {:?}: {:?}", src_keystore, e))?; @@ -136,6 +146,10 @@ pub fn cli_run(matches: &ArgMatches) -> Result<(), String> { ); let password_opt = loop { + if let Some(password) = previous_password.clone() { + eprintln!("Reuse previous password."); + break Some(password); + } eprintln!(""); eprintln!("{}", PASSWORD_PROMPT); @@ -152,6 +166,9 @@ pub fn cli_run(matches: &ArgMatches) -> Result<(), String> { eprintln!("Password is correct."); eprintln!(""); sleep(Duration::from_secs(1)); // Provides nicer UX. + if reuse_password { + previous_password = Some(password.clone()); + } break Some(password); } Err(eth2_keystore::Error::InvalidPassword) => { diff --git a/account_manager/src/wallet/create.rs b/account_manager/src/wallet/create.rs index 64703f4dc35..f8a5c0776a5 100644 --- a/account_manager/src/wallet/create.rs +++ b/account_manager/src/wallet/create.rs @@ -15,7 +15,7 @@ use std::path::{Path, PathBuf}; pub const CMD: &str = "create"; pub const HD_TYPE: &str = "hd"; pub const NAME_FLAG: &str = "name"; -pub const PASSPHRASE_FLAG: &str = "passphrase-file"; +pub const PASSWORD_FLAG: &str = "password-file"; pub const TYPE_FLAG: &str = "type"; pub const MNEMONIC_FLAG: &str = "mnemonic-output-path"; @@ -34,8 +34,8 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .required(true), ) .arg( - Arg::with_name(PASSPHRASE_FLAG) - .long(PASSPHRASE_FLAG) + Arg::with_name(PASSWORD_FLAG) + .long(PASSWORD_FLAG) .value_name("WALLET_PASSWORD_PATH") .help( "A path to a file containing the password which will unlock the wallet. 
\ @@ -71,7 +71,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { pub fn cli_run(matches: &ArgMatches, base_dir: PathBuf) -> Result<(), String> { let name: String = clap_utils::parse_required(matches, NAME_FLAG)?; - let wallet_password_path: PathBuf = clap_utils::parse_required(matches, PASSPHRASE_FLAG)?; + let wallet_password_path: PathBuf = clap_utils::parse_required(matches, PASSWORD_FLAG)?; let mnemonic_output_path: Option = clap_utils::parse_optional(matches, MNEMONIC_FLAG)?; let type_field: String = clap_utils::parse_required(matches, TYPE_FLAG)?; @@ -90,7 +90,7 @@ pub fn cli_run(matches: &ArgMatches, base_dir: PathBuf) -> Result<(), String> { // Create a random password if the file does not exist. if !wallet_password_path.exists() { - // To prevent users from accidentally supplying their password to the PASSPHRASE_FLAG and + // To prevent users from accidentally supplying their password to the PASSWORD_FLAG and // create a file with that name, we require that the password has a .pass suffix. if wallet_password_path.extension() != Some(&OsStr::new("pass")) { return Err(format!( diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index f9c20952553..fd0c9e1f77e 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "0.2.0" +version = "0.2.8" authors = ["Paul Hauner ", "Age Manning VerifiedAggregatedAttestation { }?; // Ensure the block being voted for (attestation.data.beacon_block_root) passes validation. + // Don't enforce the skip slot restriction for aggregates. // // This indirectly checks to see if the `attestation.data.beacon_block_root` is in our fork // choice. Any known, non-finalized, processed block should be in fork choice, so this @@ -327,7 +334,7 @@ impl VerifiedAggregatedAttestation { // // Attestations must be for a known block. If the block is unknown, we simply drop the // attestation and do not delay consideration for later. 
- let shuffling_ids = verify_head_block_is_known(chain, &attestation)?; + let shuffling_ids = verify_head_block_is_known(chain, &attestation, None)?; // Ensure that the attestation has participants. if attestation.aggregation_bits.is_zero() { @@ -433,7 +440,10 @@ impl VerifiedUnaggregatedAttestation { // Attestations must be for a known block. If the block is unknown, we simply drop the // attestation and do not delay consideration for later. - let shuffling_ids = verify_head_block_is_known(chain, &attestation)?; + // + // Enforce a maximum skip distance for unaggregated attestations. + let shuffling_ids = + verify_head_block_is_known(chain, &attestation, chain.config.import_max_skip_slots)?; let (indexed_attestation, committees_per_slot) = obtain_indexed_attestation_and_committees_per_slot( @@ -536,18 +546,32 @@ impl VerifiedUnaggregatedAttestation { fn verify_head_block_is_known( chain: &BeaconChain, attestation: &Attestation, + max_skip_slots: Option, ) -> Result { - chain + if let Some(block) = chain .fork_choice .read() .get_block(&attestation.data.beacon_block_root) - .map(|block| BlockShufflingIds { + { + // Reject any block that exceeds our limit on skipped slots. + if let Some(max_skip_slots) = max_skip_slots { + if attestation.data.slot > block.slot + max_skip_slots { + return Err(Error::TooManySkippedSlots { + head_block_slot: block.slot, + attestation_slot: attestation.data.slot, + }); + } + } + + Ok(BlockShufflingIds { current: block.current_epoch_shuffling_id, next: block.next_epoch_shuffling_id, }) - .ok_or_else(|| Error::UnknownHeadBlock { + } else { + Err(Error::UnknownHeadBlock { beacon_block_root: attestation.data.beacon_block_root, }) + } } /// Verify that the `attestation` is within the acceptable gossip propagation range, with reference @@ -807,7 +831,12 @@ where metrics::start_timer(&metrics::ATTESTATION_PROCESSING_STATE_READ_TIMES); let mut state = chain - .get_state(&target_block.state_root, Some(target_block.slot))? 
+ .store + .get_inconsistent_state_for_attestation_verification_only( + &target_block.state_root, + Some(target_block.slot), + ) + .map_err(BeaconChainError::from)? .ok_or_else(|| BeaconChainError::MissingBeaconState(target_block.state_root))?; metrics::stop_timer(state_read_timer); diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index ed57fc85aab..22d90ee374a 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3,9 +3,11 @@ use crate::attestation_verification::{ VerifiedUnaggregatedAttestation, }; use crate::block_verification::{ - check_block_relevancy, get_block_root, signature_verify_chain_segment, BlockError, - FullyVerifiedBlock, GossipVerifiedBlock, IntoFullyVerifiedBlock, + check_block_is_finalized_descendant, check_block_relevancy, get_block_root, + signature_verify_chain_segment, BlockError, FullyVerifiedBlock, GossipVerifiedBlock, + IntoFullyVerifiedBlock, }; +use crate::chain_config::ChainConfig; use crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::eth1_chain::{Eth1Chain, Eth1ChainBackend}; use crate::events::{EventHandler, EventKind}; @@ -29,6 +31,7 @@ use fork_choice::ForkChoice; use itertools::process_results; use operation_pool::{OperationPool, PersistedOperationPool}; use parking_lot::RwLock; +use regex::bytes::Regex; use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; use state_processing::{ @@ -70,7 +73,6 @@ pub const ETH1_CACHE_DB_KEY: [u8; 32] = [0; 32]; pub const FORK_CHOICE_DB_KEY: [u8; 32] = [0; 32]; /// The result of a chain segment processing. -#[derive(Debug)] pub enum ChainSegmentResult { /// Processing this chain segment finished successfully. Successful { imported_blocks: usize }, @@ -161,6 +163,8 @@ pub trait BeaconChainTypes: Send + Sync + 'static { /// operations and chooses a canonical head. 
pub struct BeaconChain { pub spec: ChainSpec, + /// Configuration for `BeaconChain` runtime behaviour. + pub config: ChainConfig, /// Persistent storage for blocks, states, etc. Typically an on-disk store, such as LevelDB. pub store: Arc>, /// Database migrator for running background maintenance on the store. @@ -423,6 +427,17 @@ impl BeaconChain { Ok(iter) } + /// As for `rev_iter_state_roots` but starting from an arbitrary `BeaconState`. + pub fn rev_iter_state_roots_from<'a>( + &self, + state_root: Hash256, + state: &'a BeaconState, + ) -> impl Iterator> + 'a { + std::iter::once(Ok((state_root, state.slot))) + .chain(StateRootsIterator::new(self.store.clone(), state)) + .map(|result| result.map_err(Into::into)) + } + /// Returns the block at the given slot, if any. Only returns blocks in the canonical chain. /// /// ## Errors @@ -476,30 +491,36 @@ impl BeaconChain { /// is the state as it was when the head block was received, which could be some slots prior to /// now. pub fn head(&self) -> Result, Error> { - self.canonical_head + self.with_head(|head| Ok(head.clone_with_only_committee_caches())) + } + + /// Apply a function to the canonical head without cloning it. + pub fn with_head( + &self, + f: impl FnOnce(&BeaconSnapshot) -> Result, + ) -> Result { + let head_lock = self + .canonical_head .try_read_for(HEAD_LOCK_TIMEOUT) - .ok_or_else(|| Error::CanonicalHeadLockTimeout) - .map(|v| v.clone_with_only_committee_caches()) + .ok_or_else(|| Error::CanonicalHeadLockTimeout)?; + f(&head_lock) } /// Returns info representing the head block and state. /// /// A summarized version of `Self::head` that involves less cloning. 
pub fn head_info(&self) -> Result { - let head = self - .canonical_head - .try_read_for(HEAD_LOCK_TIMEOUT) - .ok_or_else(|| Error::CanonicalHeadLockTimeout)?; - - Ok(HeadInfo { - slot: head.beacon_block.slot(), - block_root: head.beacon_block_root, - state_root: head.beacon_state_root, - current_justified_checkpoint: head.beacon_state.current_justified_checkpoint, - finalized_checkpoint: head.beacon_state.finalized_checkpoint, - fork: head.beacon_state.fork, - genesis_time: head.beacon_state.genesis_time, - genesis_validators_root: head.beacon_state.genesis_validators_root, + self.with_head(|head| { + Ok(HeadInfo { + slot: head.beacon_block.slot(), + block_root: head.beacon_block_root, + state_root: head.beacon_state_root, + current_justified_checkpoint: head.beacon_state.current_justified_checkpoint, + finalized_checkpoint: head.beacon_state.finalized_checkpoint, + fork: head.beacon_state.fork, + genesis_time: head.beacon_state.genesis_time, + genesis_validators_root: head.beacon_state.genesis_validators_root, + }) }) } @@ -1215,6 +1236,14 @@ impl BeaconChain { // However, we will potentially get a `ParentUnknown` on a later block. The sync // protocol will need to ensure this is handled gracefully. Err(BlockError::WouldRevertFinalizedSlot { .. }) => continue, + // The block has a known parent that does not descend from the finalized block. + // There is no need to process this block or any children. + Err(BlockError::NotFinalizedDescendant { block_parent_root }) => { + return ChainSegmentResult::Failed { + imported_blocks, + error: BlockError::NotFinalizedDescendant { block_parent_root }, + } + } // If there was an error whilst determining if the block was invalid, return that // error. 
Err(BlockError::BeaconChainError(e)) => { @@ -1291,8 +1320,11 @@ impl BeaconChain { block: SignedBeaconBlock, ) -> Result, BlockError> { let slot = block.message.slot; - let graffiti_string = String::from_utf8(block.message.body.graffiti[..].to_vec()) - .unwrap_or_else(|_| format!("{:?}", &block.message.body.graffiti[..])); + #[allow(clippy::invalid_regex)] + let re = Regex::new("\\p{C}").expect("regex is valid"); + let graffiti_string = + String::from_utf8_lossy(&re.replace_all(&block.message.body.graffiti[..], &b""[..])) + .to_string(); match GossipVerifiedBlock::new(block, self) { Ok(verified) => { @@ -1310,7 +1342,7 @@ impl BeaconChain { debug!( self.log, "Rejected gossip block"; - "error" => format!("{:?}", e), + "error" => e.to_string(), "graffiti" => graffiti_string, "slot" => slot, ); @@ -1393,11 +1425,11 @@ impl BeaconChain { trace!( self.log, "Beacon block rejected"; - "reason" => format!("{:?}", other), + "reason" => other.to_string(), ); let _ = self.event_handler.register(EventKind::BeaconBlockRejected { - reason: format!("Invalid block: {:?}", other), + reason: format!("Invalid block: {}", other), block: Box::new(block), }); @@ -1416,7 +1448,6 @@ impl BeaconChain { fully_verified_block: FullyVerifiedBlock, ) -> Result> { let signed_block = fully_verified_block.block; - let block = &signed_block.message; let block_root = fully_verified_block.block_root; let mut state = fully_verified_block.state; let current_slot = self.slot()?; @@ -1427,7 +1458,7 @@ impl BeaconChain { // Iterate through the attestations in the block and register them as an "observed // attestation". This will stop us from propagating them on the gossip network. - for a in &block.body.attestations { + for a in &signed_block.message.body.attestations { match self.observed_attestations.observe_attestation(a, None) { // If the observation was successful or if the slot for the attestation was too // low, continue. 
@@ -1473,6 +1504,11 @@ impl BeaconChain { let mut fork_choice = self.fork_choice.write(); + // Do not import a block that doesn't descend from the finalized root. + let signed_block = + check_block_is_finalized_descendant::(signed_block, &fork_choice, &self.store)?; + let block = &signed_block.message; + // Register the new block with the fork choice service. { let _fork_choice_block_timer = @@ -1560,12 +1596,13 @@ impl BeaconChain { &self, randao_reveal: Signature, slot: Slot, + validator_graffiti: Option, ) -> Result, BlockProductionError> { let state = self .state_at_slot(slot - 1, StateSkipConfig::WithStateRoots) .map_err(|_| BlockProductionError::UnableToProduceAtSlot(slot))?; - self.produce_block_on_state(state, slot, randao_reveal) + self.produce_block_on_state(state, slot, randao_reveal, validator_graffiti) } /// Produce a block for some `slot` upon the given `state`. @@ -1581,6 +1618,7 @@ impl BeaconChain { mut state: BeaconState, produce_at_slot: Slot, randao_reveal: Signature, + validator_graffiti: Option, ) -> Result, BlockProductionError> { metrics::inc_counter(&metrics::BLOCK_PRODUCTION_REQUESTS); let timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_TIMES); @@ -1648,6 +1686,12 @@ impl BeaconChain { } } + // Override the beacon node's graffiti with graffiti from the validator, if present. 
+ let graffiti = match validator_graffiti { + Some(graffiti) => graffiti, + None => self.graffiti, + }; + let mut block = SignedBeaconBlock { message: BeaconBlock { slot: state.slot, @@ -1657,7 +1701,7 @@ impl BeaconChain { body: BeaconBlockBody { randao_reveal, eth1_data, - graffiti: self.graffiti, + graffiti, proposer_slashings: proposer_slashings.into(), attester_slashings: attester_slashings.into(), attestations: self @@ -1718,7 +1762,7 @@ impl BeaconChain { let beacon_block_root = self.fork_choice.write().get_head(self.slot()?)?; let current_head = self.head_info()?; - let old_finalized_root = current_head.finalized_checkpoint.root; + let old_finalized_checkpoint = current_head.finalized_checkpoint; if beacon_block_root == current_head.block_root { return Ok(()); @@ -1798,15 +1842,32 @@ impl BeaconChain { ); }; - let old_finalized_epoch = current_head.finalized_checkpoint.epoch; - let new_finalized_epoch = new_head.beacon_state.finalized_checkpoint.epoch; - let finalized_root = new_head.beacon_state.finalized_checkpoint.root; + let new_finalized_checkpoint = new_head.beacon_state.finalized_checkpoint; + // State root of the finalized state on the epoch boundary, NOT the state + // of the finalized block. We need to use an iterator in case the state is beyond + // the reach of the new head's `state_roots` array. + let new_finalized_slot = new_finalized_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + let new_finalized_state_root = process_results( + StateRootsIterator::new(self.store.clone(), &new_head.beacon_state), + |mut iter| { + iter.find_map(|(state_root, slot)| { + if slot == new_finalized_slot { + Some(state_root) + } else { + None + } + }) + }, + )? + .ok_or_else(|| Error::MissingFinalizedStateRoot(new_finalized_slot))?; // It is an error to try to update to a head with a lesser finalized epoch. 
- if new_finalized_epoch < old_finalized_epoch { + if new_finalized_checkpoint.epoch < old_finalized_checkpoint.epoch { return Err(Error::RevertedFinalizedEpoch { - previous_epoch: old_finalized_epoch, - new_epoch: new_finalized_epoch, + previous_epoch: old_finalized_checkpoint.epoch, + new_epoch: new_finalized_checkpoint.epoch, }); } @@ -1845,11 +1906,11 @@ impl BeaconChain { ); }); - if new_finalized_epoch != old_finalized_epoch { + if new_finalized_checkpoint.epoch != old_finalized_checkpoint.epoch { self.after_finalization( - old_finalized_epoch, - finalized_root, - old_finalized_root.into(), + old_finalized_checkpoint, + new_finalized_checkpoint, + new_finalized_state_root, )?; } @@ -1877,68 +1938,53 @@ impl BeaconChain { /// Performs pruning and finality-based optimizations. fn after_finalization( &self, - old_finalized_epoch: Epoch, - finalized_block_root: Hash256, - old_finalized_root: SignedBeaconBlockHash, + old_finalized_checkpoint: Checkpoint, + new_finalized_checkpoint: Checkpoint, + new_finalized_state_root: Hash256, ) -> Result<(), Error> { - let finalized_block = self - .store - .get_block(&finalized_block_root)? - .ok_or_else(|| Error::MissingBeaconBlock(finalized_block_root))? 
- .message; + self.fork_choice.write().prune()?; - let new_finalized_epoch = finalized_block.slot.epoch(T::EthSpec::slots_per_epoch()); + self.observed_block_producers.prune( + new_finalized_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()), + ); - if new_finalized_epoch < old_finalized_epoch { - Err(Error::RevertedFinalizedEpoch { - previous_epoch: old_finalized_epoch, - new_epoch: new_finalized_epoch, + self.snapshot_cache + .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) + .map(|mut snapshot_cache| { + snapshot_cache.prune(new_finalized_checkpoint.epoch); }) - } else { - self.fork_choice.write().prune()?; - - self.observed_block_producers - .prune(new_finalized_epoch.start_slot(T::EthSpec::slots_per_epoch())); + .unwrap_or_else(|| { + error!( + self.log, + "Failed to obtain cache write lock"; + "lock" => "snapshot_cache", + "task" => "prune" + ); + }); - self.snapshot_cache - .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .map(|mut snapshot_cache| { - snapshot_cache.prune(new_finalized_epoch); - }) - .unwrap_or_else(|| { - error!( - self.log, - "Failed to obtain cache write lock"; - "lock" => "snapshot_cache", - "task" => "prune" - ); - }); + let finalized_state = self + .get_state(&new_finalized_state_root, None)? + .ok_or_else(|| Error::MissingBeaconState(new_finalized_state_root))?; - let finalized_state = self - .get_state(&finalized_block.state_root, Some(finalized_block.slot))? 
- .ok_or_else(|| Error::MissingBeaconState(finalized_block.state_root))?; + self.op_pool + .prune_all(&finalized_state, self.head_info()?.fork); - self.op_pool - .prune_all(&finalized_state, self.head_info()?.fork); - - // TODO: configurable max finality distance - let max_finality_distance = 0; - self.store_migrator.process_finalization( - finalized_block.state_root, - finalized_state, - max_finality_distance, - Arc::clone(&self.head_tracker), - old_finalized_root, - finalized_block_root.into(), - ); + self.store_migrator.process_finalization( + new_finalized_state_root.into(), + finalized_state, + self.head_tracker.clone(), + old_finalized_checkpoint, + new_finalized_checkpoint, + )?; - let _ = self.event_handler.register(EventKind::BeaconFinalization { - epoch: new_finalized_epoch, - root: finalized_block_root, - }); + let _ = self.event_handler.register(EventKind::BeaconFinalization { + epoch: new_finalized_checkpoint.epoch, + root: new_finalized_checkpoint.root, + }); - Ok(()) - } + Ok(()) } /// Returns `true` if the given block root has not been processed. @@ -2023,10 +2069,11 @@ impl BeaconChain { .beacon_block_root; let mut visited: HashSet = HashSet::new(); let mut finalized_blocks: HashSet = HashSet::new(); + let mut justified_blocks: HashSet = HashSet::new(); let genesis_block_hash = Hash256::zero(); writeln!(output, "digraph beacon {{").unwrap(); - writeln!(output, "\t_{:?}[label=\"genesis\"];", genesis_block_hash).unwrap(); + writeln!(output, "\t_{:?}[label=\"zero\"];", genesis_block_hash).unwrap(); // Canonical head needs to be processed first as otherwise finalized blocks aren't detected // properly. 
@@ -2057,6 +2104,8 @@ impl BeaconChain { .unwrap() .unwrap(); finalized_blocks.insert(state.finalized_checkpoint.root); + justified_blocks.insert(state.current_justified_checkpoint.root); + justified_blocks.insert(state.previous_justified_checkpoint.root); } if block_hash == canonical_head_hash { @@ -2077,6 +2126,15 @@ impl BeaconChain { signed_beacon_block.slot() ) .unwrap(); + } else if justified_blocks.contains(&block_hash) { + writeln!( + output, + "\t_{:?}[label=\"{} ({})\" shape=cds];", + block_hash, + block_hash, + signed_beacon_block.slot() + ) + .unwrap(); } else { writeln!( output, @@ -2106,6 +2164,11 @@ impl BeaconChain { let mut file = std::fs::File::create(file_name).unwrap(); self.dump_as_dot(&mut file); } + + // Should be used in tests only + pub fn set_graffiti(&mut self, graffiti: Graffiti) { + self.graffiti = graffiti; + } } impl Drop for BeaconChain { diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index f4581d77ca9..9f25186edaa 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -48,6 +48,7 @@ use crate::{ }, metrics, BeaconChain, BeaconChainError, BeaconChainTypes, BeaconSnapshot, }; +use fork_choice::{ForkChoice, ForkChoiceStore}; use parking_lot::RwLockReadGuard; use slog::{error, Logger}; use slot_clock::SlotClock; @@ -62,7 +63,7 @@ use std::borrow::Cow; use std::convert::TryFrom; use std::fs; use std::io::Write; -use store::{Error as DBError, HotStateSummary, StoreOp}; +use store::{Error as DBError, HotColdDB, HotStateSummary, StoreOp}; use tree_hash::TreeHash; use types::{ BeaconBlock, BeaconState, BeaconStateError, ChainSpec, CloneConfig, EthSpec, Hash256, @@ -91,6 +92,8 @@ pub enum BlockError { /// It's unclear if this block is valid, but it cannot be processed without already knowing /// its parent. ParentUnknown(Box>), + /// The block skips too many slots and is a DoS risk. 
+ TooManySkippedSlots { parent_slot: Slot, block_slot: Slot }, /// The block slot is greater than the present slot. /// /// ## Peer scoring @@ -118,6 +121,13 @@ pub enum BlockError { block_slot: Slot, finalized_slot: Slot, }, + /// The block conflicts with finalization, no need to propagate. + /// + /// ## Peer scoring + /// + /// It's unclear if this block is valid, but it conflicts with finality and shouldn't be + /// imported. + NotFinalizedDescendant { block_parent_root: Hash256 }, /// Block is already known, no need to re-import. /// /// ## Peer scoring @@ -199,6 +209,17 @@ pub enum BlockError { BeaconChainError(BeaconChainError), } +impl std::fmt::Display for BlockError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + BlockError::ParentUnknown(block) => { + write!(f, "ParentUnknown(parent_root:{})", block.parent_root()) + } + other => write!(f, "{:?}", other), + } + } +} + impl From for BlockError { fn from(e: BlockSignatureVerifierError) -> Self { match e { @@ -371,9 +392,22 @@ impl GossipVerifiedBlock { }); } + let block_root = get_block_root(&block); + // Do not gossip a block from a finalized slot. check_block_against_finalized_slot(&block.message, chain)?; + // Check if the block is already known. We know it is post-finalization, so it is + // sufficient to check the fork choice. + // + // In normal operation this isn't necessary, however it is useful immediately after a + // reboot if the `observed_block_producers` cache is empty. In that case, without this + // check, we will load the parent and state from disk only to find out later that we + // already know this block. + if chain.fork_choice.read().contains_block(&block_root) { + return Err(BlockError::BlockIsAlreadyKnown); + } + // Check that we have not already received a block with a valid signature for this slot. 
if chain .observed_block_producers @@ -386,8 +420,19 @@ impl GossipVerifiedBlock { }); } + // Do not process a block that doesn't descend from the finalized root. + // + // We check this *before* we load the parent so that we can return a more detailed error. + let block = check_block_is_finalized_descendant::( + block, + &chain.fork_choice.read(), + &chain.store, + )?; + let (mut parent, block) = load_parent(block, chain)?; - let block_root = get_block_root(&block); + + // Reject any block that exceeds our limit on skipped slots. + check_block_skip_slots(chain, &parent.beacon_block.message, &block.message)?; let state = cheap_state_advance_to_obtain_committees( &mut parent.beacon_state, @@ -475,6 +520,10 @@ impl SignatureVerifiedBlock { chain: &BeaconChain, ) -> Result> { let (mut parent, block) = load_parent(block, chain)?; + + // Reject any block that exceeds our limit on skipped slots. + check_block_skip_slots(chain, &parent.beacon_block.message, &block.message)?; + let block_root = get_block_root(&block); let state = cheap_state_advance_to_obtain_committees( @@ -605,6 +654,9 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { return Err(BlockError::ParentUnknown(Box::new(block))); } + // Reject any block that exceeds our limit on skipped slots. + check_block_skip_slots(chain, &parent.beacon_block.message, &block.message)?; + /* * Perform cursory checks to see if the block is even worth processing. 
*/ @@ -644,7 +696,10 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { let state_root = state.update_tree_hash_cache()?; let op = if state.slot % T::EthSpec::slots_per_epoch() == 0 { - StoreOp::PutState(state_root.into(), Cow::Owned(state.clone())) + StoreOp::PutState( + state_root.into(), + Cow::Owned(state.clone_with(CloneConfig::committee_caches_only())), + ) } else { StoreOp::PutStateSummary( state_root.into(), @@ -744,6 +799,30 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { } } +/// Check that the count of skip slots between the block and its parent does not exceed our maximum +/// value. +/// +/// Whilst this is not part of the specification, we include this to help prevent us from DoS +/// attacks. In times of dire network circumstance, the user can configure the +/// `import_max_skip_slots` value. +fn check_block_skip_slots( + chain: &BeaconChain, + parent: &BeaconBlock, + block: &BeaconBlock, +) -> Result<(), BlockError> { + // Reject any block that exceeds our limit on skipped slots. + if let Some(max_skip_slots) = chain.config.import_max_skip_slots { + if block.slot > parent.slot + max_skip_slots { + return Err(BlockError::TooManySkippedSlots { + parent_slot: parent.slot, + block_slot: block.slot, + }); + } + } + + Ok(()) +} + /// Returns `Ok(())` if the block is later than the finalized slot on `chain`. /// /// Returns an error if the block is earlier or equal to the finalized slot, or there was an error @@ -768,6 +847,36 @@ fn check_block_against_finalized_slot( } } +/// Returns `Ok(block)` if the block descends from the finalized root. +pub fn check_block_is_finalized_descendant>( + block: SignedBeaconBlock, + fork_choice: &ForkChoice, + store: &HotColdDB, +) -> Result, BlockError> { + if fork_choice.is_descendant_of_finalized(block.parent_root()) { + Ok(block) + } else { + // If fork choice does *not* consider the parent to be a descendant of the finalized block, + // then there are two more cases: + // + // 1. 
We have the parent stored in our database. Because fork-choice has confirmed the + // parent is *not* in our post-finalization DAG, all other blocks must be either + // pre-finalization or conflicting with finalization. + // 2. The parent is unknown to us, we probably want to download it since it might actually + // descend from the finalized root. + if store + .item_exists::>(&block.parent_root()) + .map_err(|e| BlockError::BeaconChainError(e.into()))? + { + Err(BlockError::NotFinalizedDescendant { + block_parent_root: block.parent_root(), + }) + } else { + Err(BlockError::ParentUnknown(Box::new(block))) + } + } +} + /// Performs simple, cheap checks to ensure that the block is relevant to be imported. /// /// `Ok(block_root)` is returned if the block passes these checks and should progress with diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index b0c467920d6..6ff94cfaad9 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -11,6 +11,7 @@ use crate::shuffling_cache::ShufflingCache; use crate::snapshot_cache::{SnapshotCache, DEFAULT_SNAPSHOT_CACHE_SIZE}; use crate::timeout_rw_lock::TimeoutRwLock; use crate::validator_pubkey_cache::ValidatorPubkeyCache; +use crate::ChainConfig; use crate::{ BeaconChain, BeaconChainTypes, BeaconForkChoiceStore, BeaconSnapshot, Eth1Chain, Eth1ChainBackend, EventHandler, @@ -110,6 +111,7 @@ pub struct BeaconChainBuilder { pubkey_cache_path: Option, validator_pubkey_cache: Option, spec: ChainSpec, + chain_config: ChainConfig, disabled_forks: Vec, log: Option, graffiti: Graffiti, @@ -157,6 +159,7 @@ where disabled_forks: Vec::new(), validator_pubkey_cache: None, spec: TEthSpec::default_spec(), + chain_config: ChainConfig::default(), log: None, graffiti: Graffiti::default(), } @@ -171,6 +174,15 @@ where self } + /// Sets the maximum number of blocks that will be skipped when processing + /// some consensus messages. 
+ /// + /// Set to `None` for no limit. + pub fn import_max_skip_slots(mut self, n: Option) -> Self { + self.chain_config.import_max_skip_slots = n; + self + } + /// Sets the store (database). /// /// Should generally be called early in the build chain. @@ -406,6 +418,12 @@ where self } + /// Sets the `ChainConfig` that determines `BeaconChain` runtime behaviour. + pub fn chain_config(mut self, config: ChainConfig) -> Self { + self.chain_config = config; + self + } + /// Consumes `self`, returning a `BeaconChain` if all required parameters have been supplied. /// /// An error will be returned at runtime if all required parameters have not been configured. @@ -489,6 +507,7 @@ where let beacon_chain = BeaconChain { spec: self.spec, + config: self.chain_config, store, store_migrator: self .store_migrator diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs new file mode 100644 index 00000000000..5c12bb40f86 --- /dev/null +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -0,0 +1,21 @@ +use serde_derive::{Deserialize, Serialize}; + +/// There is a 693 block skip in the current canonical Medalla chain, we use 700 to be safe. +pub const DEFAULT_IMPORT_BLOCK_MAX_SKIP_SLOTS: u64 = 700; + +#[derive(Debug, PartialEq, Eq, Clone, Deserialize, Serialize)] +pub struct ChainConfig { + /// Maximum number of slots to skip when importing a consensus message (e.g., block, + /// attestation, etc). + /// + /// If `None`, there is no limit. 
+ pub import_max_skip_slots: Option, +} + +impl Default for ChainConfig { + fn default() -> Self { + Self { + import_max_skip_slots: Some(DEFAULT_IMPORT_BLOCK_MAX_SKIP_SLOTS), + } + } +} diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 5c017a3e8e2..96f1c9a8411 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -1,5 +1,6 @@ use crate::beacon_chain::ForkChoiceError; use crate::eth1_chain::Error as Eth1ChainError; +use crate::migrate::PruningError; use crate::naive_aggregation_pool::Error as NaiveAggregationError; use crate::observed_attestations::Error as ObservedAttestationsError; use crate::observed_attesters::Error as ObservedAttestersError; @@ -61,6 +62,7 @@ pub enum BeaconChainError { requested_slot: Slot, max_task_runtime: Duration, }, + MissingFinalizedStateRoot(Slot), /// Returned when an internal check fails, indicating corrupt data. InvariantViolated(String), SszTypesError(SszTypesError), @@ -79,6 +81,7 @@ pub enum BeaconChainError { ObservedAttestationsError(ObservedAttestationsError), ObservedAttestersError(ObservedAttestersError), ObservedBlockProducersError(ObservedBlockProducersError), + PruningError(PruningError), ArithError(ArithError), } @@ -94,6 +97,7 @@ easy_from_to!(ObservedAttestationsError, BeaconChainError); easy_from_to!(ObservedAttestersError, BeaconChainError); easy_from_to!(ObservedBlockProducersError, BeaconChainError); easy_from_to!(BlockSignatureVerifierError, BeaconChainError); +easy_from_to!(PruningError, BeaconChainError); easy_from_to!(ArithError, BeaconChainError); #[derive(Debug)] diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 3c597bc72eb..793179da8d4 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -2,12 +2,17 @@ #[macro_use] extern crate lazy_static; +#[macro_use] +extern crate slog; +extern crate slog_term; + pub mod 
attestation_verification; mod beacon_chain; mod beacon_fork_choice_store; mod beacon_snapshot; mod block_verification; pub mod builder; +pub mod chain_config; mod errors; pub mod eth1_chain; pub mod events; @@ -32,6 +37,7 @@ pub use self::beacon_chain::{ ForkChoiceError, StateSkipConfig, }; pub use self::beacon_snapshot::BeaconSnapshot; +pub use self::chain_config::ChainConfig; pub use self::errors::{BeaconChainError, BlockProductionError}; pub use attestation_verification::Error as AttestationError; pub use beacon_fork_choice_store::{BeaconForkChoiceStore, Error as ForkChoiceStoreError}; diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index 59079344c92..9730d49f009 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -7,12 +7,28 @@ use std::mem; use std::sync::mpsc; use std::sync::Arc; use std::thread; -use store::hot_cold_store::{process_finalization, HotColdDBError}; -use store::iter::{ParentRootBlockIterator, RootsIterator}; +use store::hot_cold_store::{migrate_database, HotColdDBError}; +use store::iter::RootsIterator; use store::{Error, ItemStore, StoreOp}; pub use store::{HotColdDB, MemoryStore}; -use types::*; -use types::{BeaconState, EthSpec, Hash256, Slot}; +use types::{ + BeaconState, BeaconStateError, BeaconStateHash, Checkpoint, EthSpec, Hash256, + SignedBeaconBlockHash, Slot, +}; + +/// Logic errors that can occur during pruning, none of these should ever happen. +#[derive(Debug)] +pub enum PruningError { + IncorrectFinalizedState { + state_slot: Slot, + new_finalized_slot: Slot, + }, + MissingInfoForCanonicalChain { + slot: Slot, + }, + UnexpectedEqualStateRoots, + UnexpectedUnequalStateRoots, +} /// Trait for migration processes that update the database upon finalization. 
pub trait Migrate, Cold: ItemStore>: @@ -22,17 +38,17 @@ pub trait Migrate, Cold: ItemStore>: fn process_finalization( &self, - _state_root: Hash256, + _finalized_state_root: BeaconStateHash, _new_finalized_state: BeaconState, - _max_finality_distance: u64, _head_tracker: Arc, - _old_finalized_block_hash: SignedBeaconBlockHash, - _new_finalized_block_hash: SignedBeaconBlockHash, - ) { + _old_finalized_checkpoint: Checkpoint, + _new_finalized_checkpoint: Checkpoint, + ) -> Result<(), BeaconChainError> { + Ok(()) } /// Traverses live heads and prunes blocks and states of chains that we know can't be built - /// upon because finalization would prohibit it. This is an optimisation intended to save disk + /// upon because finalization would prohibit it. This is an optimisation intended to save disk /// space. /// /// Assumptions: @@ -40,37 +56,63 @@ pub trait Migrate, Cold: ItemStore>: fn prune_abandoned_forks( store: Arc>, head_tracker: Arc, - old_finalized_block_hash: SignedBeaconBlockHash, - new_finalized_block_hash: SignedBeaconBlockHash, - new_finalized_slot: Slot, + new_finalized_state_hash: BeaconStateHash, + new_finalized_state: &BeaconState, + old_finalized_checkpoint: Checkpoint, + new_finalized_checkpoint: Checkpoint, + log: &Logger, ) -> Result<(), BeaconChainError> { // There will never be any blocks to prune if there is only a single head in the chain. if head_tracker.heads().len() == 1 { return Ok(()); } - let old_finalized_slot = store - .get_block(&old_finalized_block_hash.into())? - .ok_or_else(|| BeaconChainError::MissingBeaconBlock(old_finalized_block_hash.into()))? 
- .slot(); - - // Collect hashes from new_finalized_block back to old_finalized_block (inclusive) - let mut found_block = false; // hack for `take_until` - let newly_finalized_blocks: HashMap = - ParentRootBlockIterator::new(&*store, new_finalized_block_hash.into()) - .take_while(|result| match result { - Ok((block_hash, _)) => { - if found_block { - false - } else { - found_block |= *block_hash == old_finalized_block_hash.into(); - true - } - } - Err(_) => true, - }) - .map(|result| result.map(|(block_hash, block)| (block_hash.into(), block.slot()))) - .collect::>()?; + let old_finalized_slot = old_finalized_checkpoint + .epoch + .start_slot(E::slots_per_epoch()); + let new_finalized_slot = new_finalized_checkpoint + .epoch + .start_slot(E::slots_per_epoch()); + let new_finalized_block_hash = new_finalized_checkpoint.root.into(); + + // The finalized state must be for the epoch boundary slot, not the slot of the finalized + // block. + if new_finalized_state.slot != new_finalized_slot { + return Err(PruningError::IncorrectFinalizedState { + state_slot: new_finalized_state.slot, + new_finalized_slot, + } + .into()); + } + + debug!( + log, + "Starting database pruning"; + "old_finalized_epoch" => old_finalized_checkpoint.epoch, + "old_finalized_root" => format!("{:?}", old_finalized_checkpoint.root), + "new_finalized_epoch" => new_finalized_checkpoint.epoch, + "new_finalized_root" => format!("{:?}", new_finalized_checkpoint.root), + ); + + // For each slot between the new finalized checkpoint and the old finalized checkpoint, + // collect the beacon block root and state root of the canonical chain. 
+ let newly_finalized_chain: HashMap = + std::iter::once(Ok(( + new_finalized_slot, + (new_finalized_block_hash, new_finalized_state_hash), + ))) + .chain( + RootsIterator::new(store.clone(), new_finalized_state).map(|res| { + res.map(|(block_root, state_root, slot)| { + (slot, (block_root.into(), state_root.into())) + }) + }), + ) + .take_while(|res| { + res.as_ref() + .map_or(true, |(slot, _)| *slot >= old_finalized_slot) + }) + .collect::>()?; // We don't know which blocks are shared among abandoned chains, so we buffer and delete // everything in one fell swoop. @@ -79,75 +121,110 @@ pub trait Migrate, Cold: ItemStore>: let mut abandoned_heads: HashSet = HashSet::new(); for (head_hash, head_slot) in head_tracker.heads() { - let mut potentially_abandoned_head: Option = Some(head_hash); - let mut potentially_abandoned_blocks: Vec<( - Slot, - Option, - Option, - )> = Vec::new(); + let mut potentially_abandoned_head = Some(head_hash); + let mut potentially_abandoned_blocks = vec![]; let head_state_hash = store .get_block(&head_hash)? .ok_or_else(|| BeaconStateError::MissingBeaconBlock(head_hash.into()))? .state_root(); + // Iterate backwards from this head, staging blocks and states for deletion. let iter = std::iter::once(Ok((head_hash, head_state_hash, head_slot))) - .chain(RootsIterator::from_block(Arc::clone(&store), head_hash)?); + .chain(RootsIterator::from_block(store.clone(), head_hash)?); + for maybe_tuple in iter { - let (block_hash, state_hash, slot) = maybe_tuple?; - if slot < old_finalized_slot { - // We must assume here any candidate chains include old_finalized_block_hash, - // i.e. there aren't any forks starting at a block that is a strict ancestor of - // old_finalized_block_hash. 
- break; - } - match newly_finalized_blocks.get(&block_hash.into()).copied() { - // Block is not finalized, mark it and its state for deletion + let (block_root, state_root, slot) = maybe_tuple?; + let block_root = SignedBeaconBlockHash::from(block_root); + let state_root = BeaconStateHash::from(state_root); + + match newly_finalized_chain.get(&slot) { + // If there's no information about a slot on the finalized chain, then + // it should be because it's ahead of the new finalized slot. Stage + // the fork's block and state for possible deletion. None => { - potentially_abandoned_blocks.push(( - slot, - Some(block_hash.into()), - Some(state_hash.into()), - )); + if slot > new_finalized_slot { + potentially_abandoned_blocks.push(( + slot, + Some(block_root), + Some(state_root), + )); + } else if slot >= old_finalized_slot { + return Err(PruningError::MissingInfoForCanonicalChain { slot }.into()); + } else { + // We must assume here any candidate chains include the old finalized + // checkpoint, i.e. there aren't any forks starting at a block that is a + // strict ancestor of old_finalized_checkpoint. + warn!( + log, + "Found a chain that should already have been pruned"; + "head_block_root" => format!("{:?}", head_hash), + "head_slot" => head_slot, + ); + break; + } } - Some(finalized_slot) => { - // Block root is finalized, and we have reached the slot it was finalized - // at: we've hit a shared part of the chain. - if finalized_slot == slot { - // The first finalized block of a candidate chain lies after (in terms - // of slots order) the newly finalized block. It's not a candidate for - // prunning. - if finalized_slot == new_finalized_slot { + Some((finalized_block_root, finalized_state_root)) => { + // This fork descends from a newly finalized block, we can stop. + if block_root == *finalized_block_root { + // Sanity check: if the slot and block root match, then the + // state roots should match too. 
+ if state_root != *finalized_state_root { + return Err(PruningError::UnexpectedUnequalStateRoots.into()); + } + + // If the fork descends from the whole finalized chain, + // do not prune it. Otherwise continue to delete all + // of the blocks and states that have been staged for + // deletion so far. + if slot == new_finalized_slot { potentially_abandoned_blocks.clear(); potentially_abandoned_head.take(); } - + // If there are skipped slots on the fork to be pruned, then + // we will have just staged the common block for deletion. + // Unstage it. + else { + for (_, block_root, _) in + potentially_abandoned_blocks.iter_mut().rev() + { + if block_root.as_ref() == Some(finalized_block_root) { + *block_root = None; + } else { + break; + } + } + } break; - } - // Block root is finalized, but we're at a skip slot: delete the state only. - else { + } else { + if state_root == *finalized_state_root { + return Err(PruningError::UnexpectedEqualStateRoots.into()); + } potentially_abandoned_blocks.push(( slot, - None, - Some(state_hash.into()), + Some(block_root), + Some(state_root), )); } } } } - abandoned_heads.extend(potentially_abandoned_head.into_iter()); - if !potentially_abandoned_blocks.is_empty() { + if let Some(abandoned_head) = potentially_abandoned_head { + debug!( + log, + "Pruning head"; + "head_block_root" => format!("{:?}", abandoned_head), + "head_slot" => head_slot, + ); + abandoned_heads.insert(abandoned_head); abandoned_blocks.extend( potentially_abandoned_blocks .iter() .filter_map(|(_, maybe_block_hash, _)| *maybe_block_hash), ); abandoned_states.extend(potentially_abandoned_blocks.iter().filter_map( - |(slot, _, maybe_state_hash)| match maybe_state_hash { - None => None, - Some(state_hash) => Some((*slot, *state_hash)), - }, + |(slot, _, maybe_state_hash)| maybe_state_hash.map(|sr| (*slot, sr)), )); } } @@ -161,11 +238,14 @@ pub trait Migrate, Cold: ItemStore>: .map(|(slot, state_hash)| StoreOp::DeleteState(state_hash, slot)), ) .collect(); + 
store.do_atomically(batch)?; for head_hash in abandoned_heads.into_iter() { head_tracker.remove_head(head_hash); } + debug!(log, "Database pruning complete"); + Ok(()) } } @@ -174,6 +254,17 @@ pub trait Migrate, Cold: ItemStore>: pub struct NullMigrator; impl, Cold: ItemStore> Migrate for NullMigrator { + fn process_finalization( + &self, + _finalized_state_root: BeaconStateHash, + _new_finalized_state: BeaconState, + _head_tracker: Arc, + _old_finalized_checkpoint: Checkpoint, + _new_finalized_checkpoint: Checkpoint, + ) -> Result<(), BeaconChainError> { + Ok(()) + } + fn new(_: Arc>, _: Logger) -> Self { NullMigrator } @@ -184,48 +275,59 @@ impl, Cold: ItemStore> Migrate fo /// Mostly useful for tests. pub struct BlockingMigrator, Cold: ItemStore> { db: Arc>, + log: Logger, } impl, Cold: ItemStore> Migrate for BlockingMigrator { - fn new(db: Arc>, _: Logger) -> Self { - BlockingMigrator { db } + fn new(db: Arc>, log: Logger) -> Self { + BlockingMigrator { db, log } } fn process_finalization( &self, - state_root: Hash256, + finalized_state_root: BeaconStateHash, new_finalized_state: BeaconState, - _max_finality_distance: u64, head_tracker: Arc, - old_finalized_block_hash: SignedBeaconBlockHash, - new_finalized_block_hash: SignedBeaconBlockHash, - ) { - if let Err(e) = process_finalization(self.db.clone(), state_root, &new_finalized_state) { - // This migrator is only used for testing, so we just log to stderr without a logger. 
- eprintln!("Migration error: {:?}", e); - } - - if let Err(e) = Self::prune_abandoned_forks( + old_finalized_checkpoint: Checkpoint, + new_finalized_checkpoint: Checkpoint, + ) -> Result<(), BeaconChainError> { + Self::prune_abandoned_forks( self.db.clone(), head_tracker, - old_finalized_block_hash, - new_finalized_block_hash, - new_finalized_state.slot, + finalized_state_root, + &new_finalized_state, + old_finalized_checkpoint, + new_finalized_checkpoint, + &self.log, + )?; + + match migrate_database( + self.db.clone(), + finalized_state_root.into(), + &new_finalized_state, ) { - eprintln!("Pruning error: {:?}", e); + Ok(()) => Ok(()), + Err(Error::HotColdDBError(HotColdDBError::FreezeSlotUnaligned(slot))) => { + debug!( + self.log, + "Database migration postponed, unaligned finalized block"; + "slot" => slot.as_u64() + ); + Ok(()) + } + Err(e) => Err(e.into()), } } } type MpscSender = mpsc::Sender<( - Hash256, + BeaconStateHash, BeaconState, Arc, - SignedBeaconBlockHash, - SignedBeaconBlockHash, - Slot, + Checkpoint, + Checkpoint, )>; /// Migrator that runs a background thread to migrate state from the hot to the cold database. 
@@ -243,34 +345,26 @@ impl, Cold: ItemStore> Migrate Self { db, tx_thread, log } } - /// Perform the freezing operation on the database, fn process_finalization( &self, - finalized_state_root: Hash256, + finalized_state_root: BeaconStateHash, new_finalized_state: BeaconState, - max_finality_distance: u64, head_tracker: Arc, - old_finalized_block_hash: SignedBeaconBlockHash, - new_finalized_block_hash: SignedBeaconBlockHash, - ) { - if !self.needs_migration(new_finalized_state.slot, max_finality_distance) { - return; - } - + old_finalized_checkpoint: Checkpoint, + new_finalized_checkpoint: Checkpoint, + ) -> Result<(), BeaconChainError> { let (ref mut tx, ref mut thread) = *self.tx_thread.lock(); - let new_finalized_slot = new_finalized_state.slot; if let Err(tx_err) = tx.send(( finalized_state_root, new_finalized_state, head_tracker, - old_finalized_block_hash, - new_finalized_block_hash, - new_finalized_slot, + old_finalized_checkpoint, + new_finalized_checkpoint, )) { let (new_tx, new_thread) = Self::spawn_thread(self.db.clone(), self.log.clone()); - drop(mem::replace(tx, new_tx)); + *tx = new_tx; let old_thread = mem::replace(thread, new_thread); // Join the old thread, which will probably have panicked, or may have @@ -286,46 +380,43 @@ impl, Cold: ItemStore> Migrate // Retry at most once, we could recurse but that would risk overflowing the stack. let _ = tx.send(tx_err.0); } + + Ok(()) } } impl, Cold: ItemStore> BackgroundMigrator { - /// Return true if a migration needs to be performed, given a new `finalized_slot`. - fn needs_migration(&self, finalized_slot: Slot, max_finality_distance: u64) -> bool { - let finality_distance = finalized_slot - self.db.get_split_slot(); - finality_distance > max_finality_distance - } - - #[allow(clippy::type_complexity)] /// Spawn a new child thread to run the migration process. /// /// Return a channel handle for sending new finalized states to the thread. 
fn spawn_thread( db: Arc>, log: Logger, - ) -> ( - mpsc::Sender<( - Hash256, - BeaconState, - Arc, - SignedBeaconBlockHash, - SignedBeaconBlockHash, - Slot, - )>, - thread::JoinHandle<()>, - ) { + ) -> (MpscSender, thread::JoinHandle<()>) { let (tx, rx) = mpsc::channel(); let thread = thread::spawn(move || { while let Ok(( state_root, state, head_tracker, - old_finalized_block_hash, - new_finalized_block_hash, - new_finalized_slot, + old_finalized_checkpoint, + new_finalized_checkpoint, )) = rx.recv() { - match process_finalization(db.clone(), state_root, &state) { + match Self::prune_abandoned_forks( + db.clone(), + head_tracker, + state_root, + &state, + old_finalized_checkpoint, + new_finalized_checkpoint, + &log, + ) { + Ok(()) => {} + Err(e) => warn!(log, "Block pruning failed: {:?}", e), + } + + match migrate_database(db.clone(), state_root.into(), &state) { Ok(()) => {} Err(Error::HotColdDBError(HotColdDBError::FreezeSlotUnaligned(slot))) => { debug!( @@ -342,17 +433,6 @@ impl, Cold: ItemStore> BackgroundMigrator {} - Err(e) => warn!(log, "Block pruning failed: {:?}", e), - } } }); diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 76993f87a17..8690c2e8d2d 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -3,6 +3,7 @@ pub use crate::beacon_chain::{ }; use crate::migrate::{BlockingMigrator, Migrate, NullMigrator}; pub use crate::persisted_beacon_chain::PersistedBeaconChain; +use crate::slog::Drain; use crate::{ builder::{BeaconChainBuilder, Witness}, eth1_chain::CachingEth1Backend, @@ -10,20 +11,23 @@ use crate::{ BeaconChain, BeaconChainTypes, StateSkipConfig, }; use genesis::interop_genesis_state; +use rand::rngs::StdRng; +use rand::Rng; +use rand_core::SeedableRng; use rayon::prelude::*; use sloggers::{null::NullLoggerBuilder, Build}; use slot_clock::TestingSlotClock; use state_processing::per_slot_processing; use std::borrow::Cow; -use 
std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::sync::Arc; use std::time::Duration; -use store::{config::StoreConfig, HotColdDB, ItemStore, LevelDB, MemoryStore}; +use store::{config::StoreConfig, BlockReplay, HotColdDB, ItemStore, LevelDB, MemoryStore}; use tempfile::{tempdir, TempDir}; use tree_hash::TreeHash; use types::{ - AggregateSignature, Attestation, BeaconState, BeaconStateHash, ChainSpec, Domain, EthSpec, - Hash256, Keypair, SecretKey, SelectionProof, SignedAggregateAndProof, SignedBeaconBlock, + AggregateSignature, Attestation, BeaconState, BeaconStateHash, ChainSpec, Domain, Epoch, + EthSpec, Hash256, Keypair, SelectionProof, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockHash, SignedRoot, Slot, SubnetId, }; @@ -44,11 +48,25 @@ pub type BaseHarnessType = Witn TColdStore, >; -pub type HarnessType = BaseHarnessType, MemoryStore>; -pub type DiskHarnessType = +pub type NullMigratorEphemeralHarnessType = + BaseHarnessType, MemoryStore>; +pub type BlockingMigratorDiskHarnessType = BaseHarnessType, LevelDB>, E, LevelDB, LevelDB>; +pub type BlockingMigratorEphemeralHarnessType = BaseHarnessType< + BlockingMigrator, MemoryStore>, + E, + MemoryStore, + MemoryStore, +>; + +pub type AddBlocksResult = ( + HashMap, + HashMap, + SignedBeaconBlockHash, + BeaconState, +); -/// Indicates how the `BeaconChainHarness` should produce blocks. +/// Deprecated: Indicates how the `BeaconChainHarness` should produce blocks. #[derive(Clone, Copy, Debug)] pub enum BlockStrategy { /// Produce blocks upon the canonical head (normal case). @@ -64,7 +82,7 @@ pub enum BlockStrategy { }, } -/// Indicates how the `BeaconChainHarness` should produce attestations. +/// Deprecated: Indicates how the `BeaconChainHarness` should produce attestations. #[derive(Clone, Debug)] pub enum AttestationStrategy { /// All validators attest to whichever block the `BeaconChainHarness` has produced. 
@@ -73,32 +91,98 @@ pub enum AttestationStrategy { SomeValidators(Vec), } +fn make_rng() -> StdRng { + // Nondeterminism in tests is a highly undesirable thing. Seed the RNG to some arbitrary + // but fixed value for reproducibility. + StdRng::seed_from_u64(0x0DDB1A5E5BAD5EEDu64) +} + /// A testing harness which can instantiate a `BeaconChain` and populate it with blocks and /// attestations. /// /// Used for testing. pub struct BeaconChainHarness { + pub validators_keypairs: Vec, + pub chain: BeaconChain, - pub keypairs: Vec, pub spec: ChainSpec, pub data_dir: TempDir, + + pub rng: StdRng, +} + +type HarnessAttestations = Vec<( + Vec<(Attestation, SubnetId)>, + Option>, +)>; + +impl BeaconChainHarness> { + pub fn new(eth_spec_instance: E, validators_keypairs: Vec) -> Self { + let data_dir = tempdir().unwrap(); + let mut spec = E::default_spec(); + + // Setting the target aggregators to really high means that _all_ validators in the + // committee are required to produce an aggregate. This is overkill, however with small + // validator counts it's the only way to be certain there is _at least one_ aggregator per + // committee. 
+ spec.target_aggregators_per_committee = 1 << 32; + + let decorator = slog_term::PlainDecorator::new(slog_term::TestStdoutWriter); + let drain = slog_term::FullFormat::new(decorator).build(); + let debug_level = slog::LevelFilter::new(drain, slog::Level::Debug); + let log = slog::Logger::root(std::sync::Mutex::new(debug_level).fuse(), o!()); + + let config = StoreConfig::default(); + let store = Arc::new(HotColdDB::open_ephemeral(config, spec.clone(), log.clone()).unwrap()); + + let chain = BeaconChainBuilder::new(eth_spec_instance) + .logger(log.clone()) + .custom_spec(spec.clone()) + .store(store.clone()) + .store_migrator(BlockingMigrator::new(store, log.clone())) + .data_dir(data_dir.path().to_path_buf()) + .genesis_state( + interop_genesis_state::(&validators_keypairs, HARNESS_GENESIS_TIME, &spec) + .unwrap(), + ) + .unwrap() + .dummy_eth1_backend() + .unwrap() + .null_event_handler() + .testing_slot_clock(HARNESS_SLOT_TIME) + .unwrap() + .build() + .unwrap(); + + Self { + spec: chain.spec.clone(), + chain, + validators_keypairs, + data_dir, + rng: make_rng(), + } + } } -impl BeaconChainHarness> { +impl BeaconChainHarness> { /// Instantiate a new harness with `validator_count` initial validators. - pub fn new(eth_spec_instance: E, keypairs: Vec, config: StoreConfig) -> Self { + pub fn new_with_store_config( + eth_spec_instance: E, + validators_keypairs: Vec, + config: StoreConfig, + ) -> Self { // Setting the target aggregators to really high means that _all_ validators in the // committee are required to produce an aggregate. This is overkill, however with small // validator counts it's the only way to be certain there is _at least one_ aggregator per // committee. 
- Self::new_with_target_aggregators(eth_spec_instance, keypairs, 1 << 32, config) + Self::new_with_target_aggregators(eth_spec_instance, validators_keypairs, 1 << 32, config) } /// Instantiate a new harness with `validator_count` initial validators and a custom /// `target_aggregators_per_committee` spec value pub fn new_with_target_aggregators( eth_spec_instance: E, - keypairs: Vec, + validators_keypairs: Vec, target_aggregators_per_committee: u64, config: StoreConfig, ) -> Self { @@ -107,7 +191,11 @@ impl BeaconChainHarness> { spec.target_aggregators_per_committee = target_aggregators_per_committee; - let log = NullLoggerBuilder.build().expect("logger should build"); + let decorator = slog_term::PlainDecorator::new(slog_term::TestStdoutWriter); + let drain = slog_term::FullFormat::new(decorator).build(); + let debug_level = slog::LevelFilter::new(drain, slog::Level::Debug); + let log = slog::Logger::root(std::sync::Mutex::new(debug_level).fuse(), o!()); + let store = HotColdDB::open_ephemeral(config, spec.clone(), log.clone()).unwrap(); let chain = BeaconChainBuilder::new(eth_spec_instance) .logger(log) @@ -116,7 +204,7 @@ impl BeaconChainHarness> { .store_migrator(NullMigrator) .data_dir(data_dir.path().to_path_buf()) .genesis_state( - interop_genesis_state::(&keypairs, HARNESS_GENESIS_TIME, &spec) + interop_genesis_state::(&validators_keypairs, HARNESS_GENESIS_TIME, &spec) .expect("should generate interop state"), ) .expect("should build state using recent genesis") @@ -131,32 +219,37 @@ impl BeaconChainHarness> { Self { spec: chain.spec.clone(), chain, - keypairs, + validators_keypairs, data_dir, + rng: make_rng(), } } } -impl BeaconChainHarness> { +impl BeaconChainHarness> { /// Instantiate a new harness with `validator_count` initial validators. 
pub fn new_with_disk_store( eth_spec_instance: E, store: Arc, LevelDB>>, - keypairs: Vec, + validators_keypairs: Vec, ) -> Self { let data_dir = tempdir().expect("should create temporary data_dir"); let spec = E::default_spec(); - let log = NullLoggerBuilder.build().expect("logger should build"); + let decorator = slog_term::PlainDecorator::new(slog_term::TestStdoutWriter); + let drain = slog_term::FullFormat::new(decorator).build(); + let debug_level = slog::LevelFilter::new(drain, slog::Level::Debug); + let log = slog::Logger::root(std::sync::Mutex::new(debug_level).fuse(), o!()); let chain = BeaconChainBuilder::new(eth_spec_instance) .logger(log.clone()) .custom_spec(spec.clone()) + .import_max_skip_slots(None) .store(store.clone()) .store_migrator(BlockingMigrator::new(store, log.clone())) .data_dir(data_dir.path().to_path_buf()) .genesis_state( - interop_genesis_state::(&keypairs, HARNESS_GENESIS_TIME, &spec) + interop_genesis_state::(&validators_keypairs, HARNESS_GENESIS_TIME, &spec) .expect("should generate interop state"), ) .expect("should build state using recent genesis") @@ -171,16 +264,19 @@ impl BeaconChainHarness> { Self { spec: chain.spec.clone(), chain, - keypairs, + validators_keypairs, data_dir, + rng: make_rng(), } } +} +impl BeaconChainHarness> { /// Instantiate a new harness with `validator_count` initial validators. 
pub fn resume_from_disk_store( eth_spec_instance: E, store: Arc, LevelDB>>, - keypairs: Vec, + validators_keypairs: Vec, data_dir: TempDir, ) -> Self { let spec = E::default_spec(); @@ -190,6 +286,7 @@ impl BeaconChainHarness> { let chain = BeaconChainBuilder::new(eth_spec_instance) .logger(log.clone()) .custom_spec(spec) + .import_max_skip_slots(None) .store(store.clone()) .store_migrator( as Migrate>::new( store, @@ -209,8 +306,9 @@ impl BeaconChainHarness> { Self { spec: chain.spec.clone(), chain, - keypairs, + validators_keypairs, data_dir, + rng: make_rng(), } } } @@ -222,270 +320,68 @@ where Hot: ItemStore, Cold: ItemStore, { - /// Advance the slot of the `BeaconChain`. - /// - /// Does not produce blocks or attestations. - pub fn advance_slot(&self) { - self.chain.slot_clock.advance_slot(); + pub fn get_all_validators(&self) -> Vec { + (0..self.validators_keypairs.len()).collect() } - /// Extend the `BeaconChain` with some blocks and attestations. Returns the root of the - /// last-produced block (the head of the chain). - /// - /// Chain will be extended by `num_blocks` blocks. - /// - /// The `block_strategy` dictates where the new blocks will be placed. - /// - /// The `attestation_strategy` dictates which validators will attest to the newly created - /// blocks. - pub fn extend_chain( - &self, - num_blocks: usize, - block_strategy: BlockStrategy, - attestation_strategy: AttestationStrategy, - ) -> Hash256 { - let mut i = 0; - self.extend_chain_while( - |_, _| { - i += 1; - i <= num_blocks - }, - block_strategy, - attestation_strategy, - ) + pub fn slots_per_epoch(&self) -> u64 { + E::slots_per_epoch() } - /// Extend the `BeaconChain` with some blocks and attestations. Returns the root of the - /// last-produced block (the head of the chain). - /// - /// Chain will be extended while `predidcate` returns `true`. - /// - /// The `block_strategy` dictates where the new blocks will be placed. 
- /// - /// The `attestation_strategy` dictates which validators will attest to the newly created - /// blocks. - pub fn extend_chain_while( - &self, - mut predicate: F, - block_strategy: BlockStrategy, - attestation_strategy: AttestationStrategy, - ) -> Hash256 - where - F: FnMut(&SignedBeaconBlock, &BeaconState) -> bool, - { - let mut state = { - // Determine the slot for the first block (or skipped block). - let state_slot = match block_strategy { - BlockStrategy::OnCanonicalHead => { - self.chain.slot().expect("should have a slot") - 1 - } - BlockStrategy::ForkCanonicalChainAt { previous_slot, .. } => previous_slot, - }; - - self.chain - .state_at_slot(state_slot, StateSkipConfig::WithStateRoots) - .expect("should find state for slot") - }; - - // Determine the first slot where a block should be built. - let mut slot = match block_strategy { - BlockStrategy::OnCanonicalHead => self.chain.slot().expect("should have a slot"), - BlockStrategy::ForkCanonicalChainAt { first_slot, .. } => first_slot, - }; - - let mut head_block_root = None; - - loop { - let (block, new_state) = self.build_block(state.clone(), slot, block_strategy); - - if !predicate(&block, &new_state) { - break; - } - - while self.chain.slot().expect("should have a slot") < slot { - self.advance_slot(); - } - - let block_root = self - .chain - .process_block(block) - .expect("should not error during block processing"); - - self.chain.fork_choice().expect("should find head"); - head_block_root = Some(block_root); - - self.add_attestations_for_slot(&attestation_strategy, &new_state, block_root, slot); - - state = new_state; - slot += 1; - } - - head_block_root.expect("did not produce any blocks") - } - - /// A simple method to produce a block at the current slot without applying it to the chain. - /// - /// Always uses `BlockStrategy::OnCanonicalHead`. 
- pub fn get_block(&self) -> (SignedBeaconBlock, BeaconState) { - let state = self - .chain - .state_at_slot( - self.chain.slot().unwrap() - 1, - StateSkipConfig::WithStateRoots, - ) - .unwrap(); - - let slot = self.chain.slot().unwrap(); - - self.build_block(state, slot, BlockStrategy::OnCanonicalHead) + pub fn epoch_start_slot(&self, epoch: u64) -> u64 { + let epoch = Epoch::new(epoch); + epoch.start_slot(E::slots_per_epoch()).into() } - /// A simple method to produce and process all attestation at the current slot. Always uses - /// `AttestationStrategy::AllValidators`. - pub fn generate_all_attestations(&self) { - let slot = self.chain.slot().unwrap(); - let (state, block_root) = { - let head = self.chain.head().unwrap(); - (head.beacon_state.clone(), head.beacon_block_root) - }; - self.add_attestations_for_slot( - &AttestationStrategy::AllValidators, - &state, - block_root, - slot, - ); + pub fn get_current_state(&self) -> BeaconState { + self.chain.head().unwrap().beacon_state } - /// Returns current canonical head slot - pub fn get_chain_slot(&self) -> Slot { + pub fn get_current_slot(&self) -> Slot { self.chain.slot().unwrap() } - /// Returns current canonical head state - pub fn get_head_state(&self) -> BeaconState { - self.chain.head().unwrap().beacon_state + pub fn get_block(&self, block_hash: SignedBeaconBlockHash) -> Option> { + self.chain.get_block(&block_hash.into()).unwrap() } - /// Adds a single block (synchronously) onto either the canonical chain (block_strategy == - /// OnCanonicalHead) or a fork (block_strategy == ForkCanonicalChainAt). 
- pub fn add_block( - &self, - state: &BeaconState, - block_strategy: BlockStrategy, - slot: Slot, - validators: &[usize], - ) -> (SignedBeaconBlockHash, BeaconState) { - while self.chain.slot().expect("should have a slot") < slot { - self.advance_slot(); - } - - let (block, new_state) = self.build_block(state.clone(), slot, block_strategy); - - let block_root = self - .chain - .process_block(block) - .expect("should not error during block processing"); + pub fn block_exists(&self, block_hash: SignedBeaconBlockHash) -> bool { + self.get_block(block_hash).is_some() + } - self.chain.fork_choice().expect("should find head"); + pub fn get_hot_state(&self, state_hash: BeaconStateHash) -> Option> { + self.chain + .store + .load_hot_state(&state_hash.into(), BlockReplay::Accurate) + .unwrap() + } - let attestation_strategy = AttestationStrategy::SomeValidators(validators.to_vec()); - self.add_attestations_for_slot(&attestation_strategy, &new_state, block_root, slot); - (block_root.into(), new_state) + pub fn get_cold_state(&self, state_hash: BeaconStateHash) -> Option> { + self.chain + .store + .load_cold_state(&state_hash.into()) + .unwrap() } - #[allow(clippy::type_complexity)] - /// `add_block()` repeated `num_blocks` times. 
- pub fn add_blocks( - &self, - mut state: BeaconState, - mut slot: Slot, - num_blocks: usize, - attesting_validators: &[usize], - block_strategy: BlockStrategy, - ) -> ( - HashMap, - HashMap, - Slot, - SignedBeaconBlockHash, - BeaconState, - ) { - let mut blocks: HashMap = HashMap::with_capacity(num_blocks); - let mut states: HashMap = HashMap::with_capacity(num_blocks); - for _ in 0..num_blocks { - let (new_root_hash, new_state) = - self.add_block(&state, block_strategy, slot, attesting_validators); - blocks.insert(slot, new_root_hash); - states.insert(slot, new_state.tree_hash_root().into()); - state = new_state; - slot += 1; - } - let head_hash = blocks[&(slot - 1)]; - (blocks, states, slot, head_hash, state) + pub fn hot_state_exists(&self, state_hash: BeaconStateHash) -> bool { + self.get_hot_state(state_hash).is_some() } - #[allow(clippy::type_complexity)] - /// A wrapper on `add_blocks()` to avoid passing enums explicitly. - pub fn add_canonical_chain_blocks( - &self, - state: BeaconState, - slot: Slot, - num_blocks: usize, - attesting_validators: &[usize], - ) -> ( - HashMap, - HashMap, - Slot, - SignedBeaconBlockHash, - BeaconState, - ) { - let block_strategy = BlockStrategy::OnCanonicalHead; - self.add_blocks( - state, - slot, - num_blocks, - attesting_validators, - block_strategy, - ) + pub fn cold_state_exists(&self, state_hash: BeaconStateHash) -> bool { + self.get_cold_state(state_hash).is_some() } - #[allow(clippy::type_complexity)] - /// A wrapper on `add_blocks()` to avoid passing enums explicitly. 
- pub fn add_stray_blocks( - &self, - state: BeaconState, - slot: Slot, - num_blocks: usize, - attesting_validators: &[usize], - ) -> ( - HashMap, - HashMap, - Slot, - SignedBeaconBlockHash, - BeaconState, - ) { - let block_strategy = BlockStrategy::ForkCanonicalChainAt { - previous_slot: slot, - first_slot: slot + 2, - }; - self.add_blocks( - state, - slot + 2, - num_blocks, - attesting_validators, - block_strategy, - ) + pub fn is_skipped_slot(&self, state: &BeaconState, slot: Slot) -> bool { + state.get_block_root(slot).unwrap() == state.get_block_root(slot - 1).unwrap() } - /// Returns a newly created block, signed by the proposer for the given slot. - fn build_block( - &self, + pub fn make_block( + &mut self, mut state: BeaconState, slot: Slot, - block_strategy: BlockStrategy, ) -> (SignedBeaconBlock, BeaconState) { - if slot < state.slot { - panic!("produce slot cannot be prior to the state slot"); - } + assert_ne!(slot, 0, "can't produce a block at slot 0"); + assert!(slot >= state.slot); while state.slot < slot { per_slot_processing(&mut state, None, &self.spec) @@ -496,34 +392,37 @@ where .build_all_caches(&self.spec) .expect("should build caches"); - let proposer_index = match block_strategy { - BlockStrategy::OnCanonicalHead => self - .chain - .block_proposer(slot) - .expect("should get block proposer from chain"), - _ => state - .get_beacon_proposer_index(slot, &self.spec) - .expect("should get block proposer from state"), - }; + let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap(); - let sk = &self.keypairs[proposer_index].sk; - let fork = &state.fork; + // If we produce two blocks for the same slot, they hash up to the same value and + // BeaconChain errors out with `BlockIsAlreadyKnown`. Vary the graffiti so that we produce + // different blocks each time. 
+ self.chain.set_graffiti(self.rng.gen::<[u8; 32]>()); let randao_reveal = { let epoch = slot.epoch(E::slots_per_epoch()); - let domain = - self.spec - .get_domain(epoch, Domain::Randao, fork, state.genesis_validators_root); + let domain = self.spec.get_domain( + epoch, + Domain::Randao, + &state.fork, + state.genesis_validators_root, + ); let message = epoch.signing_root(domain); + let sk = &self.validators_keypairs[proposer_index].sk; sk.sign(message) }; let (block, state) = self .chain - .produce_block_on_state(state, slot, randao_reveal) - .expect("should produce block"); + .produce_block_on_state(state, slot, randao_reveal, None) + .unwrap(); - let signed_block = block.sign(sk, &state.fork, state.genesis_validators_root, &self.spec); + let signed_block = block.sign( + &self.validators_keypairs[proposer_index].sk, + &state.fork, + state.genesis_validators_root, + &self.spec, + ); (signed_block, state) } @@ -533,25 +432,18 @@ where /// The first layer of the Vec is organised per committee. For example, if the return value is /// called `all_attestations`, then all attestations in `all_attestations[0]` will be for /// committee 0, whilst all in `all_attestations[1]` will be for committee 1. 
- pub fn get_unaggregated_attestations( + pub fn make_unaggregated_attestations( &self, - attestation_strategy: &AttestationStrategy, + attesting_validators: &[usize], state: &BeaconState, - head_block_root: Hash256, + head_block_root: SignedBeaconBlockHash, attestation_slot: Slot, ) -> Vec, SubnetId)>> { - let spec = &self.spec; - let fork = &state.fork; - - let attesting_validators = self.get_attesting_validators(attestation_strategy); - - let committee_count = state - .get_committee_count_at_slot(state.slot) - .expect("should get committee count"); + let committee_count = state.get_committee_count_at_slot(state.slot).unwrap(); state .get_beacon_committees_at_slot(state.slot) - .expect("should get committees") + .unwrap() .iter() .map(|bc| { bc.committee @@ -566,21 +458,18 @@ where .produce_unaggregated_attestation_for_block( attestation_slot, bc.index, - head_block_root, + head_block_root.into(), Cow::Borrowed(state), ) - .expect("should produce attestation"); + .unwrap(); - attestation - .aggregation_bits - .set(i, true) - .expect("should be able to set aggregation bits"); + attestation.aggregation_bits.set(i, true).unwrap(); attestation.signature = { - let domain = spec.get_domain( + let domain = self.spec.get_domain( attestation.data.target.epoch, Domain::BeaconAttester, - fork, + &state.fork, state.genesis_validators_root, ); @@ -588,7 +477,9 @@ where let mut agg_sig = AggregateSignature::infinity(); - agg_sig.add_assign(&self.get_sk(*validator_index).sign(message)); + agg_sig.add_assign( + &self.validators_keypairs[*validator_index].sk.sign(message), + ); agg_sig }; @@ -598,7 +489,7 @@ where committee_count, &self.chain.spec, ) - .expect("should get subnet_id"); + .unwrap(); Some((attestation, subnet_id)) }) @@ -607,56 +498,49 @@ where .collect() } - fn get_attesting_validators(&self, attestation_strategy: &AttestationStrategy) -> Vec { - match attestation_strategy { - AttestationStrategy::AllValidators => (0..self.keypairs.len()).collect(), - 
AttestationStrategy::SomeValidators(vec) => vec.clone(), - } - } - - /// Generates a `Vec` for some attestation strategy and head_block. - pub fn add_attestations_for_slot( + /// Deprecated: Use make_unaggregated_attestations() instead. + /// + /// A list of attestations for each committee for the given slot. + /// + /// The first layer of the Vec is organised per committee. For example, if the return value is + /// called `all_attestations`, then all attestations in `all_attestations[0]` will be for + /// committee 0, whilst all in `all_attestations[1]` will be for committee 1. + pub fn get_unaggregated_attestations( &self, attestation_strategy: &AttestationStrategy, state: &BeaconState, head_block_root: Hash256, - head_block_slot: Slot, - ) { - // These attestations will not be accepted by the chain so no need to generate them. - if state.slot + E::slots_per_epoch() < self.chain.slot().expect("should get slot") { - return; - } - - let spec = &self.spec; - let fork = &state.fork; - - let attesting_validators = self.get_attesting_validators(attestation_strategy); - - let unaggregated_attestations = self.get_unaggregated_attestations( - attestation_strategy, + attestation_slot: Slot, + ) -> Vec, SubnetId)>> { + let validators: Vec = match attestation_strategy { + AttestationStrategy::AllValidators => self.get_all_validators(), + AttestationStrategy::SomeValidators(vals) => vals.clone(), + }; + self.make_unaggregated_attestations( + &validators, state, - head_block_root, - head_block_slot, - ); + head_block_root.into(), + attestation_slot, + ) + } - // Loop through all unaggregated attestations, submit them to the chain and also submit a - // single aggregate. - unaggregated_attestations - .into_iter() - .for_each(|committee_attestations| { - // Submit each unaggregated attestation to the chain. 
- for (attestation, subnet_id) in &committee_attestations { - self.chain - .verify_unaggregated_attestation_for_gossip(attestation.clone(), *subnet_id) - .expect("should not error during attestation processing") - .add_to_pool(&self.chain) - .expect("should add attestation to naive pool"); - } + pub fn make_attestations( + &self, + attesting_validators: &[usize], + state: &BeaconState, + block_hash: SignedBeaconBlockHash, + slot: Slot, + ) -> HarnessAttestations { + let unaggregated_attestations = + self.make_unaggregated_attestations(&attesting_validators, &state, block_hash, slot); + let aggregated_attestations: Vec>> = unaggregated_attestations + .iter() + .map(|committee_attestations| { // If there are any attestations in this committee, create an aggregate. if let Some((attestation, _)) = committee_attestations.first() { let bc = state.get_beacon_committee(attestation.data.slot, attestation.data.index) - .expect("should get committee"); + .unwrap(); let aggregator_index = bc.committee .iter() @@ -667,13 +551,13 @@ where let selection_proof = SelectionProof::new::( state.slot, - self.get_sk(*validator_index), - fork, + &self.validators_keypairs[*validator_index].sk, + &state.fork, state.genesis_validators_root, - spec, + &self.spec, ); - selection_proof.is_aggregator(bc.committee.len(), spec).unwrap_or(false) + selection_proof.is_aggregator(bc.committee.len(), &self.spec).unwrap_or(false) }) .copied() .unwrap_or_else(|| panic!( @@ -686,7 +570,7 @@ where let aggregate = self .chain .get_aggregated_attestation(&attestation.data) - .expect("should not error whilst finding aggregate") + .unwrap() .unwrap_or_else(|| { committee_attestations.iter().skip(1).fold(attestation.clone(), |mut agg, (att, _)| { agg.aggregate(att); @@ -698,25 +582,307 @@ where aggregator_index as u64, aggregate, None, - self.get_sk(aggregator_index), - fork, + &self.validators_keypairs[aggregator_index].sk, + &state.fork, state.genesis_validators_root, - spec, + &self.spec, ); - let attn = 
self.chain - .verify_aggregated_attestation_for_gossip(signed_aggregate) - .expect("should not error during attestation processing"); + Some(signed_aggregate) + } + else { + None + } + }).collect(); - self.chain.apply_attestation_to_fork_choice(&attn) - .expect("should add attestation to fork choice"); + unaggregated_attestations + .into_iter() + .zip(aggregated_attestations) + .collect() + } - self.chain.add_to_block_inclusion_pool(attn) - .expect("should add attestation to op pool"); - } - }); + pub fn process_block(&self, slot: Slot, block: SignedBeaconBlock) -> SignedBeaconBlockHash { + assert_eq!(self.chain.slot().unwrap(), slot); + let block_hash: SignedBeaconBlockHash = self.chain.process_block(block).unwrap().into(); + self.chain.fork_choice().unwrap(); + block_hash + } + + pub fn process_attestations(&self, attestations: HarnessAttestations) { + for (unaggregated_attestations, maybe_signed_aggregate) in attestations.into_iter() { + for (attestation, subnet_id) in unaggregated_attestations { + self.chain + .verify_unaggregated_attestation_for_gossip(attestation.clone(), subnet_id) + .unwrap() + .add_to_pool(&self.chain) + .unwrap(); + } + + if let Some(signed_aggregate) = maybe_signed_aggregate { + let attn = self + .chain + .verify_aggregated_attestation_for_gossip(signed_aggregate) + .unwrap(); + + self.chain.apply_attestation_to_fork_choice(&attn).unwrap(); + + self.chain.add_to_block_inclusion_pool(attn).unwrap(); + } + } + } + + fn set_current_slot(&self, slot: Slot) { + let current_slot = self.chain.slot().unwrap(); + let current_epoch = current_slot.epoch(E::slots_per_epoch()); + let epoch = slot.epoch(E::slots_per_epoch()); + assert!( + epoch >= current_epoch, + "Jumping backwards to an earlier epoch isn't well defined. \ + Please generate test blocks epoch-by-epoch instead." 
+ ); + self.chain.slot_clock.set_slot(slot.into()); + } + + pub fn add_block_at_slot( + &mut self, + slot: Slot, + state: BeaconState, + ) -> (SignedBeaconBlockHash, SignedBeaconBlock, BeaconState) { + self.set_current_slot(slot); + let (block, new_state) = self.make_block(state, slot); + let block_hash = self.process_block(slot, block.clone()); + (block_hash, block, new_state) + } + + pub fn attest_block( + &self, + state: &BeaconState, + block_hash: SignedBeaconBlockHash, + block: &SignedBeaconBlock, + validators: &[usize], + ) { + let attestations = + self.make_attestations(validators, &state, block_hash, block.message.slot); + self.process_attestations(attestations); + } + + pub fn add_attested_block_at_slot( + &mut self, + slot: Slot, + state: BeaconState, + validators: &[usize], + ) -> (SignedBeaconBlockHash, BeaconState) { + let (block_hash, block, state) = self.add_block_at_slot(slot, state); + self.attest_block(&state, block_hash, &block, validators); + (block_hash, state) + } + + pub fn add_attested_blocks_at_slots( + &mut self, + state: BeaconState, + slots: &[Slot], + validators: &[usize], + ) -> AddBlocksResult { + assert!(!slots.is_empty()); + self.add_attested_blocks_at_slots_given_lbh(state, slots, validators, None) + } + + fn add_attested_blocks_at_slots_given_lbh( + &mut self, + mut state: BeaconState, + slots: &[Slot], + validators: &[usize], + mut latest_block_hash: Option, + ) -> AddBlocksResult { + assert!( + slots.windows(2).all(|w| w[0] <= w[1]), + "Slots have to be sorted" + ); // slice.is_sorted() isn't stabilized at the moment of writing this + let mut block_hash_from_slot: HashMap = HashMap::new(); + let mut state_hash_from_slot: HashMap = HashMap::new(); + for slot in slots { + let (block_hash, new_state) = self.add_attested_block_at_slot(*slot, state, validators); + state = new_state; + block_hash_from_slot.insert(*slot, block_hash); + state_hash_from_slot.insert(*slot, state.tree_hash_root().into()); + latest_block_hash = 
Some(block_hash); + } + ( + block_hash_from_slot, + state_hash_from_slot, + latest_block_hash.unwrap(), + state, + ) + } + + /// A monstrosity of great usefulness. + /// + /// Calls `add_attested_blocks_at_slots` for each of the chains in `chains`, + /// taking care to batch blocks by epoch so that the slot clock gets advanced one + /// epoch at a time. + /// + /// Chains is a vec of `(state, slots, validators)` tuples. + pub fn add_blocks_on_multiple_chains( + &mut self, + chains: Vec<(BeaconState, Vec, Vec)>, + ) -> Vec> { + let slots_per_epoch = E::slots_per_epoch(); + + let min_epoch = chains + .iter() + .map(|(_, slots, _)| slots.iter().min().unwrap()) + .min() + .unwrap() + .epoch(slots_per_epoch); + let max_epoch = chains + .iter() + .map(|(_, slots, _)| slots.iter().max().unwrap()) + .max() + .unwrap() + .epoch(slots_per_epoch); + + let mut chains = chains + .into_iter() + .map(|(state, slots, validators)| { + ( + state, + slots, + validators, + HashMap::new(), + HashMap::new(), + SignedBeaconBlockHash::from(Hash256::zero()), + ) + }) + .collect::>(); + + for epoch in min_epoch.as_u64()..=max_epoch.as_u64() { + let mut new_chains = vec![]; + + for (head_state, slots, validators, mut block_hashes, mut state_hashes, head_block) in + chains + { + let epoch_slots = slots + .iter() + .filter(|s| s.epoch(slots_per_epoch).as_u64() == epoch) + .copied() + .collect::>(); + + let (new_block_hashes, new_state_hashes, new_head_block, new_head_state) = self + .add_attested_blocks_at_slots_given_lbh( + head_state, + &epoch_slots, + &validators, + Some(head_block), + ); + + block_hashes.extend(new_block_hashes); + state_hashes.extend(new_state_hashes); + + new_chains.push(( + new_head_state, + slots, + validators, + block_hashes, + state_hashes, + new_head_block, + )); + } + + chains = new_chains; + } + + chains + .into_iter() + .map(|(state, _, _, block_hashes, state_hashes, head_block)| { + (block_hashes, state_hashes, head_block, state) + }) + .collect() + } + + pub fn 
get_finalized_checkpoints(&self) -> HashSet { + let chain_dump = self.chain.chain_dump().unwrap(); + chain_dump + .iter() + .cloned() + .map(|checkpoint| checkpoint.beacon_state.finalized_checkpoint.root.into()) + .filter(|block_hash| *block_hash != Hash256::zero().into()) + .collect() + } + + /// Deprecated: Do not modify the slot clock manually; rely on add_attested_blocks_at_slots() + /// instead + /// + /// Advance the slot of the `BeaconChain`. + /// + /// Does not produce blocks or attestations. + pub fn advance_slot(&self) { + self.chain.slot_clock.advance_slot(); } + /// Deprecated: Use make_block() instead + /// + /// Returns a newly created block, signed by the proposer for the given slot. + pub fn build_block( + &mut self, + state: BeaconState, + slot: Slot, + _block_strategy: BlockStrategy, + ) -> (SignedBeaconBlock, BeaconState) { + self.make_block(state, slot) + } + + /// Deprecated: Use add_attested_blocks_at_slots() instead + /// + /// Extend the `BeaconChain` with some blocks and attestations. Returns the root of the + /// last-produced block (the head of the chain). + /// + /// Chain will be extended by `num_blocks` blocks. + /// + /// The `block_strategy` dictates where the new blocks will be placed. + /// + /// The `attestation_strategy` dictates which validators will attest to the newly created + /// blocks. 
+ pub fn extend_chain( + &mut self, + num_blocks: usize, + block_strategy: BlockStrategy, + attestation_strategy: AttestationStrategy, + ) -> Hash256 { + let (state, slots) = match block_strategy { + BlockStrategy::OnCanonicalHead => { + let current_slot: u64 = self.get_current_slot().into(); + let slots: Vec = (current_slot..(current_slot + (num_blocks as u64))) + .map(Slot::new) + .collect(); + let state = self.get_current_state(); + (state, slots) + } + BlockStrategy::ForkCanonicalChainAt { + previous_slot, + first_slot, + } => { + let first_slot_: u64 = first_slot.into(); + let slots: Vec = (first_slot_..(first_slot_ + (num_blocks as u64))) + .map(Slot::new) + .collect(); + let state = self + .chain + .state_at_slot(previous_slot, StateSkipConfig::WithStateRoots) + .unwrap(); + (state, slots) + } + }; + let validators = match attestation_strategy { + AttestationStrategy::AllValidators => self.get_all_validators(), + AttestationStrategy::SomeValidators(vals) => vals, + }; + let (_, _, last_produced_block_hash, _) = + self.add_attested_blocks_at_slots(state, &slots, &validators); + last_produced_block_hash.into() + } + + /// Deprecated: Use add_attested_blocks_at_slots() instead + /// /// Creates two forks: /// /// - The "honest" fork: created by the `honest_validators` who have built `honest_fork_blocks` @@ -726,7 +892,7 @@ where /// /// Returns `(honest_head, faulty_head)`, the roots of the blocks at the top of each chain. pub fn generate_two_forks_by_skipping_a_block( - &self, + &mut self, honest_validators: &[usize], faulty_validators: &[usize], honest_fork_blocks: usize, @@ -765,9 +931,4 @@ where (honest_head, faulty_head) } - - /// Returns the secret key for the given validator index. 
- fn get_sk(&self, validator_index: usize) -> &SecretKey { - &self.keypairs[validator_index].sk - } } diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index 407bf2ee7d3..a01286d64a7 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -26,7 +26,7 @@ lazy_static! { fn produces_attestations() { let num_blocks_produced = MainnetEthSpec::slots_per_epoch() * 4; - let harness = BeaconChainHarness::new( + let mut harness = BeaconChainHarness::new_with_store_config( MainnetEthSpec, KEYPAIRS[..].to_vec(), StoreConfig::default(), diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index 27db261cbc1..ec2200b1d77 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -5,7 +5,9 @@ extern crate lazy_static; use beacon_chain::{ attestation_verification::Error as AttnError, - test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, HarnessType}, + test_utils::{ + AttestationStrategy, BeaconChainHarness, BlockStrategy, NullMigratorEphemeralHarnessType, + }, BeaconChain, BeaconChainTypes, }; use int_to_bytes::int_to_bytes32; @@ -30,7 +32,7 @@ lazy_static! { } /// Returns a beacon chain harness. -fn get_harness(validator_count: usize) -> BeaconChainHarness> { +fn get_harness(validator_count: usize) -> BeaconChainHarness> { let harness = BeaconChainHarness::new_with_target_aggregators( MainnetEthSpec, KEYPAIRS[0..validator_count].to_vec(), @@ -184,8 +186,7 @@ fn get_non_aggregator( /// Tests verification of `SignedAggregateAndProof` from the gossip network. 
#[test] fn aggregated_gossip_verification() { - let harness = get_harness(VALIDATOR_COUNT); - let chain = &harness.chain; + let mut harness = get_harness(VALIDATOR_COUNT); // Extend the chain out a few epochs so we have some chain depth to play with. harness.extend_chain( @@ -197,7 +198,7 @@ fn aggregated_gossip_verification() { // Advance into a slot where there have not been blocks or attestations produced. harness.advance_slot(); - let current_slot = chain.slot().expect("should get slot"); + let current_slot = harness.chain.slot().expect("should get slot"); assert_eq!( current_slot % E::slots_per_epoch(), @@ -532,8 +533,7 @@ fn aggregated_gossip_verification() { /// Tests the verification conditions for an unaggregated attestation on the gossip network. #[test] fn unaggregated_gossip_verification() { - let harness = get_harness(VALIDATOR_COUNT); - let chain = &harness.chain; + let mut harness = get_harness(VALIDATOR_COUNT); // Extend the chain out a few epochs so we have some chain depth to play with. harness.extend_chain( @@ -545,8 +545,8 @@ fn unaggregated_gossip_verification() { // Advance into a slot where there have not been blocks or attestations produced. harness.advance_slot(); - let current_slot = chain.slot().expect("should get slot"); - let current_epoch = chain.epoch().expect("should get epoch"); + let current_slot = harness.chain.slot().expect("should get slot"); + let current_epoch = harness.chain.epoch().expect("should get epoch"); assert_eq!( current_slot % E::slots_per_epoch(), @@ -772,8 +772,7 @@ fn unaggregated_gossip_verification() { /// This also checks that we can do a state lookup if we don't get a hit from the shuffling cache. #[test] fn attestation_that_skips_epochs() { - let harness = get_harness(VALIDATOR_COUNT); - let chain = &harness.chain; + let mut harness = get_harness(VALIDATOR_COUNT); // Extend the chain out a few epochs so we have some chain depth to play with. 
harness.extend_chain( @@ -782,16 +781,18 @@ fn attestation_that_skips_epochs() { AttestationStrategy::SomeValidators(vec![]), ); - let current_slot = chain.slot().expect("should get slot"); - let current_epoch = chain.epoch().expect("should get epoch"); + let current_slot = harness.chain.slot().expect("should get slot"); + let current_epoch = harness.chain.epoch().expect("should get epoch"); let earlier_slot = (current_epoch - 2).start_slot(MainnetEthSpec::slots_per_epoch()); - let earlier_block = chain + let earlier_block = harness + .chain .block_at_slot(earlier_slot) .expect("should not error getting block at slot") .expect("should find block at slot"); - let mut state = chain + let mut state = harness + .chain .get_state(&earlier_block.state_root(), Some(earlier_slot)) .expect("should not error getting state") .expect("should find state"); diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 30c7c5603f1..409511761a1 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -4,7 +4,9 @@ extern crate lazy_static; use beacon_chain::{ - test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, HarnessType}, + test_utils::{ + AttestationStrategy, BeaconChainHarness, BlockStrategy, NullMigratorEphemeralHarnessType, + }, BeaconSnapshot, BlockError, }; use store::config::StoreConfig; @@ -18,8 +20,9 @@ use types::{ type E = MainnetEthSpec; // Should ideally be divisible by 3. -pub const VALIDATOR_COUNT: usize = 24; -pub const CHAIN_SEGMENT_LENGTH: usize = 64 * 5; +const VALIDATOR_COUNT: usize = 24; +const CHAIN_SEGMENT_LENGTH: usize = 64 * 5; +const BLOCK_INDICES: &[usize] = &[0, 1, 32, 64, 68 + 1, 129, CHAIN_SEGMENT_LENGTH - 1]; lazy_static! { /// A cached set of keys. @@ -30,7 +33,7 @@ lazy_static! 
{ } fn get_chain_segment() -> Vec> { - let harness = get_harness(VALIDATOR_COUNT); + let mut harness = get_harness(VALIDATOR_COUNT); harness.extend_chain( CHAIN_SEGMENT_LENGTH, @@ -47,8 +50,8 @@ fn get_chain_segment() -> Vec> { .collect() } -fn get_harness(validator_count: usize) -> BeaconChainHarness> { - let harness = BeaconChainHarness::new( +fn get_harness(validator_count: usize) -> BeaconChainHarness> { + let harness = BeaconChainHarness::new_with_store_config( MainnetEthSpec, KEYPAIRS[0..validator_count].to_vec(), StoreConfig::default(), @@ -80,7 +83,7 @@ fn junk_aggregate_signature() -> AggregateSignature { fn update_proposal_signatures( snapshots: &mut [BeaconSnapshot], - harness: &BeaconChainHarness>, + harness: &BeaconChainHarness>, ) { for snapshot in snapshots { let spec = &harness.chain.spec; @@ -90,7 +93,7 @@ fn update_proposal_signatures( .get_beacon_proposer_index(slot, spec) .expect("should find proposer index"); let keypair = harness - .keypairs + .validators_keypairs .get(proposer_index) .expect("proposer keypair should be available"); @@ -272,17 +275,73 @@ fn chain_segment_non_linear_slots() { ); } -#[test] -fn invalid_signatures() { - let mut checked_attestation = false; +fn assert_invalid_signature( + harness: &BeaconChainHarness>, + block_index: usize, + snapshots: &[BeaconSnapshot], + item: &str, +) { + let blocks = snapshots + .iter() + .map(|snapshot| snapshot.beacon_block.clone()) + .collect(); - for &block_index in &[0, 1, 32, 64, 68 + 1, 129, CHAIN_SEGMENT.len() - 1] { - let harness = get_harness(VALIDATOR_COUNT); - harness - .chain - .slot_clock - .set_slot(CHAIN_SEGMENT.last().unwrap().beacon_block.slot().as_u64()); + // Ensure the block will be rejected if imported in a chain segment. 
+ assert!( + matches!( + harness + .chain + .process_chain_segment(blocks) + .into_block_error(), + Err(BlockError::InvalidSignature) + ), + "should not import chain segment with an invalid {} signature", + item + ); + // Ensure the block will be rejected if imported on its own (without gossip checking). + let ancestor_blocks = CHAIN_SEGMENT + .iter() + .take(block_index) + .map(|snapshot| snapshot.beacon_block.clone()) + .collect(); + // We don't care if this fails, we just call this to ensure that all prior blocks have been + // imported prior to this test. + let _ = harness.chain.process_chain_segment(ancestor_blocks); + assert!( + matches!( + harness + .chain + .process_block(snapshots[block_index].beacon_block.clone()), + Err(BlockError::InvalidSignature) + ), + "should not import individual block with an invalid {} signature", + item + ); + + // NOTE: we choose not to check gossip verification here. It only checks one signature + // (proposal) and that is already tested elsewhere in this file. + // + // It's not trivial to just check gossip verification since it will start refusing + // blocks as soon as it has seen one valid proposal signature for a given (validator, + // slot) tuple. +} + +fn get_invalid_sigs_harness() -> BeaconChainHarness> { + let harness = get_harness(VALIDATOR_COUNT); + harness + .chain + .slot_clock + .set_slot(CHAIN_SEGMENT.last().unwrap().beacon_block.slot().as_u64()); + harness +} +#[test] +fn invalid_signature_gossip_block() { + for &block_index in BLOCK_INDICES { + // Ensure the block will be rejected if imported on its own (without gossip checking). + let harness = get_invalid_sigs_harness(); + let mut snapshots = CHAIN_SEGMENT.clone(); + snapshots[block_index].beacon_block.signature = junk_signature(); // Import all the ancestors before the `block_index` block. 
let ancestor_blocks = CHAIN_SEGMENT .iter() @@ -294,63 +353,28 @@ fn invalid_signatures() { .process_chain_segment(ancestor_blocks) .into_block_error() .expect("should import all blocks prior to the one being tested"); + assert!( + matches!( + harness + .chain + .process_block(snapshots[block_index].beacon_block.clone()), + Err(BlockError::InvalidSignature) + ), + "should not import individual block with an invalid gossip signature", + ); + } +} - // For the given snapshots, test the following: - // - // - The `process_chain_segment` function returns `InvalidSignature`. - // - The `process_block` function returns `InvalidSignature` when importing the - // `SignedBeaconBlock` directly. - // - The `verify_block_for_gossip` function does _not_ return an error. - // - The `process_block` function returns `InvalidSignature` when verifying the - // `GossipVerifiedBlock`. - let assert_invalid_signature = |snapshots: &[BeaconSnapshot], item: &str| { - let blocks = snapshots - .iter() - .map(|snapshot| snapshot.beacon_block.clone()) - .collect(); - - // Ensure the block will be rejected if imported in a chain segment. - assert!( - matches!( - harness - .chain - .process_chain_segment(blocks) - .into_block_error(), - Err(BlockError::InvalidSignature) - ), - "should not import chain segment with an invalid {} signature", - item - ); - - // Ensure the block will be rejected if imported on its own (without gossip checking). - assert!( - matches!( - harness - .chain - .process_block(snapshots[block_index].beacon_block.clone()), - Err(BlockError::InvalidSignature) - ), - "should not import individual block with an invalid {} signature", - item - ); - - // NOTE: we choose not to check gossip verification here. It only checks one signature - // (proposal) and that is already tested elsewhere in this file. 
- // - // It's not trivial to just check gossip verification since it will start refusing - // blocks as soon as it has seen one valid proposal signature for a given (validator, - // slot) tuple. - }; - - /* - * Block proposal - */ +#[test] +fn invalid_signature_block_proposal() { + for &block_index in BLOCK_INDICES { + let harness = get_invalid_sigs_harness(); let mut snapshots = CHAIN_SEGMENT.clone(); snapshots[block_index].beacon_block.signature = junk_signature(); let blocks = snapshots .iter() .map(|snapshot| snapshot.beacon_block.clone()) - .collect(); + .collect::>(); // Ensure the block will be rejected if imported in a chain segment. assert!( matches!( @@ -360,22 +384,15 @@ fn invalid_signatures() { .into_block_error(), Err(BlockError::InvalidSignature) ), - "should not import chain segment with an invalid gossip signature", - ); - // Ensure the block will be rejected if imported on its own (without gossip checking). - assert!( - matches!( - harness - .chain - .process_block(snapshots[block_index].beacon_block.clone()), - Err(BlockError::InvalidSignature) - ), - "should not import individual block with an invalid gossip signature", + "should not import chain segment with an invalid block signature", ); + } +} - /* - * Randao reveal - */ +#[test] +fn invalid_signature_randao_reveal() { + for &block_index in BLOCK_INDICES { + let harness = get_invalid_sigs_harness(); let mut snapshots = CHAIN_SEGMENT.clone(); snapshots[block_index] .beacon_block @@ -384,11 +401,14 @@ fn invalid_signatures() { .randao_reveal = junk_signature(); update_parent_roots(&mut snapshots); update_proposal_signatures(&mut snapshots, &harness); - assert_invalid_signature(&snapshots, "randao"); + assert_invalid_signature(&harness, block_index, &snapshots, "randao"); + } +} - /* - * Proposer slashing - */ +#[test] +fn invalid_signature_proposer_slashing() { + for &block_index in BLOCK_INDICES { + let harness = get_invalid_sigs_harness(); let mut snapshots = CHAIN_SEGMENT.clone(); let 
proposer_slashing = ProposerSlashing { signed_header_1: SignedBeaconBlockHeader { @@ -409,11 +429,14 @@ fn invalid_signatures() { .expect("should update proposer slashing"); update_parent_roots(&mut snapshots); update_proposal_signatures(&mut snapshots, &harness); - assert_invalid_signature(&snapshots, "proposer slashing"); + assert_invalid_signature(&harness, block_index, &snapshots, "proposer slashing"); + } +} - /* - * Attester slashing - */ +#[test] +fn invalid_signature_attester_slashing() { + for &block_index in BLOCK_INDICES { + let harness = get_invalid_sigs_harness(); let mut snapshots = CHAIN_SEGMENT.clone(); let indexed_attestation = IndexedAttestation { attesting_indices: vec![0].into(), @@ -445,11 +468,16 @@ fn invalid_signatures() { .expect("should update attester slashing"); update_parent_roots(&mut snapshots); update_proposal_signatures(&mut snapshots, &harness); - assert_invalid_signature(&snapshots, "attester slashing"); + assert_invalid_signature(&harness, block_index, &snapshots, "attester slashing"); + } +} - /* - * Attestation - */ +#[test] +fn invalid_signature_attestation() { + let mut checked_attestation = false; + + for &block_index in BLOCK_INDICES { + let harness = get_invalid_sigs_harness(); let mut snapshots = CHAIN_SEGMENT.clone(); if let Some(attestation) = snapshots[block_index] .beacon_block @@ -461,15 +489,22 @@ fn invalid_signatures() { attestation.signature = junk_aggregate_signature(); update_parent_roots(&mut snapshots); update_proposal_signatures(&mut snapshots, &harness); - assert_invalid_signature(&snapshots, "attestation"); + assert_invalid_signature(&harness, block_index, &snapshots, "attestation"); checked_attestation = true; } + } - /* - * Deposit - * - * Note: an invalid deposit signature is permitted! 
- */ + assert!( + checked_attestation, + "the test should check an attestation signature" + ) +} + +#[test] +fn invalid_signature_deposit() { + for &block_index in BLOCK_INDICES { + // Note: an invalid deposit signature is permitted! + let harness = get_invalid_sigs_harness(); let mut snapshots = CHAIN_SEGMENT.clone(); let deposit = Deposit { proof: vec![Hash256::zero(); DEPOSIT_TREE_DEPTH + 1].into(), @@ -503,10 +538,13 @@ fn invalid_signatures() { ), "should not throw an invalid signature error for a bad deposit signature" ); + } +} - /* - * Voluntary exit - */ +#[test] +fn invalid_signature_exit() { + for &block_index in BLOCK_INDICES { + let harness = get_invalid_sigs_harness(); let mut snapshots = CHAIN_SEGMENT.clone(); let epoch = snapshots[block_index].beacon_state.current_epoch(); snapshots[block_index] @@ -524,13 +562,8 @@ fn invalid_signatures() { .expect("should update deposit"); update_parent_roots(&mut snapshots); update_proposal_signatures(&mut snapshots, &harness); - assert_invalid_signature(&snapshots, "voluntary exit"); + assert_invalid_signature(&harness, block_index, &snapshots, "voluntary exit"); } - - assert!( - checked_attestation, - "the test should check an attestation signature" - ) } fn unwrap_err(result: Result) -> E { @@ -641,6 +674,48 @@ fn block_gossip_verification() { "should not import a block with an invalid proposal signature" ); + /* + * This test ensures that: + * + * Spec v0.12.2 + * + * The block's parent (defined by block.parent_root) passes validation. 
+ */ + + let mut block = CHAIN_SEGMENT[block_index].beacon_block.clone(); + let parent_root = Hash256::from_low_u64_be(42); + block.message.parent_root = parent_root; + assert!( + matches!( + unwrap_err(harness.chain.verify_block_for_gossip(block)), + BlockError::ParentUnknown(block) + if block.parent_root() == parent_root + ), + "should not import a block for an unknown parent" + ); + + /* + * This test ensures that: + * + * Spec v0.12.2 + * + * The current finalized_checkpoint is an ancestor of block -- i.e. get_ancestor(store, + * block.parent_root, compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)) == + * store.finalized_checkpoint.root + */ + + let mut block = CHAIN_SEGMENT[block_index].beacon_block.clone(); + let parent_root = CHAIN_SEGMENT[0].beacon_block_root; + block.message.parent_root = parent_root; + assert!( + matches!( + unwrap_err(harness.chain.verify_block_for_gossip(block)), + BlockError::NotFinalizedDescendant { block_parent_root } + if block_parent_root == parent_root + ), + "should not import a block that conflicts with finality" + ); + /* * This test ensures that: * diff --git a/beacon_node/beacon_chain/tests/op_verification.rs b/beacon_node/beacon_chain/tests/op_verification.rs index 4bb2f074853..6ca90565b24 100644 --- a/beacon_node/beacon_chain/tests/op_verification.rs +++ b/beacon_node/beacon_chain/tests/op_verification.rs @@ -7,7 +7,7 @@ extern crate lazy_static; use beacon_chain::observed_operations::ObservationOutcome; use beacon_chain::test_utils::{ - AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType, + AttestationStrategy, BeaconChainHarness, BlockStrategy, BlockingMigratorDiskHarnessType, }; use sloggers::{null::NullLoggerBuilder, Build}; use std::sync::Arc; @@ -28,7 +28,7 @@ lazy_static! 
{ } type E = MinimalEthSpec; -type TestHarness = BeaconChainHarness>; +type TestHarness = BeaconChainHarness>; type HotColdDB = store::HotColdDB, LevelDB>; fn get_store(db_path: &TempDir) -> Arc { @@ -57,8 +57,8 @@ fn get_harness(store: Arc, validator_count: usize) -> TestHarness { fn voluntary_exit() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); - let harness = get_harness(store.clone(), VALIDATOR_COUNT); - let spec = &harness.chain.spec; + let mut harness = get_harness(store.clone(), VALIDATOR_COUNT); + let spec = &harness.chain.spec.clone(); harness.extend_chain( (E::slots_per_epoch() * (spec.shard_committee_period + 1)) as usize, diff --git a/beacon_node/beacon_chain/tests/persistence_tests.rs b/beacon_node/beacon_chain/tests/persistence_tests.rs index 23f1a94f4cd..b5bc3cae1ac 100644 --- a/beacon_node/beacon_chain/tests/persistence_tests.rs +++ b/beacon_node/beacon_chain/tests/persistence_tests.rs @@ -44,7 +44,7 @@ fn finalizes_after_resuming_from_db() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); - let harness = BeaconChainHarness::new_with_disk_store( + let mut harness = BeaconChainHarness::new_with_disk_store( MinimalEthSpec, store.clone(), KEYPAIRS[0..validator_count].to_vec(), @@ -88,7 +88,7 @@ fn finalizes_after_resuming_from_db() { let data_dir = harness.data_dir; let original_chain = harness.chain; - let resumed_harness = BeaconChainHarness::resume_from_disk_store( + let mut resumed_harness = BeaconChainHarness::resume_from_disk_store( MinimalEthSpec, store, KEYPAIRS[0..validator_count].to_vec(), diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 6ab2908bcec..caa2f9d6cfc 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -10,13 +10,14 @@ extern crate slog_term; use crate::slog::Drain; use beacon_chain::attestation_verification::Error as AttnError; use beacon_chain::test_utils::{ - 
AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType, + AttestationStrategy, BeaconChainHarness, BlockStrategy, BlockingMigratorDiskHarnessType, }; use beacon_chain::BeaconSnapshot; -use beacon_chain::StateSkipConfig; +use maplit::hashset; use rand::Rng; use std::collections::HashMap; use std::collections::HashSet; +use std::convert::TryInto; use std::sync::Arc; use store::{ iter::{BlockRootsIterator, StateRootsIterator}, @@ -37,7 +38,7 @@ lazy_static! { } type E = MinimalEthSpec; -type TestHarness = BeaconChainHarness>; +type TestHarness = BeaconChainHarness>; fn get_store(db_path: &TempDir) -> Arc, LevelDB>> { let spec = MinimalEthSpec::default_spec(); @@ -73,7 +74,7 @@ fn full_participation_no_skips() { let num_blocks_produced = E::slots_per_epoch() * 5; let db_path = tempdir().unwrap(); let store = get_store(&db_path); - let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + let mut harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); harness.extend_chain( num_blocks_produced as usize, @@ -93,7 +94,7 @@ fn randomised_skips() { let mut num_blocks_produced = 0; let db_path = tempdir().unwrap(); let store = get_store(&db_path); - let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + let mut harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); let rng = &mut XorShiftRng::from_seed([42; 16]); let mut head_slot = 0; @@ -129,7 +130,7 @@ fn randomised_skips() { fn long_skip() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); - let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + let mut harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); // Number of blocks to create in the first run, intentionally not falling on an epoch // boundary in order to check that the DB hot -> cold migration is capable of reaching @@ -180,7 +181,7 @@ fn randao_genesis_storage() { let validator_count = 8; let db_path = tempdir().unwrap(); let store = get_store(&db_path); - let harness = 
get_harness(store.clone(), validator_count); + let mut harness = get_harness(store.clone(), validator_count); let num_slots = E::slots_per_epoch() * (E::epochs_per_historical_vector() - 1) as u64; @@ -241,7 +242,7 @@ fn split_slot_restore() { let split_slot = { let store = get_store(&db_path); - let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + let mut harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); let num_blocks = 4 * E::slots_per_epoch(); @@ -269,7 +270,7 @@ fn epoch_boundary_state_attestation_processing() { let num_blocks_produced = E::slots_per_epoch() * 5; let db_path = tempdir().unwrap(); let store = get_store(&db_path); - let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + let mut harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); let late_validators = vec![0, 1]; let timely_validators = (2..LOW_VALIDATOR_COUNT).collect::>(); @@ -355,16 +356,18 @@ fn epoch_boundary_state_attestation_processing() { fn delete_blocks_and_states() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); - let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + let validators_keypairs = + types::test_utils::generate_deterministic_keypairs(LOW_VALIDATOR_COUNT); + let mut harness = + BeaconChainHarness::new_with_disk_store(MinimalEthSpec, store.clone(), validators_keypairs); - let unforked_blocks = 4 * E::slots_per_epoch(); + let unforked_blocks: u64 = 4 * E::slots_per_epoch(); // Finalize an initial portion of the chain. - harness.extend_chain( - unforked_blocks as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + let initial_slots: Vec = (1..=unforked_blocks).map(Into::into).collect(); + let state = harness.get_current_state(); + let all_validators = harness.get_all_validators(); + harness.add_attested_blocks_at_slots(state, &initial_slots, &all_validators); // Create a fork post-finalization. 
let two_thirds = (LOW_VALIDATOR_COUNT / 3) * 2; @@ -373,24 +376,37 @@ fn delete_blocks_and_states() { let fork_blocks = 2 * E::slots_per_epoch(); - let (honest_head, faulty_head) = harness.generate_two_forks_by_skipping_a_block( - &honest_validators, - &faulty_validators, - fork_blocks as usize, - fork_blocks as usize, - ); + let slot_u64: u64 = harness.get_current_slot().as_u64() + 1; + + let fork1_slots: Vec = (slot_u64..(slot_u64 + fork_blocks)) + .map(Into::into) + .collect(); + let fork2_slots: Vec = (slot_u64 + 1..(slot_u64 + 1 + fork_blocks)) + .map(Into::into) + .collect(); + + let fork1_state = harness.get_current_state(); + let fork2_state = fork1_state.clone(); + let results = harness.add_blocks_on_multiple_chains(vec![ + (fork1_state, fork1_slots, honest_validators), + (fork2_state, fork2_slots, faulty_validators), + ]); + + let honest_head = results[0].2; + let faulty_head = results[1].2; assert_ne!(honest_head, faulty_head, "forks should be distinct"); let head_info = harness.chain.head_info().expect("should get head"); assert_eq!(head_info.slot, unforked_blocks + fork_blocks); assert_eq!( - head_info.block_root, honest_head, + head_info.block_root, + honest_head.into(), "the honest chain should be the canonical chain", ); let faulty_head_block = store - .get_block(&faulty_head) + .get_block(&faulty_head.into()) .expect("no errors") .expect("faulty head block exists"); @@ -402,31 +418,35 @@ fn delete_blocks_and_states() { .expect("no db error") .expect("faulty head state exists"); - let states_to_delete = StateRootsIterator::new(store.clone(), &faulty_head_state) - .map(Result::unwrap) - .take_while(|(_, slot)| *slot > unforked_blocks) - .collect::>(); - // Delete faulty fork // Attempting to load those states should find them unavailable - for (state_root, slot) in &states_to_delete { - store.delete_state(state_root, *slot).unwrap(); - assert_eq!(store.get_state(state_root, Some(*slot)).unwrap(), None); + for (state_root, slot) in + 
StateRootsIterator::new(store.clone(), &faulty_head_state).map(Result::unwrap) + { + if slot <= unforked_blocks { + break; + } + store.delete_state(&state_root, slot).unwrap(); + assert_eq!(store.get_state(&state_root, Some(slot)).unwrap(), None); } // Double-deleting should also be OK (deleting non-existent things is fine) - for (state_root, slot) in &states_to_delete { - store.delete_state(state_root, *slot).unwrap(); + for (state_root, slot) in + StateRootsIterator::new(store.clone(), &faulty_head_state).map(Result::unwrap) + { + if slot <= unforked_blocks { + break; + } + store.delete_state(&state_root, slot).unwrap(); } // Deleting the blocks from the fork should remove them completely - let blocks_to_delete = BlockRootsIterator::new(store.clone(), &faulty_head_state) - .map(Result::unwrap) - // Extra +1 here accounts for the skipped slot that started this fork - .take_while(|(_, slot)| *slot > unforked_blocks + 1) - .collect::>(); - - for (block_root, _) in blocks_to_delete { + for (block_root, slot) in + BlockRootsIterator::new(store.clone(), &faulty_head_state).map(Result::unwrap) + { + if slot <= unforked_blocks + 1 { + break; + } store.delete_block(&block_root).unwrap(); assert_eq!(store.get_block(&block_root).unwrap(), None); } @@ -437,11 +457,12 @@ fn delete_blocks_and_states() { .chain .rev_iter_state_roots() .expect("rev iter ok") - .map(Result::unwrap) - .filter(|(_, slot)| *slot < split_slot); + .map(Result::unwrap); for (state_root, slot) in finalized_states { - store.delete_state(&state_root, slot).unwrap(); + if slot < split_slot { + store.delete_state(&state_root, slot).unwrap(); + } } // After all that, the chain dump should still be OK @@ -452,35 +473,52 @@ fn delete_blocks_and_states() { // See https://github.com/sigp/lighthouse/issues/845 fn multi_epoch_fork_valid_blocks_test( initial_blocks: usize, - num_fork1_blocks: usize, - num_fork2_blocks: usize, + num_fork1_blocks_: usize, + num_fork2_blocks_: usize, num_fork1_validators: 
usize, ) -> (TempDir, TestHarness, Hash256, Hash256) { let db_path = tempdir().unwrap(); let store = get_store(&db_path); - let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + let validators_keypairs = + types::test_utils::generate_deterministic_keypairs(LOW_VALIDATOR_COUNT); + let mut harness = + BeaconChainHarness::new_with_disk_store(MinimalEthSpec, store, validators_keypairs); + + let num_fork1_blocks: u64 = num_fork1_blocks_.try_into().unwrap(); + let num_fork2_blocks: u64 = num_fork2_blocks_.try_into().unwrap(); // Create the initial portion of the chain if initial_blocks > 0 { - harness.extend_chain( - initial_blocks, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + let initial_slots: Vec = (1..=initial_blocks).map(Into::into).collect(); + let state = harness.get_current_state(); + let all_validators = harness.get_all_validators(); + harness.add_attested_blocks_at_slots(state, &initial_slots, &all_validators); } assert!(num_fork1_validators <= LOW_VALIDATOR_COUNT); let fork1_validators: Vec = (0..num_fork1_validators).collect(); let fork2_validators: Vec = (num_fork1_validators..LOW_VALIDATOR_COUNT).collect(); - let (head1, head2) = harness.generate_two_forks_by_skipping_a_block( - &fork1_validators, - &fork2_validators, - num_fork1_blocks, - num_fork2_blocks, - ); + let fork1_state = harness.get_current_state(); + let fork2_state = fork1_state.clone(); - (db_path, harness, head1, head2) + let slot_u64: u64 = harness.get_current_slot().as_u64() + 1; + let fork1_slots: Vec = (slot_u64..(slot_u64 + num_fork1_blocks)) + .map(Into::into) + .collect(); + let fork2_slots: Vec = (slot_u64 + 1..(slot_u64 + 1 + num_fork2_blocks)) + .map(Into::into) + .collect(); + + let results = harness.add_blocks_on_multiple_chains(vec![ + (fork1_state, fork1_slots, fork1_validators), + (fork2_state, fork2_slots, fork2_validators), + ]); + + let head1 = results[0].2; + let head2 = results[1].2; + + (db_path, harness, head1.into(), 
head2.into()) } // This is the minimal test of block production with different shufflings. @@ -512,8 +550,7 @@ fn block_production_different_shuffling_long() { fn multiple_attestations_per_block() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); - let harness = get_harness(store, HIGH_VALIDATOR_COUNT); - let chain = &harness.chain; + let mut harness = get_harness(store, HIGH_VALIDATOR_COUNT); harness.extend_chain( MainnetEthSpec::slots_per_epoch() as usize * 3, @@ -521,14 +558,14 @@ fn multiple_attestations_per_block() { AttestationStrategy::AllValidators, ); - let head = chain.head().unwrap(); + let head = harness.chain.head().unwrap(); let committees_per_slot = head .beacon_state .get_committee_count_at_slot(head.beacon_state.slot) .unwrap(); assert!(committees_per_slot > 1); - for snapshot in chain.chain_dump().unwrap() { + for snapshot in harness.chain.chain_dump().unwrap() { assert_eq!( snapshot.beacon_block.message.body.attestations.len() as u64, if snapshot.beacon_block.slot() <= 1 { @@ -544,7 +581,7 @@ fn multiple_attestations_per_block() { fn shuffling_compatible_linear_chain() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); - let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + let mut harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); // Skip the block at the end of the first epoch. let head_block_root = harness.extend_chain( @@ -568,7 +605,7 @@ fn shuffling_compatible_linear_chain() { fn shuffling_compatible_missing_pivot_block() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); - let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + let mut harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); // Skip the block at the end of the first epoch. 
harness.extend_chain( @@ -726,150 +763,160 @@ fn check_shuffling_compatible( // Ensure blocks from abandoned forks are pruned from the Hot DB #[test] fn prunes_abandoned_fork_between_two_finalized_checkpoints() { - const VALIDATOR_COUNT: usize = 24; - const VALIDATOR_SUPERMAJORITY: usize = (VALIDATOR_COUNT / 3) * 2; - let db_path = tempdir().unwrap(); - let store = get_store(&db_path); - let harness = get_harness(Arc::clone(&store), VALIDATOR_COUNT); - const HONEST_VALIDATOR_COUNT: usize = VALIDATOR_SUPERMAJORITY; + const HONEST_VALIDATOR_COUNT: usize = 16 + 0; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; + const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; + let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); - let faulty_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); - let slots_per_epoch: usize = MinimalEthSpec::slots_per_epoch() as usize; - - let slot = harness.get_chain_slot(); - let state = harness.get_head_state(); - let (canonical_blocks_pre_finalization, _, slot, _, state) = - harness.add_canonical_chain_blocks(state, slot, slots_per_epoch, &honest_validators); - let (stray_blocks, stray_states, _, stray_head, _) = harness.add_stray_blocks( - harness.get_head_state(), - slot, - slots_per_epoch - 1, - &faulty_validators, + let adversarial_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); + let mut rig = BeaconChainHarness::new(MinimalEthSpec, validators_keypairs); + let slots_per_epoch = rig.slots_per_epoch(); + let mut state = rig.get_current_state(); + + let canonical_chain_slots: Vec = (1..=rig.epoch_start_slot(1)).map(Slot::new).collect(); + let (canonical_chain_blocks_pre_finalization, _, _, new_state) = + rig.add_attested_blocks_at_slots(state, &canonical_chain_slots, &honest_validators); + state = new_state; + let canonical_chain_slot: u64 = 
rig.get_current_slot().into(); + + let stray_slots: Vec = (canonical_chain_slot + 1..rig.epoch_start_slot(2)) + .map(Slot::new) + .collect(); + let (stray_blocks, stray_states, stray_head, _) = rig.add_attested_blocks_at_slots( + rig.get_current_state(), + &stray_slots, + &adversarial_validators, ); // Precondition: Ensure all stray_blocks blocks are still known for &block_hash in stray_blocks.values() { - let block = harness.chain.get_block(&block_hash.into()).unwrap(); assert!( - block.is_some(), + rig.block_exists(block_hash), "stray block {} should be still present", block_hash ); } for (&slot, &state_hash) in &stray_states { - let state = harness - .chain - .get_state(&state_hash.into(), Some(slot)) - .unwrap(); assert!( - state.is_some(), + rig.hot_state_exists(state_hash), "stray state {} at slot {} should be still present", state_hash, slot ); } - // Precondition: Only genesis is finalized - let chain_dump = harness.chain.chain_dump().unwrap(); - assert_eq!( - get_finalized_epoch_boundary_blocks(&chain_dump), - vec![Hash256::zero().into()].into_iter().collect(), - ); + assert_eq!(rig.get_finalized_checkpoints(), hashset! 
{},); - assert!(harness.chain.knows_head(&stray_head)); + assert!(rig.chain.knows_head(&stray_head)); // Trigger finalization - let (canonical_blocks_post_finalization, _, _, _, _) = - harness.add_canonical_chain_blocks(state, slot, slots_per_epoch * 5, &honest_validators); + let finalization_slots: Vec = ((canonical_chain_slot + 1) + ..=(canonical_chain_slot + slots_per_epoch * 5)) + .map(Slot::new) + .collect(); + let (canonical_chain_blocks_post_finalization, _, _, _) = + rig.add_attested_blocks_at_slots(state, &finalization_slots, &honest_validators); // Postcondition: New blocks got finalized - let chain_dump = harness.chain.chain_dump().unwrap(); - let finalized_blocks = get_finalized_epoch_boundary_blocks(&chain_dump); assert_eq!( - finalized_blocks, - vec![ - Hash256::zero().into(), - canonical_blocks_pre_finalization[&Slot::new(slots_per_epoch as u64)], - canonical_blocks_post_finalization[&Slot::new((slots_per_epoch * 2) as u64)], - ] - .into_iter() - .collect() + rig.get_finalized_checkpoints(), + hashset! 
{ + canonical_chain_blocks_pre_finalization[&rig.epoch_start_slot(1).into()], + canonical_chain_blocks_post_finalization[&rig.epoch_start_slot(2).into()], + }, ); // Postcondition: Ensure all stray_blocks blocks have been pruned for &block_hash in stray_blocks.values() { - let block = harness.chain.get_block(&block_hash.into()).unwrap(); assert!( - block.is_none(), + !rig.block_exists(block_hash), "abandoned block {} should have been pruned", block_hash ); } for (&slot, &state_hash) in &stray_states { - let state = harness.chain.get_state(&state_hash.into(), None).unwrap(); assert!( - state.is_none(), - "stray state {} at slot {} should have been deleted", + !rig.hot_state_exists(state_hash), + "stray state {} at slot {} should have been pruned", + state_hash, + slot + ); + assert!( + !rig.cold_state_exists(state_hash), + "stray state {} at slot {} should have been pruned", state_hash, slot ); } - assert!(!harness.chain.knows_head(&stray_head)); + assert!(!rig.chain.knows_head(&stray_head)); } #[test] fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { - const VALIDATOR_COUNT: usize = 24; - const VALIDATOR_SUPERMAJORITY: usize = (VALIDATOR_COUNT / 3) * 2; - let db_path = tempdir().unwrap(); - let store = get_store(&db_path); - let harness = get_harness(Arc::clone(&store), VALIDATOR_COUNT); - const HONEST_VALIDATOR_COUNT: usize = VALIDATOR_SUPERMAJORITY; + const HONEST_VALIDATOR_COUNT: usize = 16 + 0; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; + const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; + let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); - let faulty_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); - let all_validators: Vec = (0..VALIDATOR_COUNT).collect(); - let slots_per_epoch: usize = MinimalEthSpec::slots_per_epoch() as usize; + let adversarial_validators: 
Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); + let mut rig = BeaconChainHarness::new(MinimalEthSpec, validators_keypairs); + let slots_per_epoch = rig.slots_per_epoch(); + let state = rig.get_current_state(); // Fill up 0th epoch - let slot = harness.get_chain_slot(); - let state = harness.get_head_state(); - let (canonical_blocks_zeroth_epoch, _, slot, _, state) = - harness.add_canonical_chain_blocks(state, slot, slots_per_epoch, &honest_validators); + let canonical_chain_slots_zeroth_epoch: Vec = + (1..rig.epoch_start_slot(1)).map(Slot::new).collect(); + let (_, _, _, state) = rig.add_attested_blocks_at_slots( + state, + &canonical_chain_slots_zeroth_epoch, + &honest_validators, + ); // Fill up 1st epoch - let (_, _, canonical_slot, shared_head, canonical_state) = - harness.add_canonical_chain_blocks(state, slot, 1, &all_validators); - let (stray_blocks, stray_states, _, stray_head, _) = harness.add_stray_blocks( - canonical_state.clone(), - canonical_slot, - 1, - &faulty_validators, + let canonical_chain_slots_first_epoch: Vec = (rig.epoch_start_slot(1) + ..=rig.epoch_start_slot(1) + 1) + .map(Slot::new) + .collect(); + let (canonical_chain_blocks_first_epoch, _, shared_head, state) = rig + .add_attested_blocks_at_slots( + state.clone(), + &canonical_chain_slots_first_epoch, + &honest_validators, + ); + let canonical_chain_slot: u64 = rig.get_current_slot().into(); + + let stray_chain_slots_first_epoch: Vec = (rig.epoch_start_slot(1) + 2 + ..=rig.epoch_start_slot(1) + 2) + .map(Slot::new) + .collect(); + let (stray_blocks, stray_states, stray_head, _) = rig.add_attested_blocks_at_slots( + state.clone(), + &stray_chain_slots_first_epoch, + &adversarial_validators, ); // Preconditions for &block_hash in stray_blocks.values() { - let block = harness.chain.get_block(&block_hash.into()).unwrap(); assert!( - block.is_some(), + rig.block_exists(block_hash), "stray block {} should be still present", block_hash ); } for (&slot, &state_hash) in &stray_states 
{ - let state = harness.chain.get_state(&state_hash.into(), None).unwrap(); assert!( - state.is_some(), + rig.hot_state_exists(state_hash), "stray state {} at slot {} should be still present", state_hash, slot ); } - let chain_dump = harness.chain.chain_dump().unwrap(); + let chain_dump = rig.chain.chain_dump().unwrap(); assert_eq!( get_finalized_epoch_boundary_blocks(&chain_dump), vec![Hash256::zero().into()].into_iter().collect(), @@ -878,172 +925,167 @@ fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { assert!(get_blocks(&chain_dump).contains(&shared_head)); // Trigger finalization - let (canonical_blocks, _, _, _, _) = harness.add_canonical_chain_blocks( - canonical_state, - canonical_slot, - slots_per_epoch * 5, - &honest_validators, - ); + let finalization_slots: Vec = ((canonical_chain_slot + 1) + ..=(canonical_chain_slot + slots_per_epoch * 5)) + .map(Slot::new) + .collect(); + let (canonical_chain_blocks, _, _, _) = + rig.add_attested_blocks_at_slots(state, &finalization_slots, &honest_validators); // Postconditions - let chain_dump = harness.chain.chain_dump().unwrap(); - let finalized_blocks = get_finalized_epoch_boundary_blocks(&chain_dump); assert_eq!( - finalized_blocks, - vec![ - Hash256::zero().into(), - canonical_blocks_zeroth_epoch[&Slot::new(slots_per_epoch as u64)], - canonical_blocks[&Slot::new((slots_per_epoch * 2) as u64)], - ] - .into_iter() - .collect() + rig.get_finalized_checkpoints(), + hashset! 
{ + canonical_chain_blocks_first_epoch[&rig.epoch_start_slot(1).into()], + canonical_chain_blocks[&rig.epoch_start_slot(2).into()], + }, ); for &block_hash in stray_blocks.values() { assert!( - harness - .chain - .get_block(&block_hash.into()) - .unwrap() - .is_none(), + !rig.block_exists(block_hash), "stray block {} should have been pruned", block_hash, ); } for (&slot, &state_hash) in &stray_states { - let state = harness.chain.get_state(&state_hash.into(), None).unwrap(); assert!( - state.is_none(), - "stray state {} at slot {} should have been deleted", + !rig.hot_state_exists(state_hash), + "stray state {} at slot {} should have been pruned", + state_hash, + slot + ); + assert!( + !rig.cold_state_exists(state_hash), + "stray state {} at slot {} should have been pruned", state_hash, slot ); } - assert!(!harness.chain.knows_head(&stray_head)); + assert!(!rig.chain.knows_head(&stray_head)); + let chain_dump = rig.chain.chain_dump().unwrap(); assert!(get_blocks(&chain_dump).contains(&shared_head)); } #[test] fn pruning_does_not_touch_blocks_prior_to_finalization() { - const VALIDATOR_COUNT: usize = 24; - const VALIDATOR_SUPERMAJORITY: usize = (VALIDATOR_COUNT / 3) * 2; - let db_path = tempdir().unwrap(); - let store = get_store(&db_path); - let harness = get_harness(Arc::clone(&store), VALIDATOR_COUNT); - const HONEST_VALIDATOR_COUNT: usize = VALIDATOR_SUPERMAJORITY; + const HONEST_VALIDATOR_COUNT: usize = 16; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 8; + const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; + let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); - let faulty_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); - let slots_per_epoch: usize = MinimalEthSpec::slots_per_epoch() as usize; + let adversarial_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); + let mut rig = 
BeaconChainHarness::new(MinimalEthSpec, validators_keypairs); + let slots_per_epoch = rig.slots_per_epoch(); + let mut state = rig.get_current_state(); // Fill up 0th epoch with canonical chain blocks - let slot = harness.get_chain_slot(); - let state = harness.get_head_state(); - let (canonical_blocks_zeroth_epoch, _, slot, _, state) = - harness.add_canonical_chain_blocks(state, slot, slots_per_epoch, &honest_validators); + let zeroth_epoch_slots: Vec = (1..=rig.epoch_start_slot(1)).map(Slot::new).collect(); + let (canonical_chain_blocks, _, _, new_state) = + rig.add_attested_blocks_at_slots(state, &zeroth_epoch_slots, &honest_validators); + state = new_state; + let canonical_chain_slot: u64 = rig.get_current_slot().into(); // Fill up 1st epoch. Contains a fork. - let (stray_blocks, stray_states, _, stray_head, _) = - harness.add_stray_blocks(state.clone(), slot, slots_per_epoch - 1, &faulty_validators); + let first_epoch_slots: Vec = ((rig.epoch_start_slot(1) + 1)..(rig.epoch_start_slot(2))) + .map(Slot::new) + .collect(); + let (stray_blocks, stray_states, stray_head, _) = rig.add_attested_blocks_at_slots( + state.clone(), + &first_epoch_slots, + &adversarial_validators, + ); // Preconditions for &block_hash in stray_blocks.values() { - let block = harness.chain.get_block(&block_hash.into()).unwrap(); assert!( - block.is_some(), + rig.block_exists(block_hash), "stray block {} should be still present", block_hash ); } for (&slot, &state_hash) in &stray_states { - let state = harness.chain.get_state(&state_hash.into(), None).unwrap(); assert!( - state.is_some(), + rig.hot_state_exists(state_hash), "stray state {} at slot {} should be still present", state_hash, slot ); } - let chain_dump = harness.chain.chain_dump().unwrap(); - assert_eq!( - get_finalized_epoch_boundary_blocks(&chain_dump), - vec![Hash256::zero().into()].into_iter().collect(), - ); + assert_eq!(rig.get_finalized_checkpoints(), hashset! 
{}); // Trigger finalization - let (_, _, _, _, _) = - harness.add_canonical_chain_blocks(state, slot, slots_per_epoch * 4, &honest_validators); + let slots: Vec = ((canonical_chain_slot + 1) + ..=(canonical_chain_slot + slots_per_epoch * 4)) + .map(Slot::new) + .collect(); + let (_, _, _, _) = rig.add_attested_blocks_at_slots(state, &slots, &honest_validators); // Postconditions - let chain_dump = harness.chain.chain_dump().unwrap(); - let finalized_blocks = get_finalized_epoch_boundary_blocks(&chain_dump); assert_eq!( - finalized_blocks, - vec![ - Hash256::zero().into(), - canonical_blocks_zeroth_epoch[&Slot::new(slots_per_epoch as u64)], - ] - .into_iter() - .collect() + rig.get_finalized_checkpoints(), + hashset! {canonical_chain_blocks[&rig.epoch_start_slot(1).into()]}, ); for &block_hash in stray_blocks.values() { - let block = harness.chain.get_block(&block_hash.into()).unwrap(); assert!( - block.is_some(), + rig.block_exists(block_hash), "stray block {} should be still present", block_hash ); } for (&slot, &state_hash) in &stray_states { - let state = harness.chain.get_state(&state_hash.into(), None).unwrap(); assert!( - state.is_some(), + rig.hot_state_exists(state_hash), "stray state {} at slot {} should be still present", state_hash, slot ); } - assert!(harness.chain.knows_head(&stray_head)); + assert!(rig.chain.knows_head(&stray_head)); } #[test] -fn prunes_fork_running_past_finalized_checkpoint() { - const VALIDATOR_COUNT: usize = 24; - const VALIDATOR_SUPERMAJORITY: usize = (VALIDATOR_COUNT / 3) * 2; - let db_path = tempdir().unwrap(); - let store = get_store(&db_path); - let harness = get_harness(Arc::clone(&store), VALIDATOR_COUNT); - const HONEST_VALIDATOR_COUNT: usize = VALIDATOR_SUPERMAJORITY; +fn prunes_fork_growing_past_youngest_finalized_checkpoint() { + const HONEST_VALIDATOR_COUNT: usize = 16 + 0; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; + const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; + let 
validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); - let faulty_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); - let slots_per_epoch: usize = MinimalEthSpec::slots_per_epoch() as usize; + let adversarial_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); + let mut rig = BeaconChainHarness::new(MinimalEthSpec, validators_keypairs); + let state = rig.get_current_state(); // Fill up 0th epoch with canonical chain blocks - let slot = harness.get_chain_slot(); - let state = harness.get_head_state(); - let (canonical_blocks_zeroth_epoch, _, slot, _, state) = - harness.add_canonical_chain_blocks(state, slot, slots_per_epoch, &honest_validators); + let zeroth_epoch_slots: Vec = (1..=rig.epoch_start_slot(1)).map(Slot::new).collect(); + let (canonical_blocks_zeroth_epoch, _, _, state) = + rig.add_attested_blocks_at_slots(state, &zeroth_epoch_slots, &honest_validators); // Fill up 1st epoch. Contains a fork. - let (stray_blocks_first_epoch, stray_states_first_epoch, stray_slot, _, stray_state) = - harness.add_stray_blocks(state.clone(), slot, slots_per_epoch, &faulty_validators); - - let (canonical_blocks_first_epoch, _, canonical_slot, _, canonical_state) = - harness.add_canonical_chain_blocks(state, slot, slots_per_epoch, &honest_validators); + let slots_first_epoch: Vec = (rig.epoch_start_slot(1) + 1..rig.epoch_start_slot(2)) + .map(Into::into) + .collect(); + let (stray_blocks_first_epoch, stray_states_first_epoch, _, stray_state) = rig + .add_attested_blocks_at_slots(state.clone(), &slots_first_epoch, &adversarial_validators); + let (canonical_blocks_first_epoch, _, _, canonical_state) = + rig.add_attested_blocks_at_slots(state, &slots_first_epoch, &honest_validators); // Fill up 2nd epoch. Extends both the canonical chain and the fork. 
- let (stray_blocks_second_epoch, stray_states_second_epoch, _, stray_head, _) = harness - .add_stray_blocks( + let stray_slots_second_epoch: Vec = (rig.epoch_start_slot(2) + ..=rig.epoch_start_slot(2) + 1) + .map(Into::into) + .collect(); + let (stray_blocks_second_epoch, stray_states_second_epoch, stray_head, _) = rig + .add_attested_blocks_at_slots( stray_state, - stray_slot, - slots_per_epoch - 1, - &faulty_validators, + &stray_slots_second_epoch, + &adversarial_validators, ); // Precondition: Ensure all stray_blocks blocks are still known @@ -1058,240 +1100,530 @@ fn prunes_fork_running_past_finalized_checkpoint() { .collect(); for &block_hash in stray_blocks.values() { - let block = harness.chain.get_block(&block_hash.into()).unwrap(); assert!( - block.is_some(), + rig.block_exists(block_hash), "stray block {} should be still present", block_hash ); } for (&slot, &state_hash) in &stray_states { - let state = harness.chain.get_state(&state_hash.into(), None).unwrap(); assert!( - state.is_some(), + rig.hot_state_exists(state_hash), "stray state {} at slot {} should be still present", state_hash, slot ); } - // Precondition: Only genesis is finalized - let chain_dump = harness.chain.chain_dump().unwrap(); - assert_eq!( - get_finalized_epoch_boundary_blocks(&chain_dump), - vec![Hash256::zero().into()].into_iter().collect(), - ); + // Precondition: Nothing is finalized yet + assert_eq!(rig.get_finalized_checkpoints(), hashset! 
{},); - assert!(harness.chain.knows_head(&stray_head)); + assert!(rig.chain.knows_head(&stray_head)); // Trigger finalization - let (canonical_blocks_second_epoch, _, _, _, _) = harness.add_canonical_chain_blocks( - canonical_state, - canonical_slot, - slots_per_epoch * 6, - &honest_validators, - ); - assert_ne!( - harness - .chain - .head() - .unwrap() - .beacon_state - .finalized_checkpoint - .epoch, - 0, - "chain should have finalized" - ); + let canonical_slots: Vec = (rig.epoch_start_slot(2)..=rig.epoch_start_slot(6)) + .map(Into::into) + .collect(); + let (canonical_blocks, _, _, _) = + rig.add_attested_blocks_at_slots(canonical_state, &canonical_slots, &honest_validators); // Postconditions let canonical_blocks: HashMap = canonical_blocks_zeroth_epoch .into_iter() .chain(canonical_blocks_first_epoch.into_iter()) - .chain(canonical_blocks_second_epoch.into_iter()) + .chain(canonical_blocks.into_iter()) .collect(); // Postcondition: New blocks got finalized - let chain_dump = harness.chain.chain_dump().unwrap(); - let finalized_blocks = get_finalized_epoch_boundary_blocks(&chain_dump); assert_eq!( - finalized_blocks, - vec![ - Hash256::zero().into(), - canonical_blocks[&Slot::new(slots_per_epoch as u64 * 3)], - canonical_blocks[&Slot::new(slots_per_epoch as u64 * 4)], - ] - .into_iter() - .collect() + rig.get_finalized_checkpoints(), + hashset! 
{ + canonical_blocks[&rig.epoch_start_slot(1).into()], + canonical_blocks[&rig.epoch_start_slot(2).into()], + }, ); // Postcondition: Ensure all stray_blocks blocks have been pruned for &block_hash in stray_blocks.values() { - let block = harness.chain.get_block(&block_hash.into()).unwrap(); assert!( - block.is_none(), + !rig.block_exists(block_hash), "abandoned block {} should have been pruned", block_hash ); } for (&slot, &state_hash) in &stray_states { - let state = harness.chain.get_state(&state_hash.into(), None).unwrap(); assert!( - state.is_none(), - "stray state {} at slot {} should have been deleted", + !rig.hot_state_exists(state_hash), + "stray state {} at slot {} should have been pruned", + state_hash, + slot + ); + assert!( + !rig.cold_state_exists(state_hash), + "stray state {} at slot {} should have been pruned", state_hash, slot ); } - assert!(!harness.chain.knows_head(&stray_head)); + assert!(!rig.chain.knows_head(&stray_head)); } // This is to check if state outside of normal block processing are pruned correctly. #[test] fn prunes_skipped_slots_states() { - const VALIDATOR_COUNT: usize = 24; - const VALIDATOR_SUPERMAJORITY: usize = (VALIDATOR_COUNT / 3) * 2; - let db_path = tempdir().unwrap(); - let store = get_store(&db_path); - let harness = get_harness(Arc::clone(&store), VALIDATOR_COUNT); - const HONEST_VALIDATOR_COUNT: usize = VALIDATOR_SUPERMAJORITY; + const HONEST_VALIDATOR_COUNT: usize = 16 + 0; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; + const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; + let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); - let faulty_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); - let slots_per_epoch: usize = MinimalEthSpec::slots_per_epoch() as usize; - - // Arrange skipped slots so as to cross the epoch boundary. 
That way, we excercise the code - // responsible for storing state outside of normal block processing. - - let canonical_slot = harness.get_chain_slot(); - let canonical_state = harness.get_head_state(); - let (canonical_blocks_zeroth_epoch, _, canonical_slot, _, canonical_state) = harness - .add_canonical_chain_blocks( - canonical_state, - canonical_slot, - slots_per_epoch - 1, - &honest_validators, - ); + let adversarial_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); + let mut rig = BeaconChainHarness::new(MinimalEthSpec, validators_keypairs); + let state = rig.get_current_state(); + + let canonical_slots_zeroth_epoch: Vec = + (1..=rig.epoch_start_slot(1)).map(Into::into).collect(); + let (canonical_blocks_zeroth_epoch, _, _, canonical_state) = rig.add_attested_blocks_at_slots( + state.clone(), + &canonical_slots_zeroth_epoch, + &honest_validators, + ); - let (stray_blocks, stray_states, stray_slot, _, _) = harness.add_stray_blocks( + let skipped_slot: Slot = (rig.epoch_start_slot(1) + 1).into(); + + let stray_slots: Vec = ((skipped_slot + 1).into()..rig.epoch_start_slot(2)) + .map(Into::into) + .collect(); + let (stray_blocks, stray_states, _, stray_state) = rig.add_attested_blocks_at_slots( canonical_state.clone(), - canonical_slot, - slots_per_epoch, - &faulty_validators, + &stray_slots, + &adversarial_validators, ); // Preconditions for &block_hash in stray_blocks.values() { - let block = harness.chain.get_block(&block_hash.into()).unwrap(); assert!( - block.is_some(), + rig.block_exists(block_hash), "stray block {} should be still present", block_hash ); } for (&slot, &state_hash) in &stray_states { - let state = harness.chain.get_state(&state_hash.into(), None).unwrap(); assert!( - state.is_some(), + rig.hot_state_exists(state_hash), "stray state {} at slot {} should be still present", state_hash, slot ); } - let chain_dump = harness.chain.chain_dump().unwrap(); - assert_eq!( - get_finalized_epoch_boundary_blocks(&chain_dump), - 
vec![Hash256::zero().into()].into_iter().collect(), - ); + assert_eq!(rig.get_finalized_checkpoints(), hashset! {},); // Make sure slots were skipped - let stray_state = harness - .chain - .state_at_slot(stray_slot, StateSkipConfig::WithoutStateRoots) - .unwrap(); - let block_root = stray_state.get_block_root(canonical_slot - 1); - assert_eq!(stray_state.get_block_root(canonical_slot), block_root); - assert_eq!(stray_state.get_block_root(canonical_slot + 1), block_root); - - let skipped_slots = vec![canonical_slot, canonical_slot + 1]; - for &slot in &skipped_slots { - assert_eq!(stray_state.get_block_root(slot), block_root); - let state_hash = stray_state.get_state_root(slot).unwrap(); + assert!(rig.is_skipped_slot(&stray_state, skipped_slot)); + { + let state_hash = (*stray_state.get_state_root(skipped_slot).unwrap()).into(); assert!( - harness - .chain - .get_state(&state_hash, Some(slot)) - .unwrap() - .is_some(), - "skipped slots state should be still present" + rig.hot_state_exists(state_hash), + "skipped slot state {} should be still present", + state_hash ); } // Trigger finalization - let (canonical_blocks_post_finalization, _, _, _, _) = harness.add_canonical_chain_blocks( - canonical_state, - canonical_slot, - slots_per_epoch * 6, + let canonical_slots: Vec = ((skipped_slot + 1).into()..rig.epoch_start_slot(7)) + .map(Into::into) + .collect(); + let (canonical_blocks_post_finalization, _, _, _) = + rig.add_attested_blocks_at_slots(canonical_state, &canonical_slots, &honest_validators); + + // Postconditions + let canonical_blocks: HashMap = canonical_blocks_zeroth_epoch + .into_iter() + .chain(canonical_blocks_post_finalization.into_iter()) + .collect(); + assert_eq!( + rig.get_finalized_checkpoints(), + hashset! 
{ + canonical_blocks[&rig.epoch_start_slot(1).into()], + canonical_blocks[&rig.epoch_start_slot(2).into()], + }, + ); + + for (&slot, &state_hash) in &stray_states { + assert!( + !rig.hot_state_exists(state_hash), + "stray state {} at slot {} should have been pruned", + state_hash, + slot + ); + assert!( + !rig.cold_state_exists(state_hash), + "stray state {} at slot {} should have been pruned", + state_hash, + slot + ); + } + + assert!(rig.is_skipped_slot(&stray_state, skipped_slot)); + { + let state_hash: BeaconStateHash = + (*stray_state.get_state_root(skipped_slot).unwrap()).into(); + assert!( + !rig.hot_state_exists(state_hash), + "skipped slot {} state {} should have been pruned", + skipped_slot, + state_hash + ); + } +} + +// This is to check if state outside of normal block processing are pruned correctly. +#[test] +fn finalizes_non_epoch_start_slot() { + const HONEST_VALIDATOR_COUNT: usize = 16 + 0; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; + const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; + let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); + let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); + let adversarial_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); + let mut rig = BeaconChainHarness::new(MinimalEthSpec, validators_keypairs); + let state = rig.get_current_state(); + + let canonical_slots_zeroth_epoch: Vec = + (1..rig.epoch_start_slot(1)).map(Into::into).collect(); + let (canonical_blocks_zeroth_epoch, _, _, canonical_state) = rig.add_attested_blocks_at_slots( + state.clone(), + &canonical_slots_zeroth_epoch, &honest_validators, ); - assert_eq!( - harness - .chain - .head() - .unwrap() - .beacon_state - .finalized_checkpoint - .epoch, - 2, - "chain should have finalized" + + let skipped_slot: Slot = rig.epoch_start_slot(1).into(); + + let stray_slots: Vec = ((skipped_slot + 1).into()..rig.epoch_start_slot(2)) + 
.map(Into::into) + .collect(); + let (stray_blocks, stray_states, _, stray_state) = rig.add_attested_blocks_at_slots( + canonical_state.clone(), + &stray_slots, + &adversarial_validators, ); + // Preconditions + for &block_hash in stray_blocks.values() { + assert!( + rig.block_exists(block_hash), + "stray block {} should be still present", + block_hash + ); + } + + for (&slot, &state_hash) in &stray_states { + assert!( + rig.hot_state_exists(state_hash), + "stray state {} at slot {} should be still present", + state_hash, + slot + ); + } + + assert_eq!(rig.get_finalized_checkpoints(), hashset! {}); + + // Make sure slots were skipped + assert!(rig.is_skipped_slot(&stray_state, skipped_slot)); + { + let state_hash = (*stray_state.get_state_root(skipped_slot).unwrap()).into(); + assert!( + rig.hot_state_exists(state_hash), + "skipped slot state {} should be still present", + state_hash + ); + } + + // Trigger finalization + let canonical_slots: Vec = ((skipped_slot + 1).into()..rig.epoch_start_slot(7)) + .map(Into::into) + .collect(); + let (canonical_blocks_post_finalization, _, _, _) = + rig.add_attested_blocks_at_slots(canonical_state, &canonical_slots, &honest_validators); + // Postconditions - let chain_dump = harness.chain.chain_dump().unwrap(); - let finalized_blocks = get_finalized_epoch_boundary_blocks(&chain_dump); let canonical_blocks: HashMap = canonical_blocks_zeroth_epoch .into_iter() .chain(canonical_blocks_post_finalization.into_iter()) .collect(); assert_eq!( - finalized_blocks, - vec![ - Hash256::zero().into(), - canonical_blocks[&Slot::new(slots_per_epoch as u64 * 2)], - ] - .into_iter() - .collect() + rig.get_finalized_checkpoints(), + hashset! 
{ + canonical_blocks[&(rig.epoch_start_slot(1)-1).into()], + canonical_blocks[&rig.epoch_start_slot(2).into()], + }, ); for (&slot, &state_hash) in &stray_states { - let state = harness.chain.get_state(&state_hash.into(), None).unwrap(); assert!( - state.is_none(), - "stray state {} at slot {} should have been deleted", + !rig.hot_state_exists(state_hash), + "stray state {} at slot {} should have been pruned", + state_hash, + slot + ); + assert!( + !rig.cold_state_exists(state_hash), + "stray state {} at slot {} should have been pruned", state_hash, slot ); } - for &slot in &skipped_slots { - assert_eq!(stray_state.get_block_root(slot), block_root); - let state_hash = stray_state.get_state_root(slot).unwrap(); + assert!(rig.is_skipped_slot(&stray_state, skipped_slot)); + { + let state_hash: BeaconStateHash = + (*stray_state.get_state_root(skipped_slot).unwrap()).into(); + assert!( + !rig.hot_state_exists(state_hash), + "skipped slot {} state {} should have been pruned", + skipped_slot, + state_hash + ); + } +} + +fn check_all_blocks_exist<'a>( + harness: &TestHarness, + blocks: impl Iterator, +) { + for &block_hash in blocks { + let block = harness.chain.get_block(&block_hash.into()).unwrap(); + assert!( + block.is_some(), + "expected block {:?} to be in DB", + block_hash + ); + } +} + +fn check_all_states_exist<'a>( + harness: &TestHarness, + states: impl Iterator, +) { + for &state_hash in states { + let state = harness.chain.get_state(&state_hash.into(), None).unwrap(); + assert!( + state.is_some(), + "expected state {:?} to be in DB", + state_hash, + ); + } +} + +// Check that none of the given states exist in the database. 
+fn check_no_states_exist<'a>( + harness: &TestHarness, + states: impl Iterator, +) { + for &state_root in states { assert!( harness .chain - .get_state(&state_hash, None) + .get_state(&state_root.into(), None) .unwrap() .is_none(), - "skipped slot {} state {} should have been pruned", - slot, - state_hash + "state {:?} should not be in the DB", + state_root ); } } +// Check that none of the given blocks exist in the database. +fn check_no_blocks_exist<'a>( + harness: &TestHarness, + blocks: impl Iterator, +) { + for &block_hash in blocks { + let block = harness.chain.get_block(&block_hash.into()).unwrap(); + assert!( + block.is_none(), + "did not expect block {:?} to be in the DB", + block_hash + ); + } +} + +#[test] +fn prune_single_block_fork() { + let slots_per_epoch = E::slots_per_epoch(); + pruning_test(3 * slots_per_epoch, 1, slots_per_epoch, 0, 1); +} + +#[test] +fn prune_single_block_long_skip() { + let slots_per_epoch = E::slots_per_epoch(); + pruning_test( + 2 * slots_per_epoch, + 1, + 2 * slots_per_epoch, + 2 * slots_per_epoch as u64, + 1, + ); +} + +#[test] +fn prune_shared_skip_states_mid_epoch() { + let slots_per_epoch = E::slots_per_epoch(); + pruning_test( + slots_per_epoch + slots_per_epoch / 2, + 1, + slots_per_epoch, + 2, + slots_per_epoch - 1, + ); +} + +#[test] +fn prune_shared_skip_states_epoch_boundaries() { + let slots_per_epoch = E::slots_per_epoch(); + pruning_test(slots_per_epoch - 1, 1, slots_per_epoch, 2, slots_per_epoch); + pruning_test(slots_per_epoch - 1, 2, slots_per_epoch, 1, slots_per_epoch); + pruning_test( + 2 * slots_per_epoch + slots_per_epoch / 2, + slots_per_epoch as u64 / 2, + slots_per_epoch, + slots_per_epoch as u64 / 2 + 1, + slots_per_epoch, + ); + pruning_test( + 2 * slots_per_epoch + slots_per_epoch / 2, + slots_per_epoch as u64 / 2, + slots_per_epoch, + slots_per_epoch as u64 / 2 + 1, + slots_per_epoch, + ); + pruning_test( + 2 * slots_per_epoch - 1, + slots_per_epoch as u64, + 1, + 0, + 2 * slots_per_epoch, + ); +} 
+ +/// Generic harness for pruning tests. +fn pruning_test( + // Number of blocks to start the chain with before forking. + num_initial_blocks: u64, + // Number of skip slots on the main chain after the initial blocks. + num_canonical_skips: u64, + // Number of blocks on the main chain after the skip, but before the finalisation-triggering + // blocks. + num_canonical_middle_blocks: u64, + // Number of skip slots on the fork chain after the initial blocks. + num_fork_skips: u64, + // Number of blocks on the fork chain after the skips. + num_fork_blocks: u64, +) { + const VALIDATOR_COUNT: usize = 24; + const VALIDATOR_SUPERMAJORITY: usize = (VALIDATOR_COUNT / 3) * 2; + const HONEST_VALIDATOR_COUNT: usize = VALIDATOR_SUPERMAJORITY; + + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let mut harness = get_harness(store.clone(), VALIDATOR_COUNT); + let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); + let faulty_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); + + let slots = |start: Slot, num_blocks: u64| -> Vec { + (start.as_u64()..start.as_u64() + num_blocks) + .map(Slot::new) + .collect() + }; + + let start_slot = Slot::new(1); + let divergence_slot = start_slot + num_initial_blocks; + let (_, _, _, divergence_state) = harness.add_attested_blocks_at_slots( + harness.get_current_state(), + &slots(start_slot, num_initial_blocks)[..], + &honest_validators, + ); + + let mut chains = harness.add_blocks_on_multiple_chains(vec![ + // Canonical chain + ( + divergence_state.clone(), + slots( + divergence_slot + num_canonical_skips, + num_canonical_middle_blocks, + ), + honest_validators.clone(), + ), + // Fork chain + ( + divergence_state.clone(), + slots(divergence_slot + num_fork_skips, num_fork_blocks), + faulty_validators, + ), + ]); + let (_, _, _, canonical_state) = chains.remove(0); + let (stray_blocks, stray_states, _, stray_head_state) = chains.remove(0); + + let stray_head_slot = divergence_slot + 
num_fork_skips + num_fork_blocks - 1; + let stray_head_state_root = stray_states[&stray_head_slot]; + let stray_states = harness + .chain + .rev_iter_state_roots_from(stray_head_state_root.into(), &stray_head_state) + .map(Result::unwrap) + .map(|(state_root, _)| state_root.into()) + .collect::>(); + + check_all_blocks_exist(&harness, stray_blocks.values()); + check_all_states_exist(&harness, stray_states.iter()); + + let chain_dump = harness.chain.chain_dump().unwrap(); + assert_eq!( + get_finalized_epoch_boundary_blocks(&chain_dump), + vec![Hash256::zero().into()].into_iter().collect(), + ); + + // Trigger finalization + let num_finalization_blocks = 4 * E::slots_per_epoch(); + let canonical_slot = divergence_slot + num_canonical_skips + num_canonical_middle_blocks; + harness.add_attested_blocks_at_slots( + canonical_state, + &slots(canonical_slot, num_finalization_blocks), + &honest_validators, + ); + + // Check that finalization has advanced past the divergence slot. + assert!( + harness + .chain + .head_info() + .unwrap() + .finalized_checkpoint + .epoch + .start_slot(E::slots_per_epoch()) + > divergence_slot + ); + check_chain_dump( + &harness, + (num_initial_blocks + num_canonical_middle_blocks + num_finalization_blocks + 1) as u64, + ); + + let all_canonical_states = harness + .chain + .rev_iter_state_roots() + .unwrap() + .map(Result::unwrap) + .map(|(state_root, _)| state_root.into()) + .collect::>(); + + check_all_states_exist(&harness, all_canonical_states.iter()); + check_no_states_exist(&harness, stray_states.difference(&all_canonical_states)); + check_no_blocks_exist(&harness, stray_blocks.values()); +} + /// Check that the head state's slot matches `expected_slot`. 
fn check_slot(harness: &TestHarness, expected_slot: u64) { let state = &harness.chain.head().expect("should get head").beacon_state; @@ -1396,18 +1728,31 @@ fn check_chain_dump(harness: &TestHarness, expected_len: u64) { } } -/// Check that state and block root iterators can reach genesis +/// Check that every state from the canonical chain is in the database, and that the +/// reverse state and block root iterators reach genesis. fn check_iterators(harness: &TestHarness) { - assert_eq!( - harness - .chain - .rev_iter_state_roots() - .expect("should get iter") - .last() - .map(Result::unwrap) - .map(|(_, slot)| slot), - Some(Slot::new(0)) - ); + let mut min_slot = None; + for (state_root, slot) in harness + .chain + .rev_iter_state_roots() + .expect("should get iter") + .map(Result::unwrap) + { + assert!( + harness + .chain + .store + .get_state(&state_root, Some(slot)) + .unwrap() + .is_some(), + "state {:?} from canonical chain should be in DB", + state_root + ); + min_slot = Some(slot); + } + // Assert that we reached genesis. + assert_eq!(min_slot, Some(Slot::new(0))); + // Assert that the block root iterator reaches genesis. assert_eq!( harness .chain diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index d07b20d2fd6..12f1c4364a4 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -6,7 +6,8 @@ extern crate lazy_static; use beacon_chain::{ attestation_verification::Error as AttnError, test_utils::{ - AttestationStrategy, BeaconChainHarness, BlockStrategy, HarnessType, OP_POOL_DB_KEY, + AttestationStrategy, BeaconChainHarness, BlockStrategy, NullMigratorEphemeralHarnessType, + OP_POOL_DB_KEY, }, }; use operation_pool::PersistedOperationPool; @@ -24,8 +25,10 @@ lazy_static! 
{ static ref KEYPAIRS: Vec = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); } -fn get_harness(validator_count: usize) -> BeaconChainHarness> { - let harness = BeaconChainHarness::new( +fn get_harness( + validator_count: usize, +) -> BeaconChainHarness> { + let harness = BeaconChainHarness::new_with_store_config( MinimalEthSpec, KEYPAIRS[0..validator_count].to_vec(), StoreConfig::default(), @@ -64,7 +67,7 @@ fn massive_skips() { fn iterators() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 2 - 1; - let harness = get_harness(VALIDATOR_COUNT); + let mut harness = get_harness(VALIDATOR_COUNT); harness.extend_chain( num_blocks_produced as usize, @@ -139,7 +142,7 @@ fn iterators() { #[test] fn chooses_fork() { - let harness = get_harness(VALIDATOR_COUNT); + let mut harness = get_harness(VALIDATOR_COUNT); let two_thirds = (VALIDATOR_COUNT / 3) * 2; let delay = MinimalEthSpec::default_spec().min_attestation_inclusion_delay as usize; @@ -190,7 +193,7 @@ fn chooses_fork() { fn finalizes_with_full_participation() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5; - let harness = get_harness(VALIDATOR_COUNT); + let mut harness = get_harness(VALIDATOR_COUNT); harness.extend_chain( num_blocks_produced as usize, @@ -225,7 +228,7 @@ fn finalizes_with_full_participation() { fn finalizes_with_two_thirds_participation() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5; - let harness = get_harness(VALIDATOR_COUNT); + let mut harness = get_harness(VALIDATOR_COUNT); let two_thirds = (VALIDATOR_COUNT / 3) * 2; let attesters = (0..two_thirds).collect(); @@ -268,7 +271,7 @@ fn finalizes_with_two_thirds_participation() { fn does_not_finalize_with_less_than_two_thirds_participation() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5; - let harness = get_harness(VALIDATOR_COUNT); + let mut harness = get_harness(VALIDATOR_COUNT); let two_thirds = (VALIDATOR_COUNT / 3) * 2; let less_than_two_thirds = 
two_thirds - 1; @@ -305,7 +308,7 @@ fn does_not_finalize_with_less_than_two_thirds_participation() { fn does_not_finalize_without_attestation() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5; - let harness = get_harness(VALIDATOR_COUNT); + let mut harness = get_harness(VALIDATOR_COUNT); harness.extend_chain( num_blocks_produced as usize, @@ -338,7 +341,7 @@ fn does_not_finalize_without_attestation() { fn roundtrip_operation_pool() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5; - let harness = get_harness(VALIDATOR_COUNT); + let mut harness = get_harness(VALIDATOR_COUNT); // Add some attestations harness.extend_chain( @@ -370,7 +373,7 @@ fn roundtrip_operation_pool() { fn unaggregated_attestations_added_to_fork_choice_some_none() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() / 2; - let harness = get_harness(VALIDATOR_COUNT); + let mut harness = get_harness(VALIDATOR_COUNT); harness.extend_chain( num_blocks_produced as usize, @@ -424,7 +427,7 @@ fn unaggregated_attestations_added_to_fork_choice_some_none() { fn attestations_with_increasing_slots() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5; - let harness = get_harness(VALIDATOR_COUNT); + let mut harness = get_harness(VALIDATOR_COUNT); let mut attestations = vec![]; @@ -486,7 +489,7 @@ fn attestations_with_increasing_slots() { fn unaggregated_attestations_added_to_fork_choice_all_updated() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 2 - 1; - let harness = get_harness(VALIDATOR_COUNT); + let mut harness = get_harness(VALIDATOR_COUNT); harness.extend_chain( num_blocks_produced as usize, @@ -541,7 +544,7 @@ fn unaggregated_attestations_added_to_fork_choice_all_updated() { fn run_skip_slot_test(skip_slots: u64) { let num_validators = 8; - let harness_a = get_harness(num_validators); + let mut harness_a = get_harness(num_validators); let harness_b = get_harness(num_validators); for _ in 0..skip_slots { diff --git 
a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index e18c2b65a98..de6f7e59d76 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -31,7 +31,7 @@ slog-async = "2.5.0" tokio = "0.2.21" dirs = "2.0.2" futures = "0.3.5" -reqwest = "0.10.4" +reqwest = { version = "0.10.4", features = ["native-tls-vendored"] } url = "2.1.1" eth1 = { path = "../eth1" } genesis = { path = "../genesis" } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 12645b8a5ea..c77ddecef2d 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -135,6 +135,7 @@ where let eth_spec_instance = self.eth_spec_instance.clone(); let data_dir = config.data_dir.clone(); let disabled_forks = config.disabled_forks.clone(); + let chain_config = config.chain.clone(); let graffiti = config.graffiti; let store = @@ -153,6 +154,7 @@ where .store_migrator(store_migrator) .data_dir(data_dir) .custom_spec(spec.clone()) + .chain_config(chain_config) .disabled_forks(disabled_forks) .graffiti(graffiti); @@ -232,8 +234,8 @@ where Ok(self) } - /// Immediately starts the networking stack. - pub fn network(mut self, config: &NetworkConfig) -> Result { + /// Starts the networking stack. + pub async fn network(mut self, config: &NetworkConfig) -> Result { let beacon_chain = self .beacon_chain .clone() @@ -246,6 +248,7 @@ where let (network_globals, network_send) = NetworkService::start(beacon_chain, config, context.executor) + .await .map_err(|e| format!("Failed to start network: {:?}", e))?; self.network_globals = Some(network_globals); @@ -581,7 +584,7 @@ where /// Specifies that the `BeaconChain` should cache eth1 blocks/logs from a remote eth1 node /// (e.g., Parity/Geth) and refer to that cache when collecting deposits or eth1 votes during /// block production. 
- pub fn caching_eth1_backend(mut self, config: Eth1Config) -> Result { + pub async fn caching_eth1_backend(mut self, config: Eth1Config) -> Result { let context = self .runtime_context .as_ref() @@ -595,6 +598,17 @@ where .clone() .ok_or_else(|| "caching_eth1_backend requires a chain spec".to_string())?; + // Check if the eth1 endpoint we connect to is on the correct network id. + let network_id = + eth1::http::get_network_id(&config.endpoint, Duration::from_millis(15_000)).await?; + + if network_id != config.network_id { + return Err(format!( + "Invalid eth1 network id. Expected {:?}, got {:?}", + config.network_id, network_id + )); + } + let backend = if let Some(eth1_service_from_genesis) = self.eth1_service { eth1_service_from_genesis.update_config(config)?; diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index aaf0df46c0a..19088e785b5 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -64,6 +64,7 @@ pub struct Config { pub store: store::StoreConfig, pub network: network::NetworkConfig, pub rest_api: rest_api::Config, + pub chain: beacon_chain::ChainConfig, pub websocket_server: websocket_server::Config, pub eth1: eth1::Config, } @@ -78,6 +79,7 @@ impl Default for Config { genesis: <_>::default(), store: <_>::default(), network: NetworkConfig::default(), + chain: <_>::default(), rest_api: <_>::default(), websocket_server: <_>::default(), spec_constants: TESTNET_SPEC_CONSTANTS.into(), diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 208bd389a53..f82c9971d38 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -122,14 +122,28 @@ pub fn spawn_notifier( head_distance.as_u64(), slot_distance_pretty(head_distance, slot_duration) ); - info!( - log, - "Syncing"; - "peers" => peer_count_pretty(connected_peer_count), - "distance" => distance, - "speed" => sync_speed_pretty(speedo.slots_per_second()), - "est_time" => 
estimated_time_pretty(speedo.estimated_time_till_slot(current_slot)), - ); + + let speed = speedo.slots_per_second(); + let display_speed = speed.map_or(false, |speed| speed != 0.0); + + if display_speed { + info!( + log, + "Syncing"; + "peers" => peer_count_pretty(connected_peer_count), + "distance" => distance, + "speed" => sync_speed_pretty(speed), + "est_time" => estimated_time_pretty(speedo.estimated_time_till_slot(current_slot)), + ); + } else { + info!( + log, + "Syncing"; + "peers" => peer_count_pretty(connected_peer_count), + "distance" => distance, + "est_time" => estimated_time_pretty(speedo.estimated_time_till_slot(current_slot)), + ); + } } else if sync_state.is_synced() { let block_info = if current_slot > head_slot { " … empty".to_string() @@ -220,14 +234,37 @@ fn seconds_pretty(secs: f64) -> String { let hours = d.whole_hours(); let minutes = d.whole_minutes(); + let week_string = if weeks == 1 { "week" } else { "weeks" }; + let day_string = if days == 1 { "day" } else { "days" }; + let hour_string = if hours == 1 { "hr" } else { "hrs" }; + let min_string = if minutes == 1 { "min" } else { "mins" }; + if weeks > 0 { - format!("{:.0} weeks {:.0} days", weeks, days % DAYS_PER_WEEK) + format!( + "{:.0} {} {:.0} {}", + weeks, + week_string, + days % DAYS_PER_WEEK, + day_string + ) } else if days > 0 { - format!("{:.0} days {:.0} hrs", days, hours % HOURS_PER_DAY) + format!( + "{:.0} {} {:.0} {}", + days, + day_string, + hours % HOURS_PER_DAY, + hour_string + ) } else if hours > 0 { - format!("{:.0} hrs {:.0} mins", hours, minutes % MINUTES_PER_HOUR) + format!( + "{:.0} {} {:.0} {}", + hours, + hour_string, + minutes % MINUTES_PER_HOUR, + min_string + ) } else { - format!("{:.0} mins", minutes) + format!("{:.0} {}", minutes, min_string) } } diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index e3521d91d4d..404078c802d 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -11,7 +11,7 @@ web3 = "0.11.0" 
sloggers = "1.0.0" [dependencies] -reqwest = "0.10.4" +reqwest = { version = "0.10.4", features = ["native-tls-vendored"] } futures = { version = "0.3.5", features = ["compat"] } serde_json = "1.0.52" serde = { version = "1.0.110", features = ["derive"] } diff --git a/beacon_node/eth1/src/http.rs b/beacon_node/eth1/src/http.rs index c8b5961a762..6dffdaa7c6d 100644 --- a/beacon_node/eth1/src/http.rs +++ b/beacon_node/eth1/src/http.rs @@ -12,8 +12,10 @@ use futures::future::TryFutureExt; use reqwest::{header::CONTENT_TYPE, ClientBuilder, StatusCode}; +use serde::{Deserialize, Serialize}; use serde_json::{json, Value}; use std::ops::Range; +use std::str::FromStr; use std::time::Duration; use types::Hash256; @@ -30,6 +32,40 @@ pub const DEPOSIT_COUNT_RESPONSE_BYTES: usize = 96; /// Number of bytes in deposit contract deposit root (value only). pub const DEPOSIT_ROOT_BYTES: usize = 32; +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +pub enum Eth1NetworkId { + Goerli, + Mainnet, + Custom(u64), +} + +impl FromStr for Eth1NetworkId { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "1" => Ok(Eth1NetworkId::Mainnet), + "5" => Ok(Eth1NetworkId::Goerli), + custom => { + let network_id = u64::from_str_radix(custom, 10) + .map_err(|e| format!("Failed to parse eth1 network id {}", e))?; + Ok(Eth1NetworkId::Custom(network_id)) + } + } + } +} + +/// Get the eth1 network id of the given endpoint. +pub async fn get_network_id(endpoint: &str, timeout: Duration) -> Result { + let response_body = send_rpc_request(endpoint, "net_version", json!([]), timeout).await?; + Eth1NetworkId::from_str( + response_result(&response_body)? + .ok_or_else(|| "No result was returned for block number".to_string())? 
+ .as_str() + .ok_or_else(|| "Data was not string")?, + ) +} + #[derive(Debug, PartialEq, Clone)] pub struct Block { pub hash: Hash256, diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index bf3c1940aa9..b7149560b49 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -2,7 +2,9 @@ use crate::metrics; use crate::{ block_cache::{BlockCache, Error as BlockCacheError, Eth1Block}, deposit_cache::Error as DepositCacheError, - http::{get_block, get_block_number, get_deposit_logs_in_range, Log}, + http::{ + get_block, get_block_number, get_deposit_logs_in_range, get_network_id, Eth1NetworkId, Log, + }, inner::{DepositUpdater, Inner}, DepositLog, }; @@ -16,6 +18,9 @@ use std::time::{SystemTime, UNIX_EPOCH}; use tokio::time::{interval_at, Duration, Instant}; use types::ChainSpec; +/// Indicates the default eth1 network we use for the deposit contract. +pub const DEFAULT_NETWORK_ID: Eth1NetworkId = Eth1NetworkId::Goerli; + const STANDARD_TIMEOUT_MILLIS: u64 = 15_000; /// Timeout when doing a eth_blockNumber call. @@ -76,6 +81,8 @@ pub struct Config { pub endpoint: String, /// The address the `BlockCache` and `DepositCache` should assume is the canonical deposit contract. pub deposit_contract_address: String, + /// The eth1 network id where the deposit contract is deployed (Goerli/Mainnet). + pub network_id: Eth1NetworkId, /// Defines the first block that the `DepositCache` will start searching for deposit logs. /// /// Setting too high can result in missed logs. 
Setting too low will result in unnecessary @@ -105,6 +112,7 @@ impl Default for Config { Self { endpoint: "http://localhost:8545".into(), deposit_contract_address: "0x0000000000000000000000000000000000000000".into(), + network_id: DEFAULT_NETWORK_ID, deposit_contract_deploy_block: 1, lowest_cached_block_number: 1, follow_distance: 128, @@ -350,6 +358,29 @@ impl Service { } async fn do_update(&self, update_interval: Duration) -> Result<(), ()> { + let endpoint = self.config().endpoint.clone(); + let config_network = self.config().network_id.clone(); + let result = + get_network_id(&endpoint, Duration::from_millis(STANDARD_TIMEOUT_MILLIS)).await; + match result { + Ok(network_id) => { + if network_id != config_network { + error!( + self.log, + "Failed to update eth1 cache"; + "reason" => "Invalid eth1 network id", + "expected" => format!("{:?}",DEFAULT_NETWORK_ID), + "got" => format!("{:?}",network_id), + ); + return Ok(()); + } + } + Err(e) => { + error!(self.log, "Failed to get eth1 network id"; "error" => e); + return Ok(()); + } + } + let update_result = self.update().await; match update_result { Err(e) => error!( diff --git a/beacon_node/eth2_libp2p/Cargo.toml b/beacon_node/eth2_libp2p/Cargo.toml index c37a9d6ef11..2d81a9447d9 100644 --- a/beacon_node/eth2_libp2p/Cargo.toml +++ b/beacon_node/eth2_libp2p/Cargo.toml @@ -32,7 +32,7 @@ snap = "1.0.0" void = "1.0.2" tokio-io-timeout = "0.4.0" tokio-util = { version = "0.3.1", features = ["codec", "compat"] } -discv5 = { version = "0.1.0-alpha.7", features = ["libp2p"] } +discv5 = { version = "0.1.0-alpha.10", features = ["libp2p"] } tiny-keccak = "2.0.2" environment = { path = "../../lighthouse/environment" } # TODO: Remove rand crate for mainnet @@ -41,9 +41,9 @@ rand = "0.7.3" [dependencies.libp2p] #version = "0.23.0" git = "https://github.com/sigp/rust-libp2p" -rev = "3096cb6b89b2883a79ce5ffcb03d41778a09b695" +rev = "03f998022ce2f566a6c6e6c4206bc0ce4d45109f" default-features = false -features = 
["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns", "tcp-tokio"] +features = ["websocket", "identify", "mplex", "noise", "gossipsub", "dns", "tcp-tokio"] [dev-dependencies] tokio = { version = "0.2.21", features = ["full"] } diff --git a/beacon_node/eth2_libp2p/src/behaviour/handler/delegate.rs b/beacon_node/eth2_libp2p/src/behaviour/handler/delegate.rs index ae1bcd05c36..3ab8dcbec72 100644 --- a/beacon_node/eth2_libp2p/src/behaviour/handler/delegate.rs +++ b/beacon_node/eth2_libp2p/src/behaviour/handler/delegate.rs @@ -131,8 +131,9 @@ impl ProtocolsHandler for DelegatingHandler { type InboundProtocol = DelegateInProto; type OutboundProtocol = DelegateOutProto; type OutboundOpenInfo = DelegateOutInfo; + type InboundOpenInfo = (); - fn listen_protocol(&self) -> SubstreamProtocol { + fn listen_protocol(&self) -> SubstreamProtocol { let gossip_proto = self.gossip_handler.listen_protocol(); let rpc_proto = self.rpc_handler.listen_protocol(); let identify_proto = self.identify_handler.listen_protocol(); @@ -147,24 +148,27 @@ impl ProtocolsHandler for DelegatingHandler { SelectUpgrade::new(rpc_proto.into_upgrade().1, identify_proto.into_upgrade().1), ); - SubstreamProtocol::new(select).with_timeout(timeout) + SubstreamProtocol::new(select, ()).with_timeout(timeout) } fn inject_fully_negotiated_inbound( &mut self, out: >::Output, + _info: Self::InboundOpenInfo, ) { match out { // Gossipsub - EitherOutput::First(out) => self.gossip_handler.inject_fully_negotiated_inbound(out), + EitherOutput::First(out) => { + self.gossip_handler.inject_fully_negotiated_inbound(out, ()) + } // RPC EitherOutput::Second(EitherOutput::First(out)) => { - self.rpc_handler.inject_fully_negotiated_inbound(out) + self.rpc_handler.inject_fully_negotiated_inbound(out, ()) } // Identify - EitherOutput::Second(EitherOutput::Second(out)) => { - self.identify_handler.inject_fully_negotiated_inbound(out) - } + EitherOutput::Second(EitherOutput::Second(out)) => self + 
.identify_handler + .inject_fully_negotiated_inbound(out, ()), } } @@ -317,10 +321,11 @@ impl ProtocolsHandler for DelegatingHandler { event, ))); } - Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info }) => { + Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol }) => { return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: protocol.map_upgrade(EitherUpgrade::A), - info: EitherOutput::First(info), + protocol: protocol + .map_upgrade(EitherUpgrade::A) + .map_info(EitherOutput::First), }); } Poll::Pending => (), @@ -333,10 +338,11 @@ impl ProtocolsHandler for DelegatingHandler { Poll::Ready(ProtocolsHandlerEvent::Close(event)) => { return Poll::Ready(ProtocolsHandlerEvent::Close(DelegateError::RPC(event))); } - Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info }) => { + Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol }) => { return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: protocol.map_upgrade(|u| EitherUpgrade::B(EitherUpgrade::A(u))), - info: EitherOutput::Second(EitherOutput::First(info)), + protocol: protocol + .map_upgrade(|u| EitherUpgrade::B(EitherUpgrade::A(u))) + .map_info(|info| EitherOutput::Second(EitherOutput::First(info))), }); } Poll::Pending => (), @@ -351,10 +357,11 @@ impl ProtocolsHandler for DelegatingHandler { Poll::Ready(ProtocolsHandlerEvent::Close(event)) => { return Poll::Ready(ProtocolsHandlerEvent::Close(DelegateError::Identify(event))); } - Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info: () }) => { + Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol }) => { return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: protocol.map_upgrade(|u| EitherUpgrade::B(EitherUpgrade::B(u))), - info: EitherOutput::Second(EitherOutput::Second(())), + protocol: protocol + .map_upgrade(|u| EitherUpgrade::B(EitherUpgrade::B(u))) + 
.map_info(|_| EitherOutput::Second(EitherOutput::Second(()))), }); } Poll::Pending => (), diff --git a/beacon_node/eth2_libp2p/src/behaviour/handler/mod.rs b/beacon_node/eth2_libp2p/src/behaviour/handler/mod.rs index 76169504157..605870d0f22 100644 --- a/beacon_node/eth2_libp2p/src/behaviour/handler/mod.rs +++ b/beacon_node/eth2_libp2p/src/behaviour/handler/mod.rs @@ -54,16 +54,18 @@ impl ProtocolsHandler for BehaviourHandler { type InboundProtocol = DelegateInProto; type OutboundProtocol = DelegateOutProto; type OutboundOpenInfo = DelegateOutInfo; + type InboundOpenInfo = (); - fn listen_protocol(&self) -> SubstreamProtocol { + fn listen_protocol(&self) -> SubstreamProtocol { self.delegate.listen_protocol() } fn inject_fully_negotiated_inbound( &mut self, out: >::Output, + _info: Self::InboundOpenInfo, ) { - self.delegate.inject_fully_negotiated_inbound(out) + self.delegate.inject_fully_negotiated_inbound(out, ()) } fn inject_fully_negotiated_outbound( @@ -127,11 +129,8 @@ impl ProtocolsHandler for BehaviourHandler { Poll::Ready(ProtocolsHandlerEvent::Close(err)) => { return Poll::Ready(ProtocolsHandlerEvent::Close(err)) } - Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info }) => { - return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol, - info, - }); + Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol }) => { + return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol }); } Poll::Pending => (), } diff --git a/beacon_node/eth2_libp2p/src/behaviour/mod.rs b/beacon_node/eth2_libp2p/src/behaviour/mod.rs index 8d8df58f922..07156544974 100644 --- a/beacon_node/eth2_libp2p/src/behaviour/mod.rs +++ b/beacon_node/eth2_libp2p/src/behaviour/mod.rs @@ -1,6 +1,6 @@ use crate::peer_manager::{score::PeerAction, PeerManager, PeerManagerEvent}; use crate::rpc::*; -use crate::types::{GossipEncoding, GossipKind, GossipTopic}; +use crate::types::{EnrBitfield, GossipEncoding, GossipKind, 
GossipTopic, SubnetDiscovery}; use crate::Eth2Enr; use crate::{error, metrics, Enr, NetworkConfig, NetworkGlobals, PubsubMessage, TopicHash}; use futures::prelude::*; @@ -11,7 +11,10 @@ use libp2p::{ identity::Keypair, Multiaddr, }, - gossipsub::{Gossipsub, GossipsubEvent, MessageAuthenticity, MessageId}, + gossipsub::{ + Gossipsub, GossipsubEvent, IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, + MessageId, + }, identify::{Identify, IdentifyEvent}, swarm::{ NetworkBehaviour, NetworkBehaviourAction as NBAction, NotifyHandler, PollParameters, @@ -19,19 +22,23 @@ use libp2p::{ }, PeerId, }; -use slog::{crit, debug, o, trace}; +use slog::{crit, debug, o, trace, warn}; +use ssz::{Decode, Encode}; +use std::fs::File; +use std::io::{Read, Write}; +use std::path::PathBuf; use std::{ collections::VecDeque, marker::PhantomData, sync::Arc, task::{Context, Poll}, - time::Instant, }; use types::{EnrForkId, EthSpec, SignedBeaconBlock, SubnetId}; mod handler; const MAX_IDENTIFY_ADDRESSES: usize = 10; +const METADATA_FILENAME: &str = "metadata"; /// Builds the network behaviour that manages the core protocols of eth2. /// This core behaviour is managed by `Behaviour` which adds peer management to all core @@ -61,13 +68,15 @@ pub struct Behaviour { enr_fork_id: EnrForkId, /// The waker for the current thread. waker: Option, + /// Directory where metadata is stored + network_dir: PathBuf, /// Logger for behaviour actions. log: slog::Logger, } /// Implements the combined behaviour for the libp2p service. 
impl Behaviour { - pub fn new( + pub async fn new( local_key: &Keypair, net_conf: &NetworkConfig, network_globals: Arc>, @@ -86,33 +95,31 @@ impl Behaviour { .eth2() .expect("Local ENR must have a fork id"); - let attnets = network_globals - .local_enr() - .bitfield::() - .expect("Local ENR must have subnet bitfield"); + let meta_data = load_or_build_metadata(&net_conf.network_dir, &log); - let meta_data = MetaData { - seq_number: 1, - attnets, - }; + let gossipsub = Gossipsub::new(MessageAuthenticity::Anonymous, net_conf.gs_config.clone()) + .map_err(|e| format!("Could not construct gossipsub: {:?}", e))?; - // TODO: Until other clients support no author, we will use a 0 peer_id as our author. - let message_author = PeerId::from_bytes(vec![0, 1, 0]).expect("Valid peer id"); + // Temporarily disable scoring until parameters are tested. + /* + gossipsub + .with_peer_score(PeerScoreParams::default(), PeerScoreThresholds::default()) + .expect("Valid score params and thresholds"); + */ Ok(Behaviour { eth2_rpc: RPC::new(log.clone()), - gossipsub: Gossipsub::new( - MessageAuthenticity::Author(message_author), - net_conf.gs_config.clone(), - ), + gossipsub, identify, - peer_manager: PeerManager::new(local_key, net_conf, network_globals.clone(), log)?, + peer_manager: PeerManager::new(local_key, net_conf, network_globals.clone(), log) + .await?, events: VecDeque::new(), peers_to_dc: VecDeque::new(), meta_data, network_globals, enr_fork_id, waker: None, + network_dir: net_conf.network_dir.clone(), log: behaviour_log, }) } @@ -123,7 +130,7 @@ impl Behaviour { /// /// All external dials, dial a multiaddr. This is currently unused but kept here in case any /// part of lighthouse needs to connect to a peer_id in the future. 
- pub fn _dial(&mut self, peer_id: &PeerId) { + pub fn dial(&mut self, peer_id: &PeerId) { self.peer_manager.dial_peer(peer_id); } @@ -147,6 +154,10 @@ impl Behaviour { GossipEncoding::default(), self.enr_fork_id.fork_digest, ); + + // TODO: Implement scoring + // let topic: Topic = gossip_topic.into(); + // self.gossipsub.set_topic_params(t.hash(), TopicScoreParams::default()); self.subscribe(gossip_topic) } @@ -168,6 +179,12 @@ impl Behaviour { GossipEncoding::default(), self.enr_fork_id.fork_digest, ); + // TODO: Implement scoring + /* + let t: Topic = topic.clone().into(); + self.gossipsub + .set_topic_params(t.hash(), TopicScoreParams::default()); + */ self.subscribe(topic) } @@ -189,9 +206,18 @@ impl Behaviour { .write() .insert(topic.clone()); - let topic_str: String = topic.clone().into(); - debug!(self.log, "Subscribed to topic"; "topic" => topic_str); - self.gossipsub.subscribe(topic.into()) + let topic: Topic = topic.into(); + + match self.gossipsub.subscribe(&topic) { + Err(_) => { + warn!(self.log, "Failed to subscribe to topic"; "topic" => topic.to_string()); + false + } + Ok(v) => { + debug!(self.log, "Subscribed to topic"; "topic" => topic.to_string()); + v + } + } } /// Unsubscribe from a gossipsub topic. @@ -201,8 +227,20 @@ impl Behaviour { .gossipsub_subscriptions .write() .remove(&topic); + // unsubscribe from the topic - self.gossipsub.unsubscribe(topic.into()) + let topic: Topic = topic.into(); + + match self.gossipsub.unsubscribe(&topic) { + Err(_) => { + warn!(self.log, "Failed to unsubscribe from topic"; "topic" => topic.to_string()); + false + } + Ok(v) => { + debug!(self.log, "Unsubscribed to topic"; "topic" => topic.to_string()); + v + } + } } /// Publishes a list of messages on the pubsub (gossipsub) behaviour, choosing the encoding. 
@@ -211,8 +249,28 @@ impl Behaviour { for topic in message.topics(GossipEncoding::default(), self.enr_fork_id.fork_digest) { match message.encode(GossipEncoding::default()) { Ok(message_data) => { - if let Err(e) = self.gossipsub.publish(&topic.into(), message_data) { + if let Err(e) = self.gossipsub.publish(topic.clone().into(), message_data) { slog::warn!(self.log, "Could not publish message"; "error" => format!("{:?}", e)); + + // add to metrics + match topic.kind() { + GossipKind::Attestation(subnet_id) => { + if let Some(v) = metrics::get_int_gauge( + &metrics::FAILED_ATTESTATION_PUBLISHES_PER_SUBNET, + &[&subnet_id.to_string()], + ) { + v.inc() + }; + } + kind => { + if let Some(v) = metrics::get_int_gauge( + &metrics::FAILED_PUBLISHES_PER_MAIN_TOPIC, + &[&format!("{:?}", kind)], + ) { + v.inc() + }; + } + } } } Err(e) => crit!(self.log, "Could not publish message"; "error" => e), @@ -221,11 +279,21 @@ impl Behaviour { } } - /// Forwards a message that is waiting in gossipsub's mcache. Messages are only propagated - /// once validated by the beacon chain. - pub fn validate_message(&mut self, propagation_source: &PeerId, message_id: MessageId) { - self.gossipsub - .validate_message(&message_id, propagation_source); + /// Informs the gossipsub about the result of a message validation. + /// If the message is valid it will get propagated by gossipsub. + pub fn report_message_validation_result( + &mut self, + propagation_source: &PeerId, + message_id: MessageId, + validation_result: MessageAcceptance, + ) { + if let Err(e) = self.gossipsub.report_message_validation_result( + &message_id, + propagation_source, + validation_result, + ) { + warn!(self.log, "Failed to report message validation"; "message_id" => message_id.to_string(), "peer_id" => propagation_source.to_string(), "error" => format!("{:?}", e)); + } } /* Eth2 RPC behaviour functions */ @@ -300,8 +368,9 @@ impl Behaviour { /// Attempts to discover new peers for a given subnet. 
The `min_ttl` gives the time at which we /// would like to retain the peers for. - pub fn discover_subnet_peers(&mut self, subnet_id: SubnetId, min_ttl: Option) { - self.peer_manager.discover_subnet_peers(subnet_id, min_ttl) + pub fn discover_subnet_peers(&mut self, subnet_subscriptions: Vec) { + self.peer_manager + .discover_subnet_peers(subnet_subscriptions) } /// Updates the local ENR's "eth2" field with the latest EnrForkId. @@ -345,6 +414,8 @@ impl Behaviour { .local_enr() .bitfield::() .expect("Local discovery must have bitfield"); + // Save the updated metadata to disk + save_metadata_to_disk(&self.network_dir, self.meta_data.clone(), &self.log); } /// Sends a Ping request to the peer. @@ -389,11 +460,25 @@ impl Behaviour { fn on_gossip_event(&mut self, event: GossipsubEvent) { match event { - GossipsubEvent::Message(propagation_source, id, gs_msg) => { + GossipsubEvent::Message { + propagation_source, + message_id: id, + message: gs_msg, + } => { // Note: We are keeping track here of the peer that sent us the message, not the // peer that originally published the message. 
match PubsubMessage::decode(&gs_msg.topics, &gs_msg.data) { - Err(e) => debug!(self.log, "Could not decode gossipsub message"; "error" => e), + Err(e) => { + debug!(self.log, "Could not decode gossipsub message"; "error" => e); + //reject the message + if let Err(e) = self.gossipsub.report_message_validation_result( + &id, + &propagation_source, + MessageAcceptance::Reject, + ) { + warn!(self.log, "Failed to report message validation"; "message_id" => id.to_string(), "peer_id" => propagation_source.to_string(), "error" => format!("{:?}", e)); + } + } Ok(msg) => { // Notify the network self.add_event(BehaviourEvent::PubsubMessage { @@ -406,23 +491,9 @@ impl Behaviour { } } GossipsubEvent::Subscribed { peer_id, topic } => { - if let Some(topic_metric) = metrics::get_int_gauge( - &metrics::GOSSIPSUB_SUBSCRIBED_PEERS_COUNT, - &[topic.as_str()], - ) { - topic_metric.inc() - } - self.add_event(BehaviourEvent::PeerSubscribed(peer_id, topic)); } - GossipsubEvent::Unsubscribed { peer_id: _, topic } => { - if let Some(topic_metric) = metrics::get_int_gauge( - &metrics::GOSSIPSUB_SUBSCRIBED_PEERS_COUNT, - &[topic.as_str()], - ) { - topic_metric.dec() - } - } + GossipsubEvent::Unsubscribed { .. } => {} } } @@ -743,8 +814,8 @@ impl NetworkBehaviour for Behaviour { .peer_info(peer_id) .map_or(true, |i| !i.has_future_duty()) { - //If we are at our peer limit and we don't need the peer for a future validator - //duty, send goodbye with reason TooManyPeers + // If we are at our peer limit and we don't need the peer for a future validator + // duty, send goodbye with reason TooManyPeers Some(GoodbyeReason::TooManyPeers) } else { None @@ -1035,3 +1106,60 @@ pub enum BehaviourEvent { /// Inform the network to send a Status to this peer. StatusPeer(PeerId), } + +/// Load metadata from persisted file. Return default metadata if loading fails. 
+fn load_or_build_metadata(network_dir: &PathBuf, log: &slog::Logger) -> MetaData { + // Default metadata + let mut meta_data = MetaData { + seq_number: 0, + attnets: EnrBitfield::::default(), + }; + // Read metadata from persisted file if available + let metadata_path = network_dir.join(METADATA_FILENAME); + if let Ok(mut metadata_file) = File::open(metadata_path) { + let mut metadata_ssz = Vec::new(); + if metadata_file.read_to_end(&mut metadata_ssz).is_ok() { + match MetaData::::from_ssz_bytes(&metadata_ssz) { + Ok(persisted_metadata) => { + meta_data.seq_number = persisted_metadata.seq_number; + // Increment seq number if persisted attnet is not default + if persisted_metadata.attnets != meta_data.attnets { + meta_data.seq_number += 1; + } + debug!(log, "Loaded metadata from disk"); + } + Err(e) => { + debug!( + log, + "Metadata from file could not be decoded"; + "error" => format!("{:?}", e), + ); + } + } + } + }; + + debug!(log, "Metadata sequence number"; "seq_num" => meta_data.seq_number); + save_metadata_to_disk(network_dir, meta_data.clone(), &log); + meta_data +} + +/// Persist metadata to disk +fn save_metadata_to_disk(dir: &PathBuf, metadata: MetaData, log: &slog::Logger) { + let _ = std::fs::create_dir_all(&dir); + match File::create(dir.join(METADATA_FILENAME)) + .and_then(|mut f| f.write_all(&metadata.as_ssz_bytes())) + { + Ok(_) => { + debug!(log, "Metadata written to disk"); + } + Err(e) => { + warn!( + log, + "Could not write metadata to disk"; + "file" => format!("{:?}{:?}",dir, METADATA_FILENAME), + "error" => format!("{}", e) + ); + } + } +} diff --git a/beacon_node/eth2_libp2p/src/config.rs b/beacon_node/eth2_libp2p/src/config.rs index fa4ec9792b8..0410fa10f82 100644 --- a/beacon_node/eth2_libp2p/src/config.rs +++ b/beacon_node/eth2_libp2p/src/config.rs @@ -50,7 +50,10 @@ pub struct Config { pub discv5_config: Discv5Config, /// List of nodes to initially connect to. 
- pub boot_nodes: Vec, + pub boot_nodes_enr: Vec, + + /// List of nodes to initially connect to, on Multiaddr format. + pub boot_nodes_multiaddr: Vec, /// List of libp2p nodes to initially connect to. pub libp2p_nodes: Vec, @@ -96,8 +99,8 @@ impl Default for Config { let gs_config = GossipsubConfigBuilder::new() .max_transmit_size(GOSSIP_MAX_SIZE) .heartbeat_interval(Duration::from_millis(700)) - .mesh_n(6) - .mesh_n_low(5) + .mesh_n(8) + .mesh_n_low(6) .mesh_n_high(12) .gossip_lazy(6) .fanout_ttl(Duration::from_secs(60)) @@ -108,7 +111,8 @@ impl Default for Config { // prevent duplicates for 550 heartbeats(700millis * 550) = 385 secs .duplicate_cache_time(Duration::from_secs(385)) .message_id_fn(gossip_message_id) - .build(); + .build() + .expect("valid gossipsub configuration"); // discv5 configuration let discv5_config = Discv5ConfigBuilder::new() @@ -136,7 +140,8 @@ impl Default for Config { target_peers: 50, gs_config, discv5_config, - boot_nodes: vec![], + boot_nodes_enr: vec![], + boot_nodes_multiaddr: vec![], libp2p_nodes: vec![], client_version: lighthouse_version::version_with_platform(), disable_discovery: false, diff --git a/beacon_node/eth2_libp2p/src/discovery/enr.rs b/beacon_node/eth2_libp2p/src/discovery/enr.rs index e60bb38babd..6af9f21fb7e 100644 --- a/beacon_node/eth2_libp2p/src/discovery/enr.rs +++ b/beacon_node/eth2_libp2p/src/discovery/enr.rs @@ -6,6 +6,7 @@ use super::enr_ext::CombinedKeyExt; use super::ENR_FILENAME; use crate::types::{Enr, EnrBitfield}; use crate::NetworkConfig; +use discv5::enr::EnrKey; use libp2p::core::identity::Keypair; use slog::{debug, warn}; use ssz::{Decode, Encode}; @@ -48,23 +49,16 @@ impl Eth2Enr for Enr { } } -/// Loads an ENR from file if it exists and matches the current NodeId and sequence number. If none -/// exists, generates a new one. -/// +/// Either use the given ENR or load an ENR from file if it exists and matches the current NodeId +/// and sequence number. 
/// If an ENR exists, with the same NodeId, this function checks to see if the loaded ENR from -/// disk is suitable to use, otherwise we increment our newly generated ENR's sequence number. -pub fn build_or_load_enr( - local_key: Keypair, +/// disk is suitable to use, otherwise we increment the given ENR's sequence number. +pub fn use_or_load_enr( + enr_key: &CombinedKey, + local_enr: &mut Enr, config: &NetworkConfig, - enr_fork_id: EnrForkId, log: &slog::Logger, -) -> Result { - // Build the local ENR. - // Note: Discovery should update the ENR record's IP to the external IP as seen by the - // majority of our peers, if the CLI doesn't expressly forbid it. - let enr_key = CombinedKey::from_libp2p(&local_key)?; - let mut local_enr = build_enr::(&enr_key, config, enr_fork_id)?; - +) -> Result<(), String> { let enr_f = config.network_dir.join(ENR_FILENAME); if let Ok(mut enr_file) = File::open(enr_f.clone()) { let mut enr_string = String::new(); @@ -78,12 +72,15 @@ pub fn build_or_load_enr( if compare_enr(&local_enr, &disk_enr) { debug!(log, "ENR loaded from disk"; "file" => format!("{:?}", enr_f)); // the stored ENR has the same configuration, use it - return Ok(disk_enr); + *local_enr = disk_enr; + return Ok(()); } // same node id, different configuration - update the sequence number + // Note: local_enr is generated with default(0) attnets value, + // so a non default value in persisted enr will also update sequence number. let new_seq_no = disk_enr.seq().checked_add(1).ok_or_else(|| "ENR sequence number on file is too large. 
Remove it to generate a new NodeId")?; - local_enr.set_seq(new_seq_no, &enr_key).map_err(|e| { + local_enr.set_seq(new_seq_no, enr_key).map_err(|e| { format!("Could not update ENR sequence number: {:?}", e) })?; debug!(log, "ENR sequence number increased"; "seq" => new_seq_no); @@ -99,15 +96,31 @@ pub fn build_or_load_enr( save_enr_to_disk(&config.network_dir, &local_enr, log); - Ok(local_enr) + Ok(()) } -/// Builds a lighthouse ENR given a `NetworkConfig`. -pub fn build_enr( - enr_key: &CombinedKey, +/// Loads an ENR from file if it exists and matches the current NodeId and sequence number. If none +/// exists, generates a new one. +/// +/// If an ENR exists, with the same NodeId, this function checks to see if the loaded ENR from +/// disk is suitable to use, otherwise we increment our newly generated ENR's sequence number. +pub fn build_or_load_enr( + local_key: Keypair, config: &NetworkConfig, enr_fork_id: EnrForkId, + log: &slog::Logger, ) -> Result { + // Build the local ENR. + // Note: Discovery should update the ENR record's IP to the external IP as seen by the + // majority of our peers, if the CLI doesn't expressly forbid it. + let enr_key = CombinedKey::from_libp2p(&local_key)?; + let mut local_enr = build_enr::(&enr_key, config, enr_fork_id)?; + + use_or_load_enr(&enr_key, &mut local_enr, config, log)?; + Ok(local_enr) +} + +pub fn create_enr_builder_from_config(config: &NetworkConfig) -> EnrBuilder { let mut builder = EnrBuilder::new("v4"); if let Some(enr_address) = config.enr_address { builder.ip(enr_address); @@ -118,7 +131,17 @@ pub fn build_enr( // we always give it our listening tcp port // TODO: Add uPnP support to map udp and tcp ports let tcp_port = config.enr_tcp_port.unwrap_or_else(|| config.libp2p_port); - builder.tcp(tcp_port); + builder.tcp(tcp_port).tcp(config.libp2p_port); + builder +} + +/// Builds a lighthouse ENR given a `NetworkConfig`. 
+pub fn build_enr( + enr_key: &CombinedKey, + config: &NetworkConfig, + enr_fork_id: EnrForkId, +) -> Result { + let mut builder = create_enr_builder_from_config(config); // set the `eth2` field on our ENR builder.add_value(ETH2_ENR_KEY.into(), enr_fork_id.as_ssz_bytes()); @@ -129,7 +152,6 @@ pub fn build_enr( builder.add_value(BITFIELD_ENR_KEY.into(), bitfield.as_ssz_bytes()); builder - .tcp(config.libp2p_port) .build(enr_key) .map_err(|e| format!("Could not build Local ENR: {:?}", e)) } diff --git a/beacon_node/eth2_libp2p/src/discovery/mod.rs b/beacon_node/eth2_libp2p/src/discovery/mod.rs index a1b50f48e14..8ca6e7fe5fc 100644 --- a/beacon_node/eth2_libp2p/src/discovery/mod.rs +++ b/beacon_node/eth2_libp2p/src/discovery/mod.rs @@ -3,19 +3,19 @@ pub(crate) mod enr; pub mod enr_ext; // Allow external use of the lighthouse ENR builder -pub use enr::{build_enr, CombinedKey, Eth2Enr}; +pub use enr::{build_enr, create_enr_builder_from_config, use_or_load_enr, CombinedKey, Eth2Enr}; pub use enr_ext::{CombinedKeyExt, EnrExt}; pub use libp2p::core::identity::Keypair; use crate::metrics; -use crate::{error, Enr, NetworkConfig, NetworkGlobals}; +use crate::{error, Enr, NetworkConfig, NetworkGlobals, SubnetDiscovery}; use discv5::{enr::NodeId, Discv5, Discv5Event}; use enr::{BITFIELD_ENR_KEY, ETH2_ENR_KEY}; use futures::prelude::*; use futures::stream::FuturesUnordered; use libp2p::core::PeerId; use lru::LruCache; -use slog::{crit, debug, info, warn}; +use slog::{crit, debug, error, info, warn}; use ssz::{Decode, Encode}; use ssz_types::BitVector; use std::{ @@ -163,7 +163,7 @@ pub struct Discovery { impl Discovery { /// NOTE: Creating discovery requires running within a tokio execution environment. - pub fn new( + pub async fn new( local_key: &Keypair, config: &NetworkConfig, network_globals: Arc>, @@ -189,35 +189,84 @@ impl Discovery { .map_err(|e| format!("Discv5 service failed. 
Error: {:?}", e))?; // Add bootnodes to routing table - for bootnode_enr in config.boot_nodes.clone() { + for bootnode_enr in config.boot_nodes_enr.clone() { debug!( log, "Adding node to routing table"; - "node_id" => format!("{}", bootnode_enr.node_id()), - "peer_id" => format!("{}", bootnode_enr.peer_id()), + "node_id" => bootnode_enr.node_id().to_string(), + "peer_id" => bootnode_enr.peer_id().to_string(), "ip" => format!("{:?}", bootnode_enr.ip()), "udp" => format!("{:?}", bootnode_enr.udp()), "tcp" => format!("{:?}", bootnode_enr.tcp()) ); + let repr = bootnode_enr.to_string(); let _ = discv5.add_enr(bootnode_enr).map_err(|e| { - debug!( + error!( log, "Could not add peer to the local routing table"; - "error" => e.to_string() + "addr" => repr, + "error" => e.to_string(), ) }); } // Start the discv5 service and obtain an event stream let event_stream = if !config.disable_discovery { - discv5.start(listen_socket); + discv5.start(listen_socket).map_err(|e| e.to_string())?; debug!(log, "Discovery service started"); EventStream::Awaiting(Box::pin(discv5.event_stream())) } else { EventStream::InActive }; - // Obtain the event stream + if !config.boot_nodes_multiaddr.is_empty() { + info!(log, "Contacting Multiaddr boot-nodes for their ENR"); + } + + // get futures for requesting the Enrs associated to these multiaddr and wait for their + // completion + let mut fut_coll = config + .boot_nodes_multiaddr + .iter() + .map(|addr| addr.to_string()) + // request the ENR for this multiaddr and keep the original for logging + .map(|addr| { + futures::future::join( + discv5.request_enr(addr.clone()), + futures::future::ready(addr), + ) + }) + .collect::>(); + + while let Some((result, original_addr)) = fut_coll.next().await { + match result { + Ok(Some(enr)) => { + debug!( + log, + "Adding node to routing table"; + "node_id" => enr.node_id().to_string(), + "peer_id" => enr.peer_id().to_string(), + "ip" => format!("{:?}", enr.ip()), + "udp" => format!("{:?}", enr.udp()), + 
"tcp" => format!("{:?}", enr.tcp()) + ); + let _ = discv5.add_enr(enr).map_err(|e| { + error!( + log, + "Could not add peer to the local routing table"; + "addr" => original_addr.to_string(), + "error" => e.to_string(), + ) + }); + } + Ok(None) => { + error!(log, "No ENR found for MultiAddr"; "addr" => original_addr.to_string()) + } + Err(e) => { + error!(log, "Error getting mapping to ENR"; "multiaddr" => original_addr.to_string(), "error" => e.to_string()) + } + } + } Ok(Self { cached_enrs: LruCache::new(50), @@ -256,12 +305,19 @@ impl Discovery { } /// Processes a request to search for more peers on a subnet. - pub fn discover_subnet_peers(&mut self, subnet_id: SubnetId, min_ttl: Option) { + pub fn discover_subnet_peers(&mut self, subnets_to_discover: Vec) { // If the discv5 service isn't running, ignore queries if !self.started { return; } - self.add_subnet_query(subnet_id, min_ttl, 0); + debug!( + self.log, + "Making discovery query for subnets"; + "subnets" => format!("{:?}", subnets_to_discover.iter().map(|s| s.subnet_id).collect::>()) + ); + for subnet in subnets_to_discover { + self.add_subnet_query(subnet.subnet_id, subnet.min_ttl, 0); + } } /// Add an ENR to the routing table of the discovery mechanism. 
@@ -335,6 +391,9 @@ impl Discovery { // replace the global version *self.network_globals.local_enr.write() = self.discv5.local_enr(); + + // persist modified enr to disk + enr::save_enr_to_disk(Path::new(&self.enr_dir), &self.local_enr(), &self.log); Ok(()) } @@ -367,6 +426,9 @@ impl Discovery { // replace the global version with discovery version *self.network_globals.local_enr.write() = self.discv5.local_enr(); + + // persist modified enr to disk + enr::save_enr_to_disk(Path::new(&self.enr_dir), &self.local_enr(), &self.log); } /* Internal Functions */ @@ -459,6 +521,11 @@ impl Discovery { // This query is for searching for peers of a particular subnet // Drain subnet_queries so we can re-use it as we continue to process the queue let grouped_queries: Vec = subnet_queries.drain(..).collect(); + debug!( + self.log, + "Starting grouped subnet query"; + "subnets" => format!("{:?}", grouped_queries.iter().map(|q| q.subnet_id).collect::>()), + ); self.start_subnet_query(grouped_queries); } } @@ -733,8 +800,8 @@ impl Discovery { if enr.eth2() == self.local_enr().eth2() { trace!(self.log, "Peer found in process of query"; "peer_id" => format!("{}", enr.peer_id()), "tcp_socket" => enr.tcp_socket()); } else { - // this is temporary warning for debugging the DHT - warn!(self.log, "Found peer during discovery not on correct fork"; "peer_id" => format!("{}", enr.peer_id()), "tcp_socket" => enr.tcp_socket()); + // this is temporary warning for debugging the DHT + warn!(self.log, "Found peer during discovery not on correct fork"; "peer_id" => format!("{}", enr.peer_id()), "tcp_socket" => enr.tcp_socket()); } */ } diff --git a/beacon_node/eth2_libp2p/src/discovery/subnet_predicate.rs b/beacon_node/eth2_libp2p/src/discovery/subnet_predicate.rs index e38fd4cf89b..cc38350623d 100644 --- a/beacon_node/eth2_libp2p/src/discovery/subnet_predicate.rs +++ b/beacon_node/eth2_libp2p/src/discovery/subnet_predicate.rs @@ -1,5 +1,6 @@ ///! 
The subnet predicate used for searching for a particular subnet. use super::*; +use slog::{debug, trace}; use std::ops::Deref; /// Returns the predicate for a given subnet. @@ -30,7 +31,7 @@ where .collect(); if matches.is_empty() { - debug!( + trace!( log_clone, "Peer found but not on any of the desired subnets"; "peer_id" => format!("{}", enr.peer_id()) diff --git a/beacon_node/eth2_libp2p/src/lib.rs b/beacon_node/eth2_libp2p/src/lib.rs index f5c9e75a263..6fdbb3d31ac 100644 --- a/beacon_node/eth2_libp2p/src/lib.rs +++ b/beacon_node/eth2_libp2p/src/lib.rs @@ -14,16 +14,16 @@ pub mod rpc; mod service; pub mod types; -pub use crate::types::{error, Enr, GossipTopic, NetworkGlobals, PubsubMessage}; +pub use crate::types::{error, Enr, GossipTopic, NetworkGlobals, PubsubMessage, SubnetDiscovery}; pub use behaviour::{BehaviourEvent, PeerRequestId, Request, Response}; pub use config::Config as NetworkConfig; pub use discovery::{CombinedKeyExt, EnrExt, Eth2Enr}; pub use discv5; -pub use libp2p::gossipsub::{MessageId, Topic, TopicHash}; +pub use libp2p::gossipsub::{Gossipsub, MessageAcceptance, MessageId, Topic, TopicHash}; pub use libp2p::{core::ConnectedPoint, PeerId, Swarm}; pub use libp2p::{multiaddr, Multiaddr}; pub use metrics::scrape_discovery_metrics; pub use peer_manager::{ client::Client, score::PeerAction, PeerDB, PeerInfo, PeerSyncStatus, SyncInfo, }; -pub use service::{Libp2pEvent, Service, NETWORK_KEY_FILENAME}; +pub use service::{load_private_key, Libp2pEvent, Service, NETWORK_KEY_FILENAME}; diff --git a/beacon_node/eth2_libp2p/src/metrics.rs b/beacon_node/eth2_libp2p/src/metrics.rs index b8677d30552..bffe7292200 100644 --- a/beacon_node/eth2_libp2p/src/metrics.rs +++ b/beacon_node/eth2_libp2p/src/metrics.rs @@ -34,9 +34,20 @@ lazy_static! 
{ "Unsolicited discovery requests per ip per second", &["Addresses"] ); - pub static ref GOSSIPSUB_SUBSCRIBED_PEERS_COUNT: Result = try_create_int_gauge_vec( - "gossipsub_peers_per_topic_count", - "Peers subscribed per topic", + pub static ref PEERS_PER_CLIENT: Result = try_create_int_gauge_vec( + "libp2p_peers_per_client", + "The connected peers via client implementation", + &["Client"] + ); + pub static ref FAILED_ATTESTATION_PUBLISHES_PER_SUBNET: Result = + try_create_int_gauge_vec( + "gossipsub_failed_attestation_publishes_per_subnet", + "Failed attestation publishes per subnet", + &["subnet"] + ); + pub static ref FAILED_PUBLISHES_PER_MAIN_TOPIC: Result = try_create_int_gauge_vec( + "gossipsub_failed_publishes_per_main_topic", + "Failed gossip publishes", &["topic_hash"] ); } diff --git a/beacon_node/eth2_libp2p/src/peer_manager/client.rs b/beacon_node/eth2_libp2p/src/peer_manager/client.rs index a31fee49309..f23f32be607 100644 --- a/beacon_node/eth2_libp2p/src/peer_manager/client.rs +++ b/beacon_node/eth2_libp2p/src/peer_manager/client.rs @@ -20,7 +20,7 @@ pub struct Client { pub agent_string: Option, } -#[derive(Clone, Debug, Serialize)] +#[derive(Clone, Debug, Serialize, PartialEq)] pub enum ClientKind { /// A lighthouse node (the best kind). Lighthouse, @@ -30,6 +30,8 @@ pub enum ClientKind { Teku, /// A Prysm node. Prysm, + /// A lodestar node. + Lodestar, /// An unknown client. 
Unknown, } @@ -84,6 +86,7 @@ impl std::fmt::Display for Client { "Prysm: version: {}, os_version: {}", self.version, self.os_version ), + ClientKind::Lodestar => write!(f, "Lodestar: version: {}", self.version), ClientKind::Unknown => { if let Some(agent_string) = &self.agent_string { write!(f, "Unknown: {}", agent_string) @@ -95,6 +98,12 @@ impl std::fmt::Display for Client { } } +impl std::fmt::Display for ClientKind { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self) + } +} + // helper function to identify clients from their agent_version. Returns the client // kind and it's associated version and the OS kind. fn client_from_agent_version(agent_version: &str) -> (ClientKind, String, String) { @@ -157,6 +166,18 @@ fn client_from_agent_version(agent_version: &str) -> (ClientKind, String, String } (kind, version, os_version) } + Some("js-libp2p") => { + let kind = ClientKind::Lodestar; + let mut version = String::from("unknown"); + let mut os_version = version.clone(); + if let Some(agent_version) = agent_split.next() { + version = agent_version.into(); + if let Some(agent_os_version) = agent_split.next() { + os_version = agent_os_version.into(); + } + } + (kind, version, os_version) + } _ => { let unknown = String::from("unknown"); (ClientKind::Unknown, unknown.clone(), unknown) diff --git a/beacon_node/eth2_libp2p/src/peer_manager/mod.rs b/beacon_node/eth2_libp2p/src/peer_manager/mod.rs index d8cdfb4b2a3..2d40f8a0d45 100644 --- a/beacon_node/eth2_libp2p/src/peer_manager/mod.rs +++ b/beacon_node/eth2_libp2p/src/peer_manager/mod.rs @@ -4,7 +4,7 @@ pub use self::peerdb::*; use crate::discovery::{Discovery, DiscoveryEvent}; use crate::rpc::{GoodbyeReason, MetaData, Protocol, RPCError, RPCResponseErrorCode}; use crate::{error, metrics}; -use crate::{EnrExt, NetworkConfig, NetworkGlobals, PeerId}; +use crate::{EnrExt, NetworkConfig, NetworkGlobals, PeerId, SubnetDiscovery}; use futures::prelude::*; use futures::Stream; use 
hashset_delay::HashSetDelay; @@ -19,7 +19,7 @@ use std::{ task::{Context, Poll}, time::{Duration, Instant}, }; -use types::{EthSpec, SubnetId}; +use types::EthSpec; pub use libp2p::core::{identity::Keypair, Multiaddr}; @@ -88,14 +88,14 @@ pub enum PeerManagerEvent { impl PeerManager { // NOTE: Must be run inside a tokio executor. - pub fn new( + pub async fn new( local_key: &Keypair, config: &NetworkConfig, network_globals: Arc>, log: &slog::Logger, ) -> error::Result { // start the discovery service - let mut discovery = Discovery::new(local_key, config, network_globals.clone(), log)?; + let mut discovery = Discovery::new(local_key, config, network_globals.clone(), log).await?; // start searching for peers discovery.discover_peers(); @@ -213,17 +213,19 @@ impl PeerManager { } /// A request to find peers on a given subnet. - pub fn discover_subnet_peers(&mut self, subnet_id: SubnetId, min_ttl: Option) { + pub fn discover_subnet_peers(&mut self, subnets_to_discover: Vec) { // Extend the time to maintain peers if required. - if let Some(min_ttl) = min_ttl { - self.network_globals - .peers - .write() - .extend_peers_on_subnet(subnet_id, min_ttl); + for s in subnets_to_discover.iter() { + if let Some(min_ttl) = s.min_ttl { + self.network_globals + .peers + .write() + .extend_peers_on_subnet(s.subnet_id, min_ttl); + } } // request the subnet query from discovery - self.discovery.discover_subnet_peers(subnet_id, min_ttl); + self.discovery.discover_subnet_peers(subnets_to_discover); } /// A STATUS message has been received from a peer. This resets the status timer. @@ -237,6 +239,27 @@ impl PeerManager { /// /// This is also called when dialing a peer fails. pub fn notify_disconnect(&mut self, peer_id: &PeerId) { + // Decrement the PEERS_PER_CLIENT metric + if let Some(kind) = self + .network_globals + .peers + .read() + .peer_info(peer_id) + .and_then(|peer_info| { + if let Connected { .. 
} = peer_info.connection_status { + Some(peer_info.client.kind.clone()) + } else { + None + } + }) + { + if let Some(v) = + metrics::get_int_gauge(&metrics::PEERS_PER_CLIENT, &[&kind.to_string()]) + { + v.dec() + }; + } + self.network_globals.peers.write().disconnect(peer_id); // remove the ping and status timer for the peer @@ -294,8 +317,25 @@ impl PeerManager { /// Updates `PeerInfo` with `identify` information. pub fn identify(&mut self, peer_id: &PeerId, info: &IdentifyInfo) { if let Some(peer_info) = self.network_globals.peers.write().peer_info_mut(peer_id) { + let previous_kind = peer_info.client.kind.clone(); peer_info.client = client::Client::from_identify_info(info); peer_info.listening_addresses = info.listen_addrs.clone(); + + if previous_kind != peer_info.client.kind { + // update the peer client kind metric + if let Some(v) = metrics::get_int_gauge( + &metrics::PEERS_PER_CLIENT, + &[&peer_info.client.kind.to_string()], + ) { + v.inc() + }; + if let Some(v) = metrics::get_int_gauge( + &metrics::PEERS_PER_CLIENT, + &[&previous_kind.to_string()], + ) { + v.dec() + }; + } } else { crit!(self.log, "Received an Identify response from an unknown peer"; "peer_id" => peer_id.to_string()); } @@ -539,11 +579,8 @@ impl PeerManager { /// /// This is called by `connect_ingoing` and `connect_outgoing`. /// - /// This informs if the peer was accepted in to the db or not. + /// Informs if the peer was accepted in to the db or not. 
fn connect_peer(&mut self, peer_id: &PeerId, connection: ConnectingType) -> bool { - // TODO: remove after timed updates - //self.update_reputations(); - { let mut peerdb = self.network_globals.peers.write(); if peerdb.connection_status(peer_id).map(|c| c.is_banned()) == Some(true) { @@ -552,7 +589,10 @@ impl PeerManager { } match connection { - ConnectingType::Dialing => peerdb.dialing_peer(peer_id), + ConnectingType::Dialing => { + peerdb.dialing_peer(peer_id); + return true; + } ConnectingType::IngoingConnected => peerdb.connect_outgoing(peer_id), ConnectingType::OutgoingConnected => peerdb.connect_ingoing(peer_id), } @@ -569,6 +609,21 @@ impl PeerManager { self.network_globals.connected_peers() as i64, ); + // Increment the PEERS_PER_CLIENT metric + if let Some(kind) = self + .network_globals + .peers + .read() + .peer_info(peer_id) + .map(|peer_info| peer_info.client.kind.clone()) + { + if let Some(v) = + metrics::get_int_gauge(&metrics::PEERS_PER_CLIENT, &[&kind.to_string()]) + { + v.inc() + }; + } + true } @@ -690,7 +745,8 @@ impl PeerManager { /// NOTE: Discovery will only add a new query if one isn't already queued. fn heartbeat(&mut self) { // TODO: Provide a back-off time for discovery queries. I.e Queue many initially, then only - // perform discoveries over a larger fixed interval. Perhaps one every 6 heartbeats + // perform discoveries over a larger fixed interval. Perhaps one every 6 heartbeats. This + // is achievable with a leaky bucket let peer_count = self.network_globals.connected_or_dialing_peers(); if peer_count < self.target_peers { // If we need more peers, queue a discovery lookup. 
diff --git a/beacon_node/eth2_libp2p/src/peer_manager/peer_info.rs b/beacon_node/eth2_libp2p/src/peer_manager/peer_info.rs index fa9def34f49..2448ee424a7 100644 --- a/beacon_node/eth2_libp2p/src/peer_manager/peer_info.rs +++ b/beacon_node/eth2_libp2p/src/peer_manager/peer_info.rs @@ -4,9 +4,10 @@ use super::PeerSyncStatus; use crate::rpc::MetaData; use crate::Multiaddr; use serde::{ - ser::{SerializeStructVariant, Serializer}, + ser::{SerializeStruct, Serializer}, Serialize, }; +use std::net::IpAddr; use std::time::Instant; use types::{EthSpec, SubnetId}; use PeerConnectionStatus::*; @@ -104,6 +105,8 @@ pub enum PeerConnectionStatus { Banned { /// moment when the peer was banned. since: Instant, + /// ip addresses this peer had a the moment of the ban + ip_addresses: Vec, }, /// We are currently dialing this peer. Dialing { @@ -117,29 +120,51 @@ pub enum PeerConnectionStatus { /// Serialization for http requests. impl Serialize for PeerConnectionStatus { fn serialize(&self, serializer: S) -> Result { + let mut s = serializer.serialize_struct("connection_status", 5)?; match self { Connected { n_in, n_out } => { - let mut s = serializer.serialize_struct_variant("", 0, "Connected", 2)?; - s.serialize_field("in", n_in)?; - s.serialize_field("out", n_out)?; + s.serialize_field("status", "connected")?; + s.serialize_field("connections_in", n_in)?; + s.serialize_field("connections_out", n_out)?; + s.serialize_field("last_seen", &0)?; + s.serialize_field("banned_ips", &Vec::::new())?; s.end() } Disconnected { since } => { - let mut s = serializer.serialize_struct_variant("", 1, "Disconnected", 1)?; - s.serialize_field("since", &since.elapsed().as_secs())?; + s.serialize_field("status", "disconnected")?; + s.serialize_field("connections_in", &0)?; + s.serialize_field("connections_out", &0)?; + s.serialize_field("last_seen", &since.elapsed().as_secs())?; + s.serialize_field("banned_ips", &Vec::::new())?; s.end() } - Banned { since } => { - let mut s = 
serializer.serialize_struct_variant("", 2, "Banned", 1)?; - s.serialize_field("since", &since.elapsed().as_secs())?; + Banned { + since, + ip_addresses, + } => { + s.serialize_field("status", "banned")?; + s.serialize_field("connections_in", &0)?; + s.serialize_field("connections_out", &0)?; + s.serialize_field("last_seen", &since.elapsed().as_secs())?; + s.serialize_field("banned_ips", &ip_addresses)?; s.end() } Dialing { since } => { - let mut s = serializer.serialize_struct_variant("", 3, "Dialing", 1)?; - s.serialize_field("since", &since.elapsed().as_secs())?; + s.serialize_field("status", "dialing")?; + s.serialize_field("connections_in", &0)?; + s.serialize_field("connections_out", &0)?; + s.serialize_field("last_seen", &since.elapsed().as_secs())?; + s.serialize_field("banned_ips", &Vec::::new())?; + s.end() + } + Unknown => { + s.serialize_field("status", "unknown")?; + s.serialize_field("connections_in", &0)?; + s.serialize_field("connections_out", &0)?; + s.serialize_field("last_seen", &0)?; + s.serialize_field("banned_ips", &Vec::::new())?; s.end() } - Unknown => serializer.serialize_unit_variant("", 4, "Unknown"), } } } @@ -218,15 +243,16 @@ impl PeerConnectionStatus { } /// Modifies the status to Banned - pub fn ban(&mut self) { + pub fn ban(&mut self, ip_addresses: Vec) { *self = Banned { since: Instant::now(), + ip_addresses, }; } /// The score system has unbanned the peer. Update the connection status pub fn unban(&mut self) { - if let PeerConnectionStatus::Banned { since } = self { + if let PeerConnectionStatus::Banned { since, .. 
} = self { *self = PeerConnectionStatus::Disconnected { since: *since } } } diff --git a/beacon_node/eth2_libp2p/src/peer_manager/peerdb.rs b/beacon_node/eth2_libp2p/src/peer_manager/peerdb.rs index 73942b3fdc9..fc28cc55f37 100644 --- a/beacon_node/eth2_libp2p/src/peer_manager/peerdb.rs +++ b/beacon_node/eth2_libp2p/src/peer_manager/peerdb.rs @@ -1,11 +1,13 @@ use super::peer_info::{PeerConnectionStatus, PeerInfo}; use super::peer_sync_status::PeerSyncStatus; use super::score::{Score, ScoreState}; +use crate::multiaddr::Protocol; use crate::rpc::methods::MetaData; use crate::PeerId; use rand::seq::SliceRandom; use slog::{crit, debug, trace, warn}; use std::collections::HashMap; +use std::net::IpAddr; use std::time::Instant; use types::{EthSpec, SubnetId}; @@ -13,6 +15,9 @@ use types::{EthSpec, SubnetId}; const MAX_DC_PEERS: usize = 500; /// The maximum number of banned nodes to remember. const MAX_BANNED_PEERS: usize = 1000; +/// If there are more than `BANNED_PEERS_PER_IP_THRESHOLD` many banned peers with the same IP we ban +/// the IP. +const BANNED_PEERS_PER_IP_THRESHOLD: usize = 5; /// Storage of known peers, their reputation and information pub struct PeerDB { @@ -20,18 +25,72 @@ pub struct PeerDB { peers: HashMap>, /// The number of disconnected nodes in the database. disconnected_peers: usize, - /// The number of banned peers in the database. - banned_peers: usize, + /// Counts banned peers in total and per ip + banned_peers_count: BannedPeersCount, /// PeerDB's logger log: slog::Logger, } +pub struct BannedPeersCount { + /// The number of banned peers in the database. + banned_peers: usize, + /// maps ips to number of banned peers with this ip + banned_peers_per_ip: HashMap, +} + +impl BannedPeersCount { + /// Removes the peer from the counts if it is banned. Returns true if the peer was banned and + /// false otherwise. 
+ pub fn remove_banned_peer(&mut self, connection_status: &PeerConnectionStatus) -> bool { + match connection_status { + PeerConnectionStatus::Banned { ip_addresses, .. } => { + self.banned_peers = self.banned_peers.saturating_sub(1); + for address in ip_addresses { + if let Some(count) = self.banned_peers_per_ip.get_mut(address) { + *count = count.saturating_sub(1); + } + } + true + } + _ => false, //if not banned do nothing + } + } + + pub fn add_banned_peer(&mut self, connection_status: &PeerConnectionStatus) { + if let PeerConnectionStatus::Banned { ip_addresses, .. } = connection_status { + self.banned_peers += 1; + for address in ip_addresses { + *self.banned_peers_per_ip.entry(*address).or_insert(0) += 1; + } + } + } + + pub fn banned_peers(&self) -> usize { + self.banned_peers + } + + /// An IP is considered banned if more than BANNED_PEERS_PER_IP_THRESHOLD banned peers + /// exist with this IP + pub fn ip_is_banned(&self, ip: &IpAddr) -> bool { + self.banned_peers_per_ip + .get(ip) + .map_or(false, |count| *count > BANNED_PEERS_PER_IP_THRESHOLD) + } + + pub fn new() -> Self { + BannedPeersCount { + banned_peers: 0, + banned_peers_per_ip: HashMap::new(), + } + } +} + impl PeerDB { pub fn new(log: &slog::Logger) -> Self { Self { log: log.clone(), disconnected_peers: 0, - banned_peers: 0, + banned_peers_count: BannedPeersCount::new(), peers: HashMap::new(), } } @@ -99,17 +158,35 @@ impl PeerDB { /// Returns true if the Peer is banned. 
pub fn is_banned(&self, peer_id: &PeerId) -> bool { - match self.peers.get(peer_id).map(|info| info.score.state()) { - Some(ScoreState::Banned) => true, - _ => false, + if let Some(peer) = self.peers.get(peer_id) { + match peer.score.state() { + ScoreState::Banned => true, + _ => self.ip_is_banned(peer), + } + } else { + false } } + fn ip_is_banned(&self, peer: &PeerInfo) -> bool { + peer.listening_addresses.iter().any(|addr| { + addr.iter().any(|p| match p { + Protocol::Ip4(ip) => self.banned_peers_count.ip_is_banned(&ip.into()), + Protocol::Ip6(ip) => self.banned_peers_count.ip_is_banned(&ip.into()), + _ => false, + }) + }) + } + /// Returns true if the Peer is either banned or in the disconnected state. pub fn is_banned_or_disconnected(&self, peer_id: &PeerId) -> bool { - match self.peers.get(peer_id).map(|info| info.score.state()) { - Some(ScoreState::Banned) | Some(ScoreState::Disconnected) => true, - _ => false, + if let Some(peer) = self.peers.get(peer_id) { + match peer.score.state() { + ScoreState::Banned | ScoreState::Disconnected => true, + _ => self.ip_is_banned(peer), + } + } else { + false } } @@ -233,9 +310,10 @@ impl PeerDB { if info.connection_status.is_disconnected() { self.disconnected_peers = self.disconnected_peers.saturating_sub(1); } - if info.connection_status.is_banned() { - self.banned_peers = self.banned_peers.saturating_sub(1); - } + + self.banned_peers_count + .remove_banned_peer(&info.connection_status); + info.connection_status = PeerConnectionStatus::Dialing { since: Instant::now(), }; @@ -284,9 +362,8 @@ impl PeerDB { if info.connection_status.is_disconnected() { self.disconnected_peers = self.disconnected_peers.saturating_sub(1); } - if info.connection_status.is_banned() { - self.banned_peers = self.banned_peers.saturating_sub(1); - } + self.banned_peers_count + .remove_banned_peer(&info.connection_status); info.connection_status.connect_ingoing(); } @@ -297,9 +374,8 @@ impl PeerDB { if info.connection_status.is_disconnected() { 
self.disconnected_peers = self.disconnected_peers.saturating_sub(1); } - if info.connection_status.is_banned() { - self.banned_peers = self.banned_peers.saturating_sub(1); - } + self.banned_peers_count + .remove_banned_peer(&info.connection_status); info.connection_status.connect_outgoing(); } @@ -329,8 +405,23 @@ impl PeerDB { self.disconnected_peers = self.disconnected_peers.saturating_sub(1); } if !info.connection_status.is_banned() { - info.connection_status.ban(); - self.banned_peers += 1; + info.connection_status + .ban( + info.listening_addresses + .iter() + .fold(Vec::new(), |mut v, a| { + for p in a { + match p { + Protocol::Ip4(ip) => v.push(ip.into()), + Protocol::Ip6(ip) => v.push(ip.into()), + _ => (), + } + } + v + }), + ); + self.banned_peers_count + .add_banned_peer(&info.connection_status); } self.shrink_to_fit(); } @@ -345,8 +436,9 @@ impl PeerDB { }); if info.connection_status.is_banned() { + self.banned_peers_count + .remove_banned_peer(&info.connection_status); info.connection_status.unban(); - self.banned_peers = self.banned_peers.saturating_sub(1); } self.shrink_to_fit(); } @@ -355,9 +447,9 @@ impl PeerDB { /// Drops the peers with the lowest reputation so that the number of /// disconnected peers is less than MAX_DC_PEERS pub fn shrink_to_fit(&mut self) { - // Remove excess baned peers - while self.banned_peers > MAX_BANNED_PEERS { - if let Some(to_drop) = self + // Remove excess banned peers + while self.banned_peers_count.banned_peers() > MAX_BANNED_PEERS { + if let Some(to_drop) = if let Some((id, info)) = self .peers .iter() .filter(|(_, info)| info.connection_status.is_banned()) @@ -366,15 +458,23 @@ impl PeerDB { .score .partial_cmp(&info_b.score) .unwrap_or(std::cmp::Ordering::Equal) - }) - .map(|(id, _)| id.clone()) - { + }) { + self.banned_peers_count + .remove_banned_peer(&info.connection_status); + Some(id.clone()) + } else { + // If there is no minimum, this is a coding error. 
+ crit!( + self.log, + "banned_peers > MAX_BANNED_PEERS despite no banned peers in db!" + ); + // reset banned_peers this will also exit the loop + self.banned_peers_count = BannedPeersCount::new(); + None + } { debug!(self.log, "Removing old banned peer"; "peer_id" => to_drop.to_string()); self.peers.remove(&to_drop); } - // If there is no minimum, this is a coding error. For safety we decrease - // the count to avoid a potential infinite loop. - self.banned_peers = self.banned_peers.saturating_sub(1); } // Remove excess disconnected peers @@ -422,8 +522,11 @@ impl PeerDB { #[cfg(test)] mod tests { use super::*; + use libp2p::core::Multiaddr; use slog::{o, Drain}; + use std::net::{Ipv4Addr, Ipv6Addr}; use types::MinimalEthSpec; + type M = MinimalEthSpec; pub fn build_log(level: slog::Level, enabled: bool) -> slog::Logger { @@ -503,13 +606,13 @@ mod tests { let p = PeerId::random(); pdb.connect_ingoing(&p); } - assert_eq!(pdb.banned_peers, 0); + assert_eq!(pdb.banned_peers_count.banned_peers(), 0); for p in pdb.connected_peer_ids().cloned().collect::>() { pdb.ban(&p); } - assert_eq!(pdb.banned_peers, MAX_BANNED_PEERS); + assert_eq!(pdb.banned_peers_count.banned_peers(), MAX_BANNED_PEERS); } #[test] @@ -608,24 +711,39 @@ mod tests { pdb.connect_ingoing(&random_peer2); pdb.connect_ingoing(&random_peer3); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); - assert_eq!(pdb.banned_peers, pdb.banned_peers().count()); + assert_eq!( + pdb.banned_peers_count.banned_peers(), + pdb.banned_peers().count() + ); pdb.connect_ingoing(&random_peer); pdb.disconnect(&random_peer1); pdb.ban(&random_peer2); pdb.connect_ingoing(&random_peer3); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); - assert_eq!(pdb.banned_peers, pdb.banned_peers().count()); + assert_eq!( + pdb.banned_peers_count.banned_peers(), + pdb.banned_peers().count() + ); pdb.ban(&random_peer1); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); - 
assert_eq!(pdb.banned_peers, pdb.banned_peers().count()); + assert_eq!( + pdb.banned_peers_count.banned_peers(), + pdb.banned_peers().count() + ); pdb.connect_outgoing(&random_peer2); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); - assert_eq!(pdb.banned_peers, pdb.banned_peers().count()); + assert_eq!( + pdb.banned_peers_count.banned_peers(), + pdb.banned_peers().count() + ); pdb.ban(&random_peer3); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); - assert_eq!(pdb.banned_peers, pdb.banned_peers().count()); + assert_eq!( + pdb.banned_peers_count.banned_peers(), + pdb.banned_peers().count() + ); pdb.ban(&random_peer3); pdb.connect_ingoing(&random_peer1); @@ -633,15 +751,191 @@ mod tests { pdb.ban(&random_peer3); pdb.connect_ingoing(&random_peer); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); - assert_eq!(pdb.banned_peers, pdb.banned_peers().count()); + assert_eq!( + pdb.banned_peers_count.banned_peers(), + pdb.banned_peers().count() + ); pdb.disconnect(&random_peer); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); - assert_eq!(pdb.banned_peers, pdb.banned_peers().count()); + assert_eq!( + pdb.banned_peers_count.banned_peers(), + pdb.banned_peers().count() + ); pdb.disconnect(&random_peer); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); - assert_eq!(pdb.banned_peers, pdb.banned_peers().count()); + assert_eq!( + pdb.banned_peers_count.banned_peers(), + pdb.banned_peers().count() + ); pdb.ban(&random_peer); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); } + + fn connect_peer_with_ips(pdb: &mut PeerDB, ips: Vec>) -> PeerId { + let p = PeerId::random(); + pdb.connect_ingoing(&p); + pdb.peers.get_mut(&p).unwrap().listening_addresses = ips + .into_iter() + .map(|ip_addresses| { + let mut addr = Multiaddr::empty(); + for ip_address in ip_addresses { + addr.push(Protocol::from(ip_address)); + } + addr + }) + .collect(); + p + } + + #[test] + 
fn test_ban_address() { + let mut pdb = get_db(); + + let ip1: IpAddr = Ipv4Addr::new(1, 2, 3, 4).into(); + let ip2: IpAddr = Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8).into(); + let ip3: IpAddr = Ipv4Addr::new(1, 2, 3, 5).into(); + let ip4: IpAddr = Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 9).into(); + let ip5: IpAddr = Ipv4Addr::new(2, 2, 3, 4).into(); + + let mut peers = Vec::new(); + for i in 0..BANNED_PEERS_PER_IP_THRESHOLD + 2 { + peers.push(connect_peer_with_ips( + &mut pdb, + if i == 0 { + vec![vec![ip1], vec![ip2]] + } else { + vec![vec![ip1, ip2], vec![ip3, ip4]] + }, + )); + } + + let p1 = connect_peer_with_ips(&mut pdb, vec![vec![ip1]]); + let p2 = connect_peer_with_ips(&mut pdb, vec![vec![ip2, ip5]]); + let p3 = connect_peer_with_ips(&mut pdb, vec![vec![ip3], vec![ip5]]); + let p4 = connect_peer_with_ips(&mut pdb, vec![vec![ip5, ip4]]); + let p5 = connect_peer_with_ips(&mut pdb, vec![vec![ip5]]); + + for p in &peers[..BANNED_PEERS_PER_IP_THRESHOLD + 1] { + pdb.ban(p); + } + + //check that ip1 and ip2 are banned but ip3-5 not + assert!(pdb.is_banned(&p1)); + assert!(pdb.is_banned(&p2)); + assert!(!pdb.is_banned(&p3)); + assert!(!pdb.is_banned(&p4)); + assert!(!pdb.is_banned(&p5)); + + //ban also the last peer in peers + pdb.ban(&peers[BANNED_PEERS_PER_IP_THRESHOLD + 1]); + + //check that ip1-ip4 are banned but ip5 not + assert!(pdb.is_banned(&p1)); + assert!(pdb.is_banned(&p2)); + assert!(pdb.is_banned(&p3)); + assert!(pdb.is_banned(&p4)); + assert!(!pdb.is_banned(&p5)); + + //peers[0] gets unbanned + pdb.unban(&peers[0]); + + //nothing changed + assert!(pdb.is_banned(&p1)); + assert!(pdb.is_banned(&p2)); + assert!(pdb.is_banned(&p3)); + assert!(pdb.is_banned(&p4)); + assert!(!pdb.is_banned(&p5)); + + //peers[1] gets unbanned + pdb.unban(&peers[1]); + + //all ips are unbanned + assert!(!pdb.is_banned(&p1)); + assert!(!pdb.is_banned(&p2)); + assert!(!pdb.is_banned(&p3)); + assert!(!pdb.is_banned(&p4)); + assert!(!pdb.is_banned(&p5)); + } + + #[test] + fn 
test_banned_ip_consistent_after_changing_ips() { + let mut pdb = get_db(); + + let ip1: IpAddr = Ipv4Addr::new(1, 2, 3, 4).into(); + let ip2: IpAddr = Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8).into(); + + let mut peers = Vec::new(); + for _ in 0..BANNED_PEERS_PER_IP_THRESHOLD + 1 { + peers.push(connect_peer_with_ips(&mut pdb, vec![vec![ip1]])); + } + + let p1 = connect_peer_with_ips(&mut pdb, vec![vec![ip1]]); + let p2 = connect_peer_with_ips(&mut pdb, vec![vec![ip2]]); + + //ban all peers + for p in &peers { + pdb.ban(p); + } + + //check ip is banned + assert!(pdb.is_banned(&p1)); + assert!(!pdb.is_banned(&p2)); + + //change addresses of banned peers + for p in &peers { + pdb.peers.get_mut(p).unwrap().listening_addresses = + vec![Multiaddr::empty().with(Protocol::from(ip2))]; + } + + //check still the same ip is banned + assert!(pdb.is_banned(&p1)); + assert!(!pdb.is_banned(&p2)); + + //unban a peer + pdb.unban(&peers[0]); + + //check not banned anymore + assert!(!pdb.is_banned(&p1)); + assert!(!pdb.is_banned(&p2)); + + //check still not banned after new ban + pdb.ban(&peers[0]); + assert!(!pdb.is_banned(&p1)); + assert!(!pdb.is_banned(&p2)); + + //unban and reban all peers + for p in &peers { + pdb.unban(p); + pdb.ban(p); + } + + //ip2 is now banned + assert!(!pdb.is_banned(&p1)); + assert!(pdb.is_banned(&p2)); + + //change ips back again + for p in &peers { + pdb.peers.get_mut(p).unwrap().listening_addresses = + vec![Multiaddr::empty().with(Protocol::from(ip1))]; + } + + //reban every peer except one + for p in &peers[1..] 
{ + pdb.unban(p); + pdb.ban(p); + } + + //nothing is banned + assert!(!pdb.is_banned(&p1)); + assert!(!pdb.is_banned(&p2)); + + //reban last peer + pdb.unban(&peers[0]); + pdb.ban(&peers[0]); + + //ip1 is banned + assert!(pdb.is_banned(&p1)); + assert!(!pdb.is_banned(&p2)); + } } diff --git a/beacon_node/eth2_libp2p/src/rpc/codec/base.rs b/beacon_node/eth2_libp2p/src/rpc/codec/base.rs index 97e80ce8b7c..d7a64b58d1d 100644 --- a/beacon_node/eth2_libp2p/src/rpc/codec/base.rs +++ b/beacon_node/eth2_libp2p/src/rpc/codec/base.rs @@ -1,5 +1,6 @@ //! This handles the various supported encoding mechanism for the Eth 2.0 RPC. +use crate::rpc::methods::ErrorType; use crate::rpc::{RPCCodedResponse, RPCRequest, RPCResponse}; use libp2p::bytes::BufMut; use libp2p::bytes::BytesMut; @@ -8,12 +9,12 @@ use tokio_util::codec::{Decoder, Encoder}; use types::EthSpec; pub trait OutboundCodec: Encoder + Decoder { - type ErrorType; + type CodecErrorType; fn decode_error( &mut self, src: &mut BytesMut, - ) -> Result, ::Error>; + ) -> Result, ::Error>; } /* Global Inbound Codec */ @@ -130,8 +131,8 @@ where impl Decoder for BaseOutboundCodec where TSpec: EthSpec, - TCodec: - OutboundCodec, ErrorType = String> + Decoder>, + TCodec: OutboundCodec, CodecErrorType = ErrorType> + + Decoder>, { type Item = RPCCodedResponse; type Error = ::Error; diff --git a/beacon_node/eth2_libp2p/src/rpc/codec/ssz_snappy.rs b/beacon_node/eth2_libp2p/src/rpc/codec/ssz_snappy.rs index d17ac6aa57b..94fd5d4c598 100644 --- a/beacon_node/eth2_libp2p/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/eth2_libp2p/src/rpc/codec/ssz_snappy.rs @@ -374,9 +374,12 @@ impl Decoder for SSZSnappyOutboundCodec { } impl OutboundCodec> for SSZSnappyOutboundCodec { - type ErrorType = String; + type CodecErrorType = ErrorType; - fn decode_error(&mut self, src: &mut BytesMut) -> Result, RPCError> { + fn decode_error( + &mut self, + src: &mut BytesMut, + ) -> Result, RPCError> { if self.len.is_none() { // Decode the length of the 
uncompressed bytes from an unsigned varint match self.inner.decode(src).map_err(RPCError::from)? { @@ -401,9 +404,9 @@ impl OutboundCodec> for SSZSnappyOutboundCodec let n = reader.get_ref().position(); self.len = None; let _read_bytes = src.split_to(n as usize); - Ok(Some( - String::from_utf8_lossy(&>::from_ssz_bytes(&decoded_buffer)?).into(), - )) + Ok(Some(ErrorType(VariableList::from_ssz_bytes( + &decoded_buffer, + )?))) } Err(e) => match e.kind() { // Haven't received enough bytes to decode yet diff --git a/beacon_node/eth2_libp2p/src/rpc/handler.rs b/beacon_node/eth2_libp2p/src/rpc/handler.rs index 9492942d7f5..a4b18b03b5a 100644 --- a/beacon_node/eth2_libp2p/src/rpc/handler.rs +++ b/beacon_node/eth2_libp2p/src/rpc/handler.rs @@ -81,7 +81,7 @@ where TSpec: EthSpec, { /// The upgrade for inbound substreams. - listen_protocol: SubstreamProtocol>, + listen_protocol: SubstreamProtocol, ()>, /// Errors occurring on outbound and inbound connections queued for reporting back. pending_errors: Vec, @@ -116,9 +116,6 @@ where /// Maximum number of concurrent outbound substreams being opened. Value is never modified. max_dial_negotiated: u32, - /// Value to return from `connection_keep_alive`. - keep_alive: KeepAlive, - /// State of the handler. state: HandlerState, @@ -228,7 +225,10 @@ impl RPCHandler where TSpec: EthSpec, { - pub fn new(listen_protocol: SubstreamProtocol>, log: &slog::Logger) -> Self { + pub fn new( + listen_protocol: SubstreamProtocol, ()>, + log: &slog::Logger, + ) -> Self { RPCHandler { listen_protocol, pending_errors: Vec::new(), @@ -243,7 +243,6 @@ where current_outbound_substream_id: SubstreamId(0), state: HandlerState::Active, max_dial_negotiated: 8, - keep_alive: KeepAlive::Yes, outbound_io_error_retries: 0, log: log.clone(), } @@ -253,7 +252,7 @@ where /// /// > **Note**: If you modify the protocol, modifications will only applies to future inbound /// > substreams, not the ones already being negotiated. 
- pub fn listen_protocol_ref(&self) -> &SubstreamProtocol> { + pub fn listen_protocol_ref(&self) -> &SubstreamProtocol, ()> { &self.listen_protocol } @@ -261,7 +260,7 @@ where /// /// > **Note**: If you modify the protocol, modifications will only apply to future inbound /// > substreams, not the ones already being negotiated. - pub fn listen_protocol_mut(&mut self) -> &mut SubstreamProtocol> { + pub fn listen_protocol_mut(&mut self) -> &mut SubstreamProtocol, ()> { &mut self.listen_protocol } @@ -287,7 +286,6 @@ where TInstant::now() + Duration::from_secs(SHUTDOWN_TIMEOUT_SECS as u64), )); } - self.update_keep_alive(); } /// Opens an outbound substream with a request. @@ -295,7 +293,6 @@ where match self.state { HandlerState::Active => { self.dial_queue.push((id, req)); - self.update_keep_alive(); } _ => { self.pending_errors.push(HandlerErr::Outbound { @@ -338,43 +335,6 @@ where } inbound_info.pending_items.push(response); } - - /// Updates the `KeepAlive` returned by `connection_keep_alive`. - /// - /// The handler stays alive as long as there are inbound/outbound substreams established and no - /// items dialing/to be dialed. Otherwise it is given a grace period of inactivity of - /// `self.inactive_timeout`. - fn update_keep_alive(&mut self) { - // Check that we don't have outbound items pending for dialing, nor dialing, nor - // established. Also check that there are no established inbound substreams. - // Errors and events need to be reported back, so check those too. - let should_shutdown = match self.state { - HandlerState::ShuttingDown(_) => { - self.dial_queue.is_empty() - && self.outbound_substreams.is_empty() - && self.inbound_substreams.is_empty() - && self.pending_errors.is_empty() - && self.events_out.is_empty() - && self.dial_negotiated == 0 - } - HandlerState::Deactivated => { - // Regardless of events, the timeout has expired. Force the disconnect. 
- true - } - _ => false, - }; - - match self.keep_alive { - KeepAlive::Yes if should_shutdown => self.keep_alive = KeepAlive::No, - KeepAlive::Yes => {} // We continue being active - KeepAlive::Until(_) if should_shutdown => self.keep_alive = KeepAlive::No, // Already deemed inactive - KeepAlive::Until(_) => { - // No longer idle - self.keep_alive = KeepAlive::Yes; - } - KeepAlive::No => {} // currently not used - } - } } impl ProtocolsHandler for RPCHandler @@ -387,14 +347,16 @@ where type InboundProtocol = RPCProtocol; type OutboundProtocol = RPCRequest; type OutboundOpenInfo = (RequestId, RPCRequest); // Keep track of the id and the request + type InboundOpenInfo = (); - fn listen_protocol(&self) -> SubstreamProtocol { + fn listen_protocol(&self) -> SubstreamProtocol { self.listen_protocol.clone() } fn inject_fully_negotiated_inbound( &mut self, substream: >::Output, + _info: Self::InboundOpenInfo, ) { // only accept new peer requests when active if !matches!(self.state, HandlerState::Active) { @@ -427,8 +389,6 @@ where self.events_out .push(RPCReceived::Request(self.current_inbound_substream_id, req)); self.current_inbound_substream_id.0 += 1; - - self.update_keep_alive(); } fn inject_fully_negotiated_outbound( @@ -486,8 +446,6 @@ where } self.current_outbound_substream_id.0 += 1; } - - self.update_keep_alive(); } fn inject_event(&mut self, rpc_event: Self::InEvent) { @@ -515,7 +473,6 @@ where // This dialing is now considered failed self.dial_negotiated -= 1; - self.update_keep_alive(); self.outbound_io_error_retries = 0; // map the error @@ -548,7 +505,29 @@ where } fn connection_keep_alive(&self) -> KeepAlive { - self.keep_alive + // Check that we don't have outbound items pending for dialing, nor dialing, nor + // established. Also check that there are no established inbound substreams. + // Errors and events need to be reported back, so check those too. 
+ let should_shutdown = match self.state { + HandlerState::ShuttingDown(_) => { + self.dial_queue.is_empty() + && self.outbound_substreams.is_empty() + && self.inbound_substreams.is_empty() + && self.pending_errors.is_empty() + && self.events_out.is_empty() + && self.dial_negotiated == 0 + } + HandlerState::Deactivated => { + // Regardless of events, the timeout has expired. Force the disconnect. + true + } + _ => false, + }; + if should_shutdown { + KeepAlive::No + } else { + KeepAlive::Yes + } } fn poll( @@ -624,8 +603,6 @@ where if let Some(OutboundInfo { proto, req_id, .. }) = self.outbound_substreams.remove(outbound_id.get_ref()) { - self.update_keep_alive(); - let outbound_err = HandlerErr::Outbound { id: req_id, proto, @@ -724,7 +701,6 @@ where self.inbound_substreams.remove(&inbound_id); } - self.update_keep_alive(); // drive outbound streams that need to be processed for outbound_id in self.outbound_substreams.keys().copied().collect::>() { // get the state and mark it as poisoned @@ -813,7 +789,6 @@ where let request_id = entry.get().req_id; self.outbound_substreams_delay.remove(delay_key); entry.remove_entry(); - self.update_keep_alive(); // notify the application error if request.expected_responses() > 1 { // return an end of stream result @@ -844,7 +819,6 @@ where error: e, }; entry.remove_entry(); - self.update_keep_alive(); return Poll::Ready(ProtocolsHandlerEvent::Custom(Err(outbound_err))); } }, @@ -857,7 +831,6 @@ where let request_id = entry.get().req_id; self.outbound_substreams_delay.remove(delay_key); entry.remove_entry(); - self.update_keep_alive(); // report the stream termination to the user // @@ -894,10 +867,8 @@ where self.dial_negotiated += 1; let (id, req) = self.dial_queue.remove(0); self.dial_queue.shrink_to_fit(); - self.update_keep_alive(); return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(req.clone()), - info: (id, req), + protocol: SubstreamProtocol::new(req.clone(), 
()).map_info(|()| (id, req)), }); } Poll::Pending diff --git a/beacon_node/eth2_libp2p/src/rpc/methods.rs b/beacon_node/eth2_libp2p/src/rpc/methods.rs index bc160b9c648..62f6d120bfc 100644 --- a/beacon_node/eth2_libp2p/src/rpc/methods.rs +++ b/beacon_node/eth2_libp2p/src/rpc/methods.rs @@ -19,7 +19,7 @@ type MaxErrorLen = U256; /// Wrapper over SSZ List to represent error message in rpc responses. #[derive(Debug, Clone)] -pub struct ErrorType(VariableList); +pub struct ErrorType(pub VariableList); impl From for ErrorType { fn from(s: String) -> Self { @@ -283,13 +283,13 @@ impl RPCCodedResponse { } /// Builds an RPCCodedResponse from a response code and an ErrorMessage - pub fn from_error(response_code: u8, err: String) -> Self { + pub fn from_error(response_code: u8, err: ErrorType) -> Self { let code = match response_code { 1 => RPCResponseErrorCode::InvalidRequest, 2 => RPCResponseErrorCode::ServerError, _ => RPCResponseErrorCode::Unknown, }; - RPCCodedResponse::Error(code, err.into()) + RPCCodedResponse::Error(code, err) } /// Specifies which response allows for multiple chunks for the stream handler. diff --git a/beacon_node/eth2_libp2p/src/rpc/mod.rs b/beacon_node/eth2_libp2p/src/rpc/mod.rs index 1e53475601f..6ced9b2f144 100644 --- a/beacon_node/eth2_libp2p/src/rpc/mod.rs +++ b/beacon_node/eth2_libp2p/src/rpc/mod.rs @@ -169,9 +169,12 @@ where fn new_handler(&mut self) -> Self::ProtocolsHandler { RPCHandler::new( - SubstreamProtocol::new(RPCProtocol { - phantom: PhantomData, - }), + SubstreamProtocol::new( + RPCProtocol { + phantom: PhantomData, + }, + (), + ), &self.log, ) } diff --git a/beacon_node/eth2_libp2p/src/service.rs b/beacon_node/eth2_libp2p/src/service.rs index aac23385725..67421cc9681 100644 --- a/beacon_node/eth2_libp2p/src/service.rs +++ b/beacon_node/eth2_libp2p/src/service.rs @@ -36,6 +36,8 @@ pub enum Libp2pEvent { Behaviour(BehaviourEvent), /// A new listening address has been established. 
NewListenAddr(Multiaddr), + /// We reached zero listening addresses. + ZeroListeners, } /// The configuration and state of the libp2p components for the beacon node. @@ -51,7 +53,7 @@ pub struct Service { } impl Service { - pub fn new( + pub async fn new( executor: environment::TaskExecutor, config: &NetworkConfig, enr_fork_id: EnrForkId, @@ -76,7 +78,7 @@ impl Service { &log, )); - info!(log, "Libp2p Service"; "peer_id" => format!("{:?}", enr.peer_id())); + info!(log, "Libp2p Service"; "peer_id" => enr.peer_id().to_string()); let discovery_string = if config.disable_discovery { "None".into() } else { @@ -85,11 +87,12 @@ impl Service { debug!(log, "Attempting to open listening ports"; "address" => format!("{}", config.listen_address), "tcp_port" => config.libp2p_port, "udp_port" => discovery_string); let mut swarm = { - // Set up the transport - tcp/ws with noise and yamux/mplex + // Set up the transport - tcp/ws with noise and mplex let transport = build_transport(local_keypair.clone()) .map_err(|e| format!("Failed to build transport: {:?}", e))?; // Lighthouse network behaviour - let behaviour = Behaviour::new(&local_keypair, config, network_globals.clone(), &log)?; + let behaviour = + Behaviour::new(&local_keypair, config, network_globals.clone(), &log).await?; // use the executor for libp2p struct Executor(environment::TaskExecutor); @@ -102,6 +105,7 @@ impl Service { .notify_handler_buffer_size(std::num::NonZeroUsize::new(32).expect("Not zero")) .connection_event_buffer_size(64) .incoming_connection_limit(10) + .outgoing_connection_limit(config.target_peers * 2) .peer_connection_limit(MAX_CONNECTIONS_PER_PEER) .executor(Box::new(Executor(executor))) .build() @@ -150,7 +154,7 @@ impl Service { } // attempt to connect to any specified boot-nodes - let mut boot_nodes = config.boot_nodes.clone(); + let mut boot_nodes = config.boot_nodes_enr.clone(); boot_nodes.dedup(); for bootnode_enr in boot_nodes { @@ -171,6 +175,16 @@ impl Service { } } + for multiaddr in 
&config.boot_nodes_multiaddr { + // check TCP support for dialing + if multiaddr + .iter() + .any(|proto| matches!(proto, Protocol::Tcp(_))) + { + dial_addr(multiaddr.clone()); + } + } + let mut subscribed_topics: Vec = vec![]; for topic_kind in &config.topics { if swarm.subscribe_kind(topic_kind.clone()) { @@ -271,10 +285,17 @@ impl Service { debug!(self.log, "Listen address expired"; "multiaddr" => multiaddr.to_string()) } SwarmEvent::ListenerClosed { addresses, reason } => { - debug!(self.log, "Listener closed"; "addresses" => format!("{:?}", addresses), "reason" => format!("{:?}", reason)) + crit!(self.log, "Listener closed"; "addresses" => format!("{:?}", addresses), "reason" => format!("{:?}", reason)); + if Swarm::listeners(&self.swarm).count() == 0 { + return Libp2pEvent::ZeroListeners; + } } SwarmEvent::ListenerError { error } => { - debug!(self.log, "Listener error"; "error" => format!("{:?}", error.to_string())) + // this is non fatal, but we still check + warn!(self.log, "Listener error"; "error" => format!("{:?}", error.to_string())); + if Swarm::listeners(&self.swarm).count() == 0 { + return Libp2pEvent::ZeroListeners; + } } SwarmEvent::Dialing(peer_id) => { debug!(self.log, "Dialing peer"; "peer_id" => peer_id.to_string()); @@ -285,7 +306,7 @@ impl Service { } /// The implementation supports TCP/IP, WebSockets over TCP/IP, noise as the encryption layer, and -/// yamux or mplex as the multiplexing layer. +/// mplex as the multiplexing layer. 
fn build_transport( local_private_key: Keypair, ) -> Result, Error> { @@ -300,10 +321,7 @@ fn build_transport( Ok(transport .upgrade(core::upgrade::Version::V1) .authenticate(generate_noise_config(&local_private_key)) - .multiplex(core::upgrade::SelectUpgrade::new( - libp2p::mplex::MplexConfig::new(), - libp2p::yamux::Config::default(), - )) + .multiplex(libp2p::mplex::MplexConfig::new()) .map(|(peer, muxer), _| (peer, core::muxing::StreamMuxerBox::new(muxer))) .timeout(Duration::from_secs(10)) .timeout(Duration::from_secs(10)) @@ -339,7 +357,7 @@ fn keypair_from_bytes(mut bytes: Vec) -> error::Result { /// generated and is then saved to disk. /// /// Currently only secp256k1 keys are allowed, as these are the only keys supported by discv5. -fn load_private_key(config: &NetworkConfig, log: &slog::Logger) -> Keypair { +pub fn load_private_key(config: &NetworkConfig, log: &slog::Logger) -> Keypair { // check for key from disk let network_key_f = config.network_dir.join(NETWORK_KEY_FILENAME); if let Ok(mut network_key_file) = File::open(network_key_f.clone()) { diff --git a/beacon_node/eth2_libp2p/src/types/globals.rs b/beacon_node/eth2_libp2p/src/types/globals.rs index 84e183b6bc1..a4dec670a34 100644 --- a/beacon_node/eth2_libp2p/src/types/globals.rs +++ b/beacon_node/eth2_libp2p/src/types/globals.rs @@ -1,10 +1,9 @@ //! A collection of variables that are accessible outside of the network thread itself. use crate::peer_manager::PeerDB; -use crate::rpc::methods::MetaData; use crate::types::SyncState; use crate::Client; use crate::EnrExt; -use crate::{Enr, Eth2Enr, GossipTopic, Multiaddr, PeerId}; +use crate::{Enr, GossipTopic, Multiaddr, PeerId}; use parking_lot::RwLock; use std::collections::HashSet; use std::sync::atomic::{AtomicU16, Ordering}; @@ -13,8 +12,6 @@ use types::EthSpec; pub struct NetworkGlobals { /// The current local ENR. pub local_enr: RwLock, - /// The current node's meta-data. - pub meta_data: RwLock>, /// The local peer_id. 
pub peer_id: RwLock, /// Listening multiaddrs. @@ -33,17 +30,8 @@ pub struct NetworkGlobals { impl NetworkGlobals { pub fn new(enr: Enr, tcp_port: u16, udp_port: u16, log: &slog::Logger) -> Self { - // set up the local meta data of the node - let meta_data = RwLock::new(MetaData { - seq_number: 0, - attnets: enr - .bitfield::() - .expect("Local ENR must have a bitfield specified"), - }); - NetworkGlobals { local_enr: RwLock::new(enr.clone()), - meta_data, peer_id: RwLock::new(enr.peer_id()), listen_multiaddrs: RwLock::new(Vec::new()), listen_port_tcp: AtomicU16::new(tcp_port), diff --git a/beacon_node/eth2_libp2p/src/types/mod.rs b/beacon_node/eth2_libp2p/src/types/mod.rs index 8f9b07fd339..ec6fcd4af40 100644 --- a/beacon_node/eth2_libp2p/src/types/mod.rs +++ b/beacon_node/eth2_libp2p/src/types/mod.rs @@ -1,6 +1,7 @@ pub mod error; mod globals; mod pubsub; +mod subnet; mod sync_state; mod topics; @@ -13,5 +14,6 @@ pub type Enr = discv5::enr::Enr; pub use globals::NetworkGlobals; pub use pubsub::PubsubMessage; +pub use subnet::SubnetDiscovery; pub use sync_state::SyncState; pub use topics::{GossipEncoding, GossipKind, GossipTopic}; diff --git a/beacon_node/eth2_libp2p/src/types/subnet.rs b/beacon_node/eth2_libp2p/src/types/subnet.rs new file mode 100644 index 00000000000..0136e630100 --- /dev/null +++ b/beacon_node/eth2_libp2p/src/types/subnet.rs @@ -0,0 +1,28 @@ +use std::time::{Duration, Instant}; +use types::SubnetId; + +const DURATION_DIFFERENCE: Duration = Duration::from_millis(1); + +/// A subnet to discover peers on along with the instant after which it's no longer useful. 
+#[derive(Debug, Clone)] +pub struct SubnetDiscovery { + pub subnet_id: SubnetId, + pub min_ttl: Option, +} + +impl PartialEq for SubnetDiscovery { + fn eq(&self, other: &SubnetDiscovery) -> bool { + self.subnet_id == other.subnet_id + && match (self.min_ttl, other.min_ttl) { + (Some(min_ttl_instant), Some(other_min_ttl_instant)) => { + min_ttl_instant.saturating_duration_since(other_min_ttl_instant) + < DURATION_DIFFERENCE + && other_min_ttl_instant.saturating_duration_since(min_ttl_instant) + < DURATION_DIFFERENCE + } + (None, None) => true, + (None, Some(_)) => true, + (Some(_), None) => true, + } + } +} diff --git a/beacon_node/eth2_libp2p/src/types/topics.rs b/beacon_node/eth2_libp2p/src/types/topics.rs index c8021a5dd44..f564a541377 100644 --- a/beacon_node/eth2_libp2p/src/types/topics.rs +++ b/beacon_node/eth2_libp2p/src/types/topics.rs @@ -1,4 +1,4 @@ -use libp2p::gossipsub::Topic; +use libp2p::gossipsub::IdentTopic as Topic; use serde_derive::{Deserialize, Serialize}; use types::SubnetId; @@ -139,7 +139,7 @@ impl GossipTopic { impl Into for GossipTopic { fn into(self) -> Topic { - Topic::new(self.into()) + Topic::new(self) } } diff --git a/beacon_node/eth2_libp2p/tests/common/mod.rs b/beacon_node/eth2_libp2p/tests/common/mod.rs index 50ff284c24d..dc81cdb6839 100644 --- a/beacon_node/eth2_libp2p/tests/common/mod.rs +++ b/beacon_node/eth2_libp2p/tests/common/mod.rs @@ -10,6 +10,7 @@ use std::time::Duration; use types::{EnrForkId, MinimalEthSpec}; type E = MinimalEthSpec; +use libp2p::gossipsub::GossipsubConfigBuilder; use tempdir::TempDir; pub struct Libp2pInstance(LibP2PService, exit_future::Signal); @@ -80,24 +81,33 @@ pub fn build_config(port: u16, mut boot_nodes: Vec) -> NetworkConfig { config.enr_tcp_port = Some(port); config.enr_udp_port = Some(port); config.enr_address = Some("127.0.0.1".parse().unwrap()); - config.boot_nodes.append(&mut boot_nodes); + config.boot_nodes_enr.append(&mut boot_nodes); config.network_dir = path.into_path(); // Reduce 
gossipsub heartbeat parameters - config.gs_config.heartbeat_initial_delay = Duration::from_millis(500); - config.gs_config.heartbeat_interval = Duration::from_millis(500); + config.gs_config = GossipsubConfigBuilder::from(config.gs_config) + .heartbeat_initial_delay(Duration::from_millis(500)) + .heartbeat_interval(Duration::from_millis(500)) + .build() + .unwrap(); config } -pub fn build_libp2p_instance(boot_nodes: Vec, log: slog::Logger) -> Libp2pInstance { +pub async fn build_libp2p_instance(boot_nodes: Vec, log: slog::Logger) -> Libp2pInstance { let port = unused_port("tcp").unwrap(); let config = build_config(port, boot_nodes); // launch libp2p service let (signal, exit) = exit_future::signal(); - let executor = - environment::TaskExecutor::new(tokio::runtime::Handle::current(), exit, log.clone()); + let (shutdown_tx, _) = futures::channel::mpsc::channel(1); + let executor = environment::TaskExecutor::new( + tokio::runtime::Handle::current(), + exit, + log.clone(), + shutdown_tx, + ); Libp2pInstance( LibP2PService::new(executor, &config, EnrForkId::default(), &log) + .await .expect("should build libp2p instance") .1, signal, @@ -112,10 +122,11 @@ pub fn get_enr(node: &LibP2PService) -> Enr { // Returns `n` libp2p peers in fully connected topology. 
#[allow(dead_code)] -pub fn build_full_mesh(log: slog::Logger, n: usize) -> Vec { - let mut nodes: Vec<_> = (0..n) - .map(|_| build_libp2p_instance(vec![], log.clone())) - .collect(); +pub async fn build_full_mesh(log: slog::Logger, n: usize) -> Vec { + let mut nodes = Vec::with_capacity(n); + for _ in 0..n { + nodes.push(build_libp2p_instance(vec![], log.clone()).await); + } let multiaddrs: Vec = nodes .iter() .map(|x| get_enr(&x).multiaddr()[1].clone()) @@ -141,8 +152,8 @@ pub async fn build_node_pair(log: &slog::Logger) -> (Libp2pInstance, Libp2pInsta let sender_log = log.new(o!("who" => "sender")); let receiver_log = log.new(o!("who" => "receiver")); - let mut sender = build_libp2p_instance(vec![], sender_log); - let mut receiver = build_libp2p_instance(vec![], receiver_log); + let mut sender = build_libp2p_instance(vec![], sender_log).await; + let mut receiver = build_libp2p_instance(vec![], receiver_log).await; let receiver_multiaddr = receiver.swarm.local_enr().multiaddr()[1].clone(); @@ -181,10 +192,12 @@ pub async fn build_node_pair(log: &slog::Logger) -> (Libp2pInstance, Libp2pInsta // Returns `n` peers in a linear topology #[allow(dead_code)] -pub fn build_linear(log: slog::Logger, n: usize) -> Vec { - let mut nodes: Vec<_> = (0..n) - .map(|_| build_libp2p_instance(vec![], log.clone())) - .collect(); +pub async fn build_linear(log: slog::Logger, n: usize) -> Vec { + let mut nodes = Vec::with_capacity(n); + for _ in 0..n { + nodes.push(build_libp2p_instance(vec![], log.clone()).await); + } + let multiaddrs: Vec = nodes .iter() .map(|x| get_enr(&x).multiaddr()[1].clone()) diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index cd5aa3b609c..2ef369e3b45 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -39,3 +39,5 @@ lazy_static = "1.4.0" lighthouse_metrics = { path = "../../common/lighthouse_metrics" } environment = { path = "../../lighthouse/environment" } itertools = "0.9.0" +num_cpus = "1.13.0" 
+lru_cache = { path = "../../common/lru_cache" } diff --git a/beacon_node/network/src/attestation_service/mod.rs b/beacon_node/network/src/attestation_service/mod.rs index 8ba791c1693..cf94c5cdd7e 100644 --- a/beacon_node/network/src/attestation_service/mod.rs +++ b/beacon_node/network/src/attestation_service/mod.rs @@ -2,7 +2,7 @@ //! given time. It schedules subscriptions to shard subnets, requests peer discoveries and //! determines whether attestations should be aggregated and/or passed to the beacon node. -use std::collections::VecDeque; +use std::collections::{HashMap, VecDeque}; use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; @@ -13,7 +13,7 @@ use rand::seq::SliceRandom; use slog::{crit, debug, error, o, trace, warn}; use beacon_chain::{BeaconChain, BeaconChainTypes}; -use eth2_libp2p::{types::GossipKind, NetworkGlobals}; +use eth2_libp2p::{types::GossipKind, NetworkGlobals, SubnetDiscovery}; use hashset_delay::HashSetDelay; use rest_types::ValidatorSubscription; use slot_clock::SlotClock; @@ -25,11 +25,8 @@ mod tests; /// The minimum number of slots ahead that we attempt to discover peers for a subscription. If the /// slot is less than this number, skip the peer discovery process. -const MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD: u64 = 1; -/// The number of slots ahead that we attempt to discover peers for a subscription. If the slot to -/// attest to is greater than this, we queue a discovery request for this many slots prior to -/// subscribing. -const TARGET_PEER_DISCOVERY_SLOT_LOOK_AHEAD: u64 = 6; +/// Subnet discovery query takes atmost 30 secs, 2 slots take 24s. +const MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD: u64 = 2; /// The time (in slots) before a last seen validator is considered absent and we unsubscribe from the random /// gossip topics that we subscribed to due to the validator connection. 
const LAST_SEEN_VALIDATOR_TIMEOUT: u32 = 150; @@ -39,12 +36,10 @@ const LAST_SEEN_VALIDATOR_TIMEOUT: u32 = 150; /// Note: The time is calculated as `time = milliseconds_per_slot / ADVANCE_SUBSCRIPTION_TIME`. const ADVANCE_SUBSCRIBE_TIME: u32 = 3; /// The default number of slots before items in hash delay sets used by this class should expire. +/// 36s at 12s slot time const DEFAULT_EXPIRATION_TIMEOUT: u32 = 3; -// 36s at 12s slot time -/// The duration difference between two instance for them to be considered equal. -const DURATION_DIFFERENCE: Duration = Duration::from_millis(1); -#[derive(Debug, Eq, Clone)] +#[derive(Debug, PartialEq, Clone)] pub enum AttServiceMessage { /// Subscribe to the specified subnet id. Subscribe(SubnetId), @@ -54,44 +49,8 @@ pub enum AttServiceMessage { EnrAdd(SubnetId), /// Remove the `SubnetId` from the ENR bitfield. EnrRemove(SubnetId), - /// Discover peers for a particular subnet. - /// The includes the `Instant` we need the discovered peer until. - DiscoverPeers { - subnet_id: SubnetId, - min_ttl: Option, - }, -} - -impl PartialEq for AttServiceMessage { - fn eq(&self, other: &AttServiceMessage) -> bool { - match (self, other) { - (&AttServiceMessage::Subscribe(a), &AttServiceMessage::Subscribe(b)) => a == b, - (&AttServiceMessage::Unsubscribe(a), &AttServiceMessage::Unsubscribe(b)) => a == b, - (&AttServiceMessage::EnrAdd(a), &AttServiceMessage::EnrAdd(b)) => a == b, - (&AttServiceMessage::EnrRemove(a), &AttServiceMessage::EnrRemove(b)) => a == b, - ( - &AttServiceMessage::DiscoverPeers { subnet_id, min_ttl }, - &AttServiceMessage::DiscoverPeers { - subnet_id: other_subnet_id, - min_ttl: other_min_ttl, - }, - ) => { - subnet_id == other_subnet_id - && match (min_ttl, other_min_ttl) { - (Some(min_ttl_instant), Some(other_min_ttl_instant)) => { - min_ttl_instant.saturating_duration_since(other_min_ttl_instant) - < DURATION_DIFFERENCE - && other_min_ttl_instant.saturating_duration_since(min_ttl_instant) - < DURATION_DIFFERENCE - } - 
(None, None) => true, - (None, Some(_)) => true, - (Some(_), None) => true, - } - } - _ => false, - } - } + /// Discover peers for a list of `SubnetDiscovery`. + DiscoverPeers(Vec), } /// A particular subnet at a given slot. @@ -116,9 +75,6 @@ pub struct AttestationService { /// The collection of currently subscribed random subnets mapped to their expiry deadline. random_subnets: HashSetDelay, - /// A collection of timeouts for when to start searching for peers for a particular shard. - discover_peers: HashSetDelay, - /// A collection of timeouts for when to subscribe to a shard subnet. subscriptions: HashSetDelay, @@ -172,7 +128,6 @@ impl AttestationService { network_globals, beacon_chain, random_subnets: HashSetDelay::new(Duration::from_millis(random_subnet_duration_millis)), - discover_peers: HashSetDelay::new(default_timeout), subscriptions: HashSetDelay::new(default_timeout), unsubscriptions: HashSetDelay::new(default_timeout), aggregate_validators_on_subnet: HashSetDelay::new(default_timeout), @@ -198,6 +153,8 @@ impl AttestationService { &mut self, subscriptions: Vec, ) -> Result<(), String> { + // Maps each subnet_id subscription to it's highest slot + let mut subnets_to_discover: HashMap = HashMap::new(); for subscription in subscriptions { metrics::inc_counter(&metrics::SUBNET_SUBSCRIPTION_REQUESTS); //NOTE: We assume all subscriptions have been verified before reaching this service @@ -226,15 +183,20 @@ impl AttestationService { continue; } }; + // Ensure each subnet_id inserted into the map has the highest slot as it's value. + // Higher slot corresponds to higher min_ttl in the `SubnetDiscovery` entry. 
+ if let Some(slot) = subnets_to_discover.get(&subnet_id) { + if subscription.slot > *slot { + subnets_to_discover.insert(subnet_id, subscription.slot); + } + } else { + subnets_to_discover.insert(subnet_id, subscription.slot); + } let exact_subnet = ExactSubnet { subnet_id, slot: subscription.slot, }; - // determine if we should run a discovery lookup request and request it if required - if let Err(e) = self.discover_peers_request(exact_subnet.clone()) { - warn!(self.log, "Discovery lookup request error"; "error" => e); - } // determine if the validator is an aggregator. If so, we subscribe to the subnet and // if successful add the validator to a mapping of known aggregators for that exact @@ -264,6 +226,14 @@ impl AttestationService { } } + if let Err(e) = self.discover_peers_request( + subnets_to_discover + .into_iter() + .map(|(subnet_id, slot)| ExactSubnet { subnet_id, slot }), + ) { + warn!(self.log, "Discovery lookup request error"; "error" => e); + }; + // pre-emptively wake the thread to check for new events if let Some(waker) = &self.waker { waker.wake_by_ref(); @@ -290,112 +260,53 @@ impl AttestationService { /// Checks if there are currently queued discovery requests and the time required to make the /// request. /// - /// If there is sufficient time and no other request exists, queues a peer discovery request - /// for the required subnet. - fn discover_peers_request(&mut self, exact_subnet: ExactSubnet) -> Result<(), &'static str> { + /// If there is sufficient time, queues a peer discovery request for all the required subnets. 
+ fn discover_peers_request( + &mut self, + exact_subnets: impl Iterator, + ) -> Result<(), &'static str> { let current_slot = self .beacon_chain .slot_clock .now() .ok_or_else(|| "Could not get the current slot")?; - let slot_duration = self.beacon_chain.slot_clock.slot_duration(); - - // if there is enough time to perform a discovery lookup - if exact_subnet.slot >= current_slot.saturating_add(MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD) { - // check if a discovery request already exists - if self.discover_peers.get(&exact_subnet).is_some() { - // already a request queued, end - return Ok(()); - } - - // if the slot is more than epoch away, add an event to start looking for peers - if exact_subnet.slot - < current_slot.saturating_add(TARGET_PEER_DISCOVERY_SLOT_LOOK_AHEAD) - { - // add one slot to ensure we keep the peer for the subscription slot - let min_ttl = self - .beacon_chain - .slot_clock - .duration_to_slot(exact_subnet.slot + 1) - .map(|duration| std::time::Instant::now() + duration); - self.send_or_update_discovery_event(exact_subnet.subnet_id, min_ttl); - } else { - // Queue the discovery event to be executed for - // TARGET_PEER_DISCOVERY_SLOT_LOOK_AHEAD - - let duration_to_discover = { - let duration_to_next_slot = self + let discovery_subnets: Vec = exact_subnets + .filter_map(|exact_subnet| { + // check if there is enough time to perform a discovery lookup + if exact_subnet.slot + >= current_slot.saturating_add(MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD) + { + // if the slot is more than epoch away, add an event to start looking for peers + // add one slot to ensure we keep the peer for the subscription slot + let min_ttl = self .beacon_chain .slot_clock - .duration_to_next_slot() - .ok_or_else(|| "Unable to determine duration to next slot")?; - // The -1 is done here to exclude the current slot duration, as we will use - // `duration_to_next_slot`. 
- let slots_until_discover = exact_subnet - .slot - .saturating_sub(current_slot) - .saturating_sub(1u64) - .saturating_sub(TARGET_PEER_DISCOVERY_SLOT_LOOK_AHEAD); - - duration_to_next_slot + slot_duration * (slots_until_discover.as_u64() as u32) - }; - - self.discover_peers - .insert_at(exact_subnet, duration_to_discover); - } - } else { - // TODO: Send the time frame needed to have a peer connected, so that we can - // maintain peers for a least this duration. - // We may want to check the global PeerInfo to see estimated timeouts for each - // peer before they can be removed. - return Err("Not enough time for a discovery search"); - } - Ok(()) - } - - /// Checks if we have a discover peers event already and sends a new event if necessary - /// - /// If a message exists for the same subnet, compare the `min_ttl` of the current and - /// existing messages and extend the existing message as necessary. - fn send_or_update_discovery_event(&mut self, subnet_id: SubnetId, min_ttl: Option) { - // track whether this message already exists in the event queue - let mut is_duplicate = false; - - self.events.iter_mut().for_each(|event| { - if let AttServiceMessage::DiscoverPeers { - subnet_id: other_subnet_id, - min_ttl: other_min_ttl, - } = event - { - if subnet_id == *other_subnet_id { - let other_min_ttl_clone = *other_min_ttl; - match (min_ttl, other_min_ttl_clone) { - (Some(min_ttl_instant), Some(other_min_ttl_instant)) => - // only update the min_ttl if it is greater than the existing min_ttl and a DURATION_DIFFERENCE padding - { - if min_ttl_instant.saturating_duration_since(other_min_ttl_instant) - > DURATION_DIFFERENCE - { - *other_min_ttl = min_ttl; - } - } - (None, Some(_)) => {} // Keep the current one as it has an actual min_ttl - (Some(min_ttl), None) => { - // Update the request to include a min_ttl. - *other_min_ttl = Some(min_ttl); - } - (None, None) => {} // Duplicate message, do nothing. 
- } - is_duplicate = true; - return; + .duration_to_slot(exact_subnet.slot + 1) + .map(|duration| std::time::Instant::now() + duration); + Some(SubnetDiscovery { + subnet_id: exact_subnet.subnet_id, + min_ttl, + }) + } else { + // TODO: Send the time frame needed to have a peer connected, so that we can + // maintain peers for a least this duration. + // We may want to check the global PeerInfo to see estimated timeouts for each + // peer before they can be removed. + warn!(self.log, + "Not enough time for a discovery search"; + "subnet_id" => format!("{:?}", exact_subnet) + ); + None } - }; - }); - if !is_duplicate { + }) + .collect(); + + if !discovery_subnets.is_empty() { self.events - .push_back(AttServiceMessage::DiscoverPeers { subnet_id, min_ttl }); + .push_back(AttServiceMessage::DiscoverPeers(discovery_subnets)); } + Ok(()) } /// Checks the current random subnets and subscriptions to determine if a new subscription for this @@ -547,7 +458,11 @@ impl AttestationService { if !already_subscribed { // send a discovery request and a subscription - self.send_or_update_discovery_event(subnet_id, None); + self.events + .push_back(AttServiceMessage::DiscoverPeers(vec![SubnetDiscovery { + subnet_id, + min_ttl: None, + }])); self.events .push_back(AttServiceMessage::Subscribe(subnet_id)); } @@ -558,20 +473,6 @@ impl AttestationService { /* A collection of functions that handle the various timeouts */ - /// Request a discovery query to find peers for a particular subnet. 
- fn handle_discover_peers(&mut self, exact_subnet: ExactSubnet) { - debug!(self.log, "Searching for peers for subnet"; "subnet" => *exact_subnet.subnet_id, "target_slot" => exact_subnet.slot); - - // add one slot to ensure we keep the peer for the subscription slot - let min_ttl = self - .beacon_chain - .slot_clock - .duration_to_slot(exact_subnet.slot + 1) - .map(|duration| std::time::Instant::now() + duration); - - self.send_or_update_discovery_event(exact_subnet.subnet_id, min_ttl) - } - /// A queued subscription is ready. /// /// We add subscriptions events even if we are already subscribed to a random subnet (as these @@ -731,15 +632,6 @@ impl Stream for AttestationService { self.waker = Some(cx.waker().clone()); } - // process any peer discovery events - match self.discover_peers.poll_next_unpin(cx) { - Poll::Ready(Some(Ok(exact_subnet))) => self.handle_discover_peers(exact_subnet), - Poll::Ready(Some(Err(e))) => { - error!(self.log, "Failed to check for peer discovery requests"; "error"=> e); - } - Poll::Ready(None) | Poll::Pending => {} - } - // process any subscription events match self.subscriptions.poll_next_unpin(cx) { Poll::Ready(Some(Ok(exact_subnet))) => self.handle_subscriptions(exact_subnet), diff --git a/beacon_node/network/src/attestation_service/tests/mod.rs b/beacon_node/network/src/attestation_service/tests/mod.rs index 84d792c1763..5e1ef76a29b 100644 --- a/beacon_node/network/src/attestation_service/tests/mod.rs +++ b/beacon_node/network/src/attestation_service/tests/mod.rs @@ -8,7 +8,9 @@ mod tests { migrate::NullMigrator, }; use eth2_libp2p::discovery::{build_enr, Keypair}; - use eth2_libp2p::{discovery::CombinedKey, CombinedKeyExt, NetworkConfig, NetworkGlobals}; + use eth2_libp2p::{ + discovery::CombinedKey, CombinedKeyExt, NetworkConfig, NetworkGlobals, SubnetDiscovery, + }; use futures::Stream; use genesis::{generate_deterministic_keypairs, interop_genesis_state}; use lazy_static::lazy_static; @@ -120,23 +122,21 @@ mod tests { } } - fn 
_get_subscriptions( + fn get_subscriptions( validator_count: u64, slot: Slot, committee_count_at_slot: u64, ) -> Vec { - let mut subscriptions: Vec = Vec::new(); - for validator_index in 0..validator_count { - let is_aggregator = true; - subscriptions.push(ValidatorSubscription { - validator_index, - attestation_committee_index: validator_index, - slot, - committee_count_at_slot, - is_aggregator, - }); - } - subscriptions + (0..validator_count) + .map(|validator_index| { + get_subscription( + validator_index, + validator_index, + slot, + committee_count_at_slot, + ) + }) + .collect() } // gets a number of events from the subscription service, or returns none if it times out after a number @@ -210,14 +210,7 @@ mod tests { let events = get_events(attestation_service, no_events_expected, 1).await; assert_matches!( events[..3], - [ - AttServiceMessage::DiscoverPeers { - subnet_id: _any_subnet, - min_ttl: _any_instant - }, - AttServiceMessage::Subscribe(_any1), - AttServiceMessage::EnrAdd(_any3) - ] + [AttServiceMessage::DiscoverPeers(_), AttServiceMessage::Subscribe(_any1), AttServiceMessage::EnrAdd(_any3)] ); // if there are fewer events than expected, there's been a collision if events.len() == no_events_expected { @@ -270,14 +263,7 @@ mod tests { let events = get_events(attestation_service, no_events_expected, 2).await; assert_matches!( events[..3], - [ - AttServiceMessage::DiscoverPeers { - subnet_id: _any_subnet, - min_ttl: _any_instant - }, - AttServiceMessage::Subscribe(_any1), - AttServiceMessage::EnrAdd(_any3) - ] + [AttServiceMessage::DiscoverPeers(_), AttServiceMessage::Subscribe(_any1), AttServiceMessage::EnrAdd(_any3)] ); // if there are fewer events than expected, there's been a collision if events.len() == no_events_expected { @@ -330,19 +316,15 @@ mod tests { &attestation_service.beacon_chain.spec, ) .unwrap(); - let expected = vec![AttServiceMessage::DiscoverPeers { subnet_id, min_ttl }]; + let expected = 
vec![AttServiceMessage::DiscoverPeers(vec![SubnetDiscovery { + subnet_id, + min_ttl, + }])]; let events = get_events(attestation_service, no_events_expected, 1).await; assert_matches!( events[..3], - [ - AttServiceMessage::DiscoverPeers { - subnet_id: _any_subnet, - min_ttl: _any_instant - }, - AttServiceMessage::Subscribe(_any2), - AttServiceMessage::EnrAdd(_any3) - ] + [AttServiceMessage::DiscoverPeers(_), AttServiceMessage::Subscribe(_any2), AttServiceMessage::EnrAdd(_any3)] ); // if there are fewer events than expected, there's been a collision if events.len() == no_events_expected { @@ -396,21 +378,14 @@ mod tests { ) .unwrap(); let expected = vec![ - AttServiceMessage::DiscoverPeers { subnet_id, min_ttl }, + AttServiceMessage::DiscoverPeers(vec![SubnetDiscovery { subnet_id, min_ttl }]), AttServiceMessage::Subscribe(subnet_id), ]; let events = get_events(attestation_service, no_events_expected, 5).await; assert_matches!( events[..3], - [ - AttServiceMessage::DiscoverPeers { - subnet_id: _any_subnet, - min_ttl: _any_instant - }, - AttServiceMessage::Subscribe(_any2), - AttServiceMessage::EnrAdd(_any3) - ] + [AttServiceMessage::DiscoverPeers(_), AttServiceMessage::Subscribe(_any2), AttServiceMessage::EnrAdd(_any3)] ); // if there are fewer events than expected, there's been a collision if events.len() == no_events_expected { @@ -454,14 +429,7 @@ mod tests { assert_matches!( events[..3], - [ - AttServiceMessage::DiscoverPeers { - subnet_id: _any_subnet, - min_ttl: _any_instant - }, - AttServiceMessage::Subscribe(_any2), - AttServiceMessage::EnrAdd(_any3) - ] + [AttServiceMessage::DiscoverPeers(_), AttServiceMessage::Subscribe(_any2), AttServiceMessage::EnrAdd(_any3)] ); // if there are fewer events than expected, there's been a collision if events.len() == no_events_expected { @@ -517,20 +485,16 @@ mod tests { // expect discover peers because we will enter TARGET_PEER_DISCOVERY_SLOT_LOOK_AHEAD range let expected: Vec = - vec![AttServiceMessage::DiscoverPeers { 
subnet_id, min_ttl }]; + vec![AttServiceMessage::DiscoverPeers(vec![SubnetDiscovery { + subnet_id, + min_ttl, + }])]; let events = get_events(attestation_service, no_events_expected, 5).await; assert_matches!( events[..3], - [ - AttServiceMessage::DiscoverPeers { - subnet_id: _any_subnet, - min_ttl: _any_instant - }, - AttServiceMessage::Subscribe(_any2), - AttServiceMessage::EnrAdd(_any3) - ] + [AttServiceMessage::DiscoverPeers(_), AttServiceMessage::Subscribe(_any2), AttServiceMessage::EnrAdd(_any3)] ); // if there are fewer events than expected, there's been a collision if events.len() == no_events_expected { @@ -553,7 +517,7 @@ mod tests { .now() .expect("Could not get current slot"); - let subscriptions = _get_subscriptions( + let subscriptions = get_subscriptions( subscription_count, current_slot + subscription_slot, committee_count, @@ -572,10 +536,9 @@ mod tests { for event in events { match event { - AttServiceMessage::DiscoverPeers { - subnet_id: _any_subnet, - min_ttl: _any_instant, - } => discover_peer_count = discover_peer_count + 1, + AttServiceMessage::DiscoverPeers(_) => { + discover_peer_count = discover_peer_count + 1 + } AttServiceMessage::Subscribe(_any_subnet) => subscribe_count = subscribe_count + 1, AttServiceMessage::EnrAdd(_any_subnet) => enr_add_count = enr_add_count + 1, _ => unexpected_msg_count = unexpected_msg_count + 1, @@ -605,7 +568,7 @@ mod tests { .now() .expect("Could not get current slot"); - let subscriptions = _get_subscriptions( + let subscriptions = get_subscriptions( subscription_count, current_slot + subscription_slot, committee_count, @@ -624,10 +587,9 @@ mod tests { for event in events { match event { - AttServiceMessage::DiscoverPeers { - subnet_id: _any_subnet, - min_ttl: _any_instant, - } => discover_peer_count = discover_peer_count + 1, + AttServiceMessage::DiscoverPeers(_) => { + discover_peer_count = discover_peer_count + 1 + } AttServiceMessage::Subscribe(_any_subnet) => subscribe_count = subscribe_count + 1, 
AttServiceMessage::EnrAdd(_any_subnet) => enr_add_count = enr_add_count + 1, _ => unexpected_msg_count = unexpected_msg_count + 1, @@ -639,4 +601,40 @@ mod tests { assert_eq!(enr_add_count, 64); assert_eq!(unexpected_msg_count, 0); } + + #[tokio::test] + async fn test_discovery_peers_count() { + let subscription_slot = 10; + let validator_count = 32; + let committee_count = 1; + let expected_events = 97; + + // create the attestation service and subscriptions + let mut attestation_service = get_attestation_service(); + let current_slot = attestation_service + .beacon_chain + .slot_clock + .now() + .expect("Could not get current slot"); + + let subscriptions = get_subscriptions( + validator_count, + current_slot + subscription_slot, + committee_count, + ); + + // submit sthe subscriptions + attestation_service + .validator_subscriptions(subscriptions) + .unwrap(); + + let events = get_events(attestation_service, expected_events, 3).await; + + let event = events.get(96); + if let Some(AttServiceMessage::DiscoverPeers(d)) = event { + assert_eq!(d.len(), validator_count as usize); + } else { + panic!("Unexpected event {:?}", event); + } + } } diff --git a/beacon_node/network/src/beacon_processor/chain_segment.rs b/beacon_node/network/src/beacon_processor/chain_segment.rs new file mode 100644 index 00000000000..94c7893f055 --- /dev/null +++ b/beacon_node/network/src/beacon_processor/chain_segment.rs @@ -0,0 +1,237 @@ +use crate::metrics; +use crate::router::processor::FUTURE_SLOT_TOLERANCE; +use crate::sync::manager::SyncMessage; +use crate::sync::{BatchProcessResult, ChainId}; +use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, ChainSegmentResult}; +use eth2_libp2p::PeerId; +use slog::{debug, error, trace, warn}; +use std::sync::Arc; +use tokio::sync::mpsc; +use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock}; + +/// Id associated to a block processing request, either a batch or a single block. 
+#[derive(Clone, Debug, PartialEq)] +pub enum ProcessId { + /// Processing Id of a range syncing batch. + RangeBatchId(ChainId, Epoch), + /// Processing Id of the parent lookup of a block. + ParentLookup(PeerId, Hash256), +} + +pub fn handle_chain_segment( + chain: Arc>, + process_id: ProcessId, + downloaded_blocks: Vec>, + sync_send: mpsc::UnboundedSender>, + log: slog::Logger, +) { + match process_id { + // this a request from the range sync + ProcessId::RangeBatchId(chain_id, epoch) => { + let len = downloaded_blocks.len(); + let start_slot = if len > 0 { + downloaded_blocks[0].message.slot.as_u64() + } else { + 0 + }; + let end_slot = if len > 0 { + downloaded_blocks[len - 1].message.slot.as_u64() + } else { + 0 + }; + + debug!(log, "Processing batch"; "batch_epoch" => epoch, "blocks" => downloaded_blocks.len(), "first_block_slot" => start_slot, "last_block_slot" => end_slot, "service" => "sync"); + let result = match process_blocks(chain, downloaded_blocks.iter(), &log) { + (_, Ok(_)) => { + debug!(log, "Batch processed"; "batch_epoch" => epoch , "first_block_slot" => start_slot, "last_block_slot" => end_slot, "service"=> "sync"); + BatchProcessResult::Success + } + (imported_blocks, Err(e)) if imported_blocks > 0 => { + debug!(log, "Batch processing failed but imported some blocks"; + "batch_epoch" => epoch, "error" => e, "imported_blocks"=> imported_blocks, "service" => "sync"); + BatchProcessResult::Partial + } + (_, Err(e)) => { + debug!(log, "Batch processing failed"; "batch_epoch" => epoch, "error" => e, "service" => "sync"); + BatchProcessResult::Failed + } + }; + + let msg = SyncMessage::BatchProcessed { + chain_id, + epoch, + downloaded_blocks, + result, + }; + sync_send.send(msg).unwrap_or_else(|_| { + debug!( + log, + "Block processor could not inform range sync result. Likely shutting down." 
+ ); + }); + } + // this a parent lookup request from the sync manager + ProcessId::ParentLookup(peer_id, chain_head) => { + debug!( + log, "Processing parent lookup"; + "last_peer_id" => format!("{}", peer_id), + "blocks" => downloaded_blocks.len() + ); + // parent blocks are ordered from highest slot to lowest, so we need to process in + // reverse + match process_blocks(chain, downloaded_blocks.iter().rev(), &log) { + (_, Err(e)) => { + debug!(log, "Parent lookup failed"; "last_peer_id" => format!("{}", peer_id), "error" => e); + sync_send + .send(SyncMessage::ParentLookupFailed{peer_id, chain_head}) + .unwrap_or_else(|_| { + // on failure, inform to downvote the peer + debug!( + log, + "Block processor could not inform parent lookup result. Likely shutting down." + ); + }); + } + (_, Ok(_)) => { + debug!(log, "Parent lookup processed successfully"); + } + } + } + } +} + +/// Helper function to process blocks batches which only consumes the chain and blocks to process. +fn process_blocks< + 'a, + T: BeaconChainTypes, + I: Iterator>, +>( + chain: Arc>, + downloaded_blocks: I, + log: &slog::Logger, +) -> (usize, Result<(), String>) { + let blocks = downloaded_blocks.cloned().collect::>(); + match chain.process_chain_segment(blocks) { + ChainSegmentResult::Successful { imported_blocks } => { + metrics::inc_counter(&metrics::BEACON_PROCESSOR_CHAIN_SEGMENT_SUCCESS_TOTAL); + if imported_blocks == 0 { + debug!(log, "All blocks already known"); + } else { + debug!( + log, "Imported blocks from network"; + "count" => imported_blocks, + ); + // Batch completed successfully with at least one block, run fork choice. 
+ run_fork_choice(chain, log); + } + + (imported_blocks, Ok(())) + } + ChainSegmentResult::Failed { + imported_blocks, + error, + } => { + metrics::inc_counter(&metrics::BEACON_PROCESSOR_CHAIN_SEGMENT_FAILED_TOTAL); + let r = handle_failed_chain_segment(error, log); + if imported_blocks > 0 { + run_fork_choice(chain, log); + } + (imported_blocks, r) + } + } +} + +/// Runs fork-choice on a given chain. This is used during block processing after one successful +/// block import. +fn run_fork_choice(chain: Arc>, log: &slog::Logger) { + match chain.fork_choice() { + Ok(()) => trace!( + log, + "Fork choice success"; + "location" => "batch processing" + ), + Err(e) => error!( + log, + "Fork choice failed"; + "error" => format!("{:?}", e), + "location" => "batch import error" + ), + } +} + +/// Helper function to handle a `BlockError` from `process_chain_segment` +fn handle_failed_chain_segment( + error: BlockError, + log: &slog::Logger, +) -> Result<(), String> { + match error { + BlockError::ParentUnknown(block) => { + // blocks should be sequential and all parents should exist + + Err(format!( + "Block has an unknown parent: {}", + block.parent_root() + )) + } + BlockError::BlockIsAlreadyKnown => { + // This can happen for many reasons. Head sync's can download multiples and parent + // lookups can download blocks before range sync + Ok(()) + } + BlockError::FutureSlot { + present_slot, + block_slot, + } => { + if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot { + // The block is too far in the future, drop it. + warn!( + log, "Block is ahead of our slot clock"; + "msg" => "block for future slot rejected, check your time", + "present_slot" => present_slot, + "block_slot" => block_slot, + "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, + ); + } else { + // The block is in the future, but not too far. 
+ debug!( + log, "Block is slightly ahead of our slot clock, ignoring."; + "present_slot" => present_slot, + "block_slot" => block_slot, + "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, + ); + } + + Err(format!( + "Block with slot {} is higher than the current slot {}", + block_slot, present_slot + )) + } + BlockError::WouldRevertFinalizedSlot { .. } => { + debug!( log, "Finalized or earlier block processed";); + + Ok(()) + } + BlockError::GenesisBlock => { + debug!(log, "Genesis block was processed"); + Ok(()) + } + BlockError::BeaconChainError(e) => { + warn!( + log, "BlockProcessingFailure"; + "msg" => "unexpected condition in processing block.", + "outcome" => format!("{:?}", e) + ); + + Err(format!("Internal error whilst processing block: {:?}", e)) + } + other => { + debug!( + log, "Invalid block received"; + "msg" => "peer sent invalid block", + "outcome" => format!("{:?}", other), + ); + + Err(format!("Peer sent invalid block. Reason: {:?}", other)) + } + } +} diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs new file mode 100644 index 00000000000..c62bbdeb360 --- /dev/null +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -0,0 +1,819 @@ +//! Provides the `BeaconProcessor`, a mutli-threaded processor for messages received on the network +//! that need to be processed by the `BeaconChain`. +//! +//! Uses `tokio` tasks (instead of raw threads) to provide the following tasks: +//! +//! - A "manager" task, which either spawns worker tasks or enqueues work. +//! - One or more "worker" tasks which perform time-intensive work on the `BeaconChain`. +//! +//! ## Purpose +//! +//! The purpose of the `BeaconProcessor` is to provide two things: +//! +//! 1. Moving long-running, blocking tasks off the main `tokio` executor. +//! 2. A fixed-length buffer for consensus messages. +//! +//! (1) ensures that we don't clog up the networking stack with long-running tasks, potentially +//! causing timeouts. 
(2) means that we can easily and explicitly reject messages when we're +//! overloaded and also distribute load across time. +//! +//! ## Detail +//! +//! There is a single "manager" thread who listens to two event channels. These events are either: +//! +//! - A new parcel of work (work event). +//! - Indication that a worker has finished a parcel of work (worker idle). +//! +//! Then, there is a maximum of `n` "worker" blocking threads, where `n` is the CPU count. +//! +//! Whenever the manager receives a new parcel of work, it either: +//! +//! - Provided to a newly-spawned worker tasks (if we are not already at `n` workers). +//! - Added to a queue. +//! +//! Whenever the manager receives a notification that a worker has finished a parcel of work, it +//! checks the queues to see if there are more parcels of work that can be spawned in a new worker +//! task. + +use crate::{metrics, service::NetworkMessage, sync::SyncMessage}; +use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError}; +use environment::TaskExecutor; +use eth2_libp2p::{MessageId, NetworkGlobals, PeerId}; +use slog::{crit, debug, error, trace, warn, Logger}; +use std::collections::VecDeque; +use std::sync::{Arc, Weak}; +use std::time::{Duration, Instant}; +use tokio::sync::{mpsc, oneshot}; +use types::{ + Attestation, AttesterSlashing, EthSpec, Hash256, ProposerSlashing, SignedAggregateAndProof, + SignedBeaconBlock, SignedVoluntaryExit, SubnetId, +}; +use worker::Worker; + +mod chain_segment; +mod worker; + +pub use chain_segment::ProcessId; + +/// The maximum size of the channel for work events to the `BeaconProcessor`. +/// +/// Setting this too low will cause consensus messages to be dropped. +pub const MAX_WORK_EVENT_QUEUE_LEN: usize = 16_384; + +/// The maximum size of the channel for idle events to the `BeaconProcessor`. +/// +/// Setting this too low will prevent new workers from being spawned. It *should* only need to be +/// set to the CPU count, but we set it high to be safe. 
+const MAX_IDLE_QUEUE_LEN: usize = 16_384; + +/// The maximum number of queued `Attestation` objects that will be stored before we start dropping +/// them. +const MAX_UNAGGREGATED_ATTESTATION_QUEUE_LEN: usize = 16_384; + +/// The maximum number of queued `SignedAggregateAndProof` objects that will be stored before we +/// start dropping them. +const MAX_AGGREGATED_ATTESTATION_QUEUE_LEN: usize = 1_024; + +/// The maximum number of queued `SignedBeaconBlock` objects received on gossip that will be stored +/// before we start dropping them. +const MAX_GOSSIP_BLOCK_QUEUE_LEN: usize = 1_024; + +/// The maximum number of queued `SignedVoluntaryExit` objects received on gossip that will be stored +/// before we start dropping them. +const MAX_GOSSIP_EXIT_QUEUE_LEN: usize = 4_096; + +/// The maximum number of queued `ProposerSlashing` objects received on gossip that will be stored +/// before we start dropping them. +const MAX_GOSSIP_PROPOSER_SLASHING_QUEUE_LEN: usize = 4_096; + +/// The maximum number of queued `AttesterSlashing` objects received on gossip that will be stored +/// before we start dropping them. +const MAX_GOSSIP_ATTESTER_SLASHING_QUEUE_LEN: usize = 4_096; + +/// The maximum number of queued `SignedBeaconBlock` objects received from the network RPC that +/// will be stored before we start dropping them. +const MAX_RPC_BLOCK_QUEUE_LEN: usize = 1_024; + +/// The maximum number of queued `Vec` objects received during syncing that will +/// be stored before we start dropping them. +const MAX_CHAIN_SEGMENT_QUEUE_LEN: usize = 64; + +/// The name of the manager tokio task. +const MANAGER_TASK_NAME: &str = "beacon_gossip_processor_manager"; +/// The name of the worker tokio tasks. +const WORKER_TASK_NAME: &str = "beacon_gossip_processor_worker"; + +/// The minimum interval between log messages indicating that a queue is full. 
+const LOG_DEBOUNCE_INTERVAL: Duration = Duration::from_secs(30); + +/// Used to send/receive results from a rpc block import in a blocking task. +pub type BlockResultSender = oneshot::Sender>>; +pub type BlockResultReceiver = oneshot::Receiver>>; + +/// A simple first-in-first-out queue with a maximum length. +struct FifoQueue { + queue: VecDeque, + max_length: usize, +} + +impl FifoQueue { + /// Create a new, empty queue with the given length. + pub fn new(max_length: usize) -> Self { + Self { + queue: VecDeque::default(), + max_length, + } + } + + /// Add a new item to the queue. + /// + /// Drops `item` if the queue is full. + pub fn push(&mut self, item: T, item_desc: &str, log: &Logger) { + if self.queue.len() == self.max_length { + error!( + log, + "Block queue full"; + "msg" => "the system has insufficient resources for load", + "queue_len" => self.max_length, + "queue" => item_desc, + ) + } else { + self.queue.push_back(item); + } + } + + /// Remove the next item from the queue. + pub fn pop(&mut self) -> Option { + self.queue.pop_front() + } + + /// Returns the current length of the queue. + pub fn len(&self) -> usize { + self.queue.len() + } +} + +/// A simple last-in-first-out queue with a maximum length. +struct LifoQueue { + queue: VecDeque, + max_length: usize, +} + +impl LifoQueue { + /// Create a new, empty queue with the given length. + pub fn new(max_length: usize) -> Self { + Self { + queue: VecDeque::default(), + max_length, + } + } + + /// Add a new item to the front of the queue. + /// + /// If the queue is full, the item at the back of the queue is dropped. + pub fn push(&mut self, item: T) { + if self.queue.len() == self.max_length { + self.queue.pop_back(); + } + self.queue.push_front(item); + } + + /// Remove the next item from the queue. + pub fn pop(&mut self) -> Option { + self.queue.pop_front() + } + + /// Returns `true` if the queue is full. 
+ pub fn is_full(&self) -> bool { + self.queue.len() >= self.max_length + } + + /// Returns the current length of the queue. + pub fn len(&self) -> usize { + self.queue.len() + } +} + +/// An event to be processed by the manager task. +#[derive(Debug)] +pub struct WorkEvent { + drop_during_sync: bool, + work: Work, +} + +impl WorkEvent { + /// Create a new `Work` event for some unaggregated attestation. + pub fn unaggregated_attestation( + message_id: MessageId, + peer_id: PeerId, + attestation: Attestation, + subnet_id: SubnetId, + should_import: bool, + ) -> Self { + Self { + drop_during_sync: true, + work: Work::GossipAttestation { + message_id, + peer_id, + attestation: Box::new(attestation), + subnet_id, + should_import, + }, + } + } + + /// Create a new `Work` event for some aggregated attestation. + pub fn aggregated_attestation( + message_id: MessageId, + peer_id: PeerId, + aggregate: SignedAggregateAndProof, + ) -> Self { + Self { + drop_during_sync: true, + work: Work::GossipAggregate { + message_id, + peer_id, + aggregate: Box::new(aggregate), + }, + } + } + + /// Create a new `Work` event for some block. + pub fn gossip_beacon_block( + message_id: MessageId, + peer_id: PeerId, + block: Box>, + ) -> Self { + Self { + drop_during_sync: false, + work: Work::GossipBlock { + message_id, + peer_id, + block, + }, + } + } + + /// Create a new `Work` event for some exit. + pub fn gossip_voluntary_exit( + message_id: MessageId, + peer_id: PeerId, + voluntary_exit: Box, + ) -> Self { + Self { + drop_during_sync: false, + work: Work::GossipVoluntaryExit { + message_id, + peer_id, + voluntary_exit, + }, + } + } + + /// Create a new `Work` event for some proposer slashing. 
+ pub fn gossip_proposer_slashing( + message_id: MessageId, + peer_id: PeerId, + proposer_slashing: Box, + ) -> Self { + Self { + drop_during_sync: false, + work: Work::GossipProposerSlashing { + message_id, + peer_id, + proposer_slashing, + }, + } + } + + /// Create a new `Work` event for some attester slashing. + pub fn gossip_attester_slashing( + message_id: MessageId, + peer_id: PeerId, + attester_slashing: Box>, + ) -> Self { + Self { + drop_during_sync: false, + work: Work::GossipAttesterSlashing { + message_id, + peer_id, + attester_slashing, + }, + } + } + + /// Create a new `Work` event for some block, where the result from computation (if any) is + /// sent to the other side of `result_tx`. + pub fn rpc_beacon_block(block: Box>) -> (Self, BlockResultReceiver) { + let (result_tx, result_rx) = oneshot::channel(); + let event = Self { + drop_during_sync: false, + work: Work::RpcBlock { block, result_tx }, + }; + (event, result_rx) + } + + /// Create a new work event to import `blocks` as a beacon chain segment. + pub fn chain_segment(process_id: ProcessId, blocks: Vec>) -> Self { + Self { + drop_during_sync: false, + work: Work::ChainSegment { process_id, blocks }, + } + } +} + +/// A consensus message (or multiple) from the network that requires processing. 
+#[derive(Debug)] +pub enum Work { + GossipAttestation { + message_id: MessageId, + peer_id: PeerId, + attestation: Box>, + subnet_id: SubnetId, + should_import: bool, + }, + GossipAggregate { + message_id: MessageId, + peer_id: PeerId, + aggregate: Box>, + }, + GossipBlock { + message_id: MessageId, + peer_id: PeerId, + block: Box>, + }, + GossipVoluntaryExit { + message_id: MessageId, + peer_id: PeerId, + voluntary_exit: Box, + }, + GossipProposerSlashing { + message_id: MessageId, + peer_id: PeerId, + proposer_slashing: Box, + }, + GossipAttesterSlashing { + message_id: MessageId, + peer_id: PeerId, + attester_slashing: Box>, + }, + RpcBlock { + block: Box>, + result_tx: BlockResultSender, + }, + ChainSegment { + process_id: ProcessId, + blocks: Vec>, + }, +} + +impl Work { + /// Provides a `&str` that uniquely identifies each enum variant. + fn str_id(&self) -> &'static str { + match self { + Work::GossipAttestation { .. } => "gossip_attestation", + Work::GossipAggregate { .. } => "gossip_aggregate", + Work::GossipBlock { .. } => "gossip_block", + Work::GossipVoluntaryExit { .. } => "gossip_voluntary_exit", + Work::GossipProposerSlashing { .. } => "gossip_proposer_slashing", + Work::GossipAttesterSlashing { .. } => "gossip_attester_slashing", + Work::RpcBlock { .. } => "rpc_block", + Work::ChainSegment { .. } => "chain_segment", + } + } +} + +/// Provides de-bounce functionality for logging. +#[derive(Default)] +struct TimeLatch(Option); + +impl TimeLatch { + /// Only returns true once every `LOG_DEBOUNCE_INTERVAL`. + fn elapsed(&mut self) -> bool { + let now = Instant::now(); + + let is_elapsed = self.0.map_or(false, |elapse_time| now > elapse_time); + + if is_elapsed || self.0.is_none() { + self.0 = Some(now + LOG_DEBOUNCE_INTERVAL); + } + + is_elapsed + } +} + +/// A mutli-threaded processor for messages received on the network +/// that need to be processed by the `BeaconChain` +/// +/// See module level documentation for more information. 
+pub struct BeaconProcessor { + pub beacon_chain: Weak>, + pub network_tx: mpsc::UnboundedSender>, + pub sync_tx: mpsc::UnboundedSender>, + pub network_globals: Arc>, + pub executor: TaskExecutor, + pub max_workers: usize, + pub current_workers: usize, + pub log: Logger, +} + +impl BeaconProcessor { + /// Spawns the "manager" task which checks the receiver end of the returned `Sender` for + /// messages which contain some new work which will be: + /// + /// - Performed immediately, if a worker is available. + /// - Queued for later processing, if no worker is currently available. + /// + /// Only `self.max_workers` will ever be spawned at one time. Each worker is a `tokio` task + /// started with `spawn_blocking`. + pub fn spawn_manager(mut self, mut event_rx: mpsc::Receiver>) { + let (idle_tx, mut idle_rx) = mpsc::channel::<()>(MAX_IDLE_QUEUE_LEN); + + // Using LIFO queues for attestations since validator profits rely upon getting fresh + // attestations into blocks. Additionally, later attestations contain more information than + // earlier ones, so we consider them more valuable. + let mut aggregate_queue = LifoQueue::new(MAX_AGGREGATED_ATTESTATION_QUEUE_LEN); + let mut aggregate_debounce = TimeLatch::default(); + let mut attestation_queue = LifoQueue::new(MAX_UNAGGREGATED_ATTESTATION_QUEUE_LEN); + let mut attestation_debounce = TimeLatch::default(); + + // Using a FIFO queue for voluntary exits since it prevents exit censoring. I don't have + // a strong feeling about queue type for exits. + let mut gossip_voluntary_exit_queue = FifoQueue::new(MAX_GOSSIP_EXIT_QUEUE_LEN); + + // Using a FIFO queue for slashing to prevent people from flushing their slashings from the + // queues with lots of junk messages. 
+ let mut gossip_proposer_slashing_queue = + FifoQueue::new(MAX_GOSSIP_PROPOSER_SLASHING_QUEUE_LEN); + let mut gossip_attester_slashing_queue = + FifoQueue::new(MAX_GOSSIP_ATTESTER_SLASHING_QUEUE_LEN); + + // Using a FIFO queue since blocks need to be imported sequentially. + let mut rpc_block_queue = FifoQueue::new(MAX_RPC_BLOCK_QUEUE_LEN); + let mut chain_segment_queue = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN); + let mut gossip_block_queue = FifoQueue::new(MAX_GOSSIP_BLOCK_QUEUE_LEN); + + let executor = self.executor.clone(); + + // The manager future will run on the core executor and delegate tasks to worker + // threads on the blocking executor. + let manager_future = async move { + loop { + // Listen to both the event and idle channels, acting on whichever is ready + // first. + // + // Set `work_event = Some(event)` if there is new work to be done. Otherwise sets + // `event = None` if it was a worker becoming idle. + let work_event = tokio::select! { + // A worker has finished some work. + new_idle_opt = idle_rx.recv() => { + if new_idle_opt.is_some() { + self.current_workers = self.current_workers.saturating_sub(1); + None + } else { + // Exit if all idle senders have been dropped. + // + // This shouldn't happen since this function holds a sender. + crit!( + self.log, + "Gossip processor stopped"; + "msg" => "all idle senders dropped" + ); + break + } + }, + // There is a new piece of work to be handled. + new_work_event_opt = event_rx.recv() => { + if let Some(new_work_event) = new_work_event_opt { + Some(new_work_event) + } else { + // Exit if all event senders have been dropped. + // + // This should happen when the client shuts down. 
+ debug!( + self.log, + "Gossip processor stopped"; + "msg" => "all event senders dropped" + ); + break + } + } + }; + + let _event_timer = + metrics::start_timer(&metrics::BEACON_PROCESSOR_EVENT_HANDLING_SECONDS); + if let Some(event) = &work_event { + metrics::inc_counter_vec( + &metrics::BEACON_PROCESSOR_WORK_EVENTS_RX_COUNT, + &[event.work.str_id()], + ); + } else { + metrics::inc_counter(&metrics::BEACON_PROCESSOR_IDLE_EVENTS_TOTAL); + } + + let can_spawn = self.current_workers < self.max_workers; + let drop_during_sync = work_event + .as_ref() + .map_or(false, |event| event.drop_during_sync); + + match work_event { + // There is no new work event, but we are able to spawn a new worker. + // + // We don't check the `work.drop_during_sync` here. We assume that if it made + // it into the queue at any point then we should process it. + None if can_spawn => { + // Check for chain segments first, they're the most efficient way to get + // blocks into the system. + if let Some(item) = chain_segment_queue.pop() { + self.spawn_worker(idle_tx.clone(), item); + // Check sync blocks before gossip blocks, since we've already explicitly + // requested these blocks. + } else if let Some(item) = rpc_block_queue.pop() { + self.spawn_worker(idle_tx.clone(), item); + // Check gossip blocks before gossip attestations, since a block might be + // required to verify some attestations. + } else if let Some(item) = gossip_block_queue.pop() { + self.spawn_worker(idle_tx.clone(), item); + // Check the aggregates, *then* the unaggregates + // since we assume that aggregates are more valuable to local validators + // and effectively give us more information with less signature + // verification time. + } else if let Some(item) = aggregate_queue.pop() { + self.spawn_worker(idle_tx.clone(), item); + } else if let Some(item) = attestation_queue.pop() { + self.spawn_worker(idle_tx.clone(), item); + // Check slashings after all other consensus messages so we prioritize + // following head. 
+ // + // Check attester slashings before proposer slashings since they have the + // potential to slash multiple validators at once. + } else if let Some(item) = gossip_attester_slashing_queue.pop() { + self.spawn_worker(idle_tx.clone(), item); + } else if let Some(item) = gossip_proposer_slashing_queue.pop() { + self.spawn_worker(idle_tx.clone(), item); + // Check exits last since our validators don't get rewards from them. + } else if let Some(item) = gossip_voluntary_exit_queue.pop() { + self.spawn_worker(idle_tx.clone(), item); + } + } + // There is no new work event and we are unable to spawn a new worker. + // + // I cannot see any good reason why this would happen. + None => { + warn!( + self.log, + "Unexpected gossip processor condition"; + "msg" => "no new work and cannot spawn worker" + ); + } + // The chain is syncing and this event should be dropped during sync. + Some(work_event) + if self.network_globals.sync_state.read().is_syncing() + && drop_during_sync => + { + let work_id = work_event.work.str_id(); + metrics::inc_counter_vec( + &metrics::BEACON_PROCESSOR_WORK_EVENTS_IGNORED_COUNT, + &[work_id], + ); + trace!( + self.log, + "Gossip processor skipping work"; + "msg" => "chain is syncing", + "work_id" => work_id + ); + } + // There is a new work event and the chain is not syncing. Process it. + Some(WorkEvent { work, .. }) => { + let work_id = work.str_id(); + match work { + _ if can_spawn => self.spawn_worker(idle_tx.clone(), work), + Work::GossipAttestation { .. } => attestation_queue.push(work), + Work::GossipAggregate { .. } => aggregate_queue.push(work), + Work::GossipBlock { .. } => { + gossip_block_queue.push(work, work_id, &self.log) + } + Work::GossipVoluntaryExit { .. } => { + gossip_voluntary_exit_queue.push(work, work_id, &self.log) + } + Work::GossipProposerSlashing { .. } => { + gossip_proposer_slashing_queue.push(work, work_id, &self.log) + } + Work::GossipAttesterSlashing { .. 
} => { + gossip_attester_slashing_queue.push(work, work_id, &self.log) + } + Work::RpcBlock { .. } => rpc_block_queue.push(work, work_id, &self.log), + Work::ChainSegment { .. } => { + chain_segment_queue.push(work, work_id, &self.log) + } + } + } + } + + metrics::set_gauge( + &metrics::BEACON_PROCESSOR_WORKERS_ACTIVE_TOTAL, + self.current_workers as i64, + ); + metrics::set_gauge( + &metrics::BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_QUEUE_TOTAL, + attestation_queue.len() as i64, + ); + metrics::set_gauge( + &metrics::BEACON_PROCESSOR_AGGREGATED_ATTESTATION_QUEUE_TOTAL, + aggregate_queue.len() as i64, + ); + metrics::set_gauge( + &metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_QUEUE_TOTAL, + gossip_block_queue.len() as i64, + ); + metrics::set_gauge( + &metrics::BEACON_PROCESSOR_RPC_BLOCK_QUEUE_TOTAL, + rpc_block_queue.len() as i64, + ); + metrics::set_gauge( + &metrics::BEACON_PROCESSOR_CHAIN_SEGMENT_QUEUE_TOTAL, + chain_segment_queue.len() as i64, + ); + metrics::set_gauge( + &metrics::BEACON_PROCESSOR_EXIT_QUEUE_TOTAL, + gossip_voluntary_exit_queue.len() as i64, + ); + metrics::set_gauge( + &metrics::BEACON_PROCESSOR_PROPOSER_SLASHING_QUEUE_TOTAL, + gossip_proposer_slashing_queue.len() as i64, + ); + metrics::set_gauge( + &metrics::BEACON_PROCESSOR_ATTESTER_SLASHING_QUEUE_TOTAL, + gossip_attester_slashing_queue.len() as i64, + ); + + if aggregate_queue.is_full() && aggregate_debounce.elapsed() { + error!( + self.log, + "Aggregate attestation queue full"; + "msg" => "the system has insufficient resources for load", + "queue_len" => aggregate_queue.max_length, + ) + } + + if attestation_queue.is_full() && attestation_debounce.elapsed() { + error!( + self.log, + "Attestation queue full"; + "msg" => "the system has insufficient resources for load", + "queue_len" => attestation_queue.max_length, + ) + } + } + }; + + // Spawn on the core executor. + executor.spawn(manager_future, MANAGER_TASK_NAME); + } + + /// Spawns a blocking worker thread to process some `Work`. 
+ /// + /// Sends an message on `idle_tx` when the work is complete and the task is stopping. + fn spawn_worker(&mut self, mut idle_tx: mpsc::Sender<()>, work: Work) { + let work_id = work.str_id(); + let worker_timer = + metrics::start_timer_vec(&metrics::BEACON_PROCESSOR_WORKER_TIME, &[work_id]); + metrics::inc_counter(&metrics::BEACON_PROCESSOR_WORKERS_SPAWNED_TOTAL); + metrics::inc_counter_vec( + &metrics::BEACON_PROCESSOR_WORK_EVENTS_STARTED_COUNT, + &[work.str_id()], + ); + + let worker_id = self.current_workers; + self.current_workers = self.current_workers.saturating_add(1); + + let chain = if let Some(chain) = self.beacon_chain.upgrade() { + chain + } else { + debug!( + self.log, + "Beacon chain dropped, shutting down"; + ); + return; + }; + + let log = self.log.clone(); + let executor = self.executor.clone(); + + let worker = Worker { + chain, + network_tx: self.network_tx.clone(), + sync_tx: self.sync_tx.clone(), + log: self.log.clone(), + }; + + trace!( + self.log, + "Spawning beacon processor worker"; + "work" => work_id, + "worker" => worker_id, + ); + + executor.spawn_blocking( + move || { + let _worker_timer = worker_timer; + + match work { + /* + * Unaggregated attestation verification. + */ + Work::GossipAttestation { + message_id, + peer_id, + attestation, + subnet_id, + should_import, + } => worker.process_gossip_attestation( + message_id, + peer_id, + *attestation, + subnet_id, + should_import, + ), + /* + * Aggregated attestation verification. + */ + Work::GossipAggregate { + message_id, + peer_id, + aggregate, + } => worker.process_gossip_aggregate(message_id, peer_id, *aggregate), + /* + * Verification for beacon blocks received on gossip. + */ + Work::GossipBlock { + message_id, + peer_id, + block, + } => worker.process_gossip_block(message_id, peer_id, *block), + /* + * Voluntary exits received on gossip. 
+ */ + Work::GossipVoluntaryExit { + message_id, + peer_id, + voluntary_exit, + } => worker.process_gossip_voluntary_exit(message_id, peer_id, *voluntary_exit), + /* + * Proposer slashings received on gossip. + */ + Work::GossipProposerSlashing { + message_id, + peer_id, + proposer_slashing, + } => worker.process_gossip_proposer_slashing( + message_id, + peer_id, + *proposer_slashing, + ), + /* + * Attester slashings received on gossip. + */ + Work::GossipAttesterSlashing { + message_id, + peer_id, + attester_slashing, + } => worker.process_gossip_attester_slashing( + message_id, + peer_id, + *attester_slashing, + ), + /* + * Verification for beacon blocks received during syncing via RPC. + */ + Work::RpcBlock { block, result_tx } => { + worker.process_rpc_block(*block, result_tx) + } + /* + * Verification for a chain segment (multiple blocks). + */ + Work::ChainSegment { process_id, blocks } => { + worker.process_chain_segment(process_id, blocks) + } + }; + + trace!( + log, + "Beacon processor worker done"; + "work" => work_id, + "worker" => worker_id, + ); + + idle_tx.try_send(()).unwrap_or_else(|e| { + crit!( + log, + "Unable to free worker"; + "msg" => "failed to send idle_tx message", + "error" => e.to_string() + ) + }); + }, + WORKER_TASK_NAME, + ); + } +} diff --git a/beacon_node/network/src/beacon_processor/worker.rs b/beacon_node/network/src/beacon_processor/worker.rs new file mode 100644 index 00000000000..41a49618267 --- /dev/null +++ b/beacon_node/network/src/beacon_processor/worker.rs @@ -0,0 +1,850 @@ +use super::{ + chain_segment::{handle_chain_segment, ProcessId}, + BlockResultSender, +}; +use crate::{metrics, service::NetworkMessage, sync::SyncMessage}; +use beacon_chain::{ + attestation_verification::Error as AttnError, observed_operations::ObservationOutcome, + BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, ForkChoiceError, +}; +use eth2_libp2p::{MessageAcceptance, MessageId, PeerId}; +use slog::{crit, debug, error, info, trace, 
warn, Logger}; +use ssz::Encode; +use std::sync::Arc; +use tokio::sync::mpsc; +use types::{ + Attestation, AttesterSlashing, Hash256, ProposerSlashing, SignedAggregateAndProof, + SignedBeaconBlock, SignedVoluntaryExit, SubnetId, +}; + +/// Contains the context necessary to import blocks, attestations, etc to the beacon chain. +pub struct Worker { + pub chain: Arc>, + pub network_tx: mpsc::UnboundedSender>, + pub sync_tx: mpsc::UnboundedSender>, + pub log: Logger, +} + +impl Worker { + /// Process the unaggregated attestation received from the gossip network and: + /// + /// - If it passes gossip propagation criteria, tell the network thread to forward it. + /// - Attempt to apply it to fork choice. + /// - Attempt to add it to the naive aggregation pool. + /// + /// Raises a log if there are errors. + pub fn process_gossip_attestation( + self, + message_id: MessageId, + peer_id: PeerId, + attestation: Attestation, + subnet_id: SubnetId, + should_import: bool, + ) { + let beacon_block_root = attestation.data.beacon_block_root; + + let attestation = match self + .chain + .verify_unaggregated_attestation_for_gossip(attestation, subnet_id) + { + Ok(attestation) => attestation, + Err(e) => { + self.handle_attestation_verification_failure( + peer_id, + message_id, + beacon_block_root, + "unaggregated", + e, + ); + return; + } + }; + + // Indicate to the `Network` service that this message is valid and can be + // propagated on the gossip network. 
+ self.propagate_validation_result(message_id, peer_id.clone(), MessageAcceptance::Accept); + + if !should_import { + return; + } + + metrics::inc_counter(&metrics::BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_VERIFIED_TOTAL); + + if let Err(e) = self.chain.apply_attestation_to_fork_choice(&attestation) { + match e { + BeaconChainError::ForkChoiceError(ForkChoiceError::InvalidAttestation(e)) => { + debug!( + self.log, + "Attestation invalid for fork choice"; + "reason" => format!("{:?}", e), + "peer" => peer_id.to_string(), + "beacon_block_root" => format!("{:?}", beacon_block_root) + ) + } + e => error!( + self.log, + "Error applying attestation to fork choice"; + "reason" => format!("{:?}", e), + "peer" => peer_id.to_string(), + "beacon_block_root" => format!("{:?}", beacon_block_root) + ), + } + } + + if let Err(e) = self.chain.add_to_naive_aggregation_pool(attestation) { + debug!( + self.log, + "Attestation invalid for agg pool"; + "reason" => format!("{:?}", e), + "peer" => peer_id.to_string(), + "beacon_block_root" => format!("{:?}", beacon_block_root) + ) + } + + metrics::inc_counter(&metrics::BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_IMPORTED_TOTAL); + } + + /// Process the aggregated attestation received from the gossip network and: + /// + /// - If it passes gossip propagation criteria, tell the network thread to forward it. + /// - Attempt to apply it to fork choice. + /// - Attempt to add it to the block inclusion pool. + /// + /// Raises a log if there are errors. 
+ pub fn process_gossip_aggregate( + self, + message_id: MessageId, + peer_id: PeerId, + aggregate: SignedAggregateAndProof, + ) { + let beacon_block_root = aggregate.message.aggregate.data.beacon_block_root; + + let aggregate = match self + .chain + .verify_aggregated_attestation_for_gossip(aggregate) + { + Ok(aggregate) => aggregate, + Err(e) => { + // Report the failure to gossipsub + self.handle_attestation_verification_failure( + peer_id, + message_id, + beacon_block_root, + "aggregated", + e, + ); + return; + } + }; + + // Indicate to the `Network` service that this message is valid and can be + // propagated on the gossip network. + self.propagate_validation_result(message_id, peer_id.clone(), MessageAcceptance::Accept); + + metrics::inc_counter(&metrics::BEACON_PROCESSOR_AGGREGATED_ATTESTATION_VERIFIED_TOTAL); + + if let Err(e) = self.chain.apply_attestation_to_fork_choice(&aggregate) { + match e { + BeaconChainError::ForkChoiceError(ForkChoiceError::InvalidAttestation(e)) => { + debug!( + self.log, + "Aggregate invalid for fork choice"; + "reason" => format!("{:?}", e), + "peer" => peer_id.to_string(), + "beacon_block_root" => format!("{:?}", beacon_block_root) + ) + } + e => error!( + self.log, + "Error applying aggregate to fork choice"; + "reason" => format!("{:?}", e), + "peer" => peer_id.to_string(), + "beacon_block_root" => format!("{:?}", beacon_block_root) + ), + } + } + + if let Err(e) = self.chain.add_to_block_inclusion_pool(aggregate) { + debug!( + self.log, + "Attestation invalid for op pool"; + "reason" => format!("{:?}", e), + "peer" => peer_id.to_string(), + "beacon_block_root" => format!("{:?}", beacon_block_root) + ) + } + + metrics::inc_counter(&metrics::BEACON_PROCESSOR_AGGREGATED_ATTESTATION_IMPORTED_TOTAL); + } + + /// Process the beacon block received from the gossip network and: + /// + /// - If it passes gossip propagation criteria, tell the network thread to forward it. 
+ /// - Attempt to add it to the beacon chain, informing the sync thread if more blocks need to + /// be downloaded. + /// + /// Raises a log if there are errors. + pub fn process_gossip_block( + self, + message_id: MessageId, + peer_id: PeerId, + block: SignedBeaconBlock, + ) { + let verified_block = match self.chain.verify_block_for_gossip(block) { + Ok(verified_block) => { + info!( + self.log, + "New block received"; + "slot" => verified_block.block.slot(), + "hash" => verified_block.block_root.to_string() + ); + self.propagate_validation_result( + message_id, + peer_id.clone(), + MessageAcceptance::Accept, + ); + verified_block + } + Err(BlockError::ParentUnknown(block)) => { + self.send_sync_message(SyncMessage::UnknownBlock(peer_id, block)); + return; + } + Err(e @ BlockError::FutureSlot { .. }) + | Err(e @ BlockError::WouldRevertFinalizedSlot { .. }) + | Err(e @ BlockError::BlockIsAlreadyKnown) + | Err(e @ BlockError::RepeatProposal { .. }) + | Err(e @ BlockError::NotFinalizedDescendant { .. }) + | Err(e @ BlockError::BeaconChainError(_)) => { + warn!(self.log, "Could not verify block for gossip, ignoring the block"; + "error" => e.to_string()); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + return; + } + Err(e @ BlockError::StateRootMismatch { .. }) + | Err(e @ BlockError::IncorrectBlockProposer { .. }) + | Err(e @ BlockError::BlockSlotLimitReached) + | Err(e @ BlockError::ProposalSignatureInvalid) + | Err(e @ BlockError::NonLinearSlots) + | Err(e @ BlockError::UnknownValidator(_)) + | Err(e @ BlockError::PerBlockProcessingError(_)) + | Err(e @ BlockError::NonLinearParentRoots) + | Err(e @ BlockError::BlockIsNotLaterThanParent { .. }) + | Err(e @ BlockError::InvalidSignature) + | Err(e @ BlockError::TooManySkippedSlots { .. 
}) + | Err(e @ BlockError::GenesisBlock) => { + warn!(self.log, "Could not verify block for gossip, rejecting the block"; + "error" => e.to_string()); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); + return; + } + }; + + metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_VERIFIED_TOTAL); + + let block = Box::new(verified_block.block.clone()); + match self.chain.process_block(verified_block) { + Ok(_block_root) => { + metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL); + + trace!( + self.log, + "Gossipsub block processed"; + "peer_id" => peer_id.to_string() + ); + + // TODO: It would be better if we can run this _after_ we publish the block to + // reduce block propagation latency. + // + // The `MessageHandler` would be the place to put this, however it doesn't seem + // to have a reference to the `BeaconChain`. I will leave this for future + // works. + match self.chain.fork_choice() { + Ok(()) => trace!( + self.log, + "Fork choice success"; + "location" => "block gossip" + ), + Err(e) => error!( + self.log, + "Fork choice failed"; + "error" => format!("{:?}", e), + "location" => "block gossip" + ), + } + } + Err(BlockError::ParentUnknown { .. }) => { + // Inform the sync manager to find parents for this block + // This should not occur. 
It should be checked by `should_forward_block` + error!( + self.log, + "Block with unknown parent attempted to be processed"; + "peer_id" => peer_id.to_string() + ); + self.send_sync_message(SyncMessage::UnknownBlock(peer_id, block)); + } + other => { + debug!( + self.log, + "Invalid gossip beacon block"; + "outcome" => format!("{:?}", other), + "block root" => format!("{}", block.canonical_root()), + "block slot" => block.slot() + ); + trace!( + self.log, + "Invalid gossip beacon block ssz"; + "ssz" => format!("0x{}", hex::encode(block.as_ssz_bytes())), + ); + } + }; + } + + pub fn process_gossip_voluntary_exit( + self, + message_id: MessageId, + peer_id: PeerId, + voluntary_exit: SignedVoluntaryExit, + ) { + let validator_index = voluntary_exit.message.validator_index; + + let exit = match self.chain.verify_voluntary_exit_for_gossip(voluntary_exit) { + Ok(ObservationOutcome::New(exit)) => exit, + Ok(ObservationOutcome::AlreadyKnown) => { + self.propagate_validation_result( + message_id, + peer_id.clone(), + MessageAcceptance::Ignore, + ); + debug!( + self.log, + "Dropping exit for already exiting validator"; + "validator_index" => validator_index, + "peer" => peer_id.to_string() + ); + return; + } + Err(e) => { + debug!( + self.log, + "Dropping invalid exit"; + "validator_index" => validator_index, + "peer" => peer_id.to_string(), + "error" => format!("{:?}", e) + ); + // These errors occur due to a fault in the beacon chain. It is not necessarily + // the fault on the peer. 
+ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + return; + } + }; + + metrics::inc_counter(&metrics::BEACON_PROCESSOR_EXIT_VERIFIED_TOTAL); + + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept); + + self.chain.import_voluntary_exit(exit); + debug!(self.log, "Successfully imported voluntary exit"); + + metrics::inc_counter(&metrics::BEACON_PROCESSOR_EXIT_IMPORTED_TOTAL); + } + + pub fn process_gossip_proposer_slashing( + self, + message_id: MessageId, + peer_id: PeerId, + proposer_slashing: ProposerSlashing, + ) { + let validator_index = proposer_slashing.signed_header_1.message.proposer_index; + + let slashing = match self + .chain + .verify_proposer_slashing_for_gossip(proposer_slashing) + { + Ok(ObservationOutcome::New(slashing)) => slashing, + Ok(ObservationOutcome::AlreadyKnown) => { + debug!( + self.log, + "Dropping proposer slashing"; + "reason" => "Already seen a proposer slashing for that validator", + "validator_index" => validator_index, + "peer" => peer_id.to_string() + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + return; + } + Err(e) => { + // This is likely a fault with the beacon chain and not necessarily a + // malicious message from the peer. 
+ debug!( + self.log, + "Dropping invalid proposer slashing"; + "validator_index" => validator_index, + "peer" => peer_id.to_string(), + "error" => format!("{:?}", e) + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + return; + } + }; + + metrics::inc_counter(&metrics::BEACON_PROCESSOR_PROPOSER_SLASHING_VERIFIED_TOTAL); + + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept); + + self.chain.import_proposer_slashing(slashing); + debug!(self.log, "Successfully imported proposer slashing"); + + metrics::inc_counter(&metrics::BEACON_PROCESSOR_PROPOSER_SLASHING_IMPORTED_TOTAL); + } + + pub fn process_gossip_attester_slashing( + self, + message_id: MessageId, + peer_id: PeerId, + attester_slashing: AttesterSlashing, + ) { + let slashing = match self + .chain + .verify_attester_slashing_for_gossip(attester_slashing) + { + Ok(ObservationOutcome::New(slashing)) => slashing, + Ok(ObservationOutcome::AlreadyKnown) => { + debug!( + self.log, + "Dropping attester slashing"; + "reason" => "Slashings already known for all slashed validators", + "peer" => peer_id.to_string() + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + return; + } + Err(e) => { + debug!( + self.log, + "Dropping invalid attester slashing"; + "peer" => peer_id.to_string(), + "error" => format!("{:?}", e) + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + return; + } + }; + + metrics::inc_counter(&metrics::BEACON_PROCESSOR_ATTESTER_SLASHING_VERIFIED_TOTAL); + + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept); + + if let Err(e) = self.chain.import_attester_slashing(slashing) { + debug!(self.log, "Error importing attester slashing"; "error" => format!("{:?}", e)); + metrics::inc_counter(&metrics::BEACON_PROCESSOR_ATTESTER_SLASHING_ERROR_TOTAL); + } else { + debug!(self.log, "Successfully imported attester slashing"); + 
metrics::inc_counter(&metrics::BEACON_PROCESSOR_ATTESTER_SLASHING_IMPORTED_TOTAL); + } + } + + /// Attempt to process a block received from a direct RPC request, returning the processing + /// result on the `result_tx` channel. + /// + /// Raises a log if there are errors publishing the result to the channel. + pub fn process_rpc_block( + self, + block: SignedBeaconBlock, + result_tx: BlockResultSender, + ) { + let block_result = self.chain.process_block(block); + + metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL); + + if result_tx.send(block_result).is_err() { + crit!(self.log, "Failed return sync block result"); + } + } + + /// Attempt to import the chain segment (`blocks`) to the beacon chain, informing the sync + /// thread if more blocks are needed to process it. + pub fn process_chain_segment( + self, + process_id: ProcessId, + blocks: Vec>, + ) { + handle_chain_segment(self.chain, process_id, blocks, self.sync_tx, self.log) + } + + /// Send a message on `message_tx` that the `message_id` sent by `peer_id` should be propagated on + /// the gossip network. + /// + /// Creates a log if there is an interal error. + /// Propagates the result of the validation fot the given message to the network. If the result + /// is valid the message gets forwarded to other peers. + fn propagate_validation_result( + &self, + message_id: MessageId, + propagation_source: PeerId, + validation_result: MessageAcceptance, + ) { + self.network_tx + .send(NetworkMessage::ValidationResult { + propagation_source, + message_id, + validation_result, + }) + .unwrap_or_else(|_| { + warn!( + self.log, + "Could not send propagation request to the network service" + ) + }); + } + + /// Send a message to `sync_tx`. + /// + /// Creates a log if there is an interal error. 
+ fn send_sync_message(&self, message: SyncMessage) { + self.sync_tx + .send(message) + .unwrap_or_else(|_| error!(self.log, "Could not send message to the sync service")); + } + + /// Handle an error whilst verifying an `Attestation` or `SignedAggregateAndProof` from the + /// network. + pub fn handle_attestation_verification_failure( + &self, + peer_id: PeerId, + message_id: MessageId, + beacon_block_root: Hash256, + attestation_type: &str, + error: AttnError, + ) { + metrics::register_attestation_error(&error); + match &error { + AttnError::FutureEpoch { .. } + | AttnError::PastEpoch { .. } + | AttnError::FutureSlot { .. } + | AttnError::PastSlot { .. } => { + /* + * These errors can be triggered by a mismatch between our slot and the peer. + * + * + * The peer has published an invalid consensus message, _only_ if we trust our own clock. + */ + self.propagate_validation_result( + message_id, + peer_id.clone(), + MessageAcceptance::Reject, + ); + } + AttnError::InvalidSelectionProof { .. } | AttnError::InvalidSignature => { + /* + * These errors are caused by invalid signatures. + * + * The peer has published an invalid consensus message. + */ + self.propagate_validation_result( + message_id, + peer_id.clone(), + MessageAcceptance::Reject, + ); + } + AttnError::EmptyAggregationBitfield => { + /* + * The aggregate had no signatures and is therefore worthless. + * + * Whilst we don't gossip this attestation, this act is **not** a clear + * violation of the spec nor indication of fault. + * + */ + self.propagate_validation_result( + message_id, + peer_id.clone(), + MessageAcceptance::Reject, + ); + } + AttnError::AggregatorPubkeyUnknown(_) => { + /* + * The aggregator index was higher than any known validator index. This is + * possible in two cases: + * + * 1. The attestation is malformed + * 2. The attestation attests to a beacon_block_root that we do not know. 
+ * + * It should be impossible to reach (2) without triggering + * `AttnError::UnknownHeadBlock`, so we can safely assume the peer is + * faulty. + * + * The peer has published an invalid consensus message. + */ + self.propagate_validation_result( + message_id, + peer_id.clone(), + MessageAcceptance::Reject, + ); + } + AttnError::AggregatorNotInCommittee { .. } => { + /* + * The aggregator index was higher than any known validator index. This is + * possible in two cases: + * + * 1. The attestation is malformed + * 2. The attestation attests to a beacon_block_root that we do not know. + * + * It should be impossible to reach (2) without triggering + * `AttnError::UnknownHeadBlock`, so we can safely assume the peer is + * faulty. + * + * The peer has published an invalid consensus message. + */ + self.propagate_validation_result( + message_id, + peer_id.clone(), + MessageAcceptance::Reject, + ); + } + AttnError::AttestationAlreadyKnown { .. } => { + /* + * The aggregate attestation has already been observed on the network or in + * a block. + * + * The peer is not necessarily faulty. + */ + trace!( + self.log, + "Attestation already known"; + "peer_id" => peer_id.to_string(), + "block" => format!("{}", beacon_block_root), + "type" => format!("{:?}", attestation_type), + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + return; + } + AttnError::AggregatorAlreadyKnown(_) => { + /* + * There has already been an aggregate attestation seen from this + * aggregator index. + * + * The peer is not necessarily faulty. + */ + trace!( + self.log, + "Aggregator already known"; + "peer_id" => peer_id.to_string(), + "block" => format!("{}", beacon_block_root), + "type" => format!("{:?}", attestation_type), + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + return; + } + AttnError::PriorAttestationKnown { .. } => { + /* + * We have already seen an attestation from this validator for this epoch. 
+ * + * The peer is not necessarily faulty. + */ + trace!( + self.log, + "Prior attestation known"; + "peer_id" => peer_id.to_string(), + "block" => format!("{}", beacon_block_root), + "type" => format!("{:?}", attestation_type), + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + return; + } + AttnError::ValidatorIndexTooHigh(_) => { + /* + * The aggregator index (or similar field) was higher than the maximum + * possible number of validators. + * + * The peer has published an invalid consensus message. + */ + self.propagate_validation_result( + message_id, + peer_id.clone(), + MessageAcceptance::Reject, + ); + } + AttnError::UnknownHeadBlock { beacon_block_root } => { + // Note: its a little bit unclear as to whether or not this block is unknown or + // just old. See: + // + // https://github.com/sigp/lighthouse/issues/1039 + + // TODO: Maintain this attestation and re-process once sync completes + debug!( + self.log, + "Attestation for unknown block"; + "peer_id" => peer_id.to_string(), + "block" => format!("{}", beacon_block_root) + ); + // we don't know the block, get the sync manager to handle the block lookup + self.sync_tx + .send(SyncMessage::UnknownBlockHash( + peer_id.clone(), + *beacon_block_root, + )) + .unwrap_or_else(|_| { + warn!( + self.log, + "Failed to send to sync service"; + "msg" => "UnknownBlockHash" + ) + }); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + return; + } + AttnError::UnknownTargetRoot(_) => { + /* + * The block indicated by the target root is not known to us. + * + * We should always get `AttnError::UnknwonHeadBlock` before we get this + * error, so this means we can get this error if: + * + * 1. The target root does not represent a valid block. + * 2. We do not have the target root in our DB. + * + * For (2), we should only be processing attestations when we should have + * all the available information. 
Note: if we do a weak-subjectivity sync + * it's possible that this situation could occur, but I think it's + * unlikely. For now, we will declare this to be an invalid message> + * + * The peer has published an invalid consensus message. + */ + self.propagate_validation_result( + message_id, + peer_id.clone(), + MessageAcceptance::Reject, + ); + } + AttnError::BadTargetEpoch => { + /* + * The aggregator index (or similar field) was higher than the maximum + * possible number of validators. + * + * The peer has published an invalid consensus message. + */ + self.propagate_validation_result( + message_id, + peer_id.clone(), + MessageAcceptance::Reject, + ); + } + AttnError::NoCommitteeForSlotAndIndex { .. } => { + /* + * It is not possible to attest this the given committee in the given slot. + * + * The peer has published an invalid consensus message. + */ + self.propagate_validation_result( + message_id, + peer_id.clone(), + MessageAcceptance::Reject, + ); + } + AttnError::NotExactlyOneAggregationBitSet(_) => { + /* + * The unaggregated attestation doesn't have only one signature. + * + * The peer has published an invalid consensus message. + */ + self.propagate_validation_result( + message_id, + peer_id.clone(), + MessageAcceptance::Reject, + ); + } + AttnError::AttestsToFutureBlock { .. } => { + /* + * The beacon_block_root is from a higher slot than the attestation. + * + * The peer has published an invalid consensus message. + */ + self.propagate_validation_result( + message_id, + peer_id.clone(), + MessageAcceptance::Reject, + ); + } + + AttnError::InvalidSubnetId { received, expected } => { + /* + * The attestation was received on an incorrect subnet id. 
+ */ + debug!( + self.log, + "Received attestation on incorrect subnet"; + "expected" => format!("{:?}", expected), + "received" => format!("{:?}", received), + ); + self.propagate_validation_result( + message_id, + peer_id.clone(), + MessageAcceptance::Reject, + ); + } + AttnError::Invalid(_) => { + /* + * The attestation failed the state_processing verification. + * + * The peer has published an invalid consensus message. + */ + self.propagate_validation_result( + message_id, + peer_id.clone(), + MessageAcceptance::Reject, + ); + } + AttnError::TooManySkippedSlots { + head_block_slot, + attestation_slot, + } => { + /* + * The attestation references a head block that is too far behind the attestation slot. + * + * The message is not necessarily invalid, but we choose to ignore it. + */ + debug!( + self.log, + "Rejected long skip slot attestation"; + "head_block_slot" => head_block_slot, + "attestation_slot" => attestation_slot, + ); + // In this case we wish to penalize gossipsub peers that do this to avoid future + // attestations that have too many skip slots. + self.propagate_validation_result( + message_id, + peer_id.clone(), + MessageAcceptance::Reject, + ); + } + AttnError::BeaconChainError(e) => { + /* + * Lighthouse hit an unexpected error whilst processing the attestation. It + * should be impossible to trigger a `BeaconChainError` from the network, + * so we have a bug. + * + * It's not clear if the message is invalid/malicious. 
+ */ + error!( + self.log, + "Unable to validate aggregate"; + "peer_id" => peer_id.to_string(), + "error" => format!("{:?}", e), + ); + self.propagate_validation_result( + message_id, + peer_id.clone(), + MessageAcceptance::Ignore, + ); + } + } + + debug!( + self.log, + "Invalid attestation from network"; + "reason" => format!("{:?}", error), + "block" => format!("{}", beacon_block_root), + "peer_id" => peer_id.to_string(), + "type" => format!("{:?}", attestation_type), + ); + } +} diff --git a/beacon_node/network/src/lib.rs b/beacon_node/network/src/lib.rs index f7cc8051c8a..30795a63ef3 100644 --- a/beacon_node/network/src/lib.rs +++ b/beacon_node/network/src/lib.rs @@ -6,6 +6,7 @@ pub mod error; pub mod service; mod attestation_service; +mod beacon_processor; mod metrics; mod persisted_dht; mod router; diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index b2f789e36ad..e5167a065fa 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -1,35 +1,91 @@ +use beacon_chain::attestation_verification::Error as AttnError; pub use lighthouse_metrics::*; +lazy_static! 
{ + + /* + * Gossip subnets and scoring + */ + pub static ref PEERS_PER_PROTOCOL: Result = try_create_int_gauge_vec( + "gossipsub_peers_per_protocol", + "Peers via supported protocol", + &["protocol"] + ); + + pub static ref GOSSIPSUB_SUBSCRIBED_SUBNET_TOPIC: Result = try_create_int_gauge_vec( + "gossipsub_subscribed_subnets", + "Subnets currently subscribed to", + &["subnet"] + ); + + pub static ref GOSSIPSUB_SUBSCRIBED_PEERS_SUBNET_TOPIC: Result = try_create_int_gauge_vec( + "gossipsub_peers_per_subnet_topic_count", + "Peers subscribed per subnet topic", + &["subnet"] + ); + + pub static ref MESH_PEERS_PER_MAIN_TOPIC: Result = try_create_int_gauge_vec( + "gossipsub_mesh_peers_per_main_topic", + "Mesh peers per main topic", + &["topic_hash"] + ); + + pub static ref MESH_PEERS_PER_SUBNET_TOPIC: Result = try_create_int_gauge_vec( + "gossipsub_mesh_peers_per_subnet_topic", + "Mesh peers per subnet topic", + &["subnet"] + ); + + pub static ref AVG_GOSSIPSUB_PEER_SCORE_PER_MAIN_TOPIC: Result = try_create_int_gauge_vec( + "gossipsub_avg_peer_score_per_topic", + "Average peer's score per topic", + &["topic_hash"] + ); + + pub static ref AVG_GOSSIPSUB_PEER_SCORE_PER_SUBNET_TOPIC: Result = try_create_int_gauge_vec( + "gossipsub_avg_peer_score_per_subnet_topic", + "Average peer's score per subnet topic", + &["subnet"] + ); + + pub static ref ATTESTATIONS_PUBLISHED_PER_SUBNET_PER_SLOT: Result = try_create_int_counter_vec( + "gossipsub_attestations_published_per_subnet_per_slot", + "Failed attestation publishes per subnet", + &["subnet"] + ); +} + lazy_static! 
{ /* * Gossip Rx */ pub static ref GOSSIP_BLOCKS_RX: Result = try_create_int_counter( - "network_gossip_blocks_rx_total", + "gossipsub_blocks_rx_total", "Count of gossip blocks received" ); pub static ref GOSSIP_UNAGGREGATED_ATTESTATIONS_RX: Result = try_create_int_counter( - "network_gossip_unaggregated_attestations_rx_total", + "gossipsub_unaggregated_attestations_rx_total", "Count of gossip unaggregated attestations received" ); pub static ref GOSSIP_AGGREGATED_ATTESTATIONS_RX: Result = try_create_int_counter( - "network_gossip_aggregated_attestations_rx_total", + "gossipsub_aggregated_attestations_rx_total", "Count of gossip aggregated attestations received" ); + /* * Gossip Tx */ pub static ref GOSSIP_BLOCKS_TX: Result = try_create_int_counter( - "network_gossip_blocks_tx_total", + "gossipsub_blocks_tx_total", "Count of gossip blocks transmitted" ); pub static ref GOSSIP_UNAGGREGATED_ATTESTATIONS_TX: Result = try_create_int_counter( - "network_gossip_unaggregated_attestations_tx_total", + "gossipsub_unaggregated_attestations_tx_total", "Count of gossip unaggregated attestations transmitted" ); pub static ref GOSSIP_AGGREGATED_ATTESTATIONS_TX: Result = try_create_int_counter( - "network_gossip_aggregated_attestations_tx_total", + "gossipsub_aggregated_attestations_tx_total", "Count of gossip aggregated attestations transmitted" ); @@ -37,11 +93,309 @@ lazy_static! { * Attestation subnet subscriptions */ pub static ref SUBNET_SUBSCRIPTION_REQUESTS: Result = try_create_int_counter( - "network_subnet_subscriptions_total", + "gossipsub_subnet_subscriptions_total", "Count of validator subscription requests." ); pub static ref SUBNET_SUBSCRIPTION_AGGREGATOR_REQUESTS: Result = try_create_int_counter( - "network_subnet_subscriptions_aggregator_total", + "gossipsub_subnet_subscriptions_aggregator_total", "Count of validator subscription requests where the subscriber is an aggregator." 
); + + /* + * Gossip processor + */ + pub static ref BEACON_PROCESSOR_WORK_EVENTS_RX_COUNT: Result = try_create_int_counter_vec( + "beacon_processor_work_events_rx_count", + "Count of work events received (but not necessarily processed)", + &["type"] + ); + pub static ref BEACON_PROCESSOR_WORK_EVENTS_IGNORED_COUNT: Result = try_create_int_counter_vec( + "beacon_processor_work_events_ignored_count", + "Count of work events purposefully ignored", + &["type"] + ); + pub static ref BEACON_PROCESSOR_WORK_EVENTS_STARTED_COUNT: Result = try_create_int_counter_vec( + "beacon_processor_work_events_started_count", + "Count of work events which have been started by a worker", + &["type"] + ); + pub static ref BEACON_PROCESSOR_WORKER_TIME: Result = try_create_histogram_vec( + "beacon_processor_worker_time", + "Time taken for a worker to fully process some parcel of work.", + &["type"] + ); + pub static ref BEACON_PROCESSOR_WORKERS_SPAWNED_TOTAL: Result = try_create_int_counter( + "beacon_processor_workers_spawned_total", + "The number of workers ever spawned by the gossip processing pool." + ); + pub static ref BEACON_PROCESSOR_WORKERS_ACTIVE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_workers_active_total", + "Count of active workers in the gossip processing pool." + ); + pub static ref BEACON_PROCESSOR_IDLE_EVENTS_TOTAL: Result = try_create_int_counter( + "beacon_processor_idle_events_total", + "Count of idle events processed by the gossip processor manager." + ); + pub static ref BEACON_PROCESSOR_EVENT_HANDLING_SECONDS: Result = try_create_histogram( + "beacon_processor_event_handling_seconds", + "Time spent handling a new message and allocating it to a queue or worker." + ); + // Gossip blocks. + pub static ref BEACON_PROCESSOR_GOSSIP_BLOCK_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_gossip_block_queue_total", + "Count of blocks from gossip waiting to be verified." 
+ ); + pub static ref BEACON_PROCESSOR_GOSSIP_BLOCK_VERIFIED_TOTAL: Result = try_create_int_counter( + "beacon_processor_gossip_block_verified_total", + "Total number of gossip blocks verified for propagation." + ); + pub static ref BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL: Result = try_create_int_counter( + "beacon_processor_gossip_block_imported_total", + "Total number of gossip blocks imported to fork choice, etc." + ); + // Gossip Exits. + pub static ref BEACON_PROCESSOR_EXIT_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_exit_queue_total", + "Count of exits from gossip waiting to be verified." + ); + pub static ref BEACON_PROCESSOR_EXIT_VERIFIED_TOTAL: Result = try_create_int_counter( + "beacon_processor_exit_verified_total", + "Total number of voluntary exits verified for propagation." + ); + pub static ref BEACON_PROCESSOR_EXIT_IMPORTED_TOTAL: Result = try_create_int_counter( + "beacon_processor_exit_imported_total", + "Total number of voluntary exits imported to the op pool." + ); + // Gossip proposer slashings. + pub static ref BEACON_PROCESSOR_PROPOSER_SLASHING_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_proposer_slashing_queue_total", + "Count of proposer slashings from gossip waiting to be verified." + ); + pub static ref BEACON_PROCESSOR_PROPOSER_SLASHING_VERIFIED_TOTAL: Result = try_create_int_counter( + "beacon_processor_proposer_slashing_verified_total", + "Total number of proposer slashings verified for propagation." + ); + pub static ref BEACON_PROCESSOR_PROPOSER_SLASHING_IMPORTED_TOTAL: Result = try_create_int_counter( + "beacon_processor_proposer_slashing_imported_total", + "Total number of proposer slashings imported to the op pool." + ); + // Gossip attester slashings. + pub static ref BEACON_PROCESSOR_ATTESTER_SLASHING_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_attester_slashing_queue_total", + "Count of attester slashings from gossip waiting to be verified." 
+ ); + pub static ref BEACON_PROCESSOR_ATTESTER_SLASHING_VERIFIED_TOTAL: Result = try_create_int_counter( + "beacon_processor_attester_slashing_verified_total", + "Total number of attester slashings verified for propagation." + ); + pub static ref BEACON_PROCESSOR_ATTESTER_SLASHING_IMPORTED_TOTAL: Result = try_create_int_counter( + "beacon_processor_attester_slashing_imported_total", + "Total number of attester slashings imported to the op pool." + ); + pub static ref BEACON_PROCESSOR_ATTESTER_SLASHING_ERROR_TOTAL: Result = try_create_int_counter( + "beacon_processor_attester_slashing_error_total", + "Total number of attester slashings that raised an error during processing." + ); + // Rpc blocks. + pub static ref BEACON_PROCESSOR_RPC_BLOCK_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_rpc_block_queue_total", + "Count of blocks from the rpc waiting to be verified." + ); + pub static ref BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL: Result = try_create_int_counter( + "beacon_processor_rpc_block_imported_total", + "Total number of gossip blocks imported to fork choice, etc." + ); + // Chain segments. + pub static ref BEACON_PROCESSOR_CHAIN_SEGMENT_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_chain_segment_queue_total", + "Count of chain segments from the rpc waiting to be verified." + ); + pub static ref BEACON_PROCESSOR_CHAIN_SEGMENT_SUCCESS_TOTAL: Result = try_create_int_counter( + "beacon_processor_chain_segment_success_total", + "Total number of chain segments successfully processed." + ); + pub static ref BEACON_PROCESSOR_CHAIN_SEGMENT_FAILED_TOTAL: Result = try_create_int_counter( + "beacon_processor_chain_segment_failed_total", + "Total number of chain segments that failed processing." + ); + // Unaggregated attestations. + pub static ref BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_unaggregated_attestation_queue_total", + "Count of unagg. 
attestations waiting to be processed." + ); + pub static ref BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_VERIFIED_TOTAL: Result = try_create_int_counter( + "beacon_processor_unaggregated_attestation_verified_total", + "Total number of unaggregated attestations verified for gossip." + ); + pub static ref BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_IMPORTED_TOTAL: Result = try_create_int_counter( + "beacon_processor_unaggregated_attestation_imported_total", + "Total number of unaggregated attestations imported to fork choice, etc." + ); + // Aggregated attestations. + pub static ref BEACON_PROCESSOR_AGGREGATED_ATTESTATION_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_aggregated_attestation_queue_total", + "Count of agg. attestations waiting to be processed." + ); + pub static ref BEACON_PROCESSOR_AGGREGATED_ATTESTATION_VERIFIED_TOTAL: Result = try_create_int_counter( + "beacon_processor_aggregated_attestation_verified_total", + "Total number of aggregated attestations verified for gossip." + ); + pub static ref BEACON_PROCESSOR_AGGREGATED_ATTESTATION_IMPORTED_TOTAL: Result = try_create_int_counter( + "beacon_processor_aggregated_attestation_imported_total", + "Total number of aggregated attestations imported to fork choice, etc." 
+ ); + + /* + * Attestation Errors + */ + pub static ref GOSSIP_ATTESTATION_ERROR_FUTURE_EPOCH: Result = try_create_int_counter( + "gossipsub_attestation_error_future_epoch", + "Count of a specific error type (see metric name)" + ); + pub static ref GOSSIP_ATTESTATION_ERROR_PAST_EPOCH: Result = try_create_int_counter( + "gossipsub_attestation_error_past_epoch", + "Count of a specific error type (see metric name)" + ); + pub static ref GOSSIP_ATTESTATION_ERROR_FUTURE_SLOT: Result = try_create_int_counter( + "gossipsub_attestation_error_future_slot", + "Count of a specific error type (see metric name)" + ); + pub static ref GOSSIP_ATTESTATION_ERROR_PAST_SLOT: Result = try_create_int_counter( + "gossipsub_attestation_error_past_slot", + "Count of a specific error type (see metric name)" + ); + pub static ref GOSSIP_ATTESTATION_ERROR_INVALID_SELECTION_PROOF: Result = try_create_int_counter( + "gossipsub_attestation_error_invalid_selection_proof", + "Count of a specific error type (see metric name)" + ); + pub static ref GOSSIP_ATTESTATION_ERROR_INVALID_SIGNATURE: Result = try_create_int_counter( + "gossipsub_attestation_error_invalid_signature", + "Count of a specific error type (see metric name)" + ); + pub static ref GOSSIP_ATTESTATION_ERROR_EMPTY_AGGREGATION_BITFIELD: Result = try_create_int_counter( + "gossipsub_attestation_error_empty_aggregation_bitfield", + "Count of a specific error type (see metric name)" + ); + pub static ref GOSSIP_ATTESTATION_ERROR_AGGREGATOR_PUBKEY_UNKNOWN: Result = try_create_int_counter( + "gossipsub_attestation_error_aggregator_pubkey_unknown", + "Count of a specific error type (see metric name)" + ); + pub static ref GOSSIP_ATTESTATION_ERROR_AGGREGATOR_NOT_IN_COMMITTEE: Result = try_create_int_counter( + "gossipsub_attestation_error_aggregator_not_in_committee", + "Count of a specific error type (see metric name)" + ); + pub static ref GOSSIP_ATTESTATION_ERROR_ATTESTATION_ALREADY_KNOWN: Result = try_create_int_counter( + 
"gossipsub_attestation_error_attestation_already_known", + "Count of a specific error type (see metric name)" + ); + pub static ref GOSSIP_ATTESTATION_ERROR_AGGREGATOR_ALREADY_KNOWN: Result = try_create_int_counter( + "gossipsub_attestation_error_aggregator_already_known", + "Count of a specific error type (see metric name)" + ); + pub static ref GOSSIP_ATTESTATION_ERROR_PRIOR_ATTESTATION_KNOWN: Result = try_create_int_counter( + "gossipsub_attestation_error_prior_attestation_known", + "Count of a specific error type (see metric name)" + ); + pub static ref GOSSIP_ATTESTATION_ERROR_VALIDATOR_INDEX_TOO_HIGH: Result = try_create_int_counter( + "gossipsub_attestation_error_validator_index_too_high", + "Count of a specific error type (see metric name)" + ); + pub static ref GOSSIP_ATTESTATION_ERROR_UNKNOWN_HEAD_BLOCK: Result = try_create_int_counter( + "gossipsub_attestation_error_unknown_head_block", + "Count of a specific error type (see metric name)" + ); + pub static ref GOSSIP_ATTESTATION_ERROR_UNKNOWN_TARGET_ROOT: Result = try_create_int_counter( + "gossipsub_attestation_error_unknown_target_root", + "Count of a specific error type (see metric name)" + ); + pub static ref GOSSIP_ATTESTATION_ERROR_BAD_TARGET_EPOCH: Result = try_create_int_counter( + "gossipsub_attestation_error_bad_target_epoch", + "Count of a specific error type (see metric name)" + ); + pub static ref GOSSIP_ATTESTATION_ERROR_NO_COMMITTEE_FOR_SLOT_AND_INDEX: Result = try_create_int_counter( + "gossipsub_attestation_error_no_committee_for_slot_and_index", + "Count of a specific error type (see metric name)" + ); + pub static ref GOSSIP_ATTESTATION_ERROR_NOT_EXACTLY_ONE_AGGREGATION_BIT_SET: Result = try_create_int_counter( + "gossipsub_attestation_error_not_exactly_one_aggregation_bit_set", + "Count of a specific error type (see metric name)" + ); + pub static ref GOSSIP_ATTESTATION_ERROR_ATTESTS_TO_FUTURE_BLOCK: Result = try_create_int_counter( + 
"gossipsub_attestation_error_attests_to_future_block", + "Count of a specific error type (see metric name)" + ); + pub static ref GOSSIP_ATTESTATION_ERROR_INVALID_SUBNET_ID: Result = try_create_int_counter( + "gossipsub_attestation_error_invalid_subnet_id", + "Count of a specific error type (see metric name)" + ); + pub static ref GOSSIP_ATTESTATION_ERROR_INVALID_STATE_PROCESSING: Result = try_create_int_counter( + "gossipsub_attestation_error_invalid_state_processing", + "Count of a specific error type (see metric name)" + ); + pub static ref GOSSIP_ATTESTATION_ERROR_INVALID_TOO_MANY_SKIPPED_SLOTS: Result = try_create_int_counter( + "gossipsub_attestation_error_invalid_too_many_skipped_slots", + "Count of a specific error type (see metric name)" + ); + pub static ref GOSSIP_ATTESTATION_ERROR_BEACON_CHAIN_ERROR: Result = try_create_int_counter( + "gossipsub_attestation_error_beacon_chain_error", + "Count of a specific error type (see metric name)" + ); +} + +pub fn register_attestation_error(error: &AttnError) { + match error { + AttnError::FutureEpoch { .. } => inc_counter(&GOSSIP_ATTESTATION_ERROR_FUTURE_EPOCH), + AttnError::PastEpoch { .. } => inc_counter(&GOSSIP_ATTESTATION_ERROR_PAST_EPOCH), + AttnError::FutureSlot { .. } => inc_counter(&GOSSIP_ATTESTATION_ERROR_FUTURE_SLOT), + AttnError::PastSlot { .. } => inc_counter(&GOSSIP_ATTESTATION_ERROR_PAST_SLOT), + AttnError::InvalidSelectionProof { .. } => { + inc_counter(&GOSSIP_ATTESTATION_ERROR_INVALID_SELECTION_PROOF) + } + AttnError::InvalidSignature => inc_counter(&GOSSIP_ATTESTATION_ERROR_INVALID_SIGNATURE), + AttnError::EmptyAggregationBitfield => { + inc_counter(&GOSSIP_ATTESTATION_ERROR_EMPTY_AGGREGATION_BITFIELD) + } + AttnError::AggregatorPubkeyUnknown(_) => { + inc_counter(&GOSSIP_ATTESTATION_ERROR_AGGREGATOR_PUBKEY_UNKNOWN) + } + AttnError::AggregatorNotInCommittee { .. } => { + inc_counter(&GOSSIP_ATTESTATION_ERROR_AGGREGATOR_NOT_IN_COMMITTEE) + } + AttnError::AttestationAlreadyKnown { .. 
} => { + inc_counter(&GOSSIP_ATTESTATION_ERROR_ATTESTATION_ALREADY_KNOWN) + } + AttnError::AggregatorAlreadyKnown(_) => { + inc_counter(&GOSSIP_ATTESTATION_ERROR_AGGREGATOR_ALREADY_KNOWN) + } + AttnError::PriorAttestationKnown { .. } => { + inc_counter(&GOSSIP_ATTESTATION_ERROR_PRIOR_ATTESTATION_KNOWN) + } + AttnError::ValidatorIndexTooHigh(_) => { + inc_counter(&GOSSIP_ATTESTATION_ERROR_VALIDATOR_INDEX_TOO_HIGH) + } + AttnError::UnknownHeadBlock { .. } => { + inc_counter(&GOSSIP_ATTESTATION_ERROR_UNKNOWN_HEAD_BLOCK) + } + AttnError::UnknownTargetRoot(_) => { + inc_counter(&GOSSIP_ATTESTATION_ERROR_UNKNOWN_TARGET_ROOT) + } + AttnError::BadTargetEpoch => inc_counter(&GOSSIP_ATTESTATION_ERROR_BAD_TARGET_EPOCH), + AttnError::NoCommitteeForSlotAndIndex { .. } => { + inc_counter(&GOSSIP_ATTESTATION_ERROR_NO_COMMITTEE_FOR_SLOT_AND_INDEX) + } + AttnError::NotExactlyOneAggregationBitSet(_) => { + inc_counter(&GOSSIP_ATTESTATION_ERROR_NOT_EXACTLY_ONE_AGGREGATION_BIT_SET) + } + AttnError::AttestsToFutureBlock { .. } => { + inc_counter(&GOSSIP_ATTESTATION_ERROR_ATTESTS_TO_FUTURE_BLOCK) + } + AttnError::InvalidSubnetId { .. } => { + inc_counter(&GOSSIP_ATTESTATION_ERROR_INVALID_SUBNET_ID) + } + AttnError::Invalid(_) => inc_counter(&GOSSIP_ATTESTATION_ERROR_INVALID_STATE_PROCESSING), + AttnError::TooManySkippedSlots { .. 
} => { + inc_counter(&GOSSIP_ATTESTATION_ERROR_INVALID_TOO_MANY_SKIPPED_SLOTS) + } + AttnError::BeaconChainError(_) => inc_counter(&GOSSIP_ATTESTATION_ERROR_BEACON_CHAIN_ERROR), + } } diff --git a/beacon_node/network/src/router/mod.rs b/beacon_node/network/src/router/mod.rs index e487330f8d6..0fa2494ff9e 100644 --- a/beacon_node/network/src/router/mod.rs +++ b/beacon_node/network/src/router/mod.rs @@ -9,14 +9,14 @@ pub mod processor; use crate::error; use crate::service::NetworkMessage; -use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError}; +use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2_libp2p::{ rpc::{RPCError, RequestId}, MessageId, NetworkGlobals, PeerId, PeerRequestId, PubsubMessage, Request, Response, }; use futures::prelude::*; use processor::Processor; -use slog::{debug, info, o, trace, warn}; +use slog::{debug, o, trace}; use std::sync::Arc; use tokio::sync::mpsc; use types::EthSpec; @@ -26,8 +26,6 @@ use types::EthSpec; /// passing them to the internal message processor. The message processor spawns a syncing thread /// which manages which blocks need to be requested and processed. pub struct Router { - /// A channel to the network service to allow for gossip propagation. - network_send: mpsc::UnboundedSender>, /// Access to the peer db for logging. network_globals: Arc>, /// Processes validated and decoded messages from the network. Has direct access to the @@ -89,13 +87,12 @@ impl Router { executor.clone(), beacon_chain, network_globals.clone(), - network_send.clone(), + network_send, &log, ); // generate the Message handler let mut handler = Router { - network_send, network_globals, processor, log: message_handler_log, @@ -215,56 +212,24 @@ impl Router { match gossip_message { // Attestations should never reach the router. 
PubsubMessage::AggregateAndProofAttestation(aggregate_and_proof) => { - if let Some(gossip_verified) = self - .processor - .verify_aggregated_attestation_for_gossip(peer_id.clone(), *aggregate_and_proof) - { - self.propagate_message(id, peer_id.clone()); - self.processor - .import_aggregated_attestation(peer_id, gossip_verified); - } + self.processor + .on_aggregated_attestation_gossip(id, peer_id, *aggregate_and_proof); } PubsubMessage::Attestation(subnet_attestation) => { - if let Some(gossip_verified) = - self.processor.verify_unaggregated_attestation_for_gossip( - peer_id.clone(), - subnet_attestation.1.clone(), - subnet_attestation.0, - ) - { - self.propagate_message(id, peer_id.clone()); - if should_process { - self.processor - .import_unaggregated_attestation(peer_id, gossip_verified); - } - } + self.processor.on_unaggregated_attestation_gossip( + id, + peer_id, + subnet_attestation.1.clone(), + subnet_attestation.0, + should_process, + ); } PubsubMessage::BeaconBlock(block) => { - match self.processor.should_forward_block(block) { - Ok(verified_block) => { - info!(self.log, "New block received"; "slot" => verified_block.block.slot(), "hash" => verified_block.block_root.to_string()); - self.propagate_message(id, peer_id.clone()); - self.processor.on_block_gossip(peer_id, verified_block); - } - Err(BlockError::ParentUnknown(block)) => { - self.processor.on_unknown_parent(peer_id, block); - } - Err(e) => { - // performing a parent lookup - warn!(self.log, "Could not verify block for gossip"; - "error" => format!("{:?}", e)); - } - } + self.processor.on_block_gossip(id, peer_id, block); } PubsubMessage::VoluntaryExit(exit) => { debug!(self.log, "Received a voluntary exit"; "peer_id" => format!("{}", peer_id)); - if let Some(verified_exit) = self - .processor - .verify_voluntary_exit_for_gossip(&peer_id, *exit) - { - self.propagate_message(id, peer_id); - self.processor.import_verified_voluntary_exit(verified_exit); - } + 
self.processor.on_voluntary_exit_gossip(id, peer_id, exit); } PubsubMessage::ProposerSlashing(proposer_slashing) => { debug!( @@ -272,14 +237,8 @@ impl Router { "Received a proposer slashing"; "peer_id" => format!("{}", peer_id) ); - if let Some(verified_proposer_slashing) = self - .processor - .verify_proposer_slashing_for_gossip(&peer_id, *proposer_slashing) - { - self.propagate_message(id, peer_id); - self.processor - .import_verified_proposer_slashing(verified_proposer_slashing); - } + self.processor + .on_proposer_slashing_gossip(id, peer_id, proposer_slashing); } PubsubMessage::AttesterSlashing(attester_slashing) => { debug!( @@ -287,30 +246,9 @@ impl Router { "Received a attester slashing"; "peer_id" => format!("{}", peer_id) ); - if let Some(verified_attester_slashing) = self - .processor - .verify_attester_slashing_for_gossip(&peer_id, *attester_slashing) - { - self.propagate_message(id, peer_id); - self.processor - .import_verified_attester_slashing(verified_attester_slashing); - } + self.processor + .on_attester_slashing_gossip(id, peer_id, attester_slashing); } } } - - /// Informs the network service that the message should be forwarded to other peers (is valid). 
- fn propagate_message(&mut self, message_id: MessageId, propagation_source: PeerId) { - self.network_send - .send(NetworkMessage::Validate { - propagation_source, - message_id, - }) - .unwrap_or_else(|_| { - warn!( - self.log, - "Could not send propagation request to the network service" - ) - }); - } } diff --git a/beacon_node/network/src/router/processor.rs b/beacon_node/network/src/router/processor.rs index 9d11e785445..f99cea0bb6f 100644 --- a/beacon_node/network/src/router/processor.rs +++ b/beacon_node/network/src/router/processor.rs @@ -1,20 +1,16 @@ +use crate::beacon_processor::{ + BeaconProcessor, WorkEvent as BeaconWorkEvent, MAX_WORK_EVENT_QUEUE_LEN, +}; use crate::service::NetworkMessage; use crate::sync::{PeerSyncInfo, SyncMessage}; -use beacon_chain::{ - attestation_verification::{ - Error as AttnError, SignatureVerifiedAttestation, VerifiedAggregatedAttestation, - VerifiedUnaggregatedAttestation, - }, - observed_operations::ObservationOutcome, - BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, ForkChoiceError, - GossipVerifiedBlock, -}; +use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2_libp2p::rpc::*; -use eth2_libp2p::{NetworkGlobals, PeerAction, PeerId, PeerRequestId, Request, Response}; +use eth2_libp2p::{ + MessageId, NetworkGlobals, PeerAction, PeerId, PeerRequestId, Request, Response, +}; use itertools::process_results; use slog::{debug, error, o, trace, warn}; -use ssz::Encode; -use state_processing::SigVerifiedOp; +use std::cmp; use std::sync::Arc; use tokio::sync::mpsc; use types::{ @@ -22,8 +18,6 @@ use types::{ SignedAggregateAndProof, SignedBeaconBlock, SignedVoluntaryExit, Slot, SubnetId, }; -//TODO: Rate limit requests - /// If a block is more than `FUTURE_SLOT_TOLERANCE` slots ahead of our slot clock, we drop it. /// Otherwise we queue it. 
pub(crate) const FUTURE_SLOT_TOLERANCE: u64 = 1; @@ -37,6 +31,8 @@ pub struct Processor { sync_send: mpsc::UnboundedSender>, /// A network context to return and handle RPC requests. network: HandlerNetworkContext, + /// A multi-threaded, non-blocking processor for applying messages to the beacon chain. + beacon_processor_send: mpsc::Sender>, /// The `RPCHandler` logger. log: slog::Logger, } @@ -51,20 +47,36 @@ impl Processor { log: &slog::Logger, ) -> Self { let sync_logger = log.new(o!("service"=> "sync")); + let (beacon_processor_send, beacon_processor_receive) = + mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN); // spawn the sync thread let sync_send = crate::sync::manager::spawn( - executor, + executor.clone(), beacon_chain.clone(), - network_globals, + network_globals.clone(), network_send.clone(), + beacon_processor_send.clone(), sync_logger, ); + BeaconProcessor { + beacon_chain: Arc::downgrade(&beacon_chain), + network_tx: network_send.clone(), + sync_tx: sync_send.clone(), + network_globals, + executor, + max_workers: cmp::max(1, num_cpus::get()), + current_workers: 0, + log: log.clone(), + } + .spawn_manager(beacon_processor_receive); + Processor { chain: beacon_chain, sync_send, network: HandlerNetworkContext::new(network_send, log.clone()), + beacon_processor_send, log: log.clone(), } } @@ -499,23 +511,6 @@ impl Processor { } } - /// Template function to be called on a block to determine if the block should be propagated - /// across the network. - pub fn should_forward_block( - &mut self, - block: Box>, - ) -> Result, BlockError> { - self.chain.verify_block_for_gossip(*block) - } - - pub fn on_unknown_parent( - &mut self, - peer_id: PeerId, - block: Box>, - ) { - self.send_to_sync(SyncMessage::UnknownBlock(peer_id, block)); - } - /// Process a gossip message declaring a new block. /// /// Attempts to apply to block to the beacon chain. May queue the block for later processing. 
@@ -523,544 +518,134 @@ impl Processor { /// Returns a `bool` which, if `true`, indicates we should forward the block to our peers. pub fn on_block_gossip( &mut self, + message_id: MessageId, peer_id: PeerId, - verified_block: GossipVerifiedBlock, - ) -> bool { - let block = Box::new(verified_block.block.clone()); - match self.chain.process_block(verified_block) { - Ok(_block_root) => { - trace!( - self.log, - "Gossipsub block processed"; - "peer_id" => peer_id.to_string() - ); - - // TODO: It would be better if we can run this _after_ we publish the block to - // reduce block propagation latency. - // - // The `MessageHandler` would be the place to put this, however it doesn't seem - // to have a reference to the `BeaconChain`. I will leave this for future - // works. - match self.chain.fork_choice() { - Ok(()) => trace!( - self.log, - "Fork choice success"; - "location" => "block gossip" - ), - Err(e) => error!( - self.log, - "Fork choice failed"; - "error" => format!("{:?}", e), - "location" => "block gossip" - ), - } - } - Err(BlockError::ParentUnknown { .. }) => { - // Inform the sync manager to find parents for this block - // This should not occur. 
It should be checked by `should_forward_block` + block: Box>, + ) { + self.beacon_processor_send + .try_send(BeaconWorkEvent::gossip_beacon_block( + message_id, peer_id, block, + )) + .unwrap_or_else(|e| { error!( - self.log, - "Block with unknown parent attempted to be processed"; - "peer_id" => peer_id.to_string() - ); - self.send_to_sync(SyncMessage::UnknownBlock(peer_id, block)); - } - other => { - warn!( - self.log, - "Invalid gossip beacon block"; - "outcome" => format!("{:?}", other), - "block root" => format!("{}", block.canonical_root()), - "block slot" => block.slot() - ); - trace!( - self.log, - "Invalid gossip beacon block ssz"; - "ssz" => format!("0x{}", hex::encode(block.as_ssz_bytes())), - ); - } - } - // TODO: Update with correct block gossip checking - true + &self.log, + "Unable to send to gossip processor"; + "type" => "block gossip", + "error" => e.to_string(), + ) + }) } - /// Handle an error whilst verifying an `Attestation` or `SignedAggregateAndProof` from the - /// network. - pub fn handle_attestation_verification_failure( + pub fn on_unaggregated_attestation_gossip( &mut self, + message_id: MessageId, peer_id: PeerId, - beacon_block_root: Hash256, - attestation_type: &str, - error: AttnError, + unaggregated_attestation: Attestation, + subnet_id: SubnetId, + should_process: bool, ) { - debug!( - self.log, - "Invalid attestation from network"; - "reason" => format!("{:?}", error), - "block" => format!("{}", beacon_block_root), - "peer_id" => peer_id.to_string(), - "type" => format!("{:?}", attestation_type), - ); - - match error { - AttnError::FutureEpoch { .. } - | AttnError::PastEpoch { .. } - | AttnError::FutureSlot { .. } - | AttnError::PastSlot { .. } => { - /* - * These errors can be triggered by a mismatch between our slot and the peer. - * - * - * The peer has published an invalid consensus message, _only_ if we trust our own clock. - */ - } - AttnError::InvalidSelectionProof { .. 
} | AttnError::InvalidSignature => { - /* - * These errors are caused by invalid signatures. - * - * The peer has published an invalid consensus message. - */ - } - AttnError::EmptyAggregationBitfield => { - /* - * The aggregate had no signatures and is therefore worthless. - * - * Whilst we don't gossip this attestation, this act is **not** a clear - * violation of the spec nor indication of fault. - * - * This may change soon. Reference: - * - * https://github.com/ethereum/eth2.0-specs/pull/1732 - */ - } - AttnError::AggregatorPubkeyUnknown(_) => { - /* - * The aggregator index was higher than any known validator index. This is - * possible in two cases: - * - * 1. The attestation is malformed - * 2. The attestation attests to a beacon_block_root that we do not know. - * - * It should be impossible to reach (2) without triggering - * `AttnError::UnknownHeadBlock`, so we can safely assume the peer is - * faulty. - * - * The peer has published an invalid consensus message. - */ - } - AttnError::AggregatorNotInCommittee { .. } => { - /* - * The aggregator index was higher than any known validator index. This is - * possible in two cases: - * - * 1. The attestation is malformed - * 2. The attestation attests to a beacon_block_root that we do not know. - * - * It should be impossible to reach (2) without triggering - * `AttnError::UnknownHeadBlock`, so we can safely assume the peer is - * faulty. - * - * The peer has published an invalid consensus message. - */ - } - AttnError::AttestationAlreadyKnown { .. } => { - /* - * The aggregate attestation has already been observed on the network or in - * a block. - * - * The peer is not necessarily faulty. - */ - } - AttnError::AggregatorAlreadyKnown(_) => { - /* - * There has already been an aggregate attestation seen from this - * aggregator index. - * - * The peer is not necessarily faulty. - */ - } - AttnError::PriorAttestationKnown { .. 
} => { - /* - * We have already seen an attestation from this validator for this epoch. - * - * The peer is not necessarily faulty. - */ - } - AttnError::ValidatorIndexTooHigh(_) => { - /* - * The aggregator index (or similar field) was higher than the maximum - * possible number of validators. - * - * The peer has published an invalid consensus message. - */ - } - AttnError::UnknownHeadBlock { beacon_block_root } => { - // Note: its a little bit unclear as to whether or not this block is unknown or - // just old. See: - // - // https://github.com/sigp/lighthouse/issues/1039 - - // TODO: Maintain this attestation and re-process once sync completes - debug!( - self.log, - "Attestation for unknown block"; - "peer_id" => peer_id.to_string(), - "block" => format!("{}", beacon_block_root) - ); - // we don't know the block, get the sync manager to handle the block lookup - self.send_to_sync(SyncMessage::UnknownBlockHash(peer_id, beacon_block_root)); - } - AttnError::UnknownTargetRoot(_) => { - /* - * The block indicated by the target root is not known to us. - * - * We should always get `AttnError::UnknwonHeadBlock` before we get this - * error, so this means we can get this error if: - * - * 1. The target root does not represent a valid block. - * 2. We do not have the target root in our DB. - * - * For (2), we should only be processing attestations when we should have - * all the available information. Note: if we do a weak-subjectivity sync - * it's possible that this situation could occur, but I think it's - * unlikely. For now, we will declare this to be an invalid message> - * - * The peer has published an invalid consensus message. - */ - } - AttnError::BadTargetEpoch => { - /* - * The aggregator index (or similar field) was higher than the maximum - * possible number of validators. - * - * The peer has published an invalid consensus message. - */ - } - AttnError::NoCommitteeForSlotAndIndex { .. 
} => { - /* - * It is not possible to attest this the given committee in the given slot. - * - * The peer has published an invalid consensus message. - */ - } - AttnError::NotExactlyOneAggregationBitSet(_) => { - /* - * The unaggregated attestation doesn't have only one signature. - * - * The peer has published an invalid consensus message. - */ - } - AttnError::AttestsToFutureBlock { .. } => { - /* - * The beacon_block_root is from a higher slot than the attestation. - * - * The peer has published an invalid consensus message. - */ - } - - AttnError::InvalidSubnetId { received, expected } => { - /* - * The attestation was received on an incorrect subnet id. - */ - debug!( - self.log, - "Received attestation on incorrect subnet"; - "expected" => format!("{:?}", expected), - "received" => format!("{:?}", received), - ) - } - AttnError::Invalid(_) => { - /* - * The attestation failed the state_processing verification. - * - * The peer has published an invalid consensus message. - */ - } - AttnError::BeaconChainError(e) => { - /* - * Lighthouse hit an unexpected error whilst processing the attestation. It - * should be impossible to trigger a `BeaconChainError` from the network, - * so we have a bug. - * - * It's not clear if the message is invalid/malicious. - */ + self.beacon_processor_send + .try_send(BeaconWorkEvent::unaggregated_attestation( + message_id, + peer_id, + unaggregated_attestation, + subnet_id, + should_process, + )) + .unwrap_or_else(|e| { error!( - self.log, - "Unable to validate aggregate"; - "peer_id" => peer_id.to_string(), - "error" => format!("{:?}", e), - ); - } - } - } - - pub fn verify_aggregated_attestation_for_gossip( - &mut self, - peer_id: PeerId, - aggregate_and_proof: SignedAggregateAndProof, - ) -> Option> { - // This is provided to the error handling function to assist with debugging. 
- let beacon_block_root = aggregate_and_proof.message.aggregate.data.beacon_block_root; - - self.chain - .verify_aggregated_attestation_for_gossip(aggregate_and_proof) - .map_err(|e| { - self.handle_attestation_verification_failure( - peer_id, - beacon_block_root, - "aggregated", - e, + &self.log, + "Unable to send to gossip processor"; + "type" => "unaggregated attestation gossip", + "error" => e.to_string(), ) }) - .ok() } - pub fn import_aggregated_attestation( + pub fn on_aggregated_attestation_gossip( &mut self, + message_id: MessageId, peer_id: PeerId, - verified_attestation: VerifiedAggregatedAttestation, + aggregate: SignedAggregateAndProof, ) { - // This is provided to the error handling function to assist with debugging. - let beacon_block_root = verified_attestation.attestation().data.beacon_block_root; - - self.apply_attestation_to_fork_choice( - peer_id.clone(), - beacon_block_root, - &verified_attestation, - ); - - if let Err(e) = self.chain.add_to_block_inclusion_pool(verified_attestation) { - debug!( - self.log, - "Attestation invalid for op pool"; - "reason" => format!("{:?}", e), - "peer" => peer_id.to_string(), - "beacon_block_root" => format!("{:?}", beacon_block_root) - ) - } + self.beacon_processor_send + .try_send(BeaconWorkEvent::aggregated_attestation( + message_id, peer_id, aggregate, + )) + .unwrap_or_else(|e| { + error!( + &self.log, + "Unable to send to gossip processor"; + "type" => "aggregated attestation gossip", + "error" => e.to_string(), + ) + }) } - pub fn verify_unaggregated_attestation_for_gossip( + pub fn on_voluntary_exit_gossip( &mut self, + message_id: MessageId, peer_id: PeerId, - unaggregated_attestation: Attestation, - subnet_id: SubnetId, - ) -> Option> { - // This is provided to the error handling function to assist with debugging. 
- let beacon_block_root = unaggregated_attestation.data.beacon_block_root; - - self.chain - .verify_unaggregated_attestation_for_gossip(unaggregated_attestation, subnet_id) - .map_err(|e| { - self.handle_attestation_verification_failure( - peer_id, - beacon_block_root, - "unaggregated", - e, + voluntary_exit: Box, + ) { + self.beacon_processor_send + .try_send(BeaconWorkEvent::gossip_voluntary_exit( + message_id, + peer_id, + voluntary_exit, + )) + .unwrap_or_else(|e| { + error!( + &self.log, + "Unable to send to gossip processor"; + "type" => "voluntary exit gossip", + "error" => e.to_string(), ) }) - .ok() } - pub fn import_unaggregated_attestation( + pub fn on_proposer_slashing_gossip( &mut self, + message_id: MessageId, peer_id: PeerId, - verified_attestation: VerifiedUnaggregatedAttestation, + proposer_slashing: Box, ) { - // This is provided to the error handling function to assist with debugging. - let beacon_block_root = verified_attestation.attestation().data.beacon_block_root; - - self.apply_attestation_to_fork_choice( - peer_id.clone(), - beacon_block_root, - &verified_attestation, - ); - - if let Err(e) = self - .chain - .add_to_naive_aggregation_pool(verified_attestation) - { - debug!( - self.log, - "Attestation invalid for agg pool"; - "reason" => format!("{:?}", e), - "peer" => peer_id.to_string(), - "beacon_block_root" => format!("{:?}", beacon_block_root) - ) - } + self.beacon_processor_send + .try_send(BeaconWorkEvent::gossip_proposer_slashing( + message_id, + peer_id, + proposer_slashing, + )) + .unwrap_or_else(|e| { + error!( + &self.log, + "Unable to send to gossip processor"; + "type" => "proposer slashing gossip", + "error" => e.to_string(), + ) + }) } - /// Apply the attestation to fork choice, suppressing errors. - /// - /// We suppress the errors when adding an attestation to fork choice since the spec - /// permits gossiping attestations that are invalid to be applied to fork choice. 
- /// - /// An attestation that is invalid for fork choice can still be included in a block. - /// - /// Reference: - /// https://github.com/ethereum/eth2.0-specs/issues/1408#issuecomment-617599260 - fn apply_attestation_to_fork_choice<'a>( - &self, + pub fn on_attester_slashing_gossip( + &mut self, + message_id: MessageId, peer_id: PeerId, - beacon_block_root: Hash256, - attestation: &'a impl SignatureVerifiedAttestation, + attester_slashing: Box>, ) { - if let Err(e) = self.chain.apply_attestation_to_fork_choice(attestation) { - match e { - BeaconChainError::ForkChoiceError(ForkChoiceError::InvalidAttestation(e)) => { - debug!( - self.log, - "Attestation invalid for fork choice"; - "reason" => format!("{:?}", e), - "peer" => peer_id.to_string(), - "beacon_block_root" => format!("{:?}", beacon_block_root) - ) - } - e => error!( - self.log, - "Error applying attestation to fork choice"; - "reason" => format!("{:?}", e), - "peer" => peer_id.to_string(), - "beacon_block_root" => format!("{:?}", beacon_block_root) - ), - } - } - } - - /// Verify a voluntary exit before gossiping or processing it. - /// - /// Errors are logged at debug level. - pub fn verify_voluntary_exit_for_gossip( - &self, - peer_id: &PeerId, - voluntary_exit: SignedVoluntaryExit, - ) -> Option> { - let validator_index = voluntary_exit.message.validator_index; - - match self.chain.verify_voluntary_exit_for_gossip(voluntary_exit) { - Ok(ObservationOutcome::New(sig_verified_exit)) => Some(sig_verified_exit), - Ok(ObservationOutcome::AlreadyKnown) => { - debug!( - self.log, - "Dropping exit for already exiting validator"; - "validator_index" => validator_index, - "peer" => peer_id.to_string() - ); - None - } - Err(e) => { - debug!( - self.log, - "Dropping invalid exit"; - "validator_index" => validator_index, - "peer" => peer_id.to_string(), - "error" => format!("{:?}", e) - ); - None - } - } - } - - /// Import a verified exit into the op pool. 
- pub fn import_verified_voluntary_exit( - &self, - verified_voluntary_exit: SigVerifiedOp, - ) { - self.chain.import_voluntary_exit(verified_voluntary_exit); - debug!(self.log, "Successfully imported voluntary exit"); - } - - /// Verify a proposer slashing before gossiping or processing it. - /// - /// Errors are logged at debug level. - pub fn verify_proposer_slashing_for_gossip( - &self, - peer_id: &PeerId, - proposer_slashing: ProposerSlashing, - ) -> Option> { - let validator_index = proposer_slashing.signed_header_1.message.proposer_index; - - match self - .chain - .verify_proposer_slashing_for_gossip(proposer_slashing) - { - Ok(ObservationOutcome::New(verified_slashing)) => Some(verified_slashing), - Ok(ObservationOutcome::AlreadyKnown) => { - debug!( - self.log, - "Dropping proposer slashing"; - "reason" => "Already seen a proposer slashing for that validator", - "validator_index" => validator_index, - "peer" => peer_id.to_string() - ); - None - } - Err(e) => { - debug!( - self.log, - "Dropping invalid proposer slashing"; - "validator_index" => validator_index, - "peer" => peer_id.to_string(), - "error" => format!("{:?}", e) - ); - None - } - } - } - - /// Import a verified proposer slashing into the op pool. - pub fn import_verified_proposer_slashing( - &self, - proposer_slashing: SigVerifiedOp, - ) { - self.chain.import_proposer_slashing(proposer_slashing); - debug!(self.log, "Successfully imported proposer slashing"); - } - - /// Verify an attester slashing before gossiping or processing it. - /// - /// Errors are logged at debug level. 
- pub fn verify_attester_slashing_for_gossip( - &self, - peer_id: &PeerId, - attester_slashing: AttesterSlashing, - ) -> Option>> { - match self - .chain - .verify_attester_slashing_for_gossip(attester_slashing) - { - Ok(ObservationOutcome::New(verified_slashing)) => Some(verified_slashing), - Ok(ObservationOutcome::AlreadyKnown) => { - debug!( - self.log, - "Dropping attester slashing"; - "reason" => "Slashings already known for all slashed validators", - "peer" => peer_id.to_string() - ); - None - } - Err(e) => { - debug!( - self.log, - "Dropping invalid attester slashing"; - "peer" => peer_id.to_string(), - "error" => format!("{:?}", e) - ); - None - } - } - } - - /// Import a verified attester slashing into the op pool. - pub fn import_verified_attester_slashing( - &self, - attester_slashing: SigVerifiedOp>, - ) { - if let Err(e) = self.chain.import_attester_slashing(attester_slashing) { - debug!(self.log, "Error importing attester slashing"; "error" => format!("{:?}", e)); - } else { - debug!(self.log, "Successfully imported attester slashing"); - } + self.beacon_processor_send + .try_send(BeaconWorkEvent::gossip_attester_slashing( + message_id, + peer_id, + attester_slashing, + )) + .unwrap_or_else(|e| { + error!( + &self.log, + "Unable to send to gossip processor"; + "type" => "attester slashing gossip", + "error" => e.to_string(), + ) + }) } } diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 7409a89def1..e07ebb166ab 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -6,17 +6,18 @@ use crate::{ }; use crate::{error, metrics}; use beacon_chain::{BeaconChain, BeaconChainTypes}; -use eth2_libp2p::Service as LibP2PService; use eth2_libp2p::{ rpc::{GoodbyeReason, RPCResponseErrorCode, RequestId}, Libp2pEvent, PeerAction, PeerRequestId, PubsubMessage, Request, Response, }; -use eth2_libp2p::{BehaviourEvent, MessageId, NetworkGlobals, PeerId}; +use eth2_libp2p::{ + types::GossipKind, 
BehaviourEvent, GossipTopic, MessageId, NetworkGlobals, PeerId, TopicHash, +}; +use eth2_libp2p::{MessageAcceptance, Service as LibP2PService}; use futures::prelude::*; use rest_types::ValidatorSubscription; use slog::{debug, error, info, o, trace, warn}; -use std::sync::Arc; -use std::time::Duration; +use std::{collections::HashMap, sync::Arc, time::Duration}; use store::HotColdDB; use tokio::sync::mpsc; use tokio::time::Delay; @@ -24,6 +25,9 @@ use types::EthSpec; mod tests; +/// The interval (in seconds) that various network metrics will update. +const METRIC_UPDATE_INTERVAL: u64 = 1; + /// Types of messages that the network service can receive. #[derive(Debug)] pub enum NetworkMessage { @@ -55,11 +59,13 @@ pub enum NetworkMessage { /// Publish a list of messages to the gossipsub protocol. Publish { messages: Vec> }, /// Validates a received gossipsub message. This will propagate the message on the network. - Validate { + ValidationResult { /// The peer that sent us the message. We don't send back to this peer. propagation_source: PeerId, /// The id of the message we are validating and propagating. message_id: MessageId, + /// The result of the validation + validation_result: MessageAcceptance, }, /// Reports a peer to the peer manager for performing an action. ReportPeer { peer_id: PeerId, action: PeerAction }, @@ -89,13 +95,15 @@ pub struct NetworkService { network_globals: Arc>, /// A delay that expires when a new fork takes place. next_fork_update: Option, + /// A timer for updating various network metrics. + metrics_update: tokio::time::Interval, /// The logger for the network service. 
log: slog::Logger, } impl NetworkService { #[allow(clippy::type_complexity)] - pub fn start( + pub async fn start( beacon_chain: Arc>, config: &NetworkConfig, executor: environment::TaskExecutor, @@ -117,7 +125,7 @@ impl NetworkService { // launch libp2p service let (network_globals, mut libp2p) = - LibP2PService::new(executor.clone(), config, enr_fork_id, &network_log)?; + LibP2PService::new(executor.clone(), config, enr_fork_id, &network_log).await?; // Repopulate the DHT with stored ENR's. let enrs_to_load = load_dht::(store.clone()); @@ -126,7 +134,7 @@ impl NetworkService { "Loading peers into the routing table"; "peers" => enrs_to_load.len() ); for enr in enrs_to_load { - libp2p.swarm.add_enr(enr.clone()); + libp2p.swarm.add_enr(enr.clone()); //TODO change? } // launch derived network services @@ -144,8 +152,11 @@ impl NetworkService { let attestation_service = AttestationService::new(beacon_chain.clone(), network_globals.clone(), &network_log); + // create a timer for updating network metrics + let metrics_update = tokio::time::interval(Duration::from_secs(METRIC_UPDATE_INTERVAL)); + // create the network service and spawn the task - let network_log = network_log.new(o!("service"=> "network")); + let network_log = network_log.new(o!("service" => "network")); let network_service = NetworkService { beacon_chain, libp2p, @@ -155,6 +166,7 @@ impl NetworkService { store, network_globals: network_globals.clone(), next_fork_update, + metrics_update, log: network_log, }; @@ -169,12 +181,12 @@ fn spawn_service( mut service: NetworkService, ) -> error::Result<()> { let mut exit_rx = executor.exit(); + let mut shutdown_sender = executor.shutdown_sender(); // spawn on the current executor executor.spawn_without_exit(async move { - // TODO: there is something with this code that prevents cargo fmt from doing anything at - // all. 
Ok, it is worse, the compiler doesn't show errors over this code beyond ast - // checking + + let mut metric_update_counter = 0; loop { // build the futures to check simultaneously tokio::select! { @@ -203,6 +215,17 @@ fn spawn_service( info!(service.log, "Network service shutdown"); return; } + _ = service.metrics_update.next() => { + // update various network metrics + metric_update_counter +=1; + if metric_update_counter* 1000 % T::EthSpec::default_spec().milliseconds_per_slot == 0 { + // if a slot has occurred, reset the metrics + let _ = metrics::ATTESTATIONS_PUBLISHED_PER_SUBNET_PER_SLOT + .as_ref() + .map(|gauge| gauge.reset()); + } + update_gossip_metrics::(&service.libp2p.swarm.gs()); + } // handle a message sent to the network Some(message) = service.network_recv.recv() => { match message { @@ -215,9 +238,10 @@ fn spawn_service( NetworkMessage::SendError{ peer_id, error, id, reason } => { service.libp2p.respond_with_error(peer_id, id, error, reason); } - NetworkMessage::Validate { + NetworkMessage::ValidationResult { propagation_source, message_id, + validation_result, } => { trace!(service.log, "Propagating gossipsub message"; "propagation_peer" => format!("{:?}", propagation_source), @@ -226,7 +250,9 @@ fn spawn_service( service .libp2p .swarm - .validate_message(&propagation_source, message_id); + .report_message_validation_result( + &propagation_source, message_id, validation_result + ); } NetworkMessage::Publish { messages } => { let mut topic_kinds = Vec::new(); @@ -271,8 +297,8 @@ fn spawn_service( AttServiceMessage::EnrRemove(subnet_id) => { service.libp2p.swarm.update_enr_subnet(subnet_id, false); } - AttServiceMessage::DiscoverPeers{subnet_id, min_ttl} => { - service.libp2p.swarm.discover_subnet_peers(subnet_id, min_ttl); + AttServiceMessage::DiscoverPeers(subnets_to_discover) => { + service.libp2p.swarm.discover_subnet_peers(subnets_to_discover); } } } @@ -376,6 +402,12 @@ fn spawn_service( Libp2pEvent::NewListenAddr(multiaddr) => { 
service.network_globals.listen_multiaddrs.write().push(multiaddr); } + Libp2pEvent::ZeroListeners => { + let _ = shutdown_sender.send("All listeners are closed. Unable to listen").await.map_err(|e| { + warn!(service.log, "failed to send a shutdown signal"; "error" => e.to_string() + ) + }); + } } } } @@ -412,7 +444,11 @@ fn expose_publish_metrics(messages: &[PubsubMessage]) { for message in messages { match message { PubsubMessage::BeaconBlock(_) => metrics::inc_counter(&metrics::GOSSIP_BLOCKS_TX), - PubsubMessage::Attestation(_) => { + PubsubMessage::Attestation(subnet_id) => { + metrics::inc_counter_vec( + &metrics::ATTESTATIONS_PUBLISHED_PER_SUBNET_PER_SLOT, + &[&subnet_id.0.to_string()], + ); metrics::inc_counter(&metrics::GOSSIP_UNAGGREGATED_ATTESTATIONS_TX) } PubsubMessage::AggregateAndProofAttestation(_) => { @@ -436,3 +472,163 @@ fn expose_receive_metrics(message: &PubsubMessage) { _ => {} } } + +fn update_gossip_metrics(gossipsub: ð2_libp2p::Gossipsub) { + // Clear the metrics + let _ = metrics::PEERS_PER_PROTOCOL + .as_ref() + .map(|gauge| gauge.reset()); + let _ = metrics::PEERS_PER_PROTOCOL + .as_ref() + .map(|gauge| gauge.reset()); + let _ = metrics::MESH_PEERS_PER_MAIN_TOPIC + .as_ref() + .map(|gauge| gauge.reset()); + let _ = metrics::AVG_GOSSIPSUB_PEER_SCORE_PER_MAIN_TOPIC + .as_ref() + .map(|gauge| gauge.reset()); + let _ = metrics::AVG_GOSSIPSUB_PEER_SCORE_PER_SUBNET_TOPIC + .as_ref() + .map(|gauge| gauge.reset()); + + // reset the mesh peers, showing all subnets + for subnet_id in 0..T::default_spec().attestation_subnet_count { + let _ = metrics::get_int_gauge( + &metrics::MESH_PEERS_PER_SUBNET_TOPIC, + &[&subnet_id.to_string()], + ) + .map(|v| v.set(0)); + + let _ = metrics::get_int_gauge( + &metrics::GOSSIPSUB_SUBSCRIBED_SUBNET_TOPIC, + &[&subnet_id.to_string()], + ) + .map(|v| v.set(0)); + + let _ = metrics::get_int_gauge( + &metrics::GOSSIPSUB_SUBSCRIBED_PEERS_SUBNET_TOPIC, + &[&subnet_id.to_string()], + ) + .map(|v| v.set(0)); + } + + // 
Subnet topics subscribed to + for topic_hash in gossipsub.topics() { + if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) { + if let GossipKind::Attestation(subnet_id) = topic.kind() { + let _ = metrics::get_int_gauge( + &metrics::GOSSIPSUB_SUBSCRIBED_SUBNET_TOPIC, + &[&subnet_id.to_string()], + ) + .map(|v| v.set(1)); + } + } + } + + // Peers per subscribed subnet + let mut peers_per_topic: HashMap = HashMap::new(); + for (peer_id, topics) in gossipsub.all_peers() { + for topic_hash in topics { + *peers_per_topic.entry(topic_hash.clone()).or_default() += 1; + + if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) { + match topic.kind() { + GossipKind::Attestation(subnet_id) => { + if let Some(v) = metrics::get_int_gauge( + &metrics::GOSSIPSUB_SUBSCRIBED_PEERS_SUBNET_TOPIC, + &[&subnet_id.to_string()], + ) { + v.inc() + }; + + // average peer scores + if let Some(score) = gossipsub.peer_score(peer_id) { + if let Some(v) = metrics::get_int_gauge( + &metrics::AVG_GOSSIPSUB_PEER_SCORE_PER_SUBNET_TOPIC, + &[&subnet_id.to_string()], + ) { + v.add(score as i64) + }; + } + } + kind => { + // main topics + if let Some(score) = gossipsub.peer_score(peer_id) { + if let Some(v) = metrics::get_int_gauge( + &metrics::AVG_GOSSIPSUB_PEER_SCORE_PER_MAIN_TOPIC, + &[&format!("{:?}", kind)], + ) { + v.add(score as i64) + }; + } + } + } + } + } + } + // adjust to average scores by dividing by number of peers + for (topic_hash, peers) in peers_per_topic.iter() { + if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) { + match topic.kind() { + GossipKind::Attestation(subnet_id) => { + // average peer scores + if let Some(v) = metrics::get_int_gauge( + &metrics::AVG_GOSSIPSUB_PEER_SCORE_PER_SUBNET_TOPIC, + &[&subnet_id.to_string()], + ) { + v.set(v.get() / (*peers as i64)) + }; + } + kind => { + // main topics + if let Some(v) = metrics::get_int_gauge( + &metrics::AVG_GOSSIPSUB_PEER_SCORE_PER_MAIN_TOPIC, + &[&format!("{:?}", kind)], + ) { + v.set(v.get() / (*peers 
as i64)) + }; + } + } + } + } + + // mesh peers + for topic_hash in gossipsub.topics() { + let peers = gossipsub.mesh_peers(&topic_hash).count(); + if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) { + match topic.kind() { + GossipKind::Attestation(subnet_id) => { + if let Some(v) = metrics::get_int_gauge( + &metrics::MESH_PEERS_PER_SUBNET_TOPIC, + &[&subnet_id.to_string()], + ) { + v.set(peers as i64) + }; + } + kind => { + // main topics + if let Some(v) = metrics::get_int_gauge( + &metrics::MESH_PEERS_PER_MAIN_TOPIC, + &[&format!("{:?}", kind)], + ) { + v.set(peers as i64) + }; + } + } + } + } + + // protocol peers + let mut peers_per_protocol: HashMap = HashMap::new(); + for (_peer, protocol) in gossipsub.peer_protocol() { + *peers_per_protocol.entry(protocol.to_string()).or_default() += 1; + } + + for (protocol, peers) in peers_per_protocol.iter() { + if let Some(v) = + metrics::get_int_gauge(&metrics::PEERS_PER_PROTOCOL, &[&protocol.to_string()]) + { + v.set(*peers) + }; + } +} diff --git a/beacon_node/network/src/service/tests.rs b/beacon_node/network/src/service/tests.rs index b2793392443..2efcf889e74 100644 --- a/beacon_node/network/src/service/tests.rs +++ b/beacon_node/network/src/service/tests.rs @@ -23,7 +23,7 @@ mod tests { let log = get_logger(); let beacon_chain = Arc::new( - BeaconChainHarness::new( + BeaconChainHarness::new_with_store_config( MinimalEthSpec, generate_deterministic_keypairs(8), StoreConfig::default(), @@ -40,17 +40,25 @@ mod tests { let runtime = Runtime::new().unwrap(); let (signal, exit) = exit_future::signal(); - let executor = environment::TaskExecutor::new(runtime.handle().clone(), exit, log.clone()); + let (shutdown_tx, _) = futures::channel::mpsc::channel(1); + let executor = environment::TaskExecutor::new( + runtime.handle().clone(), + exit, + log.clone(), + shutdown_tx, + ); let mut config = NetworkConfig::default(); config.libp2p_port = 21212; config.discovery_port = 21212; - config.boot_nodes = enrs.clone(); + 
config.boot_nodes_enr = enrs.clone(); runtime.spawn(async move { // Create a new network service which implicitly gets dropped at the // end of the block. - let _ = NetworkService::start(beacon_chain.clone(), &config, executor).unwrap(); + let _ = NetworkService::start(beacon_chain.clone(), &config, executor) + .await + .unwrap(); drop(signal); }); runtime.shutdown_timeout(tokio::time::Duration::from_millis(300)); diff --git a/beacon_node/network/src/sync/block_processor.rs b/beacon_node/network/src/sync/block_processor.rs deleted file mode 100644 index a4fe5b418ff..00000000000 --- a/beacon_node/network/src/sync/block_processor.rs +++ /dev/null @@ -1,255 +0,0 @@ -use crate::router::processor::FUTURE_SLOT_TOLERANCE; -use crate::sync::manager::SyncMessage; -use crate::sync::range_sync::{BatchId, ChainId}; -use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, ChainSegmentResult}; -use eth2_libp2p::PeerId; -use slog::{debug, error, trace, warn}; -use std::sync::{Arc, Weak}; -use tokio::sync::mpsc; -use types::{EthSpec, SignedBeaconBlock}; - -/// Id associated to a block processing request, either a batch or a single block. -#[derive(Clone, Debug, PartialEq)] -pub enum ProcessId { - /// Processing Id of a range syncing batch. - RangeBatchId(ChainId, BatchId), - /// Processing Id of the parent lookup of a block - ParentLookup(PeerId), -} - -/// The result of a block processing request. -// TODO: When correct batch error handling occurs, we will include an error type. -#[derive(Debug)] -pub enum BatchProcessResult { - /// The batch was completed successfully. - Success, - /// The batch processing failed. - Failed, - /// The batch processing failed but managed to import at least one block. - Partial, -} - -/// Spawns a thread handling the block processing of a request: range syncing or parent lookup. 
-pub fn spawn_block_processor( - chain: Weak>, - process_id: ProcessId, - downloaded_blocks: Vec>, - sync_send: mpsc::UnboundedSender>, - log: slog::Logger, -) { - std::thread::spawn(move || { - match process_id { - // this a request from the range sync - ProcessId::RangeBatchId(chain_id, batch_id) => { - let len = downloaded_blocks.len(); - let start_slot = if len > 0 { - downloaded_blocks[0].message.slot.as_u64() - } else { - 0 - }; - let end_slot = if len > 0 { - downloaded_blocks[len - 1].message.slot.as_u64() - } else { - 0 - }; - - debug!(log, "Processing batch"; "id" => *batch_id, "blocks" => downloaded_blocks.len(), "start_slot" => start_slot, "end_slot" => end_slot); - let result = match process_blocks(chain, downloaded_blocks.iter(), &log) { - (_, Ok(_)) => { - debug!(log, "Batch processed"; "id" => *batch_id , "start_slot" => start_slot, "end_slot" => end_slot); - BatchProcessResult::Success - } - (imported_blocks, Err(e)) if imported_blocks > 0 => { - debug!(log, "Batch processing failed but imported some blocks"; - "id" => *batch_id, "error" => e, "imported_blocks"=> imported_blocks); - BatchProcessResult::Partial - } - (_, Err(e)) => { - debug!(log, "Batch processing failed"; "id" => *batch_id, "error" => e); - BatchProcessResult::Failed - } - }; - - let msg = SyncMessage::BatchProcessed { - chain_id, - batch_id, - downloaded_blocks, - result, - }; - sync_send.send(msg).unwrap_or_else(|_| { - debug!( - log, - "Block processor could not inform range sync result. Likely shutting down." 
- ); - }); - } - // this a parent lookup request from the sync manager - ProcessId::ParentLookup(peer_id) => { - debug!( - log, "Processing parent lookup"; - "last_peer_id" => format!("{}", peer_id), - "blocks" => downloaded_blocks.len() - ); - // parent blocks are ordered from highest slot to lowest, so we need to process in - // reverse - match process_blocks(chain, downloaded_blocks.iter().rev(), &log) { - (_, Err(e)) => { - warn!(log, "Parent lookup failed"; "last_peer_id" => format!("{}", peer_id), "error" => e); - sync_send - .send(SyncMessage::ParentLookupFailed(peer_id)) - .unwrap_or_else(|_| { - // on failure, inform to downvote the peer - debug!( - log, - "Block processor could not inform parent lookup result. Likely shutting down." - ); - }); - } - (_, Ok(_)) => { - debug!(log, "Parent lookup processed successfully"); - } - } - } - } - }); -} - -/// Helper function to process blocks batches which only consumes the chain and blocks to process. -fn process_blocks< - 'a, - T: BeaconChainTypes, - I: Iterator>, ->( - chain: Weak>, - downloaded_blocks: I, - log: &slog::Logger, -) -> (usize, Result<(), String>) { - if let Some(chain) = chain.upgrade() { - let blocks = downloaded_blocks.cloned().collect::>(); - let (imported_blocks, r) = match chain.process_chain_segment(blocks) { - ChainSegmentResult::Successful { imported_blocks } => { - if imported_blocks == 0 { - debug!(log, "All blocks already known"); - } else { - debug!( - log, "Imported blocks from network"; - "count" => imported_blocks, - ); - // Batch completed successfully with at least one block, run fork choice. - run_fork_choice(chain, log); - } - - (imported_blocks, Ok(())) - } - ChainSegmentResult::Failed { - imported_blocks, - error, - } => { - let r = handle_failed_chain_segment(error, log); - if imported_blocks > 0 { - run_fork_choice(chain, log); - } - (imported_blocks, r) - } - }; - - return (imported_blocks, r); - } - - (0, Ok(())) -} - -/// Runs fork-choice on a given chain. 
This is used during block processing after one successful -/// block import. -fn run_fork_choice(chain: Arc>, log: &slog::Logger) { - match chain.fork_choice() { - Ok(()) => trace!( - log, - "Fork choice success"; - "location" => "batch processing" - ), - Err(e) => error!( - log, - "Fork choice failed"; - "error" => format!("{:?}", e), - "location" => "batch import error" - ), - } -} - -/// Helper function to handle a `BlockError` from `process_chain_segment` -fn handle_failed_chain_segment( - error: BlockError, - log: &slog::Logger, -) -> Result<(), String> { - match error { - BlockError::ParentUnknown(block) => { - // blocks should be sequential and all parents should exist - - Err(format!( - "Block has an unknown parent: {}", - block.parent_root() - )) - } - BlockError::BlockIsAlreadyKnown => { - // This can happen for many reasons. Head sync's can download multiples and parent - // lookups can download blocks before range sync - Ok(()) - } - BlockError::FutureSlot { - present_slot, - block_slot, - } => { - if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot { - // The block is too far in the future, drop it. - warn!( - log, "Block is ahead of our slot clock"; - "msg" => "block for future slot rejected, check your time", - "present_slot" => present_slot, - "block_slot" => block_slot, - "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, - ); - } else { - // The block is in the future, but not too far. - debug!( - log, "Block is slightly ahead of our slot clock, ignoring."; - "present_slot" => present_slot, - "block_slot" => block_slot, - "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, - ); - } - - Err(format!( - "Block with slot {} is higher than the current slot {}", - block_slot, present_slot - )) - } - BlockError::WouldRevertFinalizedSlot { .. 
} => { - debug!( log, "Finalized or earlier block processed";); - - Ok(()) - } - BlockError::GenesisBlock => { - debug!(log, "Genesis block was processed"); - Ok(()) - } - BlockError::BeaconChainError(e) => { - warn!( - log, "BlockProcessingFailure"; - "msg" => "unexpected condition in processing block.", - "outcome" => format!("{:?}", e) - ); - - Err(format!("Internal error whilst processing block: {:?}", e)) - } - other => { - debug!( - log, "Invalid block received"; - "msg" => "peer sent invalid block", - "outcome" => format!("{:?}", other), - ); - - Err(format!("Peer sent invalid block. Reason: {:?}", other)) - } - } -} diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 2f388e46111..3aa5577d70c 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -33,17 +33,18 @@ //! if an attestation references an unknown block) this manager can search for the block and //! subsequently search for parents if needed. 
-use super::block_processor::{spawn_block_processor, BatchProcessResult, ProcessId}; use super::network_context::SyncNetworkContext; use super::peer_sync_info::{PeerSyncInfo, PeerSyncType}; -use super::range_sync::{BatchId, ChainId, RangeSync, EPOCHS_PER_BATCH}; +use super::range_sync::{ChainId, RangeSync, EPOCHS_PER_BATCH}; use super::RequestId; +use crate::beacon_processor::{ProcessId, WorkEvent as BeaconWorkEvent}; use crate::service::NetworkMessage; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError}; use eth2_libp2p::rpc::{methods::MAX_REQUEST_BLOCKS, BlocksByRootRequest, GoodbyeReason}; use eth2_libp2p::types::NetworkGlobals; use eth2_libp2p::{PeerAction, PeerId}; use fnv::FnvHashMap; +use lru_cache::LRUCache; use slog::{crit, debug, error, info, trace, warn, Logger}; use smallvec::SmallVec; use ssz_types::VariableList; @@ -51,7 +52,7 @@ use std::boxed::Box; use std::ops::Sub; use std::sync::Arc; use tokio::sync::mpsc; -use types::{EthSpec, Hash256, SignedBeaconBlock, Slot}; +use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; /// The number of slots ahead of us that is allowed before requesting a long-range (batch) Sync /// from a peer. If a peer is within this tolerance (forwards or backwards), it is treated as a @@ -100,13 +101,30 @@ pub enum SyncMessage { /// A batch has been processed by the block processor thread. BatchProcessed { chain_id: ChainId, - batch_id: BatchId, + epoch: Epoch, downloaded_blocks: Vec>, result: BatchProcessResult, }, - /// A parent lookup has failed for a block given by this `peer_id`. - ParentLookupFailed(PeerId), + /// A parent lookup has failed. + ParentLookupFailed { + /// The head of the chain of blocks that failed to process. + chain_head: Hash256, + /// The peer that instigated the chain lookup. + peer_id: PeerId, + }, +} + +/// The result of processing a multiple blocks (a chain segment). +// TODO: When correct batch error handling occurs, we will include an error type. 
+#[derive(Debug)] +pub enum BatchProcessResult { + /// The batch was completed successfully. + Success, + /// The batch processing failed. + Failed, + /// The batch processing failed but managed to import at least one block. + Partial, } /// Maintains a sequential list of parents to lookup and the lookup's current state. @@ -149,6 +167,9 @@ pub struct SyncManager { /// A collection of parent block lookups. parent_queue: SmallVec<[ParentRequests; 3]>, + /// A cache of failed chain lookups to prevent duplicate searches. + failed_chains: LRUCache, + /// A collection of block hashes being searched for and a flag indicating if a result has been /// received or not. /// @@ -158,8 +179,8 @@ pub struct SyncManager { /// The logger for the import manager. log: Logger, - /// The sending part of input_channel - sync_send: mpsc::UnboundedSender>, + /// A multi-threaded, non-blocking processor for applying messages to the beacon chain. + beacon_processor_send: mpsc::Sender>, } /// Object representing a single block lookup request. 
@@ -187,6 +208,7 @@ pub fn spawn( beacon_chain: Arc>, network_globals: Arc>, network_send: mpsc::UnboundedSender>, + beacon_processor_send: mpsc::Sender>, log: slog::Logger, ) -> mpsc::UnboundedSender> { assert!( @@ -201,7 +223,7 @@ pub fn spawn( range_sync: RangeSync::new( beacon_chain.clone(), network_globals.clone(), - sync_send.clone(), + beacon_processor_send.clone(), log.clone(), ), network: SyncNetworkContext::new(network_send, network_globals.clone(), log.clone()), @@ -209,9 +231,10 @@ pub fn spawn( network_globals, input_channel: sync_recv, parent_queue: SmallVec::new(), + failed_chains: LRUCache::new(500), single_block_lookups: FnvHashMap::default(), log: log.clone(), - sync_send: sync_send.clone(), + beacon_processor_send, }; // spawn the sync manager thread @@ -300,7 +323,7 @@ impl SyncManager { /// There are two reasons we could have received a BlocksByRoot response /// - We requested a single hash and have received a response for the single_block_lookup /// - We are looking up parent blocks in parent lookup search - fn blocks_by_root_response( + async fn blocks_by_root_response( &mut self, peer_id: PeerId, request_id: RequestId, @@ -318,7 +341,8 @@ impl SyncManager { single_block_hash = Some(block_request.hash); } if let Some(block_hash) = single_block_hash { - self.single_block_lookup_response(peer_id, block, block_hash); + self.single_block_lookup_response(peer_id, block, block_hash) + .await; return; } @@ -337,10 +361,26 @@ impl SyncManager { return; } }; + + // check if the parent of this block isn't in our failed cache. If it is, this + // chain should be dropped and the peer downscored. 
+ if self.failed_chains.contains(&block.message.parent_root) { + debug!(self.log, "Parent chain ignored due to past failure"; "block" => format!("{:?}", block.message.parent_root), "slot" => block.message.slot); + if !parent_request.downloaded_blocks.is_empty() { + // Add the root block to failed chains + self.failed_chains + .insert(parent_request.downloaded_blocks[0].canonical_root()); + } else { + crit!(self.log, "Parent chain has no blocks"); + } + self.network + .report_peer(peer_id, PeerAction::MidToleranceError); + return; + } // add the block to response parent_request.downloaded_blocks.push(block); // queue for processing - self.process_parent_request(parent_request); + self.process_parent_request(parent_request).await; } None => { // this is a stream termination @@ -381,10 +421,40 @@ impl SyncManager { } } + async fn process_block_async( + &mut self, + block: SignedBeaconBlock, + ) -> Option>> { + let (event, rx) = BeaconWorkEvent::rpc_beacon_block(Box::new(block)); + match self.beacon_processor_send.try_send(event) { + Ok(_) => {} + Err(e) => { + error!( + self.log, + "Failed to send sync block to processor"; + "error" => format!("{:?}", e) + ); + return None; + } + } + + match rx.await { + Ok(block_result) => Some(block_result), + Err(_) => { + warn!( + self.log, + "Sync block not processed"; + "msg" => "likely due to system resource exhaustion" + ); + None + } + } + } + /// Processes the response obtained from a single block lookup search. If the block is /// processed or errors, the search ends. If the blocks parent is unknown, a block parent /// lookup search is started. 
- fn single_block_lookup_response( + async fn single_block_lookup_response( &mut self, peer_id: PeerId, block: SignedBeaconBlock, @@ -399,8 +469,13 @@ impl SyncManager { return; } + let block_result = match self.process_block_async(block.clone()).await { + Some(block_result) => block_result, + None => return, + }; + // we have the correct block, try and process it - match self.chain.process_block(block.clone()) { + match block_result { Ok(block_root) => { info!(self.log, "Processed block"; "block" => format!("{}", block_root)); @@ -461,6 +536,15 @@ impl SyncManager { } } + let block_root = block.canonical_root(); + // If this block or it's parent is part of a known failed chain, ignore it. + if self.failed_chains.contains(&block.message.parent_root) + || self.failed_chains.contains(&block_root) + { + debug!(self.log, "Block is from a past failed chain. Dropping"; "block_root" => format!("{:?}", block_root), "block_slot" => block.message.slot); + return; + } + // Make sure this block is not already being searched for // NOTE: Potentially store a hashset of blocks for O(1) lookups for parent_req in self.parent_queue.iter() { @@ -599,7 +683,7 @@ impl SyncManager { // manager /// A new block has been received for a parent lookup query, process it. - fn process_parent_request(&mut self, mut parent_request: ParentRequests) { + async fn process_parent_request(&mut self, mut parent_request: ParentRequests) { // verify the last added block is the parent of the last requested block if parent_request.downloaded_blocks.len() < 2 { @@ -648,11 +732,19 @@ impl SyncManager { // If the last block in the queue has an unknown parent, we continue the parent // lookup-search. 
+ let chain_block_hash = parent_request.downloaded_blocks[0].canonical_root(); + let newest_block = parent_request .downloaded_blocks .pop() .expect("There is always at least one block in the queue"); - match self.chain.process_block(newest_block.clone()) { + + let block_result = match self.process_block_async(newest_block.clone()).await { + Some(block_result) => block_result, + None => return, + }; + + match block_result { Err(BlockError::ParentUnknown { .. }) => { // need to keep looking for parents // add the block back to the queue and continue the search @@ -660,13 +752,25 @@ impl SyncManager { self.request_parent(parent_request); } Ok(_) | Err(BlockError::BlockIsAlreadyKnown { .. }) => { - spawn_block_processor( - Arc::downgrade(&self.chain), - ProcessId::ParentLookup(parent_request.last_submitted_peer.clone()), - parent_request.downloaded_blocks, - self.sync_send.clone(), - self.log.clone(), + let process_id = ProcessId::ParentLookup( + parent_request.last_submitted_peer.clone(), + chain_block_hash, ); + let blocks = parent_request.downloaded_blocks; + + match self + .beacon_processor_send + .try_send(BeaconWorkEvent::chain_segment(process_id, blocks)) + { + Ok(_) => {} + Err(e) => { + error!( + self.log, + "Failed to send chain segment to processor"; + "error" => format!("{:?}", e) + ); + } + } } Err(outcome) => { // all else we consider the chain a failure and downvote the peer that sent @@ -677,6 +781,10 @@ impl SyncManager { "outcome" => format!("{:?}", outcome), "last_peer" => parent_request.last_submitted_peer.to_string(), ); + + // Add this chain to cache of failed chains + self.failed_chains.insert(chain_block_hash); + // This currently can be a host of errors. We permit this due to the partial // ambiguity. // TODO: Refine the error types and score the peer appropriately. 
@@ -699,8 +807,17 @@ impl SyncManager { || parent_request.downloaded_blocks.len() >= PARENT_DEPTH_TOLERANCE { let error = if parent_request.failed_attempts >= PARENT_FAIL_TOLERANCE { + // This is a peer-specific error and the chain could be continued with another + // peer. We don't consider this chain a failure and prevent retries with another + // peer. "too many failed attempts" } else { + if !parent_request.downloaded_blocks.is_empty() { + self.failed_chains + .insert(parent_request.downloaded_blocks[0].canonical_root()); + } else { + crit!(self.log, "Parent lookup has no blocks"); + } "reached maximum lookup-depth" }; @@ -709,6 +826,11 @@ impl SyncManager { "ancestors_found" => parent_request.downloaded_blocks.len(), "reason" => error ); + // Downscore the peer. + self.network.report_peer( + parent_request.last_submitted_peer, + PeerAction::LowToleranceError, + ); return; // drop the request } @@ -760,7 +882,8 @@ impl SyncManager { request_id, beacon_block, } => { - self.blocks_by_root_response(peer_id, request_id, beacon_block.map(|b| *b)); + self.blocks_by_root_response(peer_id, request_id, beacon_block.map(|b| *b)) + .await; } SyncMessage::UnknownBlock(peer_id, block) => { self.add_unknown_block(peer_id, *block); @@ -776,24 +899,25 @@ impl SyncManager { } SyncMessage::BatchProcessed { chain_id, - batch_id, + epoch, downloaded_blocks, result, } => { self.range_sync.handle_block_process_result( &mut self.network, chain_id, - batch_id, + epoch, downloaded_blocks, result, ); } - SyncMessage::ParentLookupFailed(peer_id) => { + SyncMessage::ParentLookupFailed { + chain_head, + peer_id, + } => { // A peer sent an object (block or attestation) that referenced a parent. - // On request for this parent the peer indicated it did not have this - // block. - // This is not fatal. Peer's could prune old blocks so we moderately - // tolerate this behaviour. + // The processing of this chain failed. 
+ self.failed_chains.insert(chain_head); self.network .report_peer(peer_id, PeerAction::MidToleranceError); } diff --git a/beacon_node/network/src/sync/mod.rs b/beacon_node/network/src/sync/mod.rs index 2c0fcabb287..1d6e4f54321 100644 --- a/beacon_node/network/src/sync/mod.rs +++ b/beacon_node/network/src/sync/mod.rs @@ -1,14 +1,14 @@ //! Syncing for lighthouse. //! //! Stores the various syncing methods for the beacon chain. -mod block_processor; pub mod manager; mod network_context; mod peer_sync_info; mod range_sync; -pub use manager::SyncMessage; +pub use manager::{BatchProcessResult, SyncMessage}; pub use peer_sync_info::PeerSyncInfo; +pub use range_sync::ChainId; /// Type of id of rpc requests sent by sync pub type RequestId = usize; diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index d487971d1b2..caccce4e614 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -110,7 +110,7 @@ impl SyncNetworkContext { } pub fn report_peer(&mut self, peer_id: PeerId, action: PeerAction) { - debug!(self.log, "Sync reporting peer"; "peer_id" => peer_id.to_string(), "action"=> action.to_string()); + debug!(self.log, "Sync reporting peer"; "peer_id" => peer_id.to_string(), "action" => action.to_string()); self.network_send .send(NetworkMessage::ReportPeer { peer_id, action }) .unwrap_or_else(|_| { diff --git a/beacon_node/network/src/sync/peer_sync_info.rs b/beacon_node/network/src/sync/peer_sync_info.rs index d564383c552..0fad735cb0c 100644 --- a/beacon_node/network/src/sync/peer_sync_info.rs +++ b/beacon_node/network/src/sync/peer_sync_info.rs @@ -86,8 +86,8 @@ impl PeerSyncInfo { // that we are within SLOT_IMPORT_TOLERANCE of our two heads if (self.head_slot >= remote.head_slot && self.head_slot.sub(remote.head_slot).as_usize() <= SLOT_IMPORT_TOLERANCE) - || (self.head_slot < remote.head_slot) - && remote.head_slot.sub(self.head_slot).as_usize() <= 
SLOT_IMPORT_TOLERANCE + || (self.head_slot < remote.head_slot + && remote.head_slot.sub(self.head_slot).as_usize() <= SLOT_IMPORT_TOLERANCE) { return true; } diff --git a/beacon_node/network/src/sync/range_sync/batch.rs b/beacon_node/network/src/sync/range_sync/batch.rs index 9d65f11f86b..1fa312c579a 100644 --- a/beacon_node/network/src/sync/range_sync/batch.rs +++ b/beacon_node/network/src/sync/range_sync/batch.rs @@ -9,37 +9,14 @@ use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; use std::hash::{Hash, Hasher}; use std::ops::Sub; -use types::{EthSpec, SignedBeaconBlock, Slot}; - -#[derive(Copy, Clone, Debug, PartialEq)] -pub struct BatchId(pub u64); - -impl std::ops::Deref for BatchId { - type Target = u64; - fn deref(&self) -> &Self::Target { - &self.0 - } -} -impl std::ops::DerefMut for BatchId { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - -impl std::convert::From for BatchId { - fn from(id: u64) -> Self { - BatchId(id) - } -} +use types::{Epoch, EthSpec, SignedBeaconBlock, Slot}; /// A collection of sequential blocks that are requested from peers in a single RPC request. #[derive(PartialEq, Debug)] pub struct Batch { - /// The ID of the batch, these are sequential. - pub id: BatchId, - /// The requested start slot of the batch, inclusive. - pub start_slot: Slot, - /// The requested end slot of batch, exlcusive. + /// The requested start epoch of the batch. + pub start_epoch: Epoch, + /// The requested end slot of batch, exclusive. pub end_slot: Slot, /// The `Attempts` that have been made to send us this batch. 
pub attempts: Vec, @@ -69,10 +46,9 @@ pub struct Attempt { impl Eq for Batch {} impl Batch { - pub fn new(id: BatchId, start_slot: Slot, end_slot: Slot, peer_id: PeerId) -> Self { + pub fn new(start_epoch: Epoch, end_slot: Slot, peer_id: PeerId) -> Self { Batch { - id, - start_slot, + start_epoch, end_slot, attempts: Vec::new(), current_peer: peer_id, @@ -82,12 +58,21 @@ impl Batch { } } + pub fn start_slot(&self) -> Slot { + // batches are shifted by 1 + self.start_epoch.start_slot(T::slots_per_epoch()) + 1 + } + + pub fn end_slot(&self) -> Slot { + self.end_slot + } pub fn to_blocks_by_range_request(&self) -> BlocksByRangeRequest { + let start_slot = self.start_slot(); BlocksByRangeRequest { - start_slot: self.start_slot.into(), + start_slot: start_slot.into(), count: min( T::slots_per_epoch() * EPOCHS_PER_BATCH, - self.end_slot.sub(self.start_slot).into(), + self.end_slot.sub(start_slot).into(), ), step: 1, } @@ -105,7 +90,7 @@ impl Batch { impl Ord for Batch { fn cmp(&self, other: &Self) -> Ordering { - self.id.0.cmp(&other.id.0) + self.start_epoch.cmp(&other.start_epoch) } } diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index b816b965670..d79dff469b2 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -1,11 +1,12 @@ -use super::batch::{Batch, BatchId, PendingBatches}; -use crate::sync::block_processor::{spawn_block_processor, BatchProcessResult, ProcessId}; -use crate::sync::network_context::SyncNetworkContext; -use crate::sync::{RequestId, SyncMessage}; +use super::batch::{Batch, PendingBatches}; +use crate::beacon_processor::ProcessId; +use crate::beacon_processor::WorkEvent as BeaconWorkEvent; +use crate::sync::RequestId; +use crate::sync::{network_context::SyncNetworkContext, BatchProcessResult}; use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2_libp2p::{PeerAction, PeerId}; use rand::prelude::*; -use slog::{crit, 
debug, warn}; +use slog::{crit, debug, error, warn}; use std::collections::HashSet; use std::sync::Arc; use tokio::sync::mpsc; @@ -72,11 +73,12 @@ pub struct SyncingChain { /// and thus available to download this chain from. pub peer_pool: HashSet, - /// The next batch_id that needs to be downloaded. - to_be_downloaded_id: BatchId, + /// Starting epoch of the next batch that needs to be downloaded. + to_be_downloaded: Epoch, - /// The next batch id that needs to be processed. - to_be_processed_id: BatchId, + /// Starting epoch of the batch that needs to be processed next. + /// This is incremented as the chain advances. + processing_target: Epoch, /// The current state of the chain. pub state: ChainSyncingState, @@ -84,14 +86,13 @@ pub struct SyncingChain { /// The current processing batch, if any. current_processing_batch: Option>, - /// A send channel to the sync manager. This is given to the batch processor thread to report - /// back once batch processing has completed. - sync_send: mpsc::UnboundedSender>, + /// A multi-threaded, non-blocking processor for applying messages to the beacon chain. + beacon_processor_send: mpsc::Sender>, /// A reference to the underlying beacon chain. chain: Arc>, - /// A reference to the sync logger. + /// The chain's log. log: slog::Logger, } @@ -111,7 +112,7 @@ impl SyncingChain { target_head_slot: Slot, target_head_root: Hash256, peer_id: PeerId, - sync_send: mpsc::UnboundedSender>, + beacon_processor_send: mpsc::Sender>, chain: Arc>, log: slog::Logger, ) -> Self { @@ -127,11 +128,11 @@ impl SyncingChain { completed_batches: Vec::new(), processed_batches: Vec::new(), peer_pool, - to_be_downloaded_id: BatchId(1), - to_be_processed_id: BatchId(1), + to_be_downloaded: start_epoch, + processing_target: start_epoch, state: ChainSyncingState::Stopped, current_processing_batch: None, - sync_send, + beacon_processor_send, chain, log, } @@ -139,13 +140,10 @@ impl SyncingChain { /// Returns the latest slot number that has been processed. 
fn current_processed_slot(&self) -> Slot { - self.start_epoch + // the last slot we processed was included in the previous batch, and corresponds to the + // first slot of the current target epoch + self.processing_target .start_slot(T::EthSpec::slots_per_epoch()) - .saturating_add( - self.to_be_processed_id.saturating_sub(1u64) - * T::EthSpec::slots_per_epoch() - * EPOCHS_PER_BATCH, - ) } /// A batch of blocks has been received. This function gets run on all chains and should @@ -182,21 +180,19 @@ impl SyncingChain { // An entire batch of blocks has been received. This functions checks to see if it can be processed, // remove any batches waiting to be verified and if this chain is syncing, request new // blocks for the peer. - debug!(self.log, "Completed batch received"; "id"=> *batch.id, "blocks" => &batch.downloaded_blocks.len(), "awaiting_batches" => self.completed_batches.len()); + debug!(self.log, "Completed batch received"; "epoch" => batch.start_epoch, "blocks" => &batch.downloaded_blocks.len(), "awaiting_batches" => self.completed_batches.len()); // verify the range of received blocks // Note that the order of blocks is verified in block processing if let Some(last_slot) = batch.downloaded_blocks.last().map(|b| b.slot()) { // the batch is non-empty let first_slot = batch.downloaded_blocks[0].slot(); - if batch.start_slot > first_slot || batch.end_slot < last_slot { + if batch.start_slot() > first_slot || batch.end_slot() < last_slot { warn!(self.log, "BlocksByRange response returned out of range blocks"; - "response_initial_slot" => first_slot, - "requested_initial_slot" => batch.start_slot); - // This is a pretty bad error. We don't consider this fatal, but we don't tolerate - // this much either. 
- network.report_peer(batch.current_peer, PeerAction::LowToleranceError); - self.to_be_processed_id = batch.id; // reset the id back to here, when incrementing, it will check against completed batches + "response_initial_slot" => first_slot, + "requested_initial_slot" => batch.start_slot()); + // this batch can't be used, so we need to request it again. + self.failed_batch(network, batch); return; } } @@ -242,7 +238,7 @@ impl SyncingChain { // Check if there is a batch ready to be processed if !self.completed_batches.is_empty() - && self.completed_batches[0].id == self.to_be_processed_id + && self.completed_batches[0].start_epoch == self.processing_target { let batch = self.completed_batches.remove(0); @@ -255,18 +251,23 @@ impl SyncingChain { } } - /// Sends a batch to the batch processor. + /// Sends a batch to the beacon processor for async processing in a queue. fn process_batch(&mut self, mut batch: Batch) { - let downloaded_blocks = std::mem::replace(&mut batch.downloaded_blocks, Vec::new()); - let process_id = ProcessId::RangeBatchId(self.id, batch.id); + let blocks = std::mem::replace(&mut batch.downloaded_blocks, Vec::new()); + let process_id = ProcessId::RangeBatchId(self.id, batch.start_epoch); self.current_processing_batch = Some(batch); - spawn_block_processor( - Arc::downgrade(&self.chain.clone()), - process_id, - downloaded_blocks, - self.sync_send.clone(), - self.log.clone(), - ); + + if let Err(e) = self + .beacon_processor_send + .try_send(BeaconWorkEvent::chain_segment(process_id, blocks)) + { + error!( + self.log, + "Failed to send chain segment to processor"; + "msg" => "process_batch", + "error" => format!("{:?}", e) + ); + } } /// The block processor has completed processing a batch. 
This function handles the result @@ -275,7 +276,7 @@ impl SyncingChain { &mut self, network: &mut SyncNetworkContext, chain_id: ChainId, - batch_id: BatchId, + batch_start_epoch: Epoch, downloaded_blocks: &mut Option>>, result: &BatchProcessResult, ) -> Option { @@ -284,14 +285,14 @@ impl SyncingChain { return None; } match &self.current_processing_batch { - Some(current_batch) if current_batch.id != batch_id => { + Some(current_batch) if current_batch.start_epoch != batch_start_epoch => { debug!(self.log, "Unexpected batch result"; - "chain_id" => self.id, "batch_id" => *batch_id, "expected_batch_id" => *current_batch.id); + "batch_epoch" => batch_start_epoch, "expected_batch_epoch" => current_batch.start_epoch); return None; } None => { debug!(self.log, "Chain was not expecting a batch result"; - "chain_id" => self.id, "batch_id" => *batch_id); + "batch_epoch" => batch_start_epoch); return None; } _ => { @@ -303,7 +304,7 @@ impl SyncingChain { let downloaded_blocks = downloaded_blocks.take().or_else(|| { // if taken by another chain, we are no longer waiting on a result. 
self.current_processing_batch = None; - crit!(self.log, "Processed batch taken by another chain"; "chain_id" => self.id); + crit!(self.log, "Processed batch taken by another chain"); None })?; @@ -313,16 +314,15 @@ impl SyncingChain { batch.downloaded_blocks = downloaded_blocks; // double check batches are processed in order TODO: Remove for prod - if batch.id != self.to_be_processed_id { + if batch.start_epoch != self.processing_target { crit!(self.log, "Batch processed out of order"; - "chain_id" => self.id, - "processed_batch_id" => *batch.id, - "expected_id" => *self.to_be_processed_id); + "processed_starting_epoch" => batch.start_epoch, + "expected_epoch" => self.processing_target); } let res = match result { BatchProcessResult::Success => { - *self.to_be_processed_id += 1; + self.processing_target += EPOCHS_PER_BATCH; // If the processed batch was not empty, we can validate previous invalidated // blocks including the current batch. @@ -352,7 +352,7 @@ impl SyncingChain { } BatchProcessResult::Partial => { warn!(self.log, "Batch processing failed but at least one block was imported"; - "chain_id" => self.id, "id" => *batch.id, "peer" => format!("{}", batch.current_peer) + "batch_epoch" => batch.start_epoch, "peer" => batch.current_peer.to_string() ); // At least one block was successfully verified and imported, so we can be sure all // previous batches are valid and we only need to download the current failed @@ -370,7 +370,7 @@ impl SyncingChain { let action = PeerAction::LowToleranceError; warn!(self.log, "Batch failed to download. 
Dropping chain scoring peers"; "score_adjustment" => action.to_string(), - "chain_id" => self.id, "id"=> *batch.id); + "batch_epoch"=> batch.start_epoch); for peer_id in self.peer_pool.drain() { network.report_peer(peer_id, action); } @@ -383,7 +383,7 @@ impl SyncingChain { } BatchProcessResult::Failed => { debug!(self.log, "Batch processing failed"; - "chain_id" => self.id,"id" => *batch.id, "peer" => batch.current_peer.to_string(), "client" => network.client_type(&batch.current_peer).to_string()); + "batch_epoch" => batch.start_epoch, "peer" => batch.current_peer.to_string(), "client" => network.client_type(&batch.current_peer).to_string()); // The batch processing failed // This could be because this batch is invalid, or a previous invalidated batch // is invalid. We need to find out which and downvote the peer that has sent us @@ -398,7 +398,7 @@ impl SyncingChain { let action = PeerAction::LowToleranceError; warn!(self.log, "Batch failed to download. Dropping chain scoring peers"; "score_adjustment" => action.to_string(), - "chain_id" => self.id, "id"=> *batch.id); + "batch_epoch" => batch.start_epoch); for peer_id in self.peer_pool.drain() { network.report_peer(peer_id, action); } @@ -428,11 +428,10 @@ impl SyncingChain { ) { while !self.processed_batches.is_empty() { let mut processed_batch = self.processed_batches.remove(0); - if *processed_batch.id >= *last_batch.id { + if processed_batch.start_epoch >= last_batch.start_epoch { crit!(self.log, "A processed batch had a greater id than the current process id"; - "chain_id" => self.id, - "processed_id" => *processed_batch.id, - "current_id" => *last_batch.id); + "processed_start_epoch" => processed_batch.start_epoch, + "current_start_epoch" => last_batch.start_epoch); } // Go through passed attempts and downscore peers that returned invalid batches @@ -447,11 +446,10 @@ impl SyncingChain { let action = PeerAction::LowToleranceError; debug!( self.log, "Re-processed batch validated. 
Scoring original peer"; - "chain_id" => self.id, - "batch_id" => *processed_batch.id, - "score_adjustment" => action.to_string(), - "original_peer" => format!("{}",attempt.peer_id), - "new_peer" => format!("{}", processed_batch.current_peer) + "batch_epoch" => processed_batch.start_epoch, + "score_adjustment" => action.to_string(), + "original_peer" => format!("{}",attempt.peer_id), + "new_peer" => format!("{}", processed_batch.current_peer) ); network.report_peer(attempt.peer_id, action); } else { @@ -460,11 +458,10 @@ impl SyncingChain { let action = PeerAction::MidToleranceError; debug!( self.log, "Re-processed batch validated by the same peer."; - "chain_id" => self.id, - "batch_id" => *processed_batch.id, - "score_adjustment" => action.to_string(), - "original_peer" => format!("{}",attempt.peer_id), - "new_peer" => format!("{}", processed_batch.current_peer) + "batch_epoch" => processed_batch.start_epoch, + "score_adjustment" => action.to_string(), + "original_peer" => format!("{}",attempt.peer_id), + "new_peer" => format!("{}", processed_batch.current_peer) ); network.report_peer(attempt.peer_id, action); } @@ -503,7 +500,7 @@ impl SyncingChain { // Find any pre-processed batches awaiting validation while !self.processed_batches.is_empty() { let past_batch = self.processed_batches.remove(0); - *self.to_be_processed_id = std::cmp::min(*self.to_be_processed_id, *past_batch.id); + self.processing_target = std::cmp::min(self.processing_target, past_batch.start_epoch); self.reprocess_batch(network, past_batch); } @@ -547,11 +544,10 @@ impl SyncingChain { batch.current_peer = new_peer.clone(); debug!(self.log, "Re-requesting batch"; - "chain_id" => self.id, - "start_slot" => batch.start_slot, + "start_slot" => batch.start_slot(), "end_slot" => batch.end_slot -1, // The -1 shows inclusive blocks - "id" => *batch.id, - "peer" => format!("{}", batch.current_peer), + "batch_epoch" => batch.start_epoch, + "peer" => batch.current_peer.to_string(), "retries" => 
batch.retries, "re-processes" => batch.reprocess_retries); self.send_batch(network, batch); @@ -587,12 +583,11 @@ impl SyncingChain { self.start_epoch = local_finalized_epoch; debug!(self.log, "Updating chain's progress"; - "chain_id" => self.id, "prev_completed_slot" => current_processed_slot, "new_completed_slot" => self.current_processed_slot()); // Re-index batches - *self.to_be_downloaded_id = 1; - *self.to_be_processed_id = 1; + self.to_be_downloaded = local_finalized_epoch; + self.processing_target = local_finalized_epoch; // remove any completed or processed batches self.completed_batches.clear(); @@ -616,7 +611,7 @@ impl SyncingChain { // do not request blocks if the chain is not syncing if let ChainSyncingState::Stopped = self.state { debug!(self.log, "Peer added to a non-syncing chain"; - "chain_id" => self.id, "peer_id" => format!("{}", peer_id)); + "peer_id" => format!("{}", peer_id)); return; } @@ -645,8 +640,7 @@ impl SyncingChain { ) -> Option { if let Some(batch) = self.pending_batches.remove(request_id) { debug!(self.log, "Batch failed. RPC Error"; - "chain_id" => self.id, - "id" => *batch.id, + "batch_epoch" => batch.start_epoch, "retries" => batch.retries, "peer" => format!("{:?}", peer_id)); @@ -683,17 +677,23 @@ impl SyncingChain { batch.current_peer = new_peer.clone(); debug!(self.log, "Re-Requesting batch"; - "chain_id" => self.id, - "start_slot" => batch.start_slot, + "start_slot" => batch.start_slot(), "end_slot" => batch.end_slot -1, // The -1 shows inclusive blocks - - "id" => *batch.id, - "peer" => format!("{:?}", batch.current_peer)); + "batch_epoch" => batch.start_epoch, + "peer" => batch.current_peer.to_string()); self.send_batch(network, batch); ProcessingResult::KeepChain } } + /// Returns true if this chain is currently syncing. 
+ pub fn is_syncing(&self) -> bool { + match self.state { + ChainSyncingState::Syncing => true, + ChainSyncingState::Stopped => false, + } + } + /// Attempts to request the next required batches from the peer pool if the chain is syncing. It will exhaust the peer /// pool and left over batches until the batch buffer is reached or all peers are exhausted. fn request_batches(&mut self, network: &mut SyncNetworkContext) { @@ -709,10 +709,9 @@ impl SyncingChain { if let Some(peer_id) = self.get_next_peer() { if let Some(batch) = self.get_next_batch(peer_id) { debug!(self.log, "Requesting batch"; - "chain_id" => self.id, - "start_slot" => batch.start_slot, - "end_slot" => batch.end_slot -1, // The -1 shows inclusive blocks - "id" => *batch.id, + "start_slot" => batch.start_slot(), + "end_slot" => batch.end_slot -1, // The -1 shows inclusive blocks + "batch_epoch" => batch.start_epoch, "peer" => format!("{}", batch.current_peer)); // send the batch self.send_batch(network, batch); @@ -765,22 +764,15 @@ impl SyncingChain { return None; } - // One is added to the start slot to begin one slot after the epoch boundary - let batch_start_slot = self - .start_epoch - .start_slot(slots_per_epoch) - .saturating_add(1u64) - + self.to_be_downloaded_id.saturating_sub(1) * blocks_per_batch; - // don't request batches beyond the target head slot - if batch_start_slot > self.target_head_slot { + if self.to_be_downloaded.start_slot(slots_per_epoch) > self.target_head_slot { return None; } // truncate the batch to the epoch containing the target head of the chain let batch_end_slot = std::cmp::min( // request either a batch containing the max number of blocks per batch - batch_start_slot + blocks_per_batch, + self.to_be_downloaded.start_slot(slots_per_epoch) + blocks_per_batch + 1, // or a batch of one epoch of blocks, which contains the `target_head_slot` self.target_head_slot .saturating_add(slots_per_epoch) @@ -788,28 +780,9 @@ impl SyncingChain { .start_slot(slots_per_epoch), ); - 
let batch_id = self.to_be_downloaded_id; - - // Find the next batch id. The largest of the next sequential id, or the next uncompleted - // id - let max_completed_id = self - .completed_batches - .iter() - .last() - .map(|x| x.id.0) - .unwrap_or_else(|| 0); - // TODO: Check if this is necessary - self.to_be_downloaded_id = BatchId(std::cmp::max( - self.to_be_downloaded_id.0 + 1, - max_completed_id + 1, - )); - - Some(Batch::new( - batch_id, - batch_start_slot, - batch_end_slot, - peer_id, - )) + let batch = Some(Batch::new(self.to_be_downloaded, batch_end_slot, peer_id)); + self.to_be_downloaded += EPOCHS_PER_BATCH; + batch } /// Requests the provided batch from the provided peer. @@ -827,14 +800,13 @@ impl SyncingChain { } Err(e) => { warn!(self.log, "Batch request failed"; - "chain_id" => self.id, - "start_slot" => batch.start_slot, - "end_slot" => batch.end_slot -1, // The -1 shows inclusive blocks - "id" => *batch.id, - "peer" => format!("{}", batch.current_peer), - "retries" => batch.retries, - "error" => e, - "re-processes" => batch.reprocess_retries); + "start_slot" => batch.start_slot(), + "end_slot" => batch.end_slot -1, // The -1 shows inclusive blocks + "start_epoch" => batch.start_epoch, + "peer" => batch.current_peer.to_string(), + "retries" => batch.retries, + "error" => e, + "re-processes" => batch.reprocess_retries); self.failed_batch(network, batch); } } diff --git a/beacon_node/network/src/sync/range_sync/chain_collection.rs b/beacon_node/network/src/sync/range_sync/chain_collection.rs index 5d8083a4207..86d31e636bd 100644 --- a/beacon_node/network/src/sync/range_sync/chain_collection.rs +++ b/beacon_node/network/src/sync/range_sync/chain_collection.rs @@ -4,17 +4,20 @@ //! with this struct to to simplify the logic of the other layers of sync. 
use super::chain::{ChainSyncingState, SyncingChain}; -use crate::sync::manager::SyncMessage; +use crate::beacon_processor::WorkEvent as BeaconWorkEvent; use crate::sync::network_context::SyncNetworkContext; use crate::sync::PeerSyncInfo; use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2_libp2p::{types::SyncState, NetworkGlobals, PeerId}; -use slog::{debug, error, info}; +use slog::{debug, error, info, o}; use std::sync::Arc; use tokio::sync::mpsc; use types::EthSpec; use types::{Epoch, Hash256, Slot}; +/// The number of head syncing chains to sync at a time. +const PARALLEL_HEAD_CHAINS: usize = 2; + /// The state of the long range/batch sync. #[derive(Clone)] pub enum RangeSyncState { @@ -205,8 +208,9 @@ impl ChainCollection { /// Updates the state of the chain collection. /// /// This removes any out-dated chains, swaps to any higher priority finalized chains and - /// updates the state of the collection. - pub fn update_finalized(&mut self, network: &mut SyncNetworkContext) { + /// updates the state of the collection. This starts head chains syncing if any are required to + /// do so. + pub fn update(&mut self, network: &mut SyncNetworkContext) { let local_epoch = { let local = match PeerSyncInfo::from_chain(&self.beacon_chain) { Some(local) => local, @@ -222,9 +226,25 @@ impl ChainCollection { local.finalized_epoch }; - // Remove any outdated finalized chains + // Remove any outdated finalized/head chains self.purge_outdated_chains(network); + // Choose the best finalized chain if one needs to be selected. + self.update_finalized_chains(network, local_epoch); + + if self.finalized_syncing_index().is_none() { + // Handle head syncing chains if there are no finalized chains left. + self.update_head_chains(network, local_epoch); + } + } + + /// This looks at all current finalized chains and decides if a new chain should be prioritised + /// or not. 
+ fn update_finalized_chains( + &mut self, + network: &mut SyncNetworkContext, + local_epoch: Epoch, + ) { // Check if any chains become the new syncing chain if let Some(index) = self.finalized_syncing_index() { // There is a current finalized chain syncing @@ -269,30 +289,90 @@ impl ChainCollection { head_root: chain.target_head_root, }; self.state = state; - } else { - // There are no finalized chains, update the state. - if self.head_chains.is_empty() { - self.state = RangeSyncState::Idle; - } else { - // for the syncing API, we find the minimal start_slot and the maximum - // target_slot of all head chains to report back. - - let (min_epoch, max_slot) = self.head_chains.iter().fold( - (Epoch::from(0u64), Slot::from(0u64)), - |(min, max), chain| { - ( - std::cmp::min(min, chain.start_epoch), - std::cmp::max(max, chain.target_head_slot), - ) - }, - ); - let head_state = RangeSyncState::Head { - start_slot: min_epoch.start_slot(T::EthSpec::slots_per_epoch()), - head_slot: max_slot, - }; - self.state = head_state; + } + } + + /// Start syncing any head chains if required. + fn update_head_chains( + &mut self, + network: &mut SyncNetworkContext, + local_epoch: Epoch, + ) { + // There are no finalized chains, update the state. + if self.head_chains.is_empty() { + self.state = RangeSyncState::Idle; + return; + } + + let mut currently_syncing = self + .head_chains + .iter() + .filter(|chain| chain.is_syncing()) + .count(); + let mut not_syncing = self.head_chains.len() - currently_syncing; + + // Find all head chains that are not currently syncing ordered by peer count. 
+ while currently_syncing <= PARALLEL_HEAD_CHAINS && not_syncing > 0 { + // Find the chain with the most peers and start syncing + if let Some((_index, chain)) = self + .head_chains + .iter_mut() + .filter(|chain| !chain.is_syncing()) + .enumerate() + .max_by_key(|(_index, chain)| chain.peer_pool.len()) + { + // start syncing this chain + debug!(self.log, "New head chain started syncing"; "new_target_root" => format!("{}", chain.target_head_root), "new_end_slot" => chain.target_head_slot, "new_start_epoch"=> chain.start_epoch); + chain.start_syncing(network, local_epoch); } + + // update variables + currently_syncing = self + .head_chains + .iter() + .filter(|chain| chain.is_syncing()) + .count(); + not_syncing = self.head_chains.len() - currently_syncing; } + + // Start + // for the syncing API, we find the minimal start_slot and the maximum + // target_slot of all head chains to report back. + + let (min_epoch, max_slot) = self + .head_chains + .iter() + .filter(|chain| chain.is_syncing()) + .fold( + (Epoch::from(0u64), Slot::from(0u64)), + |(min, max), chain| { + ( + std::cmp::min(min, chain.start_epoch), + std::cmp::max(max, chain.target_head_slot), + ) + }, + ); + let head_state = RangeSyncState::Head { + start_slot: min_epoch.start_slot(T::EthSpec::slots_per_epoch()), + head_slot: max_slot, + }; + self.state = head_state; + } + + /// This is called once a head chain has completed syncing. It removes all non-syncing head + /// chains and re-status their peers. + pub fn clear_head_chains(&mut self, network: &mut SyncNetworkContext) { + let log_ref = &self.log; + self.head_chains.retain(|chain| { + if !chain.is_syncing() + { + debug!(log_ref, "Removing old head chain"; "start_epoch" => chain.start_epoch, "end_slot" => chain.target_head_slot); + chain.status_peers(network); + false + } else { + true + } + }); } /// Add a new finalized chain to the collection. 
@@ -302,7 +382,7 @@ impl ChainCollection { target_head: Hash256, target_slot: Slot, peer_id: PeerId, - sync_send: mpsc::UnboundedSender>, + beacon_processor_send: mpsc::Sender>, ) { let chain_id = rand::random(); self.finalized_chains.push(SyncingChain::new( @@ -311,9 +391,9 @@ impl ChainCollection { target_slot, target_head, peer_id, - sync_send, + beacon_processor_send, self.beacon_chain.clone(), - self.log.clone(), + self.log.new(o!("chain" => chain_id)), )); } @@ -321,12 +401,11 @@ impl ChainCollection { #[allow(clippy::too_many_arguments)] pub fn new_head_chain( &mut self, - network: &mut SyncNetworkContext, remote_finalized_epoch: Epoch, target_head: Hash256, target_slot: Slot, peer_id: PeerId, - sync_send: mpsc::UnboundedSender>, + beacon_processor_send: mpsc::Sender>, ) { // remove the peer from any other head chains @@ -336,18 +415,16 @@ impl ChainCollection { self.head_chains.retain(|chain| !chain.peer_pool.is_empty()); let chain_id = rand::random(); - let mut new_head_chain = SyncingChain::new( + let new_head_chain = SyncingChain::new( chain_id, remote_finalized_epoch, target_slot, target_head, peer_id, - sync_send, + beacon_processor_send, self.beacon_chain.clone(), self.log.clone(), ); - // All head chains can sync simultaneously - new_head_chain.start_syncing(network, remote_finalized_epoch); self.head_chains.push(new_head_chain); } @@ -511,7 +588,7 @@ impl ChainCollection { debug!(self.log, "Chain was removed"; "start_epoch" => chain.start_epoch, "end_slot" => chain.target_head_slot); // update the state - self.update_finalized(network); + self.update(network); } /// Returns the index of finalized chain that is currently syncing. 
Returns `None` if no diff --git a/beacon_node/network/src/sync/range_sync/mod.rs b/beacon_node/network/src/sync/range_sync/mod.rs index b8782955979..85db6d378fe 100644 --- a/beacon_node/network/src/sync/range_sync/mod.rs +++ b/beacon_node/network/src/sync/range_sync/mod.rs @@ -8,6 +8,5 @@ mod range; mod sync_type; pub use batch::Batch; -pub use batch::BatchId; pub use chain::{ChainId, EPOCHS_PER_BATCH}; pub use range::RangeSync; diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index f6a1d80e475..59dfec16de6 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -42,10 +42,9 @@ use super::chain::{ChainId, ProcessingResult}; use super::chain_collection::{ChainCollection, RangeSyncState}; use super::sync_type::RangeSyncType; -use super::BatchId; -use crate::sync::block_processor::BatchProcessResult; -use crate::sync::manager::SyncMessage; +use crate::beacon_processor::WorkEvent as BeaconWorkEvent; use crate::sync::network_context::SyncNetworkContext; +use crate::sync::BatchProcessResult; use crate::sync::PeerSyncInfo; use crate::sync::RequestId; use beacon_chain::{BeaconChain, BeaconChainTypes}; @@ -54,7 +53,7 @@ use slog::{debug, error, trace}; use std::collections::HashSet; use std::sync::Arc; use tokio::sync::mpsc; -use types::{EthSpec, SignedBeaconBlock}; +use types::{Epoch, EthSpec, SignedBeaconBlock}; /// The primary object dealing with long range/batch syncing. This contains all the active and /// non-active chains that need to be processed before the syncing is considered complete. This @@ -69,9 +68,8 @@ pub struct RangeSync { /// finalized chain(s) complete, these peer's get STATUS'ed to update their head slot before /// the head chains are formed and downloaded. awaiting_head_peers: HashSet, - /// The sync manager channel, allowing the batch processor thread to callback the sync task - /// once complete. 
- sync_send: mpsc::UnboundedSender>, + /// A multi-threaded, non-blocking processor for applying messages to the beacon chain. + beacon_processor_send: mpsc::Sender>, /// The syncing logger. log: slog::Logger, } @@ -80,14 +78,14 @@ impl RangeSync { pub fn new( beacon_chain: Arc>, network_globals: Arc>, - sync_send: mpsc::UnboundedSender>, + beacon_processor_send: mpsc::Sender>, log: slog::Logger, ) -> Self { RangeSync { beacon_chain: beacon_chain.clone(), chains: ChainCollection::new(beacon_chain, network_globals, log.clone()), awaiting_head_peers: HashSet::new(), - sync_send, + beacon_processor_send, log, } } @@ -162,28 +160,28 @@ impl RangeSync { .chains .get_finalized_mut(remote_info.finalized_root, remote_finalized_slot) { - debug!(self.log, "Finalized chain exists, adding peer"; "peer_id" => format!("{:?}", peer_id), "target_root" => format!("{}", chain.target_head_root), "end_slot" => chain.target_head_slot, "start_epoch"=> chain.start_epoch); + debug!(self.log, "Finalized chain exists, adding peer"; "peer_id" => peer_id.to_string(), "target_root" => chain.target_head_root.to_string(), "targe_slot" => chain.target_head_slot); // add the peer to the chain's peer pool chain.add_peer(network, peer_id); // check if the new peer's addition will favour a new syncing chain. 
- self.chains.update_finalized(network); + self.chains.update(network); // update the global sync state if necessary self.chains.update_sync_state(); } else { // there is no finalized chain that matches this peer's last finalized target // create a new finalized chain - debug!(self.log, "New finalized chain added to sync"; "peer_id" => format!("{:?}", peer_id), "start_epoch" => local_finalized_slot, "end_slot" => remote_finalized_slot, "finalized_root" => format!("{}", remote_info.finalized_root)); + debug!(self.log, "New finalized chain added to sync"; "peer_id" => format!("{:?}", peer_id), "start_slot" => local_finalized_slot, "end_slot" => remote_finalized_slot, "finalized_root" => format!("{}", remote_info.finalized_root)); self.chains.new_finalized_chain( local_info.finalized_epoch, remote_info.finalized_root, remote_finalized_slot, peer_id, - self.sync_send.clone(), + self.beacon_processor_send.clone(), ); - self.chains.update_finalized(network); + self.chains.update(network); // update the global sync state self.chains.update_sync_state(); } @@ -223,15 +221,14 @@ impl RangeSync { debug!(self.log, "Creating a new syncing head chain"; "head_root" => format!("{}",remote_info.head_root), "start_epoch" => start_epoch, "head_slot" => remote_info.head_slot, "peer_id" => format!("{:?}", peer_id)); self.chains.new_head_chain( - network, start_epoch, remote_info.head_root, remote_info.head_slot, peer_id, - self.sync_send.clone(), + self.beacon_processor_send.clone(), ); } - self.chains.update_finalized(network); + self.chains.update(network); self.chains.update_sync_state(); } } @@ -272,7 +269,7 @@ impl RangeSync { &mut self, network: &mut SyncNetworkContext, chain_id: ChainId, - batch_id: BatchId, + epoch: Epoch, downloaded_blocks: Vec>, result: BatchProcessResult, ) { @@ -280,19 +277,13 @@ impl RangeSync { let mut downloaded_blocks = Some(downloaded_blocks); match self.chains.finalized_request(|chain| { - chain.on_batch_process_result( - network, - chain_id, - 
batch_id, - &mut downloaded_blocks, - &result, - ) + chain.on_batch_process_result(network, chain_id, epoch, &mut downloaded_blocks, &result) }) { Some((index, ProcessingResult::RemoveChain)) => { let chain = self.chains.remove_finalized_chain(index); debug!(self.log, "Finalized chain removed"; "start_epoch" => chain.start_epoch, "end_slot" => chain.target_head_slot); // update the state of the collection - self.chains.update_finalized(network); + self.chains.update(network); // the chain is complete, re-status it's peers chain.status_peers(network); @@ -320,7 +311,7 @@ impl RangeSync { chain.on_batch_process_result( network, chain_id, - batch_id, + epoch, &mut downloaded_blocks, &result, ) @@ -331,8 +322,12 @@ impl RangeSync { // the chain is complete, re-status it's peers and remove it chain.status_peers(network); + // Remove non-syncing head chains and re-status the peers + // This removes a build-up of potentially duplicate head chains. Any + // legitimate head chains will be re-established + self.chains.clear_head_chains(network); // update the state of the collection - self.chains.update_finalized(network); + self.chains.update(network); // update the global state and log any change self.chains.update_sync_state(); } @@ -340,7 +335,7 @@ impl RangeSync { None => { // This can happen if a chain gets purged due to being out of date whilst a // batch process is in progress. 
- debug!(self.log, "No chains match the block processing id"; "id" => *batch_id); + debug!(self.log, "No chains match the block processing id"; "batch_epoch" => epoch, "chain_id" => chain_id); } } } @@ -361,7 +356,7 @@ impl RangeSync { self.remove_peer(network, peer_id); // update the state of the collection - self.chains.update_finalized(network); + self.chains.update(network); // update the global state and inform the user self.chains.update_sync_state(); } diff --git a/beacon_node/rest_api/Cargo.toml b/beacon_node/rest_api/Cargo.toml index 6d023659c1a..38a5a1e7d55 100644 --- a/beacon_node/rest_api/Cargo.toml +++ b/beacon_node/rest_api/Cargo.toml @@ -34,7 +34,6 @@ hex = "0.4.2" parking_lot = "0.11.0" futures = "0.3.5" operation_pool = { path = "../operation_pool" } -rayon = "1.3.0" environment = { path = "../../lighthouse/environment" } uhttp_sse = "0.5.1" bus = "2.2.3" diff --git a/beacon_node/rest_api/src/advanced.rs b/beacon_node/rest_api/src/advanced.rs deleted file mode 100644 index 6ee14891fd0..00000000000 --- a/beacon_node/rest_api/src/advanced.rs +++ /dev/null @@ -1,34 +0,0 @@ -use crate::response_builder::ResponseBuilder; -use crate::ApiResult; -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use hyper::{Body, Request}; -use operation_pool::PersistedOperationPool; -use std::sync::Arc; - -/// Returns the `proto_array` fork choice struct, encoded as JSON. -/// -/// Useful for debugging or advanced inspection of the chain. -pub fn get_fork_choice( - req: Request, - beacon_chain: Arc>, -) -> ApiResult { - ResponseBuilder::new(&req)?.body_no_ssz( - &*beacon_chain - .fork_choice - .read() - .proto_array() - .core_proto_array(), - ) -} - -/// Returns the `PersistedOperationPool` struct. -/// -/// Useful for debugging or advanced inspection of the stored operations. 
-pub fn get_operation_pool( - req: Request, - beacon_chain: Arc>, -) -> ApiResult { - ResponseBuilder::new(&req)?.body(&PersistedOperationPool::from_operation_pool( - &beacon_chain.op_pool, - )) -} diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index 73c6bd1ea91..0ac95f7ca9b 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -1,14 +1,13 @@ use crate::helpers::*; -use crate::response_builder::ResponseBuilder; use crate::validator::get_state_for_epoch; -use crate::{ApiError, ApiResult, UrlQuery}; +use crate::Context; +use crate::{ApiError, UrlQuery}; use beacon_chain::{ observed_operations::ObservationOutcome, BeaconChain, BeaconChainTypes, StateSkipConfig, }; -use bus::BusReader; use futures::executor::block_on; use hyper::body::Bytes; -use hyper::{Body, Request, Response}; +use hyper::{Body, Request}; use rest_types::{ BlockResponse, CanonicalHeadResponse, Committee, HeadBeaconBlock, StateResponse, ValidatorRequest, ValidatorResponse, @@ -16,20 +15,20 @@ use rest_types::{ use std::io::Write; use std::sync::Arc; -use slog::{error, Logger}; +use slog::error; use types::{ AttesterSlashing, BeaconState, EthSpec, Hash256, ProposerSlashing, PublicKeyBytes, RelativeEpoch, SignedBeaconBlockHash, Slot, }; -/// HTTP handler to return a `BeaconBlock` at a given `root` or `slot`. +/// Returns a summary of the head of the beacon chain. 
pub fn get_head( - req: Request, - beacon_chain: Arc>, -) -> ApiResult { + ctx: Arc>, +) -> Result { + let beacon_chain = &ctx.beacon_chain; let chain_head = beacon_chain.head()?; - let head = CanonicalHeadResponse { + Ok(CanonicalHeadResponse { slot: chain_head.beacon_state.slot, block_root: chain_head.beacon_block_root, state_root: chain_head.beacon_state_root, @@ -51,33 +50,27 @@ pub fn get_head( .epoch .start_slot(T::EthSpec::slots_per_epoch()), previous_justified_block_root: chain_head.beacon_state.previous_justified_checkpoint.root, - }; - - ResponseBuilder::new(&req)?.body(&head) + }) } -/// HTTP handler to return a list of head BeaconBlocks. -pub fn get_heads( - req: Request, - beacon_chain: Arc>, -) -> ApiResult { - let heads = beacon_chain +/// Return the list of heads of the beacon chain. +pub fn get_heads(ctx: Arc>) -> Vec { + ctx.beacon_chain .heads() .into_iter() .map(|(beacon_block_root, beacon_block_slot)| HeadBeaconBlock { beacon_block_root, beacon_block_slot, }) - .collect::>(); - - ResponseBuilder::new(&req)?.body(&heads) + .collect() } /// HTTP handler to return a `BeaconBlock` at a given `root` or `slot`. pub fn get_block( - req: Request, - beacon_chain: Arc>, -) -> ApiResult { + req: Request>, + ctx: Arc>, +) -> Result, ApiError> { + let beacon_chain = &ctx.beacon_chain; let query_params = ["root", "slot"]; let (key, value) = UrlQuery::from_request(&req)?.first_of(&query_params)?; @@ -85,7 +78,7 @@ pub fn get_block( ("slot", value) => { let target = parse_slot(&value)?; - block_root_at_slot(&beacon_chain, target)?.ok_or_else(|| { + block_root_at_slot(beacon_chain, target)?.ok_or_else(|| { ApiError::NotFound(format!( "Unable to find SignedBeaconBlock for slot {:?}", target @@ -103,30 +96,26 @@ pub fn get_block( )) })?; - let response = BlockResponse { + Ok(BlockResponse { root: block_root, beacon_block: block, - }; - - ResponseBuilder::new(&req)?.body(&response) + }) } /// HTTP handler to return a `SignedBeaconBlock` root at a given `slot`. 
pub fn get_block_root( - req: Request, - beacon_chain: Arc>, -) -> ApiResult { + req: Request>, + ctx: Arc>, +) -> Result { let slot_string = UrlQuery::from_request(&req)?.only_one("slot")?; let target = parse_slot(&slot_string)?; - let root = block_root_at_slot(&beacon_chain, target)?.ok_or_else(|| { + block_root_at_slot(&ctx.beacon_chain, target)?.ok_or_else(|| { ApiError::NotFound(format!( "Unable to find SignedBeaconBlock for slot {:?}", target )) - })?; - - ResponseBuilder::new(&req)?.body(&root) + }) } fn make_sse_response_chunk(new_head_hash: SignedBeaconBlockHash) -> std::io::Result { @@ -140,43 +129,27 @@ fn make_sse_response_chunk(new_head_hash: SignedBeaconBlockHash) -> std::io::Res Ok(bytes) } -pub fn stream_forks( - log: Logger, - mut events: BusReader, -) -> ApiResult { +pub fn stream_forks(ctx: Arc>) -> Result { + let mut events = ctx.events.lock().add_rx(); let (mut sender, body) = Body::channel(); std::thread::spawn(move || { while let Ok(new_head_hash) = events.recv() { let chunk = match make_sse_response_chunk(new_head_hash) { Ok(chunk) => chunk, Err(e) => { - error!(log, "Failed to make SSE chunk"; "error" => e.to_string()); + error!(ctx.log, "Failed to make SSE chunk"; "error" => e.to_string()); sender.abort(); break; } }; - if let Err(bytes) = block_on(sender.send_data(chunk)) { - error!(log, "Couldn't stream piece {:?}", bytes); + match block_on(sender.send_data(chunk)) { + Err(e) if e.is_closed() => break, + Err(e) => error!(ctx.log, "Couldn't stream piece {:?}", e), + Ok(_) => (), } } }); - let response = Response::builder() - .status(200) - .header("Content-Type", "text/event-stream") - .header("Connection", "Keep-Alive") - .header("Cache-Control", "no-cache") - .header("Access-Control-Allow-Origin", "*") - .body(body) - .map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e)))?; - Ok(response) -} - -/// HTTP handler to return the `Fork` of the current head. 
-pub fn get_fork( - req: Request, - beacon_chain: Arc>, -) -> ApiResult { - ResponseBuilder::new(&req)?.body(&beacon_chain.head()?.beacon_state.fork) + Ok(body) } /// HTTP handler to which accepts a query string of a list of validator pubkeys and maps it to a @@ -185,9 +158,9 @@ pub fn get_fork( /// This method is limited to as many `pubkeys` that can fit in a URL. See `post_validators` for /// doing bulk requests. pub fn get_validators( - req: Request, - beacon_chain: Arc>, -) -> ApiResult { + req: Request>, + ctx: Arc>, +) -> Result, ApiError> { let query = UrlQuery::from_request(&req)?; let validator_pubkeys = query @@ -202,17 +175,14 @@ pub fn get_validators( None }; - let validators = - validator_responses_by_pubkey(beacon_chain, state_root_opt, validator_pubkeys)?; - - ResponseBuilder::new(&req)?.body(&validators) + validator_responses_by_pubkey(&ctx.beacon_chain, state_root_opt, validator_pubkeys) } /// HTTP handler to return all validators, each as a `ValidatorResponse`. pub fn get_all_validators( - req: Request, - beacon_chain: Arc>, -) -> ApiResult { + req: Request>, + ctx: Arc>, +) -> Result, ApiError> { let query = UrlQuery::from_request(&req)?; let state_root_opt = if let Some((_key, value)) = query.first_of_opt(&["state_root"]) { @@ -221,23 +191,21 @@ pub fn get_all_validators( None }; - let mut state = get_state_from_root_opt(&beacon_chain, state_root_opt)?; + let mut state = get_state_from_root_opt(&ctx.beacon_chain, state_root_opt)?; state.update_pubkey_cache()?; - let validators = state + state .validators .iter() .map(|validator| validator_response_by_pubkey(&state, validator.pubkey.clone())) - .collect::, _>>()?; - - ResponseBuilder::new(&req)?.body(&validators) + .collect::, _>>() } /// HTTP handler to return all active validators, each as a `ValidatorResponse`. 
pub fn get_active_validators( - req: Request, - beacon_chain: Arc>, -) -> ApiResult { + req: Request>, + ctx: Arc>, +) -> Result, ApiError> { let query = UrlQuery::from_request(&req)?; let state_root_opt = if let Some((_key, value)) = query.first_of_opt(&["state_root"]) { @@ -246,17 +214,15 @@ pub fn get_active_validators( None }; - let mut state = get_state_from_root_opt(&beacon_chain, state_root_opt)?; + let mut state = get_state_from_root_opt(&ctx.beacon_chain, state_root_opt)?; state.update_pubkey_cache()?; - let validators = state + state .validators .iter() .filter(|validator| validator.is_active_at(state.current_epoch())) .map(|validator| validator_response_by_pubkey(&state, validator.pubkey.clone())) - .collect::, _>>()?; - - ResponseBuilder::new(&req)?.body(&validators) + .collect::, _>>() } /// HTTP handler to which accepts a `ValidatorRequest` and returns a `ValidatorResponse` for @@ -264,17 +230,11 @@ pub fn get_active_validators( /// /// This method allows for a basically unbounded list of `pubkeys`, where as the `get_validators` /// request is limited by the max number of pubkeys you can fit in a URL. 
-pub async fn post_validators( - req: Request, - beacon_chain: Arc>, -) -> ApiResult { - let response_builder = ResponseBuilder::new(&req); - - let body = req.into_body(); - let chunks = hyper::body::to_bytes(body) - .await - .map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}", e)))?; - serde_json::from_slice::(&chunks) +pub fn post_validators( + req: Request>, + ctx: Arc>, +) -> Result, ApiError> { + serde_json::from_slice::(&req.into_body()) .map_err(|e| { ApiError::BadRequest(format!( "Unable to parse JSON into ValidatorRequest: {:?}", @@ -283,12 +243,11 @@ pub async fn post_validators( }) .and_then(|bulk_request| { validator_responses_by_pubkey( - beacon_chain, + &ctx.beacon_chain, bulk_request.state_root, bulk_request.pubkeys, ) }) - .and_then(|validators| response_builder?.body(&validators)) } /// Returns either the state given by `state_root_opt`, or the canonical head state if it is @@ -315,11 +274,11 @@ fn get_state_from_root_opt( /// Maps a vec of `validator_pubkey` to a vec of `ValidatorResponse`, using the state at the given /// `state_root`. If `state_root.is_none()`, uses the canonial head state. 
fn validator_responses_by_pubkey( - beacon_chain: Arc>, + beacon_chain: &BeaconChain, state_root_opt: Option, validator_pubkeys: Vec, ) -> Result, ApiError> { - let mut state = get_state_from_root_opt(&beacon_chain, state_root_opt)?; + let mut state = get_state_from_root_opt(beacon_chain, state_root_opt)?; state.update_pubkey_cache()?; validator_pubkeys @@ -370,24 +329,25 @@ fn validator_response_by_pubkey( /// HTTP handler pub fn get_committees( - req: Request, - beacon_chain: Arc>, -) -> ApiResult { + req: Request>, + ctx: Arc>, +) -> Result, ApiError> { let query = UrlQuery::from_request(&req)?; let epoch = query.epoch()?; - let mut state = get_state_for_epoch(&beacon_chain, epoch, StateSkipConfig::WithoutStateRoots)?; + let mut state = + get_state_for_epoch(&ctx.beacon_chain, epoch, StateSkipConfig::WithoutStateRoots)?; let relative_epoch = RelativeEpoch::from_epoch(state.current_epoch(), epoch).map_err(|e| { ApiError::ServerError(format!("Failed to get state suitable for epoch: {:?}", e)) })?; state - .build_committee_cache(relative_epoch, &beacon_chain.spec) + .build_committee_cache(relative_epoch, &ctx.beacon_chain.spec) .map_err(|e| ApiError::ServerError(format!("Unable to build committee cache: {:?}", e)))?; - let committees = state + Ok(state .get_beacon_committees_at_epoch(relative_epoch) .map_err(|e| ApiError::ServerError(format!("Unable to get all committees: {:?}", e)))? .into_iter() @@ -396,9 +356,7 @@ pub fn get_committees( index: c.index, committee: c.committee.to_vec(), }) - .collect::>(); - - ResponseBuilder::new(&req)?.body(&committees) + .collect::>()) } /// HTTP handler to return a `BeaconState` at a given `root` or `slot`. @@ -406,10 +364,10 @@ pub fn get_committees( /// Will not return a state if the request slot is in the future. Will return states higher than /// the current head by skipping slots. 
pub fn get_state( - req: Request, - beacon_chain: Arc>, -) -> ApiResult { - let head_state = beacon_chain.head()?.beacon_state; + req: Request>, + ctx: Arc>, +) -> Result, ApiError> { + let head_state = ctx.beacon_chain.head()?.beacon_state; let (key, value) = match UrlQuery::from_request(&req) { Ok(query) => { @@ -427,11 +385,12 @@ pub fn get_state( }; let (root, state): (Hash256, BeaconState) = match (key.as_ref(), value) { - ("slot", value) => state_at_slot(&beacon_chain, parse_slot(&value)?)?, + ("slot", value) => state_at_slot(&ctx.beacon_chain, parse_slot(&value)?)?, ("root", value) => { let root = &parse_root(&value)?; - let state = beacon_chain + let state = ctx + .beacon_chain .store .get_state(root, None)? .ok_or_else(|| ApiError::NotFound(format!("No state for root: {:?}", root)))?; @@ -441,12 +400,10 @@ pub fn get_state( _ => return Err(ApiError::ServerError("Unexpected query parameter".into())), }; - let response = StateResponse { + Ok(StateResponse { root, beacon_state: state, - }; - - ResponseBuilder::new(&req)?.body(&response) + }) } /// HTTP handler to return a `BeaconState` root at a given `slot`. @@ -454,15 +411,13 @@ pub fn get_state( /// Will not return a state if the request slot is in the future. Will return states higher than /// the current head by skipping slots. pub fn get_state_root( - req: Request, - beacon_chain: Arc>, -) -> ApiResult { + req: Request>, + ctx: Arc>, +) -> Result { let slot_string = UrlQuery::from_request(&req)?.only_one("slot")?; let slot = parse_slot(&slot_string)?; - let root = state_root_at_slot(&beacon_chain, slot, StateSkipConfig::WithStateRoots)?; - - ResponseBuilder::new(&req)?.body(&root) + state_root_at_slot(&ctx.beacon_chain, slot, StateSkipConfig::WithStateRoots) } /// HTTP handler to return a `BeaconState` at the genesis block. @@ -470,50 +425,28 @@ pub fn get_state_root( /// This is an undocumented convenience method used during testing. For production, simply do a /// state request at slot 0. 
pub fn get_genesis_state( - req: Request, - beacon_chain: Arc>, -) -> ApiResult { - let (_root, state) = state_at_slot(&beacon_chain, Slot::new(0))?; - - ResponseBuilder::new(&req)?.body(&state) -} - -/// Read the genesis time from the current beacon chain state. -pub fn get_genesis_time( - req: Request, - beacon_chain: Arc>, -) -> ApiResult { - ResponseBuilder::new(&req)?.body(&beacon_chain.head_info()?.genesis_time) -} - -/// Read the `genesis_validators_root` from the current beacon chain state. -pub fn get_genesis_validators_root( - req: Request, - beacon_chain: Arc>, -) -> ApiResult { - ResponseBuilder::new(&req)?.body(&beacon_chain.head_info()?.genesis_validators_root) + ctx: Arc>, +) -> Result, ApiError> { + state_at_slot(&ctx.beacon_chain, Slot::new(0)).map(|(_root, state)| state) } -pub async fn proposer_slashing( - req: Request, - beacon_chain: Arc>, -) -> ApiResult { - let response_builder = ResponseBuilder::new(&req); - +pub fn proposer_slashing( + req: Request>, + ctx: Arc>, +) -> Result { let body = req.into_body(); - let chunks = hyper::body::to_bytes(body) - .await - .map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}", e)))?; - serde_json::from_slice::(&chunks) + serde_json::from_slice::(&body) .map_err(|e| format!("Unable to parse JSON into ProposerSlashing: {:?}", e)) .and_then(move |proposer_slashing| { - if beacon_chain.eth1_chain.is_some() { - let obs_outcome = beacon_chain + if ctx.beacon_chain.eth1_chain.is_some() { + let obs_outcome = ctx + .beacon_chain .verify_proposer_slashing_for_gossip(proposer_slashing) .map_err(|e| format!("Error while verifying proposer slashing: {:?}", e))?; if let ObservationOutcome::New(verified_proposer_slashing) = obs_outcome { - beacon_chain.import_proposer_slashing(verified_proposer_slashing); + ctx.beacon_chain + .import_proposer_slashing(verified_proposer_slashing); Ok(()) } else { Err("Proposer slashing for that validator index already known".into()) @@ -522,22 +455,17 @@ pub async 
fn proposer_slashing( Err("Cannot insert proposer slashing on node without Eth1 connection.".to_string()) } }) - .map_err(ApiError::BadRequest) - .and_then(|_| response_builder?.body(&true)) -} + .map_err(ApiError::BadRequest)?; -pub async fn attester_slashing( - req: Request, - beacon_chain: Arc>, -) -> ApiResult { - let response_builder = ResponseBuilder::new(&req); + Ok(true) +} +pub fn attester_slashing( + req: Request>, + ctx: Arc>, +) -> Result { let body = req.into_body(); - let chunks = hyper::body::to_bytes(body) - .await - .map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}", e)))?; - - serde_json::from_slice::>(&chunks) + serde_json::from_slice::>(&body) .map_err(|e| { ApiError::BadRequest(format!( "Unable to parse JSON into AttesterSlashing: {:?}", @@ -545,13 +473,13 @@ pub async fn attester_slashing( )) }) .and_then(move |attester_slashing| { - if beacon_chain.eth1_chain.is_some() { - beacon_chain + if ctx.beacon_chain.eth1_chain.is_some() { + ctx.beacon_chain .verify_attester_slashing_for_gossip(attester_slashing) .map_err(|e| format!("Error while verifying attester slashing: {:?}", e)) .and_then(|outcome| { if let ObservationOutcome::New(verified_attester_slashing) = outcome { - beacon_chain + ctx.beacon_chain .import_attester_slashing(verified_attester_slashing) .map_err(|e| { format!("Error while importing attester slashing: {:?}", e) @@ -566,6 +494,7 @@ pub async fn attester_slashing( "Cannot insert attester slashing on node without Eth1 connection.".to_string(), )) } - }) - .and_then(|_| response_builder?.body(&true)) + })?; + + Ok(true) } diff --git a/beacon_node/rest_api/src/consensus.rs b/beacon_node/rest_api/src/consensus.rs index a006b379f70..d82b05b7a7f 100644 --- a/beacon_node/rest_api/src/consensus.rs +++ b/beacon_node/rest_api/src/consensus.rs @@ -1,8 +1,7 @@ use crate::helpers::*; -use crate::response_builder::ResponseBuilder; -use crate::{ApiError, ApiResult, UrlQuery}; -use beacon_chain::{BeaconChain, 
BeaconChainTypes}; -use hyper::{Body, Request}; +use crate::{ApiError, Context, UrlQuery}; +use beacon_chain::BeaconChainTypes; +use hyper::Request; use rest_types::{IndividualVotesRequest, IndividualVotesResponse}; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -50,38 +49,31 @@ impl Into for TotalBalances { /// HTTP handler return a `VoteCount` for some given `Epoch`. pub fn get_vote_count( - req: Request, - beacon_chain: Arc>, -) -> ApiResult { + req: Request>, + ctx: Arc>, +) -> Result { let query = UrlQuery::from_request(&req)?; let epoch = query.epoch()?; // This is the last slot of the given epoch (one prior to the first slot of the next epoch). let target_slot = (epoch + 1).start_slot(T::EthSpec::slots_per_epoch()) - 1; - let (_root, state) = state_at_slot(&beacon_chain, target_slot)?; - let spec = &beacon_chain.spec; + let (_root, state) = state_at_slot(&ctx.beacon_chain, target_slot)?; + let spec = &ctx.beacon_chain.spec; let mut validator_statuses = ValidatorStatuses::new(&state, spec)?; validator_statuses.process_attestations(&state, spec)?; - let report: VoteCount = validator_statuses.total_balances.into(); - - ResponseBuilder::new(&req)?.body(&report) + Ok(validator_statuses.total_balances.into()) } -pub async fn post_individual_votes( - req: Request, - beacon_chain: Arc>, -) -> ApiResult { - let response_builder = ResponseBuilder::new(&req); - +pub fn post_individual_votes( + req: Request>, + ctx: Arc>, +) -> Result, ApiError> { let body = req.into_body(); - let chunks = hyper::body::to_bytes(body) - .await - .map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}", e)))?; - serde_json::from_slice::(&chunks) + serde_json::from_slice::(&body) .map_err(|e| { ApiError::BadRequest(format!( "Unable to parse JSON into ValidatorDutiesRequest: {:?}", @@ -94,8 +86,8 @@ pub async fn post_individual_votes( // This is the last slot of the given epoch (one prior to the first slot of the next epoch). 
let target_slot = (epoch + 1).start_slot(T::EthSpec::slots_per_epoch()) - 1; - let (_root, mut state) = state_at_slot(&beacon_chain, target_slot)?; - let spec = &beacon_chain.spec; + let (_root, mut state) = state_at_slot(&ctx.beacon_chain, target_slot)?; + let spec = &ctx.beacon_chain.spec; let mut validator_statuses = ValidatorStatuses::new(&state, spec)?; validator_statuses.process_attestations(&state, spec)?; @@ -135,5 +127,4 @@ pub async fn post_individual_votes( }) .collect::, _>>() }) - .and_then(|votes| response_builder?.body_no_ssz(&votes)) } diff --git a/beacon_node/rest_api/src/helpers.rs b/beacon_node/rest_api/src/helpers.rs index 51a12d1bf09..66b5bd1a0c1 100644 --- a/beacon_node/rest_api/src/helpers.rs +++ b/beacon_node/rest_api/src/helpers.rs @@ -1,9 +1,7 @@ -use crate::{ApiError, ApiResult, NetworkChannel}; +use crate::{ApiError, NetworkChannel}; use beacon_chain::{BeaconChain, BeaconChainTypes, StateSkipConfig}; use bls::PublicKeyBytes; use eth2_libp2p::PubsubMessage; -use http::header; -use hyper::{Body, Request}; use itertools::process_results; use network::NetworkMessage; use ssz::Decode; @@ -41,21 +39,6 @@ pub fn parse_committee_index(string: &str) -> Result { .map_err(|e| ApiError::BadRequest(format!("Unable to parse committee index: {:?}", e))) } -/// Checks the provided request to ensure that the `content-type` header. -/// -/// The content-type header should either be omitted, in which case JSON is assumed, or it should -/// explicitly specify `application/json`. If anything else is provided, an error is returned. -pub fn check_content_type_for_json(req: &Request) -> Result<(), ApiError> { - match req.headers().get(header::CONTENT_TYPE) { - Some(h) if h == "application/json" => Ok(()), - Some(h) => Err(ApiError::BadRequest(format!( - "The provided content-type {:?} is not available, this endpoint only supports json.", - h - ))), - _ => Ok(()), - } -} - /// Parse an SSZ object from some hex-encoded bytes. 
/// /// E.g., A signature is `"0x0000000000000000000000000000000000000000000000000000000000000000"` @@ -228,14 +211,8 @@ pub fn state_root_at_slot( } } -pub fn implementation_pending_response(_req: Request) -> ApiResult { - Err(ApiError::NotImplemented( - "API endpoint has not yet been implemented, but is planned to be soon.".to_owned(), - )) -} - pub fn publish_beacon_block_to_network( - chan: NetworkChannel, + chan: &NetworkChannel, block: SignedBeaconBlock, ) -> Result<(), ApiError> { // send the block via SSZ encoding diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 988a0910c7f..405e08e217d 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -1,22 +1,15 @@ #[macro_use] -mod macros; -#[macro_use] extern crate lazy_static; +mod router; extern crate network as client_network; -mod advanced; mod beacon; pub mod config; mod consensus; -mod error; mod helpers; mod lighthouse; mod metrics; -mod network; mod node; -mod response_builder; -mod router; -mod spec; mod url_query; mod validator; @@ -24,7 +17,6 @@ use beacon_chain::{BeaconChain, BeaconChainTypes}; use bus::Bus; use client_network::NetworkMessage; pub use config::ApiEncodingFormat; -use error::{ApiError, ApiResult}; use eth2_config::Eth2Config; use eth2_libp2p::NetworkGlobals; use futures::future::TryFutureExt; @@ -32,6 +24,7 @@ use hyper::server::conn::AddrStream; use hyper::service::{make_service_fn, service_fn}; use hyper::{Body, Request, Server}; use parking_lot::Mutex; +use rest_types::ApiError; use slog::{info, warn}; use std::net::SocketAddr; use std::path::PathBuf; @@ -42,6 +35,7 @@ use url_query::UrlQuery; pub use crate::helpers::parse_pubkey_bytes; pub use config::Config; +pub use router::Context; pub type NetworkChannel = mpsc::UnboundedSender>; @@ -63,36 +57,28 @@ pub fn start_server( events: Arc>>, ) -> Result { let log = executor.log(); - let inner_log = log.clone(); - let rest_api_config = Arc::new(config.clone()); let 
eth2_config = Arc::new(eth2_config); + let context = Arc::new(Context { + executor: executor.clone(), + config: config.clone(), + beacon_chain, + network_globals: network_info.network_globals.clone(), + network_chan: network_info.network_chan, + eth2_config, + log: log.clone(), + db_path, + freezer_db_path, + events, + }); + // Define the function that will build the request handler. let make_service = make_service_fn(move |_socket: &AddrStream| { - let beacon_chain = beacon_chain.clone(); - let log = inner_log.clone(); - let rest_api_config = rest_api_config.clone(); - let eth2_config = eth2_config.clone(); - let network_globals = network_info.network_globals.clone(); - let network_channel = network_info.network_chan.clone(); - let db_path = db_path.clone(); - let freezer_db_path = freezer_db_path.clone(); - let events = events.clone(); + let ctx = context.clone(); async move { Ok::<_, hyper::Error>(service_fn(move |req: Request| { - router::route( - req, - beacon_chain.clone(), - network_globals.clone(), - network_channel.clone(), - rest_api_config.clone(), - eth2_config.clone(), - log.clone(), - db_path.clone(), - freezer_db_path.clone(), - events.clone(), - ) + router::on_http_request(req, ctx.clone()) })) } }); diff --git a/beacon_node/rest_api/src/lighthouse.rs b/beacon_node/rest_api/src/lighthouse.rs index 556046ab376..4d0fae926df 100644 --- a/beacon_node/rest_api/src/lighthouse.rs +++ b/beacon_node/rest_api/src/lighthouse.rs @@ -1,24 +1,16 @@ //! This contains a collection of lighthouse specific HTTP endpoints. -use crate::response_builder::ResponseBuilder; -use crate::ApiResult; -use eth2_libp2p::{NetworkGlobals, PeerInfo}; -use hyper::{Body, Request}; +use crate::{ApiError, Context}; +use beacon_chain::BeaconChainTypes; +use eth2_libp2p::PeerInfo; use serde::Serialize; use std::sync::Arc; use types::EthSpec; -/// The syncing state of the beacon node. 
-pub fn syncing( - req: Request, - network_globals: Arc>, -) -> ApiResult { - ResponseBuilder::new(&req)?.body_no_ssz(&network_globals.sync_state()) -} - /// Returns all known peers and corresponding information -pub fn peers(req: Request, network_globals: Arc>) -> ApiResult { - let peers: Vec> = network_globals +pub fn peers(ctx: Arc>) -> Result>, ApiError> { + Ok(ctx + .network_globals .peers .read() .peers() @@ -26,16 +18,15 @@ pub fn peers(req: Request, network_globals: Arc( - req: Request, - network_globals: Arc>, -) -> ApiResult { - let peers: Vec> = network_globals +pub fn connected_peers( + ctx: Arc>, +) -> Result>, ApiError> { + Ok(ctx + .network_globals .peers .read() .connected_peers() @@ -43,14 +34,13 @@ pub fn connected_peers( peer_id: peer_id.to_string(), peer_info: peer_info.clone(), }) - .collect(); - ResponseBuilder::new(&req)?.body_no_ssz(&peers) + .collect()) } /// Information returned by `peers` and `connected_peers`. #[derive(Clone, Debug, Serialize)] #[serde(bound = "T: EthSpec")] -struct Peer { +pub struct Peer { /// The Peer's ID peer_id: String, /// The PeerInfo associated with the peer. diff --git a/beacon_node/rest_api/src/macros.rs b/beacon_node/rest_api/src/macros.rs deleted file mode 100644 index f43224e5db6..00000000000 --- a/beacon_node/rest_api/src/macros.rs +++ /dev/null @@ -1,11 +0,0 @@ -macro_rules! 
try_future { - ($expr:expr) => { - match $expr { - core::result::Result::Ok(val) => val, - core::result::Result::Err(err) => return Err(std::convert::From::from(err)), - } - }; - ($expr:expr,) => { - $crate::try_future!($expr) - }; -} diff --git a/beacon_node/rest_api/src/metrics.rs b/beacon_node/rest_api/src/metrics.rs index 87b4f628576..4b1ba737d7a 100644 --- a/beacon_node/rest_api/src/metrics.rs +++ b/beacon_node/rest_api/src/metrics.rs @@ -1,42 +1,38 @@ -use crate::response_builder::ResponseBuilder; -use crate::{ApiError, ApiResult}; -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use hyper::{Body, Request}; +use crate::{ApiError, Context}; +use beacon_chain::BeaconChainTypes; use lighthouse_metrics::{Encoder, TextEncoder}; use rest_types::Health; -use std::path::PathBuf; use std::sync::Arc; pub use lighthouse_metrics::*; lazy_static! { + pub static ref BEACON_HTTP_API_REQUESTS_TOTAL: Result = + try_create_int_counter_vec( + "beacon_http_api_requests_total", + "Count of HTTP requests received", + &["endpoint"] + ); + pub static ref BEACON_HTTP_API_SUCCESS_TOTAL: Result = + try_create_int_counter_vec( + "beacon_http_api_success_total", + "Count of HTTP requests that returned 200 OK", + &["endpoint"] + ); + pub static ref BEACON_HTTP_API_ERROR_TOTAL: Result = try_create_int_counter_vec( + "beacon_http_api_error_total", + "Count of HTTP that did not return 200 OK", + &["endpoint"] + ); + pub static ref BEACON_HTTP_API_TIMES_TOTAL: Result = try_create_histogram_vec( + "beacon_http_api_times_total", + "Duration to process HTTP requests", + &["endpoint"] + ); pub static ref REQUEST_RESPONSE_TIME: Result = try_create_histogram( "http_server_request_duration_seconds", "Time taken to build a response to a HTTP request" ); - pub static ref REQUEST_COUNT: Result = try_create_int_counter( - "http_server_request_total", - "Total count of HTTP requests received" - ); - pub static ref SUCCESS_COUNT: Result = try_create_int_counter( - "http_server_success_total", - "Total 
count of HTTP 200 responses sent" - ); - pub static ref VALIDATOR_GET_BLOCK_REQUEST_RESPONSE_TIME: Result = - try_create_histogram( - "http_server_validator_block_get_request_duration_seconds", - "Time taken to respond to GET /validator/block" - ); - pub static ref VALIDATOR_GET_ATTESTATION_REQUEST_RESPONSE_TIME: Result = - try_create_histogram( - "http_server_validator_attestation_get_request_duration_seconds", - "Time taken to respond to GET /validator/attestation" - ); - pub static ref VALIDATOR_GET_DUTIES_REQUEST_RESPONSE_TIME: Result = - try_create_histogram( - "http_server_validator_duties_get_request_duration_seconds", - "Time taken to respond to GET /validator/duties" - ); pub static ref PROCESS_NUM_THREADS: Result = try_create_int_gauge( "process_num_threads", "Number of threads used by the current process" @@ -77,11 +73,8 @@ lazy_static! { /// /// This is a HTTP handler method. pub fn get_prometheus( - req: Request, - beacon_chain: Arc>, - db_path: PathBuf, - freezer_db_path: PathBuf, -) -> ApiResult { + ctx: Arc>, +) -> std::result::Result { let mut buffer = vec![]; let encoder = TextEncoder::new(); @@ -101,9 +94,9 @@ pub fn get_prometheus( // using `lighthouse_metrics::gather(..)` to collect the global `DEFAULT_REGISTRY` metrics into // a string that can be returned via HTTP. - slot_clock::scrape_for_metrics::(&beacon_chain.slot_clock); - store::scrape_for_metrics(&db_path, &freezer_db_path); - beacon_chain::scrape_for_metrics(&beacon_chain); + slot_clock::scrape_for_metrics::(&ctx.beacon_chain.slot_clock); + store::scrape_for_metrics(&ctx.db_path, &ctx.freezer_db_path); + beacon_chain::scrape_for_metrics(&ctx.beacon_chain); eth2_libp2p::scrape_discovery_metrics(); // This will silently fail if we are unable to observe the health. 
This is desired behaviour @@ -133,6 +126,5 @@ pub fn get_prometheus( .unwrap(); String::from_utf8(buffer) - .map(|string| ResponseBuilder::new(&req)?.body_text(string)) - .map_err(|e| ApiError::ServerError(format!("Failed to encode prometheus info: {:?}", e)))? + .map_err(|e| ApiError::ServerError(format!("Failed to encode prometheus info: {:?}", e))) } diff --git a/beacon_node/rest_api/src/network.rs b/beacon_node/rest_api/src/network.rs deleted file mode 100644 index ae2486d3415..00000000000 --- a/beacon_node/rest_api/src/network.rs +++ /dev/null @@ -1,72 +0,0 @@ -use crate::error::ApiResult; -use crate::response_builder::ResponseBuilder; -use crate::NetworkGlobals; -use beacon_chain::BeaconChainTypes; -use eth2_libp2p::{Multiaddr, PeerId}; -use hyper::{Body, Request}; -use std::sync::Arc; - -/// HTTP handler to return the list of libp2p multiaddr the client is listening on. -/// -/// Returns a list of `Multiaddr`, serialized according to their `serde` impl. -pub fn get_listen_addresses( - req: Request, - network: Arc>, -) -> ApiResult { - let multiaddresses: Vec = network.listen_multiaddrs(); - ResponseBuilder::new(&req)?.body_no_ssz(&multiaddresses) -} - -/// HTTP handler to return the network port the client is listening on. -/// -/// Returns the TCP port number in its plain form (which is also valid JSON serialization) -pub fn get_listen_port( - req: Request, - network: Arc>, -) -> ApiResult { - ResponseBuilder::new(&req)?.body(&network.listen_port_tcp()) -} - -/// HTTP handler to return the Discv5 ENR from the client's libp2p service. -/// -/// ENR is encoded as base64 string. -pub fn get_enr( - req: Request, - network: Arc>, -) -> ApiResult { - ResponseBuilder::new(&req)?.body_no_ssz(&network.local_enr().to_base64()) -} - -/// HTTP handler to return the `PeerId` from the client's libp2p service. -/// -/// PeerId is encoded as base58 string. 
-pub fn get_peer_id( - req: Request, - network: Arc>, -) -> ApiResult { - ResponseBuilder::new(&req)?.body_no_ssz(&network.local_peer_id().to_base58()) -} - -/// HTTP handler to return the number of peers connected in the client's libp2p service. -pub fn get_peer_count( - req: Request, - network: Arc>, -) -> ApiResult { - ResponseBuilder::new(&req)?.body(&network.connected_peers()) -} - -/// HTTP handler to return the list of peers connected to the client's libp2p service. -/// -/// Peers are presented as a list of `PeerId::to_string()`. -pub fn get_peer_list( - req: Request, - network: Arc>, -) -> ApiResult { - let connected_peers: Vec = network - .peers - .read() - .connected_peer_ids() - .map(PeerId::to_string) - .collect(); - ResponseBuilder::new(&req)?.body_no_ssz(&connected_peers) -} diff --git a/beacon_node/rest_api/src/node.rs b/beacon_node/rest_api/src/node.rs index e779bb09078..bd5615de34d 100644 --- a/beacon_node/rest_api/src/node.rs +++ b/beacon_node/rest_api/src/node.rs @@ -1,23 +1,19 @@ -use crate::response_builder::ResponseBuilder; -use crate::{ApiError, ApiResult}; -use eth2_libp2p::{types::SyncState, NetworkGlobals}; -use hyper::{Body, Request}; -use lighthouse_version::version_with_platform; -use rest_types::{Health, SyncingResponse, SyncingStatus}; +use crate::{ApiError, Context}; +use beacon_chain::BeaconChainTypes; +use eth2_libp2p::types::SyncState; +use rest_types::{SyncingResponse, SyncingStatus}; use std::sync::Arc; -use types::{EthSpec, Slot}; +use types::Slot; -/// Read the version string from the current Lighthouse build. -pub fn get_version(req: Request) -> ApiResult { - ResponseBuilder::new(&req)?.body_no_ssz(&version_with_platform()) -} +/// Returns a syncing status. +pub fn syncing(ctx: Arc>) -> Result { + let current_slot = ctx + .beacon_chain + .head_info() + .map_err(|e| ApiError::ServerError(format!("Unable to read head slot: {:?}", e)))? 
+ .slot; -pub fn syncing( - req: Request, - network: Arc>, - current_slot: Slot, -) -> ApiResult { - let (starting_slot, highest_slot) = match network.sync_state() { + let (starting_slot, highest_slot) = match ctx.network_globals.sync_state() { SyncState::SyncingFinalized { start_slot, head_slot, @@ -36,14 +32,8 @@ pub fn syncing( highest_slot, }; - ResponseBuilder::new(&req)?.body(&SyncingResponse { - is_syncing: network.is_syncing(), + Ok(SyncingResponse { + is_syncing: ctx.network_globals.is_syncing(), sync_status, }) } - -pub fn get_health(req: Request) -> ApiResult { - let health = Health::observe().map_err(ApiError::ServerError)?; - - ResponseBuilder::new(&req)?.body_no_ssz(&health) -} diff --git a/beacon_node/rest_api/src/response_builder.rs b/beacon_node/rest_api/src/response_builder.rs deleted file mode 100644 index 2377167915b..00000000000 --- a/beacon_node/rest_api/src/response_builder.rs +++ /dev/null @@ -1,83 +0,0 @@ -use super::{ApiError, ApiResult}; -use crate::config::ApiEncodingFormat; -use hyper::header; -use hyper::{Body, Request, Response, StatusCode}; -use serde::Serialize; -use ssz::Encode; - -pub struct ResponseBuilder { - encoding: ApiEncodingFormat, -} - -impl ResponseBuilder { - pub fn new(req: &Request) -> Result { - let accept_header: String = req - .headers() - .get(header::ACCEPT) - .map_or(Ok(""), |h| h.to_str()) - .map_err(|e| { - ApiError::BadRequest(format!( - "The Accept header contains invalid characters: {:?}", - e - )) - }) - .map(String::from)?; - - // JSON is our default encoding, unless something else is requested. 
- let encoding = ApiEncodingFormat::from(accept_header.as_str()); - Ok(Self { encoding }) - } - - pub fn body(self, item: &T) -> ApiResult { - match self.encoding { - ApiEncodingFormat::SSZ => Response::builder() - .status(StatusCode::OK) - .header("content-type", "application/ssz") - .body(Body::from(item.as_ssz_bytes())) - .map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e))), - _ => self.body_no_ssz(item), - } - } - - pub fn body_no_ssz(self, item: &T) -> ApiResult { - let (body, content_type) = match self.encoding { - ApiEncodingFormat::JSON => ( - Body::from(serde_json::to_string(&item).map_err(|e| { - ApiError::ServerError(format!( - "Unable to serialize response body as JSON: {:?}", - e - )) - })?), - "application/json", - ), - ApiEncodingFormat::SSZ => { - return Err(ApiError::UnsupportedType( - "Response cannot be encoded as SSZ.".into(), - )); - } - ApiEncodingFormat::YAML => ( - Body::from(serde_yaml::to_string(&item).map_err(|e| { - ApiError::ServerError(format!( - "Unable to serialize response body as YAML: {:?}", - e - )) - })?), - "application/yaml", - ), - }; - - Response::builder() - .status(StatusCode::OK) - .header("content-type", content_type) - .body(body) - .map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e))) - } - - pub fn body_text(self, text: String) -> ApiResult { - Response::builder() - .status(StatusCode::OK) - .header("content-type", "text/plain; charset=utf-8") - .body(Body::from(text)) - .map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e))) - } -} diff --git a/beacon_node/rest_api/src/router.rs b/beacon_node/rest_api/src/router.rs index a66094db13f..bed7ba77aa6 100644 --- a/beacon_node/rest_api/src/router.rs +++ b/beacon_node/rest_api/src/router.rs @@ -1,241 +1,322 @@ use crate::{ - advanced, beacon, config::Config, consensus, error::ApiError, helpers, lighthouse, metrics, - network, node, spec, validator, NetworkChannel, + beacon, config::Config, 
consensus, lighthouse, metrics, node, validator, NetworkChannel, }; use beacon_chain::{BeaconChain, BeaconChainTypes}; use bus::Bus; +use environment::TaskExecutor; use eth2_config::Eth2Config; -use eth2_libp2p::NetworkGlobals; +use eth2_libp2p::{NetworkGlobals, PeerId}; use hyper::header::HeaderValue; use hyper::{Body, Method, Request, Response}; +use lighthouse_version::version_with_platform; +use operation_pool::PersistedOperationPool; use parking_lot::Mutex; +use rest_types::{ApiError, Handler, Health}; use slog::debug; use std::path::PathBuf; use std::sync::Arc; use std::time::Instant; -use types::{SignedBeaconBlockHash, Slot}; +use types::{EthSpec, SignedBeaconBlockHash}; -// Allowing more than 7 arguments. -#[allow(clippy::too_many_arguments)] -pub async fn route( +pub struct Context { + pub executor: TaskExecutor, + pub config: Config, + pub beacon_chain: Arc>, + pub network_globals: Arc>, + pub network_chan: NetworkChannel, + pub eth2_config: Arc, + pub log: slog::Logger, + pub db_path: PathBuf, + pub freezer_db_path: PathBuf, + pub events: Arc>>, +} + +pub async fn on_http_request( req: Request, - beacon_chain: Arc>, - network_globals: Arc>, - network_channel: NetworkChannel, - rest_api_config: Arc, - eth2_config: Arc, - local_log: slog::Logger, - db_path: PathBuf, - freezer_db_path: PathBuf, - events: Arc>>, + ctx: Arc>, ) -> Result, ApiError> { - metrics::inc_counter(&metrics::REQUEST_COUNT); - let received_instant = Instant::now(); - let path = req.uri().path().to_string(); - let log = local_log.clone(); - let result = { - let _timer = metrics::start_timer(&metrics::REQUEST_RESPONSE_TIME); - - match (req.method(), path.as_ref()) { - // Methods for Client - (&Method::GET, "/node/health") => node::get_health(req), - (&Method::GET, "/node/version") => node::get_version(req), - (&Method::GET, "/node/syncing") => { - // inform the current slot, or set to 0 - let current_slot = beacon_chain - .head_info() - .map(|info| info.slot) - .unwrap_or_else(|_| 
Slot::from(0u64)); - - node::syncing::(req, network_globals, current_slot) - } - - // Methods for Network - (&Method::GET, "/network/enr") => network::get_enr::(req, network_globals), - (&Method::GET, "/network/peer_count") => { - network::get_peer_count::(req, network_globals) - } - (&Method::GET, "/network/peer_id") => network::get_peer_id::(req, network_globals), - (&Method::GET, "/network/peers") => network::get_peer_list::(req, network_globals), - (&Method::GET, "/network/listen_port") => { - network::get_listen_port::(req, network_globals) - } - (&Method::GET, "/network/listen_addresses") => { - network::get_listen_addresses::(req, network_globals) - } + let _timer = metrics::start_timer_vec(&metrics::BEACON_HTTP_API_TIMES_TOTAL, &[&path]); + metrics::inc_counter_vec(&metrics::BEACON_HTTP_API_REQUESTS_TOTAL, &[&path]); - // Methods for Beacon Node - (&Method::GET, "/beacon/head") => beacon::get_head::(req, beacon_chain), - (&Method::GET, "/beacon/heads") => beacon::get_heads::(req, beacon_chain), - (&Method::GET, "/beacon/block") => beacon::get_block::(req, beacon_chain), - (&Method::GET, "/beacon/block_root") => beacon::get_block_root::(req, beacon_chain), - (&Method::GET, "/beacon/fork") => beacon::get_fork::(req, beacon_chain), - (&Method::GET, "/beacon/fork/stream") => { - let reader = events.lock().add_rx(); - beacon::stream_forks::(log, reader) - } - (&Method::GET, "/beacon/genesis_time") => { - beacon::get_genesis_time::(req, beacon_chain) - } - (&Method::GET, "/beacon/genesis_validators_root") => { - beacon::get_genesis_validators_root::(req, beacon_chain) - } - (&Method::GET, "/beacon/validators") => beacon::get_validators::(req, beacon_chain), - (&Method::POST, "/beacon/validators") => { - beacon::post_validators::(req, beacon_chain).await - } - (&Method::GET, "/beacon/validators/all") => { - beacon::get_all_validators::(req, beacon_chain) - } - (&Method::GET, "/beacon/validators/active") => { - beacon::get_active_validators::(req, beacon_chain) - } 
- (&Method::GET, "/beacon/state") => beacon::get_state::(req, beacon_chain), - (&Method::GET, "/beacon/state_root") => beacon::get_state_root::(req, beacon_chain), - (&Method::GET, "/beacon/state/genesis") => { - beacon::get_genesis_state::(req, beacon_chain) - } - (&Method::GET, "/beacon/committees") => beacon::get_committees::(req, beacon_chain), - (&Method::POST, "/beacon/proposer_slashing") => { - beacon::proposer_slashing::(req, beacon_chain).await - } - (&Method::POST, "/beacon/attester_slashing") => { - beacon::attester_slashing::(req, beacon_chain).await - } - - // Methods for Validator - (&Method::POST, "/validator/duties") => { - let timer = - metrics::start_timer(&metrics::VALIDATOR_GET_DUTIES_REQUEST_RESPONSE_TIME); - let response = validator::post_validator_duties::(req, beacon_chain); - drop(timer); - response.await - } - (&Method::POST, "/validator/subscribe") => { - validator::post_validator_subscriptions::(req, network_channel).await - } - (&Method::GET, "/validator/duties/all") => { - validator::get_all_validator_duties::(req, beacon_chain) - } - (&Method::GET, "/validator/duties/active") => { - validator::get_active_validator_duties::(req, beacon_chain) - } - (&Method::GET, "/validator/block") => { - let timer = - metrics::start_timer(&metrics::VALIDATOR_GET_BLOCK_REQUEST_RESPONSE_TIME); - let response = validator::get_new_beacon_block::(req, beacon_chain, log); - drop(timer); - response - } - (&Method::POST, "/validator/block") => { - validator::publish_beacon_block::(req, beacon_chain, network_channel, log).await - } - (&Method::GET, "/validator/attestation") => { - let timer = - metrics::start_timer(&metrics::VALIDATOR_GET_ATTESTATION_REQUEST_RESPONSE_TIME); - let response = validator::get_new_attestation::(req, beacon_chain); - drop(timer); - response - } - (&Method::GET, "/validator/aggregate_attestation") => { - validator::get_aggregate_attestation::(req, beacon_chain) - } - (&Method::POST, "/validator/attestations") => { - 
validator::publish_attestations::(req, beacon_chain, network_channel, log).await - } - (&Method::POST, "/validator/aggregate_and_proofs") => { - validator::publish_aggregate_and_proofs::( - req, - beacon_chain, - network_channel, - log, - ) - .await - } - - // Methods for consensus - (&Method::GET, "/consensus/global_votes") => { - consensus::get_vote_count::(req, beacon_chain) - } - (&Method::POST, "/consensus/individual_votes") => { - consensus::post_individual_votes::(req, beacon_chain).await - } - - // Methods for bootstrap and checking configuration - (&Method::GET, "/spec") => spec::get_spec::(req, beacon_chain), - (&Method::GET, "/spec/slots_per_epoch") => spec::get_slots_per_epoch::(req), - (&Method::GET, "/spec/deposit_contract") => { - helpers::implementation_pending_response(req) - } - (&Method::GET, "/spec/eth2_config") => spec::get_eth2_config::(req, eth2_config), - - // Methods for advanced parameters - (&Method::GET, "/advanced/fork_choice") => { - advanced::get_fork_choice::(req, beacon_chain) - } - (&Method::GET, "/advanced/operation_pool") => { - advanced::get_operation_pool::(req, beacon_chain) - } - - (&Method::GET, "/metrics") => { - metrics::get_prometheus::(req, beacon_chain, db_path, freezer_db_path) - } - - // Lighthouse specific - (&Method::GET, "/lighthouse/syncing") => { - lighthouse::syncing::(req, network_globals) - } - - (&Method::GET, "/lighthouse/peers") => { - lighthouse::peers::(req, network_globals) - } - - (&Method::GET, "/lighthouse/connected_peers") => { - lighthouse::connected_peers::(req, network_globals) - } - _ => Err(ApiError::NotFound( - "Request path and/or method not found.".to_owned(), - )), - } - }; - - let request_processing_duration = Instant::now().duration_since(received_instant); - - // Map the Rust-friendly `Result` in to a http-friendly response. 
In effect, this ensures that - // any `Err` returned from our response handlers becomes a valid http response to the client - // (e.g., a response with a 404 or 500 status). + let received_instant = Instant::now(); + let log = ctx.log.clone(); + let allow_origin = ctx.config.allow_origin.clone(); - match result { + match route(req, ctx).await { Ok(mut response) => { - if rest_api_config.allow_origin != "" { + metrics::inc_counter_vec(&metrics::BEACON_HTTP_API_SUCCESS_TOTAL, &[&path]); + + if allow_origin != "" { let headers = response.headers_mut(); headers.insert( hyper::header::ACCESS_CONTROL_ALLOW_ORIGIN, - HeaderValue::from_str(&rest_api_config.allow_origin)?, + HeaderValue::from_str(&allow_origin)?, ); headers.insert(hyper::header::VARY, HeaderValue::from_static("Origin")); } debug!( - local_log, + log, "HTTP API request successful"; "path" => path, - "duration_ms" => request_processing_duration.as_millis() + "duration_ms" => Instant::now().duration_since(received_instant).as_millis() ); - metrics::inc_counter(&metrics::SUCCESS_COUNT); Ok(response) } Err(error) => { + metrics::inc_counter_vec(&metrics::BEACON_HTTP_API_ERROR_TOTAL, &[&path]); + debug!( - local_log, + log, "HTTP API request failure"; "path" => path, - "duration_ms" => request_processing_duration.as_millis() + "duration_ms" => Instant::now().duration_since(received_instant).as_millis() ); Ok(error.into()) } } } + +async fn route( + req: Request, + ctx: Arc>, +) -> Result, ApiError> { + let path = req.uri().path().to_string(); + let ctx = ctx.clone(); + let method = req.method().clone(); + let executor = ctx.executor.clone(); + let handler = Handler::new(req, ctx, executor)?; + + match (method, path.as_ref()) { + (Method::GET, "/node/version") => handler + .static_value(version_with_platform()) + .await? + .serde_encodings(), + (Method::GET, "/node/health") => handler + .static_value(Health::observe().map_err(ApiError::ServerError)?) + .await? 
+ .serde_encodings(), + (Method::GET, "/node/syncing") => handler + .allow_body() + .in_blocking_task(|_, ctx| node::syncing(ctx)) + .await? + .serde_encodings(), + (Method::GET, "/network/enr") => handler + .in_core_task(|_, ctx| Ok(ctx.network_globals.local_enr().to_base64())) + .await? + .serde_encodings(), + (Method::GET, "/network/peer_count") => handler + .in_core_task(|_, ctx| Ok(ctx.network_globals.connected_peers())) + .await? + .serde_encodings(), + (Method::GET, "/network/peer_id") => handler + .in_core_task(|_, ctx| Ok(ctx.network_globals.local_peer_id().to_base58())) + .await? + .serde_encodings(), + (Method::GET, "/network/peers") => handler + .in_blocking_task(|_, ctx| { + Ok(ctx + .network_globals + .peers + .read() + .connected_peer_ids() + .map(PeerId::to_string) + .collect::>()) + }) + .await? + .serde_encodings(), + (Method::GET, "/network/listen_port") => handler + .in_core_task(|_, ctx| Ok(ctx.network_globals.listen_port_tcp())) + .await? + .serde_encodings(), + (Method::GET, "/network/listen_addresses") => handler + .in_blocking_task(|_, ctx| Ok(ctx.network_globals.listen_multiaddrs())) + .await? + .serde_encodings(), + (Method::GET, "/beacon/head") => handler + .in_blocking_task(|_, ctx| beacon::get_head(ctx)) + .await? + .all_encodings(), + (Method::GET, "/beacon/heads") => handler + .in_blocking_task(|_, ctx| Ok(beacon::get_heads(ctx))) + .await? + .all_encodings(), + (Method::GET, "/beacon/block") => handler + .in_blocking_task(beacon::get_block) + .await? + .all_encodings(), + (Method::GET, "/beacon/block_root") => handler + .in_blocking_task(beacon::get_block_root) + .await? + .all_encodings(), + (Method::GET, "/beacon/fork") => handler + .in_blocking_task(|_, ctx| Ok(ctx.beacon_chain.head_info()?.fork)) + .await? 
+ .all_encodings(), + (Method::GET, "/beacon/fork/stream") => { + handler.sse_stream(|_, ctx| beacon::stream_forks(ctx)).await + } + (Method::GET, "/beacon/genesis_time") => handler + .in_blocking_task(|_, ctx| Ok(ctx.beacon_chain.head_info()?.genesis_time)) + .await? + .all_encodings(), + (Method::GET, "/beacon/genesis_validators_root") => handler + .in_blocking_task(|_, ctx| Ok(ctx.beacon_chain.head_info()?.genesis_validators_root)) + .await? + .all_encodings(), + (Method::GET, "/beacon/validators") => handler + .in_blocking_task(beacon::get_validators) + .await? + .all_encodings(), + (Method::POST, "/beacon/validators") => handler + .allow_body() + .in_blocking_task(beacon::post_validators) + .await? + .all_encodings(), + (Method::GET, "/beacon/validators/all") => handler + .in_blocking_task(beacon::get_all_validators) + .await? + .all_encodings(), + (Method::GET, "/beacon/validators/active") => handler + .in_blocking_task(beacon::get_active_validators) + .await? + .all_encodings(), + (Method::GET, "/beacon/state") => handler + .in_blocking_task(beacon::get_state) + .await? + .all_encodings(), + (Method::GET, "/beacon/state_root") => handler + .in_blocking_task(beacon::get_state_root) + .await? + .all_encodings(), + (Method::GET, "/beacon/state/genesis") => handler + .in_blocking_task(|_, ctx| beacon::get_genesis_state(ctx)) + .await? + .all_encodings(), + (Method::GET, "/beacon/committees") => handler + .in_blocking_task(beacon::get_committees) + .await? + .all_encodings(), + (Method::POST, "/beacon/proposer_slashing") => handler + .allow_body() + .in_blocking_task(beacon::proposer_slashing) + .await? + .serde_encodings(), + (Method::POST, "/beacon/attester_slashing") => handler + .allow_body() + .in_blocking_task(beacon::attester_slashing) + .await? + .serde_encodings(), + (Method::POST, "/validator/duties") => handler + .allow_body() + .in_blocking_task(validator::post_validator_duties) + .await? 
+ .serde_encodings(), + (Method::POST, "/validator/subscribe") => handler + .allow_body() + .in_blocking_task(validator::post_validator_subscriptions) + .await? + .serde_encodings(), + (Method::GET, "/validator/duties/all") => handler + .in_blocking_task(validator::get_all_validator_duties) + .await? + .serde_encodings(), + (Method::GET, "/validator/duties/active") => handler + .in_blocking_task(validator::get_active_validator_duties) + .await? + .serde_encodings(), + (Method::GET, "/validator/block") => handler + .in_blocking_task(validator::get_new_beacon_block) + .await? + .serde_encodings(), + (Method::POST, "/validator/block") => handler + .allow_body() + .in_blocking_task(validator::publish_beacon_block) + .await? + .serde_encodings(), + (Method::GET, "/validator/attestation") => handler + .in_blocking_task(validator::get_new_attestation) + .await? + .serde_encodings(), + (Method::GET, "/validator/aggregate_attestation") => handler + .in_blocking_task(validator::get_aggregate_attestation) + .await? + .serde_encodings(), + (Method::POST, "/validator/attestations") => handler + .allow_body() + .in_blocking_task(validator::publish_attestations) + .await? + .serde_encodings(), + (Method::POST, "/validator/aggregate_and_proofs") => handler + .allow_body() + .in_blocking_task(validator::publish_aggregate_and_proofs) + .await? + .serde_encodings(), + (Method::GET, "/consensus/global_votes") => handler + .allow_body() + .in_blocking_task(consensus::get_vote_count) + .await? + .serde_encodings(), + (Method::POST, "/consensus/individual_votes") => handler + .allow_body() + .in_blocking_task(consensus::post_individual_votes) + .await? + .serde_encodings(), + (Method::GET, "/spec") => handler + // TODO: this clone is not ideal. + .in_blocking_task(|_, ctx| Ok(ctx.beacon_chain.spec.clone())) + .await? + .serde_encodings(), + (Method::GET, "/spec/slots_per_epoch") => handler + .static_value(T::EthSpec::slots_per_epoch()) + .await? 
+ .serde_encodings(), + (Method::GET, "/spec/eth2_config") => handler + // TODO: this clone is not ideal. + .in_blocking_task(|_, ctx| Ok(ctx.eth2_config.as_ref().clone())) + .await? + .serde_encodings(), + (Method::GET, "/advanced/fork_choice") => handler + .in_blocking_task(|_, ctx| { + Ok(ctx + .beacon_chain + .fork_choice + .read() + .proto_array() + .core_proto_array() + .clone()) + }) + .await? + .serde_encodings(), + (Method::GET, "/advanced/operation_pool") => handler + .in_blocking_task(|_, ctx| { + Ok(PersistedOperationPool::from_operation_pool( + &ctx.beacon_chain.op_pool, + )) + }) + .await? + .serde_encodings(), + (Method::GET, "/metrics") => handler + .in_blocking_task(|_, ctx| metrics::get_prometheus(ctx)) + .await? + .text_encoding(), + (Method::GET, "/lighthouse/syncing") => handler + .in_blocking_task(|_, ctx| Ok(ctx.network_globals.sync_state())) + .await? + .serde_encodings(), + (Method::GET, "/lighthouse/peers") => handler + .in_blocking_task(|_, ctx| lighthouse::peers(ctx)) + .await? + .serde_encodings(), + (Method::GET, "/lighthouse/connected_peers") => handler + .in_blocking_task(|_, ctx| lighthouse::connected_peers(ctx)) + .await? + .serde_encodings(), + _ => Err(ApiError::NotFound( + "Request path and/or method not found.".to_owned(), + )), + } +} diff --git a/beacon_node/rest_api/src/spec.rs b/beacon_node/rest_api/src/spec.rs deleted file mode 100644 index 6fd2575f5f2..00000000000 --- a/beacon_node/rest_api/src/spec.rs +++ /dev/null @@ -1,28 +0,0 @@ -use super::ApiResult; -use crate::response_builder::ResponseBuilder; -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use eth2_config::Eth2Config; -use hyper::{Body, Request}; -use std::sync::Arc; -use types::EthSpec; - -/// HTTP handler to return the full spec object. -pub fn get_spec( - req: Request, - beacon_chain: Arc>, -) -> ApiResult { - ResponseBuilder::new(&req)?.body_no_ssz(&beacon_chain.spec) -} - -/// HTTP handler to return the full Eth2Config object. 
-pub fn get_eth2_config( - req: Request, - eth2_config: Arc, -) -> ApiResult { - ResponseBuilder::new(&req)?.body_no_ssz(eth2_config.as_ref()) -} - -/// HTTP handler to return the full spec object. -pub fn get_slots_per_epoch(req: Request) -> ApiResult { - ResponseBuilder::new(&req)?.body(&T::EthSpec::slots_per_epoch()) -} diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs index 7d4ab6b84dc..e1c3c37dbf7 100644 --- a/beacon_node/rest_api/src/validator.rs +++ b/beacon_node/rest_api/src/validator.rs @@ -1,39 +1,32 @@ -use crate::helpers::{check_content_type_for_json, publish_beacon_block_to_network}; -use crate::response_builder::ResponseBuilder; -use crate::{ApiError, ApiResult, NetworkChannel, UrlQuery}; +use crate::helpers::{parse_hex_ssz_bytes, publish_beacon_block_to_network}; +use crate::{ApiError, Context, NetworkChannel, UrlQuery}; use beacon_chain::{ attestation_verification::Error as AttnError, BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, ForkChoiceError, StateSkipConfig, }; use bls::PublicKeyBytes; use eth2_libp2p::PubsubMessage; -use hyper::{Body, Request}; +use hyper::Request; use network::NetworkMessage; -use rayon::prelude::*; use rest_types::{ValidatorDutiesRequest, ValidatorDutyBytes, ValidatorSubscription}; use slog::{error, info, trace, warn, Logger}; use std::sync::Arc; use types::beacon_state::EthSpec; use types::{ - Attestation, AttestationData, BeaconState, Epoch, RelativeEpoch, SelectionProof, + Attestation, AttestationData, BeaconBlock, BeaconState, Epoch, RelativeEpoch, SelectionProof, SignedAggregateAndProof, SignedBeaconBlock, SubnetId, }; /// HTTP Handler to retrieve the duties for a set of validators during a particular epoch. This /// method allows for collecting bulk sets of validator duties without risking exceeding the max /// URL length with query pairs. 
-pub async fn post_validator_duties( - req: Request, - beacon_chain: Arc>, -) -> ApiResult { - let response_builder = ResponseBuilder::new(&req); - +pub fn post_validator_duties( + req: Request>, + ctx: Arc>, +) -> Result, ApiError> { let body = req.into_body(); - let chunks = hyper::body::to_bytes(body) - .await - .map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}", e)))?; - serde_json::from_slice::(&chunks) + serde_json::from_slice::(&body) .map_err(|e| { ApiError::BadRequest(format!( "Unable to parse JSON into ValidatorDutiesRequest: {:?}", @@ -42,29 +35,22 @@ pub async fn post_validator_duties( }) .and_then(|bulk_request| { return_validator_duties( - beacon_chain, + &ctx.beacon_chain.clone(), bulk_request.epoch, bulk_request.pubkeys.into_iter().map(Into::into).collect(), ) }) - .and_then(|duties| response_builder?.body_no_ssz(&duties)) } /// HTTP Handler to retrieve subscriptions for a set of validators. This allows the node to /// organise peer discovery and topic subscription for known validators. 
-pub async fn post_validator_subscriptions( - req: Request, - network_chan: NetworkChannel, -) -> ApiResult { - try_future!(check_content_type_for_json(&req)); - let response_builder = ResponseBuilder::new(&req); - +pub fn post_validator_subscriptions( + req: Request>, + ctx: Arc>, +) -> Result<(), ApiError> { let body = req.into_body(); - let chunks = hyper::body::to_bytes(body) - .await - .map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}", e)))?; - serde_json::from_slice(&chunks) + serde_json::from_slice(&body) .map_err(|e| { ApiError::BadRequest(format!( "Unable to parse JSON into ValidatorSubscriptions: {:?}", @@ -72,7 +58,7 @@ pub async fn post_validator_subscriptions( )) }) .and_then(move |subscriptions: Vec| { - network_chan + ctx.network_chan .send(NetworkMessage::Subscribe { subscriptions }) .map_err(|e| { ApiError::ServerError(format!( @@ -82,19 +68,18 @@ pub async fn post_validator_subscriptions( })?; Ok(()) }) - .and_then(|_| response_builder?.body_no_ssz(&())) } /// HTTP Handler to retrieve all validator duties for the given epoch. pub fn get_all_validator_duties( - req: Request, - beacon_chain: Arc>, -) -> ApiResult { + req: Request>, + ctx: Arc>, +) -> Result, ApiError> { let query = UrlQuery::from_request(&req)?; let epoch = query.epoch()?; - let state = get_state_for_epoch(&beacon_chain, epoch, StateSkipConfig::WithoutStateRoots)?; + let state = get_state_for_epoch(&ctx.beacon_chain, epoch, StateSkipConfig::WithoutStateRoots)?; let validator_pubkeys = state .validators @@ -102,21 +87,19 @@ pub fn get_all_validator_duties( .map(|validator| validator.pubkey.clone()) .collect(); - let duties = return_validator_duties(beacon_chain, epoch, validator_pubkeys)?; - - ResponseBuilder::new(&req)?.body_no_ssz(&duties) + return_validator_duties(&ctx.beacon_chain, epoch, validator_pubkeys) } /// HTTP Handler to retrieve all active validator duties for the given epoch. 
pub fn get_active_validator_duties( - req: Request, - beacon_chain: Arc>, -) -> ApiResult { + req: Request>, + ctx: Arc>, +) -> Result, ApiError> { let query = UrlQuery::from_request(&req)?; let epoch = query.epoch()?; - let state = get_state_for_epoch(&beacon_chain, epoch, StateSkipConfig::WithoutStateRoots)?; + let state = get_state_for_epoch(&ctx.beacon_chain, epoch, StateSkipConfig::WithoutStateRoots)?; let validator_pubkeys = state .validators @@ -125,9 +108,7 @@ pub fn get_active_validator_duties( .map(|validator| validator.pubkey.clone()) .collect(); - let duties = return_validator_duties(beacon_chain, epoch, validator_pubkeys)?; - - ResponseBuilder::new(&req)?.body_no_ssz(&duties) + return_validator_duties(&ctx.beacon_chain, epoch, validator_pubkeys) } /// Helper function to return the state that can be used to determine the duties for some `epoch`. @@ -163,7 +144,7 @@ pub fn get_state_for_epoch( /// Helper function to get the duties for some `validator_pubkeys` in some `epoch`. fn return_validator_duties( - beacon_chain: Arc>, + beacon_chain: &BeaconChain, epoch: Epoch, validator_pubkeys: Vec, ) -> Result, ApiError> { @@ -279,20 +260,26 @@ fn return_validator_duties( /// HTTP Handler to produce a new BeaconBlock from the current state, ready to be signed by a validator. pub fn get_new_beacon_block( - req: Request, - beacon_chain: Arc>, - log: Logger, -) -> ApiResult { + req: Request>, + ctx: Arc>, +) -> Result, ApiError> { let query = UrlQuery::from_request(&req)?; let slot = query.slot()?; let randao_reveal = query.randao_reveal()?; - let (new_block, _state) = beacon_chain - .produce_block(randao_reveal, slot) + let validator_graffiti = if let Some((_key, value)) = query.first_of_opt(&["graffiti"]) { + Some(parse_hex_ssz_bytes(&value)?) 
+ } else { + None + }; + + let (new_block, _state) = ctx + .beacon_chain + .produce_block(randao_reveal, slot, validator_graffiti) .map_err(|e| { error!( - log, + ctx.log, "Error whilst producing block"; "error" => format!("{:?}", e) ); @@ -303,48 +290,40 @@ pub fn get_new_beacon_block( )) })?; - ResponseBuilder::new(&req)?.body(&new_block) + Ok(new_block) } /// HTTP Handler to publish a SignedBeaconBlock, which has been signed by a validator. -pub async fn publish_beacon_block( - req: Request, - beacon_chain: Arc>, - network_chan: NetworkChannel, - log: Logger, -) -> ApiResult { - try_future!(check_content_type_for_json(&req)); - let response_builder = ResponseBuilder::new(&req); - +pub fn publish_beacon_block( + req: Request>, + ctx: Arc>, +) -> Result<(), ApiError> { let body = req.into_body(); - let chunks = hyper::body::to_bytes(body) - .await - .map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}", e)))?; - serde_json::from_slice(&chunks).map_err(|e| { + serde_json::from_slice(&body).map_err(|e| { ApiError::BadRequest(format!("Unable to parse JSON into SignedBeaconBlock: {:?}", e)) }) .and_then(move |block: SignedBeaconBlock| { let slot = block.slot(); - match beacon_chain.process_block(block.clone()) { + match ctx.beacon_chain.process_block(block.clone()) { Ok(block_root) => { // Block was processed, publish via gossipsub info!( - log, + ctx.log, "Block from local validator"; "block_root" => format!("{}", block_root), "block_slot" => slot, ); - publish_beacon_block_to_network::(network_chan, block)?; + publish_beacon_block_to_network::(&ctx.network_chan, block)?; // Run the fork choice algorithm and enshrine a new canonical head, if // found. // // The new head may or may not be the block we just received. 
- if let Err(e) = beacon_chain.fork_choice() { + if let Err(e) = ctx.beacon_chain.fork_choice() { error!( - log, + ctx.log, "Failed to find beacon chain head"; "error" => format!("{:?}", e) ); @@ -358,9 +337,9 @@ pub async fn publish_beacon_block( // - Excessive time between block produce and publish. // - A validator is using another beacon node to produce blocks and // submitting them here. - if beacon_chain.head()?.beacon_block_root != block_root { + if ctx.beacon_chain.head()?.beacon_block_root != block_root { warn!( - log, + ctx.log, "Block from validator is not head"; "desc" => "potential re-org", ); @@ -372,7 +351,7 @@ pub async fn publish_beacon_block( } Err(BlockError::BeaconChainError(e)) => { error!( - log, + ctx.log, "Error whilst processing block"; "error" => format!("{:?}", e) ); @@ -384,7 +363,7 @@ pub async fn publish_beacon_block( } Err(other) => { warn!( - log, + ctx.log, "Invalid block from local validator"; "outcome" => format!("{:?}", other) ); @@ -396,41 +375,41 @@ pub async fn publish_beacon_block( } } }) - .and_then(|_| response_builder?.body_no_ssz(&())) } /// HTTP Handler to produce a new Attestation from the current state, ready to be signed by a validator. 
pub fn get_new_attestation( - req: Request, - beacon_chain: Arc>, -) -> ApiResult { + req: Request>, + ctx: Arc>, +) -> Result, ApiError> { let query = UrlQuery::from_request(&req)?; let slot = query.slot()?; let index = query.committee_index()?; - let attestation = beacon_chain + ctx.beacon_chain .produce_unaggregated_attestation(slot, index) - .map_err(|e| ApiError::BadRequest(format!("Unable to produce attestation: {:?}", e)))?; - - ResponseBuilder::new(&req)?.body(&attestation) + .map_err(|e| ApiError::BadRequest(format!("Unable to produce attestation: {:?}", e))) } /// HTTP Handler to retrieve the aggregate attestation for a slot pub fn get_aggregate_attestation( - req: Request, - beacon_chain: Arc>, -) -> ApiResult { + req: Request>, + ctx: Arc>, +) -> Result, ApiError> { let query = UrlQuery::from_request(&req)?; let attestation_data = query.attestation_data()?; - match beacon_chain.get_aggregated_attestation(&attestation_data) { - Ok(Some(attestation)) => ResponseBuilder::new(&req)?.body(&attestation), + match ctx + .beacon_chain + .get_aggregated_attestation(&attestation_data) + { + Ok(Some(attestation)) => Ok(attestation), Ok(None) => Err(ApiError::NotFound(format!( "No matching aggregate attestation for slot {:?} is known in slot {:?}", attestation_data.slot, - beacon_chain.slot() + ctx.beacon_chain.slot() ))), Err(e) => Err(ApiError::ServerError(format!( "Unable to obtain attestation: {:?}", @@ -440,22 +419,13 @@ pub fn get_aggregate_attestation( } /// HTTP Handler to publish a list of Attestations, which have been signed by a number of validators. 
-pub async fn publish_attestations( - req: Request, - beacon_chain: Arc>, - network_chan: NetworkChannel, - log: Logger, -) -> ApiResult { - try_future!(check_content_type_for_json(&req)); - let response_builder = ResponseBuilder::new(&req); - - let body = req.into_body(); - let chunk = hyper::body::to_bytes(body) - .await - .map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}", e)))?; +pub fn publish_attestations( + req: Request>, + ctx: Arc>, +) -> Result<(), ApiError> { + let bytes = req.into_body(); - let chunks = chunk.iter().cloned().collect::>(); - serde_json::from_slice(&chunks.as_slice()) + serde_json::from_slice(&bytes) .map_err(|e| { ApiError::BadRequest(format!( "Unable to deserialize JSON into a list of attestations: {:?}", @@ -466,16 +436,16 @@ pub async fn publish_attestations( .map( move |attestations: Vec<(Attestation, SubnetId)>| { attestations - .into_par_iter() + .into_iter() .enumerate() .map(|(i, (attestation, subnet_id))| { process_unaggregated_attestation( - &beacon_chain, - network_chan.clone(), + &ctx.beacon_chain, + ctx.network_chan.clone(), attestation, subnet_id, i, - &log, + &ctx.log, ) }) .collect::>>() @@ -485,7 +455,7 @@ pub async fn publish_attestations( // // Note: this will only provide info about the _first_ failure, not all failures. .and_then(|processing_results| processing_results.into_iter().try_for_each(|result| result)) - .and_then(|_| response_builder?.body_no_ssz(&())) + .map(|_| ()) } /// Processes an unaggregrated attestation that was included in a list of attestations with the @@ -558,21 +528,13 @@ fn process_unaggregated_attestation( } /// HTTP Handler to publish an Attestation, which has been signed by a validator. -#[allow(clippy::redundant_clone)] // false positives in this function. 
-pub async fn publish_aggregate_and_proofs( - req: Request, - beacon_chain: Arc>, - network_chan: NetworkChannel, - log: Logger, -) -> ApiResult { - try_future!(check_content_type_for_json(&req)); - let response_builder = ResponseBuilder::new(&req); +pub fn publish_aggregate_and_proofs( + req: Request>, + ctx: Arc>, +) -> Result<(), ApiError> { let body = req.into_body(); - let chunk = hyper::body::to_bytes(body) - .await - .map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}", e)))?; - let chunks = chunk.iter().cloned().collect::>(); - serde_json::from_slice(&chunks.as_slice()) + + serde_json::from_slice(&body) .map_err(|e| { ApiError::BadRequest(format!( "Unable to deserialize JSON into a list of SignedAggregateAndProof: {:?}", @@ -583,15 +545,15 @@ pub async fn publish_aggregate_and_proofs( .map( move |signed_aggregates: Vec>| { signed_aggregates - .into_par_iter() + .into_iter() .enumerate() .map(|(i, signed_aggregate)| { process_aggregated_attestation( - &beacon_chain, - network_chan.clone(), + &ctx.beacon_chain, + ctx.network_chan.clone(), signed_aggregate, i, - &log, + &ctx.log, ) }) .collect::>>() @@ -601,7 +563,6 @@ pub async fn publish_aggregate_and_proofs( // // Note: this will only provide info about the _first_ failure, not all failures. 
.and_then(|processing_results| processing_results.into_iter().try_for_each(|result| result)) - .and_then(|_| response_builder?.body_no_ssz(&())) } /// Processes an aggregrated attestation that was included in a list of attestations with the index diff --git a/beacon_node/rest_api/tests/test.rs b/beacon_node/rest_api/tests/test.rs index d5bda936b21..160ee667ccf 100644 --- a/beacon_node/rest_api/tests/test.rs +++ b/beacon_node/rest_api/tests/test.rs @@ -460,7 +460,7 @@ fn validator_block_post() { remote_node .http .validator() - .produce_block(slot, randao_reveal), + .produce_block(slot, randao_reveal, None), ) .expect("should fetch block from http api"); @@ -547,7 +547,7 @@ fn validator_block_get() { remote_node .http .validator() - .produce_block(slot, randao_reveal.clone()), + .produce_block(slot, randao_reveal.clone(), None), ) .expect("should fetch block from http api"); @@ -555,7 +555,50 @@ fn validator_block_get() { .client .beacon_chain() .expect("client should have beacon chain") - .produce_block(randao_reveal, slot) + .produce_block(randao_reveal, slot, None) + .expect("should produce block"); + + assert_eq!( + block, expected_block, + "the block returned from the API should be as expected" + ); +} + +#[test] +fn validator_block_get_with_graffiti() { + let mut env = build_env(); + + let spec = &E::default_spec(); + + let node = build_node(&mut env, testing_client_config()); + let remote_node = node.remote_node().expect("should produce remote node"); + + let beacon_chain = node + .client + .beacon_chain() + .expect("client should have beacon chain"); + + let slot = Slot::new(1); + let randao_reveal = get_randao_reveal(beacon_chain, slot, spec); + + let block = env + .runtime() + .block_on(remote_node.http.validator().produce_block( + slot, + randao_reveal.clone(), + Some(*b"test-graffiti-test-graffiti-test"), + )) + .expect("should fetch block from http api"); + + let (expected_block, _state) = node + .client + .beacon_chain() + .expect("client should have 
beacon chain") + .produce_block( + randao_reveal, + slot, + Some(*b"test-graffiti-test-graffiti-test"), + ) .expect("should produce block"); assert_eq!( @@ -761,7 +804,7 @@ fn get_version() { let version = env .runtime() .block_on(remote_node.http.node().get_version()) - .expect("should fetch eth2 config from http api"); + .expect("should fetch version from http api"); assert_eq!( lighthouse_version::version_with_platform(), diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 0bd1e4da25e..6caa8acd9b2 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -71,8 +71,8 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Arg::with_name("boot-nodes") .long("boot-nodes") .allow_hyphen_values(true) - .value_name("ENR-LIST") - .help("One or more comma-delimited base64-encoded ENR's to bootstrap the p2p network.") + .value_name("ENR/MULTIADDR LIST") + .help("One or more comma-delimited base64-encoded ENR's to bootstrap the p2p network. Multiaddr is also supported.") .takes_value(true), ) .arg( @@ -249,4 +249,17 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .value_name("GRAFFITI") .takes_value(true) ) + .arg( + Arg::with_name("max-skip-slots") + .long("max-skip-slots") + .help( + "Refuse to skip more than this many slots when processing a block or attestation. \ + This prevents nodes on minority forks from wasting our time and RAM, \ + but might need to be raised or set to 'none' in times of extreme network \ + outage." 
+ ) + .value_name("NUM_SLOTS") + .takes_value(true) + .default_value("700") + ) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index c917d572fd8..f9abfca6aeb 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -2,7 +2,7 @@ use beacon_chain::builder::PUBKEY_CACHE_FILENAME; use clap::ArgMatches; use clap_utils::BAD_TESTNET_DIR_MESSAGE; use client::{config::DEFAULT_DATADIR, ClientConfig, ClientGenesis}; -use eth2_libp2p::{Enr, Multiaddr}; +use eth2_libp2p::{multiaddr::Protocol, Enr, Multiaddr, NetworkConfig}; use eth2_testnet_config::Eth2TestnetConfig; use slog::{crit, info, Logger}; use ssz::Encode; @@ -75,130 +75,13 @@ pub fn get_config( /* * Networking */ - // If a network dir has been specified, override the `datadir` definition. - if let Some(dir) = cli_args.value_of("network-dir") { - client_config.network.network_dir = PathBuf::from(dir); - } else { - client_config.network.network_dir = client_config.data_dir.join(NETWORK_DIR); - }; - - if let Some(listen_address_str) = cli_args.value_of("listen-address") { - let listen_address = listen_address_str - .parse() - .map_err(|_| format!("Invalid listen address: {:?}", listen_address_str))?; - client_config.network.listen_address = listen_address; - } - - if let Some(target_peers_str) = cli_args.value_of("target-peers") { - client_config.network.target_peers = target_peers_str - .parse::() - .map_err(|_| format!("Invalid number of target peers: {}", target_peers_str))?; - } - - if let Some(port_str) = cli_args.value_of("port") { - let port = port_str - .parse::() - .map_err(|_| format!("Invalid port: {}", port_str))?; - client_config.network.libp2p_port = port; - client_config.network.discovery_port = port; - } - - if let Some(port_str) = cli_args.value_of("discovery-port") { - let port = port_str - .parse::() - .map_err(|_| format!("Invalid port: {}", port_str))?; - client_config.network.discovery_port = port; - } - - if let Some(boot_enr_str) = 
cli_args.value_of("boot-nodes") { - client_config.network.boot_nodes = boot_enr_str - .split(',') - .map(|enr| enr.parse().map_err(|_| format!("Invalid ENR: {}", enr))) - .collect::, _>>()?; - } - - if let Some(libp2p_addresses_str) = cli_args.value_of("libp2p-addresses") { - client_config.network.libp2p_nodes = libp2p_addresses_str - .split(',') - .map(|multiaddr| { - multiaddr - .parse() - .map_err(|_| format!("Invalid Multiaddr: {}", multiaddr)) - }) - .collect::, _>>()?; - } - - if let Some(enr_udp_port_str) = cli_args.value_of("enr-udp-port") { - client_config.network.enr_udp_port = Some( - enr_udp_port_str - .parse::() - .map_err(|_| format!("Invalid discovery port: {}", enr_udp_port_str))?, - ); - } - - if let Some(enr_tcp_port_str) = cli_args.value_of("enr-tcp-port") { - client_config.network.enr_tcp_port = Some( - enr_tcp_port_str - .parse::() - .map_err(|_| format!("Invalid ENR TCP port: {}", enr_tcp_port_str))?, - ); - } - - if cli_args.is_present("enr-match") { - // set the enr address to localhost if the address is 0.0.0.0 - if client_config.network.listen_address - == "0.0.0.0".parse::().expect("valid ip addr") - { - client_config.network.enr_address = - Some("127.0.0.1".parse::().expect("valid ip addr")); - } else { - client_config.network.enr_address = Some(client_config.network.listen_address); - } - client_config.network.enr_udp_port = Some(client_config.network.discovery_port); - } - - if let Some(enr_address) = cli_args.value_of("enr-address") { - let resolved_addr = match enr_address.parse::() { - Ok(addr) => addr, // // Input is an IpAddr - Err(_) => { - let mut addr = enr_address.to_string(); - // Appending enr-port to the dns hostname to appease `to_socket_addrs()` parsing. - // Since enr-update is disabled with a dns address, not setting the enr-udp-port - // will make the node undiscoverable. 
- if let Some(enr_udp_port) = client_config.network.enr_udp_port { - addr.push_str(&format!(":{}", enr_udp_port.to_string())); - } else { - return Err( - "enr-udp-port must be set for node to be discoverable with dns address" - .into(), - ); - } - // `to_socket_addr()` does the dns resolution - // Note: `to_socket_addrs()` is a blocking call - let resolved_addr = if let Ok(mut resolved_addrs) = addr.to_socket_addrs() { - // Pick the first ip from the list of resolved addresses - resolved_addrs - .next() - .map(|a| a.ip()) - .ok_or_else(|| "Resolved dns addr contains no entries".to_string())? - } else { - return Err(format!("Failed to parse enr-address: {}", enr_address)); - }; - client_config.network.discv5_config.enr_update = false; - resolved_addr - } - }; - client_config.network.enr_address = Some(resolved_addr); - } - - if cli_args.is_present("disable_enr_auto_update") { - client_config.network.discv5_config.enr_update = false; - } - - if cli_args.is_present("disable-discovery") { - client_config.network.disable_discovery = true; - slog::warn!(log, "Discovery is disabled. 
New peers will not be found"); - } + set_network_config( + &mut client_config.network, + cli_args, + &client_config.data_dir, + &log, + false, + )?; /* * Http server @@ -337,7 +220,7 @@ pub fn get_config( client_config.eth1.follow_distance = spec.eth1_follow_distance; if let Some(mut boot_nodes) = eth2_testnet_config.boot_enr { - client_config.network.boot_nodes.append(&mut boot_nodes) + client_config.network.boot_nodes_enr.append(&mut boot_nodes) } if let Some(genesis_state) = eth2_testnet_config.genesis_state { @@ -368,9 +251,176 @@ pub fn get_config( client_config.graffiti[..trimmed_graffiti_len] .copy_from_slice(&raw_graffiti[..trimmed_graffiti_len]); + if let Some(max_skip_slots) = cli_args.value_of("max-skip-slots") { + client_config.chain.import_max_skip_slots = match max_skip_slots { + "none" => None, + n => Some( + n.parse() + .map_err(|_| "Invalid max-skip-slots".to_string())?, + ), + }; + } + Ok(client_config) } +/// Sets the network config from the command line arguments +pub fn set_network_config( + config: &mut NetworkConfig, + cli_args: &ArgMatches, + data_dir: &PathBuf, + log: &Logger, + use_listening_port_as_enr_port_by_default: bool, +) -> Result<(), String> { + // If a network dir has been specified, override the `datadir` definition. 
+ if let Some(dir) = cli_args.value_of("network-dir") { + config.network_dir = PathBuf::from(dir); + } else { + config.network_dir = data_dir.join(NETWORK_DIR); + }; + + if let Some(listen_address_str) = cli_args.value_of("listen-address") { + let listen_address = listen_address_str + .parse() + .map_err(|_| format!("Invalid listen address: {:?}", listen_address_str))?; + config.listen_address = listen_address; + } + + if let Some(target_peers_str) = cli_args.value_of("target-peers") { + config.target_peers = target_peers_str + .parse::() + .map_err(|_| format!("Invalid number of target peers: {}", target_peers_str))?; + } + + if let Some(port_str) = cli_args.value_of("port") { + let port = port_str + .parse::() + .map_err(|_| format!("Invalid port: {}", port_str))?; + config.libp2p_port = port; + config.discovery_port = port; + } + + if let Some(port_str) = cli_args.value_of("discovery-port") { + let port = port_str + .parse::() + .map_err(|_| format!("Invalid port: {}", port_str))?; + config.discovery_port = port; + } + + if let Some(boot_enr_str) = cli_args.value_of("boot-nodes") { + let mut enrs: Vec = vec![]; + let mut multiaddrs: Vec = vec![]; + for addr in boot_enr_str.split(',') { + match addr.parse() { + Ok(enr) => enrs.push(enr), + Err(_) => { + // parsing as ENR failed, try as Multiaddr + let multi: Multiaddr = addr + .parse() + .map_err(|_| format!("Not valid as ENR nor Multiaddr: {}", addr))?; + if !multi.iter().any(|proto| matches!(proto, Protocol::Udp(_))) { + slog::error!(log, "Missing UDP in Multiaddr {}", multi.to_string()); + } + if !multi.iter().any(|proto| matches!(proto, Protocol::P2p(_))) { + slog::error!(log, "Missing P2P in Multiaddr {}", multi.to_string()); + } + multiaddrs.push(multi); + } + } + } + config.boot_nodes_enr = enrs; + config.boot_nodes_multiaddr = multiaddrs; + } + + if let Some(libp2p_addresses_str) = cli_args.value_of("libp2p-addresses") { + config.libp2p_nodes = libp2p_addresses_str + .split(',') + .map(|multiaddr| { + 
multiaddr + .parse() + .map_err(|_| format!("Invalid Multiaddr: {}", multiaddr)) + }) + .collect::, _>>()?; + } + + if let Some(enr_udp_port_str) = cli_args.value_of("enr-udp-port") { + config.enr_udp_port = Some( + enr_udp_port_str + .parse::() + .map_err(|_| format!("Invalid discovery port: {}", enr_udp_port_str))?, + ); + } + + if let Some(enr_tcp_port_str) = cli_args.value_of("enr-tcp-port") { + config.enr_tcp_port = Some( + enr_tcp_port_str + .parse::() + .map_err(|_| format!("Invalid ENR TCP port: {}", enr_tcp_port_str))?, + ); + } + + if cli_args.is_present("enr-match") { + // set the enr address to localhost if the address is 0.0.0.0 + if config.listen_address == "0.0.0.0".parse::().expect("valid ip addr") { + config.enr_address = Some("127.0.0.1".parse::().expect("valid ip addr")); + } else { + config.enr_address = Some(config.listen_address); + } + config.enr_udp_port = Some(config.discovery_port); + } + + if let Some(enr_address) = cli_args.value_of("enr-address") { + let resolved_addr = match enr_address.parse::() { + Ok(addr) => addr, // // Input is an IpAddr + Err(_) => { + let mut addr = enr_address.to_string(); + // Appending enr-port to the dns hostname to appease `to_socket_addrs()` parsing. + // Since enr-update is disabled with a dns address, not setting the enr-udp-port + // will make the node undiscoverable. 
+ if let Some(enr_udp_port) = config.enr_udp_port.or_else(|| { + if use_listening_port_as_enr_port_by_default { + Some(config.discovery_port) + } else { + None + } + }) { + addr.push_str(&format!(":{}", enr_udp_port.to_string())); + } else { + return Err( + "enr-udp-port must be set for node to be discoverable with dns address" + .into(), + ); + } + // `to_socket_addr()` does the dns resolution + // Note: `to_socket_addrs()` is a blocking call + let resolved_addr = if let Ok(mut resolved_addrs) = addr.to_socket_addrs() { + // Pick the first ip from the list of resolved addresses + resolved_addrs + .next() + .map(|a| a.ip()) + .ok_or_else(|| "Resolved dns addr contains no entries".to_string())? + } else { + return Err(format!("Failed to parse enr-address: {}", enr_address)); + }; + config.discv5_config.enr_update = false; + resolved_addr + } + }; + config.enr_address = Some(resolved_addr); + } + + if cli_args.is_present("disable_enr_auto_update") { + config.discv5_config.enr_update = false; + } + + if cli_args.is_present("disable-discovery") { + config.disable_discovery = true; + slog::warn!(log, "Discovery is disabled. New peers will not be found"); + } + + Ok(()) +} + /// Gets the datadir which should be used. pub fn get_data_dir(cli_args: &ArgMatches) -> PathBuf { // Read the `--datadir` flag. 
diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index 02108c13a30..19931916013 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -7,7 +7,7 @@ mod config; pub use beacon_chain; pub use cli::cli_app; pub use client::{Client, ClientBuilder, ClientConfig, ClientGenesis}; -pub use config::{get_data_dir, get_eth2_testnet_config}; +pub use config::{get_data_dir, get_eth2_testnet_config, set_network_config}; pub use eth2_config::Eth2Config; use beacon_chain::events::TeeEventHandler; @@ -100,7 +100,9 @@ impl ProductionBeaconNode { "endpoint" => &client_config.eth1.endpoint, "method" => "json rpc via http" ); - builder.caching_eth1_backend(client_config.eth1.clone())? + builder + .caching_eth1_backend(client_config.eth1.clone()) + .await? } else if client_config.dummy_eth1_backend { warn!( log, @@ -126,7 +128,8 @@ impl ProductionBeaconNode { let builder = builder .build_beacon_chain()? - .network(&client_config.network)? + .network(&client_config.network) + .await? .notifier()?; let builder = if client_config.rest_api.enabled { diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 48917af7357..08e810866f9 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -14,7 +14,7 @@ use crate::{ }; use lru::LruCache; use parking_lot::{Mutex, RwLock}; -use slog::{debug, error, trace, warn, Logger}; +use slog::{debug, error, info, trace, warn, Logger}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use state_processing::{ @@ -30,6 +30,16 @@ use types::*; /// 32-byte key for accessing the `split` of the freezer DB. pub const SPLIT_DB_KEY: &str = "FREEZERDBSPLITFREEZERDBSPLITFREE"; +/// Defines how blocks should be replayed on states. +#[derive(PartialEq)] +pub enum BlockReplay { + /// Perform all transitions faithfully to the specification. 
+ Accurate, + /// Don't compute state roots, eventually computing an invalid beacon state that can only be + /// used for obtaining shuffling. + InconsistentStateRoots, +} + /// On-disk database that stores finalized states efficiently. /// /// Stores vector fields like the `block_roots` and `state_roots` separately, and only stores @@ -137,6 +147,12 @@ impl HotColdDB, LevelDB> { // Load the previous split slot from the database (if any). This ensures we can // stop and restart correctly. if let Some(split) = db.load_split()? { + info!( + db.log, + "Hot-Cold DB initialized"; + "split_slot" => split.slot, + "split_state" => format!("{:?}", split.state_root) + ); *db.split.write() = split; } Ok(db) @@ -208,6 +224,13 @@ impl, Cold: ItemStore> HotColdDB } /// Fetch a state from the store. + /// + /// If `slot` is provided then it will be used as a hint as to which database should + /// be checked. Importantly, if the slot hint is provided and indicates a slot that lies + /// in the freezer database, then only the freezer database will be accessed and `Ok(None)` + /// will be returned if the provided `state_root` doesn't match the state root of the + /// frozen state at `slot`. Consequently, if a state from a non-canonical chain is desired, it's + /// best to set `slot` to `None`, or call `load_hot_state` directly. pub fn get_state( &self, state_root: &Hash256, @@ -217,18 +240,46 @@ impl, Cold: ItemStore> HotColdDB if let Some(slot) = slot { if slot < self.get_split_slot() { - self.load_cold_state_by_slot(slot).map(Some) + // Although we could avoid a DB lookup by shooting straight for the + // frozen state using `load_cold_state_by_slot`, that would be incorrect + // in the case where the caller provides a `state_root` that's off the canonical + // chain. This way we avoid returning a state that doesn't match `state_root`. 
+ self.load_cold_state(state_root) } else { - self.load_hot_state(state_root) + self.load_hot_state(state_root, BlockReplay::Accurate) } } else { - match self.load_hot_state(state_root)? { + match self.load_hot_state(state_root, BlockReplay::Accurate)? { Some(state) => Ok(Some(state)), None => self.load_cold_state(state_root), } } } + /// Fetch a state from the store, but don't compute all of the values when replaying blocks + /// upon that state (e.g., state roots). Additionally, only states from the hot store are + /// returned. + /// + /// See `Self::get_state` for information about `slot`. + /// + /// ## Warning + /// + /// The returned state **is not a valid beacon state**, it can only be used for obtaining + /// shuffling to process attestations. + pub fn get_inconsistent_state_for_attestation_verification_only( + &self, + state_root: &Hash256, + slot: Option, + ) -> Result>, Error> { + metrics::inc_counter(&metrics::BEACON_STATE_GET_COUNT); + + if slot.map_or(false, |slot| slot < self.get_split_slot()) { + Ok(None) + } else { + self.load_hot_state(state_root, BlockReplay::InconsistentStateRoots) + } + } + /// Delete a state, ensuring it is removed from the LRU cache, as well as from on-disk. /// /// It is assumed that all states being deleted reside in the hot DB, even if their slot is less @@ -272,8 +323,11 @@ impl, Cold: ItemStore> HotColdDB }) = self.load_hot_state_summary(state_root)? { // NOTE: minor inefficiency here because we load an unnecessary hot state summary + // + // `BlockReplay` should be irrelevant here since we never replay blocks for an epoch + // boundary state in the hot DB. let state = self - .load_hot_state(&epoch_boundary_state_root)? + .load_hot_state(&epoch_boundary_state_root, BlockReplay::Accurate)? .ok_or_else(|| { HotColdDBError::MissingEpochBoundaryState(epoch_boundary_state_root) })?; @@ -404,7 +458,11 @@ impl, Cold: ItemStore> HotColdDB /// Load a post-finalization state from the hot database. 
/// /// Will replay blocks from the nearest epoch boundary. - pub fn load_hot_state(&self, state_root: &Hash256) -> Result>, Error> { + pub fn load_hot_state( + &self, + state_root: &Hash256, + block_replay: BlockReplay, + ) -> Result>, Error> { metrics::inc_counter(&metrics::BEACON_STATE_HOT_GET_COUNT); if let Some(HotStateSummary { @@ -425,7 +483,7 @@ impl, Cold: ItemStore> HotColdDB } else { let blocks = self.load_blocks_to_replay(boundary_state.slot, slot, latest_block_root)?; - self.replay_blocks(boundary_state, blocks, slot)? + self.replay_blocks(boundary_state, blocks, slot, block_replay)? }; Ok(Some(state)) @@ -556,7 +614,7 @@ impl, Cold: ItemStore> HotColdDB )?; // 3. Replay the blocks on top of the low restore point. - self.replay_blocks(low_restore_point, blocks, slot) + self.replay_blocks(low_restore_point, blocks, slot, BlockReplay::Accurate) } /// Get a suitable block root for backtracking from `high_restore_point` to the state at `slot`. @@ -613,9 +671,19 @@ impl, Cold: ItemStore> HotColdDB fn replay_blocks( &self, mut state: BeaconState, - blocks: Vec>, + mut blocks: Vec>, target_slot: Slot, + block_replay: BlockReplay, ) -> Result, Error> { + if block_replay == BlockReplay::InconsistentStateRoots { + for i in 0..blocks.len() { + blocks[i].message.state_root = Hash256::zero(); + if i > 0 { + blocks[i].message.parent_root = blocks[i - 1].canonical_root() + } + } + } + let state_root_from_prev_block = |i: usize, state: &BeaconState| { if i > 0 { let prev_block = &blocks[i - 1].message; @@ -635,10 +703,14 @@ impl, Cold: ItemStore> HotColdDB } while state.slot < block.message.slot { - let state_root = state_root_from_prev_block(i, &state); + let state_root = match block_replay { + BlockReplay::Accurate => state_root_from_prev_block(i, &state), + BlockReplay::InconsistentStateRoots => Some(Hash256::zero()), + }; per_slot_processing(&mut state, state_root, &self.spec) .map_err(HotColdDBError::BlockReplaySlotError)?; } + per_block_processing( &mut state, 
&block, @@ -650,7 +722,10 @@ impl, Cold: ItemStore> HotColdDB } while state.slot < target_slot { - let state_root = state_root_from_prev_block(blocks.len(), &state); + let state_root = match block_replay { + BlockReplay::Accurate => state_root_from_prev_block(blocks.len(), &state), + BlockReplay::InconsistentStateRoots => Some(Hash256::zero()), + }; per_slot_processing(&mut state, state_root, &self.spec) .map_err(HotColdDBError::BlockReplaySlotError)?; } @@ -750,7 +825,7 @@ impl, Cold: ItemStore> HotColdDB } /// Advance the split point of the store, moving new finalized states to the freezer. -pub fn process_finalization, Cold: ItemStore>( +pub fn migrate_database, Cold: ItemStore>( store: Arc>, frozen_head_root: Hash256, frozen_head: &BeaconState, diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 02e314dc627..27187022686 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -27,7 +27,7 @@ pub mod iter; use std::borrow::Cow; pub use self::config::StoreConfig; -pub use self::hot_cold_store::{HotColdDB, HotStateSummary, Split}; +pub use self::hot_cold_store::{BlockReplay, HotColdDB, HotStateSummary, Split}; pub use self::leveldb_store::LevelDB; pub use self::memory_store::MemoryStore; pub use self::partial_beacon_state::PartialBeaconState; diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index d50824adfd2..88d82e4d94b 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -7,6 +7,7 @@ * [Installation](./installation.md) * [Docker](./docker.md) * [Raspberry Pi 4](./pi.md) + * [Cross-Compiling](./cross-compiling.md) * [Key Management](./key-management.md) * [Create a wallet](./wallet-create.md) * [Create a validator](./validator-create.md) diff --git a/book/src/become-a-validator-source.md b/book/src/become-a-validator-source.md index 891584331b5..adab86ce398 100644 --- a/book/src/become-a-validator-source.md +++ b/book/src/become-a-validator-source.md @@ -3,7 +3,9 @@ ## 0. 
Install Rust If you don't have Rust installed already, visit [rustup.rs](https://rustup.rs/) to install it. -> Note: if you're not familiar with Rust or you'd like more detailed instructions, see our [installation guide](./installation.md). +> Notes: +> - If you're not familiar with Rust or you'd like more detailed instructions, see our [installation guide](./installation.md). +> - Windows is presently only supported via [WSL](https://docs.microsoft.com/en-us/windows/wsl/about). ## 1. Download and install Lighthouse @@ -86,7 +88,7 @@ validator](./validator-create.md). A two-step example follows: Create a wallet with: ```bash -lighthouse --testnet medalla account wallet create --name my-validators --passphrase-file my-validators.pass +lighthouse --testnet medalla account wallet create --name my-validators --password-file my-validators.pass ``` The output will look like this: @@ -122,7 +124,7 @@ used to restore your validator if there is a data loss. Create a validator from the wallet with: ```bash -lighthouse --testnet medalla account validator create --wallet-name my-validators --wallet-passphrase my-validators.pass --count 1 +lighthouse --testnet medalla account validator create --wallet-name my-validators --wallet-password my-validators.pass --count 1 ``` The output will look like this: diff --git a/book/src/cross-compiling.md b/book/src/cross-compiling.md new file mode 100644 index 00000000000..837cc13a66d --- /dev/null +++ b/book/src/cross-compiling.md @@ -0,0 +1,41 @@ +# Cross-compiling + +Lighthouse supports cross-compiling, allowing users to run a binary on one +platform (e.g., `aarch64`) that was compiled on another platform (e.g., +`x86_64`). + + +## Instructions + +Cross-compiling requires [`Docker`](https://docs.docker.com/engine/install/), +[`rustembedded/cross`](https://github.com/rust-embedded/cross) and for the +current user to be in the `docker` group. + +The binaries will be created in the `target/` directory of the Lighthouse +project. 
+ +### Targets + +The `Makefile` in the project contains four targets for cross-compiling: + +- `build-x86_64`: builds an optimized version for x86_64 processors (suitable + for most users). +- `build-x86_64-portable`: builds a version x86_64 processors which avoids + using some modern CPU instructions that might cause an "illegal + instruction" error on older CPUs. +- `build-aarch64`: builds an optimized version for 64bit ARM processors + (suitable for Raspberry Pi 4). +- `build-aarch64-portable`: builds a version 64 bit ARM processors which avoids + using some modern CPU instructions that might cause an "illegal + instruction" error on older CPUs. + + +### Example + +```bash +cd lighthouse +make build-aarch64 +``` + +The `lighthouse` binary will be compiled inside a Docker container and placed +in `lighthouse/target/aarch64-unknown-linux-gnu/release`. diff --git a/book/src/key-management.md b/book/src/key-management.md index ea06b8bc97b..53edec221d9 100644 --- a/book/src/key-management.md +++ b/book/src/key-management.md @@ -35,16 +35,16 @@ items, starting at one easy-to-backup mnemonic and ending with multiple keypairs. Creating a single validator looks like this: 1. Create a **wallet** and record the **mnemonic**: - - `lighthouse account wallet create --name wally --passphrase-file wally.pass` + - `lighthouse account wallet create --name wally --password-file wally.pass` 1. Create the voting and withdrawal **keystores** for one validator: - - `lighthouse account validator create --wallet-name wally --wallet-passphrase wally.pass --count 1` + - `lighthouse account validator create --wallet-name wally --wallet-password wally.pass --count 1` In step (1), we created a wallet in `~/.lighthouse/wallets` with the name -`mywallet`. We encrypted this using a pre-defined password in the -`mywallet.pass` file. 
Then, in step (2), we created one new validator in the -`~/.lighthouse/validators` directory using `mywallet` (unlocking it with -`mywallet.pass`) and storing the passwords to the validators voting key in +`wally`. We encrypted this using a pre-defined password in the +`wally.pass` file. Then, in step (2), we created one new validator in the +`~/.lighthouse/validators` directory using `wally` (unlocking it with +`wally.pass`) and storing the passwords to the validators voting key in `~/.lighthouse/secrets`. Thanks to the hierarchical key derivation scheme, we can delete all of the diff --git a/book/src/pi.md b/book/src/pi.md index c080fd0dab1..e5c24f73f2d 100644 --- a/book/src/pi.md +++ b/book/src/pi.md @@ -5,6 +5,11 @@ Tested on: - Raspberry Pi 4 Model B (4GB) - `Ubuntu 20.04 LTS (GNU/Linux 5.4.0-1011-raspi aarch64)` + +*Note: [Lighthouse supports cross-compiling](./cross-compiling.md) to target a +Raspberry Pi (`aarch64`). Compiling on a faster machine (i.e., `x86_64` +desktop) may be convenient.* + ### 1. Install Ubuntu Follow the [Ubuntu Raspberry Pi installation instructions](https://ubuntu.com/download/raspberry-pi). diff --git a/book/src/validator-create.md b/book/src/validator-create.md index 7f856a139d2..25112e74872 100644 --- a/book/src/validator-create.md +++ b/book/src/validator-create.md @@ -17,7 +17,7 @@ lighthouse account validator create --help Creates new validators from an existing EIP-2386 wallet using the EIP-2333 HD key derivation scheme. USAGE: - lighthouse account_manager validator create [FLAGS] [OPTIONS] --wallet-name --wallet-passphrase + lighthouse account_manager validator create [FLAGS] [OPTIONS] --wallet-name --wallet-password FLAGS: -h, --help Prints help information @@ -56,7 +56,7 @@ OPTIONS: The path where the validator directories will be created. 
Defaults to ~/.lighthouse/validators --wallet-name Use the wallet identified by this name - --wallet-passphrase + --wallet-password A path to a file containing the password which will unlock the wallet. ``` @@ -66,12 +66,12 @@ The example assumes that the `wally` wallet was generated from the [wallet](./wallet-create.md) example. ```bash -lighthouse --testnet medalla account validator create --name wally --wallet-passphrase wally.pass --count 1 +lighthouse --testnet medalla account validator create --name wally --wallet-password wally.pass --count 1 ``` This command will: -- Derive a new BLS keypair from `wally`, updating it so that it generates a +- Derive a single new BLS keypair from `wally`, updating it so that it generates a new key next time. - Create a new directory in `~/.lighthouse/validators` containing: - An encrypted keystore containing the validators voting keypair. diff --git a/book/src/wallet-create.md b/book/src/wallet-create.md index 1505566d499..fe0ac6dc94a 100644 --- a/book/src/wallet-create.md +++ b/book/src/wallet-create.md @@ -25,7 +25,7 @@ lighthouse account wallet create --help Creates a new HD (hierarchical-deterministic) EIP-2386 wallet. USAGE: - lighthouse account_manager wallet create [OPTIONS] --name --passphrase-file + lighthouse account_manager wallet create [OPTIONS] --name --password-file FLAGS: -h, --help Prints help information @@ -39,7 +39,7 @@ OPTIONS: --name The wallet will be created with this name. It is not allowed to create two wallets with the same name for the same --base-dir. - --passphrase-file + --password-file A path to a file containing the password which will unlock the wallet. If the file does not exist, a random password will be generated and saved at that path. To avoid confusion, if the file does not already exist it must include a '.pass' suffix. 
@@ -61,7 +61,7 @@ Creates a new wallet named `wally` with a randomly generated password saved to `./wallet.pass`: ```bash -lighthouse account wallet create --name wally --passphrase-file wally.pass +lighthouse account wallet create --name wally --password-file wally.pass ``` > Notes: diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index 34da588ca1e..e69428e8003 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,10 +1,11 @@ [package] name = "boot_node" -version = "0.1.0" +version = "0.2.8" authors = ["Sigma Prime "] edition = "2018" [dependencies] +beacon_node = { path = "../beacon_node" } clap = "2.33.0" eth2_libp2p = { path = "../beacon_node/eth2_libp2p" } slog = "2.5.2" diff --git a/boot_node/src/cli.rs b/boot_node/src/cli.rs index db47bcf26eb..31d541c3083 100644 --- a/boot_node/src/cli.rs +++ b/boot_node/src/cli.rs @@ -12,7 +12,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { surface compared to a full beacon node.") .settings(&[clap::AppSettings::ColoredHelp]) .arg( - Arg::with_name("boot-node-enr-address") + Arg::with_name("enr-address") .value_name("IP-ADDRESS") .help("The external IP address/ DNS address to broadcast to other peers on how to reach this node. \ If a DNS address is provided, the enr-address is set to the IP address it resolves to and \ @@ -44,7 +44,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(true), ) .arg( - Arg::with_name("enr-port") + Arg::with_name("enr-udp-port") .long("enr-port") .value_name("PORT") .help("The UDP port of the boot node's ENR. This is the port that external peers will dial to reach this boot node. 
Set this only if the external port differs from the listening port.") diff --git a/boot_node/src/config.rs b/boot_node/src/config.rs index 3d6570973a4..a08aed00cbb 100644 --- a/boot_node/src/config.rs +++ b/boot_node/src/config.rs @@ -1,7 +1,12 @@ +use beacon_node::{get_data_dir, set_network_config}; use clap::ArgMatches; use discv5::{enr::CombinedKey, Enr}; +use eth2_libp2p::{ + discovery::{create_enr_builder_from_config, use_or_load_enr}, + load_private_key, CombinedKeyExt, NetworkConfig, +}; use std::convert::TryFrom; -use std::net::{IpAddr, SocketAddr, ToSocketAddrs}; +use std::net::SocketAddr; /// A set of configuration parameters for the bootnode, established from CLI arguments. pub struct BootNodeConfig { @@ -17,17 +22,22 @@ impl TryFrom<&ArgMatches<'_>> for BootNodeConfig { type Error = String; fn try_from(matches: &ArgMatches<'_>) -> Result { - let listen_address = matches - .value_of("listen-address") - .expect("required parameter") - .parse::() - .map_err(|_| "Invalid listening address".to_string())?; + let data_dir = get_data_dir(matches); - let listen_port = matches - .value_of("port") - .expect("required parameter") - .parse::() - .map_err(|_| "Invalid listening port".to_string())?; + let mut network_config = NetworkConfig::default(); + + let logger = slog_scope::logger(); + + set_network_config(&mut network_config, matches, &data_dir, &logger, true)?; + + let private_key = load_private_key(&network_config, &logger); + let local_key = CombinedKey::from_libp2p(&private_key)?; + + let mut local_enr = create_enr_builder_from_config(&network_config) + .build(&local_key) + .map_err(|e| format!("Failed to build ENR: {:?}", e))?; + + use_or_load_enr(&local_key, &mut local_enr, &network_config, &logger)?; let boot_nodes = { if let Some(boot_nodes) = matches.value_of("boot-nodes") { @@ -40,34 +50,11 @@ impl TryFrom<&ArgMatches<'_>> for BootNodeConfig { } }; - let enr_port = { - if let Some(port) = matches.value_of("boot-node-enr-port") { - port.parse::() - 
.map_err(|_| "Invalid ENR port".to_string())? - } else { - listen_port - } - }; - - let enr_address = { - let address_string = matches - .value_of("boot-node-enr-address") - .expect("required parameter"); - resolve_address(address_string.into(), enr_port)? - }; - let auto_update = matches.is_present("enable-enr_auto_update"); // the address to listen on - let listen_socket = SocketAddr::new(listen_address, enr_port); - - // Generate a new key and build a new ENR - let local_key = CombinedKey::generate_secp256k1(); - let local_enr = discv5::enr::EnrBuilder::new("v4") - .ip(enr_address) - .udp(enr_port) - .build(&local_key) - .map_err(|e| format!("Failed to build ENR: {:?}", e))?; + let listen_socket = + SocketAddr::new(network_config.listen_address, network_config.discovery_port); Ok(BootNodeConfig { listen_socket, @@ -78,25 +65,3 @@ impl TryFrom<&ArgMatches<'_>> for BootNodeConfig { }) } } - -/// Resolves an IP/DNS string to an IpAddr. -fn resolve_address(address_string: String, port: u16) -> Result { - match address_string.parse::() { - Ok(addr) => Ok(addr), // valid IpAddr - Err(_) => { - let mut addr = address_string.clone(); - // Appending enr-port to the dns hostname to appease `to_socket_addrs()` parsing. - addr.push_str(&format!(":{}", port.to_string())); - // `to_socket_addr()` does the dns resolution - // Note: `to_socket_addrs()` is a blocking call - addr.to_socket_addrs() - .map(|mut resolved_addrs| - // Pick the first ip from the list of resolved addresses - resolved_addrs - .next() - .map(|a| a.ip()) - .ok_or_else(|| "Resolved dns addr contains no entries".to_string())) - .map_err(|_| format!("Failed to parse enr-address: {}", address_string))? 
- } - } -} diff --git a/boot_node/src/lib.rs b/boot_node/src/lib.rs index f066b6e41be..2a554666304 100644 --- a/boot_node/src/lib.rs +++ b/boot_node/src/lib.rs @@ -9,6 +9,8 @@ mod server; pub use cli::cli_app; use config::BootNodeConfig; +const LOG_CHANNEL_SIZE: usize = 2048; + /// Run the bootnode given the CLI configuration. pub fn run(matches: &ArgMatches<'_>, debug_level: String) { let debug_level = match debug_level.as_str() { @@ -26,7 +28,9 @@ pub fn run(matches: &ArgMatches<'_>, debug_level: String) { let decorator = slog_term::TermDecorator::new().build(); let decorator = logging::AlignedTermDecorator::new(decorator, logging::MAX_MESSAGE_WIDTH); let drain = slog_term::FullFormat::new(decorator).build().fuse(); - slog_async::Async::new(drain).build() + slog_async::Async::new(drain) + .chan_size(LOG_CHANNEL_SIZE) + .build() }; let drain = match debug_level { diff --git a/boot_node/src/server.rs b/boot_node/src/server.rs index 162b308880e..415e44eecc0 100644 --- a/boot_node/src/server.rs +++ b/boot_node/src/server.rs @@ -43,7 +43,10 @@ pub async fn run(config: BootNodeConfig, log: slog::Logger) { } // start the server - discv5.start(config.listen_socket); + if let Err(e) = discv5.start(config.listen_socket) { + slog::crit!(log, "Could not start discv5 server"; "error" => e.to_string()); + return; + } // if there are peers in the local routing table, establish a session by running a query if !discv5.table_entries_id().is_empty() { diff --git a/common/account_utils/src/validator_definitions.rs b/common/account_utils/src/validator_definitions.rs index c1f8e0ad57d..733c771bee9 100644 --- a/common/account_utils/src/validator_definitions.rs +++ b/common/account_utils/src/validator_definitions.rs @@ -310,6 +310,15 @@ fn is_voting_keystore(file_name: &str) -> bool { return true; } + // The format exported by Prysm. I don't have a reference for this, but it was shared via + // Discord to Paul H. 
+ if Regex::new("keystore-[0-9]+.json") + .expect("regex is valid") + .is_match(file_name) + { + return true; + } + false } @@ -318,8 +327,12 @@ mod tests { use super::*; #[test] - fn voting_keystore_filename() { + fn voting_keystore_filename_lighthouse() { assert!(is_voting_keystore(VOTING_KEYSTORE_FILE)); + } + + #[test] + fn voting_keystore_filename_launchpad() { assert!(!is_voting_keystore("cats")); assert!(!is_voting_keystore(&format!("a{}", VOTING_KEYSTORE_FILE))); assert!(!is_voting_keystore(&format!("{}b", VOTING_KEYSTORE_FILE))); @@ -337,4 +350,14 @@ mod tests { "keystore-m_12381_3600_1_0-1593476250.json" )); } + + #[test] + fn voting_keystore_filename_prysm() { + assert!(is_voting_keystore("keystore-0.json")); + assert!(is_voting_keystore("keystore-1.json")); + assert!(is_voting_keystore("keystore-101238259.json")); + assert!(!is_voting_keystore("keystore-.json")); + assert!(!is_voting_keystore("keystore-0a.json")); + assert!(!is_voting_keystore("keystore-cats.json")); + } } diff --git a/common/deposit_contract/Cargo.toml b/common/deposit_contract/Cargo.toml index c796ff47734..02014305df7 100644 --- a/common/deposit_contract/Cargo.toml +++ b/common/deposit_contract/Cargo.toml @@ -7,7 +7,7 @@ edition = "2018" build = "build.rs" [build-dependencies] -reqwest = { version = "0.10.4", features = ["blocking", "json"] } +reqwest = { version = "0.10.4", features = ["blocking", "json", "native-tls-vendored"] } serde_json = "1.0.52" sha2 = "0.9.1" hex = "0.4.2" diff --git a/common/eth2_testnet_config/Cargo.toml b/common/eth2_testnet_config/Cargo.toml index f7621af80d7..f8cd65380dc 100644 --- a/common/eth2_testnet_config/Cargo.toml +++ b/common/eth2_testnet_config/Cargo.toml @@ -7,7 +7,7 @@ edition = "2018" build = "build.rs" [build-dependencies] -reqwest = { version = "0.10.4", features = ["blocking"] } +reqwest = { version = "0.10.4", features = ["blocking", "native-tls-vendored"] } eth2_config = { path = "../eth2_config"} handlebars = "3.3.0" serde_json = 
"1.0.56" diff --git a/common/lighthouse_metrics/src/lib.rs b/common/lighthouse_metrics/src/lib.rs index a0f59c54b54..0a4251e06df 100644 --- a/common/lighthouse_metrics/src/lib.rs +++ b/common/lighthouse_metrics/src/lib.rs @@ -57,8 +57,8 @@ use prometheus::{HistogramOpts, HistogramTimer, Opts}; pub use prometheus::{ - Encoder, Gauge, GaugeVec, Histogram, HistogramVec, IntCounter, IntGauge, IntGaugeVec, Result, - TextEncoder, + Encoder, Gauge, GaugeVec, Histogram, HistogramVec, IntCounter, IntCounterVec, IntGauge, + IntGaugeVec, Result, TextEncoder, }; /// Collect all the metrics for reporting. @@ -66,7 +66,7 @@ pub fn gather() -> Vec { prometheus::gather() } -/// Attempts to crate an `IntCounter`, returning `Err` if the registry does not accept the counter +/// Attempts to create an `IntCounter`, returning `Err` if the registry does not accept the counter /// (potentially due to naming conflict). pub fn try_create_int_counter(name: &str, help: &str) -> Result { let opts = Opts::new(name, help); @@ -75,7 +75,7 @@ pub fn try_create_int_counter(name: &str, help: &str) -> Result { Ok(counter) } -/// Attempts to crate an `IntGauge`, returning `Err` if the registry does not accept the counter +/// Attempts to create an `IntGauge`, returning `Err` if the registry does not accept the counter /// (potentially due to naming conflict). pub fn try_create_int_gauge(name: &str, help: &str) -> Result { let opts = Opts::new(name, help); @@ -84,7 +84,7 @@ pub fn try_create_int_gauge(name: &str, help: &str) -> Result { Ok(gauge) } -/// Attempts to crate a `Gauge`, returning `Err` if the registry does not accept the counter +/// Attempts to create a `Gauge`, returning `Err` if the registry does not accept the counter /// (potentially due to naming conflict). 
pub fn try_create_float_gauge(name: &str, help: &str) -> Result { let opts = Opts::new(name, help); @@ -93,7 +93,7 @@ pub fn try_create_float_gauge(name: &str, help: &str) -> Result { Ok(gauge) } -/// Attempts to crate a `Histogram`, returning `Err` if the registry does not accept the counter +/// Attempts to create a `Histogram`, returning `Err` if the registry does not accept the counter /// (potentially due to naming conflict). pub fn try_create_histogram(name: &str, help: &str) -> Result { let opts = HistogramOpts::new(name, help); @@ -102,7 +102,7 @@ pub fn try_create_histogram(name: &str, help: &str) -> Result { Ok(histogram) } -/// Attempts to crate a `HistogramVec`, returning `Err` if the registry does not accept the counter +/// Attempts to create a `HistogramVec`, returning `Err` if the registry does not accept the counter /// (potentially due to naming conflict). pub fn try_create_histogram_vec( name: &str, @@ -115,7 +115,7 @@ pub fn try_create_histogram_vec( Ok(histogram_vec) } -/// Attempts to crate a `IntGaugeVec`, returning `Err` if the registry does not accept the gauge +/// Attempts to create a `IntGaugeVec`, returning `Err` if the registry does not accept the gauge /// (potentially due to naming conflict). pub fn try_create_int_gauge_vec( name: &str, @@ -128,7 +128,7 @@ pub fn try_create_int_gauge_vec( Ok(counter_vec) } -/// Attempts to crate a `GaugeVec`, returning `Err` if the registry does not accept the gauge +/// Attempts to create a `GaugeVec`, returning `Err` if the registry does not accept the gauge /// (potentially due to naming conflict). pub fn try_create_float_gauge_vec( name: &str, @@ -141,6 +141,20 @@ pub fn try_create_float_gauge_vec( Ok(counter_vec) } +/// Attempts to create a `IntGaugeVec`, returning `Err` if the registry does not accept the gauge +/// (potentially due to naming conflict). 
+pub fn try_create_int_counter_vec( + name: &str, + help: &str, + label_names: &[&str], +) -> Result { + let opts = Opts::new(name, help); + let counter_vec = IntCounterVec::new(opts, label_names)?; + prometheus::register(Box::new(counter_vec.clone()))?; + Ok(counter_vec) +} + +/// If `int_gauge_vec.is_ok()`, returns a gauge with the given `name`. pub fn get_int_gauge(int_gauge_vec: &Result, name: &[&str]) -> Option { if let Ok(int_gauge_vec) = int_gauge_vec { Some(int_gauge_vec.get_metric_with_label_values(name).ok()?) @@ -149,6 +163,42 @@ pub fn get_int_gauge(int_gauge_vec: &Result, name: &[&str]) -> Opti } } +/// If `int_gauge_vec.is_ok()`, sets the gauge with the given `name` to the given `value` +/// otherwise returns false. +pub fn set_int_gauge(int_gauge_vec: &Result, name: &[&str], value: i64) -> bool { + if let Ok(int_gauge_vec) = int_gauge_vec { + int_gauge_vec + .get_metric_with_label_values(name) + .map(|v| { + v.set(value); + true + }) + .unwrap_or_else(|_| false) + } else { + false + } +} + +/// If `int_counter_vec.is_ok()`, returns a counter with the given `name`. +pub fn get_int_counter( + int_counter_vec: &Result, + name: &[&str], +) -> Option { + if let Ok(int_counter_vec) = int_counter_vec { + Some(int_counter_vec.get_metric_with_label_values(name).ok()?) + } else { + None + } +} + +/// Increments the `int_counter_vec` with the given `name`. +pub fn inc_counter_vec(int_counter_vec: &Result, name: &[&str]) { + if let Some(counter) = get_int_counter(int_counter_vec, name) { + counter.inc() + } +} + +/// If `histogram_vec.is_ok()`, returns a histogram with the given `name`. pub fn get_histogram(histogram_vec: &Result, name: &[&str]) -> Option { if let Ok(histogram_vec) = histogram_vec { Some(histogram_vec.get_metric_with_label_values(name).ok()?) @@ -157,6 +207,11 @@ pub fn get_histogram(histogram_vec: &Result, name: &[&str]) -> Opt } } +/// Starts a timer on `vec` with the given `name`. 
+pub fn start_timer_vec(vec: &Result, name: &[&str]) -> Option { + get_histogram(vec, name).map(|h| h.start_timer()) +} + /// Starts a timer for the given `Histogram`, stopping when it gets dropped or given to `stop_timer(..)`. pub fn start_timer(histogram: &Result) -> Option { if let Ok(histogram) = histogram { diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index 4204476d718..aaf01c4e1cf 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -10,7 +10,7 @@ use target_info::Target; /// `Lighthouse/v0.2.0-1419501f2+` pub const VERSION: &str = git_version!( args = ["--always", "--dirty=+"], - prefix = "Lighthouse/v0.2.1-", + prefix = "Lighthouse/v0.2.8-", fallback = "unknown" ); diff --git a/common/logging/src/lib.rs b/common/logging/src/lib.rs index 5a2a0757f21..35ef0a1ea79 100644 --- a/common/logging/src/lib.rs +++ b/common/logging/src/lib.rs @@ -4,6 +4,7 @@ extern crate lazy_static; use lighthouse_metrics::{ inc_counter, try_create_int_counter, IntCounter, Result as MetricsResult, }; +use slog_term::Decorator; use std::io::{Result, Write}; pub const MAX_MESSAGE_WIDTH: usize = 40; @@ -19,13 +20,13 @@ lazy_static! 
{ try_create_int_counter("crit_total", "Count of crits logged"); } -pub struct AlignedTermDecorator { - wrapped: slog_term::TermDecorator, +pub struct AlignedTermDecorator { + wrapped: D, message_width: usize, } -impl AlignedTermDecorator { - pub fn new(decorator: slog_term::TermDecorator, message_width: usize) -> AlignedTermDecorator { +impl AlignedTermDecorator { + pub fn new(decorator: D, message_width: usize) -> Self { AlignedTermDecorator { wrapped: decorator, message_width, @@ -33,7 +34,7 @@ impl AlignedTermDecorator { } } -impl slog_term::Decorator for AlignedTermDecorator { +impl Decorator for AlignedTermDecorator { fn with_record( &self, record: &slog::Record, diff --git a/common/lru_cache/Cargo.toml b/common/lru_cache/Cargo.toml new file mode 100644 index 00000000000..df5d9b1628f --- /dev/null +++ b/common/lru_cache/Cargo.toml @@ -0,0 +1,8 @@ +[package] +name = "lru_cache" +version = "0.1.0" +authors = ["Sigma Prime "] +edition = "2018" + +[dependencies] +fnv = "1.0.7" diff --git a/common/lru_cache/src/lib.rs b/common/lru_cache/src/lib.rs new file mode 100644 index 00000000000..51df38bcfe4 --- /dev/null +++ b/common/lru_cache/src/lib.rs @@ -0,0 +1,7 @@ +//! A library to provide fast and efficient LRU Cache's without updating. + +mod space; +mod time; + +pub use space::LRUCache; +pub use time::LRUTimeCache; diff --git a/common/lru_cache/src/space.rs b/common/lru_cache/src/space.rs new file mode 100644 index 00000000000..db588632a96 --- /dev/null +++ b/common/lru_cache/src/space.rs @@ -0,0 +1,93 @@ +///! This implements a time-based LRU cache for fast checking of duplicates +use fnv::FnvHashSet; +use std::collections::VecDeque; + +/// Cache that stores keys until the size is used up. Does not update elements for efficiency. +pub struct LRUCache +where + Key: Eq + std::hash::Hash + Clone, +{ + /// The duplicate cache. + map: FnvHashSet, + /// An ordered list of keys by order. 
+ list: VecDeque, + // The max size of the cache, + size: usize, +} + +impl LRUCache +where + Key: Eq + std::hash::Hash + Clone, +{ + pub fn new(size: usize) -> Self { + LRUCache { + map: FnvHashSet::default(), + list: VecDeque::new(), + size, + } + } + + /// Determines if the key is in the cache. + pub fn contains(&self, key: &Key) -> bool { + self.map.contains(key) + } + + // Inserts new elements and removes any expired elements. + // + // If the key was not present this returns `true`. If the value was already present this + // returns `false`. + pub fn insert(&mut self, key: Key) -> bool { + // check the cache before removing elements + let result = self.map.insert(key.clone()); + + // add the new key to the list, if it doesn't already exist. + if result { + self.list.push_back(key); + } + // remove any overflow keys + self.update(); + result + } + + /// Removes any expired elements from the cache. + fn update(&mut self) { + // remove any expired results + for _ in 0..self.map.len().saturating_sub(self.size) { + if let Some(key) = self.list.pop_front() { + self.map.remove(&key); + } + } + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn cache_added_entries_exist() { + let mut cache = LRUCache::new(5); + + cache.insert("t"); + cache.insert("e"); + + // Should report that 't' and 't' already exists + assert!(!cache.insert("t")); + assert!(!cache.insert("e")); + } + + #[test] + fn cache_entries_get_removed() { + let mut cache = LRUCache::new(2); + + cache.insert("t"); + assert!(!cache.insert("t")); + cache.insert("e"); + assert!(!cache.insert("e")); + // add another element to clear the first key + cache.insert("s"); + assert!(!cache.insert("s")); + // should be removed from the cache + assert!(cache.insert("t")); + } +} diff --git a/common/lru_cache/src/time.rs b/common/lru_cache/src/time.rs new file mode 100644 index 00000000000..30f890a8c6d --- /dev/null +++ b/common/lru_cache/src/time.rs @@ -0,0 +1,126 @@ +///! 
This implements a time-based LRU cache for fast checking of duplicates +use fnv::FnvHashSet; +use std::collections::VecDeque; +use std::time::{Duration, Instant}; + +struct Element { + /// The key being inserted. + key: Key, + /// The instant the key was inserted. + inserted: Instant, +} + +pub struct LRUTimeCache { + /// The duplicate cache. + map: FnvHashSet, + /// An ordered list of keys by insert time. + list: VecDeque>, + /// The time elements remain in the cache. + ttl: Duration, +} + +impl LRUTimeCache +where + Key: Eq + std::hash::Hash + Clone, +{ + pub fn new(ttl: Duration) -> Self { + LRUTimeCache { + map: FnvHashSet::default(), + list: VecDeque::new(), + ttl, + } + } + + // Inserts new elements and removes any expired elements. + // + // If the key was not present this returns `true`. If the value was already present this + // returns `false`. + pub fn insert_update(&mut self, key: Key) -> bool { + // check the cache before removing elements + let result = self.map.insert(key.clone()); + + let now = Instant::now(); + + // remove any expired results + while let Some(element) = self.list.pop_front() { + if element.inserted + self.ttl > now { + self.list.push_front(element); + break; + } + self.map.remove(&element.key); + } + + // add the new key to the list, if it doesn't already exist. + if result { + self.list.push_back(Element { key, inserted: now }); + } + + result + } + + // Inserts new element does not expire old elements. + // + // If the key was not present this returns `true`. If the value was already present this + // returns `false`. + pub fn insert(&mut self, key: Key) -> bool { + // check the cache before removing elements + let result = self.map.insert(key.clone()); + + // add the new key to the list, if it doesn't already exist. + if result { + self.list.push_back(Element { + key, + inserted: Instant::now(), + }); + } + result + } + + /// Removes any expired elements from the cache. 
+ pub fn update(&mut self) { + let now = Instant::now(); + // remove any expired results + while let Some(element) = self.list.pop_front() { + if element.inserted + self.ttl > now { + self.list.push_front(element); + break; + } + self.map.remove(&element.key); + } + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn cache_added_entries_exist() { + let mut cache = LRUTimeCache::new(Duration::from_secs(10)); + + cache.insert("t"); + cache.insert("e"); + + // Should report that 't' and 't' already exists + assert!(!cache.insert("t")); + assert!(!cache.insert("e")); + } + + #[test] + fn cache_entries_expire() { + let mut cache = LRUTimeCache::new(Duration::from_millis(100)); + + cache.insert_update("t"); + assert!(!cache.insert_update("t")); + cache.insert_update("e"); + assert!(!cache.insert_update("t")); + assert!(!cache.insert_update("e")); + // sleep until cache expiry + std::thread::sleep(Duration::from_millis(101)); + // add another element to clear previous cache + cache.insert_update("s"); + + // should be removed from the cache + assert!(cache.insert_update("t")); + } +} diff --git a/common/remote_beacon_node/Cargo.toml b/common/remote_beacon_node/Cargo.toml index e9eb4e8a98e..38ee8c7ca58 100644 --- a/common/remote_beacon_node/Cargo.toml +++ b/common/remote_beacon_node/Cargo.toml @@ -7,7 +7,7 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -reqwest = { version = "0.10.4", features = ["json"] } +reqwest = { version = "0.10.4", features = ["json", "native-tls-vendored"] } url = "2.1.1" serde = "1.0.110" futures = "0.3.5" diff --git a/common/remote_beacon_node/src/lib.rs b/common/remote_beacon_node/src/lib.rs index 4afc403037b..199efefd9da 100644 --- a/common/remote_beacon_node/src/lib.rs +++ b/common/remote_beacon_node/src/lib.rs @@ -11,8 +11,8 @@ use std::marker::PhantomData; use std::time::Duration; use types::{ Attestation, AttestationData, AttesterSlashing, 
BeaconBlock, BeaconState, CommitteeIndex, - Epoch, EthSpec, Fork, Hash256, ProposerSlashing, PublicKey, PublicKeyBytes, Signature, - SignedAggregateAndProof, SignedBeaconBlock, Slot, SubnetId, + Epoch, EthSpec, Fork, Graffiti, Hash256, ProposerSlashing, PublicKey, PublicKeyBytes, + Signature, SignedAggregateAndProof, SignedBeaconBlock, Slot, SubnetId, }; use url::Url; @@ -313,18 +313,21 @@ impl Validator { &self, slot: Slot, randao_reveal: Signature, + graffiti: Option, ) -> Result, Error> { let client = self.0.clone(); let url = self.url("block")?; - client - .json_get::>( - url, - vec![ - ("slot".into(), format!("{}", slot.as_u64())), - ("randao_reveal".into(), as_ssz_hex_string(&randao_reveal)), - ], - ) - .await + + let mut query_pairs = vec![ + ("slot".into(), format!("{}", slot.as_u64())), + ("randao_reveal".into(), as_ssz_hex_string(&randao_reveal)), + ]; + + if let Some(graffiti_bytes) = graffiti { + query_pairs.push(("graffiti".into(), as_ssz_hex_string(&graffiti_bytes))); + } + + client.json_get::>(url, query_pairs).await } /// Subscribes a list of validators to particular slots for attestation production/publication. 
diff --git a/common/rest_types/Cargo.toml b/common/rest_types/Cargo.toml index 51e69dcca7f..d9e021fe19d 100644 --- a/common/rest_types/Cargo.toml +++ b/common/rest_types/Cargo.toml @@ -14,6 +14,13 @@ state_processing = { path = "../../consensus/state_processing" } bls = { path = "../../crypto/bls" } serde = { version = "1.0.110", features = ["derive"] } rayon = "1.3.0" +hyper = "0.13.5" +tokio = { version = "0.2.21", features = ["sync"] } +environment = { path = "../../lighthouse/environment" } +store = { path = "../../beacon_node/store" } +beacon_chain = { path = "../../beacon_node/beacon_chain" } +serde_json = "1.0.52" +serde_yaml = "0.8.11" [target.'cfg(target_os = "linux")'.dependencies] psutil = "3.1.0" diff --git a/beacon_node/rest_api/src/error.rs b/common/rest_types/src/api_error.rs similarity index 100% rename from beacon_node/rest_api/src/error.rs rename to common/rest_types/src/api_error.rs diff --git a/common/rest_types/src/handler.rs b/common/rest_types/src/handler.rs new file mode 100644 index 00000000000..cbbcd73b19a --- /dev/null +++ b/common/rest_types/src/handler.rs @@ -0,0 +1,247 @@ +use crate::{ApiError, ApiResult}; +use environment::TaskExecutor; +use hyper::header; +use hyper::{Body, Request, Response, StatusCode}; +use serde::Deserialize; +use serde::Serialize; +use ssz::Encode; + +/// Defines the encoding for the API. 
+#[derive(Clone, Serialize, Deserialize, Copy)] +pub enum ApiEncodingFormat { + JSON, + YAML, + SSZ, +} + +impl ApiEncodingFormat { + pub fn get_content_type(&self) -> &str { + match self { + ApiEncodingFormat::JSON => "application/json", + ApiEncodingFormat::YAML => "application/yaml", + ApiEncodingFormat::SSZ => "application/ssz", + } + } +} + +impl From<&str> for ApiEncodingFormat { + fn from(f: &str) -> ApiEncodingFormat { + match f { + "application/yaml" => ApiEncodingFormat::YAML, + "application/ssz" => ApiEncodingFormat::SSZ, + _ => ApiEncodingFormat::JSON, + } + } +} + +/// Provides a HTTP request handler with Lighthouse-specific functionality. +pub struct Handler { + executor: TaskExecutor, + req: Request<()>, + body: Body, + ctx: T, + encoding: ApiEncodingFormat, + allow_body: bool, +} + +impl Handler { + /// Start handling a new request. + pub fn new(req: Request, ctx: T, executor: TaskExecutor) -> Result { + let (req_parts, body) = req.into_parts(); + let req = Request::from_parts(req_parts, ()); + + let accept_header: String = req + .headers() + .get(header::ACCEPT) + .map_or(Ok(""), |h| h.to_str()) + .map_err(|e| { + ApiError::BadRequest(format!( + "The Accept header contains invalid characters: {:?}", + e + )) + }) + .map(String::from)?; + + Ok(Self { + executor, + req, + body, + ctx, + allow_body: false, + encoding: ApiEncodingFormat::from(accept_header.as_str()), + }) + } + + /// The default behaviour is to return an error if any body is supplied in the request. Calling + /// this function disables that error. + pub fn allow_body(mut self) -> Self { + self.allow_body = true; + self + } + + /// Return a simple static value. + /// + /// Does not use the blocking executor. + pub async fn static_value(self, value: V) -> Result, ApiError> { + // Always check and disallow a body for a static value. 
+ let _ = Self::get_body(self.body, false).await?; + + Ok(HandledRequest { + value, + encoding: self.encoding, + }) + } + + /// Calls `func` in-line, on the core executor. + /// + /// This should only be used for very fast tasks. + pub async fn in_core_task(self, func: F) -> Result, ApiError> + where + V: Send + Sync + 'static, + F: Fn(Request>, T) -> Result + Send + Sync + 'static, + { + let body = Self::get_body(self.body, self.allow_body).await?; + let (req_parts, _) = self.req.into_parts(); + let req = Request::from_parts(req_parts, body); + + let value = func(req, self.ctx)?; + + Ok(HandledRequest { + value, + encoding: self.encoding, + }) + } + + /// Spawns `func` on the blocking executor. + /// + /// This method is suitable for handling long-running or intensive tasks. + pub async fn in_blocking_task(self, func: F) -> Result, ApiError> + where + V: Send + Sync + 'static, + F: Fn(Request>, T) -> Result + Send + Sync + 'static, + { + let ctx = self.ctx; + let body = Self::get_body(self.body, self.allow_body).await?; + let (req_parts, _) = self.req.into_parts(); + let req = Request::from_parts(req_parts, body); + + let value = self + .executor + .clone() + .handle + .spawn_blocking(move || func(req, ctx)) + .await + .map_err(|e| { + ApiError::ServerError(format!( + "Failed to get blocking join handle: {}", + e.to_string() + )) + })??; + + Ok(HandledRequest { + value, + encoding: self.encoding, + }) + } + + /// Call `func`, then return a response that is suitable for an SSE stream. 
+ pub async fn sse_stream(self, func: F) -> ApiResult + where + F: Fn(Request<()>, T) -> Result, + { + let body = func(self.req, self.ctx)?; + + Response::builder() + .status(200) + .header("Content-Type", "text/event-stream") + .header("Connection", "Keep-Alive") + .header("Cache-Control", "no-cache") + .header("Access-Control-Allow-Origin", "*") + .body(body) + .map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e))) + } + + /// Downloads the bytes for `body`. + async fn get_body(body: Body, allow_body: bool) -> Result, ApiError> { + let bytes = hyper::body::to_bytes(body) + .await + .map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}", e)))?; + + if !allow_body && !bytes[..].is_empty() { + Err(ApiError::BadRequest( + "The request body must be empty".to_string(), + )) + } else { + Ok(bytes.into_iter().collect()) + } + } +} + +/// A request that has been "handled" and now a result (`value`) needs to be serialize and +/// returned. +pub struct HandledRequest { + encoding: ApiEncodingFormat, + value: V, +} + +impl HandledRequest { + /// Simple encode a string as utf-8. + pub fn text_encoding(self) -> ApiResult { + Response::builder() + .status(StatusCode::OK) + .header("content-type", "text/plain; charset=utf-8") + .body(Body::from(self.value)) + .map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e))) + } +} + +impl HandledRequest { + /// Suitable for all items which implement `serde` and `ssz`. + pub fn all_encodings(self) -> ApiResult { + match self.encoding { + ApiEncodingFormat::SSZ => Response::builder() + .status(StatusCode::OK) + .header("content-type", "application/ssz") + .body(Body::from(self.value.as_ssz_bytes())) + .map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e))), + _ => self.serde_encodings(), + } + } +} + +impl HandledRequest { + /// Suitable for items which only implement `serde`. 
+ pub fn serde_encodings(self) -> ApiResult { + let (body, content_type) = match self.encoding { + ApiEncodingFormat::JSON => ( + Body::from(serde_json::to_string(&self.value).map_err(|e| { + ApiError::ServerError(format!( + "Unable to serialize response body as JSON: {:?}", + e + )) + })?), + "application/json", + ), + ApiEncodingFormat::SSZ => { + return Err(ApiError::UnsupportedType( + "Response cannot be encoded as SSZ.".into(), + )); + } + ApiEncodingFormat::YAML => ( + Body::from(serde_yaml::to_string(&self.value).map_err(|e| { + ApiError::ServerError(format!( + "Unable to serialize response body as YAML: {:?}", + e + )) + })?), + "application/yaml", + ), + }; + + Response::builder() + .status(StatusCode::OK) + .header("content-type", content_type) + .body(body) + .map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e))) + } +} diff --git a/common/rest_types/src/lib.rs b/common/rest_types/src/lib.rs index 79a66e03410..1bedd1cadbc 100644 --- a/common/rest_types/src/lib.rs +++ b/common/rest_types/src/lib.rs @@ -2,20 +2,21 @@ //! //! This is primarily used by the validator client and the beacon node rest API. 
+mod api_error; mod beacon; mod consensus; +mod handler; mod node; mod validator; +pub use api_error::{ApiError, ApiResult}; pub use beacon::{ BlockResponse, CanonicalHeadResponse, Committee, HeadBeaconBlock, StateResponse, ValidatorRequest, ValidatorResponse, }; - +pub use consensus::{IndividualVote, IndividualVotesRequest, IndividualVotesResponse}; +pub use handler::{ApiEncodingFormat, Handler}; +pub use node::{Health, SyncingResponse, SyncingStatus}; pub use validator::{ ValidatorDutiesRequest, ValidatorDuty, ValidatorDutyBytes, ValidatorSubscription, }; - -pub use consensus::{IndividualVote, IndividualVotesRequest, IndividualVotesResponse}; - -pub use node::{Health, SyncingResponse, SyncingStatus}; diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 226ce042545..9fdd1e0770b 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -743,14 +743,25 @@ where Ok(()) } - /// Returns `true` if the block is known. + /// Returns `true` if the block is known **and** a descendant of the finalized root. pub fn contains_block(&self, block_root: &Hash256) -> bool { - self.proto_array.contains_block(block_root) + self.proto_array.contains_block(block_root) && self.is_descendant_of_finalized(*block_root) } - /// Returns a `ProtoBlock` if the block is known. + /// Returns a `ProtoBlock` if the block is known **and** a descendant of the finalized root. pub fn get_block(&self, block_root: &Hash256) -> Option { - self.proto_array.get_block(block_root) + self.proto_array.get_block(block_root).filter(|block| { + // If available, use the parent_root to perform the lookup since it will involve one + // less lookup. This involves making the assumption that the finalized block will + // always have `block.parent_root` of `None`. 
+ self.is_descendant_of_finalized(block.parent_root.unwrap_or(block.root)) + }) + } + + /// Return `true` if `block_root` is equal to the finalized root, or a known descendant of it. + pub fn is_descendant_of_finalized(&self, block_root: Hash256) -> bool { + self.proto_array + .is_descendant(self.fc_store.finalized_checkpoint().root, block_root) } /// Returns the latest message for a given validator, if any. diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 7897c01a45e..d8b71243b40 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -1,8 +1,10 @@ #![cfg(not(debug_assertions))] use beacon_chain::{ - test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, HarnessType}, - BeaconChain, BeaconChainError, BeaconForkChoiceStore, ForkChoiceError, + test_utils::{ + AttestationStrategy, BeaconChainHarness, BlockStrategy, NullMigratorEphemeralHarnessType, + }, + BeaconChain, BeaconChainError, BeaconForkChoiceStore, ForkChoiceError, StateSkipConfig, }; use fork_choice::{ ForkChoiceStore, InvalidAttestation, InvalidBlock, QueuedAttestation, @@ -18,7 +20,7 @@ use types::{BeaconBlock, BeaconState, Hash256, SignedBeaconBlock}; pub type E = MainnetEthSpec; -pub const VALIDATOR_COUNT: usize = 16; +pub const VALIDATOR_COUNT: usize = 32; /// Defines some delay between when an attestation is created and when it is mutated. pub enum MutationDelay { @@ -30,7 +32,7 @@ pub enum MutationDelay { /// A helper struct to make testing fork choice more ergonomic and less repetitive. struct ForkChoiceTest { - harness: BeaconChainHarness>, + harness: BeaconChainHarness>, } impl ForkChoiceTest { @@ -115,22 +117,31 @@ impl ForkChoiceTest { } /// Build the chain whilst `predicate` returns `true`. 
- pub fn apply_blocks_while(self, mut predicate: F) -> Self + pub fn apply_blocks_while(mut self, mut predicate: F) -> Self where F: FnMut(&BeaconBlock, &BeaconState) -> bool, { self.harness.advance_slot(); - self.harness.extend_chain_while( - |block, state| predicate(&block.message, state), - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + let mut state = self.harness.get_current_state(); + let validators = self.harness.get_all_validators(); + loop { + let slot = self.harness.get_current_slot(); + let (block, state_) = self.harness.make_block(state, slot); + state = state_; + if !predicate(&block.message, &state) { + break; + } + let block_hash = self.harness.process_block(slot, block.clone()); + self.harness + .attest_block(&state, block_hash, &block, &validators); + self.harness.advance_slot(); + } self } /// Apply `count` blocks to the chain (with attestations). - pub fn apply_blocks(self, count: usize) -> Self { + pub fn apply_blocks(mut self, count: usize) -> Self { self.harness.advance_slot(); self.harness.extend_chain( count, @@ -142,7 +153,7 @@ impl ForkChoiceTest { } /// Apply `count` blocks to the chain (without attestations). - pub fn apply_blocks_without_new_attestations(self, count: usize) -> Self { + pub fn apply_blocks_without_new_attestations(mut self, count: usize) -> Self { self.harness.advance_slot(); self.harness.extend_chain( count, @@ -181,13 +192,22 @@ impl ForkChoiceTest { /// Applies a block directly to fork choice, bypassing the beacon chain. /// /// Asserts the block was applied successfully. 
- pub fn apply_block_directly_to_fork_choice(self, mut func: F) -> Self + pub fn apply_block_directly_to_fork_choice(mut self, mut func: F) -> Self where F: FnMut(&mut BeaconBlock, &mut BeaconState), { - let (mut block, mut state) = self.harness.get_block(); + let state = self + .harness + .chain + .state_at_slot( + self.harness.get_current_slot() - 1, + StateSkipConfig::WithStateRoots, + ) + .unwrap(); + let slot = self.harness.get_current_slot(); + let (mut block, mut state) = self.harness.make_block(state, slot); func(&mut block.message, &mut state); - let current_slot = self.harness.chain.slot().unwrap(); + let current_slot = self.harness.get_current_slot(); self.harness .chain .fork_choice @@ -201,7 +221,7 @@ impl ForkChoiceTest { /// /// Asserts that an error occurred and allows inspecting it via `comparison_func`. pub fn apply_invalid_block_directly_to_fork_choice( - self, + mut self, mut mutation_func: F, mut comparison_func: G, ) -> Self @@ -209,9 +229,18 @@ impl ForkChoiceTest { F: FnMut(&mut BeaconBlock, &mut BeaconState), G: FnMut(ForkChoiceError), { - let (mut block, mut state) = self.harness.get_block(); + let state = self + .harness + .chain + .state_at_slot( + self.harness.get_current_slot() - 1, + StateSkipConfig::WithStateRoots, + ) + .unwrap(); + let slot = self.harness.get_current_slot(); + let (mut block, mut state) = self.harness.make_block(state, slot); mutation_func(&mut block.message, &mut state); - let current_slot = self.harness.chain.slot().unwrap(); + let current_slot = self.harness.get_current_slot(); let err = self .harness .chain @@ -267,20 +296,21 @@ impl ForkChoiceTest { /// /// Also returns some info about who created it. 
fn apply_attestation_to_chain( - self, + mut self, delay: MutationDelay, mut mutation_func: F, mut comparison_func: G, ) -> Self where - F: FnMut(&mut IndexedAttestation, &BeaconChain>), + F: FnMut(&mut IndexedAttestation, &BeaconChain>), G: FnMut(Result<(), BeaconChainError>), { - let chain = &self.harness.chain; - let head = chain.head().expect("should get head"); - let current_slot = chain.slot().expect("should get slot"); + let head = self.harness.chain.head().expect("should get head"); + let current_slot = self.harness.chain.slot().expect("should get slot"); - let mut attestation = chain + let mut attestation = self + .harness + .chain .produce_unaggregated_attestation(current_slot, 0) .expect("should not error while producing attestation"); @@ -298,9 +328,13 @@ impl ForkChoiceTest { .get_committee_count_at_slot(current_slot) .expect("should not error while getting committee count"); - let subnet_id = - SubnetId::compute_subnet::(current_slot, 0, committee_count, &chain.spec) - .expect("should compute subnet id"); + let subnet_id = SubnetId::compute_subnet::( + current_slot, + 0, + committee_count, + &self.harness.chain.spec, + ) + .expect("should compute subnet id"); let validator_sk = generate_deterministic_keypair(validator_index).sk; @@ -309,12 +343,14 @@ impl ForkChoiceTest { &validator_sk, validator_committee_index, &head.beacon_state.fork, - chain.genesis_validators_root, - &chain.spec, + self.harness.chain.genesis_validators_root, + &self.harness.chain.spec, ) .expect("should sign attestation"); - let mut verified_attestation = chain + let mut verified_attestation = self + .harness + .chain .verify_unaggregated_attestation_for_gossip(attestation, subnet_id) .expect("precondition: should gossip verify attestation"); @@ -327,9 +363,15 @@ impl ForkChoiceTest { ); } - mutation_func(verified_attestation.__indexed_attestation_mut(), chain); + mutation_func( + verified_attestation.__indexed_attestation_mut(), + &self.harness.chain, + ); - let result = 
chain.apply_attestation_to_fork_choice(&verified_attestation); + let result = self + .harness + .chain + .apply_attestation_to_fork_choice(&verified_attestation); comparison_func(result); diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index fe8d01d16e5..c89a96628a7 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -29,7 +29,7 @@ pub struct ProtoNode { best_descendant: Option, } -#[derive(PartialEq, Debug, Serialize, Deserialize)] +#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] pub struct ProtoArray { /// Do not attempt to prune the tree unless it has at least this many nodes. Small prunes /// simply waste time. diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 02d2e3df17c..728ee3da5b1 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -208,6 +208,27 @@ impl ProtoArrayForkChoice { }) } + /// Returns `true` if the `descendant_root` has an ancestor with `ancestor_root`. Always + /// returns `false` if either input roots are unknown. + /// + /// ## Notes + /// + /// Still returns `true` if `ancestor_root` is known and `ancestor_root == descendant_root`. 
+ pub fn is_descendant(&self, ancestor_root: Hash256, descendant_root: Hash256) -> bool { + self.proto_array + .indices + .get(&ancestor_root) + .and_then(|ancestor_index| self.proto_array.nodes.get(*ancestor_index)) + .and_then(|ancestor| { + self.proto_array + .iter_block_roots(&descendant_root) + .take_while(|(_root, slot)| *slot >= ancestor.slot) + .find(|(_root, slot)| *slot == ancestor.slot) + .map(|(root, _slot)| root == ancestor_root) + }) + .unwrap_or(false) + } + pub fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Epoch)> { if validator_index < self.votes.0.len() { let vote = &self.votes.0[validator_index]; @@ -319,6 +340,73 @@ mod test_compute_deltas { Hash256::from_low_u64_be(i as u64 + 1) } + #[test] + fn finalized_descendant() { + let genesis_slot = Slot::new(0); + let genesis_epoch = Epoch::new(0); + + let state_root = Hash256::from_low_u64_be(0); + let finalized_root = Hash256::from_low_u64_be(1); + let finalized_desc = Hash256::from_low_u64_be(2); + let not_finalized_desc = Hash256::from_low_u64_be(3); + let unknown = Hash256::from_low_u64_be(4); + + let mut fc = ProtoArrayForkChoice::new( + genesis_slot, + state_root, + genesis_epoch, + genesis_epoch, + finalized_root, + ) + .unwrap(); + + // Add block that is a finalized descendant. + fc.proto_array + .on_block(Block { + slot: genesis_slot + 1, + root: finalized_desc, + parent_root: Some(finalized_root), + state_root, + target_root: finalized_root, + justified_epoch: genesis_epoch, + finalized_epoch: genesis_epoch, + }) + .unwrap(); + + // Add block that is *not* a finalized descendant. 
+ fc.proto_array + .on_block(Block { + slot: genesis_slot + 1, + root: not_finalized_desc, + parent_root: None, + state_root, + target_root: finalized_root, + justified_epoch: genesis_epoch, + finalized_epoch: genesis_epoch, + }) + .unwrap(); + + assert!(!fc.is_descendant(unknown, unknown)); + assert!(!fc.is_descendant(unknown, finalized_root)); + assert!(!fc.is_descendant(unknown, finalized_desc)); + assert!(!fc.is_descendant(unknown, not_finalized_desc)); + + assert!(fc.is_descendant(finalized_root, finalized_root)); + assert!(fc.is_descendant(finalized_root, finalized_desc)); + assert!(!fc.is_descendant(finalized_root, not_finalized_desc)); + assert!(!fc.is_descendant(finalized_root, unknown)); + + assert!(!fc.is_descendant(finalized_desc, not_finalized_desc)); + assert!(fc.is_descendant(finalized_desc, finalized_desc)); + assert!(!fc.is_descendant(finalized_desc, finalized_root)); + assert!(!fc.is_descendant(finalized_desc, unknown)); + + assert!(fc.is_descendant(not_finalized_desc, not_finalized_desc)); + assert!(!fc.is_descendant(not_finalized_desc, finalized_desc)); + assert!(!fc.is_descendant(not_finalized_desc, finalized_root)); + assert!(!fc.is_descendant(not_finalized_desc, unknown)); + } + #[test] fn zero_hash() { let validator_count: usize = 16; diff --git a/consensus/serde_utils/Cargo.toml b/consensus/serde_utils/Cargo.toml new file mode 100644 index 00000000000..1fb35736baf --- /dev/null +++ b/consensus/serde_utils/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "serde_utils" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +[dependencies] +serde = { version = "1.0.110", features = ["derive"] } +serde_derive = "1.0.110" + +[dev-dependencies] +serde_json = "1.0.52" diff --git a/consensus/serde_utils/src/lib.rs b/consensus/serde_utils/src/lib.rs new file mode 100644 index 00000000000..df2b44b6243 --- /dev/null +++ b/consensus/serde_utils/src/lib.rs @@ -0,0 +1,2 @@ +pub mod quoted_u64; +pub mod quoted_u64_vec; diff --git 
a/consensus/serde_utils/src/quoted_u64.rs b/consensus/serde_utils/src/quoted_u64.rs new file mode 100644 index 00000000000..2e73a104f19 --- /dev/null +++ b/consensus/serde_utils/src/quoted_u64.rs @@ -0,0 +1,115 @@ +use serde::{Deserializer, Serializer}; +use serde_derive::{Deserialize, Serialize}; +use std::marker::PhantomData; + +/// Serde support for deserializing quoted integers. +/// +/// Configurable so that quotes are either required or optional. +pub struct QuotedIntVisitor { + require_quotes: bool, + _phantom: PhantomData, +} + +impl<'a, T> serde::de::Visitor<'a> for QuotedIntVisitor +where + T: From + Into + Copy, +{ + type Value = T; + + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + if self.require_quotes { + write!(formatter, "a quoted integer") + } else { + write!(formatter, "a quoted or unquoted integer") + } + } + + fn visit_str(self, s: &str) -> Result + where + E: serde::de::Error, + { + s.parse::() + .map(T::from) + .map_err(serde::de::Error::custom) + } + + fn visit_u64(self, v: u64) -> Result + where + E: serde::de::Error, + { + if self.require_quotes { + Err(serde::de::Error::custom( + "received unquoted integer when quotes are required", + )) + } else { + Ok(T::from(v)) + } + } +} + +/// Wrapper type for requiring quotes on a `u64`-like type. +/// +/// Unlike using `serde(with = "quoted_u64::require_quotes")` this is composable, and can be nested +/// inside types like `Option`, `Result` and `Vec`. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Deserialize, Serialize)] +#[serde(transparent)] +pub struct Quoted +where + T: From + Into + Copy, +{ + #[serde(with = "require_quotes")] + pub value: T, +} + +/// Serialize with quotes. +pub fn serialize(value: &T, serializer: S) -> Result +where + S: Serializer, + T: From + Into + Copy, +{ + let v: u64 = (*value).into(); + serializer.serialize_str(&format!("{}", v)) +} + +/// Deserialize with or without quotes. 
+pub fn deserialize<'de, D, T>(deserializer: D) -> Result +where + D: Deserializer<'de>, + T: From + Into + Copy, +{ + deserializer.deserialize_any(QuotedIntVisitor { + require_quotes: false, + _phantom: PhantomData, + }) +} + +/// Requires quotes when deserializing. +/// +/// Usage: `#[serde(with = "quoted_u64::require_quotes")]`. +pub mod require_quotes { + pub use super::serialize; + use super::*; + + pub fn deserialize<'de, D, T>(deserializer: D) -> Result + where + D: Deserializer<'de>, + T: From + Into + Copy, + { + deserializer.deserialize_any(QuotedIntVisitor { + require_quotes: true, + _phantom: PhantomData, + }) + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn require_quotes() { + let x = serde_json::from_str::>("\"8\"").unwrap(); + assert_eq!(x.value, 8); + serde_json::from_str::>("8").unwrap_err(); + } +} diff --git a/consensus/serde_utils/src/quoted_u64_vec.rs b/consensus/serde_utils/src/quoted_u64_vec.rs new file mode 100644 index 00000000000..c5badee5012 --- /dev/null +++ b/consensus/serde_utils/src/quoted_u64_vec.rs @@ -0,0 +1,91 @@ +use serde::ser::SerializeSeq; +use serde::{Deserializer, Serializer}; +use serde_derive::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize)] +#[serde(transparent)] +pub struct QuotedIntWrapper { + #[serde(with = "crate::quoted_u64")] + int: u64, +} + +pub struct QuotedIntVecVisitor; +impl<'a> serde::de::Visitor<'a> for QuotedIntVecVisitor { + type Value = Vec; + + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(formatter, "a list of quoted or unquoted integers") + } + + fn visit_seq(self, mut seq: A) -> Result + where + A: serde::de::SeqAccess<'a>, + { + let mut vec = vec![]; + + while let Some(val) = seq.next_element()? 
{ + let val: QuotedIntWrapper = val; + vec.push(val.int); + } + + Ok(vec) + } +} + +pub fn serialize(value: &[u64], serializer: S) -> Result +where + S: Serializer, +{ + let mut seq = serializer.serialize_seq(Some(value.len()))?; + for &int in value { + seq.serialize_element(&QuotedIntWrapper { int })?; + } + seq.end() +} + +pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> +where + D: Deserializer<'de>, +{ + deserializer.deserialize_any(QuotedIntVecVisitor) +} + +#[cfg(test)] +mod test { + use super::*; + + #[derive(Debug, Serialize, Deserialize)] + struct Obj { + #[serde(with = "crate::quoted_u64_vec")] + values: Vec, + } + + #[test] + fn quoted_list_success() { + let obj: Obj = serde_json::from_str(r#"{ "values": ["1", "2", "3", "4"] }"#).unwrap(); + assert_eq!(obj.values, vec![1, 2, 3, 4]); + } + + #[test] + fn unquoted_list_success() { + let obj: Obj = serde_json::from_str(r#"{ "values": [1, 2, 3, 4] }"#).unwrap(); + assert_eq!(obj.values, vec![1, 2, 3, 4]); + } + + #[test] + fn mixed_list_success() { + let obj: Obj = serde_json::from_str(r#"{ "values": ["1", 2, "3", "4"] }"#).unwrap(); + assert_eq!(obj.values, vec![1, 2, 3, 4]); + } + + #[test] + fn empty_list_success() { + let obj: Obj = serde_json::from_str(r#"{ "values": [] }"#).unwrap(); + assert!(obj.values.is_empty()); + } + + #[test] + fn whole_list_quoted_err() { + serde_json::from_str::(r#"{ "values": "[1, 2, 3, 4]" }"#).unwrap_err(); + } +} diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index d893ff3ad03..6334eaf4a83 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -25,6 +25,7 @@ rand = "0.7.3" safe_arith = { path = "../safe_arith" } serde = "1.0.110" serde_derive = "1.0.110" +serde_utils = { path = "../serde_utils" } slog = "2.5.2" eth2_ssz = "0.1.2" eth2_ssz_derive = "0.1.0" diff --git a/consensus/types/src/beacon_state/tree_hash_cache.rs b/consensus/types/src/beacon_state/tree_hash_cache.rs index 0c8899c0255..6abc795a137 100644 --- 
a/consensus/types/src/beacon_state/tree_hash_cache.rs +++ b/consensus/types/src/beacon_state/tree_hash_cache.rs @@ -7,6 +7,7 @@ use rayon::prelude::*; use ssz_derive::{Decode, Encode}; use ssz_types::VariableList; use std::cmp::Ordering; +use std::iter::ExactSizeIterator; use tree_hash::{mix_in_length, MerkleHasher, TreeHash}; /// The number of fields on a beacon state. @@ -288,17 +289,17 @@ impl ValidatorsListTreeHashCache { fn recalculate_tree_hash_root(&mut self, validators: &[Validator]) -> Result { let mut list_arena = std::mem::take(&mut self.list_arena); - let leaves = self - .values - .leaves(validators)? - .into_iter() - .flatten() - .map(|h| h.to_fixed_bytes()) - .collect::>(); + let leaves = self.values.leaves(validators)?; + let num_leaves = leaves.iter().map(|arena| arena.len()).sum(); + + let leaves_iter = ForcedExactSizeIterator { + iter: leaves.into_iter().flatten().map(|h| h.to_fixed_bytes()), + len: num_leaves, + }; let list_root = self .list_cache - .recalculate_merkle_root(&mut list_arena, leaves.into_iter())?; + .recalculate_merkle_root(&mut list_arena, leaves_iter)?; self.list_arena = list_arena; @@ -306,6 +307,29 @@ impl ValidatorsListTreeHashCache { } } +/// Provides a wrapper around some `iter` if the number of items in the iterator is known to the +/// programmer but not the compiler. This allows use of `ExactSizeIterator` in some occasions. +/// +/// Care should be taken to ensure `len` is accurate. +struct ForcedExactSizeIterator { + iter: I, + len: usize, +} + +impl> Iterator for ForcedExactSizeIterator { + type Item = V; + + fn next(&mut self) -> Option { + self.iter.next() + } +} + +impl> ExactSizeIterator for ForcedExactSizeIterator { + fn len(&self) -> usize { + self.len + } +} + /// Provides a cache for each of the `Validator` objects in `state.validators` and computes the /// roots of these using Rayon parallelization. 
#[derive(Debug, PartialEq, Clone, Default, Encode, Decode)] diff --git a/consensus/types/src/slot_epoch.rs b/consensus/types/src/slot_epoch.rs index 42b922cfbc2..cec17b09a5b 100644 --- a/consensus/types/src/slot_epoch.rs +++ b/consensus/types/src/slot_epoch.rs @@ -25,11 +25,12 @@ use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Rem, Sub, SubAssi #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive(Eq, Clone, Copy, Default, Serialize, Deserialize)] #[serde(transparent)] -pub struct Slot(u64); +pub struct Slot(#[serde(with = "serde_utils::quoted_u64")] u64); #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive(Eq, Clone, Copy, Default, Serialize, Deserialize)] -pub struct Epoch(u64); +#[serde(transparent)] +pub struct Epoch(#[serde(with = "serde_utils::quoted_u64")] u64); impl_common!(Slot); impl_common!(Epoch); diff --git a/consensus/types/src/utils.rs b/consensus/types/src/utils.rs index 51af86692cb..a527fc18fd1 100644 --- a/consensus/types/src/utils.rs +++ b/consensus/types/src/utils.rs @@ -1,3 +1,3 @@ mod serde_utils; -pub use serde_utils::*; +pub use self::serde_utils::*; diff --git a/crypto/bls/Cargo.toml b/crypto/bls/Cargo.toml index 138c7628d28..e1cb1fde319 100644 --- a/crypto/bls/Cargo.toml +++ b/crypto/bls/Cargo.toml @@ -17,7 +17,7 @@ eth2_hashing = "0.1.0" ethereum-types = "0.9.1" arbitrary = { version = "0.4.4", features = ["derive"], optional = true } zeroize = { version = "1.0.0", features = ["zeroize_derive"] } -blst = { git = "https://github.com/sigp/blst.git", rev = "22bfb91721af125d1cb08aa201a18477665a45fe" } +blst = { git = "https://github.com/sigp/blst.git", rev = "284f7059642851c760a09fb1708bcb59c7ca323c" } [features] default = ["supranational"] diff --git a/crypto/eth2_hashing/Cargo.toml b/crypto/eth2_hashing/Cargo.toml index fdc9ae5003b..6f265b64ded 100644 --- a/crypto/eth2_hashing/Cargo.toml +++ b/crypto/eth2_hashing/Cargo.toml @@ -10,7 +10,7 @@ description = 
"Hashing primitives used in Ethereum 2.0" lazy_static = { version = "1.4.0", optional = true } [target.'cfg(not(target_arch = "wasm32"))'.dependencies] -ring = "0.16.9" +ring = "0.16.12" [target.'cfg(target_arch = "wasm32")'.dependencies] sha2 = "0.9.1" diff --git a/crypto/eth2_keystore/src/json_keystore/mod.rs b/crypto/eth2_keystore/src/json_keystore/mod.rs index a30855d2180..0e2914388b6 100644 --- a/crypto/eth2_keystore/src/json_keystore/mod.rs +++ b/crypto/eth2_keystore/src/json_keystore/mod.rs @@ -24,10 +24,14 @@ use serde_repr::*; pub struct JsonKeystore { pub crypto: Crypto, pub uuid: Uuid, - pub path: String, + /// EIP-2335 does not declare this field as optional, but Prysm is omitting it so we must + /// support it. + pub path: Option, pub pubkey: String, pub version: Version, pub description: Option, + /// Not part of EIP-2335, but `ethdo` and Prysm have adopted it anyway so we must support it. + pub name: Option, } /// Version for `JsonKeystore`. diff --git a/crypto/eth2_keystore/src/keystore.rs b/crypto/eth2_keystore/src/keystore.rs index ede03275ffb..6e3128b003a 100644 --- a/crypto/eth2_keystore/src/keystore.rs +++ b/crypto/eth2_keystore/src/keystore.rs @@ -172,10 +172,11 @@ impl Keystore { }, }, uuid, - path, + path: Some(path), pubkey: keypair.pk.to_hex_string()[2..].to_string(), version: Version::four(), description: None, + name: None, }, }) } @@ -218,8 +219,8 @@ impl Keystore { /// Returns the path for the keystore. /// /// Note: the path is not validated, it is simply whatever string the keystore provided. - pub fn path(&self) -> &str { - &self.json.path + pub fn path(&self) -> Option { + self.json.path.clone() } /// Returns the pubkey for the keystore. 
diff --git a/crypto/eth2_keystore/tests/eip2335_vectors.rs b/crypto/eth2_keystore/tests/eip2335_vectors.rs index 59d017c6916..3702a218163 100644 --- a/crypto/eth2_keystore/tests/eip2335_vectors.rs +++ b/crypto/eth2_keystore/tests/eip2335_vectors.rs @@ -64,7 +64,7 @@ fn eip2335_test_vector_scrypt() { Uuid::parse_str("1d85ae20-35c5-4611-98e8-aa14a633906f").unwrap(), "uuid" ); - assert_eq!(keystore.path(), "", "path"); + assert_eq!(keystore.path().unwrap(), "", "path"); } #[test] @@ -108,5 +108,5 @@ fn eip2335_test_vector_pbkdf() { Uuid::parse_str("64625def-3331-4eea-ab6f-782f3ed16a83").unwrap(), "uuid" ); - assert_eq!(keystore.path(), "m/12381/60/0/0", "path"); + assert_eq!(keystore.path().unwrap(), "m/12381/60/0/0", "path"); } diff --git a/crypto/eth2_keystore/tests/json.rs b/crypto/eth2_keystore/tests/json.rs index c2466d36b64..93e89156cc6 100644 --- a/crypto/eth2_keystore/tests/json.rs +++ b/crypto/eth2_keystore/tests/json.rs @@ -841,10 +841,7 @@ fn missing_path() { } "#; - match Keystore::from_json_str(&vector) { - Err(Error::InvalidJson(_)) => {} - _ => panic!("expected invalid json error"), - } + assert!(Keystore::from_json_str(&vector).is_ok()); } #[test] @@ -1010,3 +1007,43 @@ fn pbkdf2_missing_parameter() { _ => panic!("expected invalid json error"), } } + +#[test] +fn name_field() { + let vector = r#" + { + "crypto": { + "kdf": { + "function": "scrypt", + "params": { + "dklen": 32, + "n": 262144, + "p": 1, + "r": 8, + "salt": "d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3" + }, + "message": "" + }, + "checksum": { + "function": "sha256", + "params": {}, + "message": "149aafa27b041f3523c53d7acba1905fa6b1c90f9fef137568101f44b531a3cb" + }, + "cipher": { + "function": "aes-128-ctr", + "params": { + "iv": "264daa3f303d7259501c93d997d84fe6" + }, + "message": "54ecc8863c0550351eee5720f3be6a5d4a016025aa91cd6436cfec938d6a8d30" + } + }, + "pubkey": "9612d7a727c9d0a22e185a1c768478dfe919cada9266988cb32359c11f2b7b27f4ae4040902382ae2910c15e2b420d07", + 
"uuid": "1d85ae20-35c5-4611-98e8-aa14a633906f", + "path": "", + "version": 4, + "name": "cats" + } + "#; + + assert!(Keystore::from_json_str(&vector).is_ok()); +} diff --git a/crypto/eth2_wallet/tests/tests.rs b/crypto/eth2_wallet/tests/tests.rs index d75b0ab34b9..eb683e72d6a 100644 --- a/crypto/eth2_wallet/tests/tests.rs +++ b/crypto/eth2_wallet/tests/tests.rs @@ -225,13 +225,13 @@ fn key_derivation_from_seed() { .expect("should generate keystores"); assert_eq!( - keystores.voting.path(), + keystores.voting.path().unwrap(), format!("m/12381/3600/{}/0/0", i), "voting path should match" ); assert_eq!( - keystores.withdrawal.path(), + keystores.withdrawal.path().unwrap(), format!("m/12381/3600/{}/0", i), "withdrawal path should match" ); diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index f7435b9865d..82c933c87ce 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "0.2.1" +version = "0.2.8" authors = ["Paul Hauner "] edition = "2018" diff --git a/lcli/src/main.rs b/lcli/src/main.rs index aaf6273c9f2..a8663556a32 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -15,7 +15,7 @@ mod transition_blocks; use clap::{App, Arg, ArgMatches, SubCommand}; use environment::EnvironmentBuilder; -use log::Level; +use log::LevelFilter; use parse_hex::run_parse_hex; use std::fs::File; use std::path::PathBuf; @@ -25,7 +25,10 @@ use transition_blocks::run_transition_blocks; use types::{test_utils::TestingBeaconStateBuilder, EthSpec, MainnetEthSpec, MinimalEthSpec}; fn main() { - simple_logger::init_with_level(Level::Info).expect("logger should initialize"); + simple_logger::SimpleLogger::new() + .with_level(LevelFilter::Info) + .init() + .expect("Logger should be initialised"); let matches = App::new("Lighthouse CLI Tool") .version(lighthouse_version::VERSION) diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index ff8a8d1cec1..b2b5ed15fcc 100644 --- 
a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "0.2.1" +version = "0.2.8" authors = ["Sigma Prime "] edition = "2018" diff --git a/lighthouse/environment/Cargo.toml b/lighthouse/environment/Cargo.toml index 8208f9b6dbf..ec05faaedf5 100644 --- a/lighthouse/environment/Cargo.toml +++ b/lighthouse/environment/Cargo.toml @@ -21,4 +21,4 @@ slog-json = "2.3.0" exit-future = "0.2.0" lazy_static = "1.4.0" lighthouse_metrics = { path = "../../common/lighthouse_metrics" } -discv5 = "0.1.0-alpha.7" +discv5 = { version = "0.1.0-alpha.10", features = ["libp2p"] } diff --git a/lighthouse/environment/src/executor.rs b/lighthouse/environment/src/executor.rs index f7d06cc516a..b5f415187e9 100644 --- a/lighthouse/environment/src/executor.rs +++ b/lighthouse/environment/src/executor.rs @@ -1,4 +1,5 @@ use crate::metrics; +use futures::channel::mpsc::Sender; use futures::prelude::*; use slog::{debug, trace}; use tokio::runtime::Handle; @@ -7,9 +8,15 @@ use tokio::runtime::Handle; #[derive(Clone)] pub struct TaskExecutor { /// The handle to the runtime on which tasks are spawned - pub(crate) handle: Handle, + pub handle: Handle, /// The receiver exit future which on receiving shuts down the task pub(crate) exit: exit_future::Exit, + /// Sender given to tasks, so that if they encounter a state in which execution cannot + /// continue they can request that everything shuts down. + /// + /// The task must provide a reason for shutting down. + pub(crate) signal_tx: Sender<&'static str>, + pub(crate) log: slog::Logger, } @@ -18,8 +25,18 @@ impl TaskExecutor { /// /// Note: this function is mainly useful in tests. 
A `TaskExecutor` should be normally obtained from /// a [`RuntimeContext`](struct.RuntimeContext.html) - pub fn new(handle: Handle, exit: exit_future::Exit, log: slog::Logger) -> Self { - Self { handle, exit, log } + pub fn new( + handle: Handle, + exit: exit_future::Exit, + log: slog::Logger, + signal_tx: Sender<&'static str>, + ) -> Self { + Self { + handle, + exit, + signal_tx, + log, + } } /// Spawn a future on the tokio runtime wrapped in an `exit_future::Exit`. The task is canceled @@ -51,7 +68,7 @@ impl TaskExecutor { /// Spawn a future on the tokio runtime. This function does not wrap the task in an `exit_future::Exit` /// like [spawn](#method.spawn). - /// The caller of this function is responsible for wrapping up the task with an `exit_future::Exit` to + /// The caller of this function is responsible for wrapping up the task with an `exit_future::Exit` to /// ensure that the task gets canceled appropriately. /// This function generates prometheus metrics on number of tasks and task duration. /// @@ -121,6 +138,11 @@ impl TaskExecutor { self.exit.clone() } + /// Get a channel to request shutting down. + pub fn shutdown_sender(&self) -> Sender<&'static str> { + self.signal_tx.clone() + } + /// Returns a reference to the logger. pub fn log(&self) -> &slog::Logger { &self.log diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index 3d26aa6d2a7..81e8eee60fa 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -9,7 +9,11 @@ use eth2_config::Eth2Config; use eth2_testnet_config::Eth2TestnetConfig; -use futures::channel::oneshot; +use futures::channel::{ + mpsc::{channel, Receiver, Sender}, + oneshot, +}; +use futures::{future, StreamExt}; pub use executor::TaskExecutor; use slog::{info, o, Drain, Level, Logger}; @@ -25,6 +29,7 @@ mod executor; mod metrics; pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml"; +const LOG_CHANNEL_SIZE: usize = 2048; /// Builds an `Environment`. 
pub struct EnvironmentBuilder { @@ -125,7 +130,9 @@ impl EnvironmentBuilder { match format.to_uppercase().as_str() { "JSON" => { let drain = slog_json::Json::default(std::io::stdout()).fuse(); - slog_async::Async::new(drain).build() + slog_async::Async::new(drain) + .chan_size(LOG_CHANNEL_SIZE) + .build() } _ => return Err("Logging format provided is not supported".to_string()), } @@ -134,7 +141,9 @@ impl EnvironmentBuilder { let decorator = logging::AlignedTermDecorator::new(decorator, logging::MAX_MESSAGE_WIDTH); let drain = slog_term::FullFormat::new(decorator).build().fuse(); - slog_async::Async::new(drain).build() + slog_async::Async::new(drain) + .chan_size(LOG_CHANNEL_SIZE) + .build() }; let drain = match debug_level { @@ -151,6 +160,81 @@ impl EnvironmentBuilder { Ok(self) } + /// Sets the logger (and all child loggers) to log to a file. + pub fn log_to_file( + mut self, + path: PathBuf, + debug_level: &str, + log_format: Option<&str>, + ) -> Result { + // Creating a backup if the logfile already exists. + if path.exists() { + let start = SystemTime::now(); + let timestamp = start + .duration_since(UNIX_EPOCH) + .map_err(|e| e.to_string())? + .as_secs(); + let file_stem = path + .file_stem() + .ok_or_else(|| "Invalid file name".to_string())? + .to_str() + .ok_or_else(|| "Failed to create str from filename".to_string())?; + let file_ext = path.extension().unwrap_or_else(|| OsStr::new("")); + let backup_name = format!("{}_backup_{}", file_stem, timestamp); + let backup_path = path.with_file_name(backup_name).with_extension(file_ext); + FsRename(&path, &backup_path).map_err(|e| e.to_string())?; + } + + let file = OpenOptions::new() + .create(true) + .write(true) + .truncate(true) + .open(&path) + .map_err(|e| format!("Unable to open logfile: {:?}", e))?; + + // Setting up the initial logger format and building it. 
+ let drain = if let Some(format) = log_format { + match format.to_uppercase().as_str() { + "JSON" => { + let drain = slog_json::Json::default(file).fuse(); + slog_async::Async::new(drain) + .chan_size(LOG_CHANNEL_SIZE) + .build() + } + _ => return Err("Logging format provided is not supported".to_string()), + } + } else { + let decorator = slog_term::PlainDecorator::new(file); + let decorator = + logging::AlignedTermDecorator::new(decorator, logging::MAX_MESSAGE_WIDTH); + let drain = slog_term::FullFormat::new(decorator).build().fuse(); + slog_async::Async::new(drain) + .chan_size(LOG_CHANNEL_SIZE) + .build() + }; + + let drain = match debug_level { + "info" => drain.filter_level(Level::Info), + "debug" => drain.filter_level(Level::Debug), + "trace" => drain.filter_level(Level::Trace), + "warn" => drain.filter_level(Level::Warning), + "error" => drain.filter_level(Level::Error), + "crit" => drain.filter_level(Level::Critical), + unknown => return Err(format!("Unknown debug-level: {}", unknown)), + }; + + let log = Logger::root(drain.fuse(), o!()); + info!( + log, + "Logging to file"; + "path" => format!("{:?}", path) + ); + + self.log = Some(log); + + Ok(self) + } + /// Adds a testnet configuration to the environment. pub fn eth2_testnet_config( mut self, @@ -189,10 +273,13 @@ impl EnvironmentBuilder { /// Consumes the builder, returning an `Environment`. 
pub fn build(self) -> Result, String> { let (signal, exit) = exit_future::signal(); + let (signal_tx, signal_rx) = channel(1); Ok(Environment { runtime: self .runtime .ok_or_else(|| "Cannot build environment without runtime".to_string())?, + signal_tx, + signal_rx: Some(signal_rx), signal: Some(signal), exit, log: self @@ -224,6 +311,7 @@ impl RuntimeContext { Self { executor: TaskExecutor { handle: self.executor.handle.clone(), + signal_tx: self.executor.signal_tx.clone(), exit: self.executor.exit.clone(), log: self.executor.log.new(o!("service" => service_name)), }, @@ -247,6 +335,10 @@ impl RuntimeContext { /// validator client, or to run tests that involve logging and async task execution. pub struct Environment { runtime: Runtime, + /// Receiver side of an internal shutdown signal. + signal_rx: Option>, + /// Sender to request shutting down. + signal_tx: Sender<&'static str>, signal: Option, exit: exit_future::Exit, log: Logger, @@ -269,6 +361,7 @@ impl Environment { RuntimeContext { executor: TaskExecutor { exit: self.exit.clone(), + signal_tx: self.signal_tx.clone(), handle: self.runtime().handle().clone(), log: self.log.clone(), }, @@ -282,6 +375,7 @@ impl Environment { RuntimeContext { executor: TaskExecutor { exit: self.exit.clone(), + signal_tx: self.signal_tx.clone(), handle: self.runtime().handle().clone(), log: self.log.new(o!("service" => service_name)), }, @@ -290,8 +384,20 @@ impl Environment { } } - /// Block the current thread until Ctrl+C is received. - pub fn block_until_ctrl_c(&mut self) -> Result<(), String> { + /// Block the current thread until a shutdown signal is received. + /// + /// This can be either the user Ctrl-C'ing or a task requesting to shutdown. 
+ pub fn block_until_shutdown_requested(&mut self) -> Result<(), String> { + // future of a task requesting to shutdown + let mut rx = self + .signal_rx + .take() + .ok_or("Inner shutdown already received")?; + let inner_shutdown = + async move { rx.next().await.ok_or("Internal shutdown channel exhausted") }; + futures::pin_mut!(inner_shutdown); + + // setup for handling a Ctrl-C let (ctrlc_send, ctrlc_oneshot) = oneshot::channel(); let ctrlc_send_c = RefCell::new(Some(ctrlc_send)); ctrlc::set_handler(move || { @@ -301,10 +407,18 @@ impl Environment { }) .map_err(|e| format!("Could not set ctrlc handler: {:?}", e))?; - // Block this thread until Crtl+C is pressed. - self.runtime() - .block_on(ctrlc_oneshot) - .map_err(|e| format!("Ctrlc oneshot failed: {:?}", e)) + // Block this thread until a shutdown signal is received. + match self + .runtime() + .block_on(future::select(inner_shutdown, ctrlc_oneshot)) + { + future::Either::Left((Ok(reason), _)) => { + info!(self.log, "Internal shutdown received"; "reason" => reason); + Ok(()) + } + future::Either::Left((Err(e), _)) => Err(e.into()), + future::Either::Right((x, _)) => x.map_err(|e| format!("Ctrlc oneshot failed: {}", e)), + } } /// Shutdown the `tokio` runtime when all tasks are idle. @@ -320,68 +434,6 @@ impl Environment { } } - /// Sets the logger (and all child loggers) to log to a file. - pub fn log_to_json_file( - &mut self, - path: PathBuf, - debug_level: &str, - log_format: Option<&str>, - ) -> Result<(), String> { - // Creating a backup if the logfile already exists. - if path.exists() { - let start = SystemTime::now(); - let timestamp = start - .duration_since(UNIX_EPOCH) - .map_err(|e| e.to_string())? - .as_secs(); - let file_stem = path - .file_stem() - .ok_or_else(|| "Invalid file name".to_string())? 
- .to_str() - .ok_or_else(|| "Failed to create str from filename".to_string())?; - let file_ext = path.extension().unwrap_or_else(|| OsStr::new("")); - let backup_name = format!("{}_backup_{}", file_stem, timestamp); - let backup_path = path.with_file_name(backup_name).with_extension(file_ext); - FsRename(&path, &backup_path).map_err(|e| e.to_string())?; - } - - let file = OpenOptions::new() - .create(true) - .write(true) - .truncate(true) - .open(&path) - .map_err(|e| format!("Unable to open logfile: {:?}", e))?; - - let log_format = log_format.unwrap_or("JSON"); - let drain = match log_format.to_uppercase().as_str() { - "JSON" => { - let drain = slog_json::Json::default(file).fuse(); - slog_async::Async::new(drain).build() - } - _ => return Err("Logging format provided is not supported".to_string()), - }; - - let drain = match debug_level { - "info" => drain.filter_level(Level::Info), - "debug" => drain.filter_level(Level::Debug), - "trace" => drain.filter_level(Level::Trace), - "warn" => drain.filter_level(Level::Warning), - "error" => drain.filter_level(Level::Error), - "crit" => drain.filter_level(Level::Critical), - unknown => return Err(format!("Unknown debug-level: {}", unknown)), - }; - - self.log = Logger::root(drain.fuse(), o!()); - - info!( - self.log, - "Logging to JSON file"; - "path" => format!("{:?}", path) - ); - - Ok(()) - } - pub fn eth_spec_instance(&self) -> &E { &self.eth_spec_instance } diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 6ba806e4b7d..52e90f17980 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -63,7 +63,7 @@ fn main() { .long("logfile") .value_name("FILE") .help( - "File path where output will be written. 
Default file logging format is JSON.", + "File path where output will be written.", ) .takes_value(true), ) @@ -189,29 +189,33 @@ fn run( // Parse testnet config from the `testnet` and `testnet-dir` flag in that order // else, use the default - let mut optional_testnet_config = Eth2TestnetConfig::hard_coded_default()?; + let mut optional_testnet_config = None; if matches.is_present("testnet") { optional_testnet_config = clap_utils::parse_hardcoded_network(matches, "testnet")?; }; if matches.is_present("testnet-dir") { optional_testnet_config = clap_utils::parse_testnet_dir(matches, "testnet-dir")?; }; + if optional_testnet_config.is_none() { + optional_testnet_config = Eth2TestnetConfig::hard_coded_default()?; + } + + let builder = if let Some(log_path) = matches.value_of("logfile") { + let path = log_path + .parse::() + .map_err(|e| format!("Failed to parse log path: {:?}", e))?; + environment_builder.log_to_file(path, debug_level, log_format)? + } else { + environment_builder.async_logger(debug_level, log_format)? + }; - let mut environment = environment_builder - .async_logger(debug_level, log_format)? + let mut environment = builder .multi_threaded_tokio_runtime()? .optional_eth2_testnet_config(optional_testnet_config)? .build()?; let log = environment.core_context().log().clone(); - if let Some(log_path) = matches.value_of("logfile") { - let path = log_path - .parse::() - .map_err(|e| format!("Failed to parse log path: {:?}", e))?; - environment.log_to_json_file(path, debug_level, log_format)?; - } - // Note: the current code technically allows for starting a beacon node _and_ a validator // client at the same time. // @@ -244,6 +248,7 @@ fn run( log, "Ethereum 2.0 is pre-release. This software is experimental." ); + info!(log, "Lighthouse started"; "version" => VERSION); info!( log, "Configured for testnet"; @@ -298,8 +303,8 @@ fn run( return Err("No subcommand supplied.".into()); } - // Block this thread until Crtl+C is pressed. 
- environment.block_until_ctrl_c()?; + // Block this thread until we get a ctrl-c or a task sends a shutdown signal. + environment.block_until_shutdown_requested()?; info!(log, "Shutting down.."); environment.fire_signal(); diff --git a/lighthouse/tests/account_manager.rs b/lighthouse/tests/account_manager.rs index 133bbd721f7..6652c1ec64b 100644 --- a/lighthouse/tests/account_manager.rs +++ b/lighthouse/tests/account_manager.rs @@ -102,7 +102,7 @@ fn create_wallet>( .arg(CREATE_CMD) .arg(format!("--{}", NAME_FLAG)) .arg(&name) - .arg(format!("--{}", PASSPHRASE_FLAG)) + .arg(format!("--{}", PASSWORD_FLAG)) .arg(password.as_ref().as_os_str()) .arg(format!("--{}", MNEMONIC_FLAG)) .arg(mnemonic.as_ref().as_os_str()), @@ -238,7 +238,7 @@ impl TestValidator { .arg(CREATE_CMD) .arg(format!("--{}", WALLET_NAME_FLAG)) .arg(&self.wallet.name) - .arg(format!("--{}", WALLET_PASSPHRASE_FLAG)) + .arg(format!("--{}", WALLET_PASSWORD_FLAG)) .arg(self.wallet.password_path().into_os_string()) .arg(format!("--{}", VALIDATOR_DIR_FLAG)) .arg(self.validator_dir.clone().into_os_string()) diff --git a/scripts/change_version.sh b/scripts/change_version.sh new file mode 100755 index 00000000000..390e9c122fd --- /dev/null +++ b/scripts/change_version.sh @@ -0,0 +1,32 @@ +# Change the version across multiple files, prior to a release. Use `sed` to +# find/replace the existing version with the new one. +# +# Takes two arguments: +# +# 1. Current version (e.g., `0.2.6`) +# 2. 
New version (e.g., `0.2.7`) +# +# ## Example: +# +# `./change_version.sh 0.2.6 0.2.7` + +FROM=$1 +TO=$2 +VERSION_CRATE="../common/lighthouse_version/src/lib.rs" + +update_cargo_toml () { + echo $1 + sed -i -e "s/version = \"$FROM\"/version = \"$TO\"/g" $1 +} + +echo "Changing version from $FROM to $TO" + +update_cargo_toml ../account_manager/Cargo.toml +update_cargo_toml ../beacon_node/Cargo.toml +update_cargo_toml ../boot_node/Cargo.toml +update_cargo_toml ../lcli/Cargo.toml +update_cargo_toml ../lighthouse/Cargo.toml +update_cargo_toml ../validator_client/Cargo.toml + +echo $VERSION_CRATE +sed -i -e "s/$FROM/$TO/g" $VERSION_CRATE diff --git a/scripts/local_testnet/README.md b/scripts/local_testnet/README.md index 0a8de175974..c9934ef98ad 100644 --- a/scripts/local_testnet/README.md +++ b/scripts/local_testnet/README.md @@ -19,26 +19,26 @@ Assuming you are happy with the configuration in `var.env`, create the testnet directory, genesis state and validator keys with: ```bash -./setup +./setup.sh ``` Start the first beacon node: ```bash -./beacon_node +./beacon_node.sh ``` In a new terminal, start the validator client which will attach to the first beacon node: ```bash -./validator_client +./validator_client.sh ``` In a new terminal, start the second beacon node which will peer with the first: ```bash -./second_beacon_node +./second_beacon_node.sh ``` ## Additional Info @@ -49,9 +49,9 @@ The beacon nodes and validator client have their `--debug-level` set to `info`. Specify a different debug level like this: ```bash -./validator_client debug -./beacon_node trace -./second_beacon_node warn +./validator_client.sh debug +./beacon_node.sh trace +./second_beacon_node.sh warn ``` ### Starting fresh @@ -59,7 +59,7 @@ Specify a different debug level like this: Delete the current testnet and all related files using: ```bash -./clean +./clean.sh ``` @@ -71,9 +71,9 @@ genesis state will be far in the future, causing lots of skip slots. 
Update the genesis time to now using: ```bash -./reset_genesis_time +./reset_genesis_time.sh ``` > Note: you probably want to drop the beacon node database and the validator > client slashing database if you do this. When using small validator counts -> it's probably easy to just use `./clean && ./setup`. +> it's probably easy to just use `./clean.sh && ./setup.sh`. diff --git a/scripts/local_testnet/beacon_node.sh b/scripts/local_testnet/beacon_node.sh index f3fab6d4c97..56c632802fe 100755 --- a/scripts/local_testnet/beacon_node.sh +++ b/scripts/local_testnet/beacon_node.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # # Starts a beacon node based upon a genesis state created by diff --git a/scripts/local_testnet/second_beacon_node.sh b/scripts/local_testnet/second_beacon_node.sh index 4bef7ec63a5..c7b7aadf3bc 100755 --- a/scripts/local_testnet/second_beacon_node.sh +++ b/scripts/local_testnet/second_beacon_node.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # # Starts a beacon node based upon a genesis state created by @@ -16,5 +16,6 @@ exec lighthouse \ --testnet-dir $TESTNET_DIR \ --dummy-eth1 \ --http \ + --port 9902 \ --http-port 6052 \ --boot-nodes $(cat $BEACON_DIR/beacon/network/enr.dat) diff --git a/scripts/local_testnet/setup.sh b/scripts/local_testnet/setup.sh index a43bad7ead2..8b71183540c 100755 --- a/scripts/local_testnet/setup.sh +++ b/scripts/local_testnet/setup.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # # Produces a testnet specification and a genesis state where the genesis time diff --git a/scripts/local_testnet/validator_client.sh b/scripts/local_testnet/validator_client.sh index 9d639775490..aab44045a4c 100755 --- a/scripts/local_testnet/validator_client.sh +++ b/scripts/local_testnet/validator_client.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # # Starts a validator client based upon a genesis state created by diff --git a/testing/eth1_test_rig/src/ganache.rs b/testing/eth1_test_rig/src/ganache.rs index 
faaae0ef35e..9452d524f23 100644 --- a/testing/eth1_test_rig/src/ganache.rs +++ b/testing/eth1_test_rig/src/ganache.rs @@ -14,6 +14,8 @@ use web3::{ /// How long we will wait for ganache to indicate that it is ready. const GANACHE_STARTUP_TIMEOUT_MILLIS: u64 = 10_000; +const NETWORK_ID: u64 = 42; + /// Provides a dedicated `ganachi-cli` instance with a connected `Web3` instance. /// /// Requires that `ganachi-cli` is installed and available on `PATH`. @@ -42,6 +44,8 @@ impl GanacheInstance { .arg(format!("{}", port)) .arg("--mnemonic") .arg("\"vast thought differ pull jewel broom cook wrist tribe word before omit\"") + .arg("--networkId") + .arg(format!("{}", NETWORK_ID)) .spawn() .map_err(|e| { format!( @@ -97,6 +101,11 @@ impl GanacheInstance { endpoint(self.port) } + /// Returns the network id of the ganache instance + pub fn network_id(&self) -> u64 { + NETWORK_ID + } + /// Increase the timestamp on future blocks by `increase_by` seconds. pub async fn increase_time(&self, increase_by: u64) -> Result<(), String> { self.web3 diff --git a/testing/node_test_rig/Cargo.toml b/testing/node_test_rig/Cargo.toml index dc46d2ba254..a48f24f3f04 100644 --- a/testing/node_test_rig/Cargo.toml +++ b/testing/node_test_rig/Cargo.toml @@ -10,7 +10,7 @@ beacon_node = { path = "../../beacon_node" } types = { path = "../../consensus/types" } eth2_config = { path = "../../common/eth2_config" } tempdir = "0.3.7" -reqwest = "0.10.4" +reqwest = { version = "0.10.4", features = ["native-tls-vendored"] } url = "2.1.1" serde = "1.0.110" futures = "0.3.5" diff --git a/testing/simulator/Cargo.toml b/testing/simulator/Cargo.toml index 07e1e344850..6670e3557c9 100644 --- a/testing/simulator/Cargo.toml +++ b/testing/simulator/Cargo.toml @@ -8,6 +8,7 @@ edition = "2018" [dependencies] node_test_rig = { path = "../node_test_rig" } +eth1 = {path = "../../beacon_node/eth1"} types = { path = "../../consensus/types" } validator_client = { path = "../../validator_client" } parking_lot = "0.11.0" diff 
--git a/testing/simulator/src/cli.rs b/testing/simulator/src/cli.rs index 444e670104a..de78aaa0578 100644 --- a/testing/simulator/src/cli.rs +++ b/testing/simulator/src/cli.rs @@ -36,11 +36,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(true) .default_value("4") .help("Speed up factor")) - .arg(Arg::with_name("end_after_checks") - .short("e") - .long("end_after_checks") + .arg(Arg::with_name("continue_after_checks") + .short("c") + .long("continue_after_checks") .takes_value(false) - .help("End after checks (default true)")) + .help("Continue after checks (default false)")) ) .subcommand( SubCommand::with_name("no-eth1-sim") @@ -64,11 +64,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(true) .default_value("4") .help("Speed up factor")) - .arg(Arg::with_name("end_after_checks") - .short("e") - .long("end_after_checks") + .arg(Arg::with_name("continue_after_checks") + .short("c") + .long("continue_after_checks") .takes_value(false) - .help("End after checks (default true)")) + .help("Continue after checks (default false)")) ) .subcommand( SubCommand::with_name("syncing-sim") diff --git a/testing/simulator/src/eth1_sim.rs b/testing/simulator/src/eth1_sim.rs index 64c439320ad..75f2256a1f8 100644 --- a/testing/simulator/src/eth1_sim.rs +++ b/testing/simulator/src/eth1_sim.rs @@ -1,5 +1,6 @@ use crate::{checks, LocalNetwork, E}; use clap::ArgMatches; +use eth1::http::Eth1NetworkId; use eth1_test_rig::GanacheEth1Instance; use futures::prelude::*; use node_test_rig::{ @@ -17,12 +18,12 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { .expect("missing validators_per_node default"); let speed_up_factor = value_t!(matches, "speed_up_factor", u64).expect("missing speed_up_factor default"); - let end_after_checks = !matches.is_present("end_after_checks"); + let continue_after_checks = matches.is_present("continue_after_checks"); println!("Beacon Chain Simulator:"); println!(" nodes:{}", node_count); println!(" 
validators_per_node:{}", validators_per_node); - println!(" end_after_checks:{}", end_after_checks); + println!(" continue_after_checks:{}", continue_after_checks); // Generate the directories and keystores required for the validator clients. let validator_files = (0..node_count) @@ -73,6 +74,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { */ let ganache_eth1_instance = GanacheEth1Instance::new().await?; let deposit_contract = ganache_eth1_instance.deposit_contract; + let network_id = ganache_eth1_instance.ganache.network_id(); let ganache = ganache_eth1_instance.ganache; let eth1_endpoint = ganache.endpoint(); let deposit_contract_address = deposit_contract.address(); @@ -105,6 +107,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { beacon_config.eth1.follow_distance = 1; beacon_config.dummy_eth1_backend = false; beacon_config.sync_eth1_chain = true; + beacon_config.eth1.network_id = Eth1NetworkId::Custom(network_id); beacon_config.network.enr_address = Some(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1))); @@ -174,9 +177,9 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { onboarding?; // The `final_future` either completes immediately or never completes, depending on the value - // of `end_after_checks`. + // of `continue_after_checks`. 
- if !end_after_checks { + if continue_after_checks { future::pending::<()>().await; } /* diff --git a/testing/simulator/src/local_network.rs b/testing/simulator/src/local_network.rs index b2421bcefc8..37ce3ab5664 100644 --- a/testing/simulator/src/local_network.rs +++ b/testing/simulator/src/local_network.rs @@ -86,7 +86,7 @@ impl LocalNetwork { let boot_node = read_lock.first().expect("should have at least one node"); - beacon_config.network.boot_nodes.push( + beacon_config.network.boot_nodes_enr.push( boot_node .client .enr() diff --git a/testing/simulator/src/no_eth1_sim.rs b/testing/simulator/src/no_eth1_sim.rs index b95bdce9313..621b8ef3bf4 100644 --- a/testing/simulator/src/no_eth1_sim.rs +++ b/testing/simulator/src/no_eth1_sim.rs @@ -17,12 +17,12 @@ pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> { .expect("missing validators_per_node default"); let speed_up_factor = value_t!(matches, "speed_up_factor", u64).expect("missing speed_up_factor default"); - let end_after_checks = !matches.is_present("end_after_checks"); + let continue_after_checks = matches.is_present("continue_after_checks"); println!("Beacon Chain Simulator:"); println!(" nodes:{}", node_count); println!(" validators_per_node:{}", validators_per_node); - println!(" end_after_checks:{}", end_after_checks); + println!(" continue_after_checks:{}", continue_after_checks); // Generate the directories and keystores required for the validator clients. let validator_files = (0..node_count) @@ -141,9 +141,9 @@ pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> { start_checks?; // The `final_future` either completes immediately or never completes, depending on the value - // of `end_after_checks`. + // of `continue_after_checks`. 
- if !end_after_checks { + if continue_after_checks { future::pending::<()>().await; } /* diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 2614f6321f9..b3ad54c8e89 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "validator_client" -version = "0.2.0" +version = "0.2.8" authors = ["Paul Hauner ", "Age Manning ", "Luke Anderson "] edition = "2018" diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index 05870696561..60d1f4d5514 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -7,7 +7,7 @@ use slog::{crit, debug, error, info, trace, warn}; use slot_clock::SlotClock; use std::ops::Deref; use std::sync::Arc; -use types::{EthSpec, PublicKey, Slot}; +use types::{EthSpec, Graffiti, PublicKey, Slot}; /// Builds a `BlockService`. pub struct BlockServiceBuilder { @@ -15,6 +15,7 @@ pub struct BlockServiceBuilder { slot_clock: Option>, beacon_node: Option>, context: Option>, + graffiti: Option, } impl BlockServiceBuilder { @@ -24,6 +25,7 @@ impl BlockServiceBuilder { slot_clock: None, beacon_node: None, context: None, + graffiti: None, } } @@ -47,6 +49,11 @@ impl BlockServiceBuilder { self } + pub fn graffiti(mut self, graffiti: Option) -> Self { + self.graffiti = graffiti; + self + } + pub fn build(self) -> Result, String> { Ok(BlockService { inner: Arc::new(Inner { @@ -62,6 +69,7 @@ impl BlockServiceBuilder { context: self .context .ok_or_else(|| "Cannot build BlockService without runtime_context")?, + graffiti: self.graffiti, }), }) } @@ -73,6 +81,7 @@ pub struct Inner { slot_clock: Arc, beacon_node: RemoteBeaconNode, context: RuntimeContext, + graffiti: Option, } /// Attempts to produce attestations for any block producer(s) at the start of the epoch. 
@@ -214,7 +223,7 @@ impl BlockService { .beacon_node .http .validator() - .produce_block(slot, randao_reveal) + .produce_block(slot, randao_reveal, self.graffiti) .await .map_err(|e| format!("Error from beacon node when producing block: {:?}", e))?; diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index 3479f4b9d32..ed320c24cde 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -60,4 +60,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { node is not synced.", ), ) + // This overwrites the graffiti configured in the beacon node. + .arg( + Arg::with_name("graffiti") + .long("graffiti") + .help("Specify your custom graffiti to be included in blocks.") + .value_name("GRAFFITI") + .takes_value(true) + ) } diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 4f1b2079e10..482c4ed7007 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -2,6 +2,7 @@ use clap::ArgMatches; use clap_utils::{parse_optional, parse_path_with_default_in_home_dir}; use serde_derive::{Deserialize, Serialize}; use std::path::PathBuf; +use types::{Graffiti, GRAFFITI_BYTES_LEN}; pub const DEFAULT_HTTP_SERVER: &str = "http://localhost:5052/"; pub const DEFAULT_DATA_DIR: &str = ".lighthouse/validators"; @@ -27,6 +28,8 @@ pub struct Config { pub strict_lockfiles: bool, /// If true, don't scan the validators dir for new keystores. pub disable_auto_discover: bool, + /// Graffiti to be inserted everytime we create a block. 
+ pub graffiti: Option, } impl Default for Config { @@ -45,6 +48,7 @@ impl Default for Config { allow_unsynced_beacon_node: false, strict_lockfiles: false, disable_auto_discover: false, + graffiti: None, } } } @@ -80,6 +84,26 @@ impl Config { config.secrets_dir = secrets_dir; } + if let Some(input_graffiti) = cli_args.value_of("graffiti") { + let graffiti_bytes = input_graffiti.as_bytes(); + if graffiti_bytes.len() > GRAFFITI_BYTES_LEN { + return Err(format!( + "Your graffiti is too long! {} bytes maximum!", + GRAFFITI_BYTES_LEN + )); + } else { + // Default graffiti to all 0 bytes. + let mut graffiti = Graffiti::default(); + + // Copy the provided bytes over. + // + // Panic-free because `graffiti_bytes.len()` <= `GRAFFITI_BYTES_LEN`. + graffiti[..graffiti_bytes.len()].copy_from_slice(&graffiti_bytes); + + config.graffiti = Some(graffiti); + } + } + Ok(config) } } diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index e5d8428ceb5..220d82a66ae 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -216,6 +216,7 @@ impl ProductionValidatorClient { .validator_store(validator_store.clone()) .beacon_node(beacon_node.clone()) .runtime_context(context.service_context("block".into())) + .graffiti(config.graffiti) .build()?; let attestation_service = AttestationServiceBuilder::new()