From 089f79a84b241ca37aab81c76b02dfe68ca55b29 Mon Sep 17 00:00:00 2001 From: Zac Holme <79027434+Zacholme7@users.noreply.github.com> Date: Mon, 13 Jan 2025 17:03:35 -0600 Subject: [PATCH] Execution integration (#59) --- .github/wordlist.txt | 7 +- .gitignore | 2 +- Cargo.lock | 1493 ++++++++++++++++++-- Cargo.toml | 83 +- anchor/database/src/cluster_operations.rs | 4 +- anchor/database/src/operator_operations.rs | 2 +- anchor/database/src/sql_operations.rs | 2 +- anchor/database/src/state.rs | 18 +- anchor/database/src/tests/state_tests.rs | 6 +- anchor/eth/Cargo.toml | 25 + anchor/eth/README.md | 36 + anchor/eth/execution.rs | 63 + anchor/eth/src/error.rs | 19 + anchor/eth/src/event_parser.rs | 40 + anchor/eth/src/event_processor.rs | 520 +++++++ anchor/eth/src/gen.rs | 24 + anchor/eth/src/lib.rs | 8 + anchor/eth/src/network_actions.rs | 88 ++ anchor/eth/src/sync.rs | 411 ++++++ anchor/eth/src/util.rs | 264 ++++ anchor/http_api/Cargo.toml | 2 +- anchor/network/Cargo.toml | 13 +- book/book.toml | 1 - 23 files changed, 2976 insertions(+), 155 deletions(-) create mode 100644 anchor/eth/Cargo.toml create mode 100644 anchor/eth/README.md create mode 100644 anchor/eth/execution.rs create mode 100644 anchor/eth/src/error.rs create mode 100644 anchor/eth/src/event_parser.rs create mode 100644 anchor/eth/src/event_processor.rs create mode 100644 anchor/eth/src/gen.rs create mode 100644 anchor/eth/src/lib.rs create mode 100644 anchor/eth/src/network_actions.rs create mode 100644 anchor/eth/src/sync.rs create mode 100644 anchor/eth/src/util.rs diff --git a/.github/wordlist.txt b/.github/wordlist.txt index 8420359c..09748715 100644 --- a/.github/wordlist.txt +++ b/.github/wordlist.txt @@ -42,4 +42,9 @@ TODOs UI Validator validator -validators \ No newline at end of file +validators +Holesky +Mainnet +lifecycle +Syncer + diff --git a/.gitignore b/.gitignore index 4240a362..23588ca1 100644 --- a/.gitignore +++ b/.gitignore @@ -19,4 +19,4 @@ perf.data* /.vscode # cross -/zcross \ No newline at end of file +/zcross diff --git a/Cargo.lock b/Cargo.lock index df0f0672..bc70dd91 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -78,6 +78,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", + "getrandom", "once_cell", "version_check", "zerocopy", @@ -98,6 +99,220 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" +[[package]] +name = "alloy" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5b524b8c28a7145d1fe4950f84360b5de3e307601679ff0558ddc20ea229399" +dependencies = [ + "alloy-consensus", + "alloy-contract", + "alloy-core", + "alloy-eips", + "alloy-genesis", + "alloy-network", + "alloy-provider", + "alloy-pubsub", + "alloy-rpc-client", + "alloy-rpc-types", + "alloy-serde", + "alloy-transport", + "alloy-transport-http", + "alloy-transport-ws", +] + +[[package]] +name = "alloy-chains" +version = "0.1.55" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e39f295f876b61a1222d937e1dd31f965e4a1acc3bba98e448dd7e84b1a4566" +dependencies = [ + "alloy-primitives", + "num_enum", + "strum 0.26.3", +] + +[[package]] +name = "alloy-consensus" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ae09ffd7c29062431dd86061deefe4e3c6f07fa0d674930095f8dcedb0baf02c" +dependencies = [ + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "auto_impl", + "c-kzg", + "derive_more 1.0.0", + "serde", +] + +[[package]] +name = "alloy-contract" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66430a72d5bf5edead101c8c2f0a24bada5ec9f3cf9909b3e08b6d6899b4803e" +dependencies = [ + "alloy-dyn-abi", + "alloy-json-abi", + "alloy-network", + "alloy-network-primitives", + "alloy-primitives", + "alloy-provider", + "alloy-pubsub", + "alloy-rpc-types-eth", + "alloy-sol-types", + "alloy-transport", + "futures", + "futures-util", + "thiserror 1.0.69", +] + +[[package]] +name = "alloy-core" +version = "0.8.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0713007d14d88a6edb8e248cddab783b698dbb954a28b8eee4bab21cfb7e578" +dependencies = [ + "alloy-dyn-abi", + "alloy-json-abi", + "alloy-primitives", + "alloy-rlp", + "alloy-sol-types", +] + +[[package]] +name = "alloy-dyn-abi" +version = "0.8.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44e3b98c37b3218924cd1d2a8570666b89662be54e5b182643855f783ea68b33" +dependencies = [ + "alloy-json-abi", + "alloy-primitives", + "alloy-sol-type-parser", + "alloy-sol-types", + "const-hex", + "itoa", + "serde", + "serde_json", + "winnow", +] + +[[package]] +name = "alloy-eip2930" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0069cf0642457f87a01a014f6dc29d5d893cd4fd8fddf0c3cdfad1bb3ebafc41" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "serde", +] + +[[package]] +name = "alloy-eip7702" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c986539255fb839d1533c128e190e557e52ff652c9ef62939e233a81dd93f7e" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "derive_more 1.0.0", + "serde", +] + +[[package]] +name = "alloy-eips" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b6aa3961694b30ba53d41006131a2fca3bdab22e4c344e46db2c639e7c2dfdd" +dependencies = [ + "alloy-eip2930", + "alloy-eip7702", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "c-kzg", + "derive_more 1.0.0", + "once_cell", + "serde", + "sha2 0.10.8", +] + +[[package]] +name = "alloy-genesis" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e53f7877ded3921d18a0a9556d55bedf84535567198c9edab2aa23106da91855" +dependencies = [ + "alloy-primitives", + "alloy-serde", + "serde", +] + +[[package]] +name = "alloy-json-abi" +version = "0.8.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "731ea743b3d843bc657e120fb1d1e9cc94f5dab8107e35a82125a63e6420a102" +dependencies = [ + "alloy-primitives", + "alloy-sol-type-parser", + "serde", + "serde_json", +] + +[[package]] +name = "alloy-json-rpc" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3694b7e480728c0b3e228384f223937f14c10caef5a4c766021190fc8f283d35" +dependencies = [ + "alloy-primitives", + "alloy-sol-types", + "serde", + "serde_json", + "thiserror 1.0.69", + "tracing", +] + +[[package]] +name = "alloy-network" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea94b8ceb5c75d7df0a93ba0acc53b55a22b47b532b600a800a87ef04eb5b0b4" 
+dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-json-rpc", + "alloy-network-primitives", + "alloy-primitives", + "alloy-rpc-types-eth", + "alloy-serde", + "alloy-signer", + "alloy-sol-types", + "async-trait", + "auto_impl", + "futures-utils-wasm", + "serde", + "serde_json", + "thiserror 1.0.69", +] + +[[package]] +name = "alloy-network-primitives" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df9f3e281005943944d15ee8491534a1c7b3cbf7a7de26f8c433b842b93eb5f9" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-serde", + "serde", +] + [[package]] name = "alloy-primitives" version = "0.8.18" @@ -129,6 +344,64 @@ dependencies = [ "tiny-keccak", ] +[[package]] +name = "alloy-provider" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40c1f9eede27bf4c13c099e8e64d54efd7ce80ef6ea47478aa75d5d74e2dba3b" +dependencies = [ + "alloy-chains", + "alloy-consensus", + "alloy-eips", + "alloy-json-rpc", + "alloy-network", + "alloy-network-primitives", + "alloy-primitives", + "alloy-pubsub", + "alloy-rpc-client", + "alloy-rpc-types-eth", + "alloy-transport", + "alloy-transport-http", + "alloy-transport-ws", + "async-stream", + "async-trait", + "auto_impl", + "dashmap", + "futures", + "futures-utils-wasm", + "lru", + "parking_lot", + "pin-project", + "reqwest 0.12.12", + "schnellru", + "serde", + "serde_json", + "thiserror 1.0.69", + "tokio", + "tracing", + "url", + "wasmtimer", +] + +[[package]] +name = "alloy-pubsub" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90f1f34232f77341076541c405482e4ae12f0ee7153d8f9969fc1691201b2247" +dependencies = [ + "alloy-json-rpc", + "alloy-primitives", + "alloy-transport", + "bimap", + "futures", + "serde", + "serde_json", + "tokio", + "tokio-stream", + "tower", + "tracing", +] + [[package]] name = "alloy-rlp" version = "0.3.10" @@ -151,6 +424,213 @@ dependencies = [ "syn 2.0.96", ] +[[package]] +name = "alloy-rpc-client" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "374dbe0dc3abdc2c964f36b3d3edf9cdb3db29d16bda34aa123f03d810bec1dd" +dependencies = [ + "alloy-json-rpc", + "alloy-primitives", + "alloy-pubsub", + "alloy-transport", + "alloy-transport-http", + "alloy-transport-ws", + "futures", + "pin-project", + "reqwest 0.12.12", + "serde", + "serde_json", + "tokio", + "tokio-stream", + "tower", + "tracing", + "url", + "wasmtimer", +] + +[[package]] +name = "alloy-rpc-types" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c74832aa474b670309c20fffc2a869fa141edab7c79ff7963fad0a08de60bae1" +dependencies = [ + "alloy-primitives", + "alloy-rpc-types-eth", + "alloy-serde", + "serde", +] + +[[package]] +name = "alloy-rpc-types-eth" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8a477281940d82d29315846c7216db45b15e90bcd52309da9f54bcf7ad94a11" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-network-primitives", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "alloy-sol-types", + "derive_more 1.0.0", + "itertools 0.13.0", + "serde", + "serde_json", +] + +[[package]] +name = "alloy-serde" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4dfa4a7ccf15b2492bb68088692481fd6b2604ccbee1d0d6c44c21427ae4df83" +dependencies = [ 
+ "alloy-primitives", + "serde", + "serde_json", +] + +[[package]] +name = "alloy-signer" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e10aec39d60dc27edcac447302c7803d2371946fb737245320a05b78eb2fafd" +dependencies = [ + "alloy-primitives", + "async-trait", + "auto_impl", + "elliptic-curve", + "k256", + "thiserror 1.0.69", +] + +[[package]] +name = "alloy-sol-macro" +version = "0.8.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a07b74d48661ab2e4b50bb5950d74dbff5e61dd8ed03bb822281b706d54ebacb" +dependencies = [ + "alloy-sol-macro-expander", + "alloy-sol-macro-input", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "alloy-sol-macro-expander" +version = "0.8.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19cc9c7f20b90f9be1a8f71a3d8e283a43745137b0837b1a1cb13159d37cad72" +dependencies = [ + "alloy-json-abi", + "alloy-sol-macro-input", + "const-hex", + "heck 0.5.0", + "indexmap", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.96", + "syn-solidity", + "tiny-keccak", +] + +[[package]] +name = "alloy-sol-macro-input" +version = "0.8.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "713b7e6dfe1cb2f55c80fb05fd22ed085a1b4e48217611365ed0ae598a74c6ac" +dependencies = [ + "alloy-json-abi", + "const-hex", + "dunce", + "heck 0.5.0", + "proc-macro2", + "quote", + "serde_json", + "syn 2.0.96", + "syn-solidity", +] + +[[package]] +name = "alloy-sol-type-parser" +version = "0.8.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1eda2711ab2e1fb517fc6e2ffa9728c9a232e296d16810810e6957b781a1b8bc" +dependencies = [ + "serde", + "winnow", +] + +[[package]] +name = "alloy-sol-types" +version = "0.8.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3b478bc9c0c4737a04cd976accde4df7eba0bdc0d90ad6ff43d58bc93cf79c1" +dependencies = [ + "alloy-json-abi", + "alloy-primitives", + "alloy-sol-macro", + "const-hex", + "serde", +] + +[[package]] +name = "alloy-transport" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f99acddb34000d104961897dbb0240298e8b775a7efffb9fda2a1a3efedd65b3" +dependencies = [ + "alloy-json-rpc", + "base64 0.22.1", + "futures-util", + "futures-utils-wasm", + "serde", + "serde_json", + "thiserror 1.0.69", + "tokio", + "tower", + "tracing", + "url", + "wasmtimer", +] + +[[package]] +name = "alloy-transport-http" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dc013132e34eeadaa0add7e74164c1503988bfba8bae885b32e0918ba85a8a6" +dependencies = [ + "alloy-json-rpc", + "alloy-transport", + "reqwest 0.12.12", + "serde_json", + "tower", + "tracing", + "url", +] + +[[package]] +name = "alloy-transport-ws" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abd170e600801116d5efe64f74a4fc073dbbb35c807013a7d0a388742aeebba0" +dependencies = [ + "alloy-pubsub", + "alloy-transport", + "futures", + "http 1.2.0", + "rustls 0.23.21", + "serde_json", + "tokio", + "tokio-tungstenite", + "tracing", + "ws_stream_wasm", +] + [[package]] name = "anchor" version = "0.1.0" @@ -161,7 +641,7 @@ dependencies = [ "dirs 5.0.1", "futures", "regex", - "sensitive_url", + "sensitive_url 0.1.0 
(git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", "serde", "task_executor", "tokio", @@ -485,6 +965,28 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "async-stream" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" +dependencies = [ + "async-stream-impl", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + [[package]] name = "async-trait" version = "0.1.85" @@ -496,6 +998,17 @@ dependencies = [ "syn 2.0.96", ] +[[package]] +name = "async_io_stream" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6d7b9decdf35d8908a7e3ef02f64c5e9b1695e230154c0e8de3969142d9b94c" +dependencies = [ + "futures", + "pharos", + "rustc_version 0.4.1", +] + [[package]] name = "asynchronous-codec" version = "0.7.0" @@ -509,6 +1022,12 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + [[package]] name = "attohttpc" version = "0.24.1" @@ -643,6 +1162,12 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" +[[package]] +name = "bimap" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "230c5f1ca6a325a32553f8640d31ac9b49f2411e901e427570154868b46da4f7" + [[package]] name = "bit-set" version = "0.5.3" @@ -720,10 +1245,50 @@ dependencies = [ "ethereum_hashing", "ethereum_serde_utils", "ethereum_ssz", - "fixed_bytes", + "fixed_bytes 0.1.0 (git+https://github.com/sigp/lighthouse?branch=anchor)", + "hex", + "rand", + "safe_arith 0.1.0 (git+https://github.com/sigp/lighthouse?branch=anchor)", + "serde", + "tree_hash", + "zeroize", +] + +[[package]] +name = "bls" +version = "0.2.0" +source = "git+https://github.com/agemanning/lighthouse?branch=modularize-vc#75a58586623f84f0fd4b0a5c1de43edfc8010a78" +dependencies = [ + "alloy-primitives", + "arbitrary", + "blst", + "ethereum_hashing", + "ethereum_serde_utils", + "ethereum_ssz", + "fixed_bytes 0.1.0 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", "hex", "rand", - "safe_arith", + "safe_arith 0.1.0 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", + "serde", + "tree_hash", + "zeroize", +] + +[[package]] +name = "bls" +version = "0.2.0" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#348fbdb83816ba213945e5c7fcd8c5c5342aa0ed" +dependencies = [ + "alloy-primitives", + "arbitrary", + "blst", + "ethereum_hashing", + "ethereum_serde_utils", + "ethereum_ssz", + "fixed_bytes 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", + "hex", + "rand", + "safe_arith 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "serde", "tree_hash", "zeroize", @@ -825,6 +1390,8 @@ dependencies = [ "glob", "hex", "libc", + "once_cell", + "serde", ] [[package]] @@ -950,7 +1517,7 @@ checksum = 
"f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" [[package]] name = "clap_utils" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=anchor#997991f5513f22bb240816c2e2400cf6a1819a0c" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#348fbdb83816ba213945e5c7fcd8c5c5342aa0ed" dependencies = [ "alloy-primitives", "clap", @@ -961,7 +1528,7 @@ dependencies = [ "serde", "serde_json", "serde_yaml", - "types", + "types 0.2.1 (git+https://github.com/sigp/lighthouse?branch=unstable)", ] [[package]] @@ -978,9 +1545,9 @@ dependencies = [ "network", "parking_lot", "processor", - "sensitive_url", + "sensitive_url 0.1.0 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", "serde", - "strum", + "strum 0.24.1", "task_executor", "tokio", "tracing", @@ -1002,6 +1569,22 @@ dependencies = [ "itertools 0.10.5", ] +[[package]] +name = "compare_fields" +version = "0.2.0" +source = "git+https://github.com/agemanning/lighthouse?branch=modularize-vc#75a58586623f84f0fd4b0a5c1de43edfc8010a78" +dependencies = [ + "itertools 0.10.5", +] + +[[package]] +name = "compare_fields" +version = "0.2.0" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#348fbdb83816ba213945e5c7fcd8c5c5342aa0ed" +dependencies = [ + "itertools 0.10.5", +] + [[package]] name = "compare_fields_derive" version = "0.2.0" @@ -1011,6 +1594,24 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "compare_fields_derive" +version = "0.2.0" +source = "git+https://github.com/agemanning/lighthouse?branch=modularize-vc#75a58586623f84f0fd4b0a5c1de43edfc8010a78" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "compare_fields_derive" +version = "0.2.0" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#348fbdb83816ba213945e5c7fcd8c5c5342aa0ed" +dependencies = [ + "quote", + "syn 1.0.109", +] + [[package]] name = "concurrent-queue" version = "2.5.0" @@ -1427,7 +2028,7 @@ dependencies = [ "rusqlite", "ssv_types", "tempfile", - "types", + "types 0.2.1 (git+https://github.com/sigp/lighthouse?branch=unstable)", ] [[package]] @@ -1554,7 +2155,7 @@ dependencies = [ [[package]] name = "directory" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=anchor#997991f5513f22bb240816c2e2400cf6a1819a0c" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#348fbdb83816ba213945e5c7fcd8c5c5342aa0ed" dependencies = [ "clap", "clap_utils", @@ -1673,6 +2274,12 @@ version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcbb2bf8e87535c23f7a8a321e364ce21462d0ff10cb6407820e8e96dfff6653" +[[package]] +name = "dunce" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + [[package]] name = "ecdsa" version = "0.16.9" @@ -1803,6 +2410,26 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "eth" +version = "0.1.0" +dependencies = [ + "alloy", + "base64 0.22.1", + "database", + "futures", + "hex", + "openssl", + "rand", + "reqwest 0.12.12", + "serde", + "ssv_types", + "tokio", + "tracing", + "tracing-subscriber", + "types 0.2.1 (git+https://github.com/sigp/lighthouse?branch=unstable)", +] + [[package]] name = "eth2" version = "0.1.0" @@ -1819,26 +2446,26 @@ dependencies = [ "libp2p-identity", "mediatype", "multiaddr", - "pretty_reqwest_error", + "pretty_reqwest_error 0.1.0 
(git+https://github.com/sigp/lighthouse?branch=anchor)", "proto_array", - "reqwest", + "reqwest 0.11.27", "reqwest-eventsource", - "sensitive_url", + "sensitive_url 0.1.0 (git+https://github.com/sigp/lighthouse?branch=anchor)", "serde", "serde_json", "slashing_protection", "ssz_types", - "types", + "types 0.2.1 (git+https://github.com/sigp/lighthouse?branch=anchor)", "zeroize", ] [[package]] name = "eth2_config" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=anchor#997991f5513f22bb240816c2e2400cf6a1819a0c" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#348fbdb83816ba213945e5c7fcd8c5c5342aa0ed" dependencies = [ "paste", - "types", + "types 0.2.1 (git+https://github.com/sigp/lighthouse?branch=unstable)", ] [[package]] @@ -1846,7 +2473,33 @@ name = "eth2_interop_keypairs" version = "0.2.0" source = "git+https://github.com/sigp/lighthouse?branch=anchor#997991f5513f22bb240816c2e2400cf6a1819a0c" dependencies = [ - "bls", + "bls 0.2.0 (git+https://github.com/sigp/lighthouse?branch=anchor)", + "ethereum_hashing", + "hex", + "num-bigint", + "serde", + "serde_yaml", +] + +[[package]] +name = "eth2_interop_keypairs" +version = "0.2.0" +source = "git+https://github.com/agemanning/lighthouse?branch=modularize-vc#75a58586623f84f0fd4b0a5c1de43edfc8010a78" +dependencies = [ + "bls 0.2.0 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", + "ethereum_hashing", + "hex", + "num-bigint", + "serde", + "serde_yaml", +] + +[[package]] +name = "eth2_interop_keypairs" +version = "0.2.0" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#348fbdb83816ba213945e5c7fcd8c5c5342aa0ed" +dependencies = [ + "bls 0.2.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "ethereum_hashing", "hex", "num-bigint", @@ -1859,7 +2512,7 @@ name = "eth2_key_derivation" version = "0.1.0" source = "git+https://github.com/sigp/lighthouse?branch=anchor#997991f5513f22bb240816c2e2400cf6a1819a0c" dependencies = [ - "bls", + "bls 0.2.0 (git+https://github.com/sigp/lighthouse?branch=anchor)", "num-bigint-dig", "ring 0.16.20", "sha2 0.9.9", @@ -1872,7 +2525,7 @@ version = "0.1.0" source = "git+https://github.com/sigp/lighthouse?branch=anchor#997991f5513f22bb240816c2e2400cf6a1819a0c" dependencies = [ "aes 0.7.5", - "bls", + "bls 0.2.0 (git+https://github.com/sigp/lighthouse?branch=anchor)", "eth2_key_derivation", "hex", "hmac 0.11.0", @@ -1891,20 +2544,20 @@ dependencies = [ [[package]] name = "eth2_network_config" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=anchor#997991f5513f22bb240816c2e2400cf6a1819a0c" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#348fbdb83816ba213945e5c7fcd8c5c5342aa0ed" dependencies = [ "bytes", "discv5", "eth2_config", - "kzg", + "kzg 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "logging", - "pretty_reqwest_error", - "reqwest", - "sensitive_url", + "pretty_reqwest_error 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", + "reqwest 0.11.27", + "sensitive_url 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "serde_yaml", "sha2 0.9.9", "slog", - "types", + "types 0.2.1 (git+https://github.com/sigp/lighthouse?branch=unstable)", "url", "zip", ] @@ -2098,7 +2751,25 @@ version = "0.1.0" source = 
"git+https://github.com/sigp/lighthouse?branch=anchor#997991f5513f22bb240816c2e2400cf6a1819a0c" dependencies = [ "alloy-primitives", - "safe_arith", + "safe_arith 0.1.0 (git+https://github.com/sigp/lighthouse?branch=anchor)", +] + +[[package]] +name = "fixed_bytes" +version = "0.1.0" +source = "git+https://github.com/agemanning/lighthouse?branch=modularize-vc#75a58586623f84f0fd4b0a5c1de43edfc8010a78" +dependencies = [ + "alloy-primitives", + "safe_arith 0.1.0 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", +] + +[[package]] +name = "fixed_bytes" +version = "0.1.0" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#348fbdb83816ba213945e5c7fcd8c5c5342aa0ed" +dependencies = [ + "alloy-primitives", + "safe_arith 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", ] [[package]] @@ -2240,7 +2911,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" dependencies = [ "futures-io", - "rustls 0.23.20", + "rustls 0.23.21", "rustls-pki-types", ] @@ -2291,6 +2962,12 @@ dependencies = [ "slab", ] +[[package]] +name = "futures-utils-wasm" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42012b0f064e01aa58b545fe3727f90f7dd4020f4a3ea735b50344965f5a57e9" + [[package]] name = "generic-array" version = "0.14.7" @@ -2360,7 +3037,7 @@ checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" [[package]] name = "gossipsub" version = "0.5.0" -source = "git+https://github.com/sigp/lighthouse?branch=anchor#997991f5513f22bb240816c2e2400cf6a1819a0c" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#348fbdb83816ba213945e5c7fcd8c5c5342aa0ed" dependencies = [ "async-channel", "asynchronous-codec", @@ -2418,6 +3095,31 @@ dependencies = [ "tracing", ] +[[package]] +name = "h2" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccae279728d634d083c00f6099cb58f01cc99c145b84b8be2f6c74618d79922e" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http 1.2.0", + "indexmap", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "hashbrown" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" + [[package]] name = "hashbrown" version = "0.14.5" @@ -2464,7 +3166,7 @@ version = "0.1.0" source = "git+https://github.com/sigp/lighthouse?branch=anchor#997991f5513f22bb240816c2e2400cf6a1819a0c" dependencies = [ "eth2", - "metrics", + "metrics 0.2.0 (git+https://github.com/sigp/lighthouse?branch=anchor)", "procfs", "psutil", ] @@ -2688,7 +3390,7 @@ version = "0.1.0" dependencies = [ "axum", "health_metrics", - "metrics", + "metrics 0.2.0 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", "parking_lot", "serde", "tokio", @@ -2719,7 +3421,7 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "h2", + "h2 0.3.26", "http 0.2.12", "http-body 0.4.6", "httparse", @@ -2742,6 +3444,7 @@ dependencies = [ "bytes", "futures-channel", "futures-util", + "h2 0.4.7", "http 1.2.0", "http-body 1.0.1", "httparse", @@ -2750,6 +3453,7 @@ dependencies = [ "pin-project-lite", "smallvec", "tokio", + "want", ] [[package]] @@ -2763,7 +3467,24 @@ dependencies = [ "hyper 0.14.32", 
"rustls 0.21.12", "tokio", - "tokio-rustls", + "tokio-rustls 0.24.1", +] + +[[package]] +name = "hyper-rustls" +version = "0.27.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d191583f3da1305256f22463b9bb0471acad48a4e534a5218b9963e9c1f59b2" +dependencies = [ + "futures-util", + "http 1.2.0", + "hyper 1.5.2", + "hyper-util", + "rustls 0.23.21", + "rustls-pki-types", + "tokio", + "tokio-rustls 0.26.1", + "tower-service", ] [[package]] @@ -2779,6 +3500,22 @@ dependencies = [ "tokio-native-tls", ] +[[package]] +name = "hyper-tls" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" +dependencies = [ + "bytes", + "http-body-util", + "hyper 1.5.2", + "hyper-util", + "native-tls", + "tokio", + "tokio-native-tls", + "tower-service", +] + [[package]] name = "hyper-util" version = "0.1.10" @@ -2786,13 +3523,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" dependencies = [ "bytes", + "futures-channel", "futures-util", "http 1.2.0", "http-body 1.0.1", "hyper 1.5.2", "pin-project-lite", + "socket2", "tokio", "tower-service", + "tracing", ] [[package]] @@ -3073,6 +3813,22 @@ dependencies = [ "bytes", ] +[[package]] +name = "int_to_bytes" +version = "0.2.0" +source = "git+https://github.com/agemanning/lighthouse?branch=modularize-vc#75a58586623f84f0fd4b0a5c1de43edfc8010a78" +dependencies = [ + "bytes", +] + +[[package]] +name = "int_to_bytes" +version = "0.2.0" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#348fbdb83816ba213945e5c7fcd8c5c5342aa0ed" +dependencies = [ + "bytes", +] + [[package]] name = "io-lifetimes" version = "1.0.11" @@ -3214,6 +3970,44 @@ dependencies = [ "tree_hash", ] +[[package]] +name = "kzg" +version = "0.1.0" +source = "git+https://github.com/agemanning/lighthouse?branch=modularize-vc#75a58586623f84f0fd4b0a5c1de43edfc8010a78" +dependencies = [ + "arbitrary", + "c-kzg", + "derivative", + "ethereum_hashing", + "ethereum_serde_utils", + "ethereum_ssz", + "ethereum_ssz_derive", + "hex", + "rust_eth_kzg", + "serde", + "serde_json", + "tree_hash", +] + +[[package]] +name = "kzg" +version = "0.1.0" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#348fbdb83816ba213945e5c7fcd8c5c5342aa0ed" +dependencies = [ + "arbitrary", + "c-kzg", + "derivative", + "ethereum_hashing", + "ethereum_serde_utils", + "ethereum_ssz", + "ethereum_ssz_derive", + "hex", + "rust_eth_kzg", + "serde", + "serde_json", + "tree_hash", +] + [[package]] name = "lazy_static" version = "1.5.0" @@ -3572,7 +4366,7 @@ dependencies = [ "quinn", "rand", "ring 0.17.8", - "rustls 0.23.20", + "rustls 0.23.21", "socket2", "thiserror 1.0.69", "tokio", @@ -3644,7 +4438,7 @@ dependencies = [ "libp2p-identity", "rcgen", "ring 0.17.8", - "rustls 0.23.20", + "rustls 0.23.21", "rustls-webpki 0.101.7", "thiserror 1.0.69", "x509-parser", @@ -3754,7 +4548,7 @@ dependencies = [ [[package]] name = "lighthouse_network" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=anchor#997991f5513f22bb240816c2e2400cf6a1819a0c" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#348fbdb83816ba213945e5c7fcd8c5c5342aa0ed" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -3764,7 +4558,6 @@ dependencies = [ "dirs 3.0.2", "discv5", "either", - "eth2", "ethereum_ssz", 
"ethereum_ssz_derive", "fnv", @@ -3777,7 +4570,7 @@ dependencies = [ "lighthouse_version", "lru", "lru_cache", - "metrics", + "metrics 0.2.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "parking_lot", "prometheus-client", "rand", @@ -3788,14 +4581,14 @@ dependencies = [ "smallvec", "snap", "ssz_types", - "strum", + "strum 0.24.1", "superstruct", "task_executor", "tiny-keccak", "tokio", "tokio-io-timeout", "tokio-util", - "types", + "types 0.2.1 (git+https://github.com/sigp/lighthouse?branch=unstable)", "unsigned-varint 0.8.0", "unused_port", "void", @@ -3804,7 +4597,7 @@ dependencies = [ [[package]] name = "lighthouse_version" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=anchor#997991f5513f22bb240816c2e2400cf6a1819a0c" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#348fbdb83816ba213945e5c7fcd8c5c5342aa0ed" dependencies = [ "git-version", "target_info", @@ -3853,10 +4646,10 @@ checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" [[package]] name = "logging" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=anchor#997991f5513f22bb240816c2e2400cf6a1819a0c" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#348fbdb83816ba213945e5c7fcd8c5c5342aa0ed" dependencies = [ "chrono", - "metrics", + "metrics 0.2.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "parking_lot", "serde", "serde_json", @@ -3893,7 +4686,7 @@ dependencies = [ [[package]] name = "lru_cache" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=anchor#997991f5513f22bb240816c2e2400cf6a1819a0c" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#348fbdb83816ba213945e5c7fcd8c5c5342aa0ed" dependencies = [ "fnv", ] @@ -3962,8 +4755,30 @@ source = "git+https://github.com/sigp/lighthouse?branch=anchor#997991f5513f22bb2 dependencies = [ "alloy-primitives", "ethereum_hashing", - "fixed_bytes", - "safe_arith", + "fixed_bytes 0.1.0 (git+https://github.com/sigp/lighthouse?branch=anchor)", + "safe_arith 0.1.0 (git+https://github.com/sigp/lighthouse?branch=anchor)", +] + +[[package]] +name = "merkle_proof" +version = "0.2.0" +source = "git+https://github.com/agemanning/lighthouse?branch=modularize-vc#75a58586623f84f0fd4b0a5c1de43edfc8010a78" +dependencies = [ + "alloy-primitives", + "ethereum_hashing", + "fixed_bytes 0.1.0 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", + "safe_arith 0.1.0 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", +] + +[[package]] +name = "merkle_proof" +version = "0.2.0" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#348fbdb83816ba213945e5c7fcd8c5c5342aa0ed" +dependencies = [ + "alloy-primitives", + "ethereum_hashing", + "fixed_bytes 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", + "safe_arith 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", ] [[package]] @@ -3997,6 +4812,22 @@ dependencies = [ "prometheus", ] +[[package]] +name = "metrics" +version = "0.2.0" +source = "git+https://github.com/agemanning/lighthouse?branch=modularize-vc#75a58586623f84f0fd4b0a5c1de43edfc8010a78" +dependencies = [ + "prometheus", +] + +[[package]] +name = "metrics" +version = "0.2.0" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#348fbdb83816ba213945e5c7fcd8c5c5342aa0ed" +dependencies = [ + "prometheus", +] + 
[[package]] name = "milhouse" version = "0.3.0" @@ -4333,6 +5164,26 @@ dependencies = [ "libc", ] +[[package]] +name = "num_enum" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e613fc340b2220f734a8595782c551f1250e969d87d3be1ae0579e8d4065179" +dependencies = [ + "num_enum_derive", +] + +[[package]] +name = "num_enum_derive" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + [[package]] name = "object" version = "0.36.7" @@ -4579,6 +5430,16 @@ dependencies = [ "ucd-trie", ] +[[package]] +name = "pharos" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9567389417feee6ce15dd6527a8a1ecac205ef62c2932bcf3d9f6fc5b78b414" +dependencies = [ + "futures", + "rustc_version 0.4.1", +] + [[package]] name = "pin-project" version = "1.1.8" @@ -4691,8 +5552,17 @@ name = "pretty_reqwest_error" version = "0.1.0" source = "git+https://github.com/sigp/lighthouse?branch=anchor#997991f5513f22bb240816c2e2400cf6a1819a0c" dependencies = [ - "reqwest", - "sensitive_url", + "reqwest 0.11.27", + "sensitive_url 0.1.0 (git+https://github.com/sigp/lighthouse?branch=anchor)", +] + +[[package]] +name = "pretty_reqwest_error" +version = "0.1.0" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#348fbdb83816ba213945e5c7fcd8c5c5342aa0ed" +dependencies = [ + "reqwest 0.11.27", + "sensitive_url 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", ] [[package]] @@ -4724,6 +5594,28 @@ dependencies = [ "toml_edit", ] +[[package]] +name = "proc-macro-error-attr2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" +dependencies = [ + "proc-macro2", + "quote", +] + +[[package]] +name = "proc-macro-error2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" +dependencies = [ + "proc-macro-error-attr2", + "proc-macro2", + "quote", + "syn 2.0.96", +] + [[package]] name = "proc-macro2" version = "1.0.92" @@ -4739,7 +5631,7 @@ version = "0.1.0" dependencies = [ "async-channel", "futures", - "metrics", + "metrics 0.2.0 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", "num_cpus", "serde", "task_executor", @@ -4838,11 +5730,11 @@ source = "git+https://github.com/sigp/lighthouse?branch=anchor#997991f5513f22bb2 dependencies = [ "ethereum_ssz", "ethereum_ssz_derive", - "safe_arith", + "safe_arith 0.1.0 (git+https://github.com/sigp/lighthouse?branch=anchor)", "serde", "serde_yaml", "superstruct", - "types", + "types 0.2.1 (git+https://github.com/sigp/lighthouse?branch=anchor)", ] [[package]] @@ -4921,7 +5813,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash", - "rustls 0.23.20", + "rustls 0.23.21", "socket2", "thiserror 2.0.10", "tokio", @@ -4939,7 +5831,7 @@ dependencies = [ "rand", "ring 0.17.8", "rustc-hash", - "rustls 0.23.20", + "rustls 0.23.21", "rustls-pki-types", "slab", "thiserror 2.0.10", @@ -5145,12 +6037,12 @@ dependencies = [ "encoding_rs", "futures-core", "futures-util", - "h2", + "h2 0.3.26", "http 0.2.12", "http-body 0.4.6", "hyper 0.14.32", - "hyper-rustls", - "hyper-tls", + 
"hyper-rustls 0.24.2", + "hyper-tls 0.5.0", "ipnet", "js-sys", "log", @@ -5160,7 +6052,7 @@ dependencies = [ "percent-encoding", "pin-project-lite", "rustls 0.21.12", - "rustls-pemfile", + "rustls-pemfile 1.0.4", "serde", "serde_json", "serde_urlencoded", @@ -5168,7 +6060,7 @@ dependencies = [ "system-configuration 0.5.1", "tokio", "tokio-native-tls", - "tokio-rustls", + "tokio-rustls 0.24.1", "tokio-util", "tower-service", "url", @@ -5176,10 +6068,54 @@ dependencies = [ "wasm-bindgen-futures", "wasm-streams", "web-sys", - "webpki-roots", + "webpki-roots 0.25.4", "winreg", ] +[[package]] +name = "reqwest" +version = "0.12.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43e734407157c3c2034e0258f5e4473ddb361b1e85f95a66690d67264d7cd1da" +dependencies = [ + "base64 0.22.1", + "bytes", + "encoding_rs", + "futures-core", + "futures-util", + "h2 0.4.7", + "http 1.2.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.5.2", + "hyper-rustls 0.27.5", + "hyper-tls 0.6.0", + "hyper-util", + "ipnet", + "js-sys", + "log", + "mime", + "native-tls", + "once_cell", + "percent-encoding", + "pin-project-lite", + "rustls-pemfile 2.2.0", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper 1.0.2", + "system-configuration 0.6.1", + "tokio", + "tokio-native-tls", + "tower", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "windows-registry", +] + [[package]] name = "reqwest-eventsource" version = "0.5.0" @@ -5192,7 +6128,7 @@ dependencies = [ "mime", "nom", "pin-project-lite", - "reqwest", + "reqwest 0.11.27", "thiserror 1.0.69", ] @@ -5436,9 +6372,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.20" +version = "0.23.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5065c3f250cbd332cd894be57c40fa52387247659b14a2d6041d121547903b1b" +checksum = "8f287924602bf649d949c63dc8ac8b235fa5387d394020705b80c4eb597ce5b8" dependencies = [ "once_cell", "ring 0.17.8", @@ -5457,6 +6393,15 @@ dependencies = [ "base64 0.21.7", ] +[[package]] +name = "rustls-pemfile" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" +dependencies = [ + "rustls-pki-types", +] + [[package]] name = "rustls-pki-types" version = "1.10.1" @@ -5527,6 +6472,16 @@ name = "safe_arith" version = "0.1.0" source = "git+https://github.com/sigp/lighthouse?branch=anchor#997991f5513f22bb240816c2e2400cf6a1819a0c" +[[package]] +name = "safe_arith" +version = "0.1.0" +source = "git+https://github.com/agemanning/lighthouse?branch=modularize-vc#75a58586623f84f0fd4b0a5c1de43edfc8010a78" + +[[package]] +name = "safe_arith" +version = "0.1.0" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#348fbdb83816ba213945e5c7fcd8c5c5342aa0ed" + [[package]] name = "salsa20" version = "0.8.1" @@ -5554,6 +6509,17 @@ dependencies = [ "parking_lot", ] +[[package]] +name = "schnellru" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "356285bbf17bea63d9e52e96bd18f039672ac92b55b8cb997d6162a2a37d1649" +dependencies = [ + "ahash", + "cfg-if", + "hashbrown 0.13.2", +] + [[package]] name = "scopeguard" version = "1.2.0" @@ -5629,24 +6595,48 @@ dependencies = [ ] [[package]] -name = "semver" -version = "1.0.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3cb6eb87a131f756572d7fb904f6e7b68633f09cca868c5df1c4b8d1a694bbba" - -[[package]] -name = "semver-parser" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9900206b54a3527fdc7b8a938bffd94a568bac4f4aa8113b209df75a09c0dec2" +name = "semver" +version = "1.0.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3cb6eb87a131f756572d7fb904f6e7b68633f09cca868c5df1c4b8d1a694bbba" + +[[package]] +name = "semver-parser" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9900206b54a3527fdc7b8a938bffd94a568bac4f4aa8113b209df75a09c0dec2" +dependencies = [ + "pest", +] + +[[package]] +name = "send_wrapper" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" + +[[package]] +name = "sensitive_url" +version = "0.1.0" +source = "git+https://github.com/sigp/lighthouse?branch=anchor#997991f5513f22bb240816c2e2400cf6a1819a0c" +dependencies = [ + "serde", + "url", +] + +[[package]] +name = "sensitive_url" +version = "0.1.0" +source = "git+https://github.com/agemanning/lighthouse?branch=modularize-vc#75a58586623f84f0fd4b0a5c1de43edfc8010a78" dependencies = [ - "pest", + "serde", + "url", ] [[package]] name = "sensitive_url" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=anchor#997991f5513f22bb240816c2e2400cf6a1819a0c" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#348fbdb83816ba213945e5c7fcd8c5c5342aa0ed" dependencies = [ "serde", "url", @@ -5828,7 +6818,7 @@ dependencies = [ "ssv_types", "tokio", "tracing", - "types", + "types 0.2.1 (git+https://github.com/sigp/lighthouse?branch=unstable)", ] [[package]] @@ -5854,7 +6844,7 @@ dependencies = [ "serde", "serde_json", "tempfile", - "types", + "types 0.2.1 (git+https://github.com/sigp/lighthouse?branch=anchor)", ] [[package]] @@ -5962,11 +6952,11 @@ dependencies = [ [[package]] name = "slot_clock" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=anchor#997991f5513f22bb240816c2e2400cf6a1819a0c" +source = "git+https://github.com/agemanning/lighthouse?branch=modularize-vc#75a58586623f84f0fd4b0a5c1de43edfc8010a78" dependencies = [ - "metrics", + "metrics 0.2.0 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", "parking_lot", - "types", + "types 0.2.1 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", ] [[package]] @@ -6041,7 +7031,7 @@ dependencies = [ "derive_more 1.0.0", "openssl", "rusqlite", - "types", + "types 0.2.1 (git+https://github.com/sigp/lighthouse?branch=unstable)", ] [[package]] @@ -6092,7 +7082,16 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" dependencies = [ - "strum_macros", + "strum_macros 0.24.3", +] + +[[package]] +name = "strum" +version = "0.26.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" +dependencies = [ + "strum_macros 0.26.4", ] [[package]] @@ -6108,6 +7107,19 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "strum_macros" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" +dependencies = [ + "heck 0.5.0", + "proc-macro2", + "quote", + "rustversion", + "syn 2.0.96", +] + [[package]] name = "subtle" version = "2.6.1" @@ -6135,7 +7147,27 @@ source = "git+https://github.com/sigp/lighthouse?branch=anchor#997991f5513f22bb2 dependencies = [ "alloy-primitives", "ethereum_hashing", - "fixed_bytes", + "fixed_bytes 0.1.0 (git+https://github.com/sigp/lighthouse?branch=anchor)", +] + +[[package]] +name = "swap_or_not_shuffle" +version = "0.2.0" +source = "git+https://github.com/agemanning/lighthouse?branch=modularize-vc#75a58586623f84f0fd4b0a5c1de43edfc8010a78" +dependencies = [ + "alloy-primitives", + "ethereum_hashing", + "fixed_bytes 0.1.0 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", +] + +[[package]] +name = "swap_or_not_shuffle" +version = "0.2.0" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#348fbdb83816ba213945e5c7fcd8c5c5342aa0ed" +dependencies = [ + "alloy-primitives", + "ethereum_hashing", + "fixed_bytes 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", ] [[package]] @@ -6160,6 +7192,18 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "syn-solidity" +version = "0.8.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31e89d8bf2768d277f40573c83a02a099e96d96dd3104e13ea676194e61ac4b0" +dependencies = [ + "paste", + "proc-macro2", + "quote", + "syn 2.0.96", +] + [[package]] name = "sync_wrapper" version = "0.1.2" @@ -6171,6 +7215,9 @@ name = "sync_wrapper" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" +dependencies = [ + "futures-core", +] [[package]] name = "synstructure" @@ -6246,12 +7293,12 @@ checksum = "c63f48baada5c52e65a29eef93ab4f8982681b67f9e8d29c7b05abcfec2b9ffe" [[package]] name = "task_executor" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=anchor#997991f5513f22bb240816c2e2400cf6a1819a0c" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#348fbdb83816ba213945e5c7fcd8c5c5342aa0ed" dependencies = [ "async-channel", "futures", "logging", - "metrics", + "metrics 0.2.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "slog", "sloggers", "tokio", @@ -6302,6 +7349,24 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "test_random_derive" +version = "0.2.0" +source = "git+https://github.com/agemanning/lighthouse?branch=modularize-vc#75a58586623f84f0fd4b0a5c1de43edfc8010a78" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "test_random_derive" +version = "0.2.0" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#348fbdb83816ba213945e5c7fcd8c5c5342aa0ed" +dependencies = [ + "quote", + "syn 1.0.109", +] + [[package]] name = "thiserror" version = "1.0.69" @@ -6484,6 +7549,44 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-rustls" +version = "0.26.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f6d0975eaace0cf0fcadee4e4aaa5da15b5c079146f2cffb67c113be122bf37" +dependencies = [ + "rustls 0.23.21", + "tokio", +] + +[[package]] +name = "tokio-stream" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" +dependencies = [ + 
"futures-core", + "pin-project-lite", + "tokio", + "tokio-util", +] + +[[package]] +name = "tokio-tungstenite" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edc5f74e248dc973e0dbb7b74c7e0d6fcc301c694ff50049504004ef4d0cdcd9" +dependencies = [ + "futures-util", + "log", + "rustls 0.23.21", + "rustls-pki-types", + "tokio", + "tokio-rustls 0.26.1", + "tungstenite", + "webpki-roots 0.26.7", +] + [[package]] name = "tokio-util" version = "0.7.13" @@ -6690,6 +7793,26 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" +[[package]] +name = "tungstenite" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18e5b8366ee7a95b16d32197d0b2604b43a0be89dc5fac9f8e96ccafbaedda8a" +dependencies = [ + "byteorder", + "bytes", + "data-encoding", + "http 1.2.0", + "httparse", + "log", + "rand", + "rustls 0.23.21", + "rustls-pki-types", + "sha1", + "thiserror 1.0.69", + "utf-8", +] + [[package]] name = "typenum" version = "1.17.0" @@ -6704,23 +7827,121 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "arbitrary", - "bls", - "compare_fields", - "compare_fields_derive", + "bls 0.2.0 (git+https://github.com/sigp/lighthouse?branch=anchor)", + "compare_fields 0.2.0 (git+https://github.com/sigp/lighthouse?branch=anchor)", + "compare_fields_derive 0.2.0 (git+https://github.com/sigp/lighthouse?branch=anchor)", + "derivative", + "eth2_interop_keypairs 0.2.0 (git+https://github.com/sigp/lighthouse?branch=anchor)", + "ethereum_hashing", + "ethereum_serde_utils", + "ethereum_ssz", + "ethereum_ssz_derive", + "fixed_bytes 0.1.0 (git+https://github.com/sigp/lighthouse?branch=anchor)", + "hex", + "int_to_bytes 0.2.0 (git+https://github.com/sigp/lighthouse?branch=anchor)", + "itertools 0.10.5", + "kzg 0.1.0 (git+https://github.com/sigp/lighthouse?branch=anchor)", + "log", + "maplit", + "merkle_proof 0.2.0 (git+https://github.com/sigp/lighthouse?branch=anchor)", + "metastruct", + "milhouse", + "parking_lot", + "rand", + "rand_xorshift", + "rayon", + "regex", + "rpds", + "rusqlite", + "safe_arith 0.1.0 (git+https://github.com/sigp/lighthouse?branch=anchor)", + "serde", + "serde_json", + "serde_yaml", + "slog", + "smallvec", + "ssz_types", + "superstruct", + "swap_or_not_shuffle 0.2.0 (git+https://github.com/sigp/lighthouse?branch=anchor)", + "tempfile", + "test_random_derive 0.2.0 (git+https://github.com/sigp/lighthouse?branch=anchor)", + "tree_hash", + "tree_hash_derive", +] + +[[package]] +name = "types" +version = "0.2.1" +source = "git+https://github.com/agemanning/lighthouse?branch=modularize-vc#75a58586623f84f0fd4b0a5c1de43edfc8010a78" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "arbitrary", + "bls 0.2.0 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", + "compare_fields 0.2.0 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", + "compare_fields_derive 0.2.0 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", + "derivative", + "eth2_interop_keypairs 0.2.0 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", + "ethereum_hashing", + "ethereum_serde_utils", + "ethereum_ssz", + "ethereum_ssz_derive", + "fixed_bytes 0.1.0 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", + "hex", 
+ "int_to_bytes 0.2.0 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", + "itertools 0.10.5", + "kzg 0.1.0 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", + "log", + "maplit", + "merkle_proof 0.2.0 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", + "metastruct", + "milhouse", + "parking_lot", + "rand", + "rand_xorshift", + "rayon", + "regex", + "rpds", + "rusqlite", + "safe_arith 0.1.0 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", + "serde", + "serde_json", + "serde_yaml", + "slog", + "smallvec", + "ssz_types", + "superstruct", + "swap_or_not_shuffle 0.2.0 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", + "tempfile", + "test_random_derive 0.2.0 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", + "tree_hash", + "tree_hash_derive", +] + +[[package]] +name = "types" +version = "0.2.1" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#348fbdb83816ba213945e5c7fcd8c5c5342aa0ed" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "arbitrary", + "bls 0.2.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", + "compare_fields 0.2.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", + "compare_fields_derive 0.2.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "derivative", - "eth2_interop_keypairs", + "eth2_interop_keypairs 0.2.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "ethereum_hashing", "ethereum_serde_utils", "ethereum_ssz", "ethereum_ssz_derive", - "fixed_bytes", + "fixed_bytes 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "hex", - "int_to_bytes", + "int_to_bytes 0.2.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "itertools 0.10.5", - "kzg", + "kzg 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "log", "maplit", - "merkle_proof", + "merkle_proof 0.2.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "metastruct", "milhouse", "parking_lot", @@ -6730,7 +7951,7 @@ dependencies = [ "regex", "rpds", "rusqlite", - "safe_arith", + "safe_arith 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "serde", "serde_json", "serde_yaml", @@ -6738,9 +7959,9 @@ dependencies = [ "smallvec", "ssz_types", "superstruct", - "swap_or_not_shuffle", + "swap_or_not_shuffle 0.2.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "tempfile", - "test_random_derive", + "test_random_derive 0.2.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "tree_hash", "tree_hash_derive", ] @@ -6862,7 +8083,7 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "unused_port" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=anchor#997991f5513f22bb240816c2e2400cf6a1819a0c" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#348fbdb83816ba213945e5c7fcd8c5c5342aa0ed" dependencies = [ "lru_cache", "parking_lot", @@ -6879,6 +8100,12 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "utf-8" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" + [[package]] name = "utf16_iter" version = "1.0.5" @@ -6910,9 +8137,9 @@ dependencies = [ [[package]] name = "validator_metrics" version = "0.1.0" -source = 
"git+https://github.com/sigp/lighthouse?branch=anchor#997991f5513f22bb240816c2e2400cf6a1819a0c" +source = "git+https://github.com/agemanning/lighthouse?branch=modularize-vc#75a58586623f84f0fd4b0a5c1de43edfc8010a78" dependencies = [ - "metrics", + "metrics 0.2.0 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", ] [[package]] @@ -7058,6 +8285,20 @@ dependencies = [ "web-sys", ] +[[package]] +name = "wasmtimer" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0048ad49a55b9deb3953841fa1fc5858f0efbcb7a18868c899a360269fac1b23" +dependencies = [ + "futures", + "js-sys", + "parking_lot", + "pin-utils", + "slab", + "wasm-bindgen", +] + [[package]] name = "web-sys" version = "0.3.76" @@ -7084,6 +8325,15 @@ version = "0.25.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" +[[package]] +name = "webpki-roots" +version = "0.26.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d642ff16b7e79272ae451b7322067cdc17cadf68c23264be9d94a32319efe7e" +dependencies = [ + "rustls-pki-types", +] + [[package]] name = "widestring" version = "0.4.3" @@ -7155,7 +8405,18 @@ version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9dcc5b895a6377f1ab9fa55acedab1fd5ac0db66ad1e6c7f47e28a22e446a5dd" dependencies = [ - "windows-result", + "windows-result 0.1.2", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-registry" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" +dependencies = [ + "windows-result 0.2.0", + "windows-strings", "windows-targets 0.52.6", ] @@ -7168,6 +8429,25 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-result" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-strings" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +dependencies = [ + "windows-result 0.2.0", + "windows-targets 0.52.6", +] + [[package]] name = "windows-sys" version = "0.45.0" @@ -7413,6 +8693,25 @@ version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" +[[package]] +name = "ws_stream_wasm" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7999f5f4217fe3818726b66257a4475f71e74ffd190776ad053fa159e50737f5" +dependencies = [ + "async_io_stream", + "futures", + "js-sys", + "log", + "pharos", + "rustc_version 0.4.1", + "send_wrapper", + "thiserror 1.0.69", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + [[package]] name = "wyz" version = "0.5.1" diff --git a/Cargo.toml b/Cargo.toml index bf5b0842..11a76fee 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,17 +1,18 @@ [workspace] # Extra tooling projects will be added. 
members = [ - "anchor", - "anchor/client", - "anchor/common/qbft", - "anchor/common/ssv_types", - "anchor/common/version", - "anchor/database", - "anchor/http_api", - "anchor/http_metrics", - "anchor/network", - "anchor/processor", - "anchor/signature_collector", + "anchor", + "anchor/client", + "anchor/common/qbft", + "anchor/common/ssv_types", + "anchor/common/version", + "anchor/database", + "anchor/eth", + "anchor/http_api", + "anchor/http_metrics", + "anchor/network", + "anchor/processor", + "anchor/signature_collector", ] resolver = "2" @@ -20,55 +21,69 @@ edition = "2021" # NOTE: The block below is currently not sorted by `cargo sort`. Please keep sorted manually, especially during merges! [workspace.dependencies] +client = { path = "anchor/client" } +eth = { path = "anchor/eth" } +qbft = { path = "anchor/common/qbft" } +http_api = { path = "anchor/http_api" } +http_metrics = { path = "anchor/http_metrics" } +database = { path = "anchor/database" } +network = { path = "anchor/network" } +version = { path = "anchor/common/version" } +processor = { path = "anchor/processor" } +ssv_types = { path = "anchor/common/ssv_types" } +lighthouse_network = { git = "https://github.com/sigp/lighthouse", branch = "unstable" } +task_executor = { git = "https://github.com/sigp/lighthouse", branch = "unstable", default-features = false, features = [ + "tracing", +] } +metrics = { git = "https://github.com/agemanning/lighthouse", branch = "modularize-vc" } +validator_metrics = { git = "https://github.com/agemanning/lighthouse", branch = "modularize-vc" } +sensitive_url = { git = "https://github.com/agemanning/lighthouse", branch = "modularize-vc" } +slot_clock = { git = "https://github.com/agemanning/lighthouse", branch = "modularize-vc" } +unused_port = { git = "https://github.com/sigp/lighthouse", branch = "unstable" } +types = { git = "https://github.com/sigp/lighthouse", branch = "unstable" } async-channel = "1.9" axum = "0.7.7" base64 = "0.22.1" clap = { version = "4.5.15", features = ["derive", "wrap_help"] } -client = { path = "anchor/client" } dashmap = "6.1.0" -database = { path = "anchor/database" } derive_more = { version = "1.0.0", features = ["full"] } dirs = "5.0.1" discv5 = "0.9.0" either = "1.13.0" futures = "0.3.30" health_metrics = { git = "https://github.com/sigp/lighthouse", branch = "anchor" } -http_api = { path = "anchor/http_api" } -http_metrics = { path = "anchor/http_metrics" } hyper = "1.4" -lighthouse_network = { git = "https://github.com/sigp/lighthouse", branch = "anchor" } -metrics = { git = "https://github.com/sigp/lighthouse", branch = "anchor" } -network = { path = "anchor/network" } num_cpus = "1" openssl = "0.10.68" parking_lot = "0.12" -processor = { path = "anchor/processor" } -qbft = { path = "anchor/common/qbft" } rusqlite = "0.28.0" -sensitive_url = { git = "https://github.com/sigp/lighthouse", branch = "anchor" } serde = { version = "1.0.208", features = ["derive"] } -slot_clock = { git = "https://github.com/sigp/lighthouse", branch = "anchor" } -ssv_types = { path = "anchor/common/ssv_types" } strum = { version = "0.24", features = ["derive"] } -task_executor = { git = "https://github.com/sigp/lighthouse", branch = "anchor", default-features = false, features = [ "tracing", ] } tokio = { version = "1.39.2", features = [ - "rt", - "rt-multi-thread", - "time", - "signal", - "macros", + "rt", + "rt-multi-thread", + "time", + "signal", + "macros", ] } tower-http = { 
version = "0.6", features = ["cors"] } tracing = "0.1.40" tracing-subscriber = { version = "0.3.18", features = ["fmt", "env-filter"] } -types = { git = "https://github.com/sigp/lighthouse", branch = "anchor" } -unused_port = { git = "https://github.com/sigp/lighthouse", branch = "anchor" } -validator_metrics = { git = "https://github.com/sigp/lighthouse", branch = "anchor" } -version = { path = "anchor/common/version" } +hex = "0.4.3" +alloy = { version = "0.6.4", features = [ + "sol-types", + "transports", + "json", + "contract", + "pubsub", + "provider-ws", + "rpc-types", + "rlp", +] } +reqwest = "0.12.12" [profile.maxperf] inherits = "release" lto = "fat" codegen-units = 1 incremental = false - diff --git a/anchor/database/src/cluster_operations.rs b/anchor/database/src/cluster_operations.rs index 30919a7c..4dd2afd3 100644 --- a/anchor/database/src/cluster_operations.rs +++ b/anchor/database/src/cluster_operations.rs @@ -140,8 +140,8 @@ impl NetworkDatabase { // bump the nonce in memory if !self.state.single_state.nonces.contains_key(owner) { - // if it does not yet exist in memory, then create an entry and set it to one - self.state.single_state.nonces.insert(*owner, 1); + // if it does not yet exist in memory, then create an entry and set it to zero + self.state.single_state.nonces.insert(*owner, 0); } else { // otherwise, just increment the entry let mut entry = self diff --git a/anchor/database/src/operator_operations.rs b/anchor/database/src/operator_operations.rs index 74e47426..8cda5a8a 100644 --- a/anchor/database/src/operator_operations.rs +++ b/anchor/database/src/operator_operations.rs @@ -34,7 +34,7 @@ impl NetworkDatabase { // Check to see if this operator is the current operator let own_id = self.state.single_state.id.load(Ordering::Relaxed); - if own_id == u64::MAX { + if own_id == (u64::MAX / 2) { // If the keys match, this is the current operator so we want to save the id let keys_match = pem_key == self.pubkey.public_key_to_pem().unwrap_or_default(); if keys_match { diff --git a/anchor/database/src/sql_operations.rs b/anchor/database/src/sql_operations.rs index fb182294..40b2c8d5 100644 --- a/anchor/database/src/sql_operations.rs +++ b/anchor/database/src/sql_operations.rs @@ -132,7 +132,7 @@ pub(crate) static SQL: LazyLock> = LazyLock: m.insert(SqlStatement::GetAllNonces, "SELECT * FROM nonce"); m.insert( SqlStatement::BumpNonce, - "INSERT INTO nonce (owner, nonce) VALUES (?1, 1) + "INSERT INTO nonce (owner, nonce) VALUES (?1, 0) ON CONFLICT (owner) DO UPDATE SET nonce = nonce + 1", ); diff --git a/anchor/database/src/state.rs b/anchor/database/src/state.rs index 1bf491b7..11feaba0 100644 --- a/anchor/database/src/state.rs +++ b/anchor/database/src/state.rs @@ -34,15 +34,9 @@ impl NetworkState { let id = if let Ok(Some(operator_id)) = Self::does_self_exist(&conn, pubkey) { operator_id } else { - // If it does not exist, just default the state since we do not know who we are - return Ok(Self { - multi_state: MultiState { - shares: MultiIndexMap::default(), - validator_metadata: MultiIndexMap::default(), - clusters: MultiIndexMap::default(), - }, - single_state: SingleState::default(), - }); + // It does not exist, just default to some impossible operator + // SQL bounded by u32::max + OperatorId(u64::MAX / 2) }; // First Phase: Fetch data from the database @@ -297,13 +291,13 @@ impl NetworkDatabase { .load(Ordering::Relaxed) } - /// Get the nonce of the owner if it exists - pub fn get_nonce(&self, owner: &Address) -> u16 { + /// Get the next nonce 
of the owner if it exists
+    pub fn get_next_nonce(&self, owner: &Address) -> u16 {
         self.state
             .single_state
             .nonces
             .get(owner)
-            .map(|v| *v)
+            .map(|v| *v + 1)
             .unwrap_or(0)
     }
 }
diff --git a/anchor/database/src/tests/state_tests.rs b/anchor/database/src/tests/state_tests.rs
index 9e3b6a96..598b4d6c 100644
--- a/anchor/database/src/tests/state_tests.rs
+++ b/anchor/database/src/tests/state_tests.rs
@@ -126,7 +126,7 @@ mod state_database_tests {
         let owner = Address::random();
 
         // this is the first time getting the nonce, so it should be zero
-        let nonce = fixture.db.get_nonce(&owner);
+        let nonce = fixture.db.get_next_nonce(&owner);
         assert_eq!(nonce, 0);
 
         // increment the nonce and then confirm that it is one
         fixture
             .db
             .bump_nonce(&owner)
             .expect("Failed in increment nonce");
-        let nonce = fixture.db.get_nonce(&owner);
+        let nonce = fixture.db.get_next_nonce(&owner);
         assert_eq!(nonce, 1);
     }
 
@@ -153,6 +153,6 @@ mod state_database_tests {
             .expect("Failed to create database");
 
         // confirm that nonce is 1
-        assert_eq!(fixture.db.get_nonce(&owner), 1);
+        assert_eq!(fixture.db.get_next_nonce(&owner), 1);
     }
 }
diff --git a/anchor/eth/Cargo.toml b/anchor/eth/Cargo.toml
new file mode 100644
index 00000000..5c9c2ab6
--- /dev/null
+++ b/anchor/eth/Cargo.toml
@@ -0,0 +1,25 @@
+[package]
+name = "eth"
+version = "0.1.0"
+edition = { workspace = true }
+authors = ["Sigma Prime <contact@sigmaprime.io>"]
+
+[[bin]]
+name = "execution"
+path = "execution.rs"
+
+[dependencies]
+alloy = { workspace = true }
+base64 = { workspace = true }
+database = { workspace = true }
+futures = { workspace = true }
+hex = { workspace = true }
+openssl = { workspace = true }
+rand = "0.8.5"
+reqwest = { workspace = true }
+serde = { workspace = true }
+ssv_types = { workspace = true }
+tokio = { workspace = true }
+tracing = { workspace = true }
+tracing-subscriber = { workspace = true }
+types = { workspace = true }
diff --git a/anchor/eth/README.md b/anchor/eth/README.md
new file mode 100644
index 00000000..106a9fa5
--- /dev/null
+++ b/anchor/eth/README.md
@@ -0,0 +1,36 @@
+## Execution Layer
+This crate implements the execution layer component of the SSV node, responsible for monitoring and processing SSV network events on Ethereum L1 networks (Mainnet and Holesky).
+
+## Overview
+The execution layer client maintains synchronization with the SSV network contract by:
+* Processing historical events from contract deployment
+* Monitoring live contract events
+* Managing validator and operator state changes
+* Handling cluster lifecycle events
+
+## Components
+### SSV Event Syncer
+This is the core synchronization engine that:
+* Manages connections to an Ethereum execution client
+* Handles historical and live event processing
+* Maintains event ordering and state consistency
+* Processes events in configurable batch sizes
+
+### Event Processor
+This validates incoming event logs and persists the resulting state changes to the database.
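+
+As a rough sketch of how these components are wired together (the endpoints and database path below are placeholders; `execution.rs` in this crate is the authoritative example):
+
+```rust
+use std::{path::Path, sync::Arc};
+
+use database::NetworkDatabase;
+use eth::{Config, Network, SsvEventSyncer};
+
+async fn start_event_sync(rsa_pubkey: openssl::rsa::Rsa<openssl::pkey::Public>) {
+    // Point the syncer at an execution client and pick a network
+    let config = Config {
+        http_url: "http://127.0.0.1:8545".into(),
+        ws_url: "ws://127.0.0.1:8546".into(),
+        beacon_url: "http://127.0.0.1:5052".into(),
+        network: Network::Holesky,
+    };
+
+    // The database is shared with the rest of the node
+    let db = Arc::new(NetworkDatabase::new(Path::new("db.sqlite"), &rsa_pubkey).unwrap());
+
+    // `sync()` runs a historical sync and then transitions into live sync, so it is
+    // expected to run for the lifetime of the node
+    let mut event_syncer = SsvEventSyncer::new(db, config).await.unwrap();
+    tokio::spawn(async move { event_syncer.sync().await });
+}
+```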
+
+## Event Types
+```rust
+event OperatorAdded(uint64 indexed operatorId, address indexed owner, bytes publicKey, uint256 fee)
+event OperatorRemoved(uint64 indexed operatorId)
+event ValidatorAdded(address indexed owner, uint64[] operatorIds, bytes publicKey, bytes shares, Cluster cluster)
+event ValidatorRemoved(address indexed owner, uint64[] operatorIds, bytes publicKey, Cluster cluster)
+event ClusterLiquidated(address indexed owner, uint64[] operatorIds, Cluster cluster)
+event ClusterReactivated(address indexed owner, uint64[] operatorIds, Cluster cluster)
+event FeeRecipientAddressUpdated(address indexed owner, address recipientAddress)
+event ValidatorExited(address indexed owner, uint64[] operatorIds, bytes publicKey)
+```
+
+## Contract Addresses
+* Mainnet: `0xDD9BC35aE942eF0cFa76930954a156B3fF30a4E1`
+* Holesky: `0x38A4794cCEd47d3baf7370CcC43B560D3a1beEFA`
diff --git a/anchor/eth/execution.rs b/anchor/eth/execution.rs
new file mode 100644
index 00000000..0fd78fa1
--- /dev/null
+++ b/anchor/eth/execution.rs
@@ -0,0 +1,63 @@
+use base64::prelude::*;
+use database::NetworkDatabase;
+use eth::{Config, Network, SsvEventSyncer};
+use openssl::rsa::Rsa;
+use std::path::Path;
+use std::sync::Arc;
+use tracing_subscriber::{fmt, prelude::*, EnvFilter};
+
+// This is a test binary to execute event syncing for the SSV Network
+#[tokio::main]
+async fn main() {
+    // Setup a log filter & tracing
+    let filter = EnvFilter::builder()
+        .parse("info,hyper=off,hyper_util=off,alloy_transport_http=off,reqwest=off,alloy_rpc_client=off")
+        .expect("filter should be valid");
+    tracing_subscriber::registry()
+        .with(fmt::layer())
+        .with(filter)
+        .init();
+
+    // Dummy configuration with endpoint and network
+    let rpc_endpoint = "http://127.0.0.1:8545";
+    let _ws_endpoint = "ws://127.0.0.1:8546";
+    let ws_endpoint = "wss://eth.merkle.io";
+    let beacon_endpoint = "http://127.0.0.1:5052";
+    let config = Config {
+        http_url: String::from(rpc_endpoint),
+        ws_url: String::from(ws_endpoint),
+        beacon_url: String::from(beacon_endpoint),
+        network: Network::Mainnet,
+    };
+
+    // Setup mock operator data
+    let path = Path::new("db.sqlite");
+    let pem_data = "LS0tLS1CRUdJTiBSU0EgUFVCTElDIEtFWS0tLS0tCk1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBMVg2MUFXY001QUNLaGN5MTlUaEIKby9HMWlhN1ByOVUralJ5aWY5ZjAyRG9sd091V2ZLLzdSVUlhOEhEbHBvQlVERDkwRTVQUGdJSy9sTXB4RytXbwpwQ2N5bTBpWk9UT0JzNDE5bEh3TzA4bXFja1JsZEg5WExmbmY2UThqWFR5Ym1yYzdWNmwyNVprcTl4U0owbHR1CndmTnVTSzNCZnFtNkQxOUY0aTVCbmVaSWhjRVJTYlFLWDFxbWNqYnZFL2cyQko4TzhaZUgrd0RzTHJiNnZXQVIKY3BYWG1uelE3Vlp6ZklHTGVLVU1CTTh6SW0rcXI4RGZ4SEhSeVU1QTE3cFU4cy9MNUp5RXE1RGJjc2Q2dHlnbQp5UE9BYUNzWldVREI3UGhLOHpUWU9WYi9MM1lnSTU4bjFXek5IM0s5cmFreUppTmUxTE9GVVZzQTFDUnhtQ2YzCmlRSURBUUFCCi0tLS0tRU5EIFJTQSBQVUJMSUMgS0VZLS0tLS0K";
+    let pem_decoded = BASE64_STANDARD.decode(pem_data).unwrap();
+    let mut pem_string = String::from_utf8(pem_decoded).unwrap();
+    pem_string = pem_string
+        .replace(
+            "-----BEGIN RSA PUBLIC KEY-----",
+            "-----BEGIN PUBLIC KEY-----",
+        )
+        .replace("-----END RSA PUBLIC KEY-----", "-----END PUBLIC KEY-----");
+    let rsa_pubkey = Rsa::public_key_from_pem(pem_string.as_bytes())
+        .map_err(|e| format!("Failed to parse RSA public key: {}", e))
+        .unwrap();
+
+    // The event syncer is spawned into a background task since it is long running and should never
+    // exit. It will communicate with the rest of the system via processor channels and constantly
+    // keep the database up to date with new data for the rest of the system
+    let db = Arc::new(NetworkDatabase::new(path, &rsa_pubkey).unwrap());
+    let mut event_syncer = SsvEventSyncer::new(db.clone(), config)
+        .await
+        .expect("Failed to construct event syncer");
+    tokio::spawn(async move {
+        // this should never return, if it does we should gracefully handle it and shut down
+        // the client.
+        event_syncer.sync().await
+    });
+    loop {
+        let _ = tokio::time::sleep(std::time::Duration::from_secs(100)).await;
+    }
+}
diff --git a/anchor/eth/src/error.rs b/anchor/eth/src/error.rs
new file mode 100644
index 00000000..493a2597
--- /dev/null
+++ b/anchor/eth/src/error.rs
@@ -0,0 +1,19 @@
+use std::fmt::Display;
+
+// Custom execution integration layer errors
+#[derive(Debug)]
+pub enum ExecutionError {
+    SyncError(String),
+    InvalidEvent(String),
+    RpcError(String),
+    DecodeError(String),
+    Misc(String),
+    Duplicate(String),
+    Database(String),
+}
+
+impl Display for ExecutionError {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "{:?}", self)
+    }
+}
diff --git a/anchor/eth/src/event_parser.rs b/anchor/eth/src/event_parser.rs
new file mode 100644
index 00000000..3cf6eed1
--- /dev/null
+++ b/anchor/eth/src/event_parser.rs
@@ -0,0 +1,40 @@
+use crate::error::ExecutionError;
+use crate::gen::SSVContract;
+use alloy::{rpc::types::Log, sol_types::SolEvent};
+
+// Standardized event decoding via common Decoder trait.
+pub trait EventDecoder {
+    type Output;
+    fn decode_from_log(log: &Log) -> Result<Self::Output, ExecutionError>;
+}
+
+macro_rules! impl_event_decoder {
+    ($($event_type:ty),* $(,)?) => {
+        $(
+            impl EventDecoder for $event_type {
+                type Output = $event_type;
+
+                fn decode_from_log(log: &Log) -> Result<Self::Output, ExecutionError> {
+                    let decoded = Self::decode_log(&log.inner, true)
+                        .map_err(|e| {
+                            ExecutionError::DecodeError(
+                                format!("Failed to decode {} event: {}", stringify!($event_type), e)
+                            )
+                        })?;
+                    Ok(decoded.data)
+                }
+            }
+        )*
+    };
+}
+
+impl_event_decoder! {
+    SSVContract::OperatorAdded,
+    SSVContract::OperatorRemoved,
+    SSVContract::ValidatorAdded,
+    SSVContract::ValidatorRemoved,
+    SSVContract::ClusterLiquidated,
+    SSVContract::ClusterReactivated,
+    SSVContract::FeeRecipientAddressUpdated,
+    SSVContract::ValidatorExited
+}
diff --git a/anchor/eth/src/event_processor.rs b/anchor/eth/src/event_processor.rs
new file mode 100644
index 00000000..dd8609e7
--- /dev/null
+++ b/anchor/eth/src/event_processor.rs
@@ -0,0 +1,520 @@
+use crate::error::ExecutionError;
+use crate::event_parser::EventDecoder;
+use crate::gen::SSVContract;
+use crate::network_actions::NetworkAction;
+use crate::util::*;
+
+use alloy::primitives::B256;
+use alloy::rpc::types::Log;
+use alloy::sol_types::SolEvent;
+use database::{NetworkDatabase, UniqueIndex};
+use ssv_types::{Cluster, Operator, OperatorId};
+use std::collections::{HashMap, HashSet};
+use std::str::FromStr;
+use std::sync::Arc;
+use tracing::{debug, error, info, instrument, trace, warn};
+use types::PublicKey;
+
+// Specific Handler for a log type
+type EventHandler = fn(&EventProcessor, &Log) -> Result<(), ExecutionError>;
+
+/// The Event Processor. This handles all verification and recording of events.
+/// It will be passed logs from the sync layer to be processed and saved into the database
+pub struct EventProcessor {
+    /// Function handlers for event processing
+    handlers: HashMap<B256, EventHandler>,
+    /// Reference to the database
+    pub db: Arc<NetworkDatabase>,
+}
+
+impl EventProcessor {
+    /// Construct a new EventProcessor
+    pub fn new(db: Arc<NetworkDatabase>) -> Self {
+        // Register log handlers for easy dispatch
+        let mut handlers: HashMap<B256, EventHandler> = HashMap::new();
+        handlers.insert(
+            SSVContract::OperatorAdded::SIGNATURE_HASH,
+            Self::process_operator_added,
+        );
+        handlers.insert(
+            SSVContract::OperatorRemoved::SIGNATURE_HASH,
+            Self::process_operator_removed,
+        );
+        handlers.insert(
+            SSVContract::ValidatorAdded::SIGNATURE_HASH,
+            Self::process_validator_added,
+        );
+        handlers.insert(
+            SSVContract::ValidatorRemoved::SIGNATURE_HASH,
+            Self::process_validator_removed,
+        );
+        handlers.insert(
+            SSVContract::ClusterLiquidated::SIGNATURE_HASH,
+            Self::process_cluster_liquidated,
+        );
+        handlers.insert(
+            SSVContract::ClusterReactivated::SIGNATURE_HASH,
+            Self::process_cluster_reactivated,
+        );
+        handlers.insert(
+            SSVContract::FeeRecipientAddressUpdated::SIGNATURE_HASH,
+            Self::process_fee_recipient_updated,
+        );
+        handlers.insert(
+            SSVContract::ValidatorExited::SIGNATURE_HASH,
+            Self::process_validator_exited,
+        );
+
+        Self { handlers, db }
+    }
+
+    /// Process a new set of logs
+    #[instrument(skip(self, logs), fields(logs_count = logs.len()))]
+    pub fn process_logs(&self, logs: Vec<Log>, live: bool) {
+        info!(logs_count = logs.len(), "Starting log processing");
+        for (index, log) in logs.iter().enumerate() {
+            trace!(log_index = index, topic = ?log.topic0(), "Processing individual log");
+
+            // extract the topic0 to retrieve log handler
+            let topic0 = log.topic0().expect("Log should always have a topic0");
+            let handler = self
+                .handlers
+                .get(topic0)
+                .expect("Handler should always exist");
+
+            // Handle the log and emit warning for any malformed events
+            if let Err(e) = handler(self, log) {
+                warn!("Malformed event: {e}");
+                continue;
+            }
+
+            // If live is true, then we are currently in a live sync and want to take some action
+            // in response to the log. Parse the log into a network action and send it off to be
+            // processed.
+            if live {
+                let action = match log.try_into() {
+                    Ok(action) => action,
+                    Err(e) => {
+                        error!("Failed to convert log into NetworkAction {e}");
+                        NetworkAction::NoOp
+                    }
+                };
+                if action != NetworkAction::NoOp {
+                    debug!(action = ?action, "Network action ready for processing");
+                    // todo!() send off somewhere
+                }
+            }
+        }
+
+        info!(logs_count = logs.len(), "Completed processing logs");
+    }
+
+    // A new Operator has been registered in the network.
+    #[instrument(skip(self, log), fields(operator_id, owner))]
+    fn process_operator_added(&self, log: &Log) -> Result<(), ExecutionError> {
+        // Destructure operator added event
+        let SSVContract::OperatorAdded {
+            operatorId, // The ID of the newly registered operator
+            owner,      // The EOA owner address
+            publicKey,  // The RSA public key
+            ..
+        } = SSVContract::OperatorAdded::decode_from_log(log)?;
+        let operator_id = OperatorId(operatorId);
+
+        debug!(operator_id = ?operator_id, owner = ?owner, "Processing operator added");
+
+        // Confirm that this operator does not already exist
+        if self.db.operator_exists(&operator_id) {
+            return Err(ExecutionError::Duplicate(format!(
+                "Operator with id {:?} already exists in database",
+                operator_id
+            )));
+        }
+
+        // Parse ABI encoded public key string and trim off 0x prefix for hex decoding
+        let public_key_str = publicKey.to_string();
+        let public_key_str = public_key_str.trim_start_matches("0x");
+        let data = hex::decode(public_key_str).map_err(|e| {
+            debug!(operator_id = ?operator_id, error = %e, "Failed to decode public key data from hex");
+            ExecutionError::InvalidEvent(
+                format!("Failed to decode public key data from hex: {e}")
+            )
+        })?;
+
+        // Make sure the data is the expected length
+        if data.len() != 704 {
+            debug!(operator_id = ?operator_id, expected = 704, actual = data.len(), "Invalid public key data length");
+            return Err(ExecutionError::InvalidEvent(format!(
+                "Invalid public key data length. Expected 704, got {}",
+                data.len()
+            )));
+        }
+
+        // Remove abi encoding information and then convert to valid utf8 string
+        let data = &data[64..];
+        let data = String::from_utf8(data.to_vec()).map_err(|e| {
+            debug!(operator_id = ?operator_id, error = %e, "Failed to convert to UTF8 String");
+            ExecutionError::InvalidEvent(format!("Failed to convert to UTF8 String: {e}"))
+        })?;
+        let data = data.trim_matches(char::from(0)).to_string();
+
+        // Construct the Operator and insert it into the database
+        let operator = Operator::new(&data, operator_id, owner).map_err(|e| {
+            debug!(
+                operator_pubkey = ?publicKey,
+                operator_id = ?operator_id,
+                error = %e,
+                "Failed to construct operator"
+            );
+            ExecutionError::InvalidEvent(format!("Failed to construct operator: {e}"))
+        })?;
+        self.db.insert_operator(&operator).map_err(|e| {
+            debug!(
+                operator_id = ?operator_id,
+                error = %e,
+                "Failed to insert operator into database"
+            );
+            ExecutionError::Database(format!("Failed to insert operator into database: {e}"))
+        })?;
+
+        debug!(
+            operator_id = ?operator_id,
+            owner = ?owner,
+            "Successfully registered operator"
+        );
+        Ok(())
+    }
+
+    // An Operator has been removed from the network
+    #[instrument(skip(self, log), fields(operator_id))]
+    fn process_operator_removed(&self, log: &Log) -> Result<(), ExecutionError> {
+        // Extract the ID of the Operator
+        let SSVContract::OperatorRemoved { operatorId } =
+            SSVContract::OperatorRemoved::decode_from_log(log)?;
+        let operator_id = OperatorId(operatorId);
+        debug!(operator_id = ?operator_id, "Processing operator removed");
+
+        // Delete the operator from database and in memory
+        self.db.delete_operator(operator_id).map_err(|e| {
+            debug!(
+                operator_id = ?operator_id,
+                error = %e,
+                "Failed to remove operator"
+            );
+            ExecutionError::Database(format!("Failed to remove operator: {e}"))
+        })?;
+
+        debug!(operator_id = ?operatorId, "Operator removed from network");
+        Ok(())
+    }
+
+    // A new validator has entered the network. This means that either a new cluster has formed
+    // and this is the first validator for the cluster, or this validator is joining an existing
+    // cluster.
+    // Perform data verification, store all relevant data, and extract the KeyShare if it
+    // belongs to this operator
+    #[instrument(skip(self, log), fields(validator_pubkey, cluster_id, owner))]
+    fn process_validator_added(&self, log: &Log) -> Result<(), ExecutionError> {
+        // Parse and destructure log
+        let SSVContract::ValidatorAdded {
+            owner,
+            operatorIds,
+            publicKey,
+            shares,
+            ..
+        } = SSVContract::ValidatorAdded::decode_from_log(log)?;
+        debug!(owner = ?owner, operator_count = operatorIds.len(), "Processing validator addition");
+
+        // Get the expected nonce and then increment it. This will happen regardless of whether
+        // the event is malformed or not
+        let nonce = self.db.get_next_nonce(&owner);
+        self.db.bump_nonce(&owner).map_err(|e| {
+            debug!(owner = ?owner, "Failed to bump nonce");
+            ExecutionError::Database(format!("Failed to bump nonce: {e}"))
+        })?;
+
+        // Process data into a usable form
+        let validator_pubkey = PublicKey::from_str(&publicKey.to_string()).map_err(|e| {
+            debug!(
+                validator_pubkey = %publicKey,
+                error = %e,
+                "Failed to create PublicKey"
+            );
+            ExecutionError::InvalidEvent(format!("Failed to create PublicKey: {e}"))
+        })?;
+        let cluster_id = compute_cluster_id(owner, operatorIds.clone());
+        let operator_ids: Vec<OperatorId> = operatorIds.iter().map(|id| OperatorId(*id)).collect();
+
+        // Perform verification on the operator set and make sure they are all registered in the
+        // network
+        debug!(cluster_id = ?cluster_id, "Validating operators");
+        validate_operators(&operator_ids).map_err(|e| {
+            ExecutionError::InvalidEvent(format!("Failed to validate operators: {e}"))
+        })?;
+        if operator_ids.iter().any(|id| !self.db.operator_exists(id)) {
+            error!(cluster_id = ?cluster_id, "One or more operators do not exist");
+            return Err(ExecutionError::Database(
+                "One or more operators do not exist".to_string(),
+            ));
+        }
+
+        // Parse the share byte stream into a list of valid Shares and then verify the signature
+        debug!(cluster_id = ?cluster_id, "Parsing and verifying shares");
+        let (signature, shares) = parse_shares(
+            shares.to_vec(),
+            &operator_ids,
+            &cluster_id,
+            &validator_pubkey,
+        )
+        .map_err(|e| {
+            debug!(cluster_id = ?cluster_id, error = %e, "Failed to parse shares");
{e}")) + })?; + + if !verify_signature(signature, nonce, &owner, &validator_pubkey) { + debug!(cluster_id = ?cluster_id, "Signature verification failed"); + return Err(ExecutionError::InvalidEvent( + "Signature verification failed".to_string(), + )); + } + + // Fetch the validator metadata + let validator_metadata = construct_validator_metadata(&validator_pubkey, &cluster_id) + .map_err(|e| { + debug!(validator_pubkey= ?validator_pubkey, "Failed to fetch validator metadata"); + ExecutionError::Database(format!("Failed to fetch validator metadata: {e}")) + })?; + + // Finally, construct and insert the full cluster and insert into the database + let cluster = Cluster { + cluster_id, + owner, + fee_recipient: owner, + faulty: 0, + liquidated: false, + cluster_members: HashSet::from_iter(operator_ids), + }; + self.db + .insert_validator(cluster, validator_metadata.clone(), shares) + .map_err(|e| { + debug!(cluster_id = ?cluster_id, error = %e, validator_metadata = ?validator_metadata.public_key, "Failed to insert validator into cluster"); + ExecutionError::Database(format!("Failed to insert validator into cluster: {e}")) + })?; + + debug!( + cluster_id = ?cluster_id, + validator_pubkey = %validator_pubkey, + "Successfully added validator" + ); + Ok(()) + } + + // A validator has been removed from the network and its respective cluster + #[instrument(skip(self, log), fields(cluster_id, validator_pubkey, owner))] + fn process_validator_removed(&self, log: &Log) -> Result<(), ExecutionError> { + // Parse and destructure log + let SSVContract::ValidatorRemoved { + owner, + operatorIds, + publicKey, + .. + } = SSVContract::ValidatorRemoved::decode_from_log(log)?; + debug!(owner = ?owner, public_key = ?publicKey, "Processing Validator Removed"); + + // Parse the public key + let validator_pubkey = PublicKey::from_str(&publicKey.to_string()).map_err(|e| { + debug!( + validator_pubkey = %publicKey, + error = %e, + "Failed to construct validator pubkey in removal" + ); + ExecutionError::InvalidEvent(format!("Failed to create PublicKey: {e}")) + })?; + + // Compute the cluster id + let cluster_id = compute_cluster_id(owner, operatorIds.clone()); + + // Get the metadata for this validator + let metadata = match self.db.metadata().get_by(&validator_pubkey) { + Some(data) => data, + None => { + debug!( + cluster_id = ?cluster_id, + "Failed to fetch validator metadata from database" + ); + return Err(ExecutionError::Database( + "Failed to fetch validator metadata from database".to_string(), + )); + } + }; + + // Get the cluster that this validator is in + let cluster = match self.db.clusters().get_by(&validator_pubkey) { + Some(data) => data, + None => { + debug!( + cluster_id = ?cluster_id, + "Failed to fetch cluster from database" + ); + return Err(ExecutionError::Database( + "Failed to fetch cluster from database".to_string(), + )); + } + }; + + // Make sure the right owner is removing this validator + if owner != cluster.owner { + debug!( + cluster_id = ?cluster_id, + expected_owner = ?cluster.owner, + actual_owner = ?owner, + "Owner mismatch for validator removal" + ); + return Err(ExecutionError::InvalidEvent(format!( + "Cluster already exists with a different owner address. Expected {}. 
Got {}", + cluster.owner, owner + ))); + } + + // Make sure this is the correct validator + if validator_pubkey != metadata.public_key { + debug!( + cluster_id = ?cluster_id, + expected_pubkey = %metadata.public_key, + actual_pubkey = %validator_pubkey, + "Validator pubkey mismatch" + ); + return Err(ExecutionError::InvalidEvent( + "Validator does not match".to_string(), + )); + } + + // Check if we are a member of this cluster, if so we are storing the share and have to + // remove it + if self.db.member_of_cluster(&cluster_id) { + debug!(cluster_id = ?cluster_id, "Removing cluster from local keystore"); + // todo!(): Remove it from the internal keystore when it is made + } + + // Remove the validator and all corresponding cluster data + self.db.delete_validator(&validator_pubkey).map_err(|e| { + debug!( + cluster_id = ?cluster_id, + pubkey = ?validator_pubkey, + error = %e, + "Failed to delete valiidator from database" + ); + ExecutionError::Database(format!("Failed to validator cluster: {e}")) + })?; + + debug!( + cluster_id = ?cluster_id, + validator_pubkey = %validator_pubkey, + "Successfully removed validator and cluster" + ); + Ok(()) + } + + /// A cluster has ran out of operational funds. Set the cluster as liquidated + #[instrument(skip(self, log), fields(cluster_id, owner))] + fn process_cluster_liquidated(&self, log: &Log) -> Result<(), ExecutionError> { + let SSVContract::ClusterLiquidated { + owner, + operatorIds: operator_ids, + .. + } = SSVContract::ClusterLiquidated::decode_from_log(log)?; + + let cluster_id = compute_cluster_id(owner, operator_ids); + + debug!(cluster_id = ?cluster_id, "Processing cluster liquidation"); + + // Update the status of the cluster to be liquidated + self.db.update_status(cluster_id, true).map_err(|e| { + debug!( + cluster_id = ?cluster_id, + error = %e, + "Failed to mark cluster as liquidated" + ); + ExecutionError::Database(format!("Failed to mark cluster as liquidated: {e}")) + })?; + + debug!( + cluster_id = ?cluster_id, + owner = ?owner, + "Cluster marked as liquidated" + ); + Ok(()) + } + + // A cluster that was previously liquidated has had more SSV deposited and is now active + #[instrument(skip(self, log), fields(cluster_id, owner))] + fn process_cluster_reactivated(&self, log: &Log) -> Result<(), ExecutionError> { + let SSVContract::ClusterReactivated { + owner, + operatorIds: operator_ids, + .. 
+ } = SSVContract::ClusterReactivated::decode_from_log(log)?; + + let cluster_id = compute_cluster_id(owner, operator_ids); + + debug!(cluster_id = ?cluster_id, "Processing cluster reactivation"); + + // Update the status of the cluster to be active + self.db.update_status(cluster_id, false).map_err(|e| { + debug!( + cluster_id = ?cluster_id, + error = %e, + "Failed to mark cluster as active" + ); + ExecutionError::Database(format!("Failed to mark cluster as active: {e}")) + })?; + + debug!( + cluster_id = ?cluster_id, + owner = ?owner, + "Cluster reactivated" + ); + Ok(()) + } + + // The fee recipient address of a validator has been changed + #[instrument(skip(self, log), fields(owner))] + fn process_fee_recipient_updated(&self, log: &Log) -> Result<(), ExecutionError> { + let SSVContract::FeeRecipientAddressUpdated { + owner, + recipientAddress, + } = SSVContract::FeeRecipientAddressUpdated::decode_from_log(log)?; + // update the fee recipient address in the database + self.db + .update_fee_recipient(owner, recipientAddress) + .map_err(|e| { + debug!( + owner = ?owner, + error = %e, + "Failed to update fee recipient" + ); + ExecutionError::Database(format!("Failed to update fee recipient: {e}")) + })?; + debug!( + owner = ?owner, + new_recipient = ?recipientAddress, + "Fee recipient address updated" + ); + Ok(()) + } + + // A validator has exited the beacon chain + #[instrument(skip(self, log), fields(validator_pubkey, owner))] + fn process_validator_exited(&self, log: &Log) -> Result<(), ExecutionError> { + let SSVContract::ValidatorExited { + owner, + operatorIds, + publicKey, + } = SSVContract::ValidatorExited::decode_from_log(log)?; + // just create a validator exit task + debug!( + owner = ?owner, + validator_pubkey = ?publicKey, + operator_count = operatorIds.len(), + "Validator exited from network" + ); + Ok(()) + } +} diff --git a/anchor/eth/src/gen.rs b/anchor/eth/src/gen.rs new file mode 100644 index 00000000..ce4439cd --- /dev/null +++ b/anchor/eth/src/gen.rs @@ -0,0 +1,24 @@ +use alloy::sol; + +// Generate bindings around the SSV Network contract +sol! 
{
+    #[derive(Debug)]
+    #[sol(rpc)]
+    contract SSVContract {
+        struct Cluster {
+            uint32 validatorCount;
+            uint64 networkFeeIndex;
+            uint64 index;
+            bool active;
+            uint256 balance;
+        }
+        event OperatorAdded(uint64 indexed operatorId, address indexed owner, bytes publicKey, uint256 fee);
+        event OperatorRemoved(uint64 indexed operatorId);
+        event ValidatorAdded(address indexed owner, uint64[] operatorIds, bytes publicKey, bytes shares, Cluster cluster);
+        event ValidatorExited(address indexed owner, uint64[] operatorIds, bytes publicKey);
+        event ValidatorRemoved(address indexed owner, uint64[] operatorIds, bytes publicKey, Cluster cluster);
+        event FeeRecipientAddressUpdated(address indexed owner, address recipientAddress);
+        event ClusterLiquidated(address indexed owner, uint64[] operatorIds, Cluster cluster);
+        event ClusterReactivated(address indexed owner, uint64[] operatorIds, Cluster cluster);
+    }
+}
diff --git a/anchor/eth/src/lib.rs b/anchor/eth/src/lib.rs
new file mode 100644
index 00000000..f8df5fab
--- /dev/null
+++ b/anchor/eth/src/lib.rs
@@ -0,0 +1,8 @@
+pub use sync::{Config, Network, SsvEventSyncer};
+mod error;
+mod event_parser;
+mod event_processor;
+mod gen;
+mod network_actions;
+mod sync;
+mod util;
diff --git a/anchor/eth/src/network_actions.rs b/anchor/eth/src/network_actions.rs
new file mode 100644
index 00000000..563fdb7e
--- /dev/null
+++ b/anchor/eth/src/network_actions.rs
@@ -0,0 +1,88 @@
+use crate::error::ExecutionError;
+use crate::event_parser::EventDecoder;
+use crate::gen::SSVContract;
+
+use alloy::primitives::Address;
+use alloy::{rpc::types::Log, sol_types::SolEvent};
+use ssv_types::OperatorId;
+use std::str::FromStr;
+use types::PublicKey;
+
+/// Actions that the network has to take in response to an event during the live sync
+#[derive(Debug, PartialEq)]
+pub enum NetworkAction {
+    StopValidator {
+        validator_pubkey: PublicKey,
+    },
+    LiquidateCluster {
+        owner: Address,
+        operator_ids: Vec<OperatorId>,
+    },
+    ReactivateCluster {
+        owner: Address,
+        operator_ids: Vec<OperatorId>,
+    },
+    UpdateFeeRecipient {
+        owner: Address,
+        recipient: Address,
+    },
+    ExitValidator {
+        validator_pubkey: PublicKey,
+    },
+    NoOp,
+}
+
+/// Parse a network log into an action to be executed
+impl TryFrom<&Log> for NetworkAction {
+    type Error = ExecutionError;
+    fn try_from(source: &Log) -> Result<Self, Self::Error> {
+        let topic0 = source.topic0().expect("The log should have a topic0");
+        match *topic0 {
+            SSVContract::ValidatorRemoved::SIGNATURE_HASH => {
+                let SSVContract::ValidatorRemoved { publicKey, .. } =
+                    SSVContract::ValidatorRemoved::decode_from_log(source)?;
+                let validator_pubkey =
+                    PublicKey::from_str(&publicKey.to_string()).map_err(|e| {
+                        ExecutionError::InvalidEvent(format!("Failed to create PublicKey: {e}"))
+                    })?;
+                Ok(NetworkAction::StopValidator { validator_pubkey })
+            }
+            SSVContract::ClusterLiquidated::SIGNATURE_HASH => {
+                let SSVContract::ClusterLiquidated {
+                    owner, operatorIds, ..
+                } = SSVContract::ClusterLiquidated::decode_from_log(source)?;
+                Ok(NetworkAction::LiquidateCluster {
+                    owner,
+                    operator_ids: operatorIds.into_iter().map(OperatorId).collect(),
+                })
+            }
+            SSVContract::ClusterReactivated::SIGNATURE_HASH => {
+                let SSVContract::ClusterReactivated {
+                    owner, operatorIds, ..
+                } = SSVContract::ClusterReactivated::decode_from_log(source)?;
+                Ok(NetworkAction::ReactivateCluster {
+                    owner,
+                    operator_ids: operatorIds.into_iter().map(OperatorId).collect(),
+                })
+            }
+            SSVContract::FeeRecipientAddressUpdated::SIGNATURE_HASH => {
+                let recipient_updated_log =
+                    SSVContract::FeeRecipientAddressUpdated::decode_from_log(source)?;
+                Ok(NetworkAction::UpdateFeeRecipient {
+                    owner: recipient_updated_log.owner,
+                    recipient: recipient_updated_log.recipientAddress,
+                })
+            }
+            SSVContract::ValidatorExited::SIGNATURE_HASH => {
+                let SSVContract::ValidatorExited { publicKey, .. } =
+                    SSVContract::ValidatorExited::decode_from_log(source)?;
+                let validator_pubkey =
+                    PublicKey::from_str(&publicKey.to_string()).map_err(|e| {
+                        ExecutionError::InvalidEvent(format!("Failed to create PublicKey: {e}"))
+                    })?;
+                Ok(NetworkAction::ExitValidator { validator_pubkey })
+            }
+            _ => Ok(NetworkAction::NoOp),
+        }
+    }
+}
diff --git a/anchor/eth/src/sync.rs b/anchor/eth/src/sync.rs
new file mode 100644
index 00000000..eb0eef62
--- /dev/null
+++ b/anchor/eth/src/sync.rs
@@ -0,0 +1,411 @@
+use crate::error::ExecutionError;
+use crate::event_processor::EventProcessor;
+use crate::gen::SSVContract;
+use alloy::primitives::{address, Address};
+use alloy::providers::{Provider, ProviderBuilder, RootProvider, WsConnect};
+use alloy::pubsub::PubSubFrontend;
+use alloy::rpc::types::{Filter, Log};
+use alloy::sol_types::SolEvent;
+use alloy::transports::http::{Client, Http};
+use database::NetworkDatabase;
+use futures::future::{try_join_all, Future};
+use futures::StreamExt;
+use rand::Rng;
+use std::collections::BTreeMap;
+use std::sync::{Arc, LazyLock};
+use tokio::time::Duration;
+use tracing::{debug, error, info, instrument, warn};
+
+/// SSV contract events needed to come up to date with the network
+static SSV_EVENTS: LazyLock<Vec<&'static str>> = LazyLock::new(|| {
+    vec![
+        // event OperatorAdded(uint64 indexed operatorId, address indexed owner, bytes publicKey, uint256 fee);
+        SSVContract::OperatorAdded::SIGNATURE,
+        // event OperatorRemoved(uint64 indexed operatorId);
+        SSVContract::OperatorRemoved::SIGNATURE,
+        // event ValidatorAdded(address indexed owner, uint64[] operatorIds, bytes publicKey, bytes shares, Cluster cluster);
+        SSVContract::ValidatorAdded::SIGNATURE,
+        // event ValidatorRemoved(address indexed owner, uint64[] operatorIds, bytes publicKey, Cluster cluster);
+        SSVContract::ValidatorRemoved::SIGNATURE,
+        // event ClusterLiquidated(address indexed owner, uint64[] operatorIds, Cluster cluster);
+        SSVContract::ClusterLiquidated::SIGNATURE,
+        // event ClusterReactivated(address indexed owner, uint64[] operatorIds, Cluster cluster);
+        SSVContract::ClusterReactivated::SIGNATURE,
+        // event FeeRecipientAddressUpdated(address indexed owner, address recipientAddress);
+        SSVContract::FeeRecipientAddressUpdated::SIGNATURE,
+        // event ValidatorExited(address indexed owner, uint64[] operatorIds, bytes publicKey);
+        SSVContract::ValidatorExited::SIGNATURE,
+    ]
+});
+
+/// Contract deployment addresses
+/// Mainnet: https://etherscan.io/address/0xDD9BC35aE942eF0cFa76930954a156B3fF30a4E1
+static MAINNET_DEPLOYMENT_ADDRESS: LazyLock<Address> =
= + LazyLock::new(|| address!("DD9BC35aE942eF0cFa76930954a156B3fF30a4E1")); + +/// Holesky: https://holesky.etherscan.io/address/0x38A4794cCEd47d3baf7370CcC43B560D3a1beEFA +static HOLESKY_DEPLOYMENT_ADDRESS: LazyLock
= + LazyLock::new(|| address!("38A4794cCEd47d3baf7370CcC43B560D3a1beEFA")); + +/// Contract deployment block on Ethereum Mainnet +/// Mainnet: https://etherscan.io/tx/0x4a11a560d3c2f693e96f98abb1feb447646b01b36203ecab0a96a1cf45fd650b +const MAINNET_DEPLOYMENT_BLOCK: u64 = 17507487; + +/// Contract deployment block on the Holesky Network +/// Holesky: https://holesky.etherscan.io/tx/0x998c38ff37b47e69e23c21a8079168b7e0e0ade7244781587b00be3f08a725c6 +const HOLESKY_DEPLOYMENT_BLOCK: u64 = 181612; + +/// Batch size for log fetching +const BATCH_SIZE: u64 = 10000; + +/// Batch size for task groups +const GROUP_SIZE: usize = 50; + +/// RPC and WS clients types +type RpcClient = RootProvider>; +type WsClient = RootProvider; + +/// Retry information for log fetching +const MAX_RETRIES: i32 = 5; + +// Block follow distance +const FOLLOW_DISTANCE: u64 = 8; + +/// The maximum number of operators a validator can have +/// https://github.com/ssvlabs/ssv/blob/07095fe31e3ded288af722a9c521117980585d95/eth/eventhandler/validation.go#L15 +pub const MAX_OPERATORS: usize = 13; + +/// Network that is being connected to +#[derive(Debug)] +pub enum Network { + Mainnet, + Holesky, +} + +// TODO!() Dummy config struct that will be replaced +#[derive(Debug)] +pub struct Config { + pub http_url: String, + pub ws_url: String, + pub beacon_url: String, + pub network: Network, +} + +/// Client for interacting with the SSV contract on Ethereum L1 +/// +/// Manages connections to the L1 and monitors SSV contract events to track the state of validator +/// and operators. Provides both historical synchronization and live event monitoring +pub struct SsvEventSyncer { + /// Http client connected to the L1 to fetch historical SSV event information + rpc_client: Arc, + /// Websocket client connected to L1 to stream live SSV event information + ws_client: WsClient, + /// Websocket connection url + ws_url: String, + /// Event processor for logs + event_processor: EventProcessor, + /// The network the node is connected to + network: Network, +} + +impl SsvEventSyncer { + #[instrument(skip(db))] + /// Create a new SsvEventSyncer to sync all of the events from the chain + pub async fn new(db: Arc, config: Config) -> Result { + info!(?config, "Creating new SSV Event Syncer"); + + // Construct HTTP Provider + let http_url = config.http_url.parse().expect("Failed to parse HTTP URL"); + let rpc_client: Arc = Arc::new(ProviderBuilder::new().on_http(http_url)); + + // Construct Websocket Provider + let ws = WsConnect::new(&config.ws_url); + let ws_client = ProviderBuilder::new() + .on_ws(ws.clone()) + .await + .map_err(|e| { + ExecutionError::SyncError(format!( + "Failed to bind to WS: {}, {}", + &config.ws_url, e + )) + })?; + + // Construct an EventProcessor with access to the DB + let event_processor = EventProcessor::new(db); + + Ok(Self { + rpc_client, + ws_client, + ws_url: config.ws_url, + event_processor, + network: config.network, + }) + } + + #[instrument(skip(self))] + /// Initial both a historical sync and a live sync from the chain. 
+    /// into a never-ending live sync, so it should never return
+    pub async fn sync(&mut self) -> Result<(), ExecutionError> {
+        info!("Starting SSV event sync");
+
+        // Get network specific contract information
+        let (contract_address, deployment_block) = match self.network {
+            Network::Mainnet => (*MAINNET_DEPLOYMENT_ADDRESS, MAINNET_DEPLOYMENT_BLOCK),
+            Network::Holesky => (*HOLESKY_DEPLOYMENT_ADDRESS, HOLESKY_DEPLOYMENT_BLOCK),
+        };
+
+        info!(
+            ?contract_address,
+            deployment_block, "Using contract configuration"
+        );
+
+        info!("Starting historical sync");
+        self.historical_sync(contract_address, deployment_block)
+            .await?;
+
+        info!("Starting live sync");
+        self.live_sync(contract_address).await?;
+
+        // If we reach here, there is some non-recoverable error and we should shut down
+        Err(ExecutionError::SyncError(
+            "Sync has unexpectedly exited".to_string(),
+        ))
+    }
+
+    // Perform a historical sync on the network. This will fetch blocks from the contract deployment
+    // block up until the current tip of the chain. This way, we can recreate the current state of
+    // the network through event logs
+    #[instrument(skip(self, contract_address, deployment_block))]
+    async fn historical_sync(
+        &self,
+        contract_address: Address,
+        deployment_block: u64,
+    ) -> Result<(), ExecutionError> {
+        // Start from the contract deployment block or the last block that has been processed
+        let last_processed_block = self.event_processor.db.get_last_processed_block() + 1;
+        let mut start_block = std::cmp::max(deployment_block, last_processed_block);
+
+        loop {
+            let current_block = self.rpc_client.get_block_number().await.map_err(|e| {
+                error!(?e, "Failed to fetch block number");
+                ExecutionError::RpcError(format!("Unable to fetch block number {}", e))
+            })?;
+
+            // Basic verification
+            if current_block < FOLLOW_DISTANCE {
+                debug!("Current block less than follow distance, breaking");
+                break;
+            }
+            let end_block = current_block - FOLLOW_DISTANCE;
+            if end_block < start_block {
+                debug!("End block less than start block, breaking");
+                break;
+            }
+
+            // Make sure we have blocks to sync
+            if start_block == end_block {
+                info!("Synced up to the tip of the chain, breaking");
+                break;
+            }
+
+            // Here, we have a start..end block range that we need to sync the logs from. This
+            // range gets broken up into individual ranges of BATCH_SIZE blocks from which the
+            // logs are fetched. The individual ranges are further broken up into a set of batches
+            // that are sequentially processed. This makes it so we don't have a ton of logs that
+            // all have to be processed in one pass
+
+            // Chunk the start and end block range into a set of ranges of size BATCH_SIZE
+            // and construct a future to fetch the logs in each range
+            let mut tasks: Vec<_> = (start_block..=end_block)
+                .step_by(BATCH_SIZE as usize)
+                .map(|start| {
+                    let (start, end) = (start, std::cmp::min(start + BATCH_SIZE - 1, end_block));
+                    self.fetch_logs(start, end, contract_address)
+                })
+                .collect();
+
+            // Further chunk the block ranges into groups where each group covers 500k blocks, so
+            // there are 50 tasks per group. BATCH_SIZE * 50 = 500k
+            let mut task_groups = Vec::new();
+            while !tasks.is_empty() {
+                // Drain takes elements from the original vector, moving them to a new vector;
+                // take up to GROUP_SIZE elements (or whatever is left if less than GROUP_SIZE)
+                let chunk: Vec<_> = tasks.drain(..tasks.len().min(GROUP_SIZE)).collect();
+                task_groups.push(chunk);
+            }
+
+            info!(
+                start_block = start_block,
+                end_block = end_block,
+                "Syncing all events"
+            );
+            for (index, group) in task_groups.into_iter().enumerate() {
+                let calculated_start =
+                    start_block + (index as u64 * BATCH_SIZE * GROUP_SIZE as u64);
+                let calculated_end = calculated_start + (BATCH_SIZE * GROUP_SIZE as u64) - 1;
+                let calculated_end = std::cmp::min(calculated_end, end_block);
+                info!(
+                    "Fetching logs for block range {}..{}",
+                    calculated_start, calculated_end
+                );
+
+                // Await all of the futures.
+                let event_logs: Vec<Vec<Log>> = try_join_all(group).await.map_err(|e| {
+                    ExecutionError::SyncError(format!("Failed to join log future: {e}"))
+                })?;
+                let event_logs: Vec<Log> = event_logs.into_iter().flatten().collect();
+
+                // The futures may join out of order block-wise. The individual events within a
+                // block retain their tx ordering. Due to this, we can reassemble back into blocks
+                // and be confident the order is correct
+                let mut ordered_event_logs: BTreeMap<u64, Vec<Log>> = BTreeMap::new();
+                for log in event_logs {
+                    let block_num = log
+                        .block_number
+                        .ok_or("Log is missing block number")
+                        .map_err(|e| {
+                            ExecutionError::RpcError(format!("Failed to fetch block number: {e}"))
+                        })?;
+                    ordered_event_logs.entry(block_num).or_default().push(log);
+                }
+                let ordered_event_logs: Vec<Log> =
+                    ordered_event_logs.into_values().flatten().collect();
+
+                // The logs are now all fetched and in order; process them, but do not send them
+                // off as network actions since we are just reconstructing state
+                self.event_processor.process_logs(ordered_event_logs, false);
+
+                // Record that we have processed up to this block
+                self.event_processor
+                    .db
+                    .processed_block(calculated_end)
+                    .expect("Failed to update last processed block number");
+            }
+
+            info!("Processed all events up to block {}", end_block);
+
+            // update the start block and sync the next range of blocks
+            start_block = end_block + 1;
+        }
+        info!("Historical sync completed");
+        Ok(())
+    }
+
+    // Construct a future that will fetch logs in the range from_block..to_block
+    #[instrument(skip(self, deployment_address))]
+    fn fetch_logs(
+        &self,
+        from_block: u64,
+        to_block: u64,
+        deployment_address: Address,
+    ) -> impl Future<Output = Result<Vec<Log>, ExecutionError>> {
+        // Setup filter and rpc client
+        let rpc_client = self.rpc_client.clone();
+        let filter = Filter::new()
+            .address(deployment_address)
+            .from_block(from_block)
+            .to_block(to_block)
+            .events(&*SSV_EVENTS);
+
+        // Try to fetch logs with a retry upon error.
Try up to MAX_RETRIES times and error if we + // exceed this as we can assume there is some underlying connection issue + async move { + let mut retry_cnt = 0; + loop { + match rpc_client.get_logs(&filter).await { + Ok(logs) => { + debug!(log_count = logs.len(), "Successfully fetched logs"); + return Ok(logs); + } + Err(e) => { + if retry_cnt > MAX_RETRIES { + error!(?e, retry_cnt, "Max retries exceeded while fetching logs"); + return Err(ExecutionError::RpcError( + "Unable to fetch logs".to_string(), + )); + } + + warn!(?e, retry_cnt, "Error fetching logs, retrying"); + + // increment retry_count and jitter retry duration + let jitter = rand::thread_rng().gen_range(0..=100); + let sleep_duration = Duration::from_millis(jitter); + tokio::time::sleep(sleep_duration).await; + retry_cnt += 1; + continue; + } + } + } + } + } + + // Once caught up with the chain, start live sync which will stream in live blocks from the + // network. The events will be processed and duties will be created in response to network + // actions + #[instrument(skip(self, contract_address))] + async fn live_sync(&mut self, contract_address: Address) -> Result<(), ExecutionError> { + info!("Network up to sync.."); + info!("Current state"); + info!(?contract_address, "Starting live sync"); + + loop { + // Try to subscribe to a block stream + let stream = match self.ws_client.subscribe_blocks().await { + Ok(sub) => { + info!("Successfully subscribed to block stream"); + Some(sub.into_stream()) + } + Err(e) => { + error!( + ?e, + "Failed to subscribe to block stream. Retrying in 1 second..." + ); + + // Backend has closed, need to reconnect + let ws = WsConnect::new(&self.ws_url); + if let Ok(ws_client) = ProviderBuilder::new().on_ws(ws).await { + info!("Successfully reconnected to websocket. 
Catching back up"); + self.ws_client = ws_client; + // Historical sync any missed blocks while down, can pass 0 as deployment + // block since it will use last_processed_block from DB anyways + self.historical_sync(contract_address, 0).await?; + } else { + tokio::time::sleep(Duration::from_secs(1)).await; + } + None + } + }; + + // If we have a connection, continuously stream in blocks + if let Some(mut stream) = stream { + while let Some(block_header) = stream.next().await { + // Block we are interested in is the current block number - follow distance + let relevant_block = block_header.number - FOLLOW_DISTANCE; + debug!( + block_number = block_header.number, + relevant_block, "Processing new block" + ); + + let logs = self + .fetch_logs(relevant_block, relevant_block, contract_address) + .await?; + + info!( + log_count = logs.len(), + "Processing events from block {}", relevant_block + ); + + // process the logs and update the last block we have recorded + self.event_processor.process_logs(logs, true); + self.event_processor + .db + .processed_block(relevant_block) + .expect("Failed to update last processed block number"); + } + } + + // If we get here, the stream ended (likely due to disconnect) + error!("WebSocket stream ended, reconnecting..."); + } + } +} diff --git a/anchor/eth/src/util.rs b/anchor/eth/src/util.rs new file mode 100644 index 00000000..7a9ee3ea --- /dev/null +++ b/anchor/eth/src/util.rs @@ -0,0 +1,264 @@ +use crate::sync::MAX_OPERATORS; +use alloy::primitives::{keccak256, Address}; +use ssv_types::{ClusterId, OperatorId, Share, ValidatorIndex, ValidatorMetadata}; +use std::collections::HashSet; +use std::str::FromStr; +use types::{Graffiti, PublicKey, Signature}; + +// phase0.SignatureLength +const SIGNATURE_LENGTH: usize = 96; +// phase0.PublicKeyLength +const PUBLIC_KEY_LENGTH: usize = 48; +// Length of an encrypted key +const ENCRYPTED_KEY_LENGTH: usize = 256; + +// Parses shares from a ValidatorAdded event +// Event contains a bytes stream of the form +// [signature | public keys | encrypted keys]. 
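+// As a worked example of this layout (illustrative numbers only): a validator split
+// across 4 operators yields a stream of SIGNATURE_LENGTH + 4 * PUBLIC_KEY_LENGTH +
+// 4 * ENCRYPTED_KEY_LENGTH = 96 + 192 + 1024 = 1312 bytes: one 96-byte BLS signature,
+// then four 48-byte share public keys, then four 256-byte encrypted private keys.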
+pub fn parse_shares(
+    shares: Vec<u8>,
+    operator_ids: &[OperatorId],
+    cluster_id: &ClusterId,
+    validator_pubkey: &PublicKey,
+) -> Result<(Vec<u8>, Vec<Share>), String> {
+    let operator_count = operator_ids.len();
+
+    // Calculate offsets for different components within the shares
+    let signature_offset = SIGNATURE_LENGTH;
+    let pub_keys_offset = PUBLIC_KEY_LENGTH * operator_count + signature_offset;
+    let shares_expected_length = ENCRYPTED_KEY_LENGTH * operator_count + pub_keys_offset;
+
+    // Validate total length of shares
+    if shares_expected_length != shares.len() {
+        return Err(format!(
+            "Share data has invalid length: expected {}, got {}",
+            shares_expected_length,
+            shares.len()
+        ));
+    }
+
+    // Extract all of the components
+    let signature = shares[..signature_offset].to_vec();
+    let share_public_keys = split_bytes(
+        &shares[signature_offset..pub_keys_offset],
+        PUBLIC_KEY_LENGTH,
+    );
+    let encrypted_keys: Vec<Vec<u8>> =
+        split_bytes(&shares[pub_keys_offset..], ENCRYPTED_KEY_LENGTH);
+
+    // Create the shares from the share public keys and the encrypted private keys
+    let shares: Vec<Share> = share_public_keys
+        .into_iter()
+        .zip(encrypted_keys)
+        .zip(operator_ids)
+        .map(|((public, encrypted), operator_id)| {
+            // Add 0x prefix to the hex encoded public key
+            let public_key_hex = format!("0x{}", hex::encode(&public));
+
+            // Create public key
+            let share_pubkey = PublicKey::from_str(&public_key_hex)
+                .map_err(|e| format!("Failed to create public key: {}", e))?;
+
+            // Convert encrypted key into fixed array
+            let encrypted_array: [u8; 256] = encrypted
+                .try_into()
+                .map_err(|_| "Encrypted key has wrong length".to_string())?;
+
+            Ok(Share {
+                validator_pubkey: validator_pubkey.clone(),
+                operator_id: *operator_id,
+                cluster_id: *cluster_id,
+                share_pubkey,
+                encrypted_private_key: encrypted_array,
+            })
+        })
+        .collect::<Result<Vec<Share>, String>>()?;
+
+    Ok((signature, shares))
+}
+
+// Splits a byte slice into chunks of specified size
+fn split_bytes(data: &[u8], chunk_size: usize) -> Vec<Vec<u8>> {
+    data.chunks(chunk_size)
+        .map(|chunk| chunk.to_vec())
+        .collect()
+}
+
+// Construct the metadata for the newly added validator
+pub fn construct_validator_metadata(
+    public_key: &PublicKey,
+    cluster_id: &ClusterId,
+) -> Result<ValidatorMetadata, String> {
+    // Default Anchor-SSV Graffiti
+    let mut bytes = [0u8; 32];
+    bytes[..10].copy_from_slice(b"Anchor-SSV");
+
+    // Note: Validator Index is not included in the event log data and it would require a
+    // significant refactor to introduce a single non-blocking asynchronous call to fetch this data.
+    // For this reason, the population of this field is pushed downstream
+
+    Ok(ValidatorMetadata {
+        index: ValidatorIndex(0),
+        public_key: public_key.clone(),
+        graffiti: Graffiti::from(bytes),
+        cluster_id: *cluster_id,
+    })
+}
+
+// Verify that the signature over the share data is correct
+pub fn verify_signature(
+    signature: Vec<u8>,
+    nonce: u16,
+    owner: &Address,
+    public_key: &PublicKey,
+) -> bool {
+    // Hash the owner and nonce concatenated
+    let data = format!("{}:{}", owner, nonce);
+    let hash = keccak256(data);
+
+    // Deserialize the signature
+    let signature = match Signature::deserialize(&signature) {
+        Ok(sig) => sig,
+        Err(_) => return false,
+    };
+
+    // Verify the signature against the message
+    signature.verify(public_key, hash)
+}
+
+// Perform basic verification on the operator set
+pub fn validate_operators(operator_ids: &[OperatorId]) -> Result<(), String> {
+    let num_operators = operator_ids.len();
+
+    // make sure there is a valid number of operators
+    if num_operators > MAX_OPERATORS {
+        return Err(format!(
+            "Validator has too many operators: {}",
+            num_operators
+        ));
+    }
+    if num_operators == 0 {
+        return Err("Validator has no operators".to_string());
+    }
+
+    // make sure the count can form a valid 3f+1 quorum
+    let threshold = (num_operators - 1) / 3;
+    if (num_operators - 1) % 3 != 0 || !(1..=4).contains(&threshold) {
+        return Err(format!(
+            "Given {} operators. Cannot build a 3f+1 quorum",
+            num_operators
+        ));
+    }
+
+    // make sure there are no duplicates
+    let mut seen = HashSet::new();
+    let are_duplicates = !operator_ids.iter().all(|x| seen.insert(x));
+    if are_duplicates {
+        return Err("Operator IDs contain duplicates".to_string());
+    }
+
+    Ok(())
+}
+
+// Compute an identifier for the cluster from the owner and the chosen operators
+pub fn compute_cluster_id(owner: Address, mut operator_ids: Vec<u64>) -> ClusterId {
+    // Sort the operator IDs
+    operator_ids.sort();
+    // 20 bytes for the address and num ids * 32 for ids
+    let data_size = 20 + (operator_ids.len() * 32);
+    let mut data: Vec<u8> = Vec::with_capacity(data_size);
+
+    // Add the address bytes
+    data.extend_from_slice(owner.as_slice());
+
+    // Add the operator IDs as 32 byte values
+    for id in operator_ids {
+        let mut id_bytes = [0u8; 32];
+        id_bytes[24..].copy_from_slice(&id.to_be_bytes());
+        data.extend_from_slice(&id_bytes);
+    }
+
+    // Hash it all
+    let hashed_data: [u8; 32] = keccak256(data)
+        .as_slice()
+        .try_into()
+        .expect("Conversion Failed");
+    ClusterId(hashed_data)
+}
+
+#[cfg(test)]
+mod eth_util_tests {
+    use super::*;
+    use alloy::primitives::address;
+
+    #[test]
+    // Test to make sure cluster id computation is order independent
+    fn test_cluster_id() {
+        let owner = Address::random();
+        let operator_ids = vec![1, 3, 4, 2];
+        let operator_ids_mixed = vec![4, 2, 3, 1];
+
+        let cluster_id_1 = compute_cluster_id(owner, operator_ids);
+        let cluster_id_2 = compute_cluster_id(owner, operator_ids_mixed);
+        assert_eq!(cluster_id_1, cluster_id_2);
+    }
+
+    #[test]
+    // Test to make sure a ClusterID matches a current onchain value
+    fn test_valid_cluster_id() {
+        // https://ssvscan.io/cluster/a3d1e25b31cb6da1b9636568a221b0d7ae1a57a7f14ace5c97d1093ebf6b786c
+        let onchain = "a3d1e25b31cb6da1b9636568a221b0d7ae1a57a7f14ace5c97d1093ebf6b786c";
+        let operator_ids = vec![62, 256, 259, 282];
+        let owner = address!("E1b2308852F0e85D9F23278A6A80131ac8901dBF");
+        let cluster_id = compute_cluster_id(owner, operator_ids);
+        let cluster_id_hex = hex::encode(*cluster_id);
+        assert_eq!(onchain, cluster_id_hex);
+    }
+
+    // Test to make
+
+// Perform basic verification on the operator set
+pub fn validate_operators(operator_ids: &[OperatorId]) -> Result<(), String> {
+    let num_operators = operator_ids.len();
+
+    // Make sure there is a valid number of operators
+    if num_operators > MAX_OPERATORS {
+        return Err(format!(
+            "Validator has too many operators: {}",
+            num_operators
+        ));
+    }
+    if num_operators == 0 {
+        return Err("Validator has no operators".to_string());
+    }
+
+    // Make sure the count can support a 3f + 1 quorum
+    let threshold = (num_operators - 1) / 3;
+    if (num_operators - 1) % 3 != 0 || !(1..=4).contains(&threshold) {
+        return Err(format!(
+            "Given {} operators. Cannot build a 3f+1 quorum",
+            num_operators
+        ));
+    }
+
+    // Make sure there are no duplicates
+    let mut seen = HashSet::new();
+    let has_duplicates = !operator_ids.iter().all(|x| seen.insert(x));
+    if has_duplicates {
+        return Err("Operator IDs contain duplicates".to_string());
+    }
+
+    Ok(())
+}
+
+// Compute an identifier for the cluster from the owner and the chosen operators
+pub fn compute_cluster_id(owner: Address, mut operator_ids: Vec<u64>) -> ClusterId {
+    // Sort the operator IDs
+    operator_ids.sort();
+    // 20 bytes for the address and 32 bytes per operator ID
+    let data_size = 20 + (operator_ids.len() * 32);
+    let mut data: Vec<u8> = Vec::with_capacity(data_size);
+
+    // Add the address bytes
+    data.extend_from_slice(owner.as_slice());
+
+    // Add the operator IDs as 32-byte big-endian values
+    for id in operator_ids {
+        let mut id_bytes = [0u8; 32];
+        id_bytes[24..].copy_from_slice(&id.to_be_bytes());
+        data.extend_from_slice(&id_bytes);
+    }
+
+    // Hash it all
+    let hashed_data: [u8; 32] = keccak256(data)
+        .as_slice()
+        .try_into()
+        .expect("Conversion failed");
+    ClusterId(hashed_data)
+}
+
+#[cfg(test)]
+mod eth_util_tests {
+    use super::*;
+    use alloy::primitives::address;
+
+    #[test]
+    // Test to make sure cluster ID computation is order independent
+    fn test_cluster_id() {
+        let owner = Address::random();
+        let operator_ids = vec![1, 3, 4, 2];
+        let operator_ids_mixed = vec![4, 2, 3, 1];
+
+        let cluster_id_1 = compute_cluster_id(owner, operator_ids);
+        let cluster_id_2 = compute_cluster_id(owner, operator_ids_mixed);
+        assert_eq!(cluster_id_1, cluster_id_2);
+    }
+
+    #[test]
+    // Test to make sure a ClusterId matches a current onchain value
+    fn test_valid_cluster_id() {
+        // https://ssvscan.io/cluster/a3d1e25b31cb6da1b9636568a221b0d7ae1a57a7f14ace5c97d1093ebf6b786c
+        let onchain = "a3d1e25b31cb6da1b9636568a221b0d7ae1a57a7f14ace5c97d1093ebf6b786c";
+        let operator_ids = vec![62, 256, 259, 282];
+        let owner = address!("E1b2308852F0e85D9F23278A6A80131ac8901dBF");
+        let cluster_id = compute_cluster_id(owner, operator_ids);
+        let cluster_id_hex = hex::encode(*cluster_id);
+        assert_eq!(onchain, cluster_id_hex);
+    }
+
+    // Test to make sure we can properly verify signatures
+    #[test]
+    fn test_sig_verification() {
+        // Random data taken from chain
+        let owner = address!("382f6ff5b9a29fcf1dd2bf8b86c3234dc7ed2df6");
+        let public_key = PublicKey::from_str("0x94cbce91137bfda4a7638941a68d6b156712bd1ce80e5dc580adc74a445099cbbfb9f97a6c7c89c6a87e28e0657821ac").expect("Failed to create public key");
+        let nonce = 8;
+        let signature_data = [
+            151, 32, 191, 178, 170, 21, 45, 81, 34, 50, 220, 37, 95, 149, 101, 178, 38, 128, 11,
+            195, 98, 241, 226, 70, 46, 8, 168, 133, 99, 23, 73, 126, 61, 33, 197, 226, 105, 11,
+            134, 248, 226, 127, 60, 108, 102, 109, 148, 135, 16, 76, 114, 132, 123, 186, 148, 147,
+            170, 143, 204, 45, 71, 59, 76, 131, 220, 199, 179, 219, 47, 115, 45, 162, 168, 163,
+            223, 110, 38, 9, 166, 82, 34, 227, 53, 50, 31, 105, 74, 122, 179, 172, 22, 245, 89, 32,
+            214, 69,
+        ]
+        .to_vec();
+        assert!(verify_signature(
+            signature_data.clone(),
+            nonce,
+            &owner,
+            &public_key
+        ));
+
+        // Make sure that a wrong nonce fails the signature check
+        assert!(!verify_signature(
+            signature_data,
+            nonce + 1,
+            &owner,
+            &public_key
+        ));
+    }
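+
+    // Illustrative extra test (not part of the original patch): only committee
+    // sizes of the form 3f + 1 (4, 7, 10 or 13 operators) should pass
+    // `validate_operators`. This assumes MAX_OPERATORS is at least 13 and that
+    // OperatorId wraps a u64.
+    #[test]
+    fn test_validate_operator_counts() {
+        for n in 1..=13u64 {
+            let ids: Vec<OperatorId> = (1..=n).map(OperatorId).collect();
+            assert_eq!(
+                validate_operators(&ids).is_ok(),
+                matches!(n, 4 | 7 | 10 | 13)
+            );
+        }
+    }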
+
+    #[test]
+    // Ensure that we can properly parse share data into a set of shares
+    fn test_parse_shares() {
+        // Onchain share data and some other corresponding info
+        let share_data = "ab6c91297d2a604d2fc301ad161f99a16baa53e549fd1822acf0f6834450103555b03281d23d0ab7ee944d564f794e040ecd60ad9894747cc6b55ef017876079c1d6aa48595a1791cefc73aa6781c5e26bc644d515e9e9c5bbc8d2b5b173569ba547ba1edf393778d17ad13f2bc8c9b5c2e17b563998a2307b6dddda4d7c6ed3a7f261137fd9c2a81bb1ad1fea6896a8b9719027f01c9b496cf7ade5972e96c94e523e2671662bcfc80d5b6672877de39803d10251d7ecb76794252dea94aa348143c62887bcd62cfb680326c786e22b6a558895f037854e0a70019360c129a788fafe48c18374382cd97a4ea5797bcf982526e76eb89d132e5547f43e9ae9fdf64e061d2f5fcb5bd5ff1de8e7722b53730c6c6a1cc31791fceaabe2e5d79944a7c0d4459ec10153075996e9ef62e4fa9da730873652820c32476c1ddfd10a7b322e67e78759ed9cdec042a09069efc363778f620b3e5ffe01cb1a45bb278768f44342c45736b3a5ccdfbf10b0a10ed26a36af787363398dd776aea98d131738a881739b7e0ee4aa5e280355e2d2254f444ade07c239f5f6870fac2143de480e6ff5e3954d6e441fd16132296960b523bd23fa7b52e357ed03f8201ed4c9b4ed486a66c818e319418c8e34d844b3812f75a74a1607c9bb0eda11c89dbd67858730076e17ed3f6d021c2e57e94e9c3d53e1f6a9c7c2d8373fd5e3340e3a14951e97b7baa5fc1825ba59bb3990f1c607d22756fd178f1a0674d47ee476633f27e961ec3a79b236fb20f863814b47fb9eee75fdbdab99b6901087c41dd31d5320ac3e3c772a8982c64b1c138cbfb968e8a6e59f027bcc53adf2f4f171cbdc6f576dbf313b11485400356865f1f2b0b0533e576d7e3487d5d7d85e8d57aeab4314ec1e49f7647b3eea9a7f1fb805cb944b175c39a2668f96d4cd97afd3dc1258cbaccde6dc5e4b48d4bfd783396505e6f083c5cb3af9e24e90f1eac03f8e8cbc2664b9e6dc81543a1a68973bb03e84f50338ed6c1247447d3a3acef69879900fa9596492cce31130668621f038f365b8b4b1946c95e41e652d868421e574850f5b0b6befb481c93be55c3f9a90f613823942fbd71354ad8202b0121885a0da475d551a86da0c7a983b4d7b403d91adf275b3348fd09b797ccb6be7ebb96efe024588d2f8105e3b7ec5e6cbefd3bb287c82f717597244ea36df07753f0dcc4ce64570fff04447a96cb9f80c6359306c5e45a42e8bbaeb3de9e2ba37aeeed85bcaeb6c61f77c9d26dd4ca853ca09ea8e2e61c675b250c7c6c6c29d7829b3534e0749b9e69b67de569b21f6f0f9a46698b30aad615800aa26ae3629f4b91dfbc3d12cf6b61ed47846b0c0522db60ac41bfc3c4e233bd098180d0257310d58099592d0a5a87e4c6704b64683ee1c746f2a659a01939fbc2b72d196f94452a2b32fa945d1be80a76ba64061bdb73aa23fb83b9e96af949a13e3407a3b37529e79a79814eb172afe4ff56af68417a4191ede4c5c8521ca36c41c0f9e45a960bd32c8a14cb54442e27abf8cf96089736e14340eb017cadf640dbd30014f1802ba6c686e9039f6e5509384a5bfb3f82bef56a4db9778add48a7384d6e25357842a3c591c611908083d420c6e77699793dbf0f1cc597137b48933246c7f5693098a3218312c4ae030dd74b4291e3e1f95702c7f66c22dba7a8ac634e200534c1b6b9c6397c415ab1c448c4eb6481d35250dd83c599cdc05b6e222a4543147e289cf611755dbb1f0968a61c3741a7347db1599b9c4b71e39d4921c7b3bbe018a6a766c7c26fd31e77eb9b727a6a9ca1d72a44317a54e43004f4f42dd5731ed3e83248bc2d5ccef";
+        let share_data = hex::decode(share_data).expect("Failed to decode hex string");
+        let operator_ids = vec![OperatorId(1), OperatorId(2), OperatorId(3), OperatorId(4)];
+        let cluster_id = ClusterId([0u8; 32]);
+        let pubkey = PublicKey::from_str("0xb1d97447eeb16cffa0464040860db6f12ac0af6a1583a45f4f07fb61e1470f3733f8b7ec8e3c9ff4a9da83086d342ba1").expect("Failed to create public key");
+
+        let (_, shares) = parse_shares(share_data, &operator_ids, &cluster_id, &pubkey)
+            .expect("Failed to parse shares");
+        assert_eq!(shares.len(), 4);
+    }
+}
diff --git a/anchor/http_api/Cargo.toml b/anchor/http_api/Cargo.toml
index 7769bf0c..15975bac 100644
--- a/anchor/http_api/Cargo.toml
+++ b/anchor/http_api/Cargo.toml
@@ -12,6 +12,6 @@ path = "src/lib.rs"
 axum = { workspace = true }
 serde = { workspace = true }
 slot_clock = { workspace = true }
-task_executor = { workspace = true }
+task_executor = { workspace = true }
 tokio = { workspace = true }
 tracing = { workspace = true }
diff --git a/anchor/network/Cargo.toml b/anchor/network/Cargo.toml
index b3b99468..cc069ab8 100644
--- a/anchor/network/Cargo.toml
+++ b/anchor/network/Cargo.toml
@@ -8,7 +8,18 @@ authors = ["Sigma Prime "]
 dirs = { workspace = true }
 discv5 = { workspace = true }
 futures = { workspace = true }
-libp2p = { version = "0.54", default-features = false, features = ["identify", "yamux", "noise", "secp256k1", "tcp", "tokio", "macros", "gossipsub", "quic", "ping"] }
+libp2p = { version = "0.54", default-features = false, features = [
+    "identify",
+    "yamux",
+    "noise",
+    "secp256k1",
+    "tcp",
+    "tokio",
+    "macros",
+    "gossipsub",
+    "quic",
+    "ping",
+] }
 lighthouse_network = { workspace = true }
 serde = { workspace = true }
 task_executor = { workspace = true }
diff --git a/book/book.toml b/book/book.toml
index 51a84533..65b5a658 100644
--- a/book/book.toml
+++ b/book/book.toml
@@ -14,4 +14,3 @@ additional-js = ["mermaid.min.js", "mermaid-init.js"]
 
 [preprocessor.mermaid]
 command = "mdbook-mermaid"
-