From 057f482069cae4fce02f50ef623fef4d9b715fe4 Mon Sep 17 00:00:00 2001 From: fbrv Date: Mon, 5 Aug 2024 16:44:38 +0100 Subject: [PATCH 1/5] use builder, add max files --- config.example.toml | 3 ++- crates/cli/src/docker_init.rs | 7 ++++--- crates/common/src/config/log.rs | 3 +++ crates/common/src/utils.rs | 26 ++++++++++++++++++-------- 4 files changed, 27 insertions(+), 12 deletions(-) diff --git a/config.example.toml b/config.example.toml index ac054924..a8ac8abe 100644 --- a/config.example.toml +++ b/config.example.toml @@ -43,4 +43,5 @@ docker_image = "test_builder_log" [logs] duration = "daily" host-path="./logs" -rust-log="info" \ No newline at end of file +rust-log="info" +max_log_files = 100 \ No newline at end of file diff --git a/crates/cli/src/docker_init.rs b/crates/cli/src/docker_init.rs index 1465e699..e8599d65 100644 --- a/crates/cli/src/docker_init.rs +++ b/crates/cli/src/docker_init.rs @@ -8,7 +8,7 @@ use cb_common::{ SIGNER_KEYS, SIGNER_KEYS_ENV, SIGNER_SERVER_ENV, }, loader::SignerLoader, - utils::{random_jwt, ENV_ROLLING_DURATION}, + utils::{random_jwt, ENV_MAX_LOG_FILES, ENV_ROLLING_DURATION, ENV_RUST_LOG}, }; use docker_compose_types::{ Compose, ComposeVolume, DependsOnOptions, Environment, Labels, LoggingParameters, MapOrEmpty, @@ -27,8 +27,6 @@ pub(super) const PROMETHEUS_DATA_VOLUME: &str = "prometheus-data"; const METRICS_NETWORK: &str = "monitoring_network"; const SIGNER_NETWORK: &str = "signer_network"; -const ENV_RUST_LOG: &str = "RUST_LOG"; - /// Builds the docker compose file for the Commit-Boost services // TODO: do more validation for paths, images, etc @@ -54,6 +52,9 @@ pub fn handle_docker_init(config_path: String, output_dir: String) -> Result<()> let mut envs = IndexMap::from([(CB_CONFIG_ENV.into(), CB_CONFIG_NAME.into())]); envs.insert(ENV_ROLLING_DURATION.into(), cb_config.logs.duration.to_string()); envs.insert(ENV_RUST_LOG.into(), cb_config.logs.rust_log); + if let Some(max_files) = cb_config.logs.max_log_files { + 
envs.insert(ENV_MAX_LOG_FILES.into(), max_files.to_string()); + } // targets to pass to prometheus let mut targets = Vec::new(); let metrics_port = 10000; diff --git a/crates/common/src/config/log.rs b/crates/common/src/config/log.rs index 3bab57f8..caad3a36 100644 --- a/crates/common/src/config/log.rs +++ b/crates/common/src/config/log.rs @@ -13,6 +13,8 @@ pub struct LogsSettings { pub host_path: PathBuf, #[serde(default, rename = "rust-log")] pub rust_log: String, + #[serde(default, rename = "max-log-files")] + pub max_log_files: Option, } impl Default for LogsSettings { @@ -21,6 +23,7 @@ impl Default for LogsSettings { duration: RollingDuration::Hourly, host_path: "/var/log/pbs".into(), rust_log: "info".to_string(), + max_log_files: None, } } } diff --git a/crates/common/src/utils.rs b/crates/common/src/utils.rs index c7299076..e1499b8b 100644 --- a/crates/common/src/utils.rs +++ b/crates/common/src/utils.rs @@ -11,7 +11,7 @@ use alloy::{ use blst::min_pk::{PublicKey, Signature}; use rand::{distributions::Alphanumeric, Rng}; use reqwest::header::HeaderMap; -use tracing_appender::non_blocking::WorkerGuard; +use tracing_appender::{non_blocking::WorkerGuard, rolling::Rotation}; use tracing_subscriber::{fmt::Layer, prelude::*, EnvFilter}; use crate::{config::CB_BASE_LOG_PATH, types::Chain}; @@ -21,6 +21,10 @@ const MILLIS_PER_SECOND: u64 = 1_000; pub const ENV_ROLLING_DURATION: &str = "ROLLING_DURATION"; +pub const ENV_MAX_LOG_FILES: &str = "MAX_LOG_FILES"; + +pub const ENV_RUST_LOG: &str = "RUST_LOG"; + pub fn timestamp_of_slot_start_millis(slot: u64, chain: Chain) -> u64 { let seconds_since_genesis = chain.genesis_time_sec() + slot * SECONDS_PER_SLOT; seconds_since_genesis * MILLIS_PER_SECOND @@ -118,16 +122,22 @@ pub const fn default_u256() -> U256 { // LOGGING pub fn initialize_tracing_log(module_id: &str) -> WorkerGuard { - let level_env = std::env::var("RUST_LOG").unwrap_or("info".to_owned()); + let level_env = 
std::env::var(ENV_RUST_LOG).unwrap_or("info".to_owned()); // Log all events to a rolling log file. - + let mut builder = tracing_appender::rolling::Builder::new().filename_prefix(module_id); + if let Ok(value) = env::var(ENV_MAX_LOG_FILES) { + builder = + builder.max_log_files(value.parse().expect("MAX_LOG_FILES is not a valid usize value")); + } let log_file = match env::var(ENV_ROLLING_DURATION).unwrap_or("daily".into()).as_str() { - "minutely" => tracing_appender::rolling::minutely(CB_BASE_LOG_PATH, module_id), - "hourly" => tracing_appender::rolling::hourly(CB_BASE_LOG_PATH, module_id), - "daily" => tracing_appender::rolling::daily(CB_BASE_LOG_PATH, module_id), - "never" => tracing_appender::rolling::never(CB_BASE_LOG_PATH, module_id), + "minutely" => builder.rotation(Rotation::MINUTELY), + "hourly" => builder.rotation(Rotation::HOURLY), + "daily" => builder.rotation(Rotation::DAILY), + "never" => builder.rotation(Rotation::NEVER), _ => panic!("unknown rolling duration value"), - }; + } + .build(CB_BASE_LOG_PATH) + .expect("failed building rolling file appender"); let filter = match level_env.parse::() { Ok(f) => f, From 148f05b274b1f1f9f80ed577adec01bf659b34f2 Mon Sep 17 00:00:00 2001 From: fbrv Date: Mon, 5 Aug 2024 17:01:37 +0100 Subject: [PATCH 2/5] env vars --- crates/cli/src/docker_init.rs | 24 ++++++++++++++++++------ crates/common/src/utils.rs | 12 ++++++------ 2 files changed, 24 insertions(+), 12 deletions(-) diff --git a/crates/cli/src/docker_init.rs b/crates/cli/src/docker_init.rs index e8599d65..cf3ad238 100644 --- a/crates/cli/src/docker_init.rs +++ b/crates/cli/src/docker_init.rs @@ -8,7 +8,7 @@ use cb_common::{ SIGNER_KEYS, SIGNER_KEYS_ENV, SIGNER_SERVER_ENV, }, loader::SignerLoader, - utils::{random_jwt, ENV_MAX_LOG_FILES, ENV_ROLLING_DURATION, ENV_RUST_LOG}, + utils::{random_jwt, MAX_LOG_FILES_ENV, ROLLING_DURATION_ENV, RUST_LOG_ENV}, }; use docker_compose_types::{ Compose, ComposeVolume, DependsOnOptions, Environment, Labels, 
LoggingParameters, MapOrEmpty, @@ -50,11 +50,11 @@ pub fn handle_docker_init(config_path: String, output_dir: String) -> Result<()> let mut jwts = IndexMap::new(); // envs to write in .env file let mut envs = IndexMap::from([(CB_CONFIG_ENV.into(), CB_CONFIG_NAME.into())]); - envs.insert(ENV_ROLLING_DURATION.into(), cb_config.logs.duration.to_string()); - envs.insert(ENV_RUST_LOG.into(), cb_config.logs.rust_log); - if let Some(max_files) = cb_config.logs.max_log_files { - envs.insert(ENV_MAX_LOG_FILES.into(), max_files.to_string()); - } + let max_files = if let Some(max_files) = cb_config.logs.max_log_files { + max_files.to_string() + } else { + "".to_string() + }; // targets to pass to prometheus let mut targets = Vec::new(); let metrics_port = 10000; @@ -75,6 +75,9 @@ pub fn handle_docker_init(config_path: String, output_dir: String) -> Result<()> let mut pbs_envs = IndexMap::from([ get_env_same(CB_CONFIG_ENV), get_env_val(METRICS_SERVER_ENV, &metrics_port.to_string()), + get_env_val(ROLLING_DURATION_ENV, &cb_config.logs.duration.to_string()), + get_env_val(RUST_LOG_ENV, &cb_config.logs.rust_log), + get_env_val(MAX_LOG_FILES_ENV, &max_files), ]); let mut needs_signer_module = cb_config.pbs.with_signer; @@ -105,6 +108,9 @@ pub fn handle_docker_init(config_path: String, output_dir: String) -> Result<()> get_env_interp(MODULE_JWT_ENV, &jwt_name), get_env_val(METRICS_SERVER_ENV, &metrics_port.to_string()), get_env_val(SIGNER_SERVER_ENV, &signer_server), + get_env_val(ROLLING_DURATION_ENV, &cb_config.logs.duration.to_string()), + get_env_val(RUST_LOG_ENV, &cb_config.logs.rust_log), + get_env_val(MAX_LOG_FILES_ENV, &max_files), ]); envs.insert(jwt_name.clone(), jwt.clone()); @@ -132,6 +138,9 @@ pub fn handle_docker_init(config_path: String, output_dir: String) -> Result<()> get_env_same(CB_CONFIG_ENV), get_env_val(METRICS_SERVER_ENV, &metrics_port.to_string()), get_env_val(BUILDER_SERVER_ENV, &builder_events_port.to_string()), + get_env_val(ROLLING_DURATION_ENV, 
&cb_config.logs.duration.to_string()), + get_env_val(RUST_LOG_ENV, &cb_config.logs.rust_log), + get_env_val(MAX_LOG_FILES_ENV, &max_files), ]); builder_events_modules.push(format!("{module_cid}:{builder_events_port}")); @@ -191,6 +200,9 @@ pub fn handle_docker_init(config_path: String, output_dir: String) -> Result<()> get_env_same(JWTS_ENV), get_env_val(METRICS_SERVER_ENV, &metrics_port.to_string()), get_env_val(SIGNER_SERVER_ENV, &signer_port.to_string()), + get_env_val(ROLLING_DURATION_ENV, &cb_config.logs.duration.to_string()), + get_env_val(RUST_LOG_ENV, &cb_config.logs.rust_log), + get_env_val(MAX_LOG_FILES_ENV, &max_files), ]); // TODO: generalize this, different loaders may not need volumes but eg ports diff --git a/crates/common/src/utils.rs b/crates/common/src/utils.rs index e1499b8b..0285e6d4 100644 --- a/crates/common/src/utils.rs +++ b/crates/common/src/utils.rs @@ -19,11 +19,11 @@ use crate::{config::CB_BASE_LOG_PATH, types::Chain}; const SECONDS_PER_SLOT: u64 = 12; const MILLIS_PER_SECOND: u64 = 1_000; -pub const ENV_ROLLING_DURATION: &str = "ROLLING_DURATION"; +pub const ROLLING_DURATION_ENV: &str = "ROLLING_DURATION"; -pub const ENV_MAX_LOG_FILES: &str = "MAX_LOG_FILES"; +pub const MAX_LOG_FILES_ENV: &str = "MAX_LOG_FILES"; -pub const ENV_RUST_LOG: &str = "RUST_LOG"; +pub const RUST_LOG_ENV: &str = "RUST_LOG"; pub fn timestamp_of_slot_start_millis(slot: u64, chain: Chain) -> u64 { let seconds_since_genesis = chain.genesis_time_sec() + slot * SECONDS_PER_SLOT; @@ -122,14 +122,14 @@ pub const fn default_u256() -> U256 { // LOGGING pub fn initialize_tracing_log(module_id: &str) -> WorkerGuard { - let level_env = std::env::var(ENV_RUST_LOG).unwrap_or("info".to_owned()); + let level_env = std::env::var(RUST_LOG_ENV).unwrap_or("info".to_owned()); // Log all events to a rolling log file. 
let mut builder = tracing_appender::rolling::Builder::new().filename_prefix(module_id); - if let Ok(value) = env::var(ENV_MAX_LOG_FILES) { + if let Ok(value) = env::var(MAX_LOG_FILES_ENV) { builder = builder.max_log_files(value.parse().expect("MAX_LOG_FILES is not a valid usize value")); } - let log_file = match env::var(ENV_ROLLING_DURATION).unwrap_or("daily".into()).as_str() { + let log_file = match env::var(ROLLING_DURATION_ENV).unwrap_or("daily".into()).as_str() { "minutely" => builder.rotation(Rotation::MINUTELY), "hourly" => builder.rotation(Rotation::HOURLY), "daily" => builder.rotation(Rotation::DAILY), From 3ee44fb9f7416a7f4ac6529920cbaa6bc38f6260 Mon Sep 17 00:00:00 2001 From: ltitanb Date: Mon, 5 Aug 2024 14:33:39 +0100 Subject: [PATCH 3/5] add comments in example config --- config.example.toml | 108 +++++++++++++++++++++++++--- crates/common/src/config/metrics.rs | 2 + crates/common/src/config/pbs.rs | 5 +- 3 files changed, 104 insertions(+), 11 deletions(-) diff --git a/config.example.toml b/config.example.toml index ac054924..56297be6 100644 --- a/config.example.toml +++ b/config.example.toml @@ -1,46 +1,136 @@ +# The main configuration file for the Commit-Boost sidecar. +# Some fields are optional and can be omitted, in which case the default value, if present, will be used. + +# Chain spec id. Supported values: Mainnet, Holesky, Helder chain = "Holesky" +# Configuration for the PBS module [pbs] +# Docker image to use for the PBS module. 
This currently defaults to the image built in `scripts/build_local_images.sh` and will be +# replaced by the official Commit-Boost PBS module once published in a public registry +# OPTIONAL, DEFAULT: commitboost_pbs_default +docker_image = "commitboost_pbs_default" +# Whether to enable the PBS module to request signatures from the Signer module (not used in the default PBS image) +# OPTIONAL, DEFAULT: false +with_signer = false +# Port to receive BuilderAPI calls from beacon node port = 18550 +# Whether to forward `status` calls to relays or skip and return 200 +# OPTIONAL, DEFAULT: true relay_check = true +# Timeout in milliseconds for the `get_header` call to relays. Note that the CL has also a timeout (e.g. 1 second) so +# this should be lower than that, leaving some margin for overhead +# OPTIONAL, DEFAULT: 950 timeout_get_header_ms = 950 +# Timeout in milliseconds for the `submit_blinded_block` call to relays. +# OPTIONAL, DEFAULT: 4000 timeout_get_payload_ms = 4000 +# Timeout in milliseconds for the `register_validator` call to relays. +# OPTIONAL, DEFAULT: 3000 timeout_register_validator_ms = 3000 -skip_sigverify = true +# Whether to skip signature verification of headers against the relay pubkey +# OPTIONAL, DEFAULT: false +skip_sigverify = false +# Minimum bid in ETH that will be accepted from `get_header` +# OPTIONAL, DEFAULT: 0.0 min_bid_eth = 0.0 - +# How late in milliseconds in the slot is "late". This impacts the `get_header` requests, by shortening timeouts for `get_header` calls to +# relays and make sure a header is returned within this deadline. If the request from the CL comes later in the slot, then fetching headers is skipped +# to force local building and miniminzing the risk of missed slots. See also the timing games section below +# OPTIONAL, DEFAULT: 2000 late_in_slot_time_ms = 2000 +# The PBS module needs at least one relay as defined below. 
[[relays]] +# Relay ID to use in telemetry +# OPTIONAL, DEFAULT: URL hostname id = "example-relay" +# Relay URL in the format scheme://pubkey@host url = "http://0xa1cec75a3f0661e99299274182938151e8433c61a19222347ea1313d839229cb4ce4e3e5aa2bdeb71c8fcf1b084963c2@abc.xyz" +# Headers to send with each request for this relay +# OPTIONAL headers = { X-MyCustomHeader = "MyCustomValue" } + +# Whether to enable timing games, as tuned by `target_first_request_ms` and `frequency_get_header_ms`. +# These values should be carefully chosen for each relay, as each relay has different latency and timing games setups. +# They should only be used by advanced users, and if mis-configured can result in unforeseen effects, e.g. fetching a lower header value, +# or getting a temporary IP ban. +# +# EXAMPLES +# Assuming: timeout_get_header_ms = 950, frequency_get_header_ms = 300, target_first_request_ms = 200, late_in_slot_time_ms = 2000 +# +# 1) CL request comes at 100ms in the slot (max timeout 1050ms in the slot), then: +# - sleep for 100ms +# - send request at 200ms with 850ms timeout +# - send request at 500ms with 550ms timeout +# - send request at 800ms with 250ms timeout +# 2) CL request comes at 1500ms in the slot (max timeout 2000ms in the slot), then: +# - send request at 1500ms with 500ms timeout +# - send request at 1800ms with 200ms timeout +# 3) CL request comes 2500ms in the slot then: +# - return 204 and force local build +# +# OPTIONAL, DEFAULT: false enable_timing_games = false +# Target time in slot when to send the first header request +# OPTIONAL target_first_request_ms = 200 +# Frequency in ms to send get_header requests +# OPTIONAL frequency_get_header_ms = 300 +# Configuration for the Signer Module, only required if any `commit` module is present, or if `pbs.with_signer = true` +# OPTIONAL [signer] +# Docker image to use for the Signer module. 
This currently defaults to the image built in `scripts/build_local_images.sh` and will be +# replaced by the official Commit-Boost Signer module once published in a public registry +# OPTIONAL, DEFAULT: commitboost_signer +docker_image = "commitboost_signer" +# Configuration for how the Signer module should load validator keys. Currently two types of loaders are supported: +# - File: load keys from a plain text file (unsafe, use only for testing purposes) +# - ValidatorsDir: load keys from a `keys` and `secrets` folder (ERC-2335 style keystores as used in Lighthouse) [signer.loader] +# File: path to the keys file key_path = "./keys.example.json" + +# ValidatorsDir: path to the keys directory # keys_path = "" +# ValidatorsDir: path to the secrets directory # secrets_path = "" +# Configuration for how metrics should be collected and scraped [metrics] +# Path to a `prometheus.yml` file to use in Prometheus. If using a custom config file, be sure to add a +# file discovery section as follows: +# ```yml +# file_sd_configs: +# - files: +# - /etc/prometheus/targets.json +# ``` +# and use the `targets.json` file generated by `commit-boost init` prometheus_config = "./docker/prometheus.yml" +# Whether to start Grafana with built-in dashboards +# OPTIONAL, DEFAULT: true use_grafana = true +# Commit-Boost can optionally run "modules" which extend the capabilities of the sidecar. +# Currently, two types of modules are supported: +# - "commit": modules which request commitment signatures from the validator keys +# - "events": modules which callback to BuilderAPI events as triggered from the PBS modules, used e.g. for monitoring +# If any "commit" module is present, then the [signer] section should also be configured +# OPTIONAL [[modules]] +# Unique ID of the module id = "DA_COMMIT" +# Type of the module. 
Supported values: commit, events type = "commit" +# Docker image of the module docker_image = "test_da_commit" +# Additional config needed by the business logic of the module should also be set here. +# See also `examples/da_commit/src/main.rs` for more information sleep_secs = 5 -[[modules]] -id = "BUILDER_LOG" -type = "events" -docker_image = "test_builder_log" - [logs] duration = "daily" -host-path="./logs" -rust-log="info" \ No newline at end of file +host-path = "./logs" +rust-log = "info" diff --git a/crates/common/src/config/metrics.rs b/crates/common/src/config/metrics.rs index e0865aad..777eb50f 100644 --- a/crates/common/src/config/metrics.rs +++ b/crates/common/src/config/metrics.rs @@ -2,12 +2,14 @@ use eyre::Result; use serde::{Deserialize, Serialize}; use super::{constants::METRICS_SERVER_ENV, load_env_var}; +use crate::utils::default_bool; #[derive(Debug, Serialize, Deserialize, Clone)] pub struct MetricsConfig { /// Path to prometheus config file pub prometheus_config: String, /// Whether to start a grafana service + #[serde(default = "default_bool::")] pub use_grafana: bool, } diff --git a/crates/common/src/config/pbs.rs b/crates/common/src/config/pbs.rs index 7c46cdd5..1014ce74 100644 --- a/crates/common/src/config/pbs.rs +++ b/crates/common/src/config/pbs.rs @@ -19,7 +19,7 @@ use crate::{ pub struct RelayConfig { /// Relay ID, if missing will default to the URL hostname from the entry pub id: Option, - /// Relay in the form of pubkey@url + /// Relay in the form of scheme://pubkey@host #[serde(rename = "url")] pub entry: RelayEntry, /// Optional headers to send with each request @@ -37,7 +37,8 @@ pub struct RelayConfig { pub struct PbsConfig { /// Port to receive BuilderAPI calls from beacon node pub port: u16, - /// Whether to forward `get_status`` to relays or skip it + /// Whether to forward `get_status` to relays or skip it + #[serde(default = "default_bool::")] pub relay_check: bool, /// Timeout for get_header request in milliseconds 
#[serde(default = "default_u64::<{ DefaultTimeout::GET_HEADER_MS }>")] From c44e854265dbe5b60755f7dce0210a11b21779ed Mon Sep 17 00:00:00 2001 From: ltitanb Date: Mon, 5 Aug 2024 14:56:09 +0100 Subject: [PATCH 4/5] update docs --- config.example.toml | 3 +-- crates/common/src/config/metrics.rs | 2 +- docs/docs/get_started/configuration.md | 32 +++++++++++++++----------- 3 files changed, 21 insertions(+), 16 deletions(-) diff --git a/config.example.toml b/config.example.toml index 56297be6..f350872c 100644 --- a/config.example.toml +++ b/config.example.toml @@ -40,7 +40,7 @@ min_bid_eth = 0.0 # OPTIONAL, DEFAULT: 2000 late_in_slot_time_ms = 2000 -# The PBS module needs at least one relay as defined below. +# The PBS module needs one or more [[relays]] as defined below. [[relays]] # Relay ID to use in telemetry # OPTIONAL, DEFAULT: URL hostname @@ -50,7 +50,6 @@ url = "http://0xa1cec75a3f0661e99299274182938151e8433c61a19222347ea1313d839229cb # Headers to send with each request for this relay # OPTIONAL headers = { X-MyCustomHeader = "MyCustomValue" } - # Whether to enable timing games, as tuned by `target_first_request_ms` and `frequency_get_header_ms`. # These values should be carefully chosen for each relay, as each relay has different latency and timing games setups. # They should only be used by advanced users, and if mis-configured can result in unforeseen effects, e.g. 
fetching a lower header value, diff --git a/crates/common/src/config/metrics.rs b/crates/common/src/config/metrics.rs index 777eb50f..6a2ae873 100644 --- a/crates/common/src/config/metrics.rs +++ b/crates/common/src/config/metrics.rs @@ -9,7 +9,7 @@ pub struct MetricsConfig { /// Path to prometheus config file pub prometheus_config: String, /// Whether to start a grafana service - #[serde(default = "default_bool::")] + #[serde(default = "default_bool::")] pub use_grafana: bool, } diff --git a/docs/docs/get_started/configuration.md b/docs/docs/get_started/configuration.md index 9e7740d9..085d0e08 100644 --- a/docs/docs/get_started/configuration.md +++ b/docs/docs/get_started/configuration.md @@ -6,23 +6,23 @@ sidebar_position: 2 After the Commit-Boost CLI is setup as detailed in the previous section, you will need to create a `cb-config.toml` file, detailing all the services that you want to run. +For a full explanation of all the fields, check out [here](https://github.com/Commit-Boost/commit-boost-client/blob/main/config.example.toml). + ## Minimal PBS setup on Holesky ```toml chain = "Holesky" [pbs] port = 18550 -# Add relays enpoints here -relays = [] -relay_check = true + +[[relays]] +url = "" [metrics] prometheus_config = "./docker/prometheus.yml" -use_grafana = true ``` You can find a list of MEV-Boost Holesky relays [here](https://www.coincashew.com/coins/overview-eth/mev-boost/mev-relay-list#holesky-testnet-relays). -For the full config parameters, check out [here](https://github.com/Commit-Boost/commit-boost-client/blob/main/config.example.toml#L4-L11) (more detailed docs incoming). Note that in this setup, the signer module will not be started. ## Custom module @@ -30,33 +30,39 @@ We currently provide a test module that needs to be built as a Docker image. To ```bash bash scripts/build_local_module.sh ``` -This will create a Docker image called `test_da_commit` that periodically requests signatures from the validator. 
+This will create a Docker image called `test_da_commit` that periodically requests signatures from the validator, and a `test_builder_log` module that logs BuilderAPI events. The `cb-config.toml` file needs to be updated as follows: ```toml [pbs] port = 18550 -# Add relays enpoints here -relays = [] -relay_check = true + +[[relays]] +# Add a relay URL here +url = "" [signer] [signer.loader] keys_path = "/path/to/keys" secrets_path = "/path/to.secrets" +[metrics] +prometheus_config = "./docker/prometheus.yml" + [[modules]] id = "DA_COMMIT" +type = "commit" docker_image = "test_da_commit" sleep_secs = 5 -[metrics] -prometheus_config = "./docker/prometheus.yml" -use_grafana = true +[[modules]] +id = "BUILDER_LOG" +type = "events" +docker_image = "test_da_commit" ``` A few things to note: - We now added a `signer` section which will be used to create the Signer module. To load keys in the module, we currently support the Lighthouse `validators_dir` keys and secrets. We're working on adding support for additional keystores, including remote signers. -- There is now a `[[module]]` section which at a minimum needs to specify the module `id` and `docker_image`. Additional parameters needed for the business logic of the module will also be here, +- There is now a `[[modules]]` section which at a minimum needs to specify the module `id`, `type` and `docker_image`. Additional parameters needed for the business logic of the module will also be here, To learn more about developing modules, check out [here](/category/developing). 
\ No newline at end of file From c5fc0650cf77667d92f470701e16ed9dcf9c49b7 Mon Sep 17 00:00:00 2001 From: ltitanb Date: Mon, 5 Aug 2024 20:40:43 +0100 Subject: [PATCH 5/5] fixes --- .gitignore | 1 + Cargo.toml | 2 +- config.example.toml | 49 ++++++++------ crates/cli/src/docker_init.rs | 62 +++++++++++------- crates/cli/src/lib.rs | 112 -------------------------------- crates/common/src/config/log.rs | 39 ++++++----- crates/common/src/utils.rs | 72 +++++++++++++------- 7 files changed, 136 insertions(+), 201 deletions(-) diff --git a/.gitignore b/.gitignore index 1dfbe15c..0085848c 100644 --- a/.gitignore +++ b/.gitignore @@ -17,3 +17,4 @@ Cargo.lock *.docker-compose.yml targets.json .idea/ +logs \ No newline at end of file diff --git a/Cargo.toml b/Cargo.toml index 279fdb12..287d82c3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -53,7 +53,7 @@ serde_yaml = "0.9.33" # telemetry tracing = "0.1.40" -tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } +tracing-subscriber = { version = "0.3.18", features = ["env-filter", "json"] } tracing-appender = "0.2.3" prometheus = "0.13.4" diff --git a/config.example.toml b/config.example.toml index 424bbe48..eec19657 100644 --- a/config.example.toml +++ b/config.example.toml @@ -91,27 +91,11 @@ docker_image = "commitboost_signer" [signer.loader] # File: path to the keys file key_path = "./keys.example.json" - # ValidatorsDir: path to the keys directory # keys_path = "" # ValidatorsDir: path to the secrets directory # secrets_path = "" -# Configuration for how metrics should be collected and scraped -[metrics] -# Path to a `prometheus.yml` file to use in Prometheus. 
If using a custom config file, be sure to add a -# file discovery section as follows: -# ```yml -# file_sd_configs: -# - files: -# - /etc/prometheus/targets.json -# ``` -# and use the `targets.json` file generated by `commit-boost init` -prometheus_config = "./docker/prometheus.yml" -# Whether to start Grafana with built-in dashboards -# OPTIONAL, DEFAULT: true -use_grafana = true - # Commit-Boost can optionally run "modules" which extend the capabilities of the sidecar. # Currently, two types of modules are supported: # - "commit": modules which request commitment signatures from the validator keys @@ -129,8 +113,33 @@ docker_image = "test_da_commit" # See also `examples/da_commit/src/main.rs` for more information sleep_secs = 5 +# Configuration for how metrics should be collected and scraped +[metrics] +# Path to a `prometheus.yml` file to use in Prometheus. If using a custom config file, be sure to add a +# file discovery section as follows: +# ```yml +# file_sd_configs: +# - files: +# - /etc/prometheus/targets.json +# ``` +# and use the `targets.json` file generated by `commit-boost init` +prometheus_config = "./docker/prometheus.yml" +# Whether to start Grafana with built-in dashboards +# OPTIONAL, DEFAULT: true +use_grafana = true + +# Configuration for how logs should be collected and stored +# OPTIONAL [logs] -duration = "daily" -host-path = "./logs" -rust-log = "info" -max_log_files = 100 +# Log rotation policy. Supported values: hourly, daily, never +# OPTIONAL, DEFAULT: daily +rotation = "daily" +# Path to the log directory +# OPTIONAL, DEFAULT: /var/logs/commit-boost +log_dir_path = "./logs" +# Log level. 
Supported values: trace, debug, info, warn, error +# OPTIONAL, DEFAULT: debug to file, info to stdout +log_level = "debug" +# Maximum number of log files to keep +# OPTIONAL +max_log_files = 30 diff --git a/crates/cli/src/docker_init.rs b/crates/cli/src/docker_init.rs index cf3ad238..1625fc86 100644 --- a/crates/cli/src/docker_init.rs +++ b/crates/cli/src/docker_init.rs @@ -43,18 +43,14 @@ pub fn handle_docker_init(config_path: String, output_dir: String) -> Result<()> let config_volume = Volumes::Simple(format!("./{}:{}:ro", config_path, CB_CONFIG_NAME)); let log_volume = Volumes::Simple(format!( "{}:{}", - cb_config.logs.host_path.to_str().unwrap(), + cb_config.logs.log_dir_path.to_str().unwrap(), CB_BASE_LOG_PATH )); let mut jwts = IndexMap::new(); // envs to write in .env file let mut envs = IndexMap::from([(CB_CONFIG_ENV.into(), CB_CONFIG_NAME.into())]); - let max_files = if let Some(max_files) = cb_config.logs.max_log_files { - max_files.to_string() - } else { - "".to_string() - }; + // targets to pass to prometheus let mut targets = Vec::new(); let metrics_port = 10000; @@ -74,11 +70,14 @@ pub fn handle_docker_init(config_path: String, output_dir: String) -> Result<()> let mut pbs_envs = IndexMap::from([ get_env_same(CB_CONFIG_ENV), - get_env_val(METRICS_SERVER_ENV, &metrics_port.to_string()), - get_env_val(ROLLING_DURATION_ENV, &cb_config.logs.duration.to_string()), - get_env_val(RUST_LOG_ENV, &cb_config.logs.rust_log), - get_env_val(MAX_LOG_FILES_ENV, &max_files), + get_env_uval(METRICS_SERVER_ENV, metrics_port as u64), + get_env_val(ROLLING_DURATION_ENV, &cb_config.logs.rotation.to_string()), + get_env_val(RUST_LOG_ENV, &cb_config.logs.log_level), ]); + if let Some(max_files) = cb_config.logs.max_log_files { + let (key, val) = get_env_uval(MAX_LOG_FILES_ENV, max_files as u64); + pbs_envs.insert(key, val); + } let mut needs_signer_module = cb_config.pbs.with_signer; @@ -102,16 +101,19 @@ pub fn handle_docker_init(config_path: String, output_dir: String) -> 
Result<()> let jwt_name = format!("CB_JWT_{}", module.id.to_uppercase()); // module ids are assumed unique, so envs dont override each other - let module_envs = IndexMap::from([ + let mut module_envs = IndexMap::from([ get_env_val(MODULE_ID_ENV, &module.id), get_env_same(CB_CONFIG_ENV), get_env_interp(MODULE_JWT_ENV, &jwt_name), - get_env_val(METRICS_SERVER_ENV, &metrics_port.to_string()), + get_env_uval(METRICS_SERVER_ENV, metrics_port as u64), get_env_val(SIGNER_SERVER_ENV, &signer_server), - get_env_val(ROLLING_DURATION_ENV, &cb_config.logs.duration.to_string()), - get_env_val(RUST_LOG_ENV, &cb_config.logs.rust_log), - get_env_val(MAX_LOG_FILES_ENV, &max_files), + get_env_val(ROLLING_DURATION_ENV, &cb_config.logs.rotation.to_string()), + get_env_val(RUST_LOG_ENV, &cb_config.logs.log_level), ]); + if let Some(max_files) = cb_config.logs.max_log_files { + let (key, val) = get_env_uval(MAX_LOG_FILES_ENV, max_files as u64); + module_envs.insert(key, val); + } envs.insert(jwt_name.clone(), jwt.clone()); jwts.insert(module.id.clone(), jwt); @@ -133,15 +135,18 @@ pub fn handle_docker_init(config_path: String, output_dir: String) -> Result<()> // an event module just needs a port to listen on ModuleKind::Events => { // module ids are assumed unique, so envs dont override each other - let module_envs = IndexMap::from([ + let mut module_envs = IndexMap::from([ get_env_val(MODULE_ID_ENV, &module.id), get_env_same(CB_CONFIG_ENV), - get_env_val(METRICS_SERVER_ENV, &metrics_port.to_string()), + get_env_uval(METRICS_SERVER_ENV, metrics_port as u64), get_env_val(BUILDER_SERVER_ENV, &builder_events_port.to_string()), - get_env_val(ROLLING_DURATION_ENV, &cb_config.logs.duration.to_string()), - get_env_val(RUST_LOG_ENV, &cb_config.logs.rust_log), - get_env_val(MAX_LOG_FILES_ENV, &max_files), + get_env_val(ROLLING_DURATION_ENV, &cb_config.logs.rotation.to_string()), + get_env_val(RUST_LOG_ENV, &cb_config.logs.log_level), ]); + if let Some(max_files) = cb_config.logs.max_log_files { 
+ let (key, val) = get_env_uval(MAX_LOG_FILES_ENV, max_files as u64); + module_envs.insert(key, val); + } builder_events_modules.push(format!("{module_cid}:{builder_events_port}")); @@ -198,12 +203,15 @@ pub fn handle_docker_init(config_path: String, output_dir: String) -> Result<()> let mut signer_envs = IndexMap::from([ get_env_same(CB_CONFIG_ENV), get_env_same(JWTS_ENV), - get_env_val(METRICS_SERVER_ENV, &metrics_port.to_string()), - get_env_val(SIGNER_SERVER_ENV, &signer_port.to_string()), - get_env_val(ROLLING_DURATION_ENV, &cb_config.logs.duration.to_string()), - get_env_val(RUST_LOG_ENV, &cb_config.logs.rust_log), - get_env_val(MAX_LOG_FILES_ENV, &max_files), + get_env_uval(METRICS_SERVER_ENV, metrics_port as u64), + get_env_uval(SIGNER_SERVER_ENV, signer_port as u64), + get_env_val(ROLLING_DURATION_ENV, &cb_config.logs.rotation.to_string()), + get_env_val(RUST_LOG_ENV, &cb_config.logs.log_level), ]); + if let Some(max_files) = cb_config.logs.max_log_files { + let (key, val) = get_env_uval(MAX_LOG_FILES_ENV, max_files as u64); + signer_envs.insert(key, val); + } // TODO: generalize this, different loaders may not need volumes but eg ports match signer_config.loader { @@ -375,6 +383,10 @@ fn get_env_val(k: &str, v: &str) -> (String, Option) { (k.into(), Some(SingleValue::String(v.into()))) } +fn get_env_uval(k: &str, v: u64) -> (String, Option) { + (k.into(), Some(SingleValue::Unsigned(v))) +} + /// A prometheus target, use to dynamically add targets to the prometheus config #[derive(Debug, Serialize)] struct PrometheusTargetConfig { diff --git a/crates/cli/src/lib.rs b/crates/cli/src/lib.rs index 20004134..9a6775e4 100644 --- a/crates/cli/src/lib.rs +++ b/crates/cli/src/lib.rs @@ -84,115 +84,3 @@ impl Args { } } } - -// Command::Start2 { config: config_path } => { -// let config = CommitBoostConfig::from_file(&config_path); -// let signer_config = config.signer.expect("missing signer config with -// modules"); let metrics_config = 
config.metrics.clone().expect("missing -// metrics config"); - -// // TODO: Actually generate this token -// let pbs_jwt = "MY_PBS_TOKEN"; -// const MODULE_JWT: &str = "JWT_FIXME"; - -// // Initialize Docker client -// let docker = bollard::Docker::connect_with_local_defaults() -// .expect("Failed to connect to Docker"); - -// if let Some(modules) = config.modules { -// let jwts: HashMap = -// iter::once((DEFAULT_PBS_JWT_KEY.into(), pbs_jwt.into())) -// .chain(modules.iter().map(|module| -// // TODO: Generate token instead of hard-coding it. Think -// about persisting it across the project. ( -// module.id.clone(), -// MODULE_JWT.into() -// // format!("JWT_{}", module.id) -// ))) -// .collect(); - -// // start signing server -// tokio::spawn(SigningService::run( -// config.chain, -// signer_config.clone(), -// jwts.clone(), -// )); - -// for module in modules { -// info!("Module: {}", module.id); -// let container_config = bollard::container::Config { -// image: Some(module.docker_image.clone()), -// host_config: Some(bollard::secret::HostConfig { -// binds: { -// let full_config_path = -// std::fs::canonicalize(&config_path) .unwrap() -// .to_string_lossy() -// .to_string(); -// Some(vec![format!("{}:{}", full_config_path, -// "/config.toml")]) }, -// network_mode: Some(String::from("host")), // Use the host -// network ..Default::default() -// }), -// env: { -// let metrics_server_url = metrics_config.address; -// info!("Metrics"); -// Some(vec![ -// format!("{}={}", MODULE_ID_ENV, module.id), -// format!("{}={}", CB_CONFIG_ENV, "/config.toml"), -// format!("{}={}", MODULE_JWT_ENV, -// jwts.get(&module.id).unwrap()), format!("{}={}", -// METRICS_SERVER_ENV, metrics_server_url), ]) -// }, -// ..Default::default() -// }; - -// let container = docker -// .create_container::<&str, String>(None, container_config) -// .await -// .expect("failed to create container"); -// info!("create cid: {:?}", container.id); - -// let container_id = container.id; - -// // start 
monitoring tasks for spawned modules -// let metrics_config = metrics_config.clone(); -// let cid = container_id.clone(); -// tokio::spawn(async move { -// DockerMetricsCollector::new( -// vec![cid], -// metrics_config.address.clone(), -// // FIXME: The entire DockerMetricsCollector currently -// works with a // single JWT; need to migrate to per-module -// JWT. MODULE_JWT.to_string(), -// ) -// .await -// }); - -// docker.start_container::(&container_id, None).await?; -// println!( -// "Started container: {} from image {}", -// container_id, module.docker_image -// ); -// } -// } - -// // start pbs server -// // if let Some(pbs_path) = config.pbs.path { -// // let cmd = std::process::Command::new(pbs_path) -// // .env(CB_CONFIG_ENV, &config_path) -// // .env(MODULE_JWT_ENV, pbs_jwt) -// // .stdout(Stdio::inherit()) -// // .stderr(Stdio::inherit()) -// // .output() -// // .expect("failed to start pbs module"); - -// // if !cmd.status.success() { -// // eprintln!("Process failed with status: {}", cmd.status); -// // } -// // } else { -// // let state = PbsState::<()>::new(config.chain, config.pbs); -// // PbsService::run::<(), DefaultBuilderApi>(state).await; -// // }; - -// Ok(()) -// } diff --git a/crates/common/src/config/log.rs b/crates/common/src/config/log.rs index caad3a36..3e48a24d 100644 --- a/crates/common/src/config/log.rs +++ b/crates/common/src/config/log.rs @@ -5,34 +5,44 @@ use std::{ use serde::{Deserialize, Serialize}; +use super::CB_BASE_LOG_PATH; + #[derive(Clone, Debug, Deserialize, Serialize)] pub struct LogsSettings { #[serde(default)] - pub duration: RollingDuration, - #[serde(default, rename = "host-path")] - pub host_path: PathBuf, - #[serde(default, rename = "rust-log")] - pub rust_log: String, - #[serde(default, rename = "max-log-files")] + pub rotation: RollingDuration, + #[serde(default = "default_log_dir_path")] + pub log_dir_path: PathBuf, + #[serde(default = "default_log_level")] + pub log_level: String, + #[serde(default)] pub 
max_log_files: Option, } impl Default for LogsSettings { fn default() -> Self { Self { - duration: RollingDuration::Hourly, - host_path: "/var/log/pbs".into(), - rust_log: "info".to_string(), + rotation: RollingDuration::default(), + log_dir_path: default_log_dir_path(), + log_level: default_log_level(), max_log_files: None, } } } -#[derive(Clone, Debug, Deserialize, Serialize)] +fn default_log_dir_path() -> PathBuf { + CB_BASE_LOG_PATH.into() +} + +pub fn default_log_level() -> String { + "info".into() +} + +#[derive(Clone, Default, Debug, Deserialize, Serialize)] #[serde(rename_all = "lowercase")] pub enum RollingDuration { - Minutely, Hourly, + #[default] Daily, Never, } @@ -40,16 +50,9 @@ pub enum RollingDuration { impl Display for RollingDuration { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { match self { - RollingDuration::Minutely => write!(f, "minutely"), RollingDuration::Hourly => write!(f, "hourly"), RollingDuration::Daily => write!(f, "daily"), RollingDuration::Never => write!(f, "never"), } } } - -impl Default for RollingDuration { - fn default() -> Self { - Self::Daily - } -} diff --git a/crates/common/src/utils.rs b/crates/common/src/utils.rs index 0285e6d4..e4707812 100644 --- a/crates/common/src/utils.rs +++ b/crates/common/src/utils.rs @@ -1,6 +1,5 @@ use std::{ env, - str::FromStr, time::{SystemTime, UNIX_EPOCH}, }; @@ -11,10 +10,14 @@ use alloy::{ use blst::min_pk::{PublicKey, Signature}; use rand::{distributions::Alphanumeric, Rng}; use reqwest::header::HeaderMap; +use tracing::Level; use tracing_appender::{non_blocking::WorkerGuard, rolling::Rotation}; use tracing_subscriber::{fmt::Layer, prelude::*, EnvFilter}; -use crate::{config::CB_BASE_LOG_PATH, types::Chain}; +use crate::{ + config::{default_log_level, RollingDuration, CB_BASE_LOG_PATH}, + types::Chain, +}; const SECONDS_PER_SLOT: u64 = 12; const MILLIS_PER_SECOND: u64 = 1_000; @@ -122,45 +125,64 @@ pub const fn default_u256() -> U256 { // LOGGING pub fn 
initialize_tracing_log(module_id: &str) -> WorkerGuard { - let level_env = std::env::var(RUST_LOG_ENV).unwrap_or("info".to_owned()); // Log all events to a rolling log file. - let mut builder = tracing_appender::rolling::Builder::new().filename_prefix(module_id); + let mut builder = + tracing_appender::rolling::Builder::new().filename_prefix(module_id.to_lowercase()); if let Ok(value) = env::var(MAX_LOG_FILES_ENV) { builder = builder.max_log_files(value.parse().expect("MAX_LOG_FILES is not a valid usize value")); } - let log_file = match env::var(ROLLING_DURATION_ENV).unwrap_or("daily".into()).as_str() { - "minutely" => builder.rotation(Rotation::MINUTELY), - "hourly" => builder.rotation(Rotation::HOURLY), - "daily" => builder.rotation(Rotation::DAILY), - "never" => builder.rotation(Rotation::NEVER), - _ => panic!("unknown rolling duration value"), - } - .build(CB_BASE_LOG_PATH) - .expect("failed building rolling file appender"); - let filter = match level_env.parse::() { + let rotation = match env::var(ROLLING_DURATION_ENV) + .unwrap_or(RollingDuration::default().to_string()) + .as_str() + { + "hourly" => Rotation::HOURLY, + "daily" => Rotation::DAILY, + "never" => Rotation::NEVER, + _ => panic!("unknown rotation value"), + }; + + let log_file = builder + .rotation(rotation) + .build(CB_BASE_LOG_PATH) + .expect("failed building rolling file appender"); + + let level_env = std::env::var(RUST_LOG_ENV).unwrap_or(default_log_level()); + + // Log level for stdout + let stdout_log_level = match level_env.parse::() { Ok(f) => f, Err(_) => { eprintln!("Invalid RUST_LOG value {}, defaulting to info", level_env); - EnvFilter::new("info") + Level::INFO } }; - let logging_level = if matches!(level_env.as_str(), "info" | "warning" | "error") { - tracing::Level::DEBUG - } else { - tracing::Level::from_str(&level_env) - .unwrap_or_else(|_| panic!("invalid value for tracing. 
Got {level_env}")) -    };      let stdout_log = tracing_subscriber::fmt::layer().pretty(); -    let (default, guard) = tracing_appender::non_blocking(log_file); -    let log_file = Layer::new().with_writer(default.with_max_level(logging_level)); -    tracing_subscriber::registry().with(stdout_log.with_filter(filter).and_then(log_file)).init(); +    // at least debug for file logs +    let file_log_level = stdout_log_level.max(Level::DEBUG); + +    let stdout_log_filter = format_crates_filter(Level::INFO.as_str(), stdout_log_level.as_str()); +    let file_log_filter = format_crates_filter(Level::INFO.as_str(), file_log_level.as_str()); + +    let stdout_log = tracing_subscriber::fmt::layer().with_filter(stdout_log_filter); +    let (default_writer, guard) = tracing_appender::non_blocking(log_file); +    let log_file = Layer::new().json().with_writer(default_writer).with_filter(file_log_filter); + +    tracing_subscriber::registry().with(stdout_log.and_then(log_file)).init();      guard }  +// all commit boost crates +// TODO: this can probably be done without unwrap +fn format_crates_filter(default_level: &str, crates_level: &str) -> EnvFilter { +    let s = format!( +        "{default_level},cb-signer={crates_level},cb-pbs={crates_level},cb-common={crates_level},cb-metrics={crates_level}", +    ); +    s.parse().unwrap() +} + pub fn print_logo() {     println!(         r#" ______                         _ _   ____                  __