diff --git a/core/bin/external_node/src/main.rs b/core/bin/external_node/src/main.rs
index f239ff125fb6..bcd9415069f2 100644
--- a/core/bin/external_node/src/main.rs
+++ b/core/bin/external_node/src/main.rs
@@ -35,7 +35,8 @@ use zksync_node_consensus as consensus;
use zksync_node_db_pruner::{DbPruner, DbPrunerConfig};
use zksync_node_fee_model::l1_gas_price::MainNodeFeeParamsFetcher;
use zksync_node_sync::{
- batch_status_updater::BatchStatusUpdater, external_io::ExternalIO, ActionQueue, SyncState,
+ batch_status_updater::BatchStatusUpdater, external_io::ExternalIO,
+ tree_data_fetcher::TreeDataFetcher, ActionQueue, SyncState,
};
use zksync_reorg_detector::ReorgDetector;
use zksync_state::{PostgresStorageCaches, RocksdbStorageOptions};
@@ -625,6 +626,16 @@ async fn init_tasks(
None
};
+ if components.contains(&Component::TreeFetcher) {
+ tracing::warn!(
+ "Running tree data fetcher (allows a node to operate w/o a Merkle tree or w/o waiting the tree to catch up). \
+ This is an experimental feature; do not use unless you know what you're doing"
+ );
+ let fetcher = TreeDataFetcher::new(main_node_client.clone(), connection_pool.clone());
+ app_health.insert_component(fetcher.health_check())?;
+ task_handles.push(tokio::spawn(fetcher.run(stop_receiver.clone())));
+ }
+
let fee_params_fetcher = Arc::new(MainNodeFeeParamsFetcher::new(main_node_client.clone()));
let sync_state = if components.contains(&Component::Core) {
@@ -722,6 +733,7 @@ pub enum Component {
WsApi,
Tree,
TreeApi,
+ TreeFetcher,
Core,
}
@@ -733,6 +745,7 @@ impl Component {
"ws_api" => Ok(&[Component::WsApi]),
"tree" => Ok(&[Component::Tree]),
"tree_api" => Ok(&[Component::TreeApi]),
+ "tree_fetcher" => Ok(&[Component::TreeFetcher]),
"core" => Ok(&[Component::Core]),
"all" => Ok(&[
Component::HttpApi,
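The wiring above relies on just three things from `TreeDataFetcher`: a constructor taking a main-node client and a connection pool, a `health_check()` accessor, and a `run(stop_receiver)` future spawned as a task. Below is a minimal, self-contained sketch of the stop-signal pattern used here; `MockFetcher` is a stand-in for the real type, and its loop body is an assumption for illustration. Per the parsing hunk above, the component is presumably enabled by listing `tree_fetcher` in the node's components option.

```rust
// Sketch of the cancellable-task pattern used in init_tasks: each component
// receives a clone of a watch-based stop receiver and exits once the flag
// flips to `true`. `MockFetcher` is hypothetical, not the real TreeDataFetcher.
use tokio::sync::watch;

struct MockFetcher;

impl MockFetcher {
    async fn run(self, mut stop_receiver: watch::Receiver<bool>) -> anyhow::Result<()> {
        loop {
            if *stop_receiver.borrow_and_update() {
                return Ok(()); // stop flag observed; shut down gracefully
            }
            // The real fetcher would query the main node for tree data of the
            // next L1 batch and persist it to Postgres at this point.
            if stop_receiver.changed().await.is_err() {
                return Ok(()); // sender dropped; treat as a stop signal
            }
        }
    }
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let (stop_sender, stop_receiver) = watch::channel(false);
    let handle = tokio::spawn(MockFetcher.run(stop_receiver.clone()));
    stop_sender.send(true)?; // simulate node shutdown
    handle.await??;
    Ok(())
}
```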
diff --git a/core/lib/dal/.sqlx/query-16e1a17bfc426bb32489595bd8cccb1ef34292fcf694deddc06b6dd5b72a02f3.json b/core/lib/dal/.sqlx/query-16e1a17bfc426bb32489595bd8cccb1ef34292fcf694deddc06b6dd5b72a02f3.json
new file mode 100644
index 000000000000..479bc818b9bb
--- /dev/null
+++ b/core/lib/dal/.sqlx/query-16e1a17bfc426bb32489595bd8cccb1ef34292fcf694deddc06b6dd5b72a02f3.json
@@ -0,0 +1,22 @@
+{
+ "db_name": "PostgreSQL",
+ "query": "\n SELECT\n MAX(INDEX) AS \"max?\"\n FROM\n initial_writes\n WHERE\n l1_batch_number = $1\n ",
+ "describe": {
+ "columns": [
+ {
+ "ordinal": 0,
+ "name": "max?",
+ "type_info": "Int8"
+ }
+ ],
+ "parameters": {
+ "Left": [
+ "Int8"
+ ]
+ },
+ "nullable": [
+ null
+ ]
+ },
+ "hash": "16e1a17bfc426bb32489595bd8cccb1ef34292fcf694deddc06b6dd5b72a02f3"
+}
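For readers unfamiliar with sqlx's offline mode: files under `core/lib/dal/.sqlx/` cache query metadata so that `sqlx::query!` invocations type-check without a live database. A hedged sketch of the call site this new cache entry plausibly backs follows; the wrapper function and its name are assumptions, not taken from the diff.

```rust
// Plausible call site for the cached query above. `MAX(index)` is NULL for an
// L1 batch with no initial writes, hence the `"max?"` alias, which tells sqlx
// to type the column as Option<i64> despite the aggregate always yielding a row.
use sqlx::PgPool;

async fn max_initial_write_index(
    pool: &PgPool,
    l1_batch_number: i64,
) -> sqlx::Result<Option<i64>> {
    let row = sqlx::query!(
        r#"
        SELECT
            MAX(index) AS "max?"
        FROM
            initial_writes
        WHERE
            l1_batch_number = $1
        "#,
        l1_batch_number
    )
    .fetch_one(pool)
    .await?;
    Ok(row.max)
}
```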
diff --git a/core/lib/dal/.sqlx/query-df3256c012f86a9cd3b9260b97be5c6feb8059722149a747c4b6bd46731e2536.json b/core/lib/dal/.sqlx/query-df3256c012f86a9cd3b9260b97be5c6feb8059722149a747c4b6bd46731e2536.json
deleted file mode 100644
index 552b6ab2cc8e..000000000000
--- a/core/lib/dal/.sqlx/query-df3256c012f86a9cd3b9260b97be5c6feb8059722149a747c4b6bd46731e2536.json
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- "db_name": "PostgreSQL",
- "query": "\n SELECT\n COUNT(*) AS \"count!\"\n FROM\n l1_batches\n WHERE\n number = $1\n AND hash = $2\n ",
- "describe": {
- "columns": [
- {
- "ordinal": 0,
- "name": "count!",
- "type_info": "Int8"
- }
- ],
- "parameters": {
- "Left": [
- "Int8",
- "Bytea"
- ]
- },
- "nullable": [
- null
- ]
- },
- "hash": "df3256c012f86a9cd3b9260b97be5c6feb8059722149a747c4b6bd46731e2536"
-}
diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs
index 3b9b2176b1c0..467e5437c1fa 100644
--- a/core/lib/dal/src/blocks_dal.rs
+++ b/core/lib/dal/src/blocks_dal.rs
@@ -143,7 +143,7 @@ impl BlocksDal<'_, '_> {
Ok(row.number.map(|num| L1BatchNumber(num as u32)))
}
- pub async fn get_last_l1_batch_number_with_metadata(
+ pub async fn get_last_l1_batch_number_with_tree_data(
&mut self,
) -> DalResult
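The rename presumably reflects that, with the tree data fetcher in play, an L1 batch can have tree data without the rest of its metadata, so the old name overstated what the method checks. A hedged sketch of a call site for the renamed method, assuming the usual `zksync_dal` accessors; the helper itself is hypothetical, and the `Option<L1BatchNumber>` payload is inferred from the neighboring context line (`row.number.map(|num| L1BatchNumber(num as u32))`), not from this truncated hunk.

```rust
use zksync_dal::{Connection, Core, CoreDal};

// Hypothetical helper: logs how far tree data has been persisted.
async fn log_tree_data_progress(storage: &mut Connection<'_, Core>) -> anyhow::Result<()> {
    let last = storage
        .blocks_dal()
        .get_last_l1_batch_number_with_tree_data()
        .await?;
    match last {
        Some(number) => tracing::info!("Tree data is persisted up to L1 batch #{number}"),
        None => tracing::info!("No L1 batch has tree data yet"),
    }
    Ok(())
}
```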