From 9a261a8e15d7bbec148311571ccaa3566ff5711a Mon Sep 17 00:00:00 2001
From: Brennan
Date: Fri, 12 Jan 2024 20:24:28 +0000
Subject: [PATCH 1/2] shred repair admin rpc

---
 core/src/repair/repair_service.rs          | 117 ++++++++++++++++++++-
 core/src/repair/serve_repair.rs            |   4 +-
 validator/src/admin_rpc_service.rs         |  35 +++++-
 validator/src/bin/solana-test-validator.rs |   1 +
 validator/src/cli.rs                       |  27 +++++
 validator/src/main.rs                      |  50 ++++++---
 6 files changed, 217 insertions(+), 17 deletions(-)

diff --git a/core/src/repair/repair_service.rs b/core/src/repair/repair_service.rs
index 36ba4978e1c793..4b46b3e62bd535 100644
--- a/core/src/repair/repair_service.rs
+++ b/core/src/repair/repair_service.rs
@@ -14,12 +14,18 @@ use {
             duplicate_repair_status::AncestorDuplicateSlotToRepair,
             outstanding_requests::OutstandingRequests,
             quic_endpoint::LocalRequest,
+            repair_service::shred::Nonce,
             repair_weight::RepairWeight,
-            serve_repair::{self, ServeRepair, ShredRepairType, REPAIR_PEERS_CACHE_CAPACITY},
+            serve_repair::{
+                self, RepairProtocol, RepairRequestHeader, ServeRepair, ShredRepairType,
+                REPAIR_PEERS_CACHE_CAPACITY,
+            },
         },
     },
     crossbeam_channel::{Receiver as CrossbeamReceiver, Sender as CrossbeamSender},
     lru::LruCache,
+    rand::{thread_rng, Rng},
+    solana_client::connection_cache::Protocol,
     solana_gossip::cluster_info::ClusterInfo,
     solana_ledger::{
         blockstore::{Blockstore, SlotMeta},
@@ -678,6 +684,63 @@ impl RepairService {
         }
     }
 
+    pub fn request_repair_for_shred_from_peer(
+        cluster_info: Arc<ClusterInfo>,
+        pubkey: Pubkey,
+        slot: u64,
+        shred_index: u64,
+        repair_socket: &UdpSocket,
+    ) {
+        let peer_repair_addr = cluster_info
+            .lookup_contact_info(&pubkey, |node| node.serve_repair(Protocol::UDP))
+            .unwrap()
+            .unwrap();
+        Self::request_repair_for_shred_from_address(
+            cluster_info,
+            pubkey,
+            peer_repair_addr,
+            slot,
+            shred_index,
+            repair_socket,
+        );
+    }
+
+    fn request_repair_for_shred_from_address(
+        cluster_info: Arc<ClusterInfo>,
+        pubkey: Pubkey,
+        address: SocketAddr,
+        slot: u64,
+        shred_index: u64,
+        repair_socket: &UdpSocket,
+    ) {
+        // Setup repair request
+        let nonce = thread_rng().gen_range(0..Nonce::MAX);
+        let identity_keypair = cluster_info.keypair();
+
+        // Create repair request
+        let header = RepairRequestHeader::new(cluster_info.id(), pubkey, timestamp(), nonce);
+        let request_proto = RepairProtocol::WindowIndex {
+            header,
+            slot,
+            shred_index,
+        };
+        let packet_buf =
+            ServeRepair::repair_proto_to_bytes(&request_proto, &identity_keypair).unwrap();
+
+        // Prepare packet batch to send
+        let reqs = vec![(packet_buf, address)];
+
+        // Send packet batch
+        match batch_send(repair_socket, &reqs[..]) {
+            Ok(()) => {
+                trace!("successfully sent repair request!");
+            }
+            Err(SendPktsError::IoError(err, _num_failed)) => {
+                error!("batch_send failed to send packet - error = {:?}", err);
+            }
+        }
+    }
+
     /// Generate repairs for all slots `x` in the repair_range.start <= x <= repair_range.end
     #[cfg(test)]
     pub fn generate_repairs_in_range(
@@ -859,6 +922,7 @@ pub(crate) fn sleep_shred_deferment_period() {
 mod test {
     use {
         super::*,
+        crate::repair::quic_endpoint::RemoteRequest,
         solana_gossip::{cluster_info::Node, contact_info::ContactInfo},
         solana_ledger::{
             blockstore::{
@@ -883,6 +947,57 @@ mod test {
         ClusterInfo::new(contact_info, keypair, SocketAddrSpace::Unspecified)
     }
 
+    #[test]
+    pub fn test_request_repair_for_shred_from_address() {
+        // Setup cluster and repair info
+        let cluster_info = Arc::new(new_test_cluster_info());
+        let pubkey = cluster_info.id();
+        let slot = 100;
+        let shred_index = 50;
+        let reader = UdpSocket::bind("127.0.0.1:0").expect("bind");
+        let address = reader.local_addr().unwrap();
+        let sender = UdpSocket::bind("127.0.0.1:0").expect("bind");
+
+        // Send a repair request
+        RepairService::request_repair_for_shred_from_address(
+            cluster_info.clone(),
+            pubkey,
+            address,
+            slot,
+            shred_index,
+            &sender,
+        );
+
+        // Receive and translate repair packet
+        let mut packets = vec![solana_sdk::packet::Packet::default(); 1];
+        let _recv_count = solana_streamer::recvmmsg::recv_mmsg(&reader, &mut packets[..]).unwrap();
+        let packet = &packets[0];
+        let Some(bytes) = packet.data(..).map(Vec::from) else {
+            panic!("packet data not found");
+        };
+        let remote_request = RemoteRequest {
+            remote_pubkey: None,
+            remote_address: packet.meta().socket_addr(),
+            bytes,
+            response_sender: None,
+        };
+
+        // Deserialize and check the request
+        let deserialized =
+            serve_repair::deserialize_request::<RepairProtocol>(&remote_request).unwrap();
+        match deserialized {
+            RepairProtocol::WindowIndex {
+                slot: deserialized_slot,
+                shred_index: deserialized_shred_index,
+                ..
+            } => {
+                assert_eq!(deserialized_slot, slot);
+                assert_eq!(deserialized_shred_index, shred_index);
+            }
+            _ => panic!("unexpected repair protocol"),
+        }
+    }
+
     #[test]
     pub fn test_repair_orphan() {
         let ledger_path = get_tmp_ledger_path_auto_delete!();
diff --git a/core/src/repair/serve_repair.rs b/core/src/repair/serve_repair.rs
index a12848f2e78df8..a4c676bf760eb6 100644
--- a/core/src/repair/serve_repair.rs
+++ b/core/src/repair/serve_repair.rs
@@ -1384,7 +1384,9 @@ pub(crate) fn get_repair_protocol(_: ClusterType) -> Protocol {
     Protocol::UDP
 }
 
-fn deserialize_request<T>(request: &RemoteRequest) -> std::result::Result<T, bincode::Error>
+pub(crate) fn deserialize_request<T>(
+    request: &RemoteRequest,
+) -> std::result::Result<T, bincode::Error>
 where
     T: serde::de::DeserializeOwned,
 {
diff --git a/validator/src/admin_rpc_service.rs b/validator/src/admin_rpc_service.rs
index 67f2309a9c98bc..1e3bf49aee6649 100644
--- a/validator/src/admin_rpc_service.rs
+++ b/validator/src/admin_rpc_service.rs
@@ -13,6 +13,7 @@ use {
     solana_core::{
         admin_rpc_post_init::AdminRpcRequestMetadataPostInit,
         consensus::{tower_storage::TowerStorage, Tower},
+        repair::repair_service,
         validator::ValidatorStartProgress,
     },
     solana_geyser_plugin_manager::GeyserPluginManagerRequest,
@@ -28,7 +29,7 @@ use {
         collections::{HashMap, HashSet},
         error,
         fmt::{self, Display},
-        net::SocketAddr,
+        net::{SocketAddr, UdpSocket},
         path::{Path, PathBuf},
         sync::{Arc, RwLock},
         thread::{self, Builder},
@@ -47,6 +48,7 @@ pub struct AdminRpcRequestMetadata {
     pub staked_nodes_overrides: Arc<RwLock<HashMap<Pubkey, u64>>>,
     pub post_init: Arc<RwLock<Option<AdminRpcRequestMetadataPostInit>>>,
     pub rpc_to_plugin_manager_sender: Option<Sender<GeyserPluginManagerRequest>>,
+    pub repair_socket: Arc<UdpSocket>,
 }
 
 impl Metadata for AdminRpcRequestMetadata {}
@@ -207,6 +209,15 @@ pub trait AdminRpc {
     #[rpc(meta, name = "contactInfo")]
     fn contact_info(&self, meta: Self::Metadata) -> Result<AdminRpcContactInfo>;
 
+    #[rpc(meta, name = "repairShredFromPeer")]
+    fn repair_shred_from_peer(
+        &self,
+        meta: Self::Metadata,
+        pubkey: Pubkey,
+        slot: u64,
+        shred_index: u64,
+    ) -> Result<()>;
+
     #[rpc(meta, name = "repairWhitelist")]
     fn repair_whitelist(&self, meta: Self::Metadata) -> Result<AdminRpcRepairWhitelist>;
 
@@ -487,6 +498,27 @@ impl AdminRpc for AdminRpcImpl {
         meta.with_post_init(|post_init| Ok(post_init.cluster_info.my_contact_info().into()))
     }
 
+    fn repair_shred_from_peer(
+        &self,
+        meta: Self::Metadata,
+        pubkey: Pubkey,
+        slot: u64,
+        shred_index: u64,
+    ) -> Result<()> {
+        debug!("repair_shred_from_peer request received");
+
+        meta.with_post_init(|post_init| {
+            repair_service::RepairService::request_repair_for_shred_from_peer(
+                post_init.cluster_info.clone(),
+                pubkey,
+                slot,
+                shred_index,
+                &meta.repair_socket.clone(),
+            );
+            Ok(())
+        })
+    }
+
     fn repair_whitelist(&self, meta: Self::Metadata) -> Result<AdminRpcRepairWhitelist> {
         debug!("repair_whitelist request received");
 
@@ -898,6 +930,7 @@ mod tests {
            }))),
            staked_nodes_overrides: Arc::new(RwLock::new(HashMap::new())),
            rpc_to_plugin_manager_sender: None,
+           repair_socket: Arc::new(UdpSocket::bind("0.0.0.0:0").unwrap()),
        };
        let mut io = MetaIoHandler::default();
        io.extend_with(AdminRpcImpl.to_delegate());
diff --git a/validator/src/bin/solana-test-validator.rs b/validator/src/bin/solana-test-validator.rs
index aee5fc039df410..2947abd802f021 100644
--- a/validator/src/bin/solana-test-validator.rs
+++ b/validator/src/bin/solana-test-validator.rs
@@ -405,6 +405,7 @@ fn main() {
             post_init: admin_service_post_init,
             tower_storage: tower_storage.clone(),
             rpc_to_plugin_manager_sender,
+            repair_socket: Arc::new(std::net::UdpSocket::bind("0.0.0.0:0").unwrap()),
         },
     );
     let dashboard = if output == Output::Dashboard {
diff --git a/validator/src/cli.rs b/validator/src/cli.rs
index d065a3524f4078..892398cc791567 100644
--- a/validator/src/cli.rs
+++ b/validator/src/cli.rs
@@ -1497,6 +1497,33 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> {
                         .help("Output display mode")
                 )
         )
+        .subcommand(SubCommand::with_name("repair-shred-from-peer")
+            .about("Request a repair from the specified validator")
+            .arg(
+                Arg::with_name("pubkey")
+                    .long("pubkey")
+                    .value_name("PUBKEY")
+                    .takes_value(true)
+                    .validator(is_pubkey)
+                    .help("Identity pubkey of the validator to repair from")
+            )
+            .arg(
+                Arg::with_name("slot")
+                    .long("slot")
+                    .value_name("SLOT")
+                    .takes_value(true)
+                    .validator(is_parsable::<Slot>)
+                    .help("Slot to repair")
+            )
+            .arg(
+                Arg::with_name("shred")
+                    .long("shred")
+                    .value_name("SHRED")
+                    .takes_value(true)
+                    .validator(is_parsable::<u64>)
+                    .help("Shred to repair")
+            )
+        )
         .subcommand(
             SubCommand::with_name("repair-whitelist")
                 .about("Manage the validator's repair protocol whitelist")
diff --git a/validator/src/main.rs b/validator/src/main.rs
index 902a73df45604d..99f1c5d3279bf6 100644
--- a/validator/src/main.rs
+++ b/validator/src/main.rs
@@ -793,6 +793,24 @@ pub fn main() {
                 });
             return;
         }
+        ("repair-shred-from-peer", Some(subcommand_matches)) => {
+            let pubkey = value_t_or_exit!(subcommand_matches, "pubkey", Pubkey);
+            let slot = value_t_or_exit!(subcommand_matches, "slot", u64);
+            let shred_index = value_t_or_exit!(subcommand_matches, "shred", u64);
+            let admin_client = admin_rpc_service::connect(&ledger_path);
+            admin_rpc_service::runtime()
+                .block_on(async move {
+                    admin_client
+                        .await?
+                        .repair_shred_from_peer(pubkey, slot, shred_index)
+                        .await
+                })
+                .unwrap_or_else(|err| {
+                    println!("repair shred from peer failed: {err}");
+                    exit(1);
+                });
+            return;
+        }
         ("repair-whitelist", Some(repair_whitelist_subcommand_matches)) => {
             match repair_whitelist_subcommand_matches.subcommand() {
                 ("get", Some(subcommand_matches)) => {
@@ -1691,20 +1709,6 @@ pub fn main() {
     } else {
         (None, None)
     };
-    admin_rpc_service::run(
-        &ledger_path,
-        admin_rpc_service::AdminRpcRequestMetadata {
-            rpc_addr: validator_config.rpc_addrs.map(|(rpc_addr, _)| rpc_addr),
-            start_time: std::time::SystemTime::now(),
-            validator_exit: validator_config.validator_exit.clone(),
-            start_progress: start_progress.clone(),
-            authorized_voter_keypairs: authorized_voter_keypairs.clone(),
-            post_init: admin_service_post_init.clone(),
-            tower_storage: validator_config.tower_storage.clone(),
-            staked_nodes_overrides,
-            rpc_to_plugin_manager_sender,
-        },
-    );
 
     let gossip_host: IpAddr = matches
         .value_of("gossip_host")
@@ -1788,6 +1792,24 @@ pub fn main() {
         public_tpu_forwards_addr,
     );
 
+    let repair_socket: Arc<UdpSocket> =
+        Arc::new(node.sockets.repair.try_clone().unwrap());
+    admin_rpc_service::run(
+        &ledger_path,
+        admin_rpc_service::AdminRpcRequestMetadata {
+            rpc_addr: validator_config.rpc_addrs.map(|(rpc_addr, _)| rpc_addr),
+            start_time: std::time::SystemTime::now(),
+            validator_exit: validator_config.validator_exit.clone(),
+            start_progress: start_progress.clone(),
+            authorized_voter_keypairs: authorized_voter_keypairs.clone(),
+            post_init: admin_service_post_init.clone(),
+            tower_storage: validator_config.tower_storage.clone(),
+            staked_nodes_overrides,
+            rpc_to_plugin_manager_sender,
+            repair_socket,
+        },
+    );
+
     if restricted_repair_only_mode {
         // When in --restricted_repair_only_mode is enabled only the gossip and repair ports
         // need to be reachable by the entrypoint to respond to gossip pull requests and repair

From fca1ad819f144a010558ce4e455f8ab852e2aba3 Mon Sep 17 00:00:00 2001
From: Brennan
Date: Wed, 17 Jan 2024 16:28:31 +0000
Subject: [PATCH 2/2] add repair request to outstanding

---
 core/src/admin_rpc_post_init.rs            |  5 ++++
 core/src/repair/repair_service.rs          | 13 +++++++--
 core/src/tvu.rs                            |  9 +++++-
 core/src/validator.rs                      |  8 +++++-
 core/src/window_service.rs                 |  7 ++---
 validator/src/admin_rpc_service.rs         | 11 +++++---
 validator/src/bin/solana-test-validator.rs |  1 -
 validator/src/main.rs                      | 32 ++++++++++------------
 8 files changed, 54 insertions(+), 32 deletions(-)

diff --git a/core/src/admin_rpc_post_init.rs b/core/src/admin_rpc_post_init.rs
index 3acd0f84336113..a7a660043642d2 100644
--- a/core/src/admin_rpc_post_init.rs
+++ b/core/src/admin_rpc_post_init.rs
@@ -1,9 +1,11 @@
 use {
+    crate::repair::{outstanding_requests, serve_repair},
     solana_gossip::cluster_info::ClusterInfo,
     solana_runtime::bank_forks::BankForks,
     solana_sdk::{pubkey::Pubkey, quic::NotifyKeyUpdate},
     std::{
         collections::HashSet,
+        net::UdpSocket,
         sync::{Arc, RwLock},
     },
 };
@@ -15,4 +17,7 @@ pub struct AdminRpcRequestMetadataPostInit {
     pub vote_account: Pubkey,
     pub repair_whitelist: Arc<RwLock<HashSet<Pubkey>>>,
     pub notifies: Vec<Arc<dyn NotifyKeyUpdate + Sync + Send>>,
+    pub repair_socket: Arc<UdpSocket>,
+    pub outstanding_repair_requests:
+        Arc<RwLock<outstanding_requests::OutstandingRequests<serve_repair::ShredRepairType>>>,
 }
diff --git a/core/src/repair/repair_service.rs b/core/src/repair/repair_service.rs
index 4b46b3e62bd535..509b1e1b6c1e1e 100644
--- a/core/src/repair/repair_service.rs
+++ b/core/src/repair/repair_service.rs
@@ -14,7 +14,6 @@ use {
             duplicate_repair_status::AncestorDuplicateSlotToRepair,
             outstanding_requests::OutstandingRequests,
             quic_endpoint::LocalRequest,
-            repair_service::shred::Nonce,
             repair_weight::RepairWeight,
             serve_repair::{
                 self, RepairProtocol, RepairRequestHeader, ServeRepair, ShredRepairType,
@@ -24,7 +23,6 @@ use {
     },
     crossbeam_channel::{Receiver as CrossbeamReceiver, Sender as CrossbeamSender},
     lru::LruCache,
-    rand::{thread_rng, Rng},
     solana_client::connection_cache::Protocol,
     solana_gossip::cluster_info::ClusterInfo,
     solana_ledger::{
@@ -690,6 +688,7 @@ impl RepairService {
         slot: u64,
         shred_index: u64,
         repair_socket: &UdpSocket,
+        outstanding_repair_requests: Arc<RwLock<OutstandingShredRepairs>>,
     ) {
         let peer_repair_addr = cluster_info
             .lookup_contact_info(&pubkey, |node| node.serve_repair(Protocol::UDP))
@@ -702,6 +701,7 @@ impl RepairService {
             slot,
             shred_index,
             repair_socket,
+            outstanding_repair_requests,
         );
     }
 
@@ -712,10 +712,15 @@ impl RepairService {
         slot: u64,
         shred_index: u64,
         repair_socket: &UdpSocket,
+        outstanding_repair_requests: Arc<RwLock<OutstandingShredRepairs>>,
     ) {
         // Setup repair request
-        let nonce = thread_rng().gen_range(0..Nonce::MAX);
         let identity_keypair = cluster_info.keypair();
+        let repair_request = ShredRepairType::Shred(slot, shred_index);
+        let nonce = outstanding_repair_requests
+            .write()
+            .unwrap()
+            .add_request(repair_request, timestamp());
 
         // Create repair request
         let header = RepairRequestHeader::new(cluster_info.id(), pubkey, timestamp(), nonce);
@@ -957,6 +962,7 @@ mod test {
         let reader = UdpSocket::bind("127.0.0.1:0").expect("bind");
         let address = reader.local_addr().unwrap();
         let sender = UdpSocket::bind("127.0.0.1:0").expect("bind");
+        let outstanding_repair_requests = Arc::new(RwLock::new(OutstandingShredRepairs::default()));
 
         // Send a repair request
         RepairService::request_repair_for_shred_from_address(
             cluster_info.clone(),
             pubkey,
             address,
             slot,
             shred_index,
             &sender,
+            outstanding_repair_requests,
         );
 
         // Receive and translate repair packet
diff --git a/core/src/tvu.rs b/core/src/tvu.rs
index 2fe7e08dd60f8b..bfdb258b235069 100644
--- a/core/src/tvu.rs
+++ b/core/src/tvu.rs
@@ -14,7 +14,10 @@ use {
         consensus::{tower_storage::TowerStorage, Tower},
         cost_update_service::CostUpdateService,
         drop_bank_service::DropBankService,
-        repair::{quic_endpoint::LocalRequest, repair_service::RepairInfo},
+        repair::{
+            quic_endpoint::LocalRequest,
+            repair_service::{OutstandingShredRepairs, RepairInfo},
+        },
         replay_stage::{ReplayStage, ReplayStageConfig},
         rewards_recorder_service::RewardsRecorderSender,
         shred_fetch_stage::ShredFetchStage,
@@ -138,6 +141,7 @@ impl Tvu {
         turbine_quic_endpoint_sender: AsyncSender<(SocketAddr, Bytes)>,
         turbine_quic_endpoint_receiver: Receiver<(Pubkey, SocketAddr, Bytes)>,
         repair_quic_endpoint_sender: AsyncSender<LocalRequest>,
+        outstanding_repair_requests: Arc<RwLock<OutstandingShredRepairs>>,
     ) -> Result<Self, String> {
         let TvuSockets {
             repair: repair_socket,
@@ -228,6 +232,7 @@ impl Tvu {
                 ancestor_hashes_replay_update_receiver,
                 dumped_slots_receiver,
                 popular_pruned_forks_sender,
+                outstanding_repair_requests,
             )
         };
 
@@ -442,6 +447,7 @@ pub mod tests {
         let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
         let max_complete_rewards_slot = Arc::new(AtomicU64::default());
         let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64));
+        let outstanding_repair_requests = Arc::<RwLock<OutstandingShredRepairs>>::default();
         let tvu = Tvu::new(
             &vote_keypair.pubkey(),
             Arc::new(RwLock::new(vec![Arc::new(vote_keypair)])),
diff --git a/core/src/validator.rs b/core/src/validator.rs
index 13c454631625a0..5330f47a702edd 100644
--- a/core/src/validator.rs
+++ b/core/src/validator.rs
@@ -1255,13 +1255,16 @@ impl Validator {
         };
 
         let last_vote = tower.last_vote();
+        let outstanding_repair_requests =
+            Arc::<RwLock<OutstandingShredRepairs>>::default();
+
         let tvu = Tvu::new(
             vote_account,
             authorized_voter_keypairs,
             &bank_forks,
             &cluster_info,
             TvuSockets {
-                repair: node.sockets.repair,
+                repair: node.sockets.repair.try_clone().unwrap(),
                 retransmit: node.sockets.retransmit_sockets,
                 fetch: node.sockets.tvu,
                 ancestor_hashes_requests: node.sockets.ancestor_hashes_requests,
@@ -1307,6 +1310,7 @@ impl Validator {
             turbine_quic_endpoint_sender.clone(),
             turbine_quic_endpoint_receiver,
             repair_quic_endpoint_sender,
+            outstanding_repair_requests.clone(),
         )?;
 
         if in_wen_restart {
@@ -1383,6 +1387,8 @@ impl Validator {
             vote_account: *vote_account,
             repair_whitelist: config.repair_whitelist.clone(),
             notifies: key_notifies,
+            repair_socket: Arc::new(node.sockets.repair),
+            outstanding_repair_requests,
         });
 
         Ok(Self {
diff --git a/core/src/window_service.rs b/core/src/window_service.rs
index 49418c82683a5d..aa801b7ebd37f2 100644
--- a/core/src/window_service.rs
+++ b/core/src/window_service.rs
@@ -386,9 +386,8 @@ impl WindowService {
         ancestor_hashes_replay_update_receiver: AncestorHashesReplayUpdateReceiver,
         dumped_slots_receiver: DumpedSlotsReceiver,
         popular_pruned_forks_sender: PopularPrunedForksSender,
+        outstanding_repair_requests: Arc<RwLock<OutstandingShredRepairs>>,
     ) -> WindowService {
-        let outstanding_requests = Arc::<RwLock<OutstandingShredRepairs>>::default();
-
         let cluster_info = repair_info.cluster_info.clone();
         let bank_forks = repair_info.bank_forks.clone();
 
@@ -401,7 +400,7 @@ impl WindowService {
             repair_quic_endpoint_response_sender,
             repair_info,
             verified_vote_receiver,
-            outstanding_requests.clone(),
+            outstanding_repair_requests.clone(),
             ancestor_hashes_replay_update_receiver,
             dumped_slots_receiver,
             popular_pruned_forks_sender,
@@ -426,7 +425,7 @@ impl WindowService {
             duplicate_sender,
             completed_data_sets_sender,
             retransmit_sender,
-            outstanding_requests,
+            outstanding_repair_requests,
         );
 
         WindowService {
diff --git a/validator/src/admin_rpc_service.rs b/validator/src/admin_rpc_service.rs
index 1e3bf49aee6649..78ee6a4b303aa3 100644
--- a/validator/src/admin_rpc_service.rs
+++ b/validator/src/admin_rpc_service.rs
@@ -29,7 +29,7 @@ use {
         collections::{HashMap, HashSet},
         error,
         fmt::{self, Display},
-        net::{SocketAddr, UdpSocket},
+        net::SocketAddr,
         path::{Path, PathBuf},
         sync::{Arc, RwLock},
         thread::{self, Builder},
@@ -48,7 +48,6 @@ pub struct AdminRpcRequestMetadata {
     pub staked_nodes_overrides: Arc<RwLock<HashMap<Pubkey, u64>>>,
     pub post_init: Arc<RwLock<Option<AdminRpcRequestMetadataPostInit>>>,
     pub rpc_to_plugin_manager_sender: Option<Sender<GeyserPluginManagerRequest>>,
-    pub repair_socket: Arc<UdpSocket>,
 }
 
 impl Metadata for AdminRpcRequestMetadata {}
@@ -513,7 +512,8 @@ impl AdminRpc for AdminRpcImpl {
                 pubkey,
                 slot,
                 shred_index,
-                &meta.repair_socket.clone(),
+                &post_init.repair_socket.clone(),
+                post_init.outstanding_repair_requests.clone(),
             );
             Ok(())
         })
@@ -927,10 +927,13 @@ mod tests {
                 vote_account,
                 repair_whitelist,
                 notifies: Vec::new(),
+                repair_socket: Arc::new(std::net::UdpSocket::bind("0.0.0.0:0").unwrap()),
+                outstanding_repair_requests: Arc::<
+                    RwLock<repair_service::OutstandingShredRepairs>,
+                >::default(),
             }))),
             staked_nodes_overrides: Arc::new(RwLock::new(HashMap::new())),
             rpc_to_plugin_manager_sender: None,
-            repair_socket: Arc::new(UdpSocket::bind("0.0.0.0:0").unwrap()),
         };
         let mut io = MetaIoHandler::default();
         io.extend_with(AdminRpcImpl.to_delegate());
diff --git a/validator/src/bin/solana-test-validator.rs b/validator/src/bin/solana-test-validator.rs
index 2947abd802f021..aee5fc039df410 100644
--- a/validator/src/bin/solana-test-validator.rs
+++ b/validator/src/bin/solana-test-validator.rs
@@ -405,7 +405,6 @@ fn main() {
             post_init: admin_service_post_init,
             tower_storage: tower_storage.clone(),
             rpc_to_plugin_manager_sender,
-            repair_socket: Arc::new(std::net::UdpSocket::bind("0.0.0.0:0").unwrap()),
         },
     );
     let dashboard = if output == Output::Dashboard {
diff --git a/validator/src/main.rs b/validator/src/main.rs
index 99f1c5d3279bf6..781228e9b271a6 100644
--- a/validator/src/main.rs
+++ b/validator/src/main.rs
@@ -1709,6 +1709,20 @@ pub fn main() {
     } else {
         (None, None)
     };
+    admin_rpc_service::run(
+        &ledger_path,
+        admin_rpc_service::AdminRpcRequestMetadata {
+            rpc_addr: validator_config.rpc_addrs.map(|(rpc_addr, _)| rpc_addr),
+            start_time: std::time::SystemTime::now(),
+            validator_exit: validator_config.validator_exit.clone(),
+            start_progress: start_progress.clone(),
+            authorized_voter_keypairs: authorized_voter_keypairs.clone(),
+            post_init: admin_service_post_init.clone(),
+            tower_storage: validator_config.tower_storage.clone(),
+            staked_nodes_overrides,
+            rpc_to_plugin_manager_sender,
+        },
+    );
 
     let gossip_host: IpAddr = matches
         .value_of("gossip_host")
@@ -1792,24 +1806,6 @@ pub fn main() {
         public_tpu_forwards_addr,
     );
 
-    let repair_socket: Arc<UdpSocket> =
-        Arc::new(node.sockets.repair.try_clone().unwrap());
-    admin_rpc_service::run(
-        &ledger_path,
-        admin_rpc_service::AdminRpcRequestMetadata {
-            rpc_addr: validator_config.rpc_addrs.map(|(rpc_addr, _)| rpc_addr),
-            start_time: std::time::SystemTime::now(),
-            validator_exit: validator_config.validator_exit.clone(),
-            start_progress: start_progress.clone(),
-            authorized_voter_keypairs: authorized_voter_keypairs.clone(),
-            post_init: admin_service_post_init.clone(),
-            tower_storage: validator_config.tower_storage.clone(),
-            staked_nodes_overrides,
-            rpc_to_plugin_manager_sender,
-            repair_socket,
-        },
-    );
-
     if restricted_repair_only_mode {
         // When in --restricted_repair_only_mode is enabled only the gossip and repair ports
         // need to be reachable by the entrypoint to respond to gossip pull requests and repair