Remove .bank{,_cloned}() fully relying on Deref
ryoqun committed Jun 9, 2023 · 1 parent b2aa017 · commit 74b9439
Showing 7 changed files with 53 additions and 46 deletions.
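
A note on what this commit relies on: the Deref impl itself is not part of the diff — per the commit title, it already exists on BankWithScheduler. The following is a minimal illustrative sketch of that premise, not the real code; Bank, the bank field, and slot() are stand-ins for the actual types in runtime/src/bank.rs and runtime/src/installed_scheduler_pool.rs, and the stand-in clone_with_scheduler() only mimics the method added below (the real one clones a shared inner that also owns the scheduler).

    use std::{ops::Deref, sync::Arc};

    // Stand-ins only; the real types are far richer.
    pub struct Bank;

    impl Bank {
        pub fn slot(&self) -> u64 {
            0 // placeholder
        }
    }

    pub struct BankWithScheduler {
        bank: Arc<Bank>,
    }

    impl BankWithScheduler {
        // Mimics the explicit wrapper clone this commit introduces in
        // runtime/src/installed_scheduler_pool.rs.
        pub fn clone_with_scheduler(&self) -> BankWithScheduler {
            BankWithScheduler {
                bank: self.bank.clone(),
            }
        }
    }

    // The commit title's premise: with this Deref in place, a
    // &BankWithScheduler coerces to &Arc<Bank> (and on to &Bank), so the
    // explicit bank() and bank_cloned() accessors are redundant.
    impl Deref for BankWithScheduler {
        type Target = Arc<Bank>;

        fn deref(&self) -> &Self::Target {
            &self.bank
        }
    }

    fn former_bank_call_site(bank: &BankWithScheduler) -> u64 {
        bank.slot() // auto-deref reaches Bank::slot; no .bank() needed
    }
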
6 changes: 3 additions & 3 deletions core/src/replay_stage.rs
@@ -2709,7 +2709,7 @@ impl ReplayStage {
                 // report cost tracker stats
                 cost_update_sender
                     .send(CostUpdate::FrozenBank {
-                        bank: bank.bank_cloned(),
+                        bank: <Arc<Bank>>::clone(bank),
                     })
                     .unwrap_or_else(|err| {
                         warn!("cost_update_sender failed sending bank stats: {:?}", err)
@@ -2749,7 +2749,7 @@ impl ReplayStage {
             );
             if let Some(sender) = bank_notification_sender {
                 sender
-                    .send(BankNotification::Frozen(bank.bank_cloned()))
+                    .send(BankNotification::Frozen(<Arc<Bank>>::clone(bank)))
                     .unwrap_or_else(|err| warn!("bank_notification_sender failed: {:?}", err));
             }
             blockstore_processor::cache_block_meta(bank, cache_block_meta_sender);
@@ -4460,7 +4460,7 @@ pub(crate) mod tests {
             .or_insert_with(|| ForkProgress::new(bank1.last_blockhash(), None, None, 0, 0));
         let shreds = shred_to_insert(
             &validator_keypairs.values().next().unwrap().node_keypair,
-            bank1.bank_cloned(),
+            bank1.clone(),
         );
         blockstore.insert_shreds(shreds, None, false).unwrap();
         let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
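
The two clone spellings in the replay_stage.rs hunks above are not interchangeable. A sketch of the distinction, continuing the stand-ins from the top (both function names are invented):

    // Through a shared reference, `.clone()` would match the blanket
    // `impl Clone for &T` first and copy the reference rather than the
    // Arc; the fully qualified form pins the target type, and the
    // argument deref-coerces from &BankWithScheduler to &Arc<Bank>.
    fn frozen_bank_for_sender(bank: &BankWithScheduler) -> Arc<Bank> {
        <Arc<Bank>>::clone(bank)
    }

    // On an owned value (or a struct field) there is no such ambiguity:
    // the wrapper no longer implements Clone, so method resolution falls
    // through Deref to Arc<Bank>'s Clone.
    fn frozen_bank_from_owned(bank: BankWithScheduler) -> Arc<Bank> {
        bank.clone()
    }
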
22 changes: 11 additions & 11 deletions ledger/src/blockstore_processor.rs
@@ -317,7 +317,7 @@ fn process_batches(
         batches.len()
     );
     rebatch_and_execute_batches(
-        bank.bank(),
+        bank,
         batches,
         transaction_status_sender,
         replay_vote_sender,
@@ -483,7 +483,7 @@ pub fn process_entries_for_tests(
     replay_vote_sender: Option<&ReplayVoteSender>,
 ) -> Result<()> {
     let verify_transaction = {
-        let bank = bank.clone();
+        let bank = bank.clone_with_scheduler();
         move |versioned_tx: VersionedTransaction| -> Result<SanitizedTransaction> {
             bank.verify_transaction(versioned_tx, TransactionVerificationMode::FullVerification)
         }
@@ -1259,7 +1259,7 @@ fn confirm_slot_entries(
     };

     let verify_transaction = {
-        let bank = bank.clone();
+        let bank = bank.clone_with_scheduler();
         move |versioned_tx: VersionedTransaction,
               verification_mode: TransactionVerificationMode|
               -> Result<SanitizedTransaction> {
@@ -1386,7 +1386,7 @@ fn process_bank_0(
     if blockstore.is_primary_access() {
         blockstore.insert_bank_hash(bank0.slot(), bank0.hash(), false);
     }
-    cache_block_meta(bank0.bank(), cache_block_meta_sender);
+    cache_block_meta(bank0, cache_block_meta_sender);
 }

 // Given a bank, add its children to the pending slots queue if those children slots are
@@ -1540,7 +1540,7 @@ fn load_frozen_forks(
                 // Block must be frozen by this point, otherwise `process_single_slot` would
                 // have errored above
                 assert!(bank.is_frozen());
-                all_banks.insert(bank.slot(), bank.clone());
+                all_banks.insert(bank.slot(), bank.clone_with_scheduler());
                 m.stop();
                 process_single_slot_us += m.as_us();

@@ -1559,7 +1559,7 @@
                     // If there's a cluster confirmed root greater than our last
                     // replayed root, then because the cluster confirmed root should
                     // be descended from our last root, it must exist in `all_banks`
-                    let cluster_root_bank = all_banks.get(&supermajority_root).unwrap().bank_cloned();
+                    let cluster_root_bank = all_banks.get(&supermajority_root).unwrap();

                     // cluster root must be a descendant of our root, otherwise something
                     // is drastically wrong
@@ -1571,7 +1571,7 @@

                     // Ensure cluster-confirmed root and parents are set as root in blockstore
                     let mut rooted_slots = vec![];
-                    let mut new_root_bank = cluster_root_bank.clone();
+                    let mut new_root_bank = Arc::clone(cluster_root_bank);
                     loop {
                         if new_root_bank.slot() == root { break; } // Found the last root in the chain, yay!
                         assert!(new_root_bank.slot() > root);
@@ -1593,7 +1593,7 @@
                             }
                         })
                     } else if blockstore.is_root(slot) {
-                        Some(bank.bank_cloned())
+                        Some(&bank)
                     } else {
                         None
                     }
@@ -1606,7 +1606,7 @@
                     let mut m = Measure::start("set_root");
                     root = new_root_bank.slot();

-                    leader_schedule_cache.set_root(&new_root_bank);
+                    leader_schedule_cache.set_root(new_root_bank);
                     let _ = bank_forks.write().unwrap().set_root(
                         root,
                         accounts_background_request_sender,
@@ -1645,7 +1645,7 @@ fn load_frozen_forks(
             }

             process_next_slots(
-                bank.bank(),
+                &bank,
                 &meta,
                 blockstore,
                 leader_schedule_cache,
@@ -1779,7 +1779,7 @@ fn process_single_slot(
     if blockstore.is_primary_access() {
         blockstore.insert_bank_hash(bank.slot(), bank.hash(), false);
     }
-    cache_block_meta(bank.bank(), cache_block_meta_sender);
+    cache_block_meta(bank, cache_block_meta_sender);

     Ok(())
 }
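
The verify_transaction closures above are the one place in this file where the wrapper itself still has to be duplicated. A hedged sketch of why, continuing the stand-ins from the top (make_verifier and the u64-based check are invented placeholders for the real verify_transaction closure):

    fn make_verifier(bank: &BankWithScheduler) -> impl Fn(u64) -> bool {
        // A bare `bank.clone()` would now copy only the &-reference (or,
        // on a place, deref-clone the Arc<Bank>), whereas the previous
        // derive(Clone) duplicated the whole wrapper. The explicit
        // clone_with_scheduler() keeps that old behavior: the returned
        // closure owns a scheduler-carrying handle that outlives the
        // borrow of `bank`.
        let bank = bank.clone_with_scheduler();
        move |slot| bank.slot() == slot
    }
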
31 changes: 20 additions & 11 deletions poh/src/poh_recorder.rs
@@ -254,7 +254,6 @@ impl PohRecorderBank {
     }
 }

-#[derive(Clone)]
 pub struct WorkingBank {
     pub bank: BankWithScheduler,
     pub start: Arc<Instant>,
@@ -263,6 +262,18 @@ pub struct WorkingBank {
     pub transaction_index: Option<usize>,
 }

+impl Clone for WorkingBank {
+    fn clone(&self) -> Self {
+        Self {
+            bank: self.bank.clone_with_scheduler(),
+            start: self.start.clone(),
+            min_tick_height: self.min_tick_height,
+            max_tick_height: self.max_tick_height,
+            transaction_index: self.transaction_index,
+        }
+    }
+}
+
 #[derive(Debug, PartialEq, Eq)]
 pub enum PohLeaderStatus {
     NotReached,
@@ -376,12 +387,12 @@ impl PohRecorder {
     }

     pub fn bank(&self) -> Option<Arc<Bank>> {
-        self.working_bank.as_ref().map(|w| w.bank.bank_cloned())
+        self.working_bank.as_ref().map(|w| w.bank.clone())
     }

     pub fn bank_start(&self) -> Option<BankStart> {
         self.working_bank.as_ref().map(|w| BankStart {
-            working_bank: w.bank.bank_cloned(),
+            working_bank: w.bank.clone(),
             bank_creation_time: w.start.clone(),
         })
     }
@@ -575,9 +586,9 @@ impl PohRecorder {

     pub fn set_bank(&mut self, bank: BankWithScheduler, track_transaction_indexes: bool) {
         assert!(self.working_bank.is_none());
-        self.leader_bank_notifier.set_in_progress(bank.bank());
+        self.leader_bank_notifier.set_in_progress(&bank);
         let working_bank = WorkingBank {
-            bank: bank.clone(),
+            bank: bank.clone_with_scheduler(),
             start: Arc::new(Instant::now()),
             min_tick_height: bank.tick_height(),
             max_tick_height: bank.max_tick_height(),
@@ -595,7 +606,7 @@
                     "resetting poh due to hashes per tick change detected at {}",
                     working_bank.bank.slot()
                 );
-                self.reset_poh(working_bank.clone().bank.bank_cloned(), false);
+                self.reset_poh(working_bank.clone().bank.clone(), false);
             }
         }
         self.working_bank = Some(working_bank);
@@ -668,9 +679,7 @@

         for tick in &self.tick_cache[..entry_count] {
             working_bank.bank.register_tick(&tick.0.hash);
-            send_result = self
-                .sender
-                .send((working_bank.bank.bank_cloned(), tick.clone()));
+            send_result = self.sender.send((working_bank.bank.clone(), tick.clone()));
             if send_result.is_err() {
                 break;
             }
@@ -682,7 +691,7 @@
                 working_bank.max_tick_height,
                 working_bank.bank.slot()
             );
-            self.start_bank = working_bank.bank.bank_cloned();
+            self.start_bank = working_bank.bank.clone();
             let working_slot = self.start_slot();
             self.start_tick_height = working_slot * self.ticks_per_slot + 1;
             self.clear_bank();
@@ -892,7 +901,7 @@ impl PohRecorder {
                     hash: poh_entry.hash,
                     transactions,
                 };
-                let bank_clone = working_bank.bank.bank_cloned();
+                let bank_clone = working_bank.bank.clone();
                 self.sender.send((bank_clone, (entry, self.tick_height)))
             },
             "send_poh_entry",
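
The hand-rolled Clone for WorkingBank above is forced by this commit, not a style choice. A hedged illustration (the compiler output is paraphrased):

    // With BankWithScheduler's derived Clone gone, the old derive on
    // WorkingBank stops compiling, because derive(Clone) expands to
    // fully qualified Clone::clone calls on every field (no auto-deref):
    //
    //     #[derive(Clone)]                  // error[E0277]: the trait
    //     pub struct WorkingBank {          // bound `BankWithScheduler:
    //         pub bank: BankWithScheduler,  // Clone` is not satisfied
    //         ...
    //     }
    //
    // Hence the manual impl, which substitutes clone_with_scheduler()
    // for the one field that is no longer Clone.
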
2 changes: 1 addition & 1 deletion program-test/src/lib.rs
@@ -1135,7 +1135,7 @@ impl ProgramTestContext {
                 // some warping tests cannot use the append vecs because of the sequence of adding roots and flushing
                 solana_runtime::accounts_db::CalcAccountsHashDataSource::IndexForTests,
             ))
-            .bank_cloned()
+            .clone()
         };

         let (snapshot_request_sender, snapshot_request_receiver) = crossbeam_channel::unbounded();
4 changes: 2 additions & 2 deletions rpc/src/rpc.rs
@@ -4942,7 +4942,7 @@ pub mod tests {
             .read()
             .unwrap()
             .working_bank_with_scheduler()
-            .clone();
+            .clone_with_scheduler();
         for (i, root) in roots.iter().enumerate() {
             let new_bank =
                 Bank::new_from_parent(&parent_bank, parent_bank.collector_id(), *root);
@@ -5000,7 +5000,7 @@
             CommitmentSlots::new_from_slot(self.bank_forks.read().unwrap().highest_slot()),
         );
         *self.block_commitment_cache.write().unwrap() = new_block_commitment;
-        bank.bank_cloned()
+        bank.clone()
     }

     fn store_vote_account(&self, vote_pubkey: &Pubkey, vote_state: VoteState) {
19 changes: 10 additions & 9 deletions runtime/src/bank_forks.rs
@@ -76,7 +76,7 @@ pub struct BankForks {
 impl Index<u64> for BankForks {
     type Output = Arc<Bank>;
     fn index(&self, bank_slot: Slot) -> &Self::Output {
-        self.banks[&bank_slot].bank()
+        &self.banks[&bank_slot]
     }
 }

@@ -89,7 +89,7 @@ impl BankForks {
     pub fn banks(&self) -> HashMap<Slot, Arc<Bank>> {
         self.banks
             .iter()
-            .map(|(&k, b)| (k, b.bank_cloned()))
+            .map(|(&k, b)| (k, <Arc<Bank>>::clone(b)))
             .collect()
     }

@@ -126,7 +126,7 @@ impl BankForks {
         self.banks
             .iter()
             .filter(|(_, b)| b.is_frozen())
-            .map(|(&k, b)| (k, b.bank_cloned()))
+            .map(|(&k, b)| (k, <Arc<Bank>>::clone(b)))
             .collect()
     }

@@ -139,11 +139,11 @@
     }

     pub fn get(&self, bank_slot: Slot) -> Option<Arc<Bank>> {
-        self.get_with_scheduler(bank_slot).map(|b| b.bank_cloned())
+        self.get_with_scheduler(bank_slot).map(|b| b.clone())
     }

     pub fn get_with_scheduler(&self, bank_slot: Slot) -> Option<BankWithScheduler> {
-        self.banks.get(&bank_slot).cloned()
+        self.banks.get(&bank_slot).map(|b| b.clone_with_scheduler())
     }

     pub fn get_with_checked_hash(
@@ -210,7 +210,9 @@ impl BankForks {
         } else {
             BankWithScheduler::new(bank.clone(), None)
         };
-        let prev = self.banks.insert(bank.slot(), bank_with_scheduler.clone());
+        let prev = self
+            .banks
+            .insert(bank.slot(), bank_with_scheduler.clone_with_scheduler());
         assert!(prev.is_none());
         let slot = bank.slot();
         self.descendants.entry(slot).or_default();
@@ -266,11 +268,10 @@ impl BankForks {
         // ensure atomic ordering correctness.
         self.root.store(root, Ordering::Release);

-        let root_bank = self
+        let root_bank = &**self
             .banks
             .get(&root)
-            .expect("root bank didn't exist in bank_forks")
-            .bank();
+            .expect("root bank didn't exist in bank_forks");
         let new_epoch = root_bank.epoch();
         if old_epoch != new_epoch {
             info!(
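
Two deref spellings in this file deserve a note. A sketch, continuing the stand-ins from the top (lookup and index_like are invented helpers):

    use std::collections::HashMap;

    // `&**` as used by set_root(): get()/expect() yield a
    // &BankWithScheduler; the first `*` drops the reference, the second
    // goes through Deref, so `&**` reborrows the inner Arc<Bank>.
    fn lookup(banks: &HashMap<u64, BankWithScheduler>, root: u64) -> u64 {
        let root_bank: &Arc<Bank> = &**banks
            .get(&root)
            .expect("root bank didn't exist in bank_forks");
        root_bank.slot() // Bank methods remain reachable by auto-deref
    }

    // The Index impl leans on the same coercion: `&banks[&slot]` is a
    // &BankWithScheduler that the return position coerces to &Arc<Bank>.
    fn index_like(banks: &HashMap<u64, BankWithScheduler>, slot: u64) -> &Arc<Bank> {
        &banks[&slot]
    }
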
15 changes: 6 additions & 9 deletions runtime/src/installed_scheduler_pool.rs
@@ -210,7 +210,6 @@ impl SchedulingContext {

 // tiny wrapper to ensure to call wait_for_termination() via ::drop() inside
 // BankForks::set_root()'s pruning.
-#[derive(Clone)]
 pub struct BankWithScheduler {
     inner: Arc<BankWithSchedulerInner>,
 }
@@ -244,24 +243,22 @@ impl BankWithScheduler {
         Self::new(bank, None)
     }

-    pub fn bank_cloned(&self) -> Arc<Bank> {
-        self.bank().clone()
-    }
-
-    pub fn bank(&self) -> &Arc<Bank> {
-        &self.inner.bank
+    pub fn clone_with_scheduler(&self) -> BankWithScheduler {
+        BankWithScheduler {
+            inner: self.inner.clone(),
+        }
     }

     pub fn register_tick(&self, hash: &Hash) {
-        self.bank().register_tick(hash, &self.inner.scheduler);
+        self.inner.bank.register_tick(hash, &self.inner.scheduler);
     }

     pub fn has_installed_scheduler(&self) -> bool {
         self.inner.scheduler.read().unwrap().is_some()
     }

     pub(crate) fn into_bank(self) -> Arc<Bank> {
-        self.bank_cloned()
+        self.inner.bank.clone()
     }

     #[must_use]
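
A closing note on intent, hedged: the struct comment above says the wrapper exists so that dropping it triggers wait_for_termination() during BankForks::set_root()'s pruning. Removing derive(Clone) therefore makes every duplication of that drop-sensitive handle explicit. Continuing the stand-ins from the top (contrast is an invented demo):

    fn contrast(bank: &BankWithScheduler) {
        // The only remaining way to duplicate the wrapper, so each site
        // that extends the scheduler's lifetime is explicit and greppable:
        let _wrapper: BankWithScheduler = bank.clone_with_scheduler();
        // A deref-clone hands back just the Arc<Bank>; no
        // scheduler-carrying handle is created:
        let _plain: Arc<Bank> = <Arc<Bank>>::clone(bank);
    }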
