Commit 17a5715
Merge remote-tracking branch 'origin/master' into state-sync-skips
marcelo-gonzalez committed Jan 16, 2025
2 parents 93fa12f + 668b0d6
Showing 32 changed files with 749 additions and 574 deletions.
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default.

6 changes: 3 additions & 3 deletions chain/chain/src/chain.rs
@@ -569,7 +569,7 @@ impl Chain {
         // of blocks_in_processing, which is set to 5 now.
         let (sc, rc) = unbounded();
         let resharding_manager = ReshardingManager::new(
-            chain_store.store().clone(),
+            chain_store.store(),
             epoch_manager.clone(),
             runtime_adapter.clone(),
             chain_config.resharding_config,
@@ -664,7 +664,7 @@ impl Chain {
         congestion_info: Option<CongestionInfo>,
     ) -> Result<ChunkExtra, Error> {
         let shard_index = shard_layout.get_shard_index(shard_id)?;
-        let state_root = *get_genesis_state_roots(self.chain_store.store())?
+        let state_root = *get_genesis_state_roots(&self.chain_store.store())?
             .ok_or_else(|| Error::Other("genesis state roots do not exist in the db".to_owned()))?
             .get(shard_index)
             .ok_or_else(|| {
@@ -3953,7 +3953,7 @@ impl Chain {
             }
         } else {
             let is_sync_prev = crate::state_sync::is_sync_prev_hash(
-                self.chain_store.store(),
+                &self.chain_store.store(),
                 &head.last_block_hash,
                 &head.prev_block_hash,
             )?;
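The pattern across these hunks — dropping a `.clone()` in one call site while adding `&` in others — is what you would expect if `ChainStore::store()` changed from returning `&Store` to returning an owned, cheaply cloneable `Store` handle. A minimal sketch of that reading, using hypothetical stand-in types rather than the real near-store API:

use std::sync::Arc;

// Hypothetical stand-in for near-store's Store: a cheap-to-clone
// handle (internally an Arc) over the underlying database.
#[derive(Clone)]
struct Store {
    db: Arc<Vec<u8>>, // placeholder for the real DB handle
}

struct ChainStore {
    store: Store,
}

impl ChainStore {
    // Before (sketch): fn store(&self) -> &Store { &self.store }
    // After: return the handle by value; cloning is just an Arc bump.
    fn store(&self) -> Store {
        self.store.clone()
    }
}

// A function that still takes a reference, like get_genesis_state_roots.
fn reads_store(_store: &Store) {}

fn main() {
    let chain_store = ChainStore { store: Store { db: Arc::new(Vec::new()) } };
    let owned: Store = chain_store.store(); // no explicit .clone() needed anymore
    reads_store(&chain_store.store()); // borrow the temporary for &Store callers
    let _ = owned.db.len();
}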
17 changes: 0 additions & 17 deletions chain/chain/src/garbage_collection.rs
@@ -413,7 +413,6 @@ impl<'a> ChainStoreUpdate<'a> {
             let mut store_update = self.store().store_update();
             let key: &[u8] = header_hash.as_bytes();
             store_update.delete(DBCol::BlockHeader, key);
-            self.chain_store().headers.pop(key);
             self.merge(store_update);
         }
         let key = index_to_bytes(height);
@@ -816,10 +815,8 @@ impl<'a> ChainStoreUpdate<'a> {
         let key = &index_to_bytes(height)[..];
         if epoch_to_hashes.is_empty() {
             store_update.delete(DBCol::BlockPerHeight, key);
-            self.chain_store().block_hash_per_height.pop(key);
         } else {
             store_update.set_ser(DBCol::BlockPerHeight, key, &epoch_to_hashes)?;
-            self.chain_store().block_hash_per_height.put(key.to_vec(), Arc::new(epoch_to_hashes));
         }
         if self.is_height_processed(height)? {
             self.gc_col(DBCol::ProcessedBlockHeights, key);
@@ -845,7 +842,6 @@ impl<'a> ChainStoreUpdate<'a> {
         let mut store_update = self.store().store_update();
         let key = get_block_shard_id(block_hash, shard_id);
         store_update.delete(DBCol::OutgoingReceipts, &key);
-        self.chain_store().outgoing_receipts.pop(&key);
         self.merge(store_update);
     }

@@ -882,7 +878,6 @@ impl<'a> ChainStoreUpdate<'a> {
            }
            DBCol::IncomingReceipts => {
                store_update.delete(col, key);
-                self.chain_store().incoming_receipts.pop(key);
            }
            DBCol::StateHeaders => {
                store_update.delete(col, key);
@@ -893,20 +888,16 @@ impl<'a> ChainStoreUpdate<'a> {
                // When that happens we should make sure that block headers is
                // copied to the cold storage.
                store_update.delete(col, key);
-                self.chain_store().headers.pop(key);
                unreachable!();
            }
            DBCol::Block => {
                store_update.delete(col, key);
-                self.chain_store().blocks.pop(key);
            }
            DBCol::BlockExtra => {
                store_update.delete(col, key);
-                self.chain_store().block_extras.pop(key);
            }
            DBCol::NextBlockHashes => {
                store_update.delete(col, key);
-                self.chain_store().next_block_hashes.pop(key);
            }
            DBCol::ChallengedBlocks => {
                store_update.delete(col, key);
@@ -919,31 +910,24 @@ impl<'a> ChainStoreUpdate<'a> {
            }
            DBCol::BlockRefCount => {
                store_update.delete(col, key);
-                self.chain_store().block_refcounts.pop(key);
            }
            DBCol::Transactions => {
                store_update.decrement_refcount(col, key);
-                self.chain_store().transactions.pop(key);
            }
            DBCol::Receipts => {
                store_update.decrement_refcount(col, key);
-                self.chain_store().receipts.pop(key);
            }
            DBCol::Chunks => {
                store_update.delete(col, key);
-                self.chain_store().chunks.pop(key);
            }
            DBCol::ChunkExtra => {
                store_update.delete(col, key);
-                self.chain_store().chunk_extras.pop(key);
            }
            DBCol::PartialChunks => {
                store_update.delete(col, key);
-                self.chain_store().partial_chunks.pop(key);
            }
            DBCol::InvalidChunks => {
                store_update.delete(col, key);
-                self.chain_store().invalid_chunks.pop(key);
            }
            DBCol::ChunkHashesByHeight => {
                store_update.delete(col, key);
@@ -974,7 +958,6 @@ impl<'a> ChainStoreUpdate<'a> {
            }
            DBCol::ProcessedBlockHeights => {
                store_update.delete(col, key);
-                self.chain_store().processed_block_heights.pop(key);
            }
            DBCol::HeaderHashesByHeight => {
                store_update.delete(col, key);
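Every line removed in this file is a manual cache invalidation (`pop`, plus one `put`) sitting next to the matching database write, which suggests the in-memory caches behind `chain_store()` were removed or relocated, leaving the batched `StoreUpdate` as the single thing to keep consistent. A toy sketch of the before/after shape of that pattern, with hypothetical types:

use std::collections::HashMap;

// Stand-in for a batched database write.
#[derive(Default)]
struct StoreUpdate {
    deleted: Vec<Vec<u8>>,
}

impl StoreUpdate {
    fn delete(&mut self, key: &[u8]) {
        self.deleted.push(key.to_vec());
    }
}

// Stand-in for a ChainStore-side read cache.
#[derive(Default)]
struct HeaderCache {
    entries: HashMap<Vec<u8>, Vec<u8>>,
}

// Before: every delete had to be mirrored into the cache by hand.
fn gc_header_before(update: &mut StoreUpdate, cache: &mut HeaderCache, key: &[u8]) {
    update.delete(key);
    cache.entries.remove(key); // the `.pop(key)` calls this commit deletes
}

// After: the database write alone is authoritative; no cache to desync.
fn gc_header_after(update: &mut StoreUpdate, key: &[u8]) {
    update.delete(key);
}

fn main() {
    let mut update = StoreUpdate::default();
    let mut cache = HeaderCache::default();
    gc_header_before(&mut update, &mut cache, b"h1");
    gc_header_after(&mut update, b"h2");
    assert_eq!(update.deleted.len(), 2);
}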
8 changes: 4 additions & 4 deletions chain/chain/src/state_sync.rs
@@ -30,7 +30,7 @@ fn save_epoch_new_chunks<T: ChainStoreAccess>(
     header: &BlockHeader,
 ) -> Result<bool, Error> {
     let Some(mut num_new_chunks) =
-        get_state_sync_new_chunks(chain_store.store(), header.prev_hash())?
+        get_state_sync_new_chunks(&chain_store.store(), header.prev_hash())?
     else {
         // This might happen in the case of epoch sync where we save individual headers without having all
         // headers that belong to the epoch.
@@ -143,7 +143,7 @@ fn on_new_header<T: ChainStoreAccess>(
    {
        return Ok(());
    }
-    if has_enough_new_chunks(chain_store.store(), sync_prev.hash())? != Some(true) {
+    if has_enough_new_chunks(&chain_store.store(), sync_prev.hash())? != Some(true) {
        return Ok(());
    }

@@ -152,7 +152,7 @@ fn on_new_header<T: ChainStoreAccess>(
        return Ok(());
    };
    let Some(prev_prev_done) =
-        has_enough_new_chunks(chain_store.store(), sync_prev_prev.hash())?
+        has_enough_new_chunks(&chain_store.store(), sync_prev_prev.hash())?
    else {
        return Ok(());
    };
@@ -196,7 +196,7 @@ pub(crate) fn update_sync_hashes<T: ChainStoreAccess>(
        // columnn for this block. This means we will no longer remember sync hashes for these old epochs, which
        // should be fine as we only care to state sync to (and provide state parts for) the latest state
        on_new_epoch(store_update, header)?;
-        return remove_old_epochs(chain_store.store(), store_update, header, &prev_header);
+        return remove_old_epochs(&chain_store.store(), store_update, header, &prev_header);
    }

    on_new_header(chain_store, store_update, header)
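All four hunks in this file make the same one-character change: passing `&chain_store.store()` where the callee takes `&Store`. This works because the temporary returned by `store()` lives until the end of the enclosing statement, so an inline borrow in argument position is sound. A small sketch of that lifetime rule, with stand-in types:

#[derive(Clone)]
struct Store;

struct ChainStore {
    store: Store,
}

impl ChainStore {
    // Sketch: an owned handle, as the call sites above suggest.
    fn store(&self) -> Store {
        self.store.clone()
    }
}

fn has_enough_new_chunks(_store: &Store) -> Option<bool> {
    Some(true)
}

fn main() {
    let chain_store = ChainStore { store: Store };
    // The Store temporary lives until the semicolon, so borrowing it
    // inline as a call argument compiles without a named binding.
    if has_enough_new_chunks(&chain_store.store()) != Some(true) {
        return;
    }
}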
4 changes: 2 additions & 2 deletions chain/chain/src/store/latest_witnesses.rs
@@ -147,8 +147,8 @@ impl ChainStore {

        // Go over witnesses with increasing indexes and remove them until the limits are satisfied.
        while !info.is_within_limits() && info.lowest_index < info.next_witness_index {
-            let key_to_delete = self
-                .store()
+            let store = self.store();
+            let key_to_delete = store
                .get(DBCol::LatestWitnessesByIndex, &info.lowest_index.to_be_bytes())?
                .ok_or_else(|| {
                    std::io::Error::new(
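Unlike the argument-position borrows above, this call site chained `.get(...)` directly onto `self.store()`. If `Store::get` returns data that borrows from the store (as near-store's `DBSlice` does), the chained form borrows from a temporary that is dropped at the end of the statement, which the borrow checker rejects (E0716); binding the store to a local first keeps it alive for the whole loop body. A minimal reproduction with hypothetical types:

#[derive(Clone)]
struct Store {
    bytes: Vec<u8>,
}

impl Store {
    // The returned slice borrows from self, like near-store's DBSlice.
    fn get(&self, _key: &[u8]) -> Option<&[u8]> {
        Some(&self.bytes)
    }
}

struct ChainStore {
    store: Store,
}

impl ChainStore {
    fn store(&self) -> Store {
        self.store.clone()
    }
}

fn main() {
    let chain_store = ChainStore { store: Store { bytes: vec![1, 2, 3] } };

    // Does not compile: `chain_store.store()` is a temporary dropped at the
    // end of this statement, while `value` would still borrow from it.
    // let value = chain_store.store().get(b"key");

    // Compiles: the named binding keeps the store alive for the whole block.
    let store = chain_store.store();
    let value = store.get(b"key");
    assert_eq!(value, Some(&[1u8, 2, 3][..]));
}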