diff --git a/bin/node/bench/src/import.rs b/bin/node/bench/src/import.rs
index 26f9391800ceb..28a322834271c 100644
--- a/bin/node/bench/src/import.rs
+++ b/bin/node/bench/src/import.rs
@@ -135,7 +135,7 @@ impl core::Benchmark for ImportBenchmark {
 		// Sanity checks.
 		context
 			.client
-			.state_at(&hash)
+			.state_at(hash)
 			.expect("state_at failed for block#1")
 			.inspect_state(|| {
 				match self.block_type {
diff --git a/bin/node/inspect/src/lib.rs b/bin/node/inspect/src/lib.rs
index 1e44cec914995..528dce14f46a5 100644
--- a/bin/node/inspect/src/lib.rs
+++ b/bin/node/inspect/src/lib.rs
@@ -144,7 +144,7 @@ impl<TBlock: Block, TPrinter: PrettyPrinter<TBlock>> Inspector<TBlock, TPrinter>
 				let not_found = format!("Could not find block {:?}", id);
 				let body = self
 					.chain
-					.block_body(&hash)?
+					.block_body(hash)?
 					.ok_or_else(|| Error::NotFound(not_found.clone()))?;
 				let header =
 					self.chain.header(id)?.ok_or_else(|| Error::NotFound(not_found.clone()))?;
@@ -155,7 +155,7 @@ impl<TBlock: Block, TPrinter: PrettyPrinter<TBlock>> Inspector<TBlock, TPrinter>
 				let not_found = format!("Could not find block {:?}", id);
 				let body = self
 					.chain
-					.block_body(&hash)?
+					.block_body(hash)?
 					.ok_or_else(|| Error::NotFound(not_found.clone()))?;
 				let header =
 					self.chain.header(id)?.ok_or_else(|| Error::NotFound(not_found.clone()))?;
diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs
index df9c92b626ecc..79cc0d7a16bcc 100644
--- a/client/api/src/backend.rs
+++ b/client/api/src/backend.rs
@@ -215,13 +215,13 @@ pub trait BlockImportOperation<Block: BlockT> {
 	/// Mark a block as finalized.
 	fn mark_finalized(
 		&mut self,
-		hash: &Block::Hash,
+		hash: Block::Hash,
 		justification: Option<Justification>,
 	) -> sp_blockchain::Result<()>;
 
 	/// Mark a block as new head. If both block import and set head are specified, set head
 	/// overrides block import's best block rule.
-	fn mark_head(&mut self, hash: &Block::Hash) -> sp_blockchain::Result<()>;
+	fn mark_head(&mut self, hash: Block::Hash) -> sp_blockchain::Result<()>;
 
 	/// Add a transaction index operation.
 	fn update_transaction_index(&mut self, index: Vec<IndexOperation>)
@@ -251,7 +251,7 @@ pub trait Finalizer<Block: BlockT, B: Backend<Block>> {
 	fn apply_finality(
 		&self,
 		operation: &mut ClientImportOperation<Block, B>,
-		block: &Block::Hash,
+		block: Block::Hash,
 		justification: Option<Justification>,
 		notify: bool,
 	) -> sp_blockchain::Result<()>;
@@ -271,7 +271,7 @@ pub trait Finalizer<Block: BlockT, B: Backend<Block>> {
 	/// while performing major synchronization work.
 	fn finalize_block(
 		&self,
-		block: &Block::Hash,
+		block: Block::Hash,
 		justification: Option<Justification>,
 		notify: bool,
 	) -> sp_blockchain::Result<()>;
@@ -359,21 +359,21 @@ pub trait StorageProvider<Block: BlockT, B: Backend<Block>> {
 	/// Given a block's `Hash` and a key, return the value under the key in that block.
 	fn storage(
 		&self,
-		hash: &Block::Hash,
+		hash: Block::Hash,
 		key: &StorageKey,
 	) -> sp_blockchain::Result<Option<StorageData>>;
 
 	/// Given a block's `Hash` and a key prefix, return the matching storage keys in that block.
 	fn storage_keys(
 		&self,
-		hash: &Block::Hash,
+		hash: Block::Hash,
 		key_prefix: &StorageKey,
 	) -> sp_blockchain::Result<Vec<StorageKey>>;
 
 	/// Given a block's `Hash` and a key, return the value under the hash in that block.
 	fn storage_hash(
 		&self,
-		hash: &Block::Hash,
+		hash: Block::Hash,
 		key: &StorageKey,
 	) -> sp_blockchain::Result<Option<Block::Hash>>;
 
@@ -381,7 +381,7 @@ pub trait StorageProvider<Block: BlockT, B: Backend<Block>> {
 	/// in that block.
 	fn storage_pairs(
 		&self,
-		hash: &Block::Hash,
+		hash: Block::Hash,
 		key_prefix: &StorageKey,
 	) -> sp_blockchain::Result<Vec<(StorageKey, StorageData)>>;
 
@@ -389,7 +389,7 @@ pub trait StorageProvider<Block: BlockT, B: Backend<Block>> {
 	/// keys in that block.
 	fn storage_keys_iter<'a>(
 		&self,
-		hash: &Block::Hash,
+		hash: Block::Hash,
 		prefix: Option<&'a StorageKey>,
 		start_key: Option<&StorageKey>,
 	) -> sp_blockchain::Result<KeyIterator<'a, B::State, Block>>;
@@ -398,7 +398,7 @@ pub trait StorageProvider<Block: BlockT, B: Backend<Block>> {
 	/// that block.
 	fn child_storage(
 		&self,
-		hash: &Block::Hash,
+		hash: Block::Hash,
 		child_info: &ChildInfo,
 		key: &StorageKey,
 	) -> sp_blockchain::Result<Option<StorageData>>;
@@ -407,7 +407,7 @@ pub trait StorageProvider<Block: BlockT, B: Backend<Block>> {
 	/// storage keys.
 	fn child_storage_keys(
 		&self,
-		hash: &Block::Hash,
+		hash: Block::Hash,
 		child_info: &ChildInfo,
 		key_prefix: &StorageKey,
 	) -> sp_blockchain::Result<Vec<StorageKey>>;
@@ -416,7 +416,7 @@ pub trait StorageProvider<Block: BlockT, B: Backend<Block>> {
 	/// return a `KeyIterator` that iterates matching storage keys in that block.
 	fn child_storage_keys_iter<'a>(
 		&self,
-		hash: &Block::Hash,
+		hash: Block::Hash,
 		child_info: ChildInfo,
 		prefix: Option<&'a StorageKey>,
 		start_key: Option<&StorageKey>,
@@ -426,7 +426,7 @@ pub trait StorageProvider<Block: BlockT, B: Backend<Block>> {
 	/// block.
 	fn child_storage_hash(
 		&self,
-		hash: &Block::Hash,
+		hash: Block::Hash,
 		child_info: &ChildInfo,
 		key: &StorageKey,
 	) -> sp_blockchain::Result<Option<Block::Hash>>;
@@ -466,7 +466,7 @@ pub trait Backend<Block: BlockT>: AuxStore + Send + Sync {
 	fn begin_state_operation(
 		&self,
 		operation: &mut Self::BlockImportOperation,
-		block: &Block::Hash,
+		block: Block::Hash,
 	) -> sp_blockchain::Result<()>;
 
 	/// Commit block insertion.
@@ -480,7 +480,7 @@ pub trait Backend<Block: BlockT>: AuxStore + Send + Sync {
 	/// This should only be called if the parent of the given block has been finalized.
 	fn finalize_block(
 		&self,
-		hash: &Block::Hash,
+		hash: Block::Hash,
 		justification: Option<Justification>,
 	) -> sp_blockchain::Result<()>;
 
@@ -489,7 +489,7 @@ pub trait Backend<Block: BlockT>: AuxStore + Send + Sync {
 	/// This should only be called for blocks that are already finalized.
 	fn append_justification(
 		&self,
-		hash: &Block::Hash,
+		hash: Block::Hash,
 		justification: Justification,
 	) -> sp_blockchain::Result<()>;
 
@@ -503,12 +503,12 @@ pub trait Backend<Block: BlockT>: AuxStore + Send + Sync {
 	fn offchain_storage(&self) -> Option<Self::OffchainStorage>;
 
 	/// Returns true if state for given block is available.
-	fn have_state_at(&self, hash: &Block::Hash, _number: NumberFor<Block>) -> bool {
+	fn have_state_at(&self, hash: Block::Hash, _number: NumberFor<Block>) -> bool {
 		self.state_at(hash).is_ok()
 	}
 
 	/// Returns state backend with post-state of given block.
-	fn state_at(&self, hash: &Block::Hash) -> sp_blockchain::Result<Self::State>;
+	fn state_at(&self, hash: Block::Hash) -> sp_blockchain::Result<Self::State>;
 
 	/// Attempts to revert the chain by `n` blocks. If `revert_finalized` is set it will attempt to
 	/// revert past any finalized block, this is unsafe and can potentially leave the node in an
@@ -524,7 +524,7 @@ pub trait Backend<Block: BlockT>: AuxStore + Send + Sync {
 	) -> sp_blockchain::Result<(NumberFor<Block>, HashSet<Block::Hash>)>;
 
 	/// Discard non-best, unfinalized leaf block.
-	fn remove_leaf_block(&self, hash: &Block::Hash) -> sp_blockchain::Result<()>;
+	fn remove_leaf_block(&self, hash: Block::Hash) -> sp_blockchain::Result<()>;
 
 	/// Insert auxiliary data into key-value store.
 	fn insert_aux<
diff --git a/client/api/src/client.rs b/client/api/src/client.rs
index a07198abfe8d6..bb88853d23afb 100644
--- a/client/api/src/client.rs
+++ b/client/api/src/client.rs
@@ -110,7 +110,7 @@ pub trait BlockBackend<Block: BlockT> {
 	/// Get block body by ID. Returns `None` if the body is not stored.
 	fn block_body(
 		&self,
-		hash: &Block::Hash,
+		hash: Block::Hash,
 	) -> sp_blockchain::Result<Option<Vec<<Block as BlockT>::Extrinsic>>>;
 
 	/// Get all indexed transactions for a block,
@@ -118,8 +118,7 @@ pub trait BlockBackend<Block: BlockT> {
 	///
 	/// Note that this will only fetch transactions
 	/// that are indexed by the runtime with `storage_index_transaction`.
-	fn block_indexed_body(&self, hash: &Block::Hash)
-		-> sp_blockchain::Result<Option<Vec<Vec<u8>>>>;
+	fn block_indexed_body(&self, hash: Block::Hash) -> sp_blockchain::Result<Option<Vec<Vec<u8>>>>;
 
 	/// Get full block by id.
fn block(&self, id: &BlockId) -> sp_blockchain::Result>>; @@ -129,7 +128,7 @@ pub trait BlockBackend { -> sp_blockchain::Result; /// Get block justifications for the block with the given id. - fn justifications(&self, hash: &Block::Hash) -> sp_blockchain::Result>; + fn justifications(&self, hash: Block::Hash) -> sp_blockchain::Result>; /// Get block hash by number. fn block_hash(&self, number: NumberFor) -> sp_blockchain::Result>; @@ -138,10 +137,10 @@ pub trait BlockBackend { /// /// Note that this will only fetch transactions /// that are indexed by the runtime with `storage_index_transaction`. - fn indexed_transaction(&self, hash: &Block::Hash) -> sp_blockchain::Result>>; + fn indexed_transaction(&self, hash: Block::Hash) -> sp_blockchain::Result>>; /// Check if transaction index exists. - fn has_indexed_transaction(&self, hash: &Block::Hash) -> sp_blockchain::Result { + fn has_indexed_transaction(&self, hash: Block::Hash) -> sp_blockchain::Result { Ok(self.indexed_transaction(hash)?.is_some()) } diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index 26364f28acca2..5a3e25ab5987b 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -271,16 +271,16 @@ impl Blockchain { fn finalize_header( &self, - block: &Block::Hash, + block: Block::Hash, justification: Option, ) -> sp_blockchain::Result<()> { let mut storage = self.storage.write(); - storage.finalized_hash = *block; + storage.finalized_hash = block; if justification.is_some() { let block = storage .blocks - .get_mut(block) + .get_mut(&block) .expect("hash was fetched from a block in the db; qed"); let block_justifications = match block { @@ -295,7 +295,7 @@ impl Blockchain { fn append_justification( &self, - hash: &Block::Hash, + hash: Block::Hash, justification: Justification, ) -> sp_blockchain::Result<()> { let mut storage = self.storage.write(); @@ -405,17 +405,17 @@ impl HeaderMetadata for Blockchain { impl blockchain::Backend for Blockchain { fn body( &self, - hash: &Block::Hash, + hash: Block::Hash, ) -> sp_blockchain::Result::Extrinsic>>> { Ok(self .storage .read() .blocks - .get(hash) + .get(&hash) .and_then(|b| b.extrinsics().map(|x| x.to_vec()))) } - fn justifications(&self, hash: &Block::Hash) -> sp_blockchain::Result> { + fn justifications(&self, hash: Block::Hash) -> sp_blockchain::Result> { Ok(self.storage.read().blocks.get(&hash).and_then(|b| b.justifications().cloned())) } @@ -445,13 +445,13 @@ impl blockchain::Backend for Blockchain { unimplemented!() } - fn indexed_transaction(&self, _hash: &Block::Hash) -> sp_blockchain::Result>> { + fn indexed_transaction(&self, _hash: Block::Hash) -> sp_blockchain::Result>> { unimplemented!("Not supported by the in-mem backend.") } fn block_indexed_body( &self, - _hash: &Block::Hash, + _hash: Block::Hash, ) -> sp_blockchain::Result>>> { unimplemented!("Not supported by the in-mem backend.") } @@ -596,16 +596,16 @@ where fn mark_finalized( &mut self, - hash: &Block::Hash, + hash: Block::Hash, justification: Option, ) -> sp_blockchain::Result<()> { - self.finalized_blocks.push((*hash, justification)); + self.finalized_blocks.push((hash, justification)); Ok(()) } - fn mark_head(&mut self, hash: &Block::Hash) -> sp_blockchain::Result<()> { + fn mark_head(&mut self, hash: Block::Hash) -> sp_blockchain::Result<()> { assert!(self.pending_block.is_none(), "Only one set block per operation is allowed"); - self.set_head = Some(*hash); + self.set_head = Some(hash); Ok(()) } @@ -677,7 +677,7 @@ where type OffchainStorage = OffchainStorage; fn 
begin_operation(&self) -> sp_blockchain::Result { - let old_state = self.state_at(&Default::default())?; + let old_state = self.state_at(Default::default())?; Ok(BlockImportOperation { pending_block: None, old_state, @@ -691,7 +691,7 @@ where fn begin_state_operation( &self, operation: &mut Self::BlockImportOperation, - block: &Block::Hash, + block: Block::Hash, ) -> sp_blockchain::Result<()> { operation.old_state = self.state_at(block)?; Ok(()) @@ -700,7 +700,7 @@ where fn commit_operation(&self, operation: Self::BlockImportOperation) -> sp_blockchain::Result<()> { if !operation.finalized_blocks.is_empty() { for (block, justification) in operation.finalized_blocks { - self.blockchain.finalize_header(&block, justification)?; + self.blockchain.finalize_header(block, justification)?; } } @@ -733,7 +733,7 @@ where fn finalize_block( &self, - hash: &Block::Hash, + hash: Block::Hash, justification: Option, ) -> sp_blockchain::Result<()> { self.blockchain.finalize_header(hash, justification) @@ -741,7 +741,7 @@ where fn append_justification( &self, - hash: &Block::Hash, + hash: Block::Hash, justification: Justification, ) -> sp_blockchain::Result<()> { self.blockchain.append_justification(hash, justification) @@ -759,14 +759,14 @@ where None } - fn state_at(&self, hash: &Block::Hash) -> sp_blockchain::Result { - if *hash == Default::default() { + fn state_at(&self, hash: Block::Hash) -> sp_blockchain::Result { + if hash == Default::default() { return Ok(Self::State::default()) } self.states .read() - .get(hash) + .get(&hash) .cloned() .ok_or_else(|| sp_blockchain::Error::UnknownBlock(format!("{}", hash))) } @@ -779,7 +779,7 @@ where Ok((Zero::zero(), HashSet::new())) } - fn remove_leaf_block(&self, _hash: &Block::Hash) -> sp_blockchain::Result<()> { + fn remove_leaf_block(&self, _hash: Block::Hash) -> sp_blockchain::Result<()> { Ok(()) } @@ -862,13 +862,13 @@ mod tests { let blockchain = test_blockchain(); let last_finalized = blockchain.last_finalized().unwrap(); - blockchain.append_justification(&last_finalized, (ID2, vec![4])).unwrap(); + blockchain.append_justification(last_finalized, (ID2, vec![4])).unwrap(); let justifications = { let mut just = Justifications::from((ID1, vec![3])); just.append((ID2, vec![4])); just }; - assert_eq!(blockchain.justifications(&last_finalized).unwrap(), Some(justifications)); + assert_eq!(blockchain.justifications(last_finalized).unwrap(), Some(justifications)); } #[test] @@ -876,9 +876,9 @@ mod tests { let blockchain = test_blockchain(); let last_finalized = blockchain.last_finalized().unwrap(); - blockchain.append_justification(&last_finalized, (ID2, vec![0])).unwrap(); + blockchain.append_justification(last_finalized, (ID2, vec![0])).unwrap(); assert!(matches!( - blockchain.append_justification(&last_finalized, (ID2, vec![1])), + blockchain.append_justification(last_finalized, (ID2, vec![1])), Err(sp_blockchain::Error::BadJustification(_)), )); } diff --git a/client/api/src/proof_provider.rs b/client/api/src/proof_provider.rs index 4ddbf883b83f2..01e35df1dec1c 100644 --- a/client/api/src/proof_provider.rs +++ b/client/api/src/proof_provider.rs @@ -27,7 +27,7 @@ pub trait ProofProvider { /// Reads storage value at a given block + key, returning read proof. fn read_proof( &self, - hash: &Block::Hash, + hash: Block::Hash, keys: &mut dyn Iterator, ) -> sp_blockchain::Result; @@ -35,7 +35,7 @@ pub trait ProofProvider { /// read proof. 
fn read_child_proof( &self, - hash: &Block::Hash, + hash: Block::Hash, child_info: &ChildInfo, keys: &mut dyn Iterator, ) -> sp_blockchain::Result; @@ -46,7 +46,7 @@ pub trait ProofProvider { /// No changes are made. fn execution_proof( &self, - hash: &Block::Hash, + hash: Block::Hash, method: &str, call_data: &[u8], ) -> sp_blockchain::Result<(Vec, StorageProof)>; @@ -61,7 +61,7 @@ pub trait ProofProvider { /// Returns combined proof and the numbers of collected keys. fn read_proof_collection( &self, - hash: &Block::Hash, + hash: Block::Hash, start_keys: &[Vec], size_limit: usize, ) -> sp_blockchain::Result<(CompactProof, u32)>; @@ -76,7 +76,7 @@ pub trait ProofProvider { /// end. fn storage_collection( &self, - hash: &Block::Hash, + hash: Block::Hash, start_key: &[Vec], size_limit: usize, ) -> sp_blockchain::Result>; diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index da98ccab9cb07..b69294bf6ccb0 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -736,7 +736,7 @@ mod tests { let api = client.runtime_api(); api.execute_block(&block_id, proposal.block).unwrap(); - let state = backend.state_at(&genesis_hash).unwrap(); + let state = backend.state_at(genesis_hash).unwrap(); let storage_changes = api.into_storage_changes(&state, genesis_hash).unwrap(); diff --git a/client/beefy/src/communication/request_response/incoming_requests_handler.rs b/client/beefy/src/communication/request_response/incoming_requests_handler.rs index 3affbbe870ad7..9f02b7162b54c 100644 --- a/client/beefy/src/communication/request_response/incoming_requests_handler.rs +++ b/client/beefy/src/communication/request_response/incoming_requests_handler.rs @@ -153,7 +153,7 @@ where self.client.block_hash(request.payload.begin).map_err(Error::Client)? { self.client - .justifications(&hash) + .justifications(hash) .map_err(Error::Client)? .and_then(|justifs| justifs.get(BEEFY_ENGINE_ID).cloned()) // No BEEFY justification present. 
diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index 62d8a45f4471c..2f0306209071e 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -512,7 +512,7 @@ fn finalize_block_and_wait_for_beefy( peers.clone().for_each(|(index, _)| { let client = net.lock().peer(index).client().as_client(); let finalize = client.expect_block_hash_from_id(&BlockId::number(*block)).unwrap(); - client.finalize_block(&finalize, None).unwrap(); + client.finalize_block(finalize, None).unwrap(); }) } @@ -608,7 +608,7 @@ fn lagging_validators() { .expect_block_hash_from_id(&BlockId::number(25)) .unwrap(); let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone()); - net.lock().peer(0).client().as_client().finalize_block(&finalize, None).unwrap(); + net.lock().peer(0).client().as_client().finalize_block(finalize, None).unwrap(); // verify nothing gets finalized by BEEFY let timeout = Some(Duration::from_millis(250)); streams_empty_after_timeout(best_blocks, &net, &mut runtime, timeout); @@ -616,7 +616,7 @@ fn lagging_validators() { // Bob catches up and also finalizes #25 let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone()); - net.lock().peer(1).client().as_client().finalize_block(&finalize, None).unwrap(); + net.lock().peer(1).client().as_client().finalize_block(finalize, None).unwrap(); // expected beefy finalizes block #17 from diff-power-of-two wait_for_best_beefy_blocks(best_blocks, &net, &mut runtime, &[23, 24, 25]); wait_for_beefy_signed_commitments(versioned_finality_proof, &net, &mut runtime, &[23, 24, 25]); @@ -637,7 +637,7 @@ fn lagging_validators() { .as_client() .expect_block_hash_from_id(&BlockId::number(60)) .unwrap(); - net.lock().peer(0).client().as_client().finalize_block(&finalize, None).unwrap(); + net.lock().peer(0).client().as_client().finalize_block(finalize, None).unwrap(); // verify nothing gets finalized by BEEFY let timeout = Some(Duration::from_millis(250)); streams_empty_after_timeout(best_blocks, &net, &mut runtime, timeout); @@ -645,7 +645,7 @@ fn lagging_validators() { // Bob catches up and also finalizes #60 (and should have buffered Alice's vote on #60) let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers); - net.lock().peer(1).client().as_client().finalize_block(&finalize, None).unwrap(); + net.lock().peer(1).client().as_client().finalize_block(finalize, None).unwrap(); // verify beefy skips intermediary votes, and successfully finalizes mandatory block #60 wait_for_best_beefy_blocks(best_blocks, &net, &mut runtime, &[60]); wait_for_beefy_signed_commitments(versioned_finality_proof, &net, &mut runtime, &[60]); @@ -696,9 +696,9 @@ fn correct_beefy_payload() { .as_client() .expect_block_hash_from_id(&BlockId::number(11)) .unwrap(); - net.lock().peer(0).client().as_client().finalize_block(&hashof11, None).unwrap(); - net.lock().peer(1).client().as_client().finalize_block(&hashof11, None).unwrap(); - net.lock().peer(3).client().as_client().finalize_block(&hashof11, None).unwrap(); + net.lock().peer(0).client().as_client().finalize_block(hashof11, None).unwrap(); + net.lock().peer(1).client().as_client().finalize_block(hashof11, None).unwrap(); + net.lock().peer(3).client().as_client().finalize_block(hashof11, None).unwrap(); // verify consensus is _not_ reached let timeout = Some(Duration::from_millis(250)); @@ -708,7 +708,7 @@ fn correct_beefy_payload() { // 3rd good validator catches up and votes as well let (best_blocks, 
versioned_finality_proof) = get_beefy_streams(&mut net.lock(), [(0, BeefyKeyring::Alice)].into_iter()); - net.lock().peer(2).client().as_client().finalize_block(&hashof11, None).unwrap(); + net.lock().peer(2).client().as_client().finalize_block(hashof11, None).unwrap(); // verify consensus is reached wait_for_best_beefy_blocks(best_blocks, &net, &mut runtime, &[11]); @@ -760,7 +760,7 @@ fn beefy_importing_blocks() { // none in backend, assert_eq!( full_client - .justifications(&hashof1) + .justifications(hashof1) .unwrap() .and_then(|j| j.get(BEEFY_ENGINE_ID).cloned()), None @@ -799,7 +799,7 @@ fn beefy_importing_blocks() { // still not in backend (worker is responsible for appending to backend), assert_eq!( full_client - .justifications(&hashof2) + .justifications(hashof2) .unwrap() .and_then(|j| j.get(BEEFY_ENGINE_ID).cloned()), None @@ -843,7 +843,7 @@ fn beefy_importing_blocks() { // none in backend, assert_eq!( full_client - .justifications(&hashof3) + .justifications(hashof3) .unwrap() .and_then(|j| j.get(BEEFY_ENGINE_ID).cloned()), None @@ -941,7 +941,7 @@ fn on_demand_beefy_justification_sync() { get_beefy_streams(&mut net.lock(), [(dave_index, BeefyKeyring::Dave)].into_iter()); let client = net.lock().peer(dave_index).client().as_client(); let hashof1 = client.expect_block_hash_from_id(&BlockId::number(1)).unwrap(); - client.finalize_block(&hashof1, None).unwrap(); + client.finalize_block(hashof1, None).unwrap(); // Give Dave task some cpu cycles to process the finality notification, run_for(Duration::from_millis(100), &net, &mut runtime); // freshly spun up Dave now needs to listen for gossip to figure out the state of his peers. diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs index ea9e0d0c33999..9c14128624518 100644 --- a/client/beefy/src/worker.rs +++ b/client/beefy/src/worker.rs @@ -511,7 +511,7 @@ where .expect("forwards closure result; the closure always returns Ok; qed."); self.backend - .append_justification(&hash, (BEEFY_ENGINE_ID, finality_proof.encode())) + .append_justification(hash, (BEEFY_ENGINE_ID, finality_proof.encode())) }) { error!(target: "beefy", "🥩 Error {:?} on appending justification: {:?}", e, finality_proof); } @@ -709,7 +709,7 @@ where // a BEEFY justification, or at this session's boundary; voter will resume from there. 
loop { if let Some(true) = blockchain - .justifications(&header.hash()) + .justifications(header.hash()) .ok() .flatten() .map(|justifs| justifs.get(BEEFY_ENGINE_ID).is_some()) @@ -1375,8 +1375,8 @@ pub(crate) mod tests { // finalize 1 and 2 without justifications let hashof1 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(1)).unwrap(); let hashof2 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(2)).unwrap(); - backend.finalize_block(&hashof1, None).unwrap(); - backend.finalize_block(&hashof2, None).unwrap(); + backend.finalize_block(hashof1, None).unwrap(); + backend.finalize_block(hashof2, None).unwrap(); let justif = create_finality_proof(2); // create new session at block #2 @@ -1401,7 +1401,7 @@ pub(crate) mod tests { })); // check BEEFY justifications are also appended to backend - let justifs = backend.blockchain().justifications(&hashof2).unwrap().unwrap(); + let justifs = backend.blockchain().justifications(hashof2).unwrap().unwrap(); assert!(justifs.get(BEEFY_ENGINE_ID).is_some()) } @@ -1512,7 +1512,7 @@ pub(crate) mod tests { // finalize 13 without justifications let hashof13 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(13)).unwrap(); - net.peer(0).client().as_client().finalize_block(&hashof13, None).unwrap(); + net.peer(0).client().as_client().finalize_block(hashof13, None).unwrap(); // Test initialization at session boundary. { @@ -1551,7 +1551,7 @@ pub(crate) mod tests { let hashof10 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(10)).unwrap(); backend - .append_justification(&hashof10, (BEEFY_ENGINE_ID, justif.encode())) + .append_justification(hashof10, (BEEFY_ENGINE_ID, justif.encode())) .unwrap(); // initialize voter at block 13, expect rounds initialized at last beefy finalized 10 @@ -1587,7 +1587,7 @@ pub(crate) mod tests { let hashof12 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(12)).unwrap(); backend - .append_justification(&hashof12, (BEEFY_ENGINE_ID, justif.encode())) + .append_justification(hashof12, (BEEFY_ENGINE_ID, justif.encode())) .unwrap(); // initialize voter at block 13, expect rounds initialized at last beefy finalized 12 diff --git a/client/block-builder/src/lib.rs b/client/block-builder/src/lib.rs index cd5e62e264200..b6c2ac3ba5d68 100644 --- a/client/block-builder/src/lib.rs +++ b/client/block-builder/src/lib.rs @@ -258,7 +258,7 @@ where let proof = self.api.extract_proof(); - let state = self.backend.state_at(&self.parent_hash)?; + let state = self.backend.state_at(self.parent_hash)?; let storage_changes = self .api diff --git a/client/cli/src/commands/export_state_cmd.rs b/client/cli/src/commands/export_state_cmd.rs index 1bcf21f388a62..04bce0c1d707a 100644 --- a/client/cli/src/commands/export_state_cmd.rs +++ b/client/cli/src/commands/export_state_cmd.rs @@ -69,7 +69,7 @@ impl ExportStateCmd { Some(id) => client.expect_block_hash_from_id(&id)?, None => client.usage_info().chain.best_hash, }; - let raw_state = sc_service::chain_ops::export_raw_state(client, &hash)?; + let raw_state = sc_service::chain_ops::export_raw_state(client, hash)?; input_spec.set_storage(raw_state); info!("Generating new chain spec..."); diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index abffcb0a2b4c3..ee1605f037ff4 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -822,7 +822,7 @@ fn revert_not_allowed_for_finalized() { let canon = propose_and_import_blocks_wrap(BlockId::Number(0), 3); // Finalize 
best block - client.finalize_block(&canon[2], None, false).unwrap(); + client.finalize_block(canon[2], None, false).unwrap(); // Revert canon chain to last finalized block revert(client.clone(), backend, 100).expect("revert should work for baked test scenario"); @@ -884,7 +884,7 @@ fn importing_epoch_change_block_prunes_tree() { // We finalize block #13 from the canon chain, so on the next epoch // change the tree should be pruned, to not contain F (#7). - client.finalize_block(&canon_hashes[12], None, false).unwrap(); + client.finalize_block(canon_hashes[12], None, false).unwrap(); propose_and_import_blocks_wrap(BlockId::Hash(client.chain_info().best_hash), 7); // at this point no hashes from the first fork must exist on the tree @@ -911,7 +911,7 @@ fn importing_epoch_change_block_prunes_tree() { .any(|h| fork_3.contains(h)),); // finalizing block #25 from the canon chain should prune out the second fork - client.finalize_block(&canon_hashes[24], None, false).unwrap(); + client.finalize_block(canon_hashes[24], None, false).unwrap(); propose_and_import_blocks_wrap(BlockId::Hash(client.chain_info().best_hash), 8); // at this point no hashes from the second fork must exist on the tree @@ -1049,7 +1049,7 @@ fn obsolete_blocks_aux_data_cleanup() { assert!(aux_data_check(&fork3_hashes, true)); // Finalize A3 - client.finalize_block(&fork1_hashes[2], None, true).unwrap(); + client.finalize_block(fork1_hashes[2], None, true).unwrap(); // Wiped: A1, A2 assert!(aux_data_check(&fork1_hashes[..2], false)); @@ -1060,7 +1060,7 @@ fn obsolete_blocks_aux_data_cleanup() { // Present C4, C5 assert!(aux_data_check(&fork3_hashes, true)); - client.finalize_block(&fork1_hashes[3], None, true).unwrap(); + client.finalize_block(fork1_hashes[3], None, true).unwrap(); // Wiped: A3 assert!(aux_data_check(&fork1_hashes[2..3], false)); diff --git a/client/consensus/manual-seal/src/finalize_block.rs b/client/consensus/manual-seal/src/finalize_block.rs index e11353e2da611..cee4d59b6d6e5 100644 --- a/client/consensus/manual-seal/src/finalize_block.rs +++ b/client/consensus/manual-seal/src/finalize_block.rs @@ -46,7 +46,7 @@ where { let FinalizeBlockParams { hash, mut sender, justification, finalizer, .. 
} = params; - match finalizer.finalize_block(&hash, justification, true) { + match finalizer.finalize_block(hash, justification, true) { Err(e) => { log::warn!("Failed to finalize block {}", e); rpc::send_result(&mut sender, Err(e.into())) diff --git a/client/db/benches/state_access.rs b/client/db/benches/state_access.rs index ccceae1f5b419..bab79fe7c90db 100644 --- a/client/db/benches/state_access.rs +++ b/client/db/benches/state_access.rs @@ -66,7 +66,7 @@ fn insert_blocks(db: &Backend, storage: Vec<(Vec, Vec)>) -> H256 for i in 0..10 { let mut op = db.begin_operation().unwrap(); - db.begin_state_operation(&mut op, &parent_hash).unwrap(); + db.begin_state_operation(&mut op, parent_hash).unwrap(); let mut header = Header { number, @@ -83,7 +83,7 @@ fn insert_blocks(db: &Backend, storage: Vec<(Vec, Vec)>) -> H256 .map(|(k, v)| (k.clone(), Some(v.clone()))) .collect::>(); - let (state_root, tx) = db.state_at(&parent_hash).unwrap().storage_root( + let (state_root, tx) = db.state_at(parent_hash).unwrap().storage_root( changes.iter().map(|(k, v)| (k.as_slice(), v.as_deref())), StateVersion::V1, ); @@ -175,7 +175,7 @@ fn state_access_benchmarks(c: &mut Criterion) { group.bench_function(desc, |b| { b.iter_batched( - || backend.state_at(&block_hash).expect("Creates state"), + || backend.state_at(block_hash).expect("Creates state"), |state| { for key in keys.iter().cycle().take(keys.len() * multiplier) { let _ = state.storage(&key).expect("Doesn't fail").unwrap(); @@ -213,7 +213,7 @@ fn state_access_benchmarks(c: &mut Criterion) { group.bench_function(desc, |b| { b.iter_batched( - || backend.state_at(&block_hash).expect("Creates state"), + || backend.state_at(block_hash).expect("Creates state"), |state| { for key in keys.iter().take(1).cycle().take(multiplier) { let _ = state.storage(&key).expect("Doesn't fail").unwrap(); @@ -251,7 +251,7 @@ fn state_access_benchmarks(c: &mut Criterion) { group.bench_function(desc, |b| { b.iter_batched( - || backend.state_at(&block_hash).expect("Creates state"), + || backend.state_at(block_hash).expect("Creates state"), |state| { for key in keys.iter().take(1).cycle().take(multiplier) { let _ = state.storage_hash(&key).expect("Doesn't fail").unwrap(); @@ -289,7 +289,7 @@ fn state_access_benchmarks(c: &mut Criterion) { group.bench_function(desc, |b| { b.iter_batched( - || backend.state_at(&block_hash).expect("Creates state"), + || backend.state_at(block_hash).expect("Creates state"), |state| { let _ = state .storage_hash(sp_core::storage::well_known_keys::CODE) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 0138df36a38fb..fc031e2aaba59 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -577,9 +577,9 @@ impl sc_client_api::blockchain::HeaderBackend for Blockcha } impl sc_client_api::blockchain::Backend for BlockchainDb { - fn body(&self, hash: &Block::Hash) -> ClientResult>> { + fn body(&self, hash: Block::Hash) -> ClientResult>> { if let Some(body) = - read_db(&*self.db, columns::KEY_LOOKUP, columns::BODY, BlockId::Hash::(*hash))? + read_db(&*self.db, columns::KEY_LOOKUP, columns::BODY, BlockId::Hash::(hash))? { // Plain body match Decode::decode(&mut &body[..]) { @@ -596,7 +596,7 @@ impl sc_client_api::blockchain::Backend for BlockchainDb(*hash), + BlockId::Hash::(hash), )? 
{ match Vec::>::decode(&mut &index[..]) { Ok(index) => { @@ -642,12 +642,12 @@ impl sc_client_api::blockchain::Backend for BlockchainDb ClientResult> { + fn justifications(&self, hash: Block::Hash) -> ClientResult> { match read_db( &*self.db, columns::KEY_LOOKUP, columns::JUSTIFICATIONS, - BlockId::::Hash(*hash), + BlockId::::Hash(hash), )? { Some(justifications) => match Decode::decode(&mut &justifications[..]) { Ok(justifications) => Ok(Some(justifications)), @@ -686,20 +686,20 @@ impl sc_client_api::blockchain::Backend for BlockchainDb ClientResult>> { + fn indexed_transaction(&self, hash: Block::Hash) -> ClientResult>> { Ok(self.db.get(columns::TRANSACTION, hash.as_ref())) } - fn has_indexed_transaction(&self, hash: &Block::Hash) -> ClientResult { + fn has_indexed_transaction(&self, hash: Block::Hash) -> ClientResult { Ok(self.db.contains(columns::TRANSACTION, hash.as_ref())) } - fn block_indexed_body(&self, hash: &Block::Hash) -> ClientResult>>> { + fn block_indexed_body(&self, hash: Block::Hash) -> ClientResult>>> { let body = match read_db( &*self.db, columns::KEY_LOOKUP, columns::BODY_INDEX, - BlockId::::Hash(*hash), + BlockId::::Hash(hash), )? { Some(body) => body, None => return Ok(None), @@ -914,16 +914,16 @@ impl sc_client_api::backend::BlockImportOperation fn mark_finalized( &mut self, - block: &Block::Hash, + block: Block::Hash, justification: Option, ) -> ClientResult<()> { - self.finalized_blocks.push((*block, justification)); + self.finalized_blocks.push((block, justification)); Ok(()) } - fn mark_head(&mut self, hash: &Block::Hash) -> ClientResult<()> { + fn mark_head(&mut self, hash: Block::Hash) -> ClientResult<()> { assert!(self.set_head.is_none(), "Only one set head per operation is allowed"); - self.set_head = Some(*hash); + self.set_head = Some(hash); Ok(()) } @@ -1176,7 +1176,7 @@ impl Backend { info.finalized_hash != Default::default() && sc_client_api::Backend::have_state_at( &backend, - &info.finalized_hash, + info.finalized_hash, info.finalized_number, ) { backend.blockchain.update_meta(MetaUpdate { @@ -1289,7 +1289,7 @@ impl Backend { fn finalize_block_with_transaction( &self, transaction: &mut Transaction, - hash: &Block::Hash, + hash: Block::Hash, header: &Block::Header, last_finalized: Option, justification: Option, @@ -1300,7 +1300,7 @@ impl Backend { self.ensure_sequential_finalization(header, last_finalized)?; let with_state = sc_client_api::Backend::have_state_at(self, hash, number); - self.note_finalized(transaction, header, *hash, finalization_displaced, with_state)?; + self.note_finalized(transaction, header, hash, finalization_displaced, with_state)?; if let Some(justification) = justification { transaction.set_from_vec( @@ -1309,7 +1309,7 @@ impl Backend { Justifications::from(justification).encode(), ); } - Ok(MetaUpdate { hash: *hash, number, is_best: false, is_finalized: true, with_state }) + Ok(MetaUpdate { hash, number, is_best: false, is_finalized: true, with_state }) } // performs forced canonicalization with a delay after importing a non-finalized block. @@ -1340,7 +1340,7 @@ impl Backend { )) })? 
}; - if !sc_client_api::Backend::have_state_at(self, &hash, new_canonical.saturated_into()) { + if !sc_client_api::Backend::have_state_at(self, hash, new_canonical.saturated_into()) { return Ok(()) } @@ -1372,7 +1372,7 @@ impl Backend { let block_header = self.blockchain.expect_header(BlockId::Hash(block_hash))?; meta_updates.push(self.finalize_block_with_transaction( &mut transaction, - &block_hash, + block_hash, &block_header, Some(last_finalized_hash), justification, @@ -1703,7 +1703,7 @@ impl Backend { } transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, lookup_key); - if sc_client_api::Backend::have_state_at(self, &f_hash, f_num) && + if sc_client_api::Backend::have_state_at(self, f_hash, f_num) && self.storage .state_db .best_canonical() @@ -1978,9 +1978,9 @@ impl sc_client_api::backend::Backend for Backend { fn begin_state_operation( &self, operation: &mut Self::BlockImportOperation, - block: &Block::Hash, + block: Block::Hash, ) -> ClientResult<()> { - if *block == Default::default() { + if block == Default::default() { operation.old_state = self.empty_state()?; } else { operation.old_state = self.state_at(block)?; @@ -2008,11 +2008,11 @@ impl sc_client_api::backend::Backend for Backend { fn finalize_block( &self, - hash: &Block::Hash, + hash: Block::Hash, justification: Option, ) -> ClientResult<()> { let mut transaction = Transaction::new(); - let header = self.blockchain.expect_header(BlockId::Hash(*hash))?; + let header = self.blockchain.expect_header(BlockId::Hash(hash))?; let mut displaced = None; let m = self.finalize_block_with_transaction( @@ -2030,11 +2030,11 @@ impl sc_client_api::backend::Backend for Backend { fn append_justification( &self, - hash: &Block::Hash, + hash: Block::Hash, justification: Justification, ) -> ClientResult<()> { let mut transaction: Transaction = Transaction::new(); - let header = self.blockchain.expect_header(BlockId::Hash(*hash))?; + let header = self.blockchain.expect_header(BlockId::Hash(hash))?; let number = *header.number(); // Check if the block is finalized first. @@ -2043,7 +2043,7 @@ impl sc_client_api::backend::Backend for Backend { // We can do a quick check first, before doing a proper but more expensive check if number > self.blockchain.info().finalized_number || - (*hash != last_finalized && !is_descendent_of(hash, &last_finalized)?) + (hash != last_finalized && !is_descendent_of(&hash, &last_finalized)?) 
{ return Err(ClientError::NotInFinalizedChain) } @@ -2061,7 +2061,7 @@ impl sc_client_api::backend::Backend for Backend { transaction.set_from_vec( columns::JUSTIFICATIONS, - &utils::number_and_hash_to_lookup_key(number, *hash)?, + &utils::number_and_hash_to_lookup_key(number, hash)?, justifications.encode(), ); @@ -2154,7 +2154,7 @@ impl sc_client_api::backend::Backend for Backend { let prev_hash = if prev_number == best_number { best_hash } else { *removed.parent_hash() }; - if !self.have_state_at(&prev_hash, prev_number) { + if !self.have_state_at(prev_hash, prev_number) { return Ok(c.saturated_into::>()) } @@ -2183,7 +2183,7 @@ impl sc_client_api::backend::Backend for Backend { if hash == hash_to_revert { if !number_to_revert.is_zero() && self.have_state_at( - &prev_hash, + prev_hash, number_to_revert - One::one(), ) { let lookup_key = utils::number_and_hash_to_lookup_key( @@ -2247,14 +2247,14 @@ impl sc_client_api::backend::Backend for Backend { Ok((reverted, reverted_finalized)) } - fn remove_leaf_block(&self, hash: &Block::Hash) -> ClientResult<()> { + fn remove_leaf_block(&self, hash: Block::Hash) -> ClientResult<()> { let best_hash = self.blockchain.info().best_hash; - if best_hash == *hash { + if best_hash == hash { return Err(sp_blockchain::Error::Backend(format!("Can't remove best block {:?}", hash))) } - let hdr = self.blockchain.header_metadata(*hash)?; + let hdr = self.blockchain.header_metadata(hash)?; if !self.have_state_at(hash, hdr.number) { return Err(sp_blockchain::Error::UnknownBlock(format!( "State already discarded for {:?}", @@ -2263,7 +2263,7 @@ impl sc_client_api::backend::Backend for Backend { } let mut leaves = self.blockchain.leaves.write(); - if !leaves.contains(hdr.number, *hash) { + if !leaves.contains(hdr.number, hash) { return Err(sp_blockchain::Error::Backend(format!( "Can't remove non-leaf block {:?}", hash @@ -2271,7 +2271,7 @@ impl sc_client_api::backend::Backend for Backend { } let mut transaction = Transaction::new(); - if let Some(commit) = self.storage.state_db.remove(hash) { + if let Some(commit) = self.storage.state_db.remove(&hash) { apply_state_commit(&mut transaction, commit); } transaction.remove(columns::KEY_LOOKUP, hash.as_ref()); @@ -2280,7 +2280,7 @@ impl sc_client_api::backend::Backend for Backend { .blockchain() .children(hdr.parent)? 
.into_iter() - .filter(|child_hash| child_hash != hash) + .filter(|child_hash| *child_hash != hash) .collect(); let parent_leaf = if children.is_empty() { children::remove_children( @@ -2301,7 +2301,7 @@ impl sc_client_api::backend::Backend for Backend { None }; - let remove_outcome = leaves.remove(*hash, hdr.number, parent_leaf); + let remove_outcome = leaves.remove(hash, hdr.number, parent_leaf); leaves.prepare_transaction(&mut transaction, columns::META, meta_keys::LEAF_PREFIX); if let Err(e) = self.storage.db.commit(transaction) { if let Some(outcome) = remove_outcome { @@ -2309,7 +2309,7 @@ impl sc_client_api::backend::Backend for Backend { } return Err(e.into()) } - self.blockchain().remove_header_metadata(*hash); + self.blockchain().remove_header_metadata(hash); Ok(()) } @@ -2317,8 +2317,8 @@ impl sc_client_api::backend::Backend for Backend { &self.blockchain } - fn state_at(&self, hash: &Block::Hash) -> ClientResult { - if hash == &self.blockchain.meta.read().genesis_hash { + fn state_at(&self, hash: Block::Hash) -> ClientResult { + if hash == self.blockchain.meta.read().genesis_hash { if let Some(genesis_state) = &*self.genesis_state.read() { let root = genesis_state.root; let db_state = DbStateBuilder::::new(genesis_state.clone(), root) @@ -2330,7 +2330,7 @@ impl sc_client_api::backend::Backend for Backend { } } - match self.blockchain.header_metadata(*hash) { + match self.blockchain.header_metadata(hash) { Ok(ref hdr) => { let hint = || { sc_state_db::NodeDb::get(self.storage.as_ref(), hdr.state_root.as_ref()) @@ -2338,7 +2338,7 @@ impl sc_client_api::backend::Backend for Backend { .is_some() }; if let Ok(()) = - self.storage.state_db.pin(hash, hdr.number.saturated_into::(), hint) + self.storage.state_db.pin(&hash, hdr.number.saturated_into::(), hint) { let root = hdr.state_root; let db_state = DbStateBuilder::::new(self.storage.clone(), root) @@ -2346,8 +2346,8 @@ impl sc_client_api::backend::Backend for Backend { self.shared_trie_cache.as_ref().map(|c| c.local_cache()), ) .build(); - let state = RefTrackingState::new(db_state, self.storage.clone(), Some(*hash)); - Ok(RecordStatsState::new(state, Some(*hash), self.state_usage.clone())) + let state = RefTrackingState::new(db_state, self.storage.clone(), Some(hash)); + Ok(RecordStatsState::new(state, Some(hash), self.state_usage.clone())) } else { Err(sp_blockchain::Error::UnknownBlock(format!( "State already discarded for {:?}", @@ -2359,9 +2359,9 @@ impl sc_client_api::backend::Backend for Backend { } } - fn have_state_at(&self, hash: &Block::Hash, number: NumberFor) -> bool { + fn have_state_at(&self, hash: Block::Hash, number: NumberFor) -> bool { if self.is_archive { - match self.blockchain.header_metadata(*hash) { + match self.blockchain.header_metadata(hash) { Ok(header) => sp_state_machine::Storage::get( self.storage.as_ref(), &header.state_root, @@ -2372,10 +2372,10 @@ impl sc_client_api::backend::Backend for Backend { _ => false, } } else { - match self.storage.state_db.is_pruned(hash, number.saturated_into::()) { + match self.storage.state_db.is_pruned(&hash, number.saturated_into::()) { IsPruned::Pruned => false, IsPruned::NotPruned => true, - IsPruned::MaybePruned => match self.blockchain.header_metadata(*hash) { + IsPruned::MaybePruned => match self.blockchain.header_metadata(hash) { Ok(header) => sp_state_machine::Storage::get( self.storage.as_ref(), &header.state_root, @@ -2459,7 +2459,7 @@ pub(crate) mod tests { let block_hash = if number == 0 { Default::default() } else { parent_hash }; let mut op = 
backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &block_hash).unwrap(); + backend.begin_state_operation(&mut op, block_hash).unwrap(); op.set_block_data(header, Some(body), None, None, NewBlockState::Best).unwrap(); if let Some(index) = transaction_index { op.update_transaction_index(index).unwrap(); @@ -2507,7 +2507,7 @@ pub(crate) mod tests { }; let mut op = db.begin_operation().unwrap(); - db.begin_state_operation(&mut op, &hash).unwrap(); + db.begin_state_operation(&mut op, hash).unwrap(); let header = Header { number: i, parent_hash: hash, @@ -2581,7 +2581,7 @@ pub(crate) mod tests { db.commit_operation(op).unwrap(); - let state = db.state_at(&hash).unwrap(); + let state = db.state_at(hash).unwrap(); assert_eq!(state.storage(&[1, 3, 5]).unwrap(), Some(vec![2, 4, 6])); assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9])); @@ -2592,7 +2592,7 @@ pub(crate) mod tests { { let mut op = db.begin_operation().unwrap(); - db.begin_state_operation(&mut op, &hash).unwrap(); + db.begin_state_operation(&mut op, hash).unwrap(); let mut header = Header { number: 1, parent_hash: hash, @@ -2616,7 +2616,7 @@ pub(crate) mod tests { db.commit_operation(op).unwrap(); - let state = db.state_at(&header.hash()).unwrap(); + let state = db.state_at(header.hash()).unwrap(); assert_eq!(state.storage(&[1, 3, 5]).unwrap(), None); assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9])); @@ -2633,7 +2633,7 @@ pub(crate) mod tests { let hash = { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &Default::default()).unwrap(); + backend.begin_state_operation(&mut op, Default::default()).unwrap(); let mut header = Header { number: 0, parent_hash: Default::default(), @@ -2670,7 +2670,7 @@ pub(crate) mod tests { let hashof1 = { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &hash).unwrap(); + backend.begin_state_operation(&mut op, hash).unwrap(); let mut header = Header { number: 1, parent_hash: hash, @@ -2707,7 +2707,7 @@ pub(crate) mod tests { let hashof2 = { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &hashof1).unwrap(); + backend.begin_state_operation(&mut op, hashof1).unwrap(); let mut header = Header { number: 2, parent_hash: hashof1, @@ -2741,7 +2741,7 @@ pub(crate) mod tests { let hashof3 = { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &hashof2).unwrap(); + backend.begin_state_operation(&mut op, hashof2).unwrap(); let mut header = Header { number: 3, parent_hash: hashof2, @@ -2771,9 +2771,9 @@ pub(crate) mod tests { hash }; - backend.finalize_block(&hashof1, None).unwrap(); - backend.finalize_block(&hashof2, None).unwrap(); - backend.finalize_block(&hashof3, None).unwrap(); + backend.finalize_block(hashof1, None).unwrap(); + backend.finalize_block(hashof2, None).unwrap(); + backend.finalize_block(hashof3, None).unwrap(); assert!(backend .storage .db @@ -2996,8 +2996,8 @@ pub(crate) mod tests { vec![block2_a, block2_b, block2_c, block1_c] ); - backend.finalize_block(&block1_a, None).unwrap(); - backend.finalize_block(&block2_a, None).unwrap(); + backend.finalize_block(block1_a, None).unwrap(); + backend.finalize_block(block2_a, None).unwrap(); // leaves at same height stay. Leaves at lower heights pruned. 
assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a, block2_b, block2_c]); @@ -3024,10 +3024,10 @@ pub(crate) mod tests { let block1 = insert_header(&backend, 1, block0, None, Default::default()); let justification = Some((CONS0_ENGINE_ID, vec![1, 2, 3])); - backend.finalize_block(&block1, justification.clone()).unwrap(); + backend.finalize_block(block1, justification.clone()).unwrap(); assert_eq!( - backend.blockchain().justifications(&block1).unwrap(), + backend.blockchain().justifications(block1).unwrap(), justification.map(Justifications::from), ); } @@ -3042,14 +3042,14 @@ pub(crate) mod tests { let block1 = insert_header(&backend, 1, block0, None, Default::default()); let just0 = (CONS0_ENGINE_ID, vec![1, 2, 3]); - backend.finalize_block(&block1, Some(just0.clone().into())).unwrap(); + backend.finalize_block(block1, Some(just0.clone().into())).unwrap(); let just1 = (CONS1_ENGINE_ID, vec![4, 5]); - backend.append_justification(&block1, just1.clone()).unwrap(); + backend.append_justification(block1, just1.clone()).unwrap(); let just2 = (CONS1_ENGINE_ID, vec![6, 7]); assert!(matches!( - backend.append_justification(&block1, just2), + backend.append_justification(block1, just2), Err(ClientError::BadJustification(_)) )); @@ -3058,7 +3058,7 @@ pub(crate) mod tests { just.append(just1); just }; - assert_eq!(backend.blockchain().justifications(&block1).unwrap(), Some(justifications),); + assert_eq!(backend.blockchain().justifications(block1).unwrap(), Some(justifications),); } #[test] @@ -3072,16 +3072,16 @@ pub(crate) mod tests { let block4 = insert_header(&backend, 4, block3, None, Default::default()); { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &block0).unwrap(); - op.mark_finalized(&block1, None).unwrap(); - op.mark_finalized(&block2, None).unwrap(); + backend.begin_state_operation(&mut op, block0).unwrap(); + op.mark_finalized(block1, None).unwrap(); + op.mark_finalized(block2, None).unwrap(); backend.commit_operation(op).unwrap(); } { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &block2).unwrap(); - op.mark_finalized(&block3, None).unwrap(); - op.mark_finalized(&block4, None).unwrap(); + backend.begin_state_operation(&mut op, block2).unwrap(); + op.mark_finalized(block3, None).unwrap(); + op.mark_finalized(block4, None).unwrap(); backend.commit_operation(op).unwrap(); } } @@ -3093,7 +3093,7 @@ pub(crate) mod tests { let hash0 = { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &Default::default()).unwrap(); + backend.begin_state_operation(&mut op, Default::default()).unwrap(); let mut header = Header { number: 0, parent_hash: Default::default(), @@ -3127,11 +3127,11 @@ pub(crate) mod tests { hash }; - let block0_hash = backend.state_at(&hash0).unwrap().storage_hash(&b"test"[..]).unwrap(); + let block0_hash = backend.state_at(hash0).unwrap().storage_hash(&b"test"[..]).unwrap(); let hash1 = { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &hash0).unwrap(); + backend.begin_state_operation(&mut op, hash0).unwrap(); let mut header = Header { number: 1, parent_hash: hash0, @@ -3166,7 +3166,7 @@ pub(crate) mod tests { backend.commit_operation(op).unwrap(); } - let block1_hash = backend.state_at(&hash1).unwrap().storage_hash(&b"test"[..]).unwrap(); + let block1_hash = backend.state_at(hash1).unwrap().storage_hash(&b"test"[..]).unwrap(); assert_ne!(block0_hash, block1_hash); } @@ -3180,8 +3180,8 @@ 
pub(crate) mod tests { let block2 = insert_header(&backend, 2, block1, None, Default::default()); { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &block0).unwrap(); - op.mark_finalized(&block2, None).unwrap(); + backend.begin_state_operation(&mut op, block0).unwrap(); + op.mark_finalized(block2, None).unwrap(); backend.commit_operation(op).unwrap_err(); } } @@ -3208,18 +3208,18 @@ pub(crate) mod tests { { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &blocks[4]).unwrap(); + backend.begin_state_operation(&mut op, blocks[4]).unwrap(); for i in 1..5 { - op.mark_finalized(&blocks[i], None).unwrap(); + op.mark_finalized(blocks[i], None).unwrap(); } backend.commit_operation(op).unwrap(); } let bc = backend.blockchain(); - assert_eq!(None, bc.body(&blocks[0]).unwrap()); - assert_eq!(None, bc.body(&blocks[1]).unwrap()); - assert_eq!(None, bc.body(&blocks[2]).unwrap()); - assert_eq!(Some(vec![3.into()]), bc.body(&blocks[3]).unwrap()); - assert_eq!(Some(vec![4.into()]), bc.body(&blocks[4]).unwrap()); + assert_eq!(None, bc.body(blocks[0]).unwrap()); + assert_eq!(None, bc.body(blocks[1]).unwrap()); + assert_eq!(None, bc.body(blocks[2]).unwrap()); + assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); + assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); } #[test] @@ -3243,18 +3243,18 @@ pub(crate) mod tests { } let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &blocks[4]).unwrap(); + backend.begin_state_operation(&mut op, blocks[4]).unwrap(); for i in 1..3 { - op.mark_finalized(&blocks[i], None).unwrap(); + op.mark_finalized(blocks[i], None).unwrap(); } backend.commit_operation(op).unwrap(); let bc = backend.blockchain(); - assert_eq!(Some(vec![0.into()]), bc.body(&blocks[0]).unwrap()); - assert_eq!(Some(vec![1.into()]), bc.body(&blocks[1]).unwrap()); - assert_eq!(Some(vec![2.into()]), bc.body(&blocks[2]).unwrap()); - assert_eq!(Some(vec![3.into()]), bc.body(&blocks[3]).unwrap()); - assert_eq!(Some(vec![4.into()]), bc.body(&blocks[4]).unwrap()); + assert_eq!(Some(vec![0.into()]), bc.body(blocks[0]).unwrap()); + assert_eq!(Some(vec![1.into()]), bc.body(blocks[1]).unwrap()); + assert_eq!(Some(vec![2.into()]), bc.body(blocks[2]).unwrap()); + assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); + assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); } #[test] @@ -3300,27 +3300,27 @@ pub(crate) mod tests { .unwrap(); let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &blocks[4]).unwrap(); - op.mark_head(&blocks[4]).unwrap(); + backend.begin_state_operation(&mut op, blocks[4]).unwrap(); + op.mark_head(blocks[4]).unwrap(); backend.commit_operation(op).unwrap(); let bc = backend.blockchain(); - assert_eq!(Some(vec![2.into()]), bc.body(&fork_hash_root).unwrap()); + assert_eq!(Some(vec![2.into()]), bc.body(fork_hash_root).unwrap()); for i in 1..5 { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &blocks[i]).unwrap(); - op.mark_finalized(&blocks[i], None).unwrap(); + backend.begin_state_operation(&mut op, blocks[i]).unwrap(); + op.mark_finalized(blocks[i], None).unwrap(); backend.commit_operation(op).unwrap(); } - assert_eq!(Some(vec![0.into()]), bc.body(&blocks[0]).unwrap()); - assert_eq!(Some(vec![1.into()]), bc.body(&blocks[1]).unwrap()); - assert_eq!(Some(vec![2.into()]), bc.body(&blocks[2]).unwrap()); - assert_eq!(Some(vec![3.into()]), 
bc.body(&blocks[3]).unwrap()); - assert_eq!(Some(vec![4.into()]), bc.body(&blocks[4]).unwrap()); + assert_eq!(Some(vec![0.into()]), bc.body(blocks[0]).unwrap()); + assert_eq!(Some(vec![1.into()]), bc.body(blocks[1]).unwrap()); + assert_eq!(Some(vec![2.into()]), bc.body(blocks[2]).unwrap()); + assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); + assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); - assert_eq!(Some(vec![2.into()]), bc.body(&fork_hash_root).unwrap()); + assert_eq!(Some(vec![2.into()]), bc.body(fork_hash_root).unwrap()); assert_eq!(bc.info().best_number, 4); for i in 0..5 { assert!(bc.hash(i).unwrap().is_some()); @@ -3369,23 +3369,23 @@ pub(crate) mod tests { ) .unwrap(); let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &blocks[4]).unwrap(); - op.mark_head(&blocks[4]).unwrap(); + backend.begin_state_operation(&mut op, blocks[4]).unwrap(); + op.mark_head(blocks[4]).unwrap(); backend.commit_operation(op).unwrap(); for i in 1..5 { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &blocks[4]).unwrap(); - op.mark_finalized(&blocks[i], None).unwrap(); + backend.begin_state_operation(&mut op, blocks[4]).unwrap(); + op.mark_finalized(blocks[i], None).unwrap(); backend.commit_operation(op).unwrap(); } let bc = backend.blockchain(); - assert_eq!(None, bc.body(&blocks[0]).unwrap()); - assert_eq!(None, bc.body(&blocks[1]).unwrap()); - assert_eq!(None, bc.body(&blocks[2]).unwrap()); - assert_eq!(Some(vec![3.into()]), bc.body(&blocks[3]).unwrap()); - assert_eq!(Some(vec![4.into()]), bc.body(&blocks[4]).unwrap()); + assert_eq!(None, bc.body(blocks[0]).unwrap()); + assert_eq!(None, bc.body(blocks[1]).unwrap()); + assert_eq!(None, bc.body(blocks[2]).unwrap()); + assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); + assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); } #[test] @@ -3419,17 +3419,17 @@ pub(crate) mod tests { ) .unwrap(); let bc = backend.blockchain(); - assert_eq!(bc.indexed_transaction(&x0_hash).unwrap().unwrap(), &x0[1..]); - assert_eq!(bc.indexed_transaction(&x1_hash).unwrap().unwrap(), &x1[1..]); + assert_eq!(bc.indexed_transaction(x0_hash).unwrap().unwrap(), &x0[1..]); + assert_eq!(bc.indexed_transaction(x1_hash).unwrap().unwrap(), &x1[1..]); let hashof0 = bc.info().genesis_hash; // Push one more blocks and make sure block is pruned and transaction index is cleared. 
let block1 = insert_block(&backend, 1, hash, None, Default::default(), vec![], None).unwrap(); - backend.finalize_block(&block1, None).unwrap(); - assert_eq!(bc.body(&hashof0).unwrap(), None); - assert_eq!(bc.indexed_transaction(&x0_hash).unwrap(), None); - assert_eq!(bc.indexed_transaction(&x1_hash).unwrap(), None); + backend.finalize_block(block1, None).unwrap(); + assert_eq!(bc.body(hashof0).unwrap(), None); + assert_eq!(bc.indexed_transaction(x0_hash).unwrap(), None); + assert_eq!(bc.indexed_transaction(x1_hash).unwrap(), None); } #[test] @@ -3463,8 +3463,8 @@ pub(crate) mod tests { ) .unwrap(); let bc = backend.blockchain(); - assert_eq!(bc.indexed_transaction(&x0_hash).unwrap().unwrap(), &x0[..]); - assert_eq!(bc.indexed_transaction(&x1_hash).unwrap(), None); + assert_eq!(bc.indexed_transaction(x0_hash).unwrap().unwrap(), &x0[..]); + assert_eq!(bc.indexed_transaction(x1_hash).unwrap(), None); } #[test] @@ -3502,14 +3502,14 @@ pub(crate) mod tests { for i in 1..10 { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &blocks[4]).unwrap(); - op.mark_finalized(&blocks[i], None).unwrap(); + backend.begin_state_operation(&mut op, blocks[4]).unwrap(); + op.mark_finalized(blocks[i], None).unwrap(); backend.commit_operation(op).unwrap(); let bc = backend.blockchain(); if i < 6 { - assert!(bc.indexed_transaction(&x1_hash).unwrap().is_some()); + assert!(bc.indexed_transaction(x1_hash).unwrap().is_some()); } else { - assert!(bc.indexed_transaction(&x1_hash).unwrap().is_none()); + assert!(bc.indexed_transaction(x1_hash).unwrap().is_none()); } } } @@ -3561,31 +3561,31 @@ pub(crate) mod tests { .unwrap(); assert_eq!(backend.blockchain().info().best_hash, best_hash); - assert!(backend.remove_leaf_block(&best_hash).is_err()); + assert!(backend.remove_leaf_block(best_hash).is_err()); assert_eq!(backend.blockchain().leaves().unwrap(), vec![blocks[2], blocks[3], best_hash]); assert_eq!(backend.blockchain().children(blocks[1]).unwrap(), vec![blocks[2], blocks[3]]); - assert!(backend.have_state_at(&blocks[3], 2)); + assert!(backend.have_state_at(blocks[3], 2)); assert!(backend.blockchain().header(BlockId::hash(blocks[3])).unwrap().is_some()); - backend.remove_leaf_block(&blocks[3]).unwrap(); - assert!(!backend.have_state_at(&blocks[3], 2)); + backend.remove_leaf_block(blocks[3]).unwrap(); + assert!(!backend.have_state_at(blocks[3], 2)); assert!(backend.blockchain().header(BlockId::hash(blocks[3])).unwrap().is_none()); assert_eq!(backend.blockchain().leaves().unwrap(), vec![blocks[2], best_hash]); assert_eq!(backend.blockchain().children(blocks[1]).unwrap(), vec![blocks[2]]); - assert!(backend.have_state_at(&blocks[2], 2)); + assert!(backend.have_state_at(blocks[2], 2)); assert!(backend.blockchain().header(BlockId::hash(blocks[2])).unwrap().is_some()); - backend.remove_leaf_block(&blocks[2]).unwrap(); - assert!(!backend.have_state_at(&blocks[2], 2)); + backend.remove_leaf_block(blocks[2]).unwrap(); + assert!(!backend.have_state_at(blocks[2], 2)); assert!(backend.blockchain().header(BlockId::hash(blocks[2])).unwrap().is_none()); assert_eq!(backend.blockchain().leaves().unwrap(), vec![best_hash, blocks[1]]); assert_eq!(backend.blockchain().children(blocks[1]).unwrap(), vec![]); - assert!(backend.have_state_at(&blocks[1], 1)); + assert!(backend.have_state_at(blocks[1], 1)); assert!(backend.blockchain().header(BlockId::hash(blocks[1])).unwrap().is_some()); - backend.remove_leaf_block(&blocks[1]).unwrap(); - assert!(!backend.have_state_at(&blocks[1], 1)); + 
backend.remove_leaf_block(blocks[1]).unwrap(); + assert!(!backend.have_state_at(blocks[1], 1)); assert!(backend.blockchain().header(BlockId::hash(blocks[1])).unwrap().is_none()); assert_eq!(backend.blockchain().leaves().unwrap(), vec![best_hash]); assert_eq!(backend.blockchain().children(blocks[0]).unwrap(), vec![best_hash]); @@ -3678,7 +3678,7 @@ pub(crate) mod tests { let block1_a = insert_header(&backend, 1, block0, None, Default::default()); let block2_a = insert_header(&backend, 2, block1_a, None, Default::default()); - backend.finalize_block(&block1_a, None).unwrap(); + backend.finalize_block(block1_a, None).unwrap(); assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a]); // Insert a fork prior to finalization point. Leaf should not be created. @@ -3702,7 +3702,7 @@ pub(crate) mod tests { let block3 = { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &block1).unwrap(); + backend.begin_state_operation(&mut op, block1).unwrap(); let header = Header { number: 3, parent_hash: block2, @@ -3721,7 +3721,7 @@ pub(crate) mod tests { let block4 = { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &block2).unwrap(); + backend.begin_state_operation(&mut op, block2).unwrap(); let header = Header { number: 4, parent_hash: block3, @@ -3740,7 +3740,7 @@ pub(crate) mod tests { let block3_fork = { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &block2).unwrap(); + backend.begin_state_operation(&mut op, block2).unwrap(); let header = Header { number: 3, parent_hash: block2, @@ -3757,22 +3757,22 @@ pub(crate) mod tests { header.hash() }; - assert!(backend.have_state_at(&block1, 1)); - assert!(backend.have_state_at(&block2, 2)); - assert!(backend.have_state_at(&block3, 3)); - assert!(backend.have_state_at(&block4, 4)); - assert!(backend.have_state_at(&block3_fork, 3)); + assert!(backend.have_state_at(block1, 1)); + assert!(backend.have_state_at(block2, 2)); + assert!(backend.have_state_at(block3, 3)); + assert!(backend.have_state_at(block4, 4)); + assert!(backend.have_state_at(block3_fork, 3)); assert_eq!(backend.blockchain.leaves().unwrap(), vec![block4, block3_fork]); assert_eq!(4, backend.blockchain.leaves.read().highest_leaf().unwrap().0); assert_eq!(3, backend.revert(1, false).unwrap().0); - assert!(backend.have_state_at(&block1, 1)); - assert!(!backend.have_state_at(&block2, 2)); - assert!(!backend.have_state_at(&block3, 3)); - assert!(!backend.have_state_at(&block4, 4)); - assert!(!backend.have_state_at(&block3_fork, 3)); + assert!(backend.have_state_at(block1, 1)); + assert!(!backend.have_state_at(block2, 2)); + assert!(!backend.have_state_at(block3, 3)); + assert!(!backend.have_state_at(block4, 4)); + assert!(!backend.have_state_at(block3_fork, 3)); assert_eq!(backend.blockchain.leaves().unwrap(), vec![block1]); assert_eq!(1, backend.blockchain.leaves.read().highest_leaf().unwrap().0); diff --git a/client/finality-grandpa/src/environment.rs b/client/finality-grandpa/src/environment.rs index 60720494a9f9a..f235c3a86c04e 100644 --- a/client/finality-grandpa/src/environment.rs +++ b/client/finality-grandpa/src/environment.rs @@ -1352,7 +1352,7 @@ where // ideally some handle to a synchronization oracle would be used // to avoid unconditionally notifying.
client - .apply_finality(import_op, &hash, persisted_justification, true) + .apply_finality(import_op, hash, persisted_justification, true) .map_err(|e| { warn!(target: "afg", "Error applying finality to block {:?}: {}", (hash, number), e); e diff --git a/client/finality-grandpa/src/finality_proof.rs b/client/finality-grandpa/src/finality_proof.rs index 3070581350662..453b41bc63468 100644 --- a/client/finality-grandpa/src/finality_proof.rs +++ b/client/finality-grandpa/src/finality_proof.rs @@ -188,7 +188,7 @@ where .expect_block_hash_from_id(&BlockId::Number(last_block_for_set))?; let justification = if let Some(grandpa_justification) = backend .blockchain() - .justifications(&last_block_for_set_id)? + .justifications(last_block_for_set_id)? .and_then(|justifications| justifications.into_justification(GRANDPA_ENGINE_ID)) { grandpa_justification @@ -312,7 +312,7 @@ mod tests { for block in to_finalize { let hash = blocks[*block as usize - 1].hash(); - client.finalize_block(&hash, None).unwrap(); + client.finalize_block(hash, None).unwrap(); } (client, backend, blocks) } @@ -492,7 +492,7 @@ mod tests { let grandpa_just8 = GrandpaJustification::from_commit(&client, round, commit).unwrap(); client - .finalize_block(&block8.hash(), Some((ID, grandpa_just8.encode().clone()))) + .finalize_block(block8.hash(), Some((ID, grandpa_just8.encode().clone()))) .unwrap(); // Authority set change at block 8, so the justification stored there will be used in the diff --git a/client/finality-grandpa/src/import.rs b/client/finality-grandpa/src/import.rs index d0a66888ec072..3715287eea31f 100644 --- a/client/finality-grandpa/src/import.rs +++ b/client/finality-grandpa/src/import.rs @@ -424,8 +424,8 @@ where } /// Read current set id from a given state. - fn current_set_id(&self, hash: &Block::Hash) -> Result { - let id = &BlockId::hash(*hash); + fn current_set_id(&self, hash: Block::Hash) -> Result { + let id = &BlockId::hash(hash); let runtime_version = self.inner.runtime_api().version(id).map_err(|e| { ConsensusError::ClientImport(format!( "Unable to retrieve current runtime version.
{}", @@ -480,7 +480,7 @@ where .runtime_api() .grandpa_authorities(&BlockId::hash(hash)) .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; - let set_id = self.current_set_id(&hash)?; + let set_id = self.current_set_id(hash)?; let authority_set = AuthoritySet::new( authorities.clone(), set_id, diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index d2db1feea0fef..93d20110ff5af 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -375,7 +375,7 @@ fn finalize_3_voters_no_observers() { // normally there's no justification for finalized blocks assert!( - net.lock().peer(0).client().justifications(&hashof20).unwrap().is_none(), + net.lock().peer(0).client().justifications(hashof20).unwrap().is_none(), "Extra justification for block#1", ); } @@ -621,7 +621,7 @@ fn justification_is_generated_periodically() { // when block#32 (justification_period) is finalized, justification // is required => generated for i in 0..3 { - assert!(net.lock().peer(i).client().justifications(&hashof32).unwrap().is_some()); + assert!(net.lock().peer(i).client().justifications(hashof32).unwrap().is_some()); } } @@ -665,12 +665,12 @@ fn sync_justifications_on_change_blocks() { // the first 3 peers are grandpa voters and therefore have already finalized // block 21 and stored a justification for i in 0..3 { - assert!(net.lock().peer(i).client().justifications(&hashof21).unwrap().is_some()); + assert!(net.lock().peer(i).client().justifications(hashof21).unwrap().is_some()); } // the last peer should get the justification by syncing from other peers futures::executor::block_on(futures::future::poll_fn(move |cx| { - if net.lock().peer(3).client().justifications(&hashof21).unwrap().is_none() { + if net.lock().peer(3).client().justifications(hashof21).unwrap().is_none() { net.lock().poll(cx); Poll::Pending } else { @@ -1428,7 +1428,7 @@ fn grandpa_environment_respects_voting_rules() { .as_client() .expect_block_hash_from_id(&BlockId::Number(19)) .unwrap(); - peer.client().finalize_block(&hashof19, None, false).unwrap(); + peer.client().finalize_block(hashof19, None, false).unwrap(); // the 3/4 environment should propose block 21 for voting assert_eq!( @@ -1455,7 +1455,7 @@ fn grandpa_environment_respects_voting_rules() { .as_client() .expect_block_hash_from_id(&BlockId::Number(21)) .unwrap(); - peer.client().finalize_block(&hashof21, None, false).unwrap(); + peer.client().finalize_block(hashof21, None, false).unwrap(); // even though the default environment will always try to not vote on the // best block, there's a hard rule that we can't cast any votes lower than @@ -1666,7 +1666,7 @@ fn imports_justification_for_regular_blocks_on_import() { ); // the justification should be imported and available from the client - assert!(client.justifications(&block_hash).unwrap().is_some()); + assert!(client.justifications(block_hash).unwrap().is_some()); } #[test] diff --git a/client/finality-grandpa/src/warp_proof.rs b/client/finality-grandpa/src/warp_proof.rs index 786dfacf8b0b9..c9f762fc7d593 100644 --- a/client/finality-grandpa/src/warp_proof.rs +++ b/client/finality-grandpa/src/warp_proof.rs @@ -130,7 +130,7 @@ impl WarpSyncProof { } let justification = blockchain - .justifications(&header.hash())? + .justifications(header.hash())? 
.and_then(|just| just.into_justification(GRANDPA_ENGINE_ID)) .expect( "header is last in set and contains standard change signal; \ @@ -412,7 +412,7 @@ mod tests { let justification = GrandpaJustification::from_commit(&client, 42, commit).unwrap(); client - .finalize_block(&target_hash, Some((GRANDPA_ENGINE_ID, justification.encode()))) + .finalize_block(target_hash, Some((GRANDPA_ENGINE_ID, justification.encode()))) .unwrap(); authority_set_changes.push((current_set_id, n)); diff --git a/client/network/bitswap/src/lib.rs b/client/network/bitswap/src/lib.rs index aba7f40ce632f..62a18b18c839d 100644 --- a/client/network/bitswap/src/lib.rs +++ b/client/network/bitswap/src/lib.rs @@ -203,7 +203,7 @@ impl BitswapRequestHandler { let mut hash = B::Hash::default(); hash.as_mut().copy_from_slice(&cid.hash().digest()[0..32]); - let transaction = match self.client.indexed_transaction(&hash) { + let transaction = match self.client.indexed_transaction(hash) { Ok(ex) => ex, Err(e) => { error!(target: LOG_TARGET, "Error retrieving transaction {}: {}", hash, e); diff --git a/client/network/light/src/light_client_requests/handler.rs b/client/network/light/src/light_client_requests/handler.rs index 7156545fbd9aa..77904c7256295 100644 --- a/client/network/light/src/light_client_requests/handler.rs +++ b/client/network/light/src/light_client_requests/handler.rs @@ -172,7 +172,7 @@ where let block = Decode::decode(&mut request.block.as_ref())?; - let response = match self.client.execution_proof(&block, &request.method, &request.data) { + let response = match self.client.execution_proof(block, &request.method, &request.data) { Ok((_, proof)) => { let r = schema::v1::light::RemoteCallResponse { proof: proof.encode() }; Some(schema::v1::light::response::Response::RemoteCallResponse(r)) @@ -212,7 +212,7 @@ where let block = Decode::decode(&mut request.block.as_ref())?; let response = - match self.client.read_proof(&block, &mut request.keys.iter().map(AsRef::as_ref)) { + match self.client.read_proof(block, &mut request.keys.iter().map(AsRef::as_ref)) { Ok(proof) => { let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() }; Some(schema::v1::light::response::Response::RemoteReadResponse(r)) @@ -259,7 +259,7 @@ where }; let response = match child_info.and_then(|child_info| { self.client.read_child_proof( - &block, + block, &child_info, &mut request.keys.iter().map(AsRef::as_ref), ) diff --git a/client/network/sync/src/block_request_handler.rs b/client/network/sync/src/block_request_handler.rs index 5eba1d52dc68c..467d898489b61 100644 --- a/client/network/sync/src/block_request_handler.rs +++ b/client/network/sync/src/block_request_handler.rs @@ -332,7 +332,7 @@ where let hash = header.hash(); let parent_hash = *header.parent_hash(); let justifications = - if get_justification { self.client.justifications(&hash)? } else { None }; + if get_justification { self.client.justifications(hash)? } else { None }; let (justifications, justification, is_empty_justification) = if support_multiple_justifications { @@ -361,7 +361,7 @@ where }; let body = if get_body { - match self.client.block_body(&hash)? { + match self.client.block_body(hash)? { Some(mut extrinsics) => extrinsics.iter_mut().map(|extrinsic| extrinsic.encode()).collect(), None => { @@ -374,7 +374,7 @@ where }; let indexed_body = if get_indexed_body { - match self.client.block_indexed_body(&hash)? { + match self.client.block_indexed_body(hash)? 
{ Some(transactions) => transactions, None => { log::trace!( diff --git a/client/network/sync/src/lib.rs b/client/network/sync/src/lib.rs index f369bdb47e1c6..7c484835951e8 100644 --- a/client/network/sync/src/lib.rs +++ b/client/network/sync/src/lib.rs @@ -3201,7 +3201,7 @@ mod test { let finalized_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2 - 1].clone(); let just = (*b"TEST", Vec::new()); - client.finalize_block(&finalized_block.hash(), Some(just)).unwrap(); + client.finalize_block(finalized_block.hash(), Some(just)).unwrap(); sync.update_chain_info(&info.best_hash, info.best_number); let peer_id1 = PeerId::random(); @@ -3333,7 +3333,7 @@ mod test { let finalized_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2 - 1].clone(); let just = (*b"TEST", Vec::new()); - client.finalize_block(&finalized_block.hash(), Some(just)).unwrap(); + client.finalize_block(finalized_block.hash(), Some(just)).unwrap(); sync.update_chain_info(&info.best_hash, info.best_number); let peer_id1 = PeerId::random(); diff --git a/client/network/sync/src/state_request_handler.rs b/client/network/sync/src/state_request_handler.rs index 0a369c998dbd7..441400ef439b7 100644 --- a/client/network/sync/src/state_request_handler.rs +++ b/client/network/sync/src/state_request_handler.rs @@ -205,14 +205,14 @@ where if !request.no_proof { let (proof, _count) = self.client.read_proof_collection( - &block, + block, request.start.as_slice(), MAX_RESPONSE_BYTES, )?; response.proof = proof.encode(); } else { let entries = self.client.storage_collection( - &block, + block, request.start.as_slice(), MAX_RESPONSE_BYTES, )?; diff --git a/client/network/test/src/block_import.rs b/client/network/test/src/block_import.rs index a1d42f1e60440..b86f6787f30b5 100644 --- a/client/network/test/src/block_import.rs +++ b/client/network/test/src/block_import.rs @@ -40,7 +40,7 @@ fn prepare_good_block() -> (TestClient, Hash, u64, PeerId, IncomingBlock) let (hash, number) = (client.block_hash(1).unwrap().unwrap(), 1); let header = client.header(&BlockId::Number(1)).unwrap(); - let justifications = client.justifications(&hash).unwrap(); + let justifications = client.justifications(hash).unwrap(); let peer_id = PeerId::random(); ( client, diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index f348d5cf94c43..035fc0a972a59 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -173,12 +173,12 @@ impl PeersClient { Some(header) => header, None => return false, }; - self.backend.have_state_at(&header.hash(), *header.number()) + self.backend.have_state_at(header.hash(), *header.number()) } pub fn justifications( &self, - hash: &::Hash, + hash: ::Hash, ) -> ClientResult> { self.client.justifications(hash) } @@ -193,7 +193,7 @@ impl PeersClient { pub fn finalize_block( &self, - hash: &::Hash, + hash: ::Hash, justification: Option, notify: bool, ) -> ClientResult<()> { @@ -535,14 +535,14 @@ where self.verifier.failed_verifications.lock().clone() } - pub fn has_block(&self, hash: &H256) -> bool { + pub fn has_block(&self, hash: H256) -> bool { self.backend .as_ref() - .map(|backend| backend.blockchain().header(BlockId::hash(*hash)).unwrap().is_some()) + .map(|backend| backend.blockchain().header(BlockId::hash(hash)).unwrap().is_some()) .unwrap_or(false) } - pub fn has_body(&self, hash: &H256) -> bool { + pub fn has_body(&self, hash: H256) -> bool { self.backend .as_ref() .map(|backend| backend.blockchain().body(hash).unwrap().is_some()) @@ -1124,7 +1124,7 @@ impl JustificationImport 
for ForceFinalized { justification: Justification, ) -> Result<(), Self::Error> { self.0 - .finalize_block(&hash, Some(justification), true) + .finalize_block(hash, Some(justification), true) .map_err(|_| ConsensusError::InvalidJustification) } } diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index 9ae3014e497ce..bbba3bc6ded62 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -252,23 +252,14 @@ fn sync_justifications() { let hashof20 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(20)).unwrap(); // there's currently no justification for block #10 - assert_eq!(net.peer(0).client().justifications(&hashof10).unwrap(), None); - assert_eq!(net.peer(1).client().justifications(&hashof10).unwrap(), None); + assert_eq!(net.peer(0).client().justifications(hashof10).unwrap(), None); + assert_eq!(net.peer(1).client().justifications(hashof10).unwrap(), None); // we finalize block #10, #15 and #20 for peer 0 with a justification let just = (*b"FRNK", Vec::new()); - net.peer(0) - .client() - .finalize_block(&hashof10, Some(just.clone()), true) - .unwrap(); - net.peer(0) - .client() - .finalize_block(&hashof15, Some(just.clone()), true) - .unwrap(); - net.peer(0) - .client() - .finalize_block(&hashof20, Some(just.clone()), true) - .unwrap(); + net.peer(0).client().finalize_block(hashof10, Some(just.clone()), true).unwrap(); + net.peer(0).client().finalize_block(hashof15, Some(just.clone()), true).unwrap(); + net.peer(0).client().finalize_block(hashof20, Some(just.clone()), true).unwrap(); let hashof10 = net.peer(1).client().header(&BlockId::Number(10)).unwrap().unwrap().hash(); let hashof15 = net.peer(1).client().header(&BlockId::Number(15)).unwrap().unwrap().hash(); @@ -283,12 +274,12 @@ fn sync_justifications() { net.poll(cx); for hash in [hashof10, hashof15, hashof20] { - if net.peer(0).client().justifications(&hash).unwrap() != + if net.peer(0).client().justifications(hash).unwrap() != Some(Justifications::from((*b"FRNK", Vec::new()))) { return Poll::Pending } - if net.peer(1).client().justifications(&hash).unwrap() != + if net.peer(1).client().justifications(hash).unwrap() != Some(Justifications::from((*b"FRNK", Vec::new()))) { return Poll::Pending @@ -314,7 +305,7 @@ fn sync_justifications_across_forks() { net.block_until_sync(); let just = (*b"FRNK", Vec::new()); - net.peer(0).client().finalize_block(&f1_best, Some(just), true).unwrap(); + net.peer(0).client().finalize_block(f1_best, Some(just), true).unwrap(); net.peer(1).request_justification(&f1_best, 10); net.peer(1).request_justification(&f2_best, 11); @@ -322,9 +313,9 @@ fn sync_justifications_across_forks() { block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - if net.peer(0).client().justifications(&f1_best).unwrap() == + if net.peer(0).client().justifications(f1_best).unwrap() == Some(Justifications::from((*b"FRNK", Vec::new()))) && - net.peer(1).client().justifications(&f1_best).unwrap() == + net.peer(1).client().justifications(f1_best).unwrap() == Some(Justifications::from((*b"FRNK", Vec::new()))) { Poll::Ready(()) @@ -369,10 +360,10 @@ fn syncs_all_forks() { net.block_until_sync(); // Check that all peers have all of the branches. 
- assert!(net.peer(0).has_block(&b1)); - assert!(net.peer(0).has_block(&b2)); - assert!(net.peer(1).has_block(&b1)); - assert!(net.peer(1).has_block(&b2)); + assert!(net.peer(0).has_block(b1)); + assert!(net.peer(0).has_block(b2)); + assert!(net.peer(1).has_block(b1)); + assert!(net.peer(1).has_block(b2)); } #[test] @@ -555,7 +546,7 @@ fn syncs_header_only_forks() { net.peer(1).push_blocks(4, false); // Peer 1 will sync the small fork even though common block state is missing - while !net.peer(1).has_block(&small_hash) { + while !net.peer(1).has_block(small_hash) { net.block_until_idle(); } } @@ -657,13 +648,13 @@ fn can_sync_to_peers_with_wrong_common_block() { // both peers re-org to the same fork without notifying each other let just = Some((*b"FRNK", Vec::new())); - net.peer(0).client().finalize_block(&fork_hash, just.clone(), true).unwrap(); - net.peer(1).client().finalize_block(&fork_hash, just, true).unwrap(); + net.peer(0).client().finalize_block(fork_hash, just.clone(), true).unwrap(); + net.peer(1).client().finalize_block(fork_hash, just, true).unwrap(); let final_hash = net.peer(0).push_blocks(1, false); net.block_until_sync(); - assert!(net.peer(1).has_block(&final_hash)); + assert!(net.peer(1).has_block(final_hash)); } /// Returns `is_new_best = true` for each validated announcement. @@ -724,7 +715,7 @@ fn sync_blocks_when_block_announce_validator_says_it_is_new_best() { ForkChoiceStrategy::Custom(false), ); - while !net.peer(2).has_block(&block_hash) { + while !net.peer(2).has_block(block_hash) { net.block_until_idle(); } } @@ -767,7 +758,7 @@ fn wait_until_deferred_block_announce_validation_is_ready() { ForkChoiceStrategy::Custom(false), ); - while !net.peer(1).has_block(&block_hash) { + while !net.peer(1).has_block(block_hash) { net.block_until_idle(); } } @@ -788,7 +779,7 @@ fn sync_to_tip_requires_that_sync_protocol_is_informed_about_best_block() { net.block_until_idle(); // The peer should not have synced the block. - assert!(!net.peer(1).has_block(&block_hash)); + assert!(!net.peer(1).has_block(block_hash)); // Make sync protocol aware of the best block net.peer(0).network_service().new_best_block_imported(block_hash, 3); @@ -802,7 +793,7 @@ fn sync_to_tip_requires_that_sync_protocol_is_informed_about_best_block() { block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - if net.peer(2).has_block(&block_hash) { + if net.peer(2).has_block(block_hash) { Poll::Ready(()) } else { Poll::Pending @@ -810,7 +801,7 @@ fn sync_to_tip_requires_that_sync_protocol_is_informed_about_best_block() { })); // However peer 1 should still not have the block. 
- assert!(!net.peer(1).has_block(&block_hash)); + assert!(!net.peer(1).has_block(block_hash)); } /// Ensures that if we as a syncing node sync to the tip while we are connected to another peer @@ -831,10 +822,10 @@ fn sync_to_tip_when_we_sync_together_with_multiple_peers() { net.block_until_connected(); net.block_until_idle(); - assert!(!net.peer(2).has_block(&block_hash)); + assert!(!net.peer(2).has_block(block_hash)); net.peer(0).network_service().new_best_block_imported(block_hash, 10_000); - while !net.peer(2).has_block(&block_hash) && !net.peer(1).has_block(&block_hash) { + while !net.peer(2).has_block(block_hash) && !net.peer(1).has_block(block_hash) { net.block_until_idle(); } } @@ -895,7 +886,7 @@ fn block_announce_data_is_propagated() { let block_hash = net.peer(0).push_blocks_at_without_announcing(BlockId::Number(0), 1, true); net.peer(0).announce_block(block_hash, Some(vec![137])); - while !net.peer(1).has_block(&block_hash) || !net.peer(2).has_block(&block_hash) { + while !net.peer(1).has_block(block_hash) || !net.peer(2).has_block(block_hash) { net.block_until_idle(); } } @@ -939,7 +930,7 @@ fn continue_to_sync_after_some_block_announcement_verifications_failed() { let block_hash = net.peer(0).push_blocks(500, true); net.block_until_sync(); - assert!(net.peer(1).has_block(&block_hash)); + assert!(net.peer(1).has_block(block_hash)); } /// When being spammed by the same request of a peer, we ban this peer. However, we should only ban @@ -956,8 +947,8 @@ fn multiple_requests_are_accepted_as_long_as_they_are_not_fulfilled() { let hashof10 = net.peer(1).client().header(&BlockId::Number(10)).unwrap().unwrap().hash(); // there's currently no justification for block #10 - assert_eq!(net.peer(0).client().justifications(&hashof10).unwrap(), None); - assert_eq!(net.peer(1).client().justifications(&hashof10).unwrap(), None); + assert_eq!(net.peer(0).client().justifications(hashof10).unwrap(), None); + assert_eq!(net.peer(1).client().justifications(hashof10).unwrap(), None); // Let's assume block 10 was finalized, but we still need the justification from the network. net.peer(1).request_justification(&hashof10, 10); @@ -982,13 +973,13 @@ fn multiple_requests_are_accepted_as_long_as_they_are_not_fulfilled() { // Finalize the block and make the justification available. net.peer(0) .client() - .finalize_block(&hashof10, Some((*b"FRNK", Vec::new())), true) + .finalize_block(hashof10, Some((*b"FRNK", Vec::new())), true) .unwrap(); block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - if net.peer(1).client().justifications(&hashof10).unwrap() != + if net.peer(1).client().justifications(hashof10).unwrap() != Some(Justifications::from((*b"FRNK", Vec::new()))) { return Poll::Pending @@ -1110,7 +1101,7 @@ fn syncs_state() { .blockchain() .expect_block_hash_from_id(&BlockId::Number(60)) .unwrap(); - net.peer(1).client().finalize_block(&hashof60, Some(just), true).unwrap(); + net.peer(1).client().finalize_block(hashof60, Some(just), true).unwrap(); // Wait for state sync. 
block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); @@ -1165,14 +1156,14 @@ fn syncs_indexed_blocks() { .peer(0) .client() .as_client() - .indexed_transaction(&indexed_key) + .indexed_transaction(indexed_key) .unwrap() .is_some()); assert!(net .peer(1) .client() .as_client() - .indexed_transaction(&indexed_key) + .indexed_transaction(indexed_key) .unwrap() .is_none()); @@ -1181,7 +1172,7 @@ fn syncs_indexed_blocks() { .peer(1) .client() .as_client() - .indexed_transaction(&indexed_key) + .indexed_transaction(indexed_key) .unwrap() .is_some()); } @@ -1210,7 +1201,7 @@ fn warp_sync() { // Wait for peer 1 download block history block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - if net.peer(3).has_body(&gap_end) && net.peer(3).has_body(&target) { + if net.peer(3).has_body(gap_end) && net.peer(3).has_body(target) { Poll::Ready(()) } else { Poll::Pending diff --git a/client/rpc/src/chain/tests.rs b/client/rpc/src/chain/tests.rs index a41d8e41b8fa7..1e6dbd5aca148 100644 --- a/client/rpc/src/chain/tests.rs +++ b/client/rpc/src/chain/tests.rs @@ -206,7 +206,7 @@ async fn should_return_finalized_hash() { assert_eq!(res, client.genesis_hash()); // finalize - client.finalize_block(&block_hash, None).unwrap(); + client.finalize_block(block_hash, None).unwrap(); let res: H256 = api.call("chain_getFinalizedHead", EmptyParams::new()).await.unwrap(); assert_eq!(res, block_hash); } @@ -235,7 +235,7 @@ async fn test_head_subscription(method: &str) { let block = client.new_block(Default::default()).unwrap().build().unwrap().block; let block_hash = block.hash(); client.import(BlockOrigin::Own, block).await.unwrap(); - client.finalize_block(&block_hash, None).unwrap(); + client.finalize_block(block_hash, None).unwrap(); sub }; diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index 7fc7f840a9daf..64b6cacaad700 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -147,7 +147,7 @@ where let mut block_changes = StorageChangeSet { block: *block_hash, changes: Vec::new() }; for key in keys { let (has_changed, data) = { - let curr_data = self.client.storage(block_hash, key).map_err(client_err)?; + let curr_data = self.client.storage(*block_hash, key).map_err(client_err)?; match last_values.get(key) { Some(prev_data) => (curr_data != *prev_data, curr_data), None => (true, curr_data), @@ -213,7 +213,7 @@ where prefix: StorageKey, ) -> std::result::Result, Error> { self.block_or_best(block) - .and_then(|block| self.client.storage_keys(&block, &prefix)) + .and_then(|block| self.client.storage_keys(block, &prefix)) .map_err(client_err) } @@ -223,7 +223,7 @@ where prefix: StorageKey, ) -> std::result::Result, Error> { self.block_or_best(block) - .and_then(|block| self.client.storage_pairs(&block, &prefix)) + .and_then(|block| self.client.storage_pairs(block, &prefix)) .map_err(client_err) } @@ -236,7 +236,7 @@ where ) -> std::result::Result, Error> { self.block_or_best(block) .and_then(|block| { - self.client.storage_keys_iter(&block, prefix.as_ref(), start_key.as_ref()) + self.client.storage_keys_iter(block, prefix.as_ref(), start_key.as_ref()) }) .map(|iter| iter.take(count as usize).collect()) .map_err(client_err) @@ -248,7 +248,7 @@ where key: StorageKey, ) -> std::result::Result, Error> { self.block_or_best(block) - .and_then(|block| self.client.storage(&block, &key)) + .and_then(|block| self.client.storage(block, &key)) .map_err(client_err) } @@ -262,14 +262,14 @@ where Err(e) => return Err(client_err(e)), }; - match 
self.client.storage(&block, &key) { + match self.client.storage(block, &key) { Ok(Some(d)) => return Ok(Some(d.0.len() as u64)), Err(e) => return Err(client_err(e)), Ok(None) => {}, } self.client - .storage_pairs(&block, &key) + .storage_pairs(block, &key) .map(|kv| { let item_sum = kv.iter().map(|(_, v)| v.0.len() as u64).sum::(); if item_sum > 0 { @@ -287,7 +287,7 @@ where key: StorageKey, ) -> std::result::Result, Error> { self.block_or_best(block) - .and_then(|block| self.client.storage_hash(&block, &key)) + .and_then(|block| self.client.storage_hash(block, &key)) .map_err(client_err) } @@ -345,7 +345,7 @@ where self.block_or_best(block) .and_then(|block| { self.client - .read_proof(&block, &mut keys.iter().map(|key| key.0.as_ref())) + .read_proof(block, &mut keys.iter().map(|key| key.0.as_ref())) .map(|proof| proof.into_iter_nodes().map(|node| node.into()).collect()) .map(|proof| ReadProof { at: block, proof }) }) @@ -413,7 +413,7 @@ where let changes = keys .into_iter() .map(|key| { - let v = self.client.storage(&block, &key).ok().flatten(); + let v = self.client.storage(block, &key).ok().flatten(); (key, v) }) .collect(); @@ -494,7 +494,7 @@ where }; self.client .read_child_proof( - &block, + block, &child_info, &mut keys.iter().map(|key| key.0.as_ref()), ) @@ -517,7 +517,7 @@ where ChildInfo::new_default(storage_key), None => return Err(sp_blockchain::Error::InvalidChildStorageKey), }; - self.client.child_storage_keys(&block, &child_info, &prefix) + self.client.child_storage_keys(block, &child_info, &prefix) }) .map_err(client_err) } @@ -538,7 +538,7 @@ where None => return Err(sp_blockchain::Error::InvalidChildStorageKey), }; self.client.child_storage_keys_iter( - &block, + block, child_info, prefix.as_ref(), start_key.as_ref(), @@ -561,7 +561,7 @@ where ChildInfo::new_default(storage_key), None => return Err(sp_blockchain::Error::InvalidChildStorageKey), }; - self.client.child_storage(&block, &child_info, &key) + self.client.child_storage(block, &child_info, &key) }) .map_err(client_err) } @@ -584,7 +584,7 @@ where keys.into_iter() .map(move |key| { - client.clone().child_storage(&block, &child_info, &key).map_err(client_err) + client.clone().child_storage(block, &child_info, &key).map_err(client_err) }) .collect() } @@ -602,7 +602,7 @@ where ChildInfo::new_default(storage_key), None => return Err(sp_blockchain::Error::InvalidChildStorageKey), }; - self.client.child_storage_hash(&block, &child_info, &key) + self.client.child_storage_hash(block, &child_info, &key) }) .map_err(client_err) } diff --git a/client/service/src/chain_ops/export_raw_state.rs b/client/service/src/chain_ops/export_raw_state.rs index 04dba387de908..ca7a070086f45 100644 --- a/client/service/src/chain_ops/export_raw_state.rs +++ b/client/service/src/chain_ops/export_raw_state.rs @@ -25,7 +25,7 @@ use std::{collections::HashMap, sync::Arc}; /// Export the raw state at the given `block`. If `block` is `None`, the /// best block will be used. 
-pub fn export_raw_state(client: Arc, hash: &B::Hash) -> Result +pub fn export_raw_state(client: Arc, hash: B::Hash) -> Result where C: UsageProvider + StorageProvider, B: BlockT, diff --git a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs index 8ab332a24be78..a1a012dcedd9f 100644 --- a/client/service/src/client/call_executor.rs +++ b/client/service/src/client/call_executor.rs @@ -148,7 +148,7 @@ where ) -> sp_blockchain::Result> { let mut changes = OverlayedChanges::default(); let at_hash = self.backend.blockchain().expect_block_hash_from_id(at)?; - let state = self.backend.state_at(&at_hash)?; + let state = self.backend.state_at(at_hash)?; let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); let runtime_code = state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; @@ -193,7 +193,7 @@ where let mut storage_transaction_cache = storage_transaction_cache.map(|c| c.borrow_mut()); let at_hash = self.backend.blockchain().expect_block_hash_from_id(at)?; - let state = self.backend.state_at(&at_hash)?; + let state = self.backend.state_at(at_hash)?; let changes = &mut *changes.borrow_mut(); @@ -251,7 +251,7 @@ where let mut overlay = OverlayedChanges::default(); let at_hash = self.backend.blockchain().expect_block_hash_from_id(id)?; - let state = self.backend.state_at(&at_hash)?; + let state = self.backend.state_at(at_hash)?; let mut cache = StorageTransactionCache::::default(); let mut ext = Ext::new(&mut overlay, &mut cache, &state, None); let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); @@ -269,7 +269,7 @@ where call_data: &[u8], ) -> sp_blockchain::Result<(Vec, StorageProof)> { let at_hash = self.backend.blockchain().expect_block_hash_from_id(at)?; - let state = self.backend.state_at(&at_hash)?; + let state = self.backend.state_at(at_hash)?; let trie_backend = state.as_trie_backend(); diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index a4890f2fcf06f..438d0b7f77061 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -414,14 +414,14 @@ where } /// Get a reference to the state at a given block. - pub fn state_at(&self, hash: &Block::Hash) -> sp_blockchain::Result { + pub fn state_at(&self, hash: Block::Hash) -> sp_blockchain::Result { self.backend.state_at(hash) } /// Get the code at a given block. pub fn code_at(&self, id: &BlockId) -> sp_blockchain::Result> { let hash = self.backend.blockchain().expect_block_hash_from_id(id)?; - Ok(StorageProvider::storage(self, &hash, &StorageKey(well_known_keys::CODE.to_vec()))? + Ok(StorageProvider::storage(self, hash, &StorageKey(well_known_keys::CODE.to_vec()))? 
.expect( "None is returned if there's no value stored for the given key;\ ':code' key is always defined; qed", ) .0) } @@ -586,7 +586,7 @@ where Some(storage_changes) => { let storage_changes = match storage_changes { sc_consensus::StorageChanges::Changes(storage_changes) => { - self.backend.begin_state_operation(&mut operation.op, &parent_hash)?; + self.backend.begin_state_operation(&mut operation.op, parent_hash)?; let (main_sc, child_sc, offchain_sc, tx, _, tx_index) = storage_changes.into_inner(); @@ -813,7 +813,7 @@ where Block::new(import_block.header.clone(), body.clone()), )?; - let state = self.backend.state_at(parent_hash)?; + let state = self.backend.state_at(*parent_hash)?; let gen_storage_changes = runtime_api .into_storage_changes(&state, *parent_hash) .map_err(sp_blockchain::Error::Storage)?; @@ -877,17 +877,17 @@ where // pluggable we cannot make a better choice here. usages that need // an accurate "best" block need to go through `SelectChain` // instead. - operation.op.mark_head(&block)?; + operation.op.mark_head(block)?; } let enacted = route_from_finalized.enacted(); assert!(enacted.len() > 0); for finalize_new in &enacted[..enacted.len() - 1] { - operation.op.mark_finalized(&finalize_new.hash, None)?; + operation.op.mark_finalized(finalize_new.hash, None)?; } assert_eq!(enacted.last().map(|e| e.hash), Some(block)); - operation.op.mark_finalized(&block, justification)?; + operation.op.mark_finalized(block, justification)?; if notify { let finalized = @@ -1033,7 +1033,7 @@ where }; match hash_and_number { Some((hash, number)) => - if self.backend.have_state_at(&hash, number) { + if self.backend.have_state_at(hash, number) { Ok(BlockStatus::InChainWithState) } else { Ok(BlockStatus::InChainPruned) @@ -1053,7 +1053,7 @@ where /// Get block body by id.
pub fn body( &self, - hash: &Block::Hash, + hash: Block::Hash, ) -> sp_blockchain::Result::Extrinsic>>> { self.backend.blockchain().body(hash) } @@ -1151,7 +1151,7 @@ where { fn read_proof( &self, - hash: &Block::Hash, + hash: Block::Hash, keys: &mut dyn Iterator, ) -> sp_blockchain::Result { self.state_at(hash) @@ -1160,7 +1160,7 @@ where fn read_child_proof( &self, - hash: &Block::Hash, + hash: Block::Hash, child_info: &ChildInfo, keys: &mut dyn Iterator, ) -> sp_blockchain::Result { @@ -1170,16 +1170,16 @@ where fn execution_proof( &self, - hash: &Block::Hash, + hash: Block::Hash, method: &str, call_data: &[u8], ) -> sp_blockchain::Result<(Vec, StorageProof)> { - self.executor.prove_execution(&BlockId::Hash(*hash), method, call_data) + self.executor.prove_execution(&BlockId::Hash(hash), method, call_data) } fn read_proof_collection( &self, - hash: &Block::Hash, + hash: Block::Hash, start_key: &[Vec], size_limit: usize, ) -> sp_blockchain::Result<(CompactProof, u32)> { @@ -1198,14 +1198,14 @@ where fn storage_collection( &self, - hash: &Block::Hash, + hash: Block::Hash, start_key: &[Vec], size_limit: usize, ) -> sp_blockchain::Result> { if start_key.len() > MAX_NESTED_TRIE_DEPTH { return Err(Error::Backend("Invalid start key.".to_string())) } - let state = self.state_at(&hash)?; + let state = self.state_at(hash)?; let child_info = |storage_key: &Vec| -> sp_blockchain::Result { let storage_key = PrefixedStorageKey::new_ref(storage_key); match ChildType::from_prefixed_key(storage_key) { @@ -1398,7 +1398,7 @@ where { fn storage_keys( &self, - hash: &Block::Hash, + hash: Block::Hash, key_prefix: &StorageKey, ) -> sp_blockchain::Result> { let keys = self.state_at(hash)?.keys(&key_prefix.0).into_iter().map(StorageKey).collect(); @@ -1407,7 +1407,7 @@ where fn storage_pairs( &self, - hash: &::Hash, + hash: ::Hash, key_prefix: &StorageKey, ) -> sp_blockchain::Result> { let state = self.state_at(hash)?; @@ -1424,7 +1424,7 @@ where fn storage_keys_iter<'a>( &self, - hash: &::Hash, + hash: ::Hash, prefix: Option<&'a StorageKey>, start_key: Option<&StorageKey>, ) -> sp_blockchain::Result> { @@ -1435,7 +1435,7 @@ where fn child_storage_keys_iter<'a>( &self, - hash: &::Hash, + hash: ::Hash, child_info: ChildInfo, prefix: Option<&'a StorageKey>, start_key: Option<&StorageKey>, @@ -1447,7 +1447,7 @@ where fn storage( &self, - hash: &Block::Hash, + hash: Block::Hash, key: &StorageKey, ) -> sp_blockchain::Result> { Ok(self @@ -1459,7 +1459,7 @@ where fn storage_hash( &self, - hash: &::Hash, + hash: ::Hash, key: &StorageKey, ) -> sp_blockchain::Result> { self.state_at(hash)? 
@@ -1469,7 +1469,7 @@ where fn child_storage_keys( &self, - hash: &::Hash, + hash: ::Hash, child_info: &ChildInfo, key_prefix: &StorageKey, ) -> sp_blockchain::Result> { @@ -1484,7 +1484,7 @@ where fn child_storage( &self, - hash: &::Hash, + hash: ::Hash, child_info: &ChildInfo, key: &StorageKey, ) -> sp_blockchain::Result> { @@ -1497,7 +1497,7 @@ where fn child_storage_hash( &self, - hash: &::Hash, + hash: ::Hash, child_info: &ChildInfo, key: &StorageKey, ) -> sp_blockchain::Result> { @@ -1683,7 +1683,7 @@ where fn state_at(&self, at: &BlockId) -> Result { let hash = self.backend.blockchain().expect_block_hash_from_id(at)?; - self.state_at(&hash).map_err(Into::into) + self.state_at(hash).map_err(Into::into) } } @@ -1844,17 +1844,17 @@ where fn apply_finality( &self, operation: &mut ClientImportOperation, - hash: &Block::Hash, + hash: Block::Hash, justification: Option, notify: bool, ) -> sp_blockchain::Result<()> { let last_best = self.backend.blockchain().info().best_hash; - self.apply_finality_with_block_hash(operation, *hash, justification, last_best, notify) + self.apply_finality_with_block_hash(operation, hash, justification, last_best, notify) } fn finalize_block( &self, - hash: &Block::Hash, + hash: Block::Hash, justification: Option, notify: bool, ) -> sp_blockchain::Result<()> { @@ -1873,7 +1873,7 @@ where fn apply_finality( &self, operation: &mut ClientImportOperation, - hash: &Block::Hash, + hash: Block::Hash, justification: Option, notify: bool, ) -> sp_blockchain::Result<()> { @@ -1882,7 +1882,7 @@ where fn finalize_block( &self, - hash: &Block::Hash, + hash: Block::Hash, justification: Option, notify: bool, ) -> sp_blockchain::Result<()> { @@ -1939,7 +1939,7 @@ where { fn block_body( &self, - hash: &Block::Hash, + hash: Block::Hash, ) -> sp_blockchain::Result::Extrinsic>>> { self.body(hash) } @@ -1948,7 +1948,7 @@ where Ok(match self.header(id)? { Some(header) => { let hash = header.hash(); - match (self.body(&hash)?, self.justifications(&hash)?) { + match (self.body(hash)?, self.justifications(hash)?) 
{ (Some(extrinsics), justifications) => Some(SignedBlock { block: Block::new(header, extrinsics), justifications }), _ => None, @@ -1962,7 +1962,7 @@ where Client::block_status(self, id) } - fn justifications(&self, hash: &Block::Hash) -> sp_blockchain::Result> { + fn justifications(&self, hash: Block::Hash) -> sp_blockchain::Result> { self.backend.blockchain().justifications(hash) } @@ -1970,18 +1970,15 @@ where self.backend.blockchain().hash(number) } - fn indexed_transaction(&self, hash: &Block::Hash) -> sp_blockchain::Result>> { + fn indexed_transaction(&self, hash: Block::Hash) -> sp_blockchain::Result>> { self.backend.blockchain().indexed_transaction(hash) } - fn has_indexed_transaction(&self, hash: &Block::Hash) -> sp_blockchain::Result { + fn has_indexed_transaction(&self, hash: Block::Hash) -> sp_blockchain::Result { self.backend.blockchain().has_indexed_transaction(hash) } - fn block_indexed_body( - &self, - hash: &Block::Hash, - ) -> sp_blockchain::Result>>> { + fn block_indexed_body(&self, hash: Block::Hash) -> sp_blockchain::Result>>> { self.backend.blockchain().block_indexed_body(hash) } @@ -2085,7 +2082,7 @@ where self.backend .blockchain() - .block_indexed_body(&hash) + .block_indexed_body(hash) .map_err(|e| sp_transaction_storage_proof::Error::Application(Box::new(e))) } diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index c60ff4dd09d7b..788f119130ac0 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -346,7 +346,7 @@ fn block_builder_works_with_transactions() { .expect("block 1 was just imported. qed"); assert_eq!(client.chain_info().best_number, 1); - assert_ne!(client.state_at(&hash1).unwrap().pairs(), client.state_at(&hash0).unwrap().pairs()); + assert_ne!(client.state_at(hash1).unwrap().pairs(), client.state_at(hash0).unwrap().pairs()); assert_eq!( client .runtime_api() @@ -405,10 +405,10 @@ fn block_builder_does_not_include_invalid() { assert_eq!(client.chain_info().best_number, 1); assert_ne!( - client.state_at(&hashof1).unwrap().pairs(), - client.state_at(&hashof0).unwrap().pairs() + client.state_at(hashof1).unwrap().pairs(), + client.state_at(hashof0).unwrap().pairs() ); - assert_eq!(client.body(&hashof1).unwrap().unwrap().len(), 1) + assert_eq!(client.body(hashof1).unwrap().unwrap().len(), 1) } #[test] @@ -870,7 +870,7 @@ fn import_with_justification() { .unwrap() .block; block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); - client.finalize_block(&a2.hash(), None).unwrap(); + client.finalize_block(a2.hash(), None).unwrap(); // A2 -> A3 let justification = Justifications::from((TEST_ENGINE_ID, vec![1, 2, 3])); @@ -884,11 +884,11 @@ fn import_with_justification() { assert_eq!(client.chain_info().finalized_hash, a3.hash()); - assert_eq!(client.justifications(&a3.hash()).unwrap(), Some(justification)); + assert_eq!(client.justifications(a3.hash()).unwrap(), Some(justification)); - assert_eq!(client.justifications(&a1.hash()).unwrap(), None); + assert_eq!(client.justifications(a1.hash()).unwrap(), None); - assert_eq!(client.justifications(&a2.hash()).unwrap(), None); + assert_eq!(client.justifications(a2.hash()).unwrap(), None); finality_notification_check(&mut finality_notifications, &[a1.hash(), a2.hash()], &[]); finality_notification_check(&mut finality_notifications, &[a3.hash()], &[]); @@ -999,7 +999,7 @@ fn finalizing_diverged_block_should_trigger_reorg() { // we finalize block B1 which is on a different branch from current best // which should trigger a 
re-org. - ClientExt::finalize_block(&client, &b1.hash(), None).unwrap(); + ClientExt::finalize_block(&client, b1.hash(), None).unwrap(); // B1 should now be the latest finalized assert_eq!(client.chain_info().finalized_hash, b1.hash()); @@ -1023,7 +1023,7 @@ fn finalizing_diverged_block_should_trigger_reorg() { assert_eq!(client.chain_info().best_hash, b3.hash()); - ClientExt::finalize_block(&client, &b3.hash(), None).unwrap(); + ClientExt::finalize_block(&client, b3.hash(), None).unwrap(); finality_notification_check(&mut finality_notifications, &[b1.hash()], &[]); finality_notification_check(&mut finality_notifications, &[b2.hash(), b3.hash()], &[a2.hash()]); @@ -1121,7 +1121,7 @@ fn finality_notifications_content() { // Postpone import to test behavior of import of finalized block. - ClientExt::finalize_block(&client, &a2.hash(), None).unwrap(); + ClientExt::finalize_block(&client, a2.hash(), None).unwrap(); // Import and finalize D4 block_on(client.import_as_final(BlockOrigin::Own, d4.clone())).unwrap(); @@ -1285,7 +1285,7 @@ fn doesnt_import_blocks_that_revert_finality() { // we will finalize A2 which should make it impossible to import a new // B3 at the same height but that doesn't include it - ClientExt::finalize_block(&client, &a2.hash(), None).unwrap(); + ClientExt::finalize_block(&client, a2.hash(), None).unwrap(); let import_err = block_on(client.import(BlockOrigin::Own, b3)).err().unwrap(); let expected_err = @@ -1320,7 +1320,7 @@ fn doesnt_import_blocks_that_revert_finality() { .unwrap() .block; block_on(client.import(BlockOrigin::Own, a3.clone())).unwrap(); - ClientExt::finalize_block(&client, &a3.hash(), None).unwrap(); + ClientExt::finalize_block(&client, a3.hash(), None).unwrap(); finality_notification_check(&mut finality_notifications, &[a1.hash(), a2.hash()], &[]); @@ -1620,7 +1620,7 @@ fn storage_keys_iter_prefix_and_start_key_works() { let child_prefix = StorageKey(b"sec".to_vec()); let res: Vec<_> = client - .storage_keys_iter(&block_hash, Some(&prefix), None) + .storage_keys_iter(block_hash, Some(&prefix), None) .unwrap() .map(|x| x.0) .collect(); @@ -1635,7 +1635,7 @@ fn storage_keys_iter_prefix_and_start_key_works() { let res: Vec<_> = client .storage_keys_iter( - &block_hash, + block_hash, Some(&prefix), Some(&StorageKey(array_bytes::hex2bytes_unchecked("3a636f6465"))), ) @@ -1646,7 +1646,7 @@ fn storage_keys_iter_prefix_and_start_key_works() { let res: Vec<_> = client .storage_keys_iter( - &block_hash, + block_hash, Some(&prefix), Some(&StorageKey(array_bytes::hex2bytes_unchecked("3a686561707061676573"))), ) @@ -1656,7 +1656,7 @@ fn storage_keys_iter_prefix_and_start_key_works() { assert_eq!(res, Vec::>::new()); let res: Vec<_> = client - .child_storage_keys_iter(&block_hash, child_info.clone(), Some(&child_prefix), None) + .child_storage_keys_iter(block_hash, child_info.clone(), Some(&child_prefix), None) .unwrap() .map(|x| x.0) .collect(); @@ -1664,7 +1664,7 @@ fn storage_keys_iter_prefix_and_start_key_works() { let res: Vec<_> = client .child_storage_keys_iter( - &block_hash, + block_hash, child_info, None, Some(&StorageKey(b"second".to_vec())), @@ -1684,7 +1684,7 @@ fn storage_keys_iter_works() { let prefix = StorageKey(array_bytes::hex2bytes_unchecked("")); let res: Vec<_> = client - .storage_keys_iter(&block_hash, Some(&prefix), None) + .storage_keys_iter(block_hash, Some(&prefix), None) .unwrap() .take(9) .map(|x| array_bytes::bytes2hex("", &x.0)) @@ -1706,7 +1706,7 @@ fn storage_keys_iter_works() { let res: Vec<_> = client .storage_keys_iter( - 
&block_hash, + block_hash, Some(&prefix), Some(&StorageKey(array_bytes::hex2bytes_unchecked("3a636f6465"))), ) @@ -1729,7 +1729,7 @@ fn storage_keys_iter_works() { let res: Vec<_> = client .storage_keys_iter( - &block_hash, + block_hash, Some(&prefix), Some(&StorageKey(array_bytes::hex2bytes_unchecked( "7d5007603a7f5dd729d51d93cf695d6465789443bb967c0d1fe270e388c96eaa", diff --git a/client/tracing/src/block/mod.rs b/client/tracing/src/block/mod.rs index ee524f5f72902..63fd1de374cba 100644 --- a/client/tracing/src/block/mod.rs +++ b/client/tracing/src/block/mod.rs @@ -225,7 +225,7 @@ where .ok_or_else(|| Error::MissingBlockComponent("Header not found".to_string()))?; let extrinsics = self .client - .block_body(&self.block) + .block_body(self.block) .map_err(Error::InvalidBlockId)? .ok_or_else(|| Error::MissingBlockComponent("Extrinsics not found".to_string()))?; tracing::debug!(target: "state_tracing", "Found {} extrinsics", extrinsics.len()); diff --git a/client/transaction-pool/benches/basics.rs b/client/transaction-pool/benches/basics.rs index bc6f2f7d5e947..602e84b47775c 100644 --- a/client/transaction-pool/benches/basics.rs +++ b/client/transaction-pool/benches/basics.rs @@ -111,7 +111,7 @@ impl ChainApi for TestApi { (blake2_256(&encoded).into(), encoded.len()) } - fn block_body(&self, _id: &::Hash) -> Self::BodyFuture { + fn block_body(&self, _id: ::Hash) -> Self::BodyFuture { ready(Ok(None)) } diff --git a/client/transaction-pool/src/api.rs b/client/transaction-pool/src/api.rs index f162a02ddb643..c3f9b50f9482d 100644 --- a/client/transaction-pool/src/api.rs +++ b/client/transaction-pool/src/api.rs @@ -126,7 +126,7 @@ where Pin> + Send>>; type BodyFuture = Ready::Extrinsic>>>>; - fn block_body(&self, hash: &Block::Hash) -> Self::BodyFuture { + fn block_body(&self, hash: Block::Hash) -> Self::BodyFuture { ready(self.client.block_body(hash).map_err(error::Error::from)) } diff --git a/client/transaction-pool/src/graph/pool.rs b/client/transaction-pool/src/graph/pool.rs index 99119ac8fa8ab..7b3a8db15982a 100644 --- a/client/transaction-pool/src/graph/pool.rs +++ b/client/transaction-pool/src/graph/pool.rs @@ -91,7 +91,7 @@ pub trait ChainApi: Send + Sync { fn hash_and_length(&self, uxt: &ExtrinsicFor) -> (ExtrinsicHash, usize); /// Returns a block body given the block. - fn block_body(&self, at: &::Hash) -> Self::BodyFuture; + fn block_body(&self, at: ::Hash) -> Self::BodyFuture; /// Returns a block header given the block id. fn block_header( diff --git a/client/transaction-pool/src/lib.rs b/client/transaction-pool/src/lib.rs index e66c780a5ed8f..a441bf9b2a9a0 100644 --- a/client/transaction-pool/src/lib.rs +++ b/client/transaction-pool/src/lib.rs @@ -550,12 +550,12 @@ impl RevalidationStatus { /// Prune the known txs for the given block. 
async fn prune_known_txs_for_block>( - block_hash: &Block::Hash, + block_hash: Block::Hash, api: &Api, pool: &graph::Pool, ) -> Vec> { let extrinsics = api - .block_body(&block_hash) + .block_body(block_hash) .await .unwrap_or_else(|e| { log::warn!("Prune known transactions: error request: {}", e); @@ -567,7 +567,7 @@ async fn prune_known_txs_for_block h, Ok(None) => { log::debug!(target: "txpool", "Could not find header for {:?}.", block_hash); @@ -580,7 +580,7 @@ async fn prune_known_txs_for_block::Hash) -> Self::BodyFuture { + fn block_body(&self, _id: ::Hash) -> Self::BodyFuture { futures::future::ready(Ok(None)) } diff --git a/primitives/blockchain/src/backend.rs b/primitives/blockchain/src/backend.rs index fdb56020661b4..dea3a7f285117 100644 --- a/primitives/blockchain/src/backend.rs +++ b/primitives/blockchain/src/backend.rs @@ -89,9 +89,9 @@ pub trait Backend: HeaderBackend + HeaderMetadata { /// Get block body. Returns `None` if block is not found. - fn body(&self, hash: &Block::Hash) -> Result::Extrinsic>>>; + fn body(&self, hash: Block::Hash) -> Result::Extrinsic>>>; /// Get block justifications. Returns `None` if no justification exists. - fn justifications(&self, hash: &Block::Hash) -> Result>; + fn justifications(&self, hash: Block::Hash) -> Result>; /// Get last finalized block hash. fn last_finalized(&self) -> Result; @@ -231,14 +231,14 @@ pub trait Backend: /// Get single indexed transaction by content hash. Note that this will only fetch transactions /// that are indexed by the runtime with `storage_index_transaction`. - fn indexed_transaction(&self, hash: &Block::Hash) -> Result>>; + fn indexed_transaction(&self, hash: Block::Hash) -> Result>>; /// Check if indexed transaction exists. - fn has_indexed_transaction(&self, hash: &Block::Hash) -> Result { + fn has_indexed_transaction(&self, hash: Block::Hash) -> Result { Ok(self.indexed_transaction(hash)?.is_some()) } - fn block_indexed_body(&self, hash: &Block::Hash) -> Result>>>; + fn block_indexed_body(&self, hash: Block::Hash) -> Result>>>; } /// Blockchain info diff --git a/test-utils/client/src/client_ext.rs b/test-utils/client/src/client_ext.rs index dd416b9102fc0..881c50d434264 100644 --- a/test-utils/client/src/client_ext.rs +++ b/test-utils/client/src/client_ext.rs @@ -29,7 +29,7 @@ pub trait ClientExt: Sized { /// Finalize a block. 
fn finalize_block( &self, - hash: &Block::Hash, + hash: Block::Hash, justification: Option, ) -> sp_blockchain::Result<()>; @@ -75,7 +75,7 @@ where { fn finalize_block( &self, - hash: &Block::Hash, + hash: Block::Hash, justification: Option, ) -> sp_blockchain::Result<()> { Finalizer::finalize_block(self, hash, justification, true) diff --git a/test-utils/runtime/transaction-pool/src/lib.rs b/test-utils/runtime/transaction-pool/src/lib.rs index e2d6efccea424..f8d551a6fa5bd 100644 --- a/test-utils/runtime/transaction-pool/src/lib.rs +++ b/test-utils/runtime/transaction-pool/src/lib.rs @@ -315,12 +315,12 @@ impl sc_transaction_pool::ChainApi for TestApi { Self::hash_and_length_inner(ex) } - fn block_body(&self, hash: &::Hash) -> Self::BodyFuture { + fn block_body(&self, hash: ::Hash) -> Self::BodyFuture { futures::future::ready(Ok(self .chain .read() .block_by_hash - .get(hash) + .get(&hash) .map(|b| b.extrinsics().to_vec()))) } diff --git a/utils/frame/benchmarking-cli/src/block/bench.rs b/utils/frame/benchmarking-cli/src/block/bench.rs index 47cd047e158d0..5a67b11f494f5 100644 --- a/utils/frame/benchmarking-cli/src/block/bench.rs +++ b/utils/frame/benchmarking-cli/src/block/bench.rs @@ -142,7 +142,7 @@ where let block_hash = self.client.expect_block_hash_from_id(block)?; let mut raw_weight = &self .client - .storage(&block_hash, &key)? + .storage(block_hash, &key)? .ok_or(format!("Could not find System::BlockWeight for block: {}", block))? .0[..]; diff --git a/utils/frame/benchmarking-cli/src/storage/cmd.rs b/utils/frame/benchmarking-cli/src/storage/cmd.rs index 32fd5da7f95f0..ce2d52e57d641 100644 --- a/utils/frame/benchmarking-cli/src/storage/cmd.rs +++ b/utils/frame/benchmarking-cli/src/storage/cmd.rs @@ -193,7 +193,7 @@ impl StorageCmd { { let hash = client.usage_info().chain.best_hash; let empty_prefix = StorageKey(Vec::new()); - let mut keys = client.storage_keys(&hash, &empty_prefix)?; + let mut keys = client.storage_keys(hash, &empty_prefix)?; let (mut rng, _) = new_rng(None); keys.shuffle(&mut rng); @@ -201,7 +201,7 @@ impl StorageCmd { info!("Warmup round {}/{}", i + 1, self.params.warmups); for key in keys.as_slice() { let _ = client - .storage(&hash, &key) + .storage(hash, &key) .expect("Checked above to exist") .ok_or("Value unexpectedly empty"); } diff --git a/utils/frame/benchmarking-cli/src/storage/read.rs b/utils/frame/benchmarking-cli/src/storage/read.rs index 2df7e697039e8..20c41e4a5196b 100644 --- a/utils/frame/benchmarking-cli/src/storage/read.rs +++ b/utils/frame/benchmarking-cli/src/storage/read.rs @@ -43,7 +43,7 @@ impl StorageCmd { info!("Preparing keys from block {}", best_hash); // Load all keys and randomly shuffle them. 
let empty_prefix = StorageKey(Vec::new()); - let mut keys = client.storage_keys(&best_hash, &empty_prefix)?; + let mut keys = client.storage_keys(best_hash, &empty_prefix)?; let (mut rng, _) = new_rng(None); keys.shuffle(&mut rng); @@ -55,7 +55,7 @@ impl StorageCmd { match (self.params.include_child_trees, self.is_child_key(key.clone().0)) { (true, Some(info)) => { // child tree key - let child_keys = client.child_storage_keys(&best_hash, &info, &empty_prefix)?; + let child_keys = client.child_storage_keys(best_hash, &info, &empty_prefix)?; for ck in child_keys { child_nodes.push((ck.clone(), info.clone())); } @@ -64,7 +64,7 @@ impl StorageCmd { // regular key let start = Instant::now(); let v = client - .storage(&best_hash, &key) + .storage(best_hash, &key) .expect("Checked above to exist") .ok_or("Value unexpectedly empty")?; record.append(v.0.len(), start.elapsed())?; @@ -79,7 +79,7 @@ impl StorageCmd { for (key, info) in child_nodes.as_slice() { let start = Instant::now(); let v = client - .child_storage(&best_hash, info, key) + .child_storage(best_hash, info, key) .expect("Checked above to exist") .ok_or("Value unexpectedly empty")?; record.append(v.0.len(), start.elapsed())?; diff --git a/utils/frame/benchmarking-cli/src/storage/write.rs b/utils/frame/benchmarking-cli/src/storage/write.rs index 2ee37a5619136..55a7b60d55552 100644 --- a/utils/frame/benchmarking-cli/src/storage/write.rs +++ b/utils/frame/benchmarking-cli/src/storage/write.rs @@ -77,7 +77,7 @@ impl StorageCmd { match (self.params.include_child_trees, self.is_child_key(k.to_vec())) { (true, Some(info)) => { let child_keys = - client.child_storage_keys_iter(&best_hash, info.clone(), None, None)?; + client.child_storage_keys_iter(best_hash, info.clone(), None, None)?; for ck in child_keys { child_nodes.push((ck.clone(), info.clone())); } @@ -124,7 +124,7 @@ impl StorageCmd { for (key, info) in child_nodes { if let Some(original_v) = client - .child_storage(&best_hash, &info.clone(), &key) + .child_storage(best_hash, &info.clone(), &key) .expect("Checked above to exist") { let mut new_v = vec![0; original_v.0.len()]; diff --git a/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs b/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs index c3d3ec816f97e..ab180c7d45d5b 100644 --- a/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs +++ b/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs @@ -145,7 +145,7 @@ where self.deny_unsafe.check_if_safe()?; let hash = at.unwrap_or_else(|| self.client.info().best_hash); - let state = self.backend.state_at(&hash).map_err(error_into_rpc_err)?; + let state = self.backend.state_at(hash).map_err(error_into_rpc_err)?; let (top, child) = migration_status(&state).map_err(error_into_rpc_err)?; Ok(MigrationStatusResult {