Merge remote-tracking branch 'origin/master' into oty-nis-rococo
ggwpez committed Apr 8, 2024
2 parents 0ecf273 + c1063a5 commit 0512e7d
Showing 49 changed files with 602 additions and 288 deletions.
14 changes: 8 additions & 6 deletions Cargo.lock


1 change: 1 addition & 0 deletions Cargo.toml
@@ -558,6 +558,7 @@ serde_json = { version = "1.0.114", default-features = false }
serde_yaml = { version = "0.9" }
syn = { version = "2.0.53" }
thiserror = { version = "1.0.48" }
tracing-subscriber = { version = "0.3.18" }

[profile.release]
# Polkadot runtime requires unwinding.
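The new `tracing-subscriber` workspace dependency is typically pulled in so tests or binaries can emit `tracing` output; a minimal, illustrative usage sketch (not part of this commit):

```rust
// Illustrative only: install a global `tracing` subscriber that prints events
// to the console. `try_init` avoids panicking if one is already installed.
fn main() {
    let _ = tracing_subscriber::fmt()
        .with_max_level(tracing::Level::DEBUG)
        .try_init();

    tracing::info!("tracing-subscriber initialised");
}
```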
53 changes: 38 additions & 15 deletions cumulus/client/consensus/aura/src/collators/lookahead.rs
@@ -49,7 +49,9 @@ use polkadot_node_subsystem::messages::{
CollationGenerationMessage, RuntimeApiMessage, RuntimeApiRequest,
};
use polkadot_overseer::Handle as OverseerHandle;
use polkadot_primitives::{CollatorPair, CoreIndex, Id as ParaId, OccupiedCoreAssumption};
use polkadot_primitives::{
AsyncBackingParams, CollatorPair, CoreIndex, CoreState, Id as ParaId, OccupiedCoreAssumption,
};

use futures::{channel::oneshot, prelude::*};
use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf};
@@ -186,10 +188,14 @@ where

// TODO: Currently we use just the first core here, but for elastic scaling
// we iterate and build on all of the cores returned.
let core_index = if let Some(core_index) =
cores_scheduled_for_para(relay_parent, params.para_id, &mut params.overseer_handle)
.await
.get(0)
let core_index = if let Some(core_index) = cores_scheduled_for_para(
relay_parent,
params.para_id,
&mut params.overseer_handle,
&mut params.relay_client,
)
.await
.get(0)
{
*core_index
} else {
@@ -223,7 +229,10 @@ where
let parent_search_params = ParentSearchParams {
relay_parent,
para_id: params.para_id,
ancestry_lookback: max_ancestry_lookback(relay_parent, &params.relay_client).await,
ancestry_lookback: async_backing_params(relay_parent, &params.relay_client)
.await
.map(|c| c.allowed_ancestry_len as usize)
.unwrap_or(0),
max_depth: PARENT_SEARCH_DEPTH,
ignore_alternative_branches: true,
};
@@ -461,21 +470,19 @@ where
Some(SlotClaim::unchecked::<P>(author_pub, slot, timestamp))
}

/// Reads allowed ancestry length parameter from the relay chain storage at the given relay parent.
///
/// Falls back to 0 in case of an error.
async fn max_ancestry_lookback(
/// Reads async backing parameters from the relay chain storage at the given relay parent.
async fn async_backing_params(
relay_parent: PHash,
relay_client: &impl RelayChainInterface,
) -> usize {
) -> Option<AsyncBackingParams> {
match load_abridged_host_configuration(relay_parent, relay_client).await {
Ok(Some(config)) => config.async_backing_params.allowed_ancestry_len as usize,
Ok(Some(config)) => Some(config.async_backing_params),
Ok(None) => {
tracing::error!(
target: crate::LOG_TARGET,
"Active config is missing in relay chain storage",
);
0
None
},
Err(err) => {
tracing::error!(
@@ -484,7 +491,7 @@ async fn max_ancestry_lookback(
?relay_parent,
"Failed to read active config from relay chain client",
);
0
None
},
}
}
@@ -494,7 +501,9 @@ async fn cores_scheduled_for_para(
relay_parent: PHash,
para_id: ParaId,
overseer_handle: &mut OverseerHandle,
relay_client: &impl RelayChainInterface,
) -> Vec<CoreIndex> {
// Get `AvailabilityCores` from runtime
let (tx, rx) = oneshot::channel();
let request = RuntimeApiRequest::AvailabilityCores(tx);
overseer_handle
@@ -522,11 +531,25 @@ async fn cores_scheduled_for_para(
},
};

let max_candidate_depth = async_backing_params(relay_parent, relay_client)
.await
.map(|c| c.max_candidate_depth)
.unwrap_or(0);

cores
.iter()
.enumerate()
.filter_map(|(index, core)| {
if core.para_id() == Some(para_id) {
let core_para_id = match core {
CoreState::Scheduled(scheduled_core) => Some(scheduled_core.para_id),
CoreState::Occupied(occupied_core) if max_candidate_depth >= 1 => occupied_core
.next_up_on_available
.as_ref()
.map(|scheduled_core| scheduled_core.para_id),
CoreState::Free | CoreState::Occupied(_) => None,
};

if core_para_id == Some(para_id) {
Some(CoreIndex(index as u32))
} else {
None
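Taken together, the `lookahead.rs` changes above read `AsyncBackingParams` once and use them both for the ancestry lookback and for deciding whether an occupied core still counts as buildable. A condensed sketch of the resulting core-selection logic (type and field names follow the diff; the overseer/runtime plumbing is omitted):

```rust
use polkadot_primitives::{AsyncBackingParams, CoreIndex, CoreState, Id as ParaId};

/// Simplified version of the filtering added to `cores_scheduled_for_para`:
/// given the availability cores and the async-backing configuration, return
/// the cores this parachain may build on.
fn cores_for_para(
    cores: &[CoreState],
    para_id: ParaId,
    async_backing_params: Option<AsyncBackingParams>,
) -> Vec<CoreIndex> {
    // With a candidate depth of 0 there is no room to build on top of a
    // pending-availability candidate, so occupied cores are skipped.
    let max_candidate_depth =
        async_backing_params.map(|p| p.max_candidate_depth).unwrap_or(0);

    cores
        .iter()
        .enumerate()
        .filter_map(|(index, core)| {
            let core_para_id = match core {
                CoreState::Scheduled(scheduled) => Some(scheduled.para_id),
                CoreState::Occupied(occupied) if max_candidate_depth >= 1 => occupied
                    .next_up_on_available
                    .as_ref()
                    .map(|next| next.para_id),
                CoreState::Free | CoreState::Occupied(_) => None,
            };
            (core_para_id == Some(para_id)).then_some(CoreIndex(index as u32))
        })
        .collect()
}
```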
2 changes: 1 addition & 1 deletion cumulus/pallets/collator-selection/src/lib.rs
@@ -984,7 +984,7 @@ pub mod pallet {
}
}

/// [`TypedGet`] implementaion to get the AccountId of the StakingPot.
/// [`TypedGet`] implementation to get the AccountId of the StakingPot.
pub struct StakingPotAccountId<R>(PhantomData<R>);
impl<R> TypedGet for StakingPotAccountId<R>
where
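The hunk above only shows the opening of the `StakingPotAccountId` implementation. A rough sketch of how such a [`TypedGet`] impl is usually completed (the body is an assumption for illustration; the real code derives the account from the collator-selection pot):

```rust
use core::marker::PhantomData;
use sp_core::TypedGet;

pub struct StakingPotAccountId<R>(PhantomData<R>);

impl<R> TypedGet for StakingPotAccountId<R>
where
    R: pallet_collator_selection::Config,
{
    type Type = <R as frame_system::Config>::AccountId;

    fn get() -> Self::Type {
        // Assumed delegation: the collator-selection pallet exposes the pot
        // account (derived from its `PotId`) via `account_id()`.
        pallet_collator_selection::Pallet::<R>::account_id()
    }
}
```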
2 changes: 1 addition & 1 deletion cumulus/parachains/runtimes/bridge-hubs/README.md
@@ -38,7 +38,7 @@ mkdir -p ~/local_bridge_testing/logs
---
# 1. Install zombienet
Go to: https://github.com/paritytech/zombienet/releases
Copy the apropriate binary (zombienet-linux) from the latest release to ~/local_bridge_testing/bin
Copy the appropriate binary (zombienet-linux) from the latest release to ~/local_bridge_testing/bin
---
@@ -64,7 +64,7 @@ use sp_runtime::{
create_runtime_str, generic, impl_opaque_keys,
traits::Block as BlockT,
transaction_validity::{TransactionSource, TransactionValidity},
ApplyExtrinsicResult, MultiAddress, Perbill,
ApplyExtrinsicResult, DispatchError, MultiAddress, Perbill,
};
use sp_std::prelude::*;
#[cfg(feature = "std")]
@@ -593,6 +593,12 @@ impl_runtime_apis! {
}
}

impl pallet_broker::runtime_api::BrokerApi<Block, Balance> for Runtime {
fn sale_price() -> Result<Balance, DispatchError> {
Broker::current_price()
}
}

impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi<Block, Balance> for Runtime {
fn query_info(
uxt: <Block as BlockT>::Extrinsic,
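For reference, the runtime API implemented above is declared in `pallet_broker::runtime_api`; a paraphrased sketch of that declaration (exact generic bounds may differ):

```rust
// Paraphrased; the authoritative declaration lives in `pallet_broker::runtime_api`.
sp_api::decl_runtime_apis! {
    pub trait BrokerApi<Balance: codec::Codec> {
        /// If there is an ongoing sale, return the current price of a core.
        fn sale_price() -> Result<Balance, sp_runtime::DispatchError>;
    }
}
```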
@@ -64,7 +64,7 @@ use sp_runtime::{
create_runtime_str, generic, impl_opaque_keys,
traits::Block as BlockT,
transaction_validity::{TransactionSource, TransactionValidity},
ApplyExtrinsicResult, MultiAddress, Perbill,
ApplyExtrinsicResult, DispatchError, MultiAddress, Perbill,
};
use sp_std::prelude::*;
#[cfg(feature = "std")]
@@ -584,6 +584,12 @@ impl_runtime_apis! {
}
}

impl pallet_broker::runtime_api::BrokerApi<Block, Balance> for Runtime {
fn sale_price() -> Result<Balance, DispatchError> {
Broker::current_price()
}
}

impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi<Block, Balance> for Runtime {
fn query_info(
uxt: <Block as BlockT>::Extrinsic,
@@ -138,7 +138,7 @@ pub fn native_version() -> NativeVersion {
NativeVersion { runtime_version: VERSION, can_author_with: Default::default() }
}

/// We assume that ~10% of the block weight is consumed by `on_initalize` handlers.
/// We assume that ~10% of the block weight is consumed by `on_initialize` handlers.
/// This is used to limit the maximal weight of a single extrinsic.
const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10);
/// We allow `Normal` extrinsics to fill up the block up to 75%, the rest can be used
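The two ratios in this hunk are what bound a runtime's per-class weight budget; a hedged sketch of how they are commonly wired into `frame_system::limits::BlockWeights` (the maximum block weight and the zeroed base weights below are illustrative, not taken from this runtime):

```rust
use frame_support::{dispatch::DispatchClass, weights::Weight};
use frame_system::limits::BlockWeights;
use sp_runtime::Perbill;

/// ~10% of block weight is assumed to go to `on_initialize` handlers.
const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10);
/// `Normal` extrinsics may fill up to 75% of the block.
const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75);
/// Illustrative limit: 2 seconds of ref time, 5 MiB of proof size.
const MAXIMUM_BLOCK_WEIGHT: Weight = Weight::from_parts(2_000_000_000_000, 5 * 1024 * 1024);

fn block_weights() -> BlockWeights {
    BlockWeights::builder()
        // Real runtimes plug benchmarked `BlockExecutionWeight`/`ExtrinsicBaseWeight`
        // constants in here; zero keeps the sketch self-contained.
        .base_block(Weight::zero())
        .for_class(DispatchClass::Normal, |weights| {
            weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT);
        })
        .for_class(DispatchClass::Operational, |weights| {
            // Operational extrinsics may use the full block.
            weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT);
        })
        // This is where the 10% `on_initialize` assumption shrinks the weight
        // available to any single extrinsic.
        .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO)
        .build_or_panic()
}
```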
4 changes: 2 additions & 2 deletions docs/sdk/src/reference_docs/frame_tokens.rs
@@ -23,7 +23,7 @@
//! On completion of reading this doc, you should have a good understanding of:
//! - The distinction between token traits and trait implementations in FRAME, and why this
//! distinction is helpful
//! - Token-related traits avaliable in FRAME
//! - Token-related traits available in FRAME
//! - Token-related trait implementations in FRAME
//! - How to choose the right trait or trait implementation for your use case
//! - Where to go next
@@ -38,7 +38,7 @@
//! [tightly coupling](crate::reference_docs::frame_pallet_coupling#tight-coupling-pallets) your
//! custom pallet to [`pallet_balances`].
//!
//! However, to keep pallets flexible and modular, it is often prefered to
//! However, to keep pallets flexible and modular, it is often preferred to
//! [loosely couple](crate::reference_docs::frame_pallet_coupling#loosely--coupling-pallets).
//!
//! To achieve loose coupling,
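The coupling discussion above is easier to see in code. A minimal sketch of the loose-coupling pattern the doc recommends, where a pallet depends on the `fungible` traits instead of on `pallet_balances` directly (pallet and associated-type names are illustrative):

```rust
#[frame_support::pallet]
pub mod pallet {
    use frame_support::traits::fungible::{Inspect, Mutate};

    #[pallet::config]
    pub trait Config: frame_system::Config {
        /// Any type implementing the `fungible` traits can be plugged in here;
        /// `pallet_balances` is the usual choice, but this pallet does not care which.
        type NativeBalance: Inspect<Self::AccountId> + Mutate<Self::AccountId>;
    }

    #[pallet::pallet]
    pub struct Pallet<T>(_);
}
```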
@@ -1148,7 +1148,7 @@ mod tests {
}
.into();

// Populate the requested tranches. The assignemnts aren't inspected in
// Populate the requested tranches. The assignments aren't inspected in
// this test.
for &t in &test_tranche {
approval_entry.import_assignment(t, ValidatorIndex(0), 0)
2 changes: 1 addition & 1 deletion polkadot/node/core/approval-voting/src/lib.rs
@@ -1102,7 +1102,7 @@
// need to handle these newly generated actions before we finalize
// completing additional actions in the submitted sequence of actions.
//
// Since recursive async functions are not not stable yet, we are
// Since recursive async functions are not stable yet, we are
// forced to modify the actions iterator on the fly whenever a new set
// of actions are generated by handling a single action.
//
10 changes: 8 additions & 2 deletions polkadot/node/core/prospective-parachains/src/tests.rs
@@ -1797,7 +1797,10 @@ fn persists_pending_availability_candidate() {
test_state.availability_cores = test_state
.availability_cores
.into_iter()
.filter(|core| core.para_id().map_or(false, |id| id == para_id))
.filter(|core| match core {
CoreState::Scheduled(scheduled_core) => scheduled_core.para_id == para_id,
_ => false,
})
.collect();
assert_eq!(test_state.availability_cores.len(), 1);

@@ -1896,7 +1899,10 @@ fn backwards_compatible() {
test_state.availability_cores = test_state
.availability_cores
.into_iter()
.filter(|core| core.para_id().map_or(false, |id| id == para_id))
.filter(|core| match core {
CoreState::Scheduled(scheduled_core) => scheduled_core.para_id == para_id,
_ => false,
})
.collect();
assert_eq!(test_state.availability_cores.len(), 1);

6 changes: 5 additions & 1 deletion polkadot/node/core/provisioner/src/tests.rs
@@ -918,7 +918,11 @@ mod select_candidates {
let committed_receipts: Vec<_> = (0..mock_cores.len())
.map(|i| {
let mut descriptor = dummy_candidate_descriptor(dummy_hash());
descriptor.para_id = mock_cores[i].para_id().unwrap();
descriptor.para_id = if let Scheduled(scheduled_core) = &mock_cores[i] {
scheduled_core.para_id
} else {
panic!("`mock_cores` is not initialized with `Scheduled`?")
};
descriptor.persisted_validation_data_hash = empty_hash;
descriptor.pov_hash = Hash::from_low_u64_be(i as u64);
CommittedCandidateReceipt {