Skip to content

Commit

Permalink
Merge branch 'master' into laz/remove-usage-of-read-state-with-wasm
Browse files Browse the repository at this point in the history
  • Loading branch information
Lazark0x authored Jan 14, 2025
2 parents 528d847 + 6ce236b commit 9573ea4
Show file tree
Hide file tree
Showing 19 changed files with 369 additions and 133 deletions.
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

4 changes: 2 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ Gear Protocol provides a developer-friendly programming platform for decentraliz

- Gear Protocol provides dApp developers with a very minimal, intuitive, and sufficient API for writing custom-logic programs in Rust and running them on Gear-powered networks, such as the Vara Network.
- It provides a technological foundation for constructing highly scalable and rapid decentralized Layer-1 networks.
- Reduces the computational burden on blockchains by offloading highly intensive calculations using a Vara node with WAVM, and then proving the correctness of these calculations on any blockchain.
- Reduces the computational burden on blockchains by offloading highly intensive calculations using a Vara node with Wasm VM, and then proving the correctness of these calculations on any blockchain.
- A Vara node can be used as a standalone instance running microservices, middleware, open API, and more.

For more details refer to the **[Gear Whitepaper](https://whitepaper.gear.foundation)**.
Expand All @@ -79,7 +79,7 @@ Refer to the **[Technical Paper](https://github.com/gear-tech/gear-technical/blo

### Get the binaries

To build Vara node binaries from source follow a step by step instructions provided in [Node README](https://github.com/gear-tech/gear/tree/master/node/README.md).
To build Vara node binaries from source follow the step-by-step instructions provided in [Node README](https://github.com/gear-tech/gear/tree/master/node/README.md).

Alternatively, you can download pre-built packages for your OS/architecture:

Expand Down
15 changes: 15 additions & 0 deletions ethexe/ethereum/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -180,6 +180,21 @@ impl Ethereum {
let builder = router.lookupGenesisHash();
builder.send().await?.try_get_receipt().await?;

log::debug!("Router impl has been deployed at {}", router_impl.address());
log::debug!("Router proxy has been deployed at {router_address}");

log::debug!(
"WrappedVara impl has been deployed at {}",
wrapped_vara_impl.address()
);
log::debug!("WrappedVara deployed at {wvara_address}");

log::debug!("Mirror impl has been deployed at {}", mirror.address());
log::debug!(
"Mirror proxy has been deployed at {}",
mirror_proxy.address()
);

Ok(Self {
router_address,
wvara_address,
Expand Down
2 changes: 1 addition & 1 deletion ethexe/observer/src/observer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -198,7 +198,7 @@ impl Observer {

/// Clones the `Observer` with resubscribing to blocks.
///
/// Resubscription here is the same as calling provider's `subscibe_blocks`
/// Resubscription here is the same as calling provider's `subscribe_blocks`
/// method from the sense of both approaches will result in receiving only new blocks.
/// All the previous blocks queued in the inner channel of the subscription won't be
/// accessible by the new subscription.
Expand Down
2 changes: 1 addition & 1 deletion ethexe/processor/src/host/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -141,7 +141,7 @@ impl InstanceWrapper {
maybe_instrumented_code,
);

// Peaces of resulting journal. Hack to avoid single allocation limit.
// Pieces of resulting journal. Hack to avoid single allocation limit.
let ptr_lens: Vec<i64> = self.call("run", arg.encode())?;

let mut journal = Vec::new();
Expand Down
12 changes: 6 additions & 6 deletions ethexe/processor/src/host/threads.rs
Original file line number Diff line number Diff line change
Expand Up @@ -23,8 +23,8 @@ use core::fmt;
use ethexe_db::BlockMetaStorage;
use ethexe_runtime_common::{
state::{
ActiveProgram, HashOf, MemoryPages, MemoryPagesInner, MemoryPagesRegionInner, Program,
ProgramState, RegionIdx, Storage,
ActiveProgram, HashOf, MemoryPages, MemoryPagesRegionInner, Program, ProgramState,
RegionIdx, Storage,
},
BlockInfo,
};
Expand All @@ -45,7 +45,7 @@ pub struct ThreadParams {
pub db: Database,
pub block_info: BlockInfo,
pub state_hash: H256,
pages_registry_cache: Option<MemoryPagesInner>,
pages_registry_cache: Option<MemoryPages>,
pages_regions_cache: Option<BTreeMap<RegionIdx, MemoryPagesRegionInner>>,
}

Expand All @@ -69,20 +69,20 @@ impl ThreadParams {
unreachable!("program that is currently running can't be inactive");
};

pages_hash.query(&self.db).expect(UNKNOWN_STATE).into()
pages_hash.query(&self.db).expect(UNKNOWN_STATE)
});

let region_idx = MemoryPages::page_region(page);

let region_hash = pages_registry.get(&region_idx)?;
let region_hash = pages_registry[region_idx].with_hash(|hash| hash)?;

let pages_regions = self
.pages_regions_cache
.get_or_insert_with(Default::default);

let page_region = pages_regions.entry(region_idx).or_insert_with(|| {
self.db
.read_pages_region(*region_hash)
.read_pages_region(region_hash)
.expect("Pages region not found")
.into()
});
Expand Down
66 changes: 49 additions & 17 deletions ethexe/runtime/common/src/state.rs
Original file line number Diff line number Diff line change
Expand Up @@ -25,8 +25,14 @@ use alloc::{
vec::Vec,
};
use anyhow::{anyhow, Result};
use core::{any::Any, marker::PhantomData, mem};
use core::{
any::Any,
marker::PhantomData,
mem,
ops::{Index, IndexMut},
};
use ethexe_common::gear::Message;
pub use gear_core::program::ProgramState as InitStatus;
use gear_core::{
ids::{prelude::MessageIdExt as _, ProgramId},
memory::PageBuf,
Expand All @@ -41,8 +47,6 @@ use gprimitives::{ActorId, MessageId, H256};
use parity_scale_codec::{Decode, Encode};
use private::Sealed;

pub use gear_core::program::ProgramState as InitStatus;

/// 3h validity in mailbox for 12s blocks.
pub const MAILBOX_VALIDITY: u32 = 54_000;

Expand Down Expand Up @@ -812,14 +816,38 @@ impl Mailbox {
}
}

#[derive(Clone, Default, Debug, Encode, Decode, PartialEq, Eq, derive_more::Into)]
#[derive(Clone, Debug, Encode, Decode, PartialEq, Eq, derive_more::Into)]
#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))]
pub struct MemoryPages(MemoryPagesInner);

// TODO (breathx): replace BTreeMap here with fixed size array.
pub type MemoryPagesInner = BTreeMap<RegionIdx, HashOf<MemoryPagesRegion>>;
/// A fresh program starts with every memory region unset: each slot holds
/// an empty [`MaybeHashOf`], i.e. no pages have been stored yet.
impl Default for MemoryPages {
    fn default() -> Self {
        let empty_region = MaybeHashOf::empty();
        Self([empty_region; Self::REGIONS_AMOUNT])
    }
}

impl Index<RegionIdx> for MemoryPages {
type Output = MaybeHashOf<MemoryPagesRegion>;

fn index(&self, idx: RegionIdx) -> &Self::Output {
&self.0[idx.0 as usize]
}
}

impl IndexMut<RegionIdx> for MemoryPages {
    /// Mutable counterpart of the region lookup: gives writable access to
    /// the hash slot of the region with the given index.
    ///
    /// Like any slice indexing, this panics if the raw index is not below
    /// [`MemoryPages::REGIONS_AMOUNT`].
    fn index_mut(&mut self, index: RegionIdx) -> &mut Self::Output {
        let slot = index.0 as usize;
        &mut self.0[slot]
    }
}

/// An inner structure for [`MemoryPages`]. Holds exactly
/// [`REGIONS_AMOUNT`](MemoryPages::REGIONS_AMOUNT) entries.
pub type MemoryPagesInner = [MaybeHashOf<MemoryPagesRegion>; MemoryPages::REGIONS_AMOUNT];

impl MemoryPages {
/// Copy of the gear_core constant defining max pages amount per program.
pub const MAX_PAGES: usize = gear_core::code::MAX_WASM_PAGES_AMOUNT as usize;

/// Granularity parameter of how memory pages hashes are stored.
///
/// Instead of a single huge map of GearPage to HashOf<PageBuf>, memory is
Expand All @@ -833,8 +861,12 @@ impl MemoryPages {
/// host implementation: see the `ThreadParams` struct.
pub const REGIONS_AMOUNT: usize = 16;

/// Pages amount per each region.
pub const PAGES_PER_REGION: usize = Self::MAX_PAGES / Self::REGIONS_AMOUNT;
const _DIVISIBILITY_ASSERT: () = assert!(Self::MAX_PAGES % Self::REGIONS_AMOUNT == 0);

pub fn page_region(page: GearPage) -> RegionIdx {
RegionIdx((u32::from(page) as usize / Self::REGIONS_AMOUNT) as u8)
RegionIdx((u32::from(page) as usize / Self::PAGES_PER_REGION) as u8)
}

pub fn update_and_store_regions<S: Storage>(
Expand All @@ -852,8 +884,9 @@ impl MemoryPages {

if current_region_idx != Some(region_idx) {
let region_entry = updated_regions.entry(region_idx).or_insert_with(|| {
self.0
.remove(&region_idx)
self[region_idx]
.0
.take()
.map(|region_hash| {
storage
.read_pages_region(region_hash)
Expand All @@ -879,7 +912,7 @@ impl MemoryPages {
.hash()
.expect("infallible; pages are only appended here, none are removed");

self.0.insert(region_idx, region_hash);
self[region_idx] = region_hash.into();
}
}

Expand All @@ -894,8 +927,9 @@ impl MemoryPages {

if current_region_idx != Some(region_idx) {
let region_entry = updated_regions.entry(region_idx).or_insert_with(|| {
self.0
.remove(&region_idx)
self[region_idx]
.0
.take()
.map(|region_hash| {
storage
.read_pages_region(region_hash)
Expand All @@ -917,7 +951,7 @@ impl MemoryPages {

for (region_idx, region) in updated_regions {
if let Some(region_hash) = region.store(storage).hash() {
self.0.insert(region_idx, region_hash);
self[region_idx] = region_hash.into();
}
}
}
Expand All @@ -943,7 +977,7 @@ impl MemoryPagesRegion {
#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))]
pub struct RegionIdx(u8);

#[derive(Default, Debug, Encode, Decode, PartialEq, Eq, derive_more::Into)]
#[derive(Clone, Default, Debug, Encode, Decode, PartialEq, Eq, derive_more::Into)]
pub struct Allocations {
inner: IntervalsTree<WasmPage>,
#[into(ignore)]
Expand All @@ -964,9 +998,7 @@ impl Allocations {
.flat_map(|i| i.to_iter())
.collect();

if !removed_pages.is_empty()
|| allocations.intervals_amount() != self.inner.intervals_amount()
{
if !removed_pages.is_empty() || allocations.difference(&self.inner).next().is_some() {
self.changed = true;
self.inner = allocations;
}
Expand Down
Loading

0 comments on commit 9573ea4

Please sign in to comment.