diff --git a/.changelog/unreleased/improvements/733-core-crate-split.md b/.changelog/unreleased/improvements/733-core-crate-split.md new file mode 100644 index 0000000000..6ad3737cae --- /dev/null +++ b/.changelog/unreleased/improvements/733-core-crate-split.md @@ -0,0 +1,4 @@ +- Public parts of shared `namada` crate have been split up into a + `namada_core` crate. The `namada_proof_of_stake`, `namada_vp_prelude` + and `namada_tx_prelude` crates now depend on this `namada_core` crate. + ([#733](https://github.com/anoma/namada/pull/733)) diff --git a/Cargo.lock b/Cargo.lock index 75c3c2f770..9a2cd54a75 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3628,54 +3628,39 @@ checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" name = "namada" version = "0.10.1" dependencies = [ - "ark-bls12-381", - "ark-ec", - "ark-serialize", "assert_matches", "async-trait", - "bech32", "bellman", - "bit-vec", "bls12_381", "borsh", "byte-unit", - "chrono", "circular-queue", "clru", "data-encoding", "derivative", - "ed25519-consensus", - "ferveo", - "ferveo-common", - "group-threshold-cryptography", - "hex", "ibc 0.14.0 (git+https://github.com/heliaxdev/ibc-rs?rev=9fcc1c8c19db6af50806ffe5b2f6c214adcbfd5d)", "ibc 0.14.0 (git+https://github.com/heliaxdev/ibc-rs.git?rev=f4703dfe2c1f25cc431279ab74f10f3e0f6827e2)", "ibc-proto 0.17.1 (git+https://github.com/heliaxdev/ibc-rs?rev=9fcc1c8c19db6af50806ffe5b2f6c214adcbfd5d)", "ibc-proto 0.17.1 (git+https://github.com/heliaxdev/ibc-rs.git?rev=f4703dfe2c1f25cc431279ab74f10f3e0f6827e2)", - "ics23", "itertools", "libsecp256k1", "loupe", "masp_primitives", "masp_proofs", + "namada_core", "namada_proof_of_stake", "parity-wasm", "paste", "pretty_assertions", "proptest", "prost", - "prost-types", "pwasm-utils", "rand 0.8.5", "rand_core 0.6.4", "rayon", "rust_decimal", - "rust_decimal_macros", - "serde 1.0.147", "serde_json", "sha2 0.9.9", - "sparse-merkle-tree", "tempfile", "tendermint 0.23.5", "tendermint 0.23.6", @@ -3686,7 +3671,6 @@ dependencies = [ "test-log", "thiserror", "tokio", - "tonic-build", "tracing 0.1.37", "tracing-subscriber 0.3.16", "wasmer", @@ -3785,6 +3769,58 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "namada_core" +version = "0.9.0" +dependencies = [ + "ark-bls12-381", + "ark-ec", + "ark-serialize", + "assert_matches", + "bech32", + "bellman", + "bit-vec", + "borsh", + "chrono", + "data-encoding", + "derivative", + "ed25519-consensus", + "ferveo", + "ferveo-common", + "group-threshold-cryptography", + "ibc 0.14.0 (git+https://github.com/heliaxdev/ibc-rs?rev=9fcc1c8c19db6af50806ffe5b2f6c214adcbfd5d)", + "ibc 0.14.0 (git+https://github.com/heliaxdev/ibc-rs.git?rev=f4703dfe2c1f25cc431279ab74f10f3e0f6827e2)", + "ibc-proto 0.17.1 (git+https://github.com/heliaxdev/ibc-rs?rev=9fcc1c8c19db6af50806ffe5b2f6c214adcbfd5d)", + "ibc-proto 0.17.1 (git+https://github.com/heliaxdev/ibc-rs.git?rev=f4703dfe2c1f25cc431279ab74f10f3e0f6827e2)", + "ics23", + "itertools", + "libsecp256k1", + "masp_primitives", + "pretty_assertions", + "proptest", + "prost", + "prost-types", + "rand 0.8.5", + "rand_core 0.6.4", + "rayon", + "rust_decimal", + "rust_decimal_macros", + "serde 1.0.147", + "serde_json", + "sha2 0.9.9", + "sparse-merkle-tree", + "tendermint 0.23.5", + "tendermint 0.23.6", + "tendermint-proto 0.23.5", + "tendermint-proto 0.23.6", + "test-log", + "thiserror", + "tonic-build", + "tracing 0.1.37", + "tracing-subscriber 0.3.16", + "zeroize", +] + [[package]] name = "namada_encoding_spec" version = "0.10.1" @@ -3810,10 +3846,12 @@ version = 
"0.10.1" dependencies = [ "borsh", "derivative", + "namada_core", "proptest", "rust_decimal", "rust_decimal_macros", "thiserror", + "tracing 0.1.37", ] [[package]] @@ -3866,8 +3904,9 @@ version = "0.10.1" dependencies = [ "borsh", "masp_primitives", - "namada", + "namada_core", "namada_macros", + "namada_proof_of_stake", "namada_vm_env", "rust_decimal", "sha2 0.10.6", @@ -3882,7 +3921,7 @@ dependencies = [ "hex", "masp_primitives", "masp_proofs", - "namada", + "namada_core", ] [[package]] @@ -3890,8 +3929,9 @@ name = "namada_vp_prelude" version = "0.10.1" dependencies = [ "borsh", - "namada", + "namada_core", "namada_macros", + "namada_proof_of_stake", "namada_vm_env", "sha2 0.10.6", "thiserror", diff --git a/Cargo.toml b/Cargo.toml index fab5ebd8d9..42a99343fc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,6 +3,7 @@ resolver = "2" members = [ "apps", + "core", "proof_of_stake", "shared", "tests", diff --git a/Makefile b/Makefile index fabcdb61d3..fdcf707a94 100644 --- a/Makefile +++ b/Makefile @@ -72,8 +72,7 @@ clippy-abcipp: $(cargo) +$(nightly) clippy \ --all-targets \ --manifest-path ./vm_env/Cargo.toml \ - --no-default-features \ - --features "abcipp" && \ + --no-default-features && \ make -C $(wasms) clippy && \ $(foreach wasm,$(wasm_templates),$(clippy-wasm) && ) true diff --git a/apps/Cargo.toml b/apps/Cargo.toml index 6068bf347f..edd67efb1d 100644 --- a/apps/Cargo.toml +++ b/apps/Cargo.toml @@ -47,25 +47,27 @@ std = ["ed25519-consensus/std", "rand/std", "rand_core/std"] testing = ["dev"] abcipp = [ + "namada/abcipp", + "namada/tendermint-rpc-abcipp", "tendermint-abcipp", "tendermint-config-abcipp", "tendermint-proto-abcipp", "tendermint-rpc-abcipp", "tower-abci-abcipp", - "namada/abcipp" ] abciplus = [ + "namada/abciplus", + "namada/tendermint-rpc", "tendermint", "tendermint-config", "tendermint-rpc", "tendermint-proto", "tower-abci", - "namada/abciplus" ] [dependencies] -namada = {path = "../shared", features = ["wasm-runtime", "ferveo-tpke", "rand", "tendermint-rpc", "secp256k1-sign-verify"]} +namada = {path = "../shared", default-features = false, features = ["wasm-runtime", "ferveo-tpke", "masp-tx-gen"]} ark-serialize = "0.3.0" ark-std = "0.3.0" # branch = "bat/arse-merkle-tree" @@ -148,7 +150,7 @@ rust_decimal = "1.26.1" rust_decimal_macros = "1.26.1" [dev-dependencies] -namada = {path = "../shared", features = ["testing", "wasm-runtime"]} +namada = {path = "../shared", default-features = false, features = ["testing", "wasm-runtime"]} bit-set = "0.5.2" # A fork with state machime testing proptest = {git = "https://github.com/heliaxdev/proptest", branch = "tomas/sm"} diff --git a/apps/src/bin/anoma-wallet/cli.rs b/apps/src/bin/anoma-wallet/cli.rs index 62857e76f3..b7d4d78a02 100644 --- a/apps/src/bin/anoma-wallet/cli.rs +++ b/apps/src/bin/anoma-wallet/cli.rs @@ -11,7 +11,7 @@ use namada::types::key::*; use namada::types::masp::{MaspValue, PaymentAddress}; use namada_apps::cli; use namada_apps::cli::{args, cmds, Context}; -use namada_apps::client::tx::find_valid_diversifier; +use namada::ledger::masp::find_valid_diversifier; use namada_apps::wallet::{DecryptionError, FindKeyError}; use rand_core::OsRng; diff --git a/apps/src/lib/cli.rs b/apps/src/lib/cli.rs index 4d0ebcad23..5fc81f4dc4 100644 --- a/apps/src/lib/cli.rs +++ b/apps/src/lib/cli.rs @@ -1526,7 +1526,6 @@ pub mod args { use super::context::*; use super::utils::*; use super::{ArgGroup, ArgMatches}; - use crate::client::types::{ParsedTxArgs, ParsedTxTransferArgs}; use crate::config; use crate::config::TendermintMode; 
use crate::facade::tendermint::Timeout; @@ -1766,21 +1765,6 @@ pub mod args { pub amount: token::Amount, } - impl TxTransfer { - pub fn parse_from_context( - &self, - ctx: &mut Context, - ) -> ParsedTxTransferArgs { - ParsedTxTransferArgs { - tx: self.tx.parse_from_context(ctx), - source: ctx.get_cached(&self.source), - target: ctx.get(&self.target), - token: ctx.get(&self.token), - amount: self.amount, - } - } - } - impl Args for TxTransfer { fn parse(matches: &ArgMatches) -> Self { let tx = Tx::parse(matches); @@ -2731,28 +2715,6 @@ pub mod args { pub signer: Option<WalletAddress>, } - impl Tx { - pub fn parse_from_context(&self, ctx: &mut Context) -> ParsedTxArgs { - ParsedTxArgs { - dry_run: self.dry_run, - force: self.force, - broadcast_only: self.broadcast_only, - ledger_address: self.ledger_address.clone(), - initialized_account_alias: self - .initialized_account_alias - .clone(), - fee_amount: self.fee_amount, - fee_token: ctx.get(&self.fee_token), - gas_limit: self.gas_limit.clone(), - signing_key: self - .signing_key - .as_ref() - .map(|sk| ctx.get_cached(sk)), - signer: self.signer.as_ref().map(|signer| ctx.get(signer)), - } - } - } - impl Args for Tx { fn def(app: App) -> App { app.arg( diff --git a/apps/src/lib/cli/context.rs b/apps/src/lib/cli/context.rs index 04bd91e6ce..2a05e6847c 100644 --- a/apps/src/lib/cli/context.rs +++ b/apps/src/lib/cli/context.rs @@ -10,9 +10,10 @@ use namada::types::address::Address; use namada::types::chain::ChainId; use namada::types::key::*; use namada::types::masp::*; +use namada::ledger::masp::ShieldedContext; use super::args; -use crate::client::tx::ShieldedContext; +use crate::client::tx::CLIShieldedUtils; use crate::config::genesis::genesis_config; use crate::config::global::GlobalConfig; use crate::config::{self, Config}; @@ -72,7 +73,7 @@ pub struct Context { /// The ledger configuration for a specific chain ID pub config: Config, /// The context for shielded operations - pub shielded: ShieldedContext, + pub shielded: ShieldedContext<CLIShieldedUtils>, /// Native token's address pub native_token: Address, } @@ -118,7 +119,7 @@ impl Context { wallet, global_config, config, - shielded: ShieldedContext::new(chain_dir), + shielded: CLIShieldedUtils::new(chain_dir), native_token, }) } diff --git a/apps/src/lib/client/rpc.rs b/apps/src/lib/client/rpc.rs index 9fa4fc9ee0..7ed6760890 100644 --- a/apps/src/lib/client/rpc.rs +++ b/apps/src/lib/client/rpc.rs @@ -25,11 +25,9 @@ use masp_primitives::zip32::ExtendedFullViewingKey; use namada::ledger::events::Event; use namada::ledger::governance::parameters::GovParams; use namada::ledger::governance::storage as gov_storage; -use namada::ledger::governance::utils::Votes; +use namada::ledger::native_vp::governance::utils::Votes; use namada::ledger::parameters::{storage as param_storage, EpochDuration}; -use namada::ledger::pos::types::{ - decimal_mult_u64, Epoch as PosEpoch, WeightedValidator, -}; +use namada::ledger::pos::types::{decimal_mult_u64, WeightedValidator}; use namada::ledger::pos::{ self, is_validator_slashes_key, BondId, Bonds, PosParams, Slash, Unbonds, }; @@ -58,7 +56,7 @@ use tokio::time::{Duration, Instant}; use crate::cli::{self, args, Context}; use crate::client::tendermint_rpc_types::TxResponse; -use crate::client::tx::{ +use namada::ledger::masp::{ Conversions, PinnedBalanceError, TransactionDelta, TransferDelta, }; use crate::facade::tendermint::merkle::proof::Proof; @@ -164,13 +162,14 @@ pub async fn query_tx_deltas( // Connect to the Tendermint server holding the transactions let client =
HttpClient::new(ledger_address.clone()).unwrap(); // Build up the context that will be queried for transactions + ctx.shielded.utils.ledger_address = Some(ledger_address.clone()); let _ = ctx.shielded.load(); let vks = ctx.wallet.get_viewing_keys(); let fvks: Vec<_> = vks .values() .map(|fvk| ExtendedFullViewingKey::from(*fvk).fvk.vk) .collect(); - ctx.shielded.fetch(&ledger_address, &[], &fvks).await; + ctx.shielded.fetch(&[], &fvks).await; // Save the update state so that future fetches can be short-circuited let _ = ctx.shielded.save(); // Required for filtering out rejected transactions from Tendermint @@ -284,8 +283,6 @@ pub async fn query_transfers(mut ctx: Context, args: args::QueryTransfers) { .values() .map(|fvk| (ExtendedFullViewingKey::from(*fvk).fvk.vk, fvk)) .collect(); - // Connect to the Tendermint server holding the transactions - let client = HttpClient::new(args.query.ledger_address.clone()).unwrap(); // Now display historical shielded and transparent transactions for ((height, idx), (epoch, tfer_delta, tx_delta)) in transfers { // Check if this transfer pertains to the supplied owner @@ -306,7 +303,6 @@ pub async fn query_transfers(mut ctx: Context, args: args::QueryTransfers) { let amt = ctx .shielded .compute_exchanged_amount( - client.clone(), amt, epoch, Conversions::new(), @@ -314,7 +310,7 @@ pub async fn query_transfers(mut ctx: Context, args: args::QueryTransfers) { .await .0; let dec = - ctx.shielded.decode_amount(client.clone(), amt, epoch).await; + ctx.shielded.decode_amount(amt, epoch).await; shielded_accounts.insert(acc, dec); } // Check if this transfer pertains to the supplied token @@ -556,9 +552,8 @@ pub async fn query_pinned_balance(ctx: &mut Context, args: args::QueryBalance) { .map(|fvk| ExtendedFullViewingKey::from(*fvk).fvk.vk) .collect(); // Build up the context that will be queried for asset decodings + ctx.shielded.utils.ledger_address = Some(args.query.ledger_address.clone()); let _ = ctx.shielded.load(); - // Establish connection with which to do exchange rate queries - let client = HttpClient::new(args.query.ledger_address.clone()).unwrap(); // Print the token balances by payment address for owner in owners { let mut balance = Err(PinnedBalanceError::InvalidViewingKey); @@ -568,7 +563,6 @@ pub async fn query_pinned_balance(ctx: &mut Context, args: args::QueryBalance) { balance = ctx .shielded .compute_exchanged_pinned_balance( - &args.query.ledger_address, owner, vk, ) @@ -595,7 +589,6 @@ pub async fn query_pinned_balance(ctx: &mut Context, args: args::QueryBalance) { balance = ctx .shielded .compute_exchanged_pinned_balance( - &args.query.ledger_address, owner, &vk, ) @@ -639,7 +632,7 @@ pub async fn query_pinned_balance(ctx: &mut Context, args: args::QueryBalance) { // Print balances by human-readable token names let balance = ctx .shielded - .decode_amount(client.clone(), balance, epoch) + .decode_amount(balance, epoch) .await; for (addr, value) in balance.components() { let asset_value = token::Amount::from(*value as u64); @@ -880,20 +873,17 @@ pub async fn query_shielded_balance( None => ctx.wallet.get_viewing_keys().values().copied().collect(), }; // Build up the context that will be queried for balances + ctx.shielded.utils.ledger_address = Some(args.query.ledger_address.clone()); let _ = ctx.shielded.load(); let fvks: Vec<_> = viewing_keys .iter() .map(|fvk| ExtendedFullViewingKey::from(*fvk).fvk.vk) .collect(); - ctx.shielded - .fetch(&args.query.ledger_address, &[], &fvks) - .await; + ctx.shielded.fetch(&[], &fvks).await; // Save the 
update state so that future fetches can be short-circuited let _ = ctx.shielded.save(); // The epoch is required to identify timestamped tokens let epoch = query_epoch(args.query.clone()).await; - // Establish connection with which to do exchange rate queries - let client = HttpClient::new(args.query.ledger_address.clone()).unwrap(); // Map addresses to token names let tokens = address::tokens(); match (args.token, owner.is_some()) { @@ -909,7 +899,6 @@ pub async fn query_shielded_balance( } else { ctx.shielded .compute_exchanged_balance( - client.clone(), &viewing_key, epoch, ) @@ -954,7 +943,6 @@ pub async fn query_shielded_balance( } else { ctx.shielded .compute_exchanged_balance( - client.clone(), &viewing_key, epoch, ) @@ -976,7 +964,7 @@ pub async fn query_shielded_balance( // Decode the asset type let decoded = ctx .shielded - .decode_asset_type(client.clone(), asset_type) + .decode_asset_type(asset_type) .await; match decoded { Some((addr, asset_epoch)) if asset_epoch == epoch => { @@ -1046,7 +1034,6 @@ pub async fn query_shielded_balance( } else { ctx.shielded .compute_exchanged_balance( - client.clone(), &viewing_key, epoch, ) @@ -1081,14 +1068,13 @@ pub async fn query_shielded_balance( // Print balances by human-readable token names let decoded_balance = ctx .shielded - .decode_all_amounts(client.clone(), balance) + .decode_all_amounts(balance) .await; print_decoded_balance_with_epoch(decoded_balance); } else { balance = ctx .shielded .compute_exchanged_balance( - client.clone(), &viewing_key, epoch, ) @@ -1097,7 +1083,7 @@ pub async fn query_shielded_balance( // Print balances by human-readable token names let decoded_balance = ctx .shielded - .decode_amount(client.clone(), balance, epoch) + .decode_amount(balance, epoch) .await; print_decoded_balance(decoded_balance); } @@ -2012,8 +1998,8 @@ pub async fn known_address( fn apply_slashes( slashes: &[Slash], mut delta: token::Amount, - epoch_start: PosEpoch, - withdraw_epoch: Option<PosEpoch>, + epoch_start: Epoch, + withdraw_epoch: Option<Epoch>, mut w: Option<&mut std::io::StdoutLock>, ) -> token::Amount { let mut slashed = token::Amount::default(); @@ -2065,8 +2051,7 @@ fn process_bonds_query( .unwrap(); delta = apply_slashes(slashes, delta, *epoch_start, None, Some(w)); current_total += delta; - let epoch_start: Epoch = (*epoch_start).into(); - if epoch >= &epoch_start { + if epoch >= epoch_start { total_active += delta; } } @@ -2121,8 +2106,7 @@ fn process_unbonds_query( Some(w), ); current_total += delta; - let epoch_end: Epoch = (*epoch_end).into(); - if epoch > &epoch_end { + if epoch > epoch_end { withdrawable += delta; } } @@ -2640,11 +2624,8 @@ pub async fn get_proposal_offline_votes( .await .unwrap_or_default(); let mut delegated_amount: token::Amount = 0.into(); - let epoch = namada::ledger::pos::types::Epoch::from( - proposal.tally_epoch.0, - ); let bond = epoched_bonds - .get(epoch) + .get(proposal.tally_epoch) .expect("Delegation bond should be defined."); let mut to_deduct = bond.neg_deltas; for (start_epoch, &(mut delta)) in @@ -2800,8 +2781,7 @@ pub async fn get_bond_amount_at( None, None, ); - let epoch_start: Epoch = (*epoch_start).into(); - if epoch >= epoch_start { + if epoch >= *epoch_start { delegated_amount += delta; } } diff --git a/apps/src/lib/client/tx.rs b/apps/src/lib/client/tx.rs index 65a0fac3f5..94ffe42311 100644 --- a/apps/src/lib/client/tx.rs +++ b/apps/src/lib/client/tx.rs @@ -1,11 +1,9 @@ use std::borrow::Cow; -use std::collections::hash_map::Entry; -use std::collections::{BTreeMap, HashMap, HashSet}; +use
std::collections::HashSet; use std::env; use std::fmt::Debug; use std::fs::{File, OpenOptions}; use std::io::{Read, Write}; -use std::ops::Deref; use std::path::PathBuf; use async_std::io::prelude::WriteExt; @@ -13,22 +11,9 @@ use async_std::io::{self}; use borsh::{BorshDeserialize, BorshSerialize}; use itertools::Either::*; use masp_primitives::asset_type::AssetType; -use masp_primitives::consensus::{BranchId, TestNetwork}; -use masp_primitives::convert::AllowedConversion; -use masp_primitives::ff::PrimeField; -use masp_primitives::group::cofactor::CofactorGroup; -use masp_primitives::keys::FullViewingKey; -use masp_primitives::legacy::TransparentAddress; -use masp_primitives::merkle_tree::{ - CommitmentTree, IncrementalWitness, MerklePath, -}; -use masp_primitives::note_encryption::*; -use masp_primitives::primitives::{Diversifier, Note, ViewingKey}; +use masp_primitives::merkle_tree::MerklePath; use masp_primitives::sapling::Node; -use masp_primitives::transaction::builder::{self, secp256k1, *}; -use masp_primitives::transaction::components::{Amount, OutPoint, TxOut}; -use masp_primitives::transaction::Transaction; -use masp_primitives::zip32::{ExtendedFullViewingKey, ExtendedSpendingKey}; +use masp_primitives::transaction::builder; use masp_proofs::prover::LocalTxProver; use namada::ibc::applications::ics20_fungible_token_transfer::msgs::transfer::MsgTransfer; use namada::ibc::signer::Signer; @@ -45,33 +30,27 @@ use namada::types::governance::{ OfflineProposal, OfflineVote, Proposal, ProposalVote, }; use namada::types::key::*; -use namada::types::masp::{PaymentAddress, TransferTarget}; +use namada::types::masp::TransferTarget; use namada::types::storage::{ - BlockHeight, Epoch, Key, KeySeg, TxIndex, RESERVED_ADDRESS_PREFIX, + Epoch, RESERVED_ADDRESS_PREFIX, }; use namada::types::time::DateTimeUtc; -use namada::types::token::{ - Transfer, HEAD_TX_KEY, PIN_KEY_PREFIX, TX_KEY_PREFIX, -}; use namada::types::transaction::governance::{ InitProposalData, VoteProposalData, }; use namada::types::transaction::{pos, InitAccount, InitValidator, UpdateVp}; use namada::types::{storage, token}; use namada::{ledger, vm}; -use rand_core::{CryptoRng, OsRng, RngCore}; use rust_decimal::Decimal; -use sha2::Digest; use tokio::time::{Duration, Instant}; +use async_trait::async_trait; use super::rpc; -use super::types::ShieldedTransferContext; use crate::cli::context::WalletAddress; use crate::cli::{args, safe_exit, Context}; use crate::client::rpc::{query_conversion, query_storage_value}; use crate::client::signing::{find_keypair, sign_tx, tx_signer, TxSigningKey}; use crate::client::tendermint_rpc_types::{TxBroadcastData, TxResponse}; -use crate::client::types::ParsedTxTransferArgs; use crate::facade::tendermint_config::net::Address as TendermintAddress; use crate::facade::tendermint_rpc::endpoint::broadcast::tx_sync::Response; use crate::facade::tendermint_rpc::error::Error as RpcError; @@ -389,149 +368,98 @@ pub async fn submit_init_validator( } } -/// Make a ViewingKey that can view notes encrypted by given ExtendedSpendingKey -pub fn to_viewing_key(esk: &ExtendedSpendingKey) -> FullViewingKey { - ExtendedFullViewingKey::from(esk).fvk -} +/// Shielded context file name +const FILE_NAME: &str = "shielded.dat"; +const TMP_FILE_NAME: &str = "shielded.tmp"; -/// Generate a valid diversifier, i.e. one that has a diversified base. Return -/// also this diversified base. 
-pub fn find_valid_diversifier<R: RngCore + CryptoRng>( - rng: &mut R, -) -> (Diversifier, masp_primitives::jubjub::SubgroupPoint) { - let mut diversifier; - let g_d; - // Keep generating random diversifiers until one has a diversified base - loop { - let mut d = [0; 11]; - rng.fill_bytes(&mut d); - diversifier = Diversifier(d); - if let Some(val) = diversifier.g_d() { - g_d = val; - break; - } - } - (diversifier, g_d) +#[derive(Debug, BorshSerialize, BorshDeserialize, Clone)] +pub struct CLIShieldedUtils { + #[borsh_skip] + context_dir: PathBuf, + #[borsh_skip] + pub ledger_address: Option<TendermintAddress>, } -/// Determine if using the current note would actually bring us closer to our -/// target -pub fn is_amount_required(src: Amount, dest: Amount, delta: Amount) -> bool { - if delta > Amount::zero() { - let gap = dest - src; - for (asset_type, value) in gap.components() { - if *value > 0 && delta[asset_type] > 0 { - return true; - } +impl CLIShieldedUtils { + /// Initialize a shielded transaction context that identifies notes + /// decryptable by any viewing key in the given set + pub fn new( + context_dir: PathBuf, + ) -> masp::ShieldedContext<Self> { + // Make sure that MASP parameters are downloaded to enable MASP + // transaction building and verification later on + let params_dir = masp::get_params_dir(); + let spend_path = params_dir.join(masp::SPEND_NAME); + let convert_path = params_dir.join(masp::CONVERT_NAME); + let output_path = params_dir.join(masp::OUTPUT_NAME); + if !(spend_path.exists() + && convert_path.exists() + && output_path.exists()) + { + println!("MASP parameters not present, downloading..."); + masp_proofs::download_parameters() + .expect("MASP parameters not present or downloadable"); + println!("MASP parameter download complete, resuming execution..."); } + // Finally initialize a shielded context with the supplied directory + let utils = Self { context_dir, ledger_address: None }; + masp::ShieldedContext { utils, ..Default::default() } } - false -} - -/// An extension of Option's cloned method for pair types -fn cloned_pair<T: Clone, U: Clone>((a, b): (&T, &U)) -> (T, U) { - (a.clone(), b.clone()) } -/// Errors that can occur when trying to retrieve pinned transaction -#[derive(PartialEq, Eq)] -pub enum PinnedBalanceError { - /// No transaction has yet been pinned to the given payment address - NoTransactionPinned, - /// The supplied viewing key does not recognize payments to given address - InvalidViewingKey, +impl Default for CLIShieldedUtils { + fn default() -> Self { + Self { context_dir: PathBuf::from(FILE_NAME), ledger_address: None } + } } -/// Represents the amount used of different conversions -pub type Conversions = - HashMap<AssetType, (AllowedConversion, MerklePath<Node>, i64)>; - -/// Represents the changes that were made to a list of transparent accounts -pub type TransferDelta = HashMap<Address, Amount<Address>>; - -/// Represents the changes that were made to a list of shielded accounts -pub type TransactionDelta = HashMap<ViewingKey, Amount>; - -/// Represents the current state of the shielded pool from the perspective of -/// the chosen viewing keys. 
-#[derive(BorshSerialize, BorshDeserialize, Debug)] -pub struct ShieldedContext { - /// Location where this shielded context is saved - #[borsh_skip] - context_dir: PathBuf, - /// The last transaction index to be processed in this context - last_txidx: u64, - /// The commitment tree produced by scanning all transactions up to tx_pos - tree: CommitmentTree<Node>, - /// Maps viewing keys to applicable note positions - pos_map: HashMap<ViewingKey, HashSet<usize>>, - /// Maps a nullifier to the note position to which it applies - nf_map: HashMap<[u8; 32], usize>, - /// Maps note positions to their corresponding notes - note_map: HashMap<usize, Note>, - /// Maps note positions to their corresponding memos - memo_map: HashMap<usize, Memo>, - /// Maps note positions to the diversifier of their payment address - div_map: HashMap<usize, Diversifier>, - /// Maps note positions to their witness (used to make merkle paths) - witness_map: HashMap<usize, IncrementalWitness<Node>>, - /// Tracks what each transaction does to various account balances - delta_map: BTreeMap< - (BlockHeight, TxIndex), - (Epoch, TransferDelta, TransactionDelta), - >, - /// The set of note positions that have been spent - spents: HashSet<usize>, - /// Maps asset types to their decodings - asset_types: HashMap<AssetType, (Address, Epoch)>, - /// Maps note positions to their corresponding viewing keys - vk_map: HashMap<usize, ViewingKey>, -} +#[async_trait] +impl masp::ShieldedUtils for CLIShieldedUtils { + async fn query_storage_value<T>( + &self, + key: &storage::Key, + ) -> Option<T> + where T: BorshDeserialize { + let client = HttpClient::new(self.ledger_address.clone().unwrap()).unwrap(); + query_storage_value::<T>(&client, key).await + } -/// Shielded context file name -const FILE_NAME: &str = "shielded.dat"; -const TMP_FILE_NAME: &str = "shielded.tmp"; + async fn query_epoch(&self) -> Epoch { + rpc::query_epoch(args::Query { + ledger_address: self.ledger_address.clone().unwrap() + }).await + } -/// Default implementation to ease construction of TxContexts. Derive cannot be -/// used here due to CommitmentTree not implementing Default. -impl Default for ShieldedContext { - fn default() -> ShieldedContext { - ShieldedContext { - context_dir: PathBuf::from(FILE_NAME), - last_txidx: u64::default(), - tree: CommitmentTree::empty(), - pos_map: HashMap::default(), - nf_map: HashMap::default(), - note_map: HashMap::default(), - memo_map: HashMap::default(), - div_map: HashMap::default(), - witness_map: HashMap::default(), - spents: HashSet::default(), - delta_map: BTreeMap::default(), - asset_types: HashMap::default(), - vk_map: HashMap::default(), + fn local_tx_prover(&self) -> LocalTxProver { + if let Ok(params_dir) = env::var(masp::ENV_VAR_MASP_PARAMS_DIR) + { + let params_dir = PathBuf::from(params_dir); + let spend_path = params_dir.join(masp::SPEND_NAME); + let convert_path = params_dir.join(masp::CONVERT_NAME); + let output_path = params_dir.join(masp::OUTPUT_NAME); + LocalTxProver::new(&spend_path, &output_path, &convert_path) + } else { + LocalTxProver::with_default_location() + .expect("unable to load MASP Parameters") } } -} -impl ShieldedContext { /// Try to load the last saved shielded context from the given context /// directory. If this fails, then leave the current context unchanged. 
- pub fn load(&mut self) -> std::io::Result<()> { + fn load(self) -> std::io::Result<masp::ShieldedContext<Self>> { // Try to load shielded context from file let mut ctx_file = File::open(self.context_dir.join(FILE_NAME))?; let mut bytes = Vec::new(); ctx_file.read_to_end(&mut bytes)?; - let mut new_ctx = Self::deserialize(&mut &bytes[..])?; + let mut new_ctx = masp::ShieldedContext::deserialize(&mut &bytes[..])?; // Associate the originating context directory with the // shielded context under construction - new_ctx.context_dir = self.context_dir.clone(); - *self = new_ctx; - Ok(()) + new_ctx.utils = self; + Ok(new_ctx) } /// Save this shielded context into its associated context directory - pub fn save(&self) -> std::io::Result<()> { + fn save(&self, ctx: &masp::ShieldedContext<Self>) -> std::io::Result<()> { // TODO: use mktemp crate? let tmp_path = self.context_dir.join(TMP_FILE_NAME); { @@ -545,7 +473,7 @@ impl ShieldedContext { .create_new(true) .open(tmp_path.clone())?; let mut bytes = Vec::new(); - self.serialize(&mut bytes) + ctx.serialize(&mut bytes) .expect("cannot serialize shielded context"); ctx_file.write_all(&bytes[..])?; } @@ -559,926 +487,28 @@ impl ShieldedContext { Ok(()) } - /// Merge data from the given shielded context into the current shielded - /// context. It must be the case that the two shielded contexts share the - /// same last transaction ID and share identical commitment trees. - pub fn merge(&mut self, new_ctx: ShieldedContext) { - debug_assert_eq!(self.last_txidx, new_ctx.last_txidx); - // Merge by simply extending maps. Identical keys should contain - // identical values, so overwriting should not be problematic. - self.pos_map.extend(new_ctx.pos_map); - self.nf_map.extend(new_ctx.nf_map); - self.note_map.extend(new_ctx.note_map); - self.memo_map.extend(new_ctx.memo_map); - self.div_map.extend(new_ctx.div_map); - self.witness_map.extend(new_ctx.witness_map); - self.spents.extend(new_ctx.spents); - self.asset_types.extend(new_ctx.asset_types); - self.vk_map.extend(new_ctx.vk_map); - // The deltas are the exception because different keys can reveal - // different parts of the same transaction. Hence each delta needs to be - // merged separately. - for ((height, idx), (ep, ntfer_delta, ntx_delta)) in new_ctx.delta_map { - let (_ep, tfer_delta, tx_delta) = self - .delta_map - .entry((height, idx)) - .or_insert((ep, TransferDelta::new(), TransactionDelta::new())); - tfer_delta.extend(ntfer_delta); - tx_delta.extend(ntx_delta); - } - } - - /// Fetch the current state of the multi-asset shielded pool into a - /// ShieldedContext - pub async fn fetch( - &mut self, - ledger_address: &TendermintAddress, - sks: &[ExtendedSpendingKey], - fvks: &[ViewingKey], - ) { - // First determine which of the keys requested to be fetched are new. - // Necessary because old transactions will need to be scanned for new - // keys. 
- let mut unknown_keys = Vec::new(); - for esk in sks { - let vk = to_viewing_key(esk).vk; - if !self.pos_map.contains_key(&vk) { - unknown_keys.push(vk); - } - } - for vk in fvks { - if !self.pos_map.contains_key(vk) { - unknown_keys.push(*vk); - } - } - - // If unknown keys are being used, we need to scan older transactions - // for any unspent notes - let (txs, mut tx_iter); - if !unknown_keys.is_empty() { - // Load all transactions accepted until this point - txs = Self::fetch_shielded_transfers(ledger_address, 0).await; - tx_iter = txs.iter(); - // Do this by constructing a shielding context only for unknown keys - let mut tx_ctx = ShieldedContext::new(self.context_dir.clone()); - for vk in unknown_keys { - tx_ctx.pos_map.entry(vk).or_insert_with(HashSet::new); - } - // Update this unknown shielded context until it is level with self - while tx_ctx.last_txidx != self.last_txidx { - if let Some(((height, idx), (epoch, tx))) = tx_iter.next() { - tx_ctx.scan_tx(*height, *idx, *epoch, tx); - } else { - break; - } - } - // Merge the context data originating from the unknown keys into the - // current context - self.merge(tx_ctx); - } else { - // Load only transactions accepted from last_txid until this point - txs = - Self::fetch_shielded_transfers(ledger_address, self.last_txidx) - .await; - tx_iter = txs.iter(); - } - // Now that we possess the unspent notes corresponding to both old and - // new keys up until tx_pos, proceed to scan the new transactions. - for ((height, idx), (epoch, tx)) in &mut tx_iter { - self.scan_tx(*height, *idx, *epoch, tx); - } - } - - /// Initialize a shielded transaction context that identifies notes - /// decryptable by any viewing key in the given set - pub fn new(context_dir: PathBuf) -> ShieldedContext { - // Make sure that MASP parameters are downloaded to enable MASP - // transaction building and verification later on - let params_dir = masp::get_params_dir(); - let spend_path = params_dir.join(masp::SPEND_NAME); - let convert_path = params_dir.join(masp::CONVERT_NAME); - let output_path = params_dir.join(masp::OUTPUT_NAME); - if !(spend_path.exists() - && convert_path.exists() - && output_path.exists()) - { - println!("MASP parameters not present, downloading..."); - masp_proofs::download_parameters() - .expect("MASP parameters not present or downloadable"); - println!("MASP parameter download complete, resuming execution..."); - } - // Finally initialize a shielded context with the supplied directory - Self { - context_dir, - ..Default::default() - } - } - - /// Obtain a chronologically-ordered list of all accepted shielded - /// transactions from the ledger. The ledger conceptually stores - /// transactions as a vector. More concretely, the HEAD_TX_KEY location - /// stores the index of the last accepted transaction and each transaction - /// is stored at a key derived from its index. 
- pub async fn fetch_shielded_transfers( - ledger_address: &TendermintAddress, - last_txidx: u64, - ) -> BTreeMap<(BlockHeight, TxIndex), (Epoch, Transfer)> { - let client = HttpClient::new(ledger_address.clone()).unwrap(); - // The address of the MASP account - let masp_addr = masp(); - // Construct the key where last transaction pointer is stored - let head_tx_key = Key::from(masp_addr.to_db_key()) - .push(&HEAD_TX_KEY.to_owned()) - .expect("Cannot obtain a storage key"); - // Query for the index of the last accepted transaction - let head_txidx = query_storage_value::(&client, &head_tx_key) - .await - .unwrap_or(0); - let mut shielded_txs = BTreeMap::new(); - // Fetch all the transactions we do not have yet - for i in last_txidx..head_txidx { - // Construct the key for where the current transaction is stored - let current_tx_key = Key::from(masp_addr.to_db_key()) - .push(&(TX_KEY_PREFIX.to_owned() + &i.to_string())) - .expect("Cannot obtain a storage key"); - // Obtain the current transaction - let (tx_epoch, tx_height, tx_index, current_tx) = - query_storage_value::<(Epoch, BlockHeight, TxIndex, Transfer)>( - &client, - ¤t_tx_key, - ) - .await - .unwrap(); - // Collect the current transaction - shielded_txs.insert((tx_height, tx_index), (tx_epoch, current_tx)); - } - shielded_txs - } - - /// Applies the given transaction to the supplied context. More precisely, - /// the shielded transaction's outputs are added to the commitment tree. - /// Newly discovered notes are associated to the supplied viewing keys. Note - /// nullifiers are mapped to their originating notes. Note positions are - /// associated to notes, memos, and diversifiers. And the set of notes that - /// we have spent are updated. The witness map is maintained to make it - /// easier to construct note merkle paths in other code. See - /// https://zips.z.cash/protocol/protocol.pdf#scan - pub fn scan_tx( - &mut self, - height: BlockHeight, - index: TxIndex, - epoch: Epoch, - tx: &Transfer, - ) { - // Ignore purely transparent transactions - let shielded = if let Some(shielded) = &tx.shielded { - shielded - } else { - return; - }; - // For tracking the account changes caused by this Transaction - let mut transaction_delta = TransactionDelta::new(); - // Listen for notes sent to our viewing keys - for so in &shielded.shielded_outputs { - // Create merkle tree leaf node from note commitment - let node = Node::new(so.cmu.to_repr()); - // Update each merkle tree in the witness map with the latest - // addition - for (_, witness) in self.witness_map.iter_mut() { - witness.append(node).expect("note commitment tree is full"); - } - let note_pos = self.tree.size(); - self.tree - .append(node) - .expect("note commitment tree is full"); - // Finally, make it easier to construct merkle paths to this new - // note - let witness = IncrementalWitness::::from_tree(&self.tree); - self.witness_map.insert(note_pos, witness); - // Let's try to see if any of our viewing keys can decrypt latest - // note - for (vk, notes) in self.pos_map.iter_mut() { - let decres = try_sapling_note_decryption::( - 0, - &vk.ivk().0, - &so.ephemeral_key.into_subgroup().unwrap(), - &so.cmu, - &so.enc_ciphertext, - ); - // So this current viewing key does decrypt this current note... 
- if let Some((note, pa, memo)) = decres { - // Add this note to list of notes decrypted by this viewing - // key - notes.insert(note_pos); - // Compute the nullifier now to quickly recognize when spent - let nf = note.nf(vk, note_pos.try_into().unwrap()); - self.note_map.insert(note_pos, note); - self.memo_map.insert(note_pos, memo); - // The payment address' diversifier is required to spend - // note - self.div_map.insert(note_pos, *pa.diversifier()); - self.nf_map.insert(nf.0, note_pos); - // Note the account changes - let balance = transaction_delta - .entry(*vk) - .or_insert_with(Amount::zero); - *balance += - Amount::from_nonnegative(note.asset_type, note.value) - .expect( - "found note with invalid value or asset type", - ); - self.vk_map.insert(note_pos, *vk); - break; - } - } - } - // Cancel out those of our notes that have been spent - for ss in &shielded.shielded_spends { - // If the shielded spend's nullifier is in our map, then target note - // is rendered unusable - if let Some(note_pos) = self.nf_map.get(&ss.nullifier) { - self.spents.insert(*note_pos); - // Note the account changes - let balance = transaction_delta - .entry(self.vk_map[note_pos]) - .or_insert_with(Amount::zero); - let note = self.note_map[note_pos]; - *balance -= - Amount::from_nonnegative(note.asset_type, note.value) - .expect("found note with invalid value or asset type"); - } - } - // Record the changes to the transparent accounts - let transparent_delta = - Amount::from_nonnegative(tx.token.clone(), u64::from(tx.amount)) - .expect("invalid value for amount"); - let mut transfer_delta = TransferDelta::new(); - transfer_delta - .insert(tx.source.clone(), Amount::zero() - &transparent_delta); - transfer_delta.insert(tx.target.clone(), transparent_delta); - self.delta_map.insert( - (height, index), - (epoch, transfer_delta, transaction_delta), - ); - self.last_txidx += 1; - } - - /// Summarize the effects on shielded and transparent accounts of each - /// Transfer in this context - pub fn get_tx_deltas( + /// Query a conversion. + async fn query_conversion( &self, - ) -> &BTreeMap< - (BlockHeight, TxIndex), - (Epoch, TransferDelta, TransactionDelta), - > { - &self.delta_map - } - - /// Compute the total unspent notes associated with the viewing key in the - /// context. If the key is not in the context, then we do not know the - /// balance and hence we return None. - pub fn compute_shielded_balance(&self, vk: &ViewingKey) -> Option { - // Cannot query the balance of a key that's not in the map - if !self.pos_map.contains_key(vk) { - return None; - } - let mut val_acc = Amount::zero(); - // Retrieve the notes that can be spent by this key - if let Some(avail_notes) = self.pos_map.get(vk) { - for note_idx in avail_notes { - // Spent notes cannot contribute a new transaction's pool - if self.spents.contains(note_idx) { - continue; - } - // Get note associated with this ID - let note = self.note_map.get(note_idx).unwrap(); - // Finally add value to multi-asset accumulator - val_acc += - Amount::from_nonnegative(note.asset_type, note.value) - .expect("found note with invalid value or asset type"); - } - } - Some(val_acc) - } - - /// Query the ledger for the decoding of the given asset type and cache it - /// if it is found. 
- pub async fn decode_asset_type( - &mut self, - client: HttpClient, - asset_type: AssetType, - ) -> Option<(Address, Epoch)> { - // Try to find the decoding in the cache - if let decoded @ Some(_) = self.asset_types.get(&asset_type) { - return decoded.cloned(); - } - // Query for the ID of the last accepted transaction - let (addr, ep, _conv, _path): (Address, _, Amount, MerklePath) = - query_conversion(client, asset_type).await?; - self.asset_types.insert(asset_type, (addr.clone(), ep)); - Some((addr, ep)) - } - - /// Query the ledger for the conversion that is allowed for the given asset - /// type and cache it. - async fn query_allowed_conversion<'a>( - &'a mut self, - client: HttpClient, - asset_type: AssetType, - conversions: &'a mut Conversions, - ) -> Option<&'a mut (AllowedConversion, MerklePath, i64)> { - match conversions.entry(asset_type) { - Entry::Occupied(conv_entry) => Some(conv_entry.into_mut()), - Entry::Vacant(conv_entry) => { - // Query for the ID of the last accepted transaction - let (addr, ep, conv, path): (Address, _, _, _) = - query_conversion(client, asset_type).await?; - self.asset_types.insert(asset_type, (addr, ep)); - // If the conversion is 0, then we just have a pure decoding - if conv == Amount::zero() { - None - } else { - Some(conv_entry.insert((Amount::into(conv), path, 0))) - } - } - } - } - - /// Compute the total unspent notes associated with the viewing key in the - /// context and express that value in terms of the currently timestamped - /// asset types. If the key is not in the context, then we do not know the - /// balance and hence we return None. - pub async fn compute_exchanged_balance( - &mut self, - client: HttpClient, - vk: &ViewingKey, - target_epoch: Epoch, - ) -> Option { - // First get the unexchanged balance - if let Some(balance) = self.compute_shielded_balance(vk) { - // And then exchange balance into current asset types - Some( - self.compute_exchanged_amount( - client, - balance, - target_epoch, - HashMap::new(), - ) - .await - .0, - ) - } else { - None - } - } - - /// Try to convert as much of the given asset type-value pair using the - /// given allowed conversion. usage is incremented by the amount of the - /// conversion used, the conversions are applied to the given input, and - /// the trace amount that could not be converted is moved from input to - /// output. - fn apply_conversion( - conv: AllowedConversion, asset_type: AssetType, - value: i64, - usage: &mut i64, - input: &mut Amount, - output: &mut Amount, - ) { - // If conversion if possible, accumulate the exchanged amount - let conv: Amount = conv.into(); - // The amount required of current asset to qualify for conversion - let threshold = -conv[&asset_type]; - if threshold == 0 { - eprintln!( - "Asset threshold of selected conversion for asset type {} is \ - 0, this is a bug, please report it.", - asset_type - ); - } - // We should use an amount of the AllowedConversion that almost - // cancels the original amount - let required = value / threshold; - // Forget about the trace amount left over because we cannot - // realize its value - let trace = Amount::from_pair(asset_type, value % threshold).unwrap(); - // Record how much more of the given conversion has been used - *usage += required; - // Apply the conversions to input and move the trace amount to output - *input += conv * required - &trace; - *output += trace; - } - - /// Convert the given amount into the latest asset types whilst making a - /// note of the conversions that were used. 
Note that this function does - /// not assume that allowed conversions from the ledger are expressed in - /// terms of the latest asset types. - pub async fn compute_exchanged_amount( - &mut self, - client: HttpClient, - mut input: Amount, - target_epoch: Epoch, - mut conversions: Conversions, - ) -> (Amount, Conversions) { - // Where we will store our exchanged value - let mut output = Amount::zero(); - // Repeatedly exchange assets until it is no longer possible - while let Some((asset_type, value)) = - input.components().next().map(cloned_pair) - { - let target_asset_type = self - .decode_asset_type(client.clone(), asset_type) - .await - .map(|(addr, _epoch)| make_asset_type(target_epoch, &addr)) - .unwrap_or(asset_type); - let at_target_asset_type = asset_type == target_asset_type; - if let (Some((conv, _wit, usage)), false) = ( - self.query_allowed_conversion( - client.clone(), - asset_type, - &mut conversions, - ) - .await, - at_target_asset_type, - ) { - println!( - "converting current asset type to latest asset type..." - ); - // Not at the target asset type, not at the latest asset type. - // Apply conversion to get from current asset type to the latest - // asset type. - Self::apply_conversion( - conv.clone(), - asset_type, - value, - usage, - &mut input, - &mut output, - ); - } else if let (Some((conv, _wit, usage)), false) = ( - self.query_allowed_conversion( - client.clone(), - target_asset_type, - &mut conversions, - ) - .await, - at_target_asset_type, - ) { - println!( - "converting latest asset type to target asset type..." - ); - // Not at the target asset type, yes at the latest asset type. - // Apply inverse conversion to get from latest asset type to - // the target asset type. - Self::apply_conversion( - conv.clone(), - asset_type, - value, - usage, - &mut input, - &mut output, - ); - } else { - // At the target asset type. Then move component over to output. - let comp = input.project(asset_type); - output += ∁ - // Strike from input to avoid repeating computation - input -= comp; - } - } - (output, conversions) - } - - /// Collect enough unspent notes in this context to exceed the given amount - /// of the specified asset type. Return the total value accumulated plus - /// notes and the corresponding diversifiers/merkle paths that were used to - /// achieve the total value. 
- pub async fn collect_unspent_notes( - &mut self, - ledger_address: TendermintAddress, - vk: &ViewingKey, - target: Amount, - target_epoch: Epoch, - ) -> ( - Amount, - Vec<(Diversifier, Note, MerklePath)>, - Conversions, - ) { - // Establish connection with which to do exchange rate queries - let client = HttpClient::new(ledger_address.clone()).unwrap(); - let mut conversions = HashMap::new(); - let mut val_acc = Amount::zero(); - let mut notes = Vec::new(); - // Retrieve the notes that can be spent by this key - if let Some(avail_notes) = self.pos_map.get(vk).cloned() { - for note_idx in &avail_notes { - // No more transaction inputs are required once we have met - // the target amount - if val_acc >= target { - break; - } - // Spent notes cannot contribute a new transaction's pool - if self.spents.contains(note_idx) { - continue; - } - // Get note, merkle path, diversifier associated with this ID - let note = *self.note_map.get(note_idx).unwrap(); - - // The amount contributed by this note before conversion - let pre_contr = Amount::from_pair(note.asset_type, note.value) - .expect("received note has invalid value or asset type"); - let (contr, proposed_convs) = self - .compute_exchanged_amount( - client.clone(), - pre_contr, - target_epoch, - conversions.clone(), - ) - .await; - - // Use this note only if it brings us closer to our target - if is_amount_required( - val_acc.clone(), - target.clone(), - contr.clone(), - ) { - // Be sure to record the conversions used in computing - // accumulated value - val_acc += contr; - // Commit the conversions that were used to exchange - conversions = proposed_convs; - let merkle_path = - self.witness_map.get(note_idx).unwrap().path().unwrap(); - let diversifier = self.div_map.get(note_idx).unwrap(); - // Commit this note to our transaction - notes.push((*diversifier, note, merkle_path)); - } - } - } - (val_acc, notes, conversions) - } - - /// Compute the combined value of the output notes of the transaction pinned - /// at the given payment address. This computation uses the supplied viewing - /// keys to try to decrypt the output notes. If no transaction is pinned at - /// the given payment address fails with - /// `PinnedBalanceError::NoTransactionPinned`. 
- pub async fn compute_pinned_balance( - ledger_address: &TendermintAddress, - owner: PaymentAddress, - viewing_key: &ViewingKey, - ) -> Result<(Amount, Epoch), PinnedBalanceError> { - // Check that the supplied viewing key corresponds to given payment - // address - let counter_owner = viewing_key.to_payment_address( - *masp_primitives::primitives::PaymentAddress::diversifier( - &owner.into(), - ), - ); - match counter_owner { - Some(counter_owner) if counter_owner == owner.into() => {} - _ => return Err(PinnedBalanceError::InvalidViewingKey), - } - let client = HttpClient::new(ledger_address.clone()).unwrap(); - // The address of the MASP account - let masp_addr = masp(); - // Construct the key for where the transaction ID would be stored - let pin_key = Key::from(masp_addr.to_db_key()) - .push(&(PIN_KEY_PREFIX.to_owned() + &owner.hash())) - .expect("Cannot obtain a storage key"); - // Obtain the transaction pointer at the key - let txidx = query_storage_value::(&client, &pin_key) - .await - .ok_or(PinnedBalanceError::NoTransactionPinned)?; - // Construct the key for where the pinned transaction is stored - let tx_key = Key::from(masp_addr.to_db_key()) - .push(&(TX_KEY_PREFIX.to_owned() + &txidx.to_string())) - .expect("Cannot obtain a storage key"); - // Obtain the pointed to transaction - let (tx_epoch, _tx_height, _tx_index, tx) = - query_storage_value::<(Epoch, BlockHeight, TxIndex, Transfer)>( - &client, &tx_key, - ) - .await - .expect("Ill-formed epoch, transaction pair"); - // Accumulate the combined output note value into this Amount - let mut val_acc = Amount::zero(); - let tx = tx - .shielded - .expect("Pinned Transfers should have shielded part"); - for so in &tx.shielded_outputs { - // Let's try to see if our viewing key can decrypt current note - let decres = try_sapling_note_decryption::( - 0, - &viewing_key.ivk().0, - &so.ephemeral_key.into_subgroup().unwrap(), - &so.cmu, - &so.enc_ciphertext, - ); - match decres { - // So the given viewing key does decrypt this current note... - Some((note, pa, _memo)) if pa == owner.into() => { - val_acc += - Amount::from_nonnegative(note.asset_type, note.value) - .expect( - "found note with invalid value or asset type", - ); - break; - } - _ => {} - } - } - Ok((val_acc, tx_epoch)) - } - - /// Compute the combined value of the output notes of the pinned transaction - /// at the given payment address if there's any. The asset types may be from - /// the epoch of the transaction or even before, so exchange all these - /// amounts to the epoch of the transaction in order to get the value that - /// would have been displayed in the epoch of the transaction. - pub async fn compute_exchanged_pinned_balance( - &mut self, - ledger_address: &TendermintAddress, - owner: PaymentAddress, - viewing_key: &ViewingKey, - ) -> Result<(Amount, Epoch), PinnedBalanceError> { - // Obtain the balance that will be exchanged - let (amt, ep) = - Self::compute_pinned_balance(ledger_address, owner, viewing_key) - .await?; - // Establish connection with which to do exchange rate queries - let client = HttpClient::new(ledger_address.clone()).unwrap(); - // Finally, exchange the balance to the transaction's epoch - Ok(( - self.compute_exchanged_amount(client, amt, ep, HashMap::new()) - .await - .0, - ep, - )) - } - - /// Convert an amount whose units are AssetTypes to one whose units are - /// Addresses that they decode to. All asset types not corresponding to - /// the given epoch are ignored. 
- pub async fn decode_amount( - &mut self, - client: HttpClient, - amt: Amount, - target_epoch: Epoch, - ) -> Amount<Address>
{ - let mut res = Amount::zero(); - for (asset_type, val) in amt.components() { - // Decode the asset type - let decoded = - self.decode_asset_type(client.clone(), *asset_type).await; - // Only assets with the target timestamp count - match decoded { - Some((addr, epoch)) if epoch == target_epoch => { - res += &Amount::from_pair(addr, *val).unwrap() - } - _ => {} - } - } - res - } - - /// Convert an amount whose units are AssetTypes to one whose units are - /// Addresses that they decode to. - pub async fn decode_all_amounts( - &mut self, - client: HttpClient, - amt: Amount, - ) -> Amount<(Address, Epoch)> { - let mut res = Amount::zero(); - for (asset_type, val) in amt.components() { - // Decode the asset type - let decoded = - self.decode_asset_type(client.clone(), *asset_type).await; - // Only assets with the target timestamp count - if let Some((addr, epoch)) = decoded { - res += &Amount::from_pair((addr, epoch), *val).unwrap() - } - } - res + /// Query a conversion. + async fn query_conversion( + &self, + asset_type: AssetType, + ) -> Option<( + Address, + Epoch, + masp_primitives::transaction::components::Amount, + MerklePath<Node>, + )> { + let client = HttpClient::new(self.ledger_address.clone().unwrap()).unwrap(); + query_conversion(client, asset_type).await } } -/// Make asset type corresponding to given address and epoch -fn make_asset_type(epoch: Epoch, token: &Address) -> AssetType { - // Typestamp the chosen token with the current epoch - let token_bytes = (token, epoch.0) - .try_to_vec() - .expect("token should serialize"); - // Generate the unique asset identifier from the unique token address - AssetType::new(token_bytes.as_ref()).expect("unable to create asset type") -} -/// Convert Anoma amount and token type to MASP equivalents -fn convert_amount( - epoch: Epoch, - token: &Address, - val: token::Amount, -) -> (AssetType, Amount) { - let asset_type = make_asset_type(epoch, token); - // Combine the value and unit into one amount - let amount = Amount::from_nonnegative(asset_type, u64::from(val)) - .expect("invalid value for amount"); - (asset_type, amount) -} -/// Make shielded components to embed within a Transfer object. If no shielded -/// payment address nor spending key is specified, then no shielded components -/// are produced. Otherwise a transaction containing nullifiers and/or note -/// commitments are produced. Dummy transparent UTXOs are sometimes used to make -/// transactions balanced, but it is understood that transparent account changes -/// are effected only by the amounts and signatures specified by the containing -/// Transfer object. 
-async fn gen_shielded_transfer( - ctx: &mut C, - args: &ParsedTxTransferArgs, - shielded_gas: bool, -) -> Result, builder::Error> -where - C: ShieldedTransferContext, -{ - let spending_key = args.source.spending_key().map(|x| x.into()); - let payment_address = args.target.payment_address(); - // Determine epoch in which to submit potential shielded transaction - let epoch = ctx.query_epoch(args.tx.ledger_address.clone()).await; - // Context required for storing which notes are in the source's possesion - let consensus_branch_id = BranchId::Sapling; - let amt: u64 = args.amount.into(); - let memo: Option = None; - - // Now we build up the transaction within this object - let mut builder = Builder::::new(0u32); - // Convert transaction amount into MASP types - let (asset_type, amount) = convert_amount(epoch, &args.token, args.amount); - - // Transactions with transparent input and shielded output - // may be affected if constructed close to epoch boundary - let mut epoch_sensitive: bool = false; - // If there are shielded inputs - if let Some(sk) = spending_key { - // Transaction fees need to match the amount in the wrapper Transfer - // when MASP source is used - let (_, fee) = - convert_amount(epoch, &args.tx.fee_token, args.tx.fee_amount); - builder.set_fee(fee.clone())?; - // If the gas is coming from the shielded pool, then our shielded inputs - // must also cover the gas fee - let required_amt = if shielded_gas { amount + fee } else { amount }; - // Locate unspent notes that can help us meet the transaction amount - let (_, unspent_notes, used_convs) = ctx - .collect_unspent_notes( - args.tx.ledger_address.clone(), - &to_viewing_key(&sk).vk, - required_amt, - epoch, - ) - .await; - // Commit the notes found to our transaction - for (diversifier, note, merkle_path) in unspent_notes { - builder.add_sapling_spend(sk, diversifier, note, merkle_path)?; - } - // Commit the conversion notes used during summation - for (conv, wit, value) in used_convs.values() { - if *value > 0 { - builder.add_convert( - conv.clone(), - *value as u64, - wit.clone(), - )?; - } - } - } else { - // No transfer fees come from the shielded transaction for non-MASP - // sources - builder.set_fee(Amount::zero())?; - // We add a dummy UTXO to our transaction, but only the source of the - // parent Transfer object is used to validate fund availability - let secp_sk = - secp256k1::SecretKey::from_slice(&[0xcd; 32]).expect("secret key"); - let secp_ctx = secp256k1::Secp256k1::::gen_new(); - let secp_pk = - secp256k1::PublicKey::from_secret_key(&secp_ctx, &secp_sk) - .serialize(); - let hash = - ripemd160::Ripemd160::digest(&sha2::Sha256::digest(&secp_pk)); - let script = TransparentAddress::PublicKey(hash.into()).script(); - epoch_sensitive = true; - builder.add_transparent_input( - secp_sk, - OutPoint::new([0u8; 32], 0), - TxOut { - asset_type, - value: amt, - script_pubkey: script, - }, - )?; - } - // Now handle the outputs of this transaction - // If there is a shielded output - if let Some(pa) = payment_address { - let ovk_opt = spending_key.map(|x| x.expsk.ovk); - builder.add_sapling_output( - ovk_opt, - pa.into(), - asset_type, - amt, - memo.clone(), - )?; - } else { - epoch_sensitive = false; - // Embed the transparent target address into the shielded transaction so - // that it can be signed - let target_enc = args - .target - .address() - .expect("target address should be transparent") - .try_to_vec() - .expect("target address encoding"); - let hash = ripemd160::Ripemd160::digest(&sha2::Sha256::digest( - 
- // Now handle the outputs of this transaction - // If there is a shielded output - if let Some(pa) = payment_address { - let ovk_opt = spending_key.map(|x| x.expsk.ovk); - builder.add_sapling_output( - ovk_opt, - pa.into(), - asset_type, - amt, - memo.clone(), - )?; - } else { - epoch_sensitive = false; - // Embed the transparent target address into the shielded transaction so - // that it can be signed - let target_enc = args - .target - .address() - .expect("target address should be transparent") - .try_to_vec() - .expect("target address encoding"); - let hash = ripemd160::Ripemd160::digest(&sha2::Sha256::digest( - target_enc.as_ref(), - )); - builder.add_transparent_output( - &TransparentAddress::PublicKey(hash.into()), - asset_type, - amt, - )?; - } - let prover = if let Ok(params_dir) = env::var(masp::ENV_VAR_MASP_PARAMS_DIR) - { - let params_dir = PathBuf::from(params_dir); - let spend_path = params_dir.join(masp::SPEND_NAME); - let convert_path = params_dir.join(masp::CONVERT_NAME); - let output_path = params_dir.join(masp::OUTPUT_NAME); - LocalTxProver::new(&spend_path, &output_path, &convert_path) - } else { - LocalTxProver::with_default_location() - .expect("unable to load MASP Parameters") - }; - // Build and return the constructed transaction - let mut tx = builder.build(consensus_branch_id, &prover); - - if epoch_sensitive { - let new_epoch = ctx.query_epoch(args.tx.ledger_address.clone()).await; - - // If epoch has changed, recalculate shielded outputs to match new epoch - if new_epoch != epoch { - // Hack: build new shielded transfer with updated outputs - let mut replay_builder = Builder::<TestNetwork, OsRng>::new(0u32); - replay_builder.set_fee(Amount::zero())?; - let ovk_opt = spending_key.map(|x| x.expsk.ovk); - let (new_asset_type, _) = - convert_amount(new_epoch, &args.token, args.amount); - replay_builder.add_sapling_output( - ovk_opt, - payment_address.unwrap().into(), - new_asset_type, - amt, - memo, - )?; - - let secp_sk = secp256k1::SecretKey::from_slice(&[0xcd; 32]) - .expect("secret key"); - let secp_ctx = - secp256k1::Secp256k1::<secp256k1::SignOnly>::gen_new(); - let secp_pk = - secp256k1::PublicKey::from_secret_key(&secp_ctx, &secp_sk) - .serialize(); - let hash = - ripemd160::Ripemd160::digest(&sha2::Sha256::digest(&secp_pk)); - let script = TransparentAddress::PublicKey(hash.into()).script(); - replay_builder.add_transparent_input( - secp_sk, - OutPoint::new([0u8; 32], 0), - TxOut { - asset_type: new_asset_type, - value: amt, - script_pubkey: script, - }, - )?; - - let (replay_tx, _) = - replay_builder.build(consensus_branch_id, &prover)?; - tx = tx.map(|(t, tm)| { - let mut temp = t.deref().clone(); - temp.shielded_outputs = replay_tx.shielded_outputs.clone(); - temp.value_balance = temp.value_balance.reject(asset_type) - - Amount::from_pair(new_asset_type, amt).unwrap(); - (temp.freeze().unwrap(), tm) - }); - } - } - - tx.map(Some) -}
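The `epoch_sensitive` handling just removed exists because asset types embed the epoch: a transfer proved right before an epoch boundary could otherwise commit shielded outputs denominated in a stale asset type. A compact, hedged restatement of the guard, with `build` standing in for the whole proving pipeline:

```rust
/// Hedged restatement of the `epoch_sensitive` guard above; `build`
/// is a hypothetical stand-in for the full builder/prover pipeline.
fn build_epoch_stable<T>(
    query_epoch: impl Fn() -> u64,
    mut build: impl FnMut(u64) -> T,
) -> T {
    let epoch = query_epoch();
    let tx = build(epoch);
    // If the epoch rolled over while the proof was being made, rebuild
    // so the outputs are denominated in the new epoch's asset types
    let new_epoch = query_epoch();
    if new_epoch != epoch { build(new_epoch) } else { tx }
}
```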
 pub async fn submit_transfer(mut ctx: Context, args: args::TxTransfer) { - let parsed_args = args.parse_from_context(&mut ctx); - let source = parsed_args.source.effective_address(); - let target = parsed_args.target.effective_address(); + let transfer_source = ctx.get_cached(&args.source); + let source = transfer_source.effective_address(); + let transfer_target = ctx.get(&args.target); + let target = transfer_target.effective_address(); // Check that the source address exists on chain let source_exists = rpc::known_address(&source, args.tx.ledger_address.clone()).await; @@ -1497,25 +527,26 @@ pub async fn submit_transfer(mut ctx: Context, args: args::TxTransfer) { safe_exit(1) } } + let token = ctx.get(&args.token); // Check that the token address exists on chain let token_exists = - rpc::known_address(&parsed_args.token, args.tx.ledger_address.clone()) + rpc::known_address(&token, args.tx.ledger_address.clone()) .await; if !token_exists { eprintln!( "The token address {} doesn't exist on chain.", - parsed_args.token + token ); if !args.tx.force { safe_exit(1) } } // Check source balance - let (sub_prefix, balance_key) = match args.sub_prefix { + let (sub_prefix, balance_key) = match &args.sub_prefix { Some(sub_prefix) => { let sub_prefix = storage::Key::parse(sub_prefix).unwrap(); let prefix = token::multitoken_balance_prefix( - &parsed_args.token, + &token, &sub_prefix, ); ( @@ -1523,7 +554,7 @@ pub async fn submit_transfer(mut ctx: Context, args: args::TxTransfer) { token::multitoken_balance_key(&prefix, &source), ) } - None => (None, token::balance_key(&parsed_args.token, &source)), + None => (None, token::balance_key(&token, &source)), }; let client = HttpClient::new(args.tx.ledger_address.clone()).unwrap(); match rpc::query_storage_value::<token::Amount>(&client, &balance_key).await @@ -1534,7 +565,7 @@ pub async fn submit_transfer(mut ctx: Context, args: args::TxTransfer) { "The balance of the source {} of token {} is lower than \ the amount to be transferred. Amount to transfer is {} \ and the balance is {}.", - source, parsed_args.token, args.amount, balance + source, token, args.amount, balance ); if !args.tx.force { safe_exit(1) @@ -1544,7 +575,7 @@ pub async fn submit_transfer(mut ctx: Context, args: args::TxTransfer) { None => { eprintln!( "No balance found for the source {} of token {}", - source, parsed_args.token + source, token ); if !args.tx.force { safe_exit(1) @@ -1570,13 +601,13 @@ pub async fn submit_transfer(mut ctx: Context, args: args::TxTransfer) { ( TxSigningKey::SecretKey(masp_tx_key()), args.amount, - parsed_args.token.clone(), + token.clone(), ) } else { ( TxSigningKey::WalletAddress(args.source.to_address()), args.amount, - parsed_args.token.clone(), + token.clone(), ) }; // If our chosen signer is the MASP sentinel key, then our shielded inputs @@ -1591,6 +622,38 @@ pub async fn submit_transfer(mut ctx: Context, args: args::TxTransfer) { _ => None, }; + // Update the context with the current ledger address + ctx.shielded.utils.ledger_address = Some(args.tx.ledger_address.clone()); + + let stx_result = + ctx.shielded.gen_shielded_transfer( + transfer_source, + transfer_target, + args.amount, + ctx.get(&args.token), + args.tx.fee_amount, + ctx.get(&args.tx.fee_token), + shielded_gas, + ) + .await; + let shielded = match stx_result { + Ok(stx) => stx.map(|x| x.0), + Err(builder::Error::ChangeIsNegative(_)) => { + eprintln!( + "The balance of the source {} is lower than the \ + amount to be transferred and fees. 
Amount to \ + transfer is {} {} and fees are {} {}.", + source, + args.amount, + token, + args.tx.fee_amount, + ctx.get(&args.tx.fee_token), + ); + safe_exit(1) + } + Err(err) => panic!("{}", err), + }; + let transfer = token::Transfer { source, target, @@ -1598,49 +661,7 @@ pub async fn submit_transfer(mut ctx: Context, args: args::TxTransfer) { sub_prefix, amount, key, - shielded: { - let spending_key = parsed_args.source.spending_key(); - let payment_address = parsed_args.target.payment_address(); - // No shielded components are needed when neither source nor - // destination are shielded - if spending_key.is_none() && payment_address.is_none() { - None - } else { - // We want to fund our transaction solely from supplied spending - // key - let spending_key = spending_key.map(|x| x.into()); - let spending_keys: Vec<_> = spending_key.into_iter().collect(); - // Load the current shielded context given the spending key we - // possess - let _ = ctx.shielded.load(); - ctx.shielded - .fetch(&args.tx.ledger_address, &spending_keys, &[]) - .await; - // Save the update state so that future fetches can be - // short-circuited - let _ = ctx.shielded.save(); - let stx_result = - gen_shielded_transfer(&mut ctx, &parsed_args, shielded_gas) - .await; - match stx_result { - Ok(stx) => stx.map(|x| x.0), - Err(builder::Error::ChangeIsNegative(_)) => { - eprintln!( - "The balance of the source {} is lower than the \ - amount to be transferred and fees. Amount to \ - transfer is {} {} and fees are {} {}.", - parsed_args.source, - args.amount, - parsed_args.token, - args.tx.fee_amount, - parsed_args.tx.fee_token, - ); - safe_exit(1) - } - Err(err) => panic!("{}", err), - } - } - }, + shielded, }; tracing::debug!("Transfer data {:?}", transfer); let data = transfer @@ -2179,7 +1200,7 @@ async fn is_safe_voting_window( match proposal_end_epoch { Some(proposal_end_epoch) => { - !namada::ledger::governance::utils::is_valid_validator_voting_period( + !namada::ledger::native_vp::governance::utils::is_valid_validator_voting_period( current_epoch, proposal_start_epoch, proposal_end_epoch, @@ -2486,7 +1507,7 @@ pub async fn submit_validator_commission_change( match (commission_rates, max_change) { (Some(rates), Some(max_change)) => { // Assuming that pipeline length = 2 - let rate_next_epoch = rates.get(epoch + 1).unwrap(); + let rate_next_epoch = rates.get(epoch.next()).unwrap(); if (args.rate - rate_next_epoch).abs() > max_change { eprintln!( "New rate is too large of a change with respect to \ diff --git a/apps/src/lib/client/types.rs b/apps/src/lib/client/types.rs index 5a26244474..10ae25182a 100644 --- a/apps/src/lib/client/types.rs +++ b/apps/src/lib/client/types.rs @@ -11,84 +11,4 @@ use namada::types::{key, token}; use super::rpc; use crate::cli::{args, Context}; -use crate::client::tx::Conversions; use crate::facade::tendermint_config::net::Address as TendermintAddress; - -#[derive(Clone, Debug)] -pub struct ParsedTxArgs { - /// Simulate applying the transaction - pub dry_run: bool, - /// Submit the transaction even if it doesn't pass client checks - pub force: bool, - /// Do not wait for the transaction to be added to the blockchain - pub broadcast_only: bool, - /// The address of the ledger node as host:port - pub ledger_address: TendermintAddress, - /// If any new account is initialized by the tx, use the given alias to - /// save it in the wallet. 
- pub initialized_account_alias: Option<String>, - /// The amount being paid to include the transaction - pub fee_amount: token::Amount, - /// The token in which the fee is being paid - pub fee_token: Address, - /// The max amount of gas used to process tx - pub gas_limit: GasLimit, - /// Sign the tx with the key for the given alias from your wallet - pub signing_key: Option<common::SecretKey>, - /// Sign the tx with the keypair of the public key of the given address - pub signer: Option<Address>, -}
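With `ParsedTxArgs` removed, wallet aliases are resolved lazily through the `Context` instead of being materialized up front, as the rewritten `submit_transfer` above shows. A hedged fragment of the new call shape (method names are taken from the diff; exact signatures assumed):

```rust
// Inside a client command handler (illustrative fragment):
let token: Address = ctx.get(&args.token);            // alias -> Address
let transfer_source = ctx.get_cached(&args.source);   // may hit wallet cache
let source: Address = transfer_source.effective_address();
```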
- -#[derive(Clone, Debug)] -pub struct ParsedTxTransferArgs { - /// Common tx arguments - pub tx: ParsedTxArgs, - /// Transfer source address - pub source: TransferSource, - /// Transfer target address - pub target: TransferTarget, - /// Transferred token address - pub token: Address, - /// Transferred token amount - pub amount: token::Amount, -} - -#[async_trait] -pub trait ShieldedTransferContext { - async fn collect_unspent_notes( - &mut self, - ledger_address: TendermintAddress, - vk: &ViewingKey, - target: Amount, - target_epoch: Epoch, - ) -> ( - Amount, - Vec<(Diversifier, Note, MerklePath<Node>)>, - Conversions, - ); - - async fn query_epoch(&self, ledger_address: TendermintAddress) -> Epoch; -} - -#[async_trait] -impl ShieldedTransferContext for Context { - async fn collect_unspent_notes( - &mut self, - ledger_address: TendermintAddress, - vk: &ViewingKey, - target: Amount, - target_epoch: Epoch, - ) -> ( - Amount, - Vec<(Diversifier, Note, MerklePath<Node>)>, - Conversions, - ) { - self.shielded - .collect_unspent_notes(ledger_address, vk, target, target_epoch) - .await - } - - async fn query_epoch(&self, ledger_address: TendermintAddress) -> Epoch { - rpc::query_epoch(args::Query { ledger_address }).await - } -} diff --git a/apps/src/lib/node/ledger/rpc.rs b/apps/src/lib/node/ledger/rpc.rs deleted file mode 100644 index b7a1ebcfad..0000000000 --- a/apps/src/lib/node/ledger/rpc.rs +++ /dev/null @@ -1,123 +0,0 @@ -//! RPC endpoint is used for ledger state queries - -use std::fmt::Display; -use std::str::FromStr; - -use masp_primitives::asset_type::AssetType; -use namada::types::address::Address; -use namada::types::storage; -use namada::types::token::CONVERSION_KEY_PREFIX; -use thiserror::Error; - -use crate::facade::tendermint::abci::Path as AbciPath; - -/// RPC query path -#[derive(Debug, Clone)] -pub enum Path { - /// Dry run a transaction - DryRunTx, - /// Epoch of the last committed block - Epoch, - /// Results of all committed blocks - Results, - /// Read a storage value with exact storage key - Value(storage::Key), - /// Read a range of storage values with a matching key prefix - Prefix(storage::Key), - /// Check if the given storage key exists - HasKey(storage::Key), - /// Conversion associated with given asset type - Conversion(AssetType), -} - -#[derive(Debug, Clone)] -pub struct BalanceQuery { - #[allow(dead_code)] - owner: Option<Address>
, - #[allow(dead_code)] - token: Option<Address>
, -} - -const DRY_RUN_TX_PATH: &str = "dry_run_tx"; -const EPOCH_PATH: &str = "epoch"; -const RESULTS_PATH: &str = "results"; -const VALUE_PREFIX: &str = "value"; -const PREFIX_PREFIX: &str = "prefix"; -const HAS_KEY_PREFIX: &str = "has_key"; - -impl Display for Path { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Path::DryRunTx => write!(f, "{}", DRY_RUN_TX_PATH), - Path::Epoch => write!(f, "{}", EPOCH_PATH), - Path::Results => write!(f, "{}", RESULTS_PATH), - Path::Value(storage_key) => { - write!(f, "{}/{}", VALUE_PREFIX, storage_key) - } - Path::Prefix(storage_key) => { - write!(f, "{}/{}", PREFIX_PREFIX, storage_key) - } - Path::HasKey(storage_key) => { - write!(f, "{}/{}", HAS_KEY_PREFIX, storage_key) - } - Path::Conversion(asset_type) => { - write!(f, "{}/{}", CONVERSION_KEY_PREFIX, asset_type) - } - } - } -} - -impl FromStr for Path { - type Err = PathParseError; - - fn from_str(s: &str) -> Result<Self, Self::Err> { - match s { - DRY_RUN_TX_PATH => Ok(Self::DryRunTx), - EPOCH_PATH => Ok(Self::Epoch), - RESULTS_PATH => Ok(Self::Results), - _ => match s.split_once('/') { - Some((VALUE_PREFIX, storage_key)) => { - let key = storage::Key::parse(storage_key) - .map_err(PathParseError::InvalidStorageKey)?; - Ok(Self::Value(key)) - } - Some((PREFIX_PREFIX, storage_key)) => { - let key = storage::Key::parse(storage_key) - .map_err(PathParseError::InvalidStorageKey)?; - Ok(Self::Prefix(key)) - } - Some((HAS_KEY_PREFIX, storage_key)) => { - let key = storage::Key::parse(storage_key) - .map_err(PathParseError::InvalidStorageKey)?; - Ok(Self::HasKey(key)) - } - Some((CONVERSION_KEY_PREFIX, asset_type)) => { - let key = AssetType::from_str(asset_type) - .map_err(PathParseError::InvalidAssetType)?; - Ok(Self::Conversion(key)) - } - _ => Err(PathParseError::InvalidPath(s.to_string())), - }, - } - } -}
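Before its deletion, the `Display`/`FromStr` pair above kept query paths round-trippable through the string form sent over ABCI. A hypothetical test against the removed module would have looked like this:

```rust
#[cfg(test)]
mod path_tests {
    use std::str::FromStr;

    use super::*;

    #[test]
    fn path_round_trips_through_its_string_form() {
        // `Display` and `FromStr` were kept symmetric, so the string
        // sent over ABCI parses back to the same variant.
        assert_eq!(Path::Epoch.to_string(), "epoch");
        assert!(matches!(Path::from_str("epoch"), Ok(Path::Epoch)));
    }
}
```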
- -impl From<Path> for AbciPath { - fn from(path: Path) -> Self { - let path = path.to_string(); - // TODO: update in tendermint-rs to allow to construct this from owned - // string. It's what `from_str` does anyway - AbciPath::from_str(&path).unwrap() - } -} - -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum PathParseError { - #[error("Unrecognized query path: {0}")] - InvalidPath(String), - #[error("Invalid storage key: {0}")] - InvalidStorageKey(storage::Error), - #[error("Unrecognized asset type: {0}")] - InvalidAssetType(std::io::Error), -} diff --git a/apps/src/lib/node/ledger/shell/governance.rs b/apps/src/lib/node/ledger/shell/governance.rs index 71c6049afd..02a22b1caa 100644 --- a/apps/src/lib/node/ledger/shell/governance.rs +++ b/apps/src/lib/node/ledger/shell/governance.rs @@ -1,11 +1,12 @@ +use namada::core::ledger::slash_fund::ADDRESS as slash_fund_address; use namada::ledger::events::EventType; -use namada::ledger::governance::utils::{ - compute_tally, get_proposal_votes, ProposalEvent, -}; use namada::ledger::governance::{ storage as gov_storage, ADDRESS as gov_address, }; -use namada::ledger::slash_fund::ADDRESS as slash_fund_address; +use namada::ledger::native_vp::governance::utils::{ + compute_tally, get_proposal_votes, ProposalEvent, +}; +use namada::ledger::protocol; use namada::ledger::storage::types::encode; use namada::ledger::storage::{DBIter, StorageHasher, DB}; use namada::types::address::Address; diff --git a/apps/src/lib/node/ledger/storage/rocksdb.rs b/apps/src/lib/node/ledger/storage/rocksdb.rs index 05075def5a..32d8aaeed2 100644 --- a/apps/src/lib/node/ledger/storage/rocksdb.rs +++ b/apps/src/lib/node/ledger/storage/rocksdb.rs @@ -37,9 +37,9 @@ use namada::ledger::storage::{ types, BlockStateRead, BlockStateWrite, DBIter, DBWriteBatch, Error, MerkleTreeStoresRead, Result, StoreType, DB, }; +use namada::types::internal::TxQueue; use namada::types::storage::{ - BlockHeight, BlockResults, Header, Key, KeySeg, TxQueue, - KEY_SEGMENT_SEPARATOR, + BlockHeight, BlockResults, Header, Key, KeySeg, KEY_SEGMENT_SEPARATOR, }; use namada::types::time::DateTimeUtc; use rocksdb::{ diff --git a/core/Cargo.toml b/core/Cargo.toml new file mode 100644 index 0000000000..50015c4f50 --- /dev/null +++ b/core/Cargo.toml @@ -0,0 +1,112 @@ +[package] +authors = ["Heliax AG <hello@heliax.dev>"] +edition = "2021" +license = "GPL-3.0" +name = "namada_core" +resolver = "2" +version = "0.9.0" + +[features] +default = [] +ferveo-tpke = [ + "ferveo", + "tpke", + "ark-ec", + "rand_core", + "rand", +] +wasm-runtime = [ + "rayon", +] +# secp256k1 key signing and verification, disabled in WASM build by default as +# it bloats the build a lot +secp256k1-sign-verify = [ + "libsecp256k1/hmac", +] + +abcipp = [ + "ibc-proto-abcipp", + "ibc-abcipp", + "tendermint-abcipp", + "tendermint-proto-abcipp" +] +abciplus = [ + "ibc", + "ibc-proto", + "tendermint", + "tendermint-proto", +] + +ibc-mocks = [ + "ibc/mocks", +] +ibc-mocks-abcipp = [ + "ibc-abcipp/mocks", +] + +# for integration tests and test utilities +testing = [ + "rand", + "rand_core", + "proptest", +] + +[dependencies] +ark-bls12-381 = {version = "0.3"} +ark-ec = {version = "0.3", optional = true} +ark-serialize = {version = "0.3"} +# We switch off "blake2b" because it cannot be compiled to wasm +# branch = "bat/arse-merkle-tree" +arse-merkle-tree = {package = "sparse-merkle-tree", git = "https://github.com/heliaxdev/sparse-merkle-tree", rev = "04ad1eeb28901b57a7599bbe433b3822965dabe8", default-features = false, features = ["std", "borsh"]} +bech32 = "0.8.0" +bellman = "0.11.2" +bit-vec = "0.6.3" +borsh = "0.9.0" +chrono = {version = "0.4.22", default-features = false, features = ["clock", "std"]} +data-encoding = "2.3.2"
+derivative = "2.2.0" +ed25519-consensus = "1.2.0" +ferveo = {optional = true, git = "https://github.com/anoma/ferveo"} +ferveo-common = {git = "https://github.com/anoma/ferveo"} +tpke = {package = "group-threshold-cryptography", optional = true, git = "https://github.com/anoma/ferveo"} +# TODO using the same version of tendermint-rs as we do here. +ibc = {version = "0.14.0", default-features = false, optional = true} +ibc-proto = {version = "0.17.1", default-features = false, optional = true} +ibc-abcipp = {package = "ibc", git = "https://github.com/heliaxdev/ibc-rs", rev = "9fcc1c8c19db6af50806ffe5b2f6c214adcbfd5d", default-features = false, optional = true} +ibc-proto-abcipp = {package = "ibc-proto", git = "https://github.com/heliaxdev/ibc-rs", rev = "9fcc1c8c19db6af50806ffe5b2f6c214adcbfd5d", default-features = false, optional = true} +ics23 = "0.7.0" +itertools = "0.10.0" +libsecp256k1 = {git = "https://github.com/heliaxdev/libsecp256k1", rev = "bbb3bd44a49db361f21d9db80f9a087c194c0ae9", default-features = false, features = ["std", "static-context"]} +masp_primitives = { git = "https://github.com/anoma/masp", rev = "bee40fc465f6afbd10558d12fe96eb1742eee45c" } +proptest = {git = "https://github.com/heliaxdev/proptest", branch = "tomas/sm", optional = true} +prost = "0.9.0" +prost-types = "0.9.0" +rand = {version = "0.8", optional = true} +rand_core = {version = "0.6", optional = true} +rayon = {version = "=1.5.3", optional = true} +rust_decimal = { version = "1.26.1", features = ["borsh"] } +rust_decimal_macros = "1.26.1" +serde = {version = "1.0.125", features = ["derive"]} +serde_json = "1.0.62" +sha2 = "0.9.3" +tendermint = {version = "0.23.6", optional = true} +tendermint-proto = {version = "0.23.6", optional = true} +tendermint-abcipp = {package = "tendermint", git = "https://github.com/heliaxdev/tendermint-rs", rev = "95c52476bc37927218374f94ac8e2a19bd35bec9", optional = true} +tendermint-proto-abcipp = {package = "tendermint-proto", git = "https://github.com/heliaxdev/tendermint-rs", rev = "95c52476bc37927218374f94ac8e2a19bd35bec9", optional = true} +thiserror = "1.0.30" +tracing = "0.1.30" +zeroize = {version = "1.5.5", features = ["zeroize_derive"]} + +[dev-dependencies] +assert_matches = "1.5.0" +libsecp256k1 = {git = "https://github.com/heliaxdev/libsecp256k1", rev = "bbb3bd44a49db361f21d9db80f9a087c194c0ae9"} +pretty_assertions = "0.7.2" +# A fork with state machine testing +proptest = {git = "https://github.com/heliaxdev/proptest", branch = "tomas/sm"} +rand = {version = "0.8"} +rand_core = {version = "0.6"} +test-log = {version = "0.2.7", default-features = false, features = ["trace"]} +tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} + +[build-dependencies] +tonic-build = "0.6.0" \ No newline at end of file diff --git a/core/build.rs b/core/build.rs new file mode 100644 index 0000000000..5c58f08c73 --- /dev/null +++ b/core/build.rs @@ -0,0 +1,55 @@ +use std::fs::read_to_string; +use std::process::Command; +use std::{env, str}; + +/// Path to the .proto source files, relative to `core` directory +const PROTO_SRC: &str = "./proto"; + +/// The version should match the one we use in the `Makefile` +const RUSTFMT_TOOLCHAIN_SRC: &str = "../rust-nightly-version"; + +fn main() { + if let Ok(val) = env::var("COMPILE_PROTO") { + if val.to_ascii_lowercase() == "false" { + // Skip compiling proto files + return; + } + } + + // Tell Cargo that if the given file changes, to rerun this build script. 
+ println!("cargo:rerun-if-changed={}", PROTO_SRC); + + let mut use_rustfmt = false; + + // The version should match the one we use in the `Makefile` + if let Ok(rustfmt_toolchain) = read_to_string(RUSTFMT_TOOLCHAIN_SRC) { + // Try to find the path to rustfmt. + if let Ok(output) = Command::new("rustup") + .args([ + "which", + "rustfmt", + "--toolchain", + rustfmt_toolchain.trim(), + ]) + .output() + { + if let Ok(rustfmt) = str::from_utf8(&output.stdout) { + // Set the command to be used by tonic_build below to format the + // generated files + let rustfmt = rustfmt.trim(); + if !rustfmt.is_empty() { + println!("using rustfmt from path \"{}\"", rustfmt); + env::set_var("RUSTFMT", rustfmt); + use_rustfmt = true + } + } + } + } + + tonic_build::configure() + .out_dir("src/proto/generated") + .format(use_rustfmt) + .protoc_arg("--experimental_allow_proto3_optional") + .compile(&[format!("{}/types.proto", PROTO_SRC)], &[PROTO_SRC]) + .unwrap(); +} diff --git a/shared/proto b/core/proto similarity index 100% rename from shared/proto rename to core/proto diff --git a/shared/src/bytes.rs b/core/src/bytes.rs similarity index 100% rename from shared/src/bytes.rs rename to core/src/bytes.rs diff --git a/shared/src/ledger/gas.rs b/core/src/ledger/gas.rs similarity index 100% rename from shared/src/ledger/gas.rs rename to core/src/ledger/gas.rs diff --git a/core/src/ledger/governance/mod.rs b/core/src/ledger/governance/mod.rs new file mode 100644 index 0000000000..8e3fb977f3 --- /dev/null +++ b/core/src/ledger/governance/mod.rs @@ -0,0 +1,11 @@ +//! Governance library code + +use crate::types::address::{Address, InternalAddress}; + +/// governance parameters +pub mod parameters; +/// governance storage +pub mod storage; + +/// The governance internal address +pub const ADDRESS: Address = Address::Internal(InternalAddress::Governance); diff --git a/shared/src/ledger/governance/parameters.rs b/core/src/ledger/governance/parameters.rs similarity index 100% rename from shared/src/ledger/governance/parameters.rs rename to core/src/ledger/governance/parameters.rs diff --git a/shared/src/ledger/governance/storage.rs b/core/src/ledger/governance/storage.rs similarity index 100% rename from shared/src/ledger/governance/storage.rs rename to core/src/ledger/governance/storage.rs diff --git a/shared/src/ledger/ibc/handler.rs b/core/src/ledger/ibc/actions.rs similarity index 99% rename from shared/src/ledger/ibc/handler.rs rename to core/src/ledger/ibc/actions.rs index c79f8ac6fb..cbba1d748d 100644 --- a/shared/src/ledger/ibc/handler.rs +++ b/core/src/ledger/ibc/actions.rs @@ -68,15 +68,15 @@ use crate::ibc::events::IbcEvent; #[cfg(any(feature = "ibc-mocks-abcipp", feature = "ibc-mocks"))] use crate::ibc::mock::client_state::{MockClientState, MockConsensusState}; use crate::ibc::timestamp::Timestamp; +use crate::ledger::ibc::data::{ + Error as IbcDataError, FungibleTokenPacketData, IbcMessage, PacketAck, + PacketReceipt, +}; use crate::ledger::ibc::storage; use crate::ledger::storage_api; use crate::tendermint::Time; use crate::tendermint_proto::{Error as ProtoError, Protobuf}; use crate::types::address::{Address, InternalAddress}; -use crate::types::ibc::data::{ - Error as IbcDataError, FungibleTokenPacketData, IbcMessage, PacketAck, - PacketReceipt, -}; use crate::types::ibc::IbcEvent as AnomaIbcEvent; use crate::types::storage::{BlockHeight, Key}; use crate::types::time::Rfc3339String; diff --git a/shared/src/types/ibc/data.rs b/core/src/ledger/ibc/data.rs similarity index 100% rename from 
shared/src/types/ibc/data.rs rename to core/src/ledger/ibc/data.rs diff --git a/core/src/ledger/ibc/mod.rs b/core/src/ledger/ibc/mod.rs new file mode 100644 index 0000000000..f98fb2e432 --- /dev/null +++ b/core/src/ledger/ibc/mod.rs @@ -0,0 +1,5 @@ +//! IBC library code + +pub mod actions; +pub mod data; +pub mod storage; diff --git a/shared/src/ledger/ibc/storage.rs b/core/src/ledger/ibc/storage.rs similarity index 100% rename from shared/src/ledger/ibc/storage.rs rename to core/src/ledger/ibc/storage.rs diff --git a/core/src/ledger/mod.rs b/core/src/ledger/mod.rs new file mode 100644 index 0000000000..83568c0da7 --- /dev/null +++ b/core/src/ledger/mod.rs @@ -0,0 +1,12 @@ +//! The ledger modules + +pub mod gas; +pub mod governance; +#[cfg(any(feature = "abciplus", feature = "abcipp"))] +pub mod ibc; +pub mod parameters; +pub mod slash_fund; +pub mod storage; +pub mod storage_api; +pub mod tx_env; +pub mod vp_env; diff --git a/shared/src/ledger/parameters/mod.rs b/core/src/ledger/parameters/mod.rs similarity index 87% rename from shared/src/ledger/parameters/mod.rs rename to core/src/ledger/parameters/mod.rs index 75862404db..cb84bd56e7 100644 --- a/shared/src/ledger/parameters/mod.rs +++ b/core/src/ledger/parameters/mod.rs @@ -1,104 +1,19 @@ //! Protocol parameters pub mod storage; -use std::collections::BTreeSet; - use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use rust_decimal::Decimal; use thiserror::Error; -use self::storage as parameter_storage; -use super::governance::{self}; use super::storage::types::{decode, encode}; use super::storage::{types, Storage}; -use crate::ledger::native_vp::{self, Ctx, NativeVp}; -use crate::ledger::storage::{self as ledger_storage, StorageHasher}; +use crate::ledger::storage::{self as ledger_storage}; use crate::types::address::{Address, InternalAddress}; use crate::types::storage::Key; use crate::types::time::DurationSecs; -use crate::vm::WasmCacheAccess; const ADDRESS: Address = Address::Internal(InternalAddress::Parameters); -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum Error { - #[error("Native VP error: {0}")] - NativeVpError(native_vp::Error), -} - -/// Parameters functions result -pub type Result<T> = std::result::Result<T, Error>; - -/// Parameters VP -pub struct ParametersVp<'a, DB, H, CA> -where - DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: StorageHasher, - CA: WasmCacheAccess, -{ - /// Context to interact with the host structures. - pub ctx: Ctx<'a, DB, H, CA>, -} - -impl<'a, DB, H, CA> NativeVp for ParametersVp<'a, DB, H, CA> -where - DB: 'static + ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: 'static + StorageHasher, - CA: 'static + WasmCacheAccess, -{ - type Error = Error; - - const ADDR: InternalAddress = InternalAddress::Parameters; - - fn validate_tx( - &self, - tx_data: &[u8], - keys_changed: &BTreeSet<Key>, - _verifiers: &BTreeSet<Address>
, - ) -> Result<bool> { - let result = keys_changed.iter().all(|key| { - let key_type: KeyType = key.into(); - match key_type { - KeyType::PARAMETER => governance::utils::is_proposal_accepted( - self.ctx.storage, - tx_data, - ) - .unwrap_or(false), - KeyType::UNKNOWN_PARAMETER => false, - KeyType::UNKNOWN => true, - } - }); - Ok(result) - } -} - -impl From<native_vp::Error> for Error { - fn from(err: native_vp::Error) -> Self { - Self::NativeVpError(err) - } -} - -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum ReadError { - #[error("Storage error: {0}")] - StorageError(ledger_storage::Error), - #[error("Storage type error: {0}")] - StorageTypeError(types::Error), - #[error("Protocol parameters are missing, they must be always set")] - ParametersMissing, -} - -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum WriteError { - #[error("Storage error: {0}")] - StorageError(ledger_storage::Error), - #[error("Serialize error: {0}")] - SerializeError(String), -} - /// Protocol parameters #[derive( Clone, @@ -156,6 +71,26 @@ pub struct EpochDuration { pub min_duration: DurationSecs, } +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum ReadError { + #[error("Storage error: {0}")] + StorageError(ledger_storage::Error), + #[error("Storage type error: {0}")] + StorageTypeError(types::Error), + #[error("Protocol parameters are missing, they must be always set")] + ParametersMissing, +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum WriteError { + #[error("Storage error: {0}")] + StorageError(ledger_storage::Error), + #[error("Serialize error: {0}")] + SerializeError(String), +} + impl Parameters { /// Initialize parameters in storage in the genesis block. pub fn init_storage<DB, H>(&self, storage: &mut Storage<DB, H>) @@ -254,7 +189,6 @@ impl Parameters { ); } } - /// Update the max_expected_time_per_block parameter in storage. Returns the /// parameters and gas cost. pub fn update_max_expected_time_per_block_parameter( @@ -555,26 +489,3 @@ where + gas_reward, )) } - -#[allow(clippy::upper_case_acronyms)] -enum KeyType { - #[allow(clippy::upper_case_acronyms)] - PARAMETER, - #[allow(clippy::upper_case_acronyms)] - #[allow(non_camel_case_types)] - UNKNOWN_PARAMETER, - #[allow(clippy::upper_case_acronyms)] - UNKNOWN, -} - -impl From<&Key> for KeyType { - fn from(value: &Key) -> Self { - if parameter_storage::is_protocol_parameter_key(value) { - KeyType::PARAMETER - } else if parameter_storage::is_parameter_key(value) { - KeyType::UNKNOWN_PARAMETER - } else { - KeyType::UNKNOWN - } - } -} diff --git a/shared/src/ledger/parameters/storage.rs b/core/src/ledger/parameters/storage.rs similarity index 100% rename from shared/src/ledger/parameters/storage.rs rename to core/src/ledger/parameters/storage.rs diff --git a/core/src/ledger/slash_fund/mod.rs b/core/src/ledger/slash_fund/mod.rs new file mode 100644 index 0000000000..7a7d53963b --- /dev/null +++ b/core/src/ledger/slash_fund/mod.rs @@ -0,0 +1,8 @@ +//! SlashFund library code + +use crate::types::address::{Address, InternalAddress}; + +/// Internal SlashFund address +pub const ADDRESS: Address = Address::Internal(InternalAddress::SlashFund); + +pub mod storage;
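Each internal account now lives in its own small `namada_core` module that exposes the account's address as a `const`, as the `slash_fund` module above shows. Downstream code can then compare storage keys against these well-known addresses without allocation or lookup. A sketch, assuming the new crate paths:

```rust
use namada_core::types::address::{Address, InternalAddress};
use namada_core::types::storage::{DbKeySeg, Key};

/// Does this storage key belong to the SlashFund account? (mirrors
/// `is_slash_fund_key` in the module above; paths assumed)
fn belongs_to_slash_fund(key: &Key) -> bool {
    // Internal addresses are const-constructible, so no allocation needed
    const ADDRESS: Address = Address::Internal(InternalAddress::SlashFund);
    matches!(&key.segments[0], DbKeySeg::AddressSeg(addr) if addr == &ADDRESS)
}
```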
diff --git a/shared/src/ledger/slash_fund/storage.rs b/core/src/ledger/slash_fund/storage.rs similarity index 80% rename from shared/src/ledger/slash_fund/storage.rs rename to core/src/ledger/slash_fund/storage.rs index 60d29f0f48..9c437da591 100644 --- a/shared/src/ledger/slash_fund/storage.rs +++ b/core/src/ledger/slash_fund/storage.rs @@ -1,7 +1,8 @@ -use super::ADDRESS; +//! Slash fund storage + use crate::types::storage::{DbKeySeg, Key}; /// Check if a key is a slash fund key pub fn is_slash_fund_key(key: &Key) -> bool { - matches!(&key.segments[0], DbKeySeg::AddressSeg(addr) if addr == &ADDRESS) + matches!(&key.segments[0], DbKeySeg::AddressSeg(addr) if addr == &super::ADDRESS) } diff --git a/shared/src/ledger/storage/ics23_specs.rs b/core/src/ledger/storage/ics23_specs.rs similarity index 100% rename from shared/src/ledger/storage/ics23_specs.rs rename to core/src/ledger/storage/ics23_specs.rs diff --git a/shared/src/ledger/storage/merkle_tree.rs b/core/src/ledger/storage/merkle_tree.rs similarity index 92% rename from shared/src/ledger/storage/merkle_tree.rs rename to core/src/ledger/storage/merkle_tree.rs index 49afb3dcfc..eb690356e5 100644 --- a/shared/src/ledger/storage/merkle_tree.rs +++ b/core/src/ledger/storage/merkle_tree.rs @@ -10,20 +10,17 @@ use arse_merkle_tree::{ use borsh::{BorshDeserialize, BorshSerialize}; use ics23::commitment_proof::Proof as Ics23Proof; use ics23::{CommitmentProof, ExistenceProof, NonExistenceProof}; -use prost::Message; use thiserror::Error; use super::traits::{StorageHasher, SubTreeRead, SubTreeWrite}; -use super::IBC_KEY_LIMIT; use crate::bytes::ByteBuf; use crate::ledger::storage::ics23_specs::{self, ibc_leaf_spec}; use crate::ledger::storage::types; -use crate::tendermint::merkle::proof::{Proof, ProofOp}; use crate::types::address::{Address, InternalAddress}; use crate::types::hash::Hash; use crate::types::storage::{ - DbKeySeg, Error as StorageError, Key, MembershipProof, MerkleValue, - StringKey, TreeBytes, + self, DbKeySeg, Error as StorageError, Key, MerkleValue, StringKey, + TreeBytes, TreeKeyError, IBC_KEY_LIMIT, }; #[allow(missing_docs)] @@ -33,6 +30,8 @@ pub enum Error { InvalidKey(StorageError), #[error("Invalid key for merkle tree: {0}")] InvalidMerkleKey(String), + #[error("Storage tree key error: {0}")] + StorageTreeKey(#[from] TreeKeyError), #[error("Empty Key: {0}")] EmptyKey(String), #[error("Merkle Tree error: {0}")] @@ -52,10 +51,14 @@ pub enum Error { /// Result for functions that may fail type Result<T> = std::result::Result<T, Error>; -/// Type aliases for the different merkle trees and backing stores +// Type aliases for the different merkle trees and backing stores +/// Sparse-merkle-tree store pub type SmtStore = DefaultStore<SmtHash, Hash, 32>; +/// Arse-merkle-tree store pub type AmtStore = DefaultStore<StringKey, TreeBytes, IBC_KEY_LIMIT>; +/// Sparse-merkle-tree pub type Smt<H> = ArseMerkleTree<H, SmtHash, Hash, SmtStore, 32>; +/// Arse-merkle-tree pub type Amt<H> = ArseMerkleTree<H, StringKey, TreeBytes, AmtStore, IBC_KEY_LIMIT>; @@ -95,6 +98,7 @@ pub enum Store { } impl Store { + /// Convert to a `StoreRef` with borrowed store pub fn as_ref(&self) -> StoreRef { match self { Self::Base(store) => StoreRef::Base(store), @@ -118,6 +122,7 @@ pub enum StoreRef<'a> { } impl<'a> StoreRef<'a> { + /// Convert to an owned `Store`. pub fn to_owned(&self) -> Store { match *self { Self::Base(store) => Store::Base(store.to_owned()), @@ -127,6 +132,7 @@ impl<'a> StoreRef<'a> { } } + /// Encode a `StoreRef`. 
pub fn encode(&self) -> Vec<u8> { match self { Self::Base(store) => store.try_to_vec(), @@ -391,25 +397,15 @@ impl MerkleTree { } // Get a proof of the sub tree - self.get_tendermint_proof(key, nep) + self.get_sub_tree_proof(key, nep) } /// Get the Tendermint proof with the base proof - pub fn get_tendermint_proof( + pub fn get_sub_tree_proof( &self, key: &Key, sub_proof: CommitmentProof, ) -> Result<Proof> { - let mut data = vec![]; - sub_proof - .encode(&mut data) - .expect("Encoding proof shouldn't fail"); - let sub_proof_op = ProofOp { - field_type: "ics23_CommitmentProof".to_string(), - key: key.to_string().as_bytes().to_vec(), - data, - }; - // Get a membership proof of the base tree because the sub root should // exist let (store_type, _) = StoreType::sub_key(key)?; @@ -428,19 +424,10 @@ impl MerkleTree { _ => unreachable!(), }; - let mut data = vec![]; - base_proof - .encode(&mut data) - .expect("Encoding proof shouldn't fail"); - let base_proof_op = ProofOp { - field_type: "ics23_CommitmentProof".to_string(), - key: key.to_string().as_bytes().to_vec(), - data, - }; - - // Set ProofOps from leaf to root Ok(Proof { - ops: vec![sub_proof_op, base_proof_op], + key: key.clone(), + sub_proof, + base_proof, }) } } @@ -540,6 +527,69 @@ impl From for Error { } } +/// Type of membership proof from a merkle tree +pub enum MembershipProof { + /// ICS23 compliant membership proof + ICS23(CommitmentProof), +} + +impl From<CommitmentProof> for MembershipProof { + fn from(proof: CommitmentProof) -> Self { + Self::ICS23(proof) + } +} + +/// A storage key existence or non-existence proof +#[derive(Debug)] +pub struct Proof { + /// Storage key + pub key: storage::Key, + /// Sub proof + pub sub_proof: CommitmentProof, + /// Base proof + pub base_proof: CommitmentProof, +} + +#[cfg(any(feature = "tendermint", feature = "tendermint-abcipp"))] +impl From<Proof> for crate::tendermint::merkle::proof::Proof { + fn from( + Proof { + key, + sub_proof, + base_proof, + }: Proof, + ) -> Self { + use prost::Message; + + use crate::tendermint::merkle::proof::{Proof, ProofOp}; + + let mut data = vec![]; + sub_proof + .encode(&mut data) + .expect("Encoding proof shouldn't fail"); + let sub_proof_op = ProofOp { + field_type: "ics23_CommitmentProof".to_string(), + key: key.to_string().as_bytes().to_vec(), + data, + }; + + let mut data = vec![]; + base_proof + .encode(&mut data) + .expect("Encoding proof shouldn't fail"); + let base_proof_op = ProofOp { + field_type: "ics23_CommitmentProof".to_string(), + key: key.to_string().as_bytes().to_vec(), + data, + }; + + // Set ProofOps from leaf to root + Proof { + ops: vec![sub_proof_op, base_proof_op], + } + } +} + #[cfg(test)] mod test { use super::*; @@ -584,9 +634,7 @@ mod test { let nep = tree .get_non_existence_proof(&ibc_non_key) .expect("Test failed"); - let subtree_nep = nep.ops.get(0).expect("Test failed"); - let nep_commitment_proof = - CommitmentProof::decode(&*subtree_nep.data).expect("Test failed"); + let nep_commitment_proof = nep.sub_proof; let non_existence_proof = match nep_commitment_proof.clone().proof.expect("Test failed") { Ics23Proof::Nonexist(nep) => nep, @@ -610,9 +658,7 @@ mod test { sub_key.to_string().as_bytes(), ); assert!(nep_verification_res); - let basetree_ep = nep.ops.get(1).unwrap(); - let basetree_ep_commitment_proof = - CommitmentProof::decode(&*basetree_ep.data).unwrap(); + let basetree_ep_commitment_proof = nep.base_proof; let basetree_ics23_ep = match basetree_ep_commitment_proof.clone().proof.unwrap() { Ics23Proof::Exist(ep) => ep, @@ -678,17 +724,19 @@ 
vec![ibc_val.clone().into()], ) .unwrap(); - let proof = tree.get_tendermint_proof(&ibc_key, proof).unwrap(); + let proof = tree.get_sub_tree_proof(&ibc_key, proof).unwrap(); let (store_type, sub_key) = StoreType::sub_key(&ibc_key).unwrap(); let paths = vec![sub_key.to_string(), store_type.to_string()]; let mut sub_root = ibc_val.clone(); let mut value = ibc_val; // First, the sub proof is verified. Next the base proof is verified // with the sub root - for ((p, spec), key) in - proof.ops.iter().zip(specs.iter()).zip(paths.iter()) + for ((commitment_proof, spec), key) in + [proof.sub_proof, proof.base_proof] + .into_iter() + .zip(specs.iter()) + .zip(paths.iter()) { - let commitment_proof = CommitmentProof::decode(&*p.data).unwrap(); let existence_proof = match commitment_proof.clone().proof.unwrap() { Ics23Proof::Exist(ep) => ep, @@ -733,17 +781,19 @@ mod test { vec![pos_val.clone().into()], ) .unwrap(); - let proof = tree.get_tendermint_proof(&pos_key, proof).unwrap(); + let proof = tree.get_sub_tree_proof(&pos_key, proof).unwrap(); let (store_type, sub_key) = StoreType::sub_key(&pos_key).unwrap(); let paths = vec![sub_key.to_string(), store_type.to_string()]; let mut sub_root = pos_val.clone(); let mut value = pos_val; // First, the sub proof is verified. Next the base proof is verified // with the sub root - for ((p, spec), key) in - proof.ops.iter().zip(specs.iter()).zip(paths.iter()) + for ((commitment_proof, spec), key) in + [proof.sub_proof, proof.base_proof] + .into_iter() + .zip(specs.iter()) + .zip(paths.iter()) { - let commitment_proof = CommitmentProof::decode(&*p.data).unwrap(); let existence_proof = match commitment_proof.clone().proof.unwrap() { Ics23Proof::Exist(ep) => ep, @@ -783,9 +833,7 @@ mod test { let nep = tree .get_non_existence_proof(&ibc_non_key) .expect("Test failed"); - let subtree_nep = nep.ops.get(0).expect("Test failed"); - let nep_commitment_proof = - CommitmentProof::decode(&*subtree_nep.data).expect("Test failed"); + let nep_commitment_proof = nep.sub_proof; let non_existence_proof = match nep_commitment_proof.clone().proof.expect("Test failed") { Ics23Proof::Nonexist(nep) => nep, @@ -809,9 +857,7 @@ mod test { sub_key.to_string().as_bytes(), ); assert!(nep_verification_res); - let basetree_ep = nep.ops.get(1).unwrap(); - let basetree_ep_commitment_proof = - CommitmentProof::decode(&*basetree_ep.data).unwrap(); + let basetree_ep_commitment_proof = nep.base_proof; let basetree_ics23_ep = match basetree_ep_commitment_proof.clone().proof.unwrap() { Ics23Proof::Exist(ep) => ep, diff --git a/shared/src/ledger/storage/mockdb.rs b/core/src/ledger/storage/mockdb.rs similarity index 99% rename from shared/src/ledger/storage/mockdb.rs rename to core/src/ledger/storage/mockdb.rs index 0daa61d935..011d8faac8 100644 --- a/shared/src/ledger/storage/mockdb.rs +++ b/core/src/ledger/storage/mockdb.rs @@ -14,7 +14,7 @@ use super::{ }; use crate::ledger::storage::types::{self, KVBytes, PrefixIterator}; #[cfg(feature = "ferveo-tpke")] -use crate::types::storage::TxQueue; +use crate::types::internal::TxQueue; use crate::types::storage::{ BlockHeight, BlockResults, Header, Key, KeySeg, KEY_SEGMENT_SEPARATOR, }; diff --git a/core/src/ledger/storage/mod.rs b/core/src/ledger/storage/mod.rs new file mode 100644 index 0000000000..9944dc3804 --- /dev/null +++ b/core/src/ledger/storage/mod.rs @@ -0,0 +1,1370 @@ +//! 
Ledger's state storage with key-value backed store and a merkle tree + +pub mod ics23_specs; +pub mod merkle_tree; +#[cfg(any(test, feature = "testing"))] +pub mod mockdb; +pub mod traits; +pub mod types; + +use core::fmt::Debug; +use std::collections::BTreeMap; + +use borsh::{BorshDeserialize, BorshSerialize}; +use masp_primitives::asset_type::AssetType; +use masp_primitives::convert::AllowedConversion; +use masp_primitives::merkle_tree::FrozenCommitmentTree; +use masp_primitives::sapling::Node; +pub use merkle_tree::{ + MembershipProof, MerkleTree, MerkleTreeStoresRead, MerkleTreeStoresWrite, + StoreType, +}; +#[cfg(feature = "wasm-runtime")] +use rayon::iter::{ + IndexedParallelIterator, IntoParallelIterator, ParallelIterator, +}; +#[cfg(feature = "wasm-runtime")] +use rayon::prelude::ParallelSlice; +use thiserror::Error; +pub use traits::{Sha256Hasher, StorageHasher}; + +use crate::ledger::gas::MIN_STORAGE_GAS; +use crate::ledger::parameters::{self, EpochDuration, Parameters}; +use crate::ledger::storage::merkle_tree::{ + Error as MerkleTreeError, MerkleRoot, +}; +use crate::ledger::storage_api; +use crate::ledger::storage_api::{ResultExt, StorageRead, StorageWrite}; +#[cfg(any(feature = "tendermint", feature = "tendermint-abcipp"))] +use crate::tendermint::merkle::proof::Proof; +use crate::types::address::{ + masp, Address, EstablishedAddressGen, InternalAddress, +}; +use crate::types::chain::{ChainId, CHAIN_ID_LENGTH}; +// TODO +#[cfg(feature = "ferveo-tpke")] +use crate::types::internal::TxQueue; +use crate::types::storage::{ + BlockHash, BlockHeight, BlockResults, Epoch, Epochs, Header, Key, KeySeg, + TxIndex, BLOCK_HASH_LENGTH, +}; +use crate::types::time::DateTimeUtc; +use crate::types::token; + +/// A result of a function that may fail +pub type Result<T> = std::result::Result<T, Error>; +/// A representation of the conversion state +#[derive(Debug, Default, BorshSerialize, BorshDeserialize)] +pub struct ConversionState { + /// The merkle root from the previous epoch + pub prev_root: Node, + /// The tree currently containing all the conversions + pub tree: FrozenCommitmentTree<Node>, + /// Map assets to their latest conversion and position in Merkle tree + pub assets: BTreeMap<AssetType, ((Address, Epoch), AllowedConversion, usize)>, +} + +/// The storage data +#[derive(Debug)] +pub struct Storage<D, H> +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, +{ + /// The database for the storage + pub db: D, + /// The ID of the chain + pub chain_id: ChainId, + /// The address of the native token - this is not stored in DB, but read + /// from genesis + pub native_token: Address, + /// Block storage data + pub block: BlockStorage<H>, + /// During `FinalizeBlock`, this is the header of the block that is + /// going to be committed. After a block is committed, this is reset to + /// `None` until the next `FinalizeBlock` phase is reached. + pub header: Option<Header>
, + /// The height of the most recently committed block, or `BlockHeight(0)` if + /// no block has been committed for this chain yet. + pub last_height: BlockHeight, + /// The epoch of the most recently committed block. If it is `Epoch(0)`, + /// then no block may have been committed for this chain yet. + pub last_epoch: Epoch, + /// Minimum block height at which the next epoch may start + pub next_epoch_min_start_height: BlockHeight, + /// Minimum block time at which the next epoch may start + pub next_epoch_min_start_time: DateTimeUtc, + /// The current established address generator + pub address_gen: EstablishedAddressGen, + /// The shielded transaction index + pub tx_index: TxIndex, + /// The currently saved conversion state + pub conversion_state: ConversionState, + /// Wrapper txs to be decrypted in the next block proposal + #[cfg(feature = "ferveo-tpke")] + pub tx_queue: TxQueue, +} + +/// The block storage data +#[derive(Debug)] +pub struct BlockStorage<H: StorageHasher> { + /// Merkle tree of all the other data in block storage + pub tree: MerkleTree<H>, + /// During `FinalizeBlock`, this is updated to be the hash of the block + /// that is going to be committed. If it is `BlockHash::default()`, + /// then no `FinalizeBlock` stage has been reached yet. + pub hash: BlockHash, + /// From the start of `FinalizeBlock` until the end of `Commit`, this is + /// the height of the block that is going to be committed. Otherwise, it is + /// the height of the most recently committed block, or `BlockHeight(0)` if + /// no block has been committed yet. + pub height: BlockHeight, + /// From the start of `FinalizeBlock` until the end of `Commit`, this is + /// the epoch of the block that is going to be committed. Otherwise it is + /// the epoch of the most recently committed block, or `Epoch(0)` if no + /// block has been committed yet. + pub epoch: Epoch, + /// Results of applying transactions + pub results: BlockResults, + /// Predecessor block epochs + pub pred_epochs: Epochs, +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum Error { + #[error("TEMPORARY error: {error}")] + Temporary { error: String }, + #[error("Found an unknown key: {key}")] + UnknownKey { key: String }, + #[error("Storage key error {0}")] + KeyError(crate::types::storage::Error), + #[error("Coding error: {0}")] + CodingError(types::Error), + #[error("Merkle tree error: {0}")] + MerkleTreeError(MerkleTreeError), + #[error("DB error: {0}")] + DBError(String), + #[error("Borsh (de)-serialization error: {0}")] + BorshCodingError(std::io::Error), + #[error("Merkle tree at the height {height} is not stored")] + NoMerkleTree { height: BlockHeight }, +} + +/// The block's state as stored in the database. +pub struct BlockStateRead { + /// Merkle tree stores + pub merkle_tree_stores: MerkleTreeStoresRead, + /// Hash of the block + pub hash: BlockHash, + /// Height of the block + pub height: BlockHeight, + /// Epoch of the block + pub epoch: Epoch, + /// Predecessor block epochs + pub pred_epochs: Epochs, + /// Minimum block height at which the next epoch may start + pub next_epoch_min_start_height: BlockHeight, + /// Minimum block time at which the next epoch may start + pub next_epoch_min_start_time: DateTimeUtc, + /// Established address generator + pub address_gen: EstablishedAddressGen, + /// Results of applying transactions + pub results: BlockResults, + /// Wrapper txs to be decrypted in the next block proposal + #[cfg(feature = "ferveo-tpke")] + pub tx_queue: TxQueue, +}
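`BlockStateRead` mirrors `BlockStateWrite` field for field: `commit` below borrows the in-memory state into a `BlockStateWrite`, and `load_last_state` gets the owned `BlockStateRead` back. A hedged round-trip check (assuming the usual `PartialEq`/`Debug` derives on the field types):

```rust
/// A write followed by a read at the same height should round-trip.
fn assert_round_trip(written: &BlockStateWrite<'_>, read: &BlockStateRead) {
    assert_eq!(written.height, read.height);
    assert_eq!(written.epoch, read.epoch);
    assert_eq!(written.hash, &read.hash);
    assert_eq!(
        written.next_epoch_min_start_height,
        read.next_epoch_min_start_height
    );
}
```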
+ +/// The block's state to write into the database. +pub struct BlockStateWrite<'a> { + /// Merkle tree stores + pub merkle_tree_stores: MerkleTreeStoresWrite<'a>, + /// Header of the block + pub header: Option<&'a Header>, + /// Hash of the block + pub hash: &'a BlockHash, + /// Height of the block + pub height: BlockHeight, + /// Epoch of the block + pub epoch: Epoch, + /// Predecessor block epochs + pub pred_epochs: &'a Epochs, + /// Minimum block height at which the next epoch may start + pub next_epoch_min_start_height: BlockHeight, + /// Minimum block time at which the next epoch may start + pub next_epoch_min_start_time: DateTimeUtc, + /// Established address generator + pub address_gen: &'a EstablishedAddressGen, + /// Results of applying transactions + pub results: &'a BlockResults, + /// Wrapper txs to be decrypted in the next block proposal + #[cfg(feature = "ferveo-tpke")] + pub tx_queue: &'a TxQueue, +} + +/// A database backend. +pub trait DB: std::fmt::Debug { + /// A DB's cache + type Cache; + /// A handle for batch writes + type WriteBatch: DBWriteBatch; + + /// Open the database from provided path + fn open( + db_path: impl AsRef<std::path::Path>, + cache: Option<&Self::Cache>, + ) -> Self; + + /// Flush in-memory data to persistent storage + fn flush(&self, wait: bool) -> Result<()>; + + /// Read the last committed block's metadata + fn read_last_block(&mut self) -> Result<Option<BlockStateRead>>; + + /// Write block's metadata + fn write_block(&mut self, state: BlockStateWrite) -> Result<()>; + + /// Read the block header with the given height from the DB + fn read_block_header(&self, height: BlockHeight) -> Result<Option<Header>>; + + /// Read the merkle tree stores with the given height + fn read_merkle_tree_stores( + &self, + height: BlockHeight, + ) -> Result<Option<MerkleTreeStoresRead>>; + + /// Read the latest value for account subspace key from the DB + fn read_subspace_val(&self, key: &Key) -> Result<Option<Vec<u8>>>; + + /// Read the value for account subspace key at the given height from the DB. + /// In our `PersistentStorage` (rocksdb), finding a value at an arbitrary + /// height requires looking for diffs from the given `height`, possibly + /// up to the `last_height`. + fn read_subspace_val_with_height( + &self, + key: &Key, + height: BlockHeight, + last_height: BlockHeight, + ) -> Result<Option<Vec<u8>>>; + + /// Write the value with the given height and account subspace key to the + /// DB. Returns the size difference from previous value, if any, or the + /// size of the value otherwise. + fn write_subspace_val( + &mut self, + height: BlockHeight, + key: &Key, + value: impl AsRef<[u8]>, + ) -> Result<i64>; + + /// Delete the value with the given height and account subspace key from the + /// DB. Returns the size of the removed value, if any, 0 if no previous + /// value was found. + fn delete_subspace_val( + &mut self, + height: BlockHeight, + key: &Key, + ) -> Result<i64>; + + /// Start write batch. + fn batch() -> Self::WriteBatch; + + /// Execute write batch. + fn exec_batch(&mut self, batch: Self::WriteBatch) -> Result<()>; + + /// Batch write the value with the given height and account subspace key to + /// the DB. Returns the size difference from previous value, if any, or + /// the size of the value otherwise. + fn batch_write_subspace_val( + &self, + batch: &mut Self::WriteBatch, + height: BlockHeight, + key: &Key, + value: impl AsRef<[u8]>, + ) -> Result<i64>; + + /// Batch delete the value with the given height and account subspace key + /// from the DB. Returns the size of the removed value, if any, 0 if no + /// previous value was found. 
+ fn batch_delete_subspace_val( + &self, + batch: &mut Self::WriteBatch, + height: BlockHeight, + key: &Key, + ) -> Result<i64>; +} + +/// A database prefix iterator. +pub trait DBIter<'iter> { + /// The concrete type of the iterator + type PrefixIter: Debug + Iterator<Item = (String, Vec<u8>, u64)>; + + /// Read account subspace key value pairs with the given prefix from the DB, + /// ordered by the storage keys. + fn iter_prefix(&'iter self, prefix: &Key) -> Self::PrefixIter; + + /// Read account subspace key value pairs with the given prefix from the DB, + /// reverse ordered by the storage keys. + fn rev_iter_prefix(&'iter self, prefix: &Key) -> Self::PrefixIter; + + /// Read results subspace key value pairs from the DB + fn iter_results(&'iter self) -> Self::PrefixIter; +} + +/// Atomic batch write. +pub trait DBWriteBatch { + /// Insert a value into the database under the given key. + fn put<K, V>(&mut self, key: K, value: V) + where + K: AsRef<[u8]>, + V: AsRef<[u8]>; + + /// Removes the database entry for key. Does nothing if the key was not + /// found. + fn delete<K: AsRef<[u8]>>(&mut self, key: K); +}
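A hedged usage sketch of the batch API above: writes staged on a `WriteBatch` hit the database only when `exec_batch` runs, and then atomically. The raw `put` calls here are purely illustrative; subspace writes normally go through `batch_write_subspace_val`.

```rust
/// Stage two raw writes, then submit them in one atomic batch.
fn write_two_atomically<D: DB>(db: &mut D, k1: &str, k2: &str) -> Result<()> {
    let mut batch = D::batch();
    batch.put(k1, b"value-1");
    batch.put(k2, b"value-2");
    // Nothing is visible in the DB until the batch executes
    db.exec_batch(batch)
}
```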
+ +impl<D, H> Storage<D, H> +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, +{ + /// open up a new instance of the storage given path to db and chain id + pub fn open( + db_path: impl AsRef<std::path::Path>, + chain_id: ChainId, + native_token: Address, + cache: Option<&D::Cache>, + ) -> Self { + let block = BlockStorage { + tree: MerkleTree::default(), + hash: BlockHash::default(), + height: BlockHeight::default(), + epoch: Epoch::default(), + pred_epochs: Epochs::default(), + results: BlockResults::default(), + }; + Storage::<D, H> { + db: D::open(db_path, cache), + chain_id, + block, + header: None, + last_height: BlockHeight(0), + last_epoch: Epoch::default(), + next_epoch_min_start_height: BlockHeight::default(), + next_epoch_min_start_time: DateTimeUtc::now(), + address_gen: EstablishedAddressGen::new( + "Privacy is a function of liberty.", + ), + tx_index: TxIndex::default(), + conversion_state: ConversionState::default(), + #[cfg(feature = "ferveo-tpke")] + tx_queue: TxQueue::default(), + native_token, + } + } + + /// Load the full state at the last committed height, if any. Returns the + /// Merkle root hash and the height of the committed block. + pub fn load_last_state(&mut self) -> Result<()> { + if let Some(BlockStateRead { + merkle_tree_stores, + hash, + height, + epoch, + pred_epochs, + next_epoch_min_start_height, + next_epoch_min_start_time, + results, + address_gen, + #[cfg(feature = "ferveo-tpke")] + tx_queue, + }) = self.db.read_last_block()? + { + self.block.tree = MerkleTree::new(merkle_tree_stores); + self.block.hash = hash; + self.block.height = height; + self.block.epoch = epoch; + self.block.results = results; + self.block.pred_epochs = pred_epochs; + self.last_height = height; + self.last_epoch = epoch; + self.next_epoch_min_start_height = next_epoch_min_start_height; + self.next_epoch_min_start_time = next_epoch_min_start_time; + self.address_gen = address_gen; + if self.last_epoch.0 > 1 { + // The derived conversions will be placed in MASP address space + let masp_addr = masp(); + let key_prefix: Key = masp_addr.to_db_key().into(); + // Load up the conversions currently being given as query + // results + let state_key = key_prefix + .push(&(token::CONVERSION_KEY_PREFIX.to_owned())) + .map_err(Error::KeyError)?; + self.conversion_state = types::decode( + self.read(&state_key) + .expect("unable to read conversion state") + .0 + .expect("unable to find conversion state"), + ) + .expect("unable to decode conversion state") + } + #[cfg(feature = "ferveo-tpke")] + { + self.tx_queue = tx_queue; + } + tracing::debug!("Loaded storage from DB"); + } else { + tracing::info!("No state could be found"); + } + Ok(()) + } + + /// Returns the Merkle root hash and the height of the committed block. If + /// no block exists, returns None. + pub fn get_state(&self) -> Option<(MerkleRoot, u64)> { + if self.block.height.0 != 0 { + Some((self.block.tree.root(), self.block.height.0)) + } else { + None + } + } + + /// Persist the current block's state to the database + pub fn commit(&mut self) -> Result<()> { + let state = BlockStateWrite { + merkle_tree_stores: self.block.tree.stores(), + header: self.header.as_ref(), + hash: &self.block.hash, + height: self.block.height, + epoch: self.block.epoch, + results: &self.block.results, + pred_epochs: &self.block.pred_epochs, + next_epoch_min_start_height: self.next_epoch_min_start_height, + next_epoch_min_start_time: self.next_epoch_min_start_time, + address_gen: &self.address_gen, + #[cfg(feature = "ferveo-tpke")] + tx_queue: &self.tx_queue, + }; + self.db.write_block(state)?; + self.last_height = self.block.height; + self.last_epoch = self.block.epoch; + self.header = None; + Ok(()) + } + + /// Find the root hash of the merkle tree + pub fn merkle_root(&self) -> MerkleRoot { + self.block.tree.root() + } + + /// Check if the given key is present in storage. Returns the result and the + /// gas cost. + pub fn has_key(&self, key: &Key) -> Result<(bool, u64)> { + Ok((self.block.tree.has_key(key)?, key.len() as _)) + } + + /// Returns a value from the specified subspace and the gas cost + pub fn read(&self, key: &Key) -> Result<(Option<Vec<u8>>, u64)> { + tracing::debug!("storage read key {}", key); + let (present, gas) = self.has_key(key)?; + if !present { + return Ok((None, gas)); + } + + match self.db.read_subspace_val(key)? { + Some(v) => { + let gas = key.len() + v.len(); + Ok((Some(v), gas as _)) + } + None => Ok((None, key.len() as _)), + } + }
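`read` above charges gas for exactly the bytes it touches: the key alone on a miss, key plus value on a hit. A tiny illustration of the rule:

```rust
/// The gas rule of `Storage::read` above (illustrative restatement).
fn read_gas(key_len: usize, value: Option<&[u8]>) -> u64 {
    match value {
        // Hit: pay for key and value bytes
        Some(v) => (key_len + v.len()) as u64,
        // Miss: only the key length is charged
        None => key_len as u64,
    }
}

#[test]
fn read_gas_examples() {
    assert_eq!(read_gas(8, Some(&[0u8; 32])), 40); // hit: 8 + 32
    assert_eq!(read_gas(8, None), 8); // miss: key only
}
```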
+ + /// Returns a value from the specified subspace at the given height and the + /// gas cost + pub fn read_with_height( + &self, + key: &Key, + height: BlockHeight, + ) -> Result<(Option<Vec<u8>>, u64)> { + if height >= self.last_height { + self.read(key) + } else { + match self.db.read_subspace_val_with_height( + key, + height, + self.last_height, + )? { + Some(v) => { + let gas = key.len() + v.len(); + Ok((Some(v), gas as _)) + } + None => Ok((None, key.len() as _)), + } + } + } + + /// Returns a prefix iterator, ordered by storage keys, and the gas cost + pub fn iter_prefix( + &self, + prefix: &Key, + ) -> (<D as DBIter<'_>>::PrefixIter, u64) { + (self.db.iter_prefix(prefix), prefix.len() as _) + } + + /// Returns a prefix iterator, reverse ordered by storage keys, and the gas + /// cost + pub fn rev_iter_prefix( + &self, + prefix: &Key, + ) -> (<D as DBIter<'_>>::PrefixIter, u64) { + (self.db.rev_iter_prefix(prefix), prefix.len() as _) + } + + /// Returns a prefix iterator and the gas cost + pub fn iter_results(&self) -> (<D as DBIter<'_>>::PrefixIter, u64) { + (self.db.iter_results(), 0) + } + + /// Write a value to the specified subspace and returns the gas cost and the + /// size difference + pub fn write( + &mut self, + key: &Key, + value: impl AsRef<[u8]>, + ) -> Result<(u64, i64)> { + // Note that this method is the same as `StorageWrite::write_bytes`, + // but with gas and storage bytes len diff accounting + tracing::debug!("storage write key {}", key,); + let value = value.as_ref(); + self.block.tree.update(key, value)?; + + let len = value.len(); + let gas = key.len() + len; + let size_diff = + self.db.write_subspace_val(self.block.height, key, value)?; + Ok((gas as _, size_diff)) + } + + /// Delete the specified subspace and returns the gas cost and the size + /// difference + pub fn delete(&mut self, key: &Key) -> Result<(u64, i64)> { + // Note that this method is the same as `StorageWrite::delete`, + // but with gas and storage bytes len diff accounting + let mut deleted_bytes_len = 0; + if self.has_key(key)?.0 { + self.block.tree.delete(key)?; + deleted_bytes_len = + self.db.delete_subspace_val(self.block.height, key)?; + } + let gas = key.len() + deleted_bytes_len as usize; + Ok((gas as _, deleted_bytes_len)) + } + + /// Set the block header. + /// The header is not in the Merkle tree as it's tracked by Tendermint. + /// Hence, we don't update the tree when this is set. + pub fn set_header(&mut self, header: Header) -> Result<()> { + self.header = Some(header); + Ok(()) + } + + /// Block data is in the Merkle tree as it's tracked by Tendermint in the + /// block header. Hence, we don't update the tree when this is set. + pub fn begin_block( + &mut self, + hash: BlockHash, + height: BlockHeight, + ) -> Result<()> { + self.block.hash = hash; + self.block.height = height; + Ok(()) + } + + /// Get a validity predicate for the given account address and the gas cost + /// for reading it. + pub fn validity_predicate( + &self, + addr: &Address, + ) -> Result<(Option<Vec<u8>>, u64)> { + let key = if let Address::Implicit(_) = addr { + parameters::storage::get_implicit_vp_key() + } else { + Key::validity_predicate(addr) + }; + self.read(&key) + } + + #[allow(dead_code)] + /// Check if the given address exists on chain and return the gas cost. 
+ pub fn exists(&self, addr: &Address) -> Result<(bool, u64)> { + let key = Key::validity_predicate(addr); + self.has_key(&key) + } + + /// Get the chain ID as a raw string + pub fn get_chain_id(&self) -> (String, u64) { + (self.chain_id.to_string(), CHAIN_ID_LENGTH as _) + } + + /// Get the block height + pub fn get_block_height(&self) -> (BlockHeight, u64) { + (self.block.height, MIN_STORAGE_GAS) + } + + /// Get the block hash + pub fn get_block_hash(&self) -> (BlockHash, u64) { + (self.block.hash.clone(), BLOCK_HASH_LENGTH as _) + } + + /// Get the existence proof + #[cfg(any(feature = "tendermint", feature = "tendermint-abcipp"))] + pub fn get_existence_proof( + &self, + key: &Key, + value: crate::types::storage::MerkleValue, + height: BlockHeight, + ) -> Result { + use std::array; + + if height >= self.get_block_height().0 { + let MembershipProof::ICS23(proof) = self + .block + .tree + .get_sub_tree_existence_proof(array::from_ref(key), vec![value]) + .map_err(Error::MerkleTreeError)?; + self.block + .tree + .get_sub_tree_proof(key, proof) + .map(Into::into) + .map_err(Error::MerkleTreeError) + } else { + match self.db.read_merkle_tree_stores(height)? { + Some(stores) => { + let tree = MerkleTree::::new(stores); + let MembershipProof::ICS23(proof) = tree + .get_sub_tree_existence_proof( + array::from_ref(key), + vec![value], + ) + .map_err(Error::MerkleTreeError)?; + tree.get_sub_tree_proof(key, proof) + .map(Into::into) + .map_err(Error::MerkleTreeError) + } + None => Err(Error::NoMerkleTree { height }), + } + } + } + + /// Get the non-existence proof + #[cfg(any(feature = "tendermint", feature = "tendermint-abcipp"))] + pub fn get_non_existence_proof( + &self, + key: &Key, + height: BlockHeight, + ) -> Result { + if height >= self.last_height { + self.block + .tree + .get_non_existence_proof(key) + .map(Into::into) + .map_err(Error::MerkleTreeError) + } else { + match self.db.read_merkle_tree_stores(height)? { + Some(stores) => MerkleTree::::new(stores) + .get_non_existence_proof(key) + .map(Into::into) + .map_err(Error::MerkleTreeError), + None => Err(Error::NoMerkleTree { height }), + } + } + } + + /// Get the current (yet to be committed) block epoch + pub fn get_current_epoch(&self) -> (Epoch, u64) { + (self.block.epoch, MIN_STORAGE_GAS) + } + + /// Get the epoch of the last committed block + pub fn get_last_epoch(&self) -> (Epoch, u64) { + (self.last_epoch, MIN_STORAGE_GAS) + } + + /// Initialize the first epoch. The first epoch begins at genesis time. + pub fn init_genesis_epoch( + &mut self, + initial_height: BlockHeight, + genesis_time: DateTimeUtc, + parameters: &Parameters, + ) -> Result<()> { + let EpochDuration { + min_num_of_blocks, + min_duration, + } = parameters.epoch_duration; + self.next_epoch_min_start_height = initial_height + min_num_of_blocks; + self.next_epoch_min_start_time = genesis_time + min_duration; + self.update_epoch_in_merkle_tree() + } + + /// Get the block header + pub fn get_block_header( + &self, + height: Option, + ) -> Result<(Option
, u64)> { + match height { + Some(h) if h == self.get_block_height().0 => { + Ok((self.header.clone(), MIN_STORAGE_GAS)) + } + Some(h) => match self.db.read_block_header(h)? { + Some(header) => { + let gas = header.encoded_len() as u64; + Ok((Some(header), gas)) + } + None => Ok((None, MIN_STORAGE_GAS)), + }, + None => Ok((self.header.clone(), MIN_STORAGE_GAS)), + } + } + + /// Initialize a new epoch when the current epoch is finished. Returns + /// `true` on a new epoch. + #[cfg(feature = "wasm-runtime")] + pub fn update_epoch( + &mut self, + height: BlockHeight, + time: DateTimeUtc, + ) -> Result { + let (parameters, _gas) = + parameters::read(self).expect("Couldn't read protocol parameters"); + + // Check if the current epoch is over + let new_epoch = height >= self.next_epoch_min_start_height + && time >= self.next_epoch_min_start_time; + if new_epoch { + // Begin a new epoch + self.block.epoch = self.block.epoch.next(); + let EpochDuration { + min_num_of_blocks, + min_duration, + } = parameters.epoch_duration; + self.next_epoch_min_start_height = height + min_num_of_blocks; + self.next_epoch_min_start_time = time + min_duration; + // TODO put this into PoS parameters and pass it to tendermint + // `consensus_params` on `InitChain` and `EndBlock` + let evidence_max_age_num_blocks: u64 = 100000; + self.block + .pred_epochs + .new_epoch(height, evidence_max_age_num_blocks); + tracing::info!("Began a new epoch {}", self.block.epoch); + self.update_allowed_conversions()?; + } + self.update_epoch_in_merkle_tree()?; + Ok(new_epoch) + } + + /// Get the current conversions + pub fn get_conversion_state(&self) -> &ConversionState { + &self.conversion_state + } + + // Construct MASP asset type with given timestamp for given token + #[cfg(feature = "wasm-runtime")] + fn encode_asset_type(addr: Address, epoch: Epoch) -> AssetType { + let new_asset_bytes = (addr, epoch.0) + .try_to_vec() + .expect("unable to serialize address and epoch"); + AssetType::new(new_asset_bytes.as_ref()) + .expect("unable to derive asset identifier") + } + + #[cfg(feature = "wasm-runtime")] + /// Update the MASP's allowed conversions + fn update_allowed_conversions(&mut self) -> Result<()> { + use masp_primitives::ff::PrimeField; + use masp_primitives::transaction::components::Amount as MaspAmount; + + use crate::types::address::{masp_rewards, nam}; + + // The derived conversions will be placed in MASP address space + let masp_addr = masp(); + let key_prefix: Key = masp_addr.to_db_key().into(); + + let masp_rewards = masp_rewards(); + // The total transparent value of the rewards being distributed + let mut total_reward = token::Amount::from(0); + + // Construct MASP asset type for rewards. Always timestamp reward tokens + // with the zeroth epoch to minimize the number of convert notes clients + // have to use. This trick works under the assumption that reward tokens + // from different epochs are exactly equivalent. 
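+        //
+        // Illustrative sketch (not part of this patch): the asset type is
+        // derived from the Borsh encoding of `(token_address, epoch)`, e.g.
+        //
+        //     let reward = Self::encode_asset_type(nam(), Epoch(0));
+        //
+        // so pinning rewards to epoch 0 yields a single stable asset type.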
+        let reward_asset_bytes = (nam(), 0u64)
+            .try_to_vec()
+            .expect("unable to serialize address and epoch");
+        let reward_asset = AssetType::new(reward_asset_bytes.as_ref())
+            .expect("unable to derive asset identifier");
+        // Conversions from the previous to current asset for each address
+        let mut current_convs = BTreeMap::<Address, AllowedConversion>::new();
+        // Reward all tokens according to above reward rates
+        for (addr, reward) in &masp_rewards {
+            // Dispense a transparent reward in parallel to the shielded rewards
+            let token_key = self.read(&token::balance_key(addr, &masp_addr));
+            if let Ok((Some(addr_balance), _)) = token_key {
+                // The reward for each reward.1 units of the current asset is
+                // reward.0 units of the reward token
+                let addr_bal: token::Amount =
+                    types::decode(addr_balance).expect("invalid balance");
+                // Since floor(a) + floor(b) <= floor(a+b), there will always be
+                // enough rewards to reimburse users
+                total_reward += (addr_bal * *reward).0;
+            }
+            // Provide an allowed conversion from previous timestamp. The
+            // negative sign allows each instance of the old asset to be
+            // cancelled out/replaced with the new asset
+            let old_asset =
+                Self::encode_asset_type(addr.clone(), self.last_epoch);
+            let new_asset =
+                Self::encode_asset_type(addr.clone(), self.block.epoch);
+            current_convs.insert(
+                addr.clone(),
+                (MaspAmount::from_pair(old_asset, -(reward.1 as i64)).unwrap()
+                    + MaspAmount::from_pair(new_asset, reward.1).unwrap()
+                    + MaspAmount::from_pair(reward_asset, reward.0).unwrap())
+                .into(),
+            );
+            // Add a conversion from the previous asset type
+            self.conversion_state.assets.insert(
+                old_asset,
+                (addr.clone(), self.last_epoch, MaspAmount::zero().into(), 0),
+            );
+        }
+
+        // Try to distribute Merkle leaf updating as evenly as possible across
+        // multiple cores
+        let num_threads = rayon::current_num_threads();
+        // Put assets into vector to enable computation batching
+        let assets: Vec<_> = self
+            .conversion_state
+            .assets
+            .values_mut()
+            .enumerate()
+            .collect();
+        // ceil(assets.len() / num_threads)
+        let notes_per_thread_max = (assets.len() - 1) / num_threads + 1;
+        // floor(assets.len() / num_threads)
+        let notes_per_thread_min = assets.len() / num_threads;
+        // Now on each core, add the latest conversion to each conversion
+        let conv_notes: Vec<Node> = assets
+            .into_par_iter()
+            .with_min_len(notes_per_thread_min)
+            .with_max_len(notes_per_thread_max)
+            .map(|(idx, (addr, _epoch, conv, pos))| {
+                // Use transitivity to update conversion
+                *conv += current_convs[addr].clone();
+                // Update conversion position to leaf we are about to create
+                *pos = idx;
+                // The merkle tree need only provide the conversion commitment,
+                // the remaining information is provided through the storage API
+                Node::new(conv.cmu().to_repr())
+            })
+            .collect();
+
+        // Update the MASP's transparent reward token balance to ensure that it
+        // is sufficiently backed to redeem rewards
+        let reward_key = token::balance_key(&nam(), &masp_addr);
+        if let Ok((Some(addr_bal), _)) = self.read(&reward_key) {
+            // If there is already a balance, then add to it
+            let addr_bal: token::Amount =
+                types::decode(addr_bal).expect("invalid balance");
+            let new_bal = types::encode(&(addr_bal + total_reward));
+            self.write(&reward_key, new_bal)
+                .expect("unable to update MASP transparent balance");
+        } else {
+            // Otherwise the rewards form the entirety of the reward token
+            // balance
+            self.write(&reward_key, types::encode(&total_reward))
+                .expect("unable to update MASP transparent balance");
+        }
+        // Try to distribute Merkle tree construction as evenly as possible
+        // across multiple cores
+        // Merkle trees must have exactly 2^n leaves to be mergeable
+        let mut notes_per_thread_rounded = 1;
+        while notes_per_thread_max > notes_per_thread_rounded * 4 {
+            notes_per_thread_rounded *= 2;
+        }
+        // Make the sub-Merkle trees in parallel
+        let tree_parts: Vec<_> = conv_notes
+            .par_chunks(notes_per_thread_rounded)
+            .map(FrozenCommitmentTree::new)
+            .collect();
+
+        // Keep the merkle root from the old tree for transactions constructed
+        // close to the epoch boundary
+        self.conversion_state.prev_root = self.conversion_state.tree.root();
+
+        // Convert conversion vector into tree so that Merkle paths can be
+        // obtained
+        self.conversion_state.tree = FrozenCommitmentTree::merge(&tree_parts);
+
+        // Add purely decoding entries to the assets map. These will be
+        // overwritten before the creation of the next commitment tree
+        for addr in masp_rewards.keys() {
+            // Add the decoding entry for the new asset type. An uncommitted
+            // node position is used since this is not a conversion.
+            let new_asset =
+                Self::encode_asset_type(addr.clone(), self.block.epoch);
+            self.conversion_state.assets.insert(
+                new_asset,
+                (
+                    addr.clone(),
+                    self.block.epoch,
+                    MaspAmount::zero().into(),
+                    self.conversion_state.tree.size(),
+                ),
+            );
+        }
+
+        // Save the current conversion state in order to avoid computing
+        // conversion commitments from scratch in the next epoch
+        let state_key = key_prefix
+            .push(&(token::CONVERSION_KEY_PREFIX.to_owned()))
+            .map_err(Error::KeyError)?;
+        self.write(&state_key, types::encode(&self.conversion_state))
+            .expect("unable to save current conversion state");
+        Ok(())
+    }
+
+    /// Update the merkle tree with epoch data
+    fn update_epoch_in_merkle_tree(&mut self) -> Result<()> {
+        let key_prefix: Key =
+            Address::Internal(InternalAddress::PoS).to_db_key().into();
+
+        let key = key_prefix
+            .push(&"epoch_start_height".to_string())
+            .map_err(Error::KeyError)?;
+        self.block
+            .tree
+            .update(&key, types::encode(&self.next_epoch_min_start_height))?;
+
+        let key = key_prefix
+            .push(&"epoch_start_time".to_string())
+            .map_err(Error::KeyError)?;
+        self.block
+            .tree
+            .update(&key, types::encode(&self.next_epoch_min_start_time))?;
+
+        let key = key_prefix
+            .push(&"current_epoch".to_string())
+            .map_err(Error::KeyError)?;
+        self.block
+            .tree
+            .update(&key, types::encode(&self.block.epoch))?;
+
+        Ok(())
+    }
+
+    /// Start write batch.
+    pub fn batch() -> D::WriteBatch {
+        D::batch()
+    }
+
+    /// Execute write batch.
+    pub fn exec_batch(&mut self, batch: D::WriteBatch) -> Result<()> {
+        self.db.exec_batch(batch)
+    }
+
+    /// Batch write the value with the given height and account subspace key to
+    /// the DB. Returns the size difference from previous value, if any, or
+    /// the size of the value otherwise.
+    pub fn batch_write_subspace_val(
+        &mut self,
+        batch: &mut D::WriteBatch,
+        key: &Key,
+        value: impl AsRef<[u8]>,
+    ) -> Result<i64> {
+        let value = value.as_ref();
+        self.block.tree.update(key, value)?;
+        self.db
+            .batch_write_subspace_val(batch, self.block.height, key, value)
+    }
+
+    /// Batch delete the value with the given height and account subspace key
+    /// from the DB. Returns the size of the removed value, if any, 0 if no
+    /// previous value was found.
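+    ///
+    /// Illustrative sketch (not part of this patch): batched writes update
+    /// the Merkle tree immediately, but DB changes are buffered until
+    /// `exec_batch`:
+    ///
+    /// ```ignore
+    /// let mut batch = Storage::<D, H>::batch();
+    /// storage.batch_write_subspace_val(&mut batch, &key, value)?;
+    /// storage.batch_delete_subspace_val(&mut batch, &old_key)?;
+    /// storage.exec_batch(batch)?; // single DB write
+    /// ```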
+    pub fn batch_delete_subspace_val(
+        &mut self,
+        batch: &mut D::WriteBatch,
+        key: &Key,
+    ) -> Result<i64> {
+        self.block.tree.delete(key)?;
+        self.db
+            .batch_delete_subspace_val(batch, self.block.height, key)
+    }
+}
+
+impl<'iter, D, H> StorageRead<'iter> for Storage<D, H>
+where
+    D: DB + for<'iter_> DBIter<'iter_>,
+    H: StorageHasher,
+{
+    type PrefixIter = <D as DBIter<'iter>>::PrefixIter;
+
+    fn read_bytes(
+        &self,
+        key: &crate::types::storage::Key,
+    ) -> std::result::Result<Option<Vec<u8>>, storage_api::Error> {
+        self.db.read_subspace_val(key).into_storage_result()
+    }
+
+    fn has_key(
+        &self,
+        key: &crate::types::storage::Key,
+    ) -> std::result::Result<bool, storage_api::Error> {
+        self.block.tree.has_key(key).into_storage_result()
+    }
+
+    fn iter_prefix(
+        &'iter self,
+        prefix: &crate::types::storage::Key,
+    ) -> std::result::Result<Self::PrefixIter, storage_api::Error> {
+        Ok(self.db.iter_prefix(prefix))
+    }
+
+    fn rev_iter_prefix(
+        &'iter self,
+        prefix: &crate::types::storage::Key,
+    ) -> std::result::Result<Self::PrefixIter, storage_api::Error> {
+        Ok(self.db.rev_iter_prefix(prefix))
+    }
+
+    fn iter_next(
+        &self,
+        iter: &mut Self::PrefixIter,
+    ) -> std::result::Result<Option<(String, Vec<u8>)>, storage_api::Error>
+    {
+        Ok(iter.next().map(|(key, val, _gas)| (key, val)))
+    }
+
+    fn get_chain_id(&self) -> std::result::Result<String, storage_api::Error> {
+        Ok(self.chain_id.to_string())
+    }
+
+    fn get_block_height(
+        &self,
+    ) -> std::result::Result<BlockHeight, storage_api::Error> {
+        Ok(self.block.height)
+    }
+
+    fn get_block_hash(
+        &self,
+    ) -> std::result::Result<BlockHash, storage_api::Error> {
+        Ok(self.block.hash.clone())
+    }
+
+    fn get_block_epoch(
+        &self,
+    ) -> std::result::Result<Epoch, storage_api::Error> {
+        Ok(self.block.epoch)
+    }
+
+    fn get_tx_index(&self) -> std::result::Result<TxIndex, storage_api::Error> {
+        Ok(self.tx_index)
+    }
+
+    fn get_native_token(
+        &self,
+    ) -> std::result::Result<Address, storage_api::Error> {
+        Ok(self.native_token.clone())
+    }
+}
+
+impl<D, H> StorageWrite for Storage<D, H>
+where
+    D: DB + for<'iter> DBIter<'iter>,
+    H: StorageHasher,
+{
+    fn write_bytes(
+        &mut self,
+        key: &crate::types::storage::Key,
+        val: impl AsRef<[u8]>,
+    ) -> storage_api::Result<()> {
+        // Note that this method is the same as `Storage::write`, but without
+        // gas and storage bytes len diff accounting, because it can only be
+        // used by the protocol that has a direct mutable access to storage
+        let val = val.as_ref();
+        self.block.tree.update(key, val).into_storage_result()?;
+        let _ = self
+            .db
+            .write_subspace_val(self.block.height, key, val)
+            .into_storage_result()?;
+        Ok(())
+    }
+
+    fn delete(
+        &mut self,
+        key: &crate::types::storage::Key,
+    ) -> storage_api::Result<()> {
+        // Note that this method is the same as `Storage::delete`, but without
+        // gas and storage bytes len diff accounting, because it can only be
+        // used by the protocol that has a direct mutable access to storage
+        self.block.tree.delete(key).into_storage_result()?;
+        let _ = self
+            .db
+            .delete_subspace_val(self.block.height, key)
+            .into_storage_result()?;
+        Ok(())
+    }
+}
+
+impl<D, H> StorageWrite for &mut Storage<D, H>
+where
+    D: DB + for<'iter> DBIter<'iter>,
+    H: StorageHasher,
+{
+    fn write<T: BorshSerialize>(
+        &mut self,
+        key: &crate::types::storage::Key,
+        val: T,
+    ) -> storage_api::Result<()> {
+        let val = val.try_to_vec().unwrap();
+        self.write_bytes(key, val)
+    }
+
+    fn write_bytes(
+        &mut self,
+        key: &crate::types::storage::Key,
+        val: impl AsRef<[u8]>,
+    ) -> storage_api::Result<()> {
+        let _ = self
+            .db
+            .write_subspace_val(self.block.height, key, val)
+            .into_storage_result()?;
+        Ok(())
+    }
+
+    fn delete(
+        &mut self,
+        key: &crate::types::storage::Key,
+    ) -> storage_api::Result<()> {
+        let _ = self
+            .db
+            .delete_subspace_val(self.block.height, key)
+            .into_storage_result()?;
+        Ok(())
+    }
+}
+
+impl From<MerkleTreeError> for Error {
+ fn from(error: MerkleTreeError) -> Self { + Self::MerkleTreeError(error) + } +} + +/// Helpers for testing components that depend on storage +#[cfg(any(test, feature = "testing"))] +pub mod testing { + use super::mockdb::MockDB; + use super::*; + use crate::ledger::storage::traits::Sha256Hasher; + use crate::types::address; + /// Storage with a mock DB for testing + pub type TestStorage = Storage; + + impl Default for TestStorage { + fn default() -> Self { + let chain_id = ChainId::default(); + let tree = MerkleTree::default(); + let block = BlockStorage { + tree, + hash: BlockHash::default(), + height: BlockHeight::default(), + epoch: Epoch::default(), + pred_epochs: Epochs::default(), + results: BlockResults::default(), + }; + Self { + db: MockDB::default(), + chain_id, + block, + header: None, + last_height: BlockHeight(0), + last_epoch: Epoch::default(), + next_epoch_min_start_height: BlockHeight::default(), + next_epoch_min_start_time: DateTimeUtc::now(), + address_gen: EstablishedAddressGen::new( + "Test address generator seed", + ), + tx_index: TxIndex::default(), + conversion_state: ConversionState::default(), + #[cfg(feature = "ferveo-tpke")] + tx_queue: TxQueue::default(), + native_token: address::nam(), + } + } + } +} + +#[cfg(test)] +mod tests { + use chrono::{TimeZone, Utc}; + use proptest::prelude::*; + use rust_decimal_macros::dec; + + use super::testing::*; + use super::*; + use crate::ledger::parameters::{self, Parameters}; + use crate::types::time::{self, Duration}; + + prop_compose! { + /// Setup test input data with arbitrary epoch duration, epoch start + /// height and time, and a block height and time that are greater than + /// the epoch start height and time, and the change to be applied to + /// the epoch duration parameters. + fn arb_and_epoch_duration_start_and_block() + ( + start_height in 0..1000_u64, + start_time in 0..10000_i64, + min_num_of_blocks in 1..10_u64, + min_duration in 1..100_i64, + max_expected_time_per_block in 1..100_i64, + ) + ( + min_num_of_blocks in Just(min_num_of_blocks), + min_duration in Just(min_duration), + max_expected_time_per_block in Just(max_expected_time_per_block), + start_height in Just(start_height), + start_time in Just(start_time), + block_height in start_height + 1..(start_height + 2 * min_num_of_blocks), + block_time in start_time + 1..(start_time + 2 * min_duration), + // Delta will be applied on the `min_num_of_blocks` parameter + min_blocks_delta in -(min_num_of_blocks as i64 - 1)..5, + // Delta will be applied on the `min_duration` parameter + min_duration_delta in -(min_duration - 1)..50, + // Delta will be applied on the `max_expected_time_per_block` parameter + max_time_per_block_delta in -(max_expected_time_per_block - 1)..50, + ) -> (EpochDuration, i64, BlockHeight, DateTimeUtc, BlockHeight, DateTimeUtc, + i64, i64, i64) { + let epoch_duration = EpochDuration { + min_num_of_blocks, + min_duration: Duration::seconds(min_duration).into(), + }; + (epoch_duration, max_expected_time_per_block, + BlockHeight(start_height), Utc.timestamp_opt(start_time, 0).single().expect("expected valid timestamp").into(), + BlockHeight(block_height), Utc.timestamp_opt(block_time, 0).single().expect("expected valid timestamp").into(), + min_blocks_delta, min_duration_delta, max_time_per_block_delta) + } + } + + proptest! { + /// Test that: + /// 1. When the minimum blocks have been created since the epoch + /// start height and minimum time passed since the epoch start time, + /// a new epoch must start. + /// 2. 
When the epoch duration parameters change, the current epoch's
+    /// duration doesn't change, but the next one does.
+    #[test]
+    fn update_epoch_after_its_duration(
+        (epoch_duration, max_expected_time_per_block, start_height, start_time, block_height, block_time,
+            min_blocks_delta, min_duration_delta, max_time_per_block_delta)
+        in arb_and_epoch_duration_start_and_block())
+    {
+        let mut storage = TestStorage {
+            next_epoch_min_start_height:
+                start_height + epoch_duration.min_num_of_blocks,
+            next_epoch_min_start_time:
+                start_time + epoch_duration.min_duration,
+            ..Default::default()
+        };
+        let mut parameters = Parameters {
+            epoch_duration: epoch_duration.clone(),
+            max_expected_time_per_block: Duration::seconds(max_expected_time_per_block).into(),
+            vp_whitelist: vec![],
+            tx_whitelist: vec![],
+            implicit_vp: vec![],
+            epochs_per_year: 100,
+            pos_gain_p: dec!(0.1),
+            pos_gain_d: dec!(0.1),
+            staked_ratio: dec!(0.1),
+            pos_inflation_amount: 0,
+        };
+        parameters.init_storage(&mut storage);
+
+        let epoch_before = storage.last_epoch;
+        assert_eq!(epoch_before, storage.block.epoch);
+
+        // Try to apply the epoch update
+        storage.update_epoch(block_height, block_time).unwrap();
+
+        // Test for 1.
+        if block_height.0 - start_height.0
+            >= epoch_duration.min_num_of_blocks
+            && time::duration_passed(
+                block_time,
+                start_time,
+                epoch_duration.min_duration,
+            )
+        {
+            assert_eq!(storage.block.epoch, epoch_before.next());
+            assert_eq!(storage.next_epoch_min_start_height,
+                block_height + epoch_duration.min_num_of_blocks);
+            assert_eq!(storage.next_epoch_min_start_time,
+                block_time + epoch_duration.min_duration);
+            assert_eq!(
+                storage.block.pred_epochs.get_epoch(BlockHeight(block_height.0 - 1)),
+                Some(epoch_before));
+            assert_eq!(
+                storage.block.pred_epochs.get_epoch(block_height),
+                Some(epoch_before.next()));
+        } else {
+            assert_eq!(storage.block.epoch, epoch_before);
+            assert_eq!(
+                storage.block.pred_epochs.get_epoch(BlockHeight(block_height.0 - 1)),
+                Some(epoch_before));
+            assert_eq!(
+                storage.block.pred_epochs.get_epoch(block_height),
+                Some(epoch_before));
+        }
+        // Last epoch should only change when the block is committed
+        assert_eq!(storage.last_epoch, epoch_before);
+
+        // Update the epoch duration parameters
+        parameters.epoch_duration.min_num_of_blocks =
+            (parameters.epoch_duration.min_num_of_blocks as i64 + min_blocks_delta) as u64;
+        let min_duration: i64 = parameters.epoch_duration.min_duration.0 as _;
+        parameters.epoch_duration.min_duration =
+            Duration::seconds(min_duration + min_duration_delta).into();
+        parameters.max_expected_time_per_block =
+            Duration::seconds(max_expected_time_per_block + max_time_per_block_delta).into();
+        parameters::update_max_expected_time_per_block_parameter(&mut storage, &parameters.max_expected_time_per_block).unwrap();
+        parameters::update_epoch_parameter(&mut storage, &parameters.epoch_duration).unwrap();
+
+        // Test for 2.
+ let epoch_before = storage.block.epoch; + let height_of_update = storage.next_epoch_min_start_height.0 ; + let time_of_update = storage.next_epoch_min_start_time; + let height_before_update = BlockHeight(height_of_update - 1); + let height_of_update = BlockHeight(height_of_update); + let time_before_update = time_of_update - Duration::seconds(1); + + // No update should happen before both epoch duration conditions are + // satisfied + storage.update_epoch(height_before_update, time_before_update).unwrap(); + assert_eq!(storage.block.epoch, epoch_before); + storage.update_epoch(height_of_update, time_before_update).unwrap(); + assert_eq!(storage.block.epoch, epoch_before); + storage.update_epoch(height_before_update, time_of_update).unwrap(); + assert_eq!(storage.block.epoch, epoch_before); + + // Update should happen at this or after this height and time + storage.update_epoch(height_of_update, time_of_update).unwrap(); + assert_eq!(storage.block.epoch, epoch_before.next()); + // The next epoch's minimum duration should change + assert_eq!(storage.next_epoch_min_start_height, + height_of_update + parameters.epoch_duration.min_num_of_blocks); + assert_eq!(storage.next_epoch_min_start_time, + time_of_update + parameters.epoch_duration.min_duration); + } + } +} diff --git a/shared/src/ledger/storage/traits.rs b/core/src/ledger/storage/traits.rs similarity index 80% rename from shared/src/ledger/storage/traits.rs rename to core/src/ledger/storage/traits.rs index e382f34d73..79427c06fb 100644 --- a/shared/src/ledger/storage/traits.rs +++ b/core/src/ledger/storage/traits.rs @@ -9,12 +9,10 @@ use ics23::commitment_proof::Proof as Ics23Proof; use ics23::{CommitmentProof, ExistenceProof}; use sha2::{Digest, Sha256}; -use super::merkle_tree::{Amt, Error, Smt}; -use super::{ics23_specs, IBC_KEY_LIMIT}; +use super::ics23_specs; +use super::merkle_tree::{Amt, Error, MembershipProof, Smt}; use crate::types::hash::Hash; -use crate::types::storage::{ - Key, MembershipProof, MerkleValue, StringKey, TreeBytes, -}; +use crate::types::storage::{Key, MerkleValue, StringKey, TreeBytes}; /// Trait for reading from a merkle tree that is a sub-tree /// of the global merkle tree. @@ -159,74 +157,6 @@ impl<'a, H: StorageHasher + Default> SubTreeWrite for &'a mut Amt { } } -impl TreeKey for StringKey { - type Error = Error; - - fn as_slice(&self) -> &[u8] { - &self.original.as_slice()[..self.length] - } - - fn try_from_bytes(bytes: &[u8]) -> Result { - let mut tree_key = [0u8; IBC_KEY_LIMIT]; - let mut original = [0u8; IBC_KEY_LIMIT]; - let mut length = 0; - for (i, byte) in bytes.iter().enumerate() { - if i >= IBC_KEY_LIMIT { - return Err(Error::InvalidMerkleKey( - "Input IBC key is too large".into(), - )); - } - original[i] = *byte; - tree_key[i] = byte.wrapping_add(1); - length += 1; - } - Ok(Self { - original, - tree_key: tree_key.into(), - length, - }) - } -} - -impl Value for Hash { - fn as_slice(&self) -> &[u8] { - self.0.as_slice() - } - - fn zero() -> Self { - Hash([0u8; 32]) - } -} - -impl From for H256 { - fn from(hash: Hash) -> Self { - hash.0.into() - } -} - -impl From for Hash { - fn from(hash: H256) -> Self { - Self(hash.into()) - } -} - -impl From<&H256> for Hash { - fn from(hash: &H256) -> Self { - let hash = hash.to_owned(); - Self(hash.into()) - } -} - -impl Value for TreeBytes { - fn as_slice(&self) -> &[u8] { - self.0.as_slice() - } - - fn zero() -> Self { - TreeBytes::zero() - } -} - /// The storage hasher used for the merkle tree. 
pub trait StorageHasher: Hasher + Default { /// Hash the value to store diff --git a/shared/src/ledger/storage/types.rs b/core/src/ledger/storage/types.rs similarity index 100% rename from shared/src/ledger/storage/types.rs rename to core/src/ledger/storage/types.rs diff --git a/shared/src/ledger/storage_api/collections/lazy_map.rs b/core/src/ledger/storage_api/collections/lazy_map.rs similarity index 100% rename from shared/src/ledger/storage_api/collections/lazy_map.rs rename to core/src/ledger/storage_api/collections/lazy_map.rs diff --git a/shared/src/ledger/storage_api/collections/lazy_vec.rs b/core/src/ledger/storage_api/collections/lazy_vec.rs similarity index 100% rename from shared/src/ledger/storage_api/collections/lazy_vec.rs rename to core/src/ledger/storage_api/collections/lazy_vec.rs diff --git a/shared/src/ledger/storage_api/collections/mod.rs b/core/src/ledger/storage_api/collections/mod.rs similarity index 100% rename from shared/src/ledger/storage_api/collections/mod.rs rename to core/src/ledger/storage_api/collections/mod.rs diff --git a/shared/src/ledger/storage_api/error.rs b/core/src/ledger/storage_api/error.rs similarity index 100% rename from shared/src/ledger/storage_api/error.rs rename to core/src/ledger/storage_api/error.rs diff --git a/shared/src/ledger/storage_api/key.rs b/core/src/ledger/storage_api/key.rs similarity index 100% rename from shared/src/ledger/storage_api/key.rs rename to core/src/ledger/storage_api/key.rs diff --git a/shared/src/ledger/storage_api/mod.rs b/core/src/ledger/storage_api/mod.rs similarity index 100% rename from shared/src/ledger/storage_api/mod.rs rename to core/src/ledger/storage_api/mod.rs diff --git a/shared/src/ledger/storage_api/validation/mod.rs b/core/src/ledger/storage_api/validation/mod.rs similarity index 100% rename from shared/src/ledger/storage_api/validation/mod.rs rename to core/src/ledger/storage_api/validation/mod.rs diff --git a/shared/src/ledger/tx_env.rs b/core/src/ledger/tx_env.rs similarity index 100% rename from shared/src/ledger/tx_env.rs rename to core/src/ledger/tx_env.rs diff --git a/core/src/ledger/vp_env.rs b/core/src/ledger/vp_env.rs new file mode 100644 index 0000000000..49bd5d515c --- /dev/null +++ b/core/src/ledger/vp_env.rs @@ -0,0 +1,180 @@ +//! Validity predicate environment contains functions that can be called from +//! inside validity predicates. + +use borsh::BorshDeserialize; + +use super::storage_api::{self, StorageRead}; +use crate::types::address::Address; +use crate::types::hash::Hash; +use crate::types::key::common; +use crate::types::storage::{BlockHash, BlockHeight, Epoch, Key, TxIndex}; + +/// Validity predicate's environment is available for native VPs and WASM VPs +pub trait VpEnv<'view> { + /// Storage read prefix iterator + type PrefixIter; + + /// Type to read storage state before the transaction execution + type Pre: StorageRead<'view, PrefixIter = Self::PrefixIter>; + + /// Type to read storage state after the transaction execution + type Post: StorageRead<'view, PrefixIter = Self::PrefixIter>; + + /// Read storage state before the transaction execution + fn pre(&'view self) -> Self::Pre; + + /// Read storage state after the transaction execution + fn post(&'view self) -> Self::Post; + + /// Storage read temporary state Borsh encoded value (after tx execution). + /// It will try to read from only the write log and then decode it if + /// found. 
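+    ///
+    /// Illustrative sketch (not part of this patch), assuming some Borsh
+    /// type `Amount` was written as a temporary value by the tx:
+    ///
+    /// ```ignore
+    /// let amount: Option<Amount> = ctx.read_temp(&key)?;
+    /// ```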
+    fn read_temp<T: BorshDeserialize>(
+        &self,
+        key: &Key,
+    ) -> Result<Option<T>, storage_api::Error>;
+
+    /// Storage read temporary state raw bytes (after tx execution). It will
+    /// try to read from only the write log.
+    fn read_bytes_temp(
+        &self,
+        key: &Key,
+    ) -> Result<Option<Vec<u8>>, storage_api::Error>;
+
+    /// Getting the chain ID.
+    fn get_chain_id(&'view self) -> Result<String, storage_api::Error>;
+
+    /// Getting the block height. The height is that of the block to which the
+    /// current transaction is being applied.
+    fn get_block_height(&'view self)
+    -> Result<BlockHeight, storage_api::Error>;
+
+    /// Getting the block hash. The height is that of the block to which the
+    /// current transaction is being applied.
+    fn get_block_hash(&'view self) -> Result<BlockHash, storage_api::Error>;
+
+    /// Getting the block epoch. The epoch is that of the block to which the
+    /// current transaction is being applied.
+    fn get_block_epoch(&'view self) -> Result<Epoch, storage_api::Error>;
+
+    /// Get the shielded transaction index.
+    fn get_tx_index(&'view self) -> Result<TxIndex, storage_api::Error>;
+
+    /// Get the address of the native token.
+    fn get_native_token(&'view self) -> Result<Address, storage_api::Error>;
+
+    /// Storage prefix iterator, ordered by storage keys. It will try to get an
+    /// iterator from the storage.
+    fn iter_prefix(
+        &'view self,
+        prefix: &Key,
+    ) -> Result<Self::PrefixIter, storage_api::Error>;
+
+    /// Storage prefix iterator, reverse ordered by storage keys. It will try
+    /// to get an iterator from the storage.
+    fn rev_iter_prefix(
+        &self,
+        prefix: &Key,
+    ) -> Result<Self::PrefixIter, storage_api::Error>;
+
+    /// Evaluate a validity predicate with given data. The address, changed
+    /// storage keys and verifiers will have the same values as the input to
+    /// caller's validity predicate.
+    ///
+    /// If the execution fails for whatever reason, this will return `false`.
+    /// Otherwise returns the result of evaluation.
+    fn eval(
+        &self,
+        vp_code: Vec<u8>,
+        input_data: Vec<u8>,
+    ) -> Result<bool, storage_api::Error>;
+
+    /// Verify a transaction signature. The signature is expected to have been
+    /// produced on the encoded transaction [`crate::proto::Tx`]
+    /// using [`crate::proto::Tx::sign`].
+    fn verify_tx_signature(
+        &self,
+        pk: &common::PublicKey,
+        sig: &common::Signature,
+    ) -> Result<bool, storage_api::Error>;
+
+    /// Get a tx hash
+    fn get_tx_code_hash(&self) -> Result<Hash, storage_api::Error>;
+
+    /// Verify a MASP transaction
+    fn verify_masp(&self, tx: Vec<u8>) -> Result<bool, storage_api::Error>;
+
+    // ---- Methods below have default implementation via `pre/post` ----
+
+    /// Storage read prior state Borsh encoded value (before tx execution). It
+    /// will try to read from the storage and decode it if found.
+    fn read_pre<T: BorshDeserialize>(
+        &'view self,
+        key: &Key,
+    ) -> Result<Option<T>, storage_api::Error> {
+        self.pre().read(key)
+    }
+
+    /// Storage read prior state raw bytes (before tx execution). It
+    /// will try to read from the storage.
+    fn read_bytes_pre(
+        &'view self,
+        key: &Key,
+    ) -> Result<Option<Vec<u8>>, storage_api::Error> {
+        self.pre().read_bytes(key)
+    }
+
+    /// Storage read posterior state Borsh encoded value (after tx execution).
+    /// It will try to read from the write log first and if no entry found
+    /// then from the storage and then decode it if found.
+    fn read_post<T: BorshDeserialize>(
+        &'view self,
+        key: &Key,
+    ) -> Result<Option<T>, storage_api::Error> {
+        self.post().read(key)
+    }
+
+    /// Storage read posterior state raw bytes (after tx execution). It will
+    /// try to read from the write log first and if no entry found then from
+    /// the storage.
+    fn read_bytes_post(
+        &'view self,
+        key: &Key,
+    ) -> Result<Option<Vec<u8>>, storage_api::Error> {
+        self.post().read_bytes(key)
+    }
+
+    /// Storage `has_key` in prior state (before tx execution). It will try to
+    /// read from the storage.
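+    ///
+    /// Illustrative sketch (not part of this patch): a VP would typically
+    /// compare pre/post state of a key it guards, e.g. a hypothetical
+    /// counter that may only increase:
+    ///
+    /// ```ignore
+    /// let before: Option<u64> = ctx.read_pre(&counter_key)?;
+    /// let after: Option<u64> = ctx.read_post(&counter_key)?;
+    /// let valid = after >= before;
+    /// ```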
+ fn has_key_pre(&'view self, key: &Key) -> Result { + self.pre().has_key(key) + } + + /// Storage `has_key` in posterior state (after tx execution). It will try + /// to check the write log first and if no entry found then the storage. + fn has_key_post( + &'view self, + key: &Key, + ) -> Result { + self.post().has_key(key) + } + + /// Storage prefix iterator for prior state (before tx execution). It will + /// try to read from the storage. + fn iter_pre_next( + &'view self, + iter: &mut Self::PrefixIter, + ) -> Result)>, storage_api::Error> { + self.pre().iter_next(iter) + } + + /// Storage prefix iterator next for posterior state (after tx execution). + /// It will try to read from the write log first and if no entry found + /// then from the storage. + fn iter_post_next( + &'view self, + iter: &mut Self::PrefixIter, + ) -> Result)>, storage_api::Error> { + self.post().iter_next(iter) + } +} diff --git a/core/src/lib.rs b/core/src/lib.rs new file mode 100644 index 0000000000..b20e4aa150 --- /dev/null +++ b/core/src/lib.rs @@ -0,0 +1,26 @@ +//! The core public types, storage_api, VpEnv and TxEnv. + +#![doc(html_favicon_url = "https://dev.anoma.net/master/favicon.png")] +#![doc(html_logo_url = "https://dev.anoma.net/master/rustdoc-logo.png")] +#![warn(missing_docs)] +#![deny(rustdoc::broken_intra_doc_links)] +#![deny(rustdoc::private_intra_doc_links)] + +pub mod bytes; +pub mod ledger; +pub mod proto; +pub mod types; + +#[cfg(feature = "abciplus")] +pub use {ibc, ibc_proto, tendermint, tendermint_proto}; +#[cfg(feature = "abcipp")] +pub use { + ibc_abcipp as ibc, ibc_proto_abcipp as ibc_proto, + tendermint_abcipp as tendermint, + tendermint_proto_abcipp as tendermint_proto, +}; + +// A handy macro for tests +#[cfg(test)] +#[macro_use] +extern crate assert_matches; diff --git a/shared/src/proto/generated.rs b/core/src/proto/generated.rs similarity index 100% rename from shared/src/proto/generated.rs rename to core/src/proto/generated.rs diff --git a/shared/src/proto/generated/.gitignore b/core/src/proto/generated/.gitignore similarity index 100% rename from shared/src/proto/generated/.gitignore rename to core/src/proto/generated/.gitignore diff --git a/shared/src/proto/mod.rs b/core/src/proto/mod.rs similarity index 100% rename from shared/src/proto/mod.rs rename to core/src/proto/mod.rs diff --git a/shared/src/proto/types.rs b/core/src/proto/types.rs similarity index 98% rename from shared/src/proto/types.rs rename to core/src/proto/types.rs index f2df360051..5901c02dca 100644 --- a/shared/src/proto/types.rs +++ b/core/src/proto/types.rs @@ -7,10 +7,7 @@ use serde::{Deserialize, Serialize}; use thiserror::Error; use super::generated::types; -#[cfg(feature = "ferveo-tpke")] -use crate::tendermint_proto::abci::Event; -#[cfg(feature = "ferveo-tpke")] -use crate::tendermint_proto::abci::EventAttribute; +#[cfg(any(feature = "tendermint", feature = "tendermint-abcipp"))] use crate::tendermint_proto::abci::ResponseDeliverTx; use crate::types::key::*; use crate::types::time::DateTimeUtc; @@ -164,6 +161,7 @@ impl From for types::Tx { } } +#[cfg(any(feature = "tendermint", feature = "tendermint-abcipp"))] impl From for ResponseDeliverTx { #[cfg(not(feature = "ferveo-tpke"))] fn from(_tx: Tx) -> ResponseDeliverTx { @@ -173,6 +171,8 @@ impl From for ResponseDeliverTx { /// Annotate the Tx with meta-data based on its contents #[cfg(feature = "ferveo-tpke")] fn from(tx: Tx) -> ResponseDeliverTx { + use crate::tendermint_proto::abci::{Event, EventAttribute}; + #[cfg(feature = "ABCI")] fn encode_str(x: 
&str) -> Vec { x.as_bytes().to_vec() diff --git a/shared/src/types/address.rs b/core/src/types/address.rs similarity index 100% rename from shared/src/types/address.rs rename to core/src/types/address.rs diff --git a/shared/src/types/chain.rs b/core/src/types/chain.rs similarity index 100% rename from shared/src/types/chain.rs rename to core/src/types/chain.rs diff --git a/shared/src/types/governance.rs b/core/src/types/governance.rs similarity index 89% rename from shared/src/types/governance.rs rename to core/src/types/governance.rs index a7de68c8ff..438017a370 100644 --- a/shared/src/types/governance.rs +++ b/core/src/types/governance.rs @@ -9,13 +9,12 @@ use rust_decimal::Decimal; use serde::{Deserialize, Serialize}; use thiserror::Error; -use super::address::Address; -use super::hash::Hash; -use super::key::common::{self, Signature}; -use super::key::SigScheme; -use super::storage::Epoch; -use super::token::SCALE; -use super::transaction::governance::InitProposalData; +use crate::types::address::Address; +use crate::types::hash::Hash; +use crate::types::key::common::{self, Signature}; +use crate::types::key::SigScheme; +use crate::types::storage::Epoch; +use crate::types::token::SCALE; /// Type alias for vote power pub type VotePower = u128; @@ -163,31 +162,6 @@ pub enum ProposalError { InvalidProposalData, } -impl TryFrom for InitProposalData { - type Error = ProposalError; - - fn try_from(proposal: Proposal) -> Result { - let proposal_code = if let Some(path) = proposal.proposal_code_path { - match std::fs::read(path) { - Ok(bytes) => Some(bytes), - Err(_) => return Err(Self::Error::InvalidProposalData), - } - } else { - None - }; - - Ok(InitProposalData { - id: proposal.id, - content: proposal.content.try_to_vec().unwrap(), - author: proposal.author, - voting_start_epoch: proposal.voting_start_epoch, - voting_end_epoch: proposal.voting_end_epoch, - grace_epoch: proposal.grace_epoch, - proposal_code, - }) - } -} - #[derive( Debug, Clone, BorshSerialize, BorshDeserialize, Serialize, Deserialize, )] diff --git a/shared/src/types/hash.rs b/core/src/types/hash.rs similarity index 67% rename from shared/src/types/hash.rs rename to core/src/types/hash.rs index 4198cac4d5..74bfe3dd45 100644 --- a/shared/src/types/hash.rs +++ b/core/src/types/hash.rs @@ -5,17 +5,14 @@ use std::ops::Deref; use std::str::FromStr; use arse_merkle_tree::traits::Value; -use arse_merkle_tree::Hash as TreeHash; +use arse_merkle_tree::{Hash as TreeHash, H256}; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; -use hex::FromHex; +use data_encoding::HEXUPPER; use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; use thiserror::Error; -use crate::tendermint::abci::transaction; -use crate::tendermint::Hash as TmHash; - -/// The length of the raw transaction hash. 
+/// The length of the transaction hash string pub const HASH_LENGTH: usize = 32; #[allow(missing_docs)] @@ -26,7 +23,7 @@ pub enum Error { #[error("Failed trying to convert slice to a hash: {0}")] ConversionFailed(std::array::TryFromSliceError), #[error("Failed to convert string into a hash: {0}")] - FromStringError(hex::FromHexError), + FromStringError(data_encoding::DecodeError), } /// Result for functions that may fail @@ -46,14 +43,11 @@ pub type HashResult = std::result::Result; Deserialize, )] /// A hash, typically a sha-2 hash of a tx -pub struct Hash(pub [u8; 32]); +pub struct Hash(pub [u8; HASH_LENGTH]); impl Display for Hash { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - for byte in &self.0 { - write!(f, "{:02X}", byte)?; - } - Ok(()) + write!(f, "{}", HEXUPPER.encode(&self.0)) } } @@ -64,7 +58,7 @@ impl AsRef<[u8]> for Hash { } impl Deref for Hash { - type Target = [u8; 32]; + type Target = [u8; HASH_LENGTH]; fn deref(&self) -> &Self::Target { &self.0 @@ -84,7 +78,7 @@ impl TryFrom<&[u8]> for Hash { ), }); } - let hash: [u8; 32] = + let hash: [u8; HASH_LENGTH] = TryFrom::try_from(value).map_err(Error::ConversionFailed)?; Ok(Hash(hash)) } @@ -102,16 +96,10 @@ impl TryFrom<&str> for Hash { type Error = self::Error; fn try_from(string: &str) -> HashResult { - Ok(Self( - <[u8; HASH_LENGTH]>::from_hex(string) - .map_err(Error::FromStringError)?, - )) - } -} - -impl From for transaction::Hash { - fn from(hash: Hash) -> Self { - Self::new(hash.0) + let vec = HEXUPPER + .decode(string.as_ref()) + .map_err(Error::FromStringError)?; + Self::try_from(&vec[..]) } } @@ -130,15 +118,27 @@ impl Hash { Self(*digest.as_ref()) } + fn zero() -> Self { + Self([0u8; HASH_LENGTH]) + } + /// Check if the hash is all zeros pub fn is_zero(&self) -> bool { self == &Self::zero() } } -impl From for TmHash { +#[cfg(any(feature = "tendermint", feature = "tendermint-abcipp"))] +impl From for crate::tendermint::abci::transaction::Hash { fn from(hash: Hash) -> Self { - TmHash::Sha256(hash.0) + Self::new(hash.0) + } +} + +#[cfg(any(feature = "tendermint", feature = "tendermint-abcipp"))] +impl From for crate::tendermint::Hash { + fn from(hash: Hash) -> Self { + Self::Sha256(hash.0) } } @@ -148,22 +148,31 @@ impl From for TreeHash { } } -#[cfg(test)] -mod tests { - use proptest::prelude::*; - use proptest::string::{string_regex, RegexGeneratorStrategy}; +impl Value for Hash { + fn as_slice(&self) -> &[u8] { + self.0.as_slice() + } - use super::*; + fn zero() -> Self { + Hash([0u8; HASH_LENGTH]) + } +} - /// Returns a proptest strategy that yields hex encoded hashes. - fn hex_encoded_hash_strat() -> RegexGeneratorStrategy { - string_regex(r"[a-fA-F0-9]{64}").unwrap() +impl From for H256 { + fn from(hash: Hash) -> Self { + hash.0.into() } +} - proptest! { - #[test] - fn test_hash_string(hex_hash in hex_encoded_hash_strat()) { - let _: Hash = hex_hash.try_into().unwrap(); - } +impl From for Hash { + fn from(hash: H256) -> Self { + Self(hash.into()) + } +} + +impl From<&H256> for Hash { + fn from(hash: &H256) -> Self { + let hash = hash.to_owned(); + Self(hash.into()) } } diff --git a/core/src/types/ibc.rs b/core/src/types/ibc.rs new file mode 100644 index 0000000000..3d537cb025 --- /dev/null +++ b/core/src/types/ibc.rs @@ -0,0 +1,74 @@ +//! 
IBC event without IBC-related data types + +use std::collections::HashMap; + +use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; + +/// Wrapped IbcEvent +#[derive( + Debug, Clone, BorshSerialize, BorshDeserialize, BorshSchema, PartialEq, Eq, +)] +pub struct IbcEvent { + /// The IBC event type + pub event_type: String, + /// The attributes of the IBC event + pub attributes: HashMap, +} + +impl std::fmt::Display for IbcEvent { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let attributes = self + .attributes + .iter() + .map(|(k, v)| format!("{}: {};", k, v)) + .collect::>() + .join(", "); + write!( + f, + "Event type: {}, Attributes: {}", + self.event_type, attributes + ) + } +} + +#[cfg(any(feature = "abciplus", feature = "abcipp"))] +mod ibc_rs_conversion { + use std::collections::HashMap; + + use thiserror::Error; + + use super::IbcEvent; + use crate::ibc::events::{Error as IbcEventError, IbcEvent as RawIbcEvent}; + use crate::tendermint::abci::Event as AbciEvent; + + #[allow(missing_docs)] + #[derive(Error, Debug)] + pub enum Error { + #[error("IBC event error: {0}")] + IbcEvent(IbcEventError), + } + + /// Conversion functions result + pub type Result = std::result::Result; + + impl TryFrom for IbcEvent { + type Error = Error; + + fn try_from(e: RawIbcEvent) -> Result { + let event_type = e.event_type().as_str().to_string(); + let abci_event = AbciEvent::try_from(e).map_err(Error::IbcEvent)?; + let attributes: HashMap<_, _> = abci_event + .attributes + .iter() + .map(|tag| (tag.key.to_string(), tag.value.to_string())) + .collect(); + Ok(Self { + event_type, + attributes, + }) + } + } +} + +#[cfg(any(feature = "abciplus", feature = "abcipp"))] +pub use ibc_rs_conversion::*; diff --git a/core/src/types/internal.rs b/core/src/types/internal.rs new file mode 100644 index 0000000000..848c09bec1 --- /dev/null +++ b/core/src/types/internal.rs @@ -0,0 +1,82 @@ +//! Shared internal types between the host env and guest (wasm). + +use borsh::{BorshDeserialize, BorshSerialize}; + +/// A result of a wasm call to host functions that may fail. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum HostEnvResult { + /// A success + Success = 1, + /// A non-fatal failure does **not** interrupt WASM execution + Fail = -1, +} + +/// Key-value pair represents data from account's subspace. +/// It is used for prefix iterator's WASM host_env functions. 
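+///
+/// Illustrative sketch (not part of this patch): the host passes each
+/// iterator element to the guest as Borsh-encoded `KeyVal` bytes, which the
+/// guest decodes:
+///
+/// ```ignore
+/// let kv = KeyVal::try_from_slice(&bytes)?;
+/// println!("{} => {} bytes", kv.key, kv.val.len());
+/// ```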
+#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)] +pub struct KeyVal { + /// The storage key + pub key: String, + /// The value as arbitrary bytes + pub val: Vec, +} + +impl HostEnvResult { + /// Convert result to `i64`, which can be passed to wasm + pub fn to_i64(self) -> i64 { + self as _ + } + + /// Check if the given result as `i64` is a success + pub fn is_success(int: i64) -> bool { + int == Self::Success.to_i64() + } + + /// Check if the given result as `i64` is a non-fatal failure + pub fn is_fail(int: i64) -> bool { + int == Self::Fail.to_i64() + } +} + +impl From for HostEnvResult { + fn from(success: bool) -> Self { + if success { Self::Success } else { Self::Fail } + } +} + +#[cfg(feature = "ferveo-tpke")] +mod tx_queue { + use borsh::{BorshDeserialize, BorshSerialize}; + + use crate::types::transaction::WrapperTx; + + #[derive(Default, Debug, Clone, BorshDeserialize, BorshSerialize)] + /// Wrapper txs to be decrypted in the next block proposal + pub struct TxQueue(std::collections::VecDeque); + + impl TxQueue { + /// Add a new wrapper at the back of the queue + pub fn push(&mut self, wrapper: WrapperTx) { + self.0.push_back(wrapper); + } + + /// Remove the wrapper at the head of the queue + pub fn pop(&mut self) -> Option { + self.0.pop_front() + } + + /// Get an iterator over the queue + pub fn iter(&self) -> impl std::iter::Iterator { + self.0.iter() + } + + /// Check if there are any txs in the queue + #[allow(dead_code)] + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + } +} + +#[cfg(feature = "ferveo-tpke")] +pub use tx_queue::TxQueue; diff --git a/shared/src/types/key/common.rs b/core/src/types/key/common.rs similarity index 96% rename from shared/src/types/key/common.rs rename to core/src/types/key/common.rs index fc4a4732dd..e928579367 100644 --- a/shared/src/types/key/common.rs +++ b/core/src/types/key/common.rs @@ -5,15 +5,14 @@ use std::str::FromStr; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use data_encoding::HEXLOWER; -use namada_proof_of_stake::types::PublicKeyTmRawHash; #[cfg(feature = "rand")] use rand::{CryptoRng, RngCore}; use serde::{Deserialize, Serialize}; use super::{ - ed25519, secp256k1, tm_consensus_key_raw_hash, ParsePublicKeyError, - ParseSecretKeyError, ParseSignatureError, RefTo, SchemeType, - SigScheme as SigSchemeTrait, VerifySigError, + ed25519, secp256k1, ParsePublicKeyError, ParseSecretKeyError, + ParseSignatureError, RefTo, SchemeType, SigScheme as SigSchemeTrait, + VerifySigError, }; /// Public key @@ -324,9 +323,3 @@ impl super::SigScheme for SigScheme { } } } - -impl PublicKeyTmRawHash for PublicKey { - fn tm_raw_hash(&self) -> String { - tm_consensus_key_raw_hash(self) - } -} diff --git a/shared/src/types/key/dkg_session_keys.rs b/core/src/types/key/dkg_session_keys.rs similarity index 100% rename from shared/src/types/key/dkg_session_keys.rs rename to core/src/types/key/dkg_session_keys.rs diff --git a/shared/src/types/key/ed25519.rs b/core/src/types/key/ed25519.rs similarity index 100% rename from shared/src/types/key/ed25519.rs rename to core/src/types/key/ed25519.rs diff --git a/core/src/types/key/mod.rs b/core/src/types/key/mod.rs new file mode 100644 index 0000000000..8836b56981 --- /dev/null +++ b/core/src/types/key/mod.rs @@ -0,0 +1,537 @@ +//! 
Cryptographic keys + +pub mod common; +/// Elliptic curve keys for the DKG +pub mod dkg_session_keys; +pub mod ed25519; +pub mod secp256k1; + +use std::fmt::{Debug, Display}; +use std::hash::Hash; +use std::str::FromStr; + +use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use data_encoding::HEXUPPER; +#[cfg(feature = "rand")] +use rand::{CryptoRng, RngCore}; +use serde::{Deserialize, Serialize}; +use sha2::{Digest, Sha256}; +use thiserror::Error; + +use super::address::Address; +use super::storage::{self, DbKeySeg, Key, KeySeg}; +use crate::types::address; + +const PK_STORAGE_KEY: &str = "public_key"; +const PROTOCOL_PK_STORAGE_KEY: &str = "protocol_public_key"; + +/// Obtain a storage key for user's public key. +pub fn pk_key(owner: &Address) -> storage::Key { + Key::from(owner.to_db_key()) + .push(&PK_STORAGE_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Check if the given storage key is a public key. If it is, returns the owner. +pub fn is_pk_key(key: &Key) -> Option<&Address> { + match &key.segments[..] { + [DbKeySeg::AddressSeg(owner), DbKeySeg::StringSeg(key)] + if key == PK_STORAGE_KEY => + { + Some(owner) + } + _ => None, + } +} + +/// Obtain a storage key for user's protocol public key. +pub fn protocol_pk_key(owner: &Address) -> storage::Key { + Key::from(owner.to_db_key()) + .push(&PROTOCOL_PK_STORAGE_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Check if the given storage key is a public key. If it is, returns the owner. +pub fn is_protocol_pk_key(key: &Key) -> Option<&Address> { + match &key.segments[..] { + [DbKeySeg::AddressSeg(owner), DbKeySeg::StringSeg(key)] + if key == PROTOCOL_PK_STORAGE_KEY => + { + Some(owner) + } + _ => None, + } +} + +/// Represents an error in signature verification +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum VerifySigError { + #[error("Signature verification failed: {0}")] + SigVerifyError(String), + #[error("Signature verification failed to encode the data: {0}")] + DataEncodingError(std::io::Error), + #[error("Transaction doesn't have any data with a signature.")] + MissingData, + #[error("Signature belongs to a different scheme from the public key.")] + MismatchedScheme, +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum ParsePublicKeyError { + #[error("Invalid public key hex: {0}")] + InvalidHex(data_encoding::DecodeError), + #[error("Invalid public key encoding: {0}")] + InvalidEncoding(std::io::Error), + #[error("Parsed public key does not belong to desired scheme")] + MismatchedScheme, +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum ParseSignatureError { + #[error("Invalid signature hex: {0}")] + InvalidHex(data_encoding::DecodeError), + #[error("Invalid signature encoding: {0}")] + InvalidEncoding(std::io::Error), + #[error("Parsed signature does not belong to desired scheme")] + MismatchedScheme, +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum ParseSecretKeyError { + #[error("Invalid secret key hex: {0}")] + InvalidHex(data_encoding::DecodeError), + #[error("Invalid secret key encoding: {0}")] + InvalidEncoding(std::io::Error), + #[error("Parsed secret key does not belong to desired scheme")] + MismatchedScheme, +} + +/// A value-to-value conversion that consumes the input value. + +pub trait RefTo { + /// Performs the conversion. + fn ref_to(&self) -> T; +} + +/// Simple and safe type conversions that may fail in a controlled +/// way under some circumstances. 
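+///
+/// Illustrative sketch (not part of this patch), mirroring `TryFrom` but
+/// borrowing its input; a hypothetical impl:
+///
+/// ```ignore
+/// impl TryFromRef<[u8]> for MyKey {
+///     type Error = ParsePublicKeyError;
+///     fn try_from_ref(value: &[u8]) -> Result<Self, Self::Error> {
+///         MyKey::try_from_slice(value)
+///             .map_err(ParsePublicKeyError::InvalidEncoding)
+///     }
+/// }
+/// ```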
+ +pub trait TryFromRef: Sized { + /// The type returned in the event of a conversion error. + type Error; + /// Performs the conversion. + fn try_from_ref(value: &T) -> Result; +} + +/// Type capturing signature scheme IDs +#[derive(PartialEq, Eq, Copy, Clone, Debug)] +pub enum SchemeType { + /// Type identifier for Ed25519 scheme + Ed25519, + /// Type identifier for Secp256k1 scheme + Secp256k1, + /// Type identifier for Common + Common, +} + +impl FromStr for SchemeType { + type Err = (); + + fn from_str(input: &str) -> Result { + match input.to_lowercase().as_str() { + "ed25519" => Ok(Self::Ed25519), + "secp256k1" => Ok(Self::Secp256k1), + "common" => Ok(Self::Common), + _ => Err(()), + } + } +} + +/// Represents a signature + +pub trait Signature: + Hash + PartialOrd + Serialize + BorshSerialize + BorshDeserialize + BorshSchema +{ + /// The scheme type of this implementation + const TYPE: SchemeType; + /// Convert from one Signature type to another + fn try_from_sig( + sig: &SIG, + ) -> Result { + if SIG::TYPE == Self::TYPE { + let sig_arr = sig.try_to_vec().unwrap(); + let res = Self::try_from_slice(sig_arr.as_ref()); + res.map_err(ParseSignatureError::InvalidEncoding) + } else { + Err(ParseSignatureError::MismatchedScheme) + } + } + /// Convert from self to another SecretKey type + fn try_to_sig(&self) -> Result { + SIG::try_from_sig(self) + } +} + +/// Represents a public key + +pub trait PublicKey: + BorshSerialize + + BorshDeserialize + + BorshSchema + + Ord + + Clone + + Display + + Debug + + PartialOrd + + FromStr + + Hash + + Send + + Sync +{ + /// The scheme type of this implementation + const TYPE: SchemeType; + /// Convert from one PublicKey type to another + fn try_from_pk( + pk: &PK, + ) -> Result { + if Self::TYPE == PK::TYPE { + let pk_arr = pk.try_to_vec().unwrap(); + let res = Self::try_from_slice(pk_arr.as_ref()); + res.map_err(ParsePublicKeyError::InvalidEncoding) + } else { + Err(ParsePublicKeyError::MismatchedScheme) + } + } + /// Convert from self to another PublicKey type + fn try_to_pk(&self) -> Result { + PK::try_from_pk(self) + } +} + +/// Represents a secret key + +pub trait SecretKey: + BorshSerialize + + BorshDeserialize + + BorshSchema + + Display + + Debug + + RefTo + + FromStr + + Clone + + Sync + + Send +{ + /// The scheme type of this implementation + const TYPE: SchemeType; + /// Represents the public part of this keypair + type PublicKey: PublicKey; + /// Convert from one SecretKey type to self + fn try_from_sk( + sk: &SK, + ) -> Result { + if SK::TYPE == Self::TYPE { + let sk_vec = sk.try_to_vec().unwrap(); + let res = Self::try_from_slice(sk_vec.as_ref()); + res.map_err(ParseSecretKeyError::InvalidEncoding) + } else { + Err(ParseSecretKeyError::MismatchedScheme) + } + } + /// Convert from self to another SecretKey type + fn try_to_sk(&self) -> Result { + SK::try_from_sk(self) + } +} + +/// Represents a digital signature scheme. More precisely this trait captures +/// the concepts of public keys, private keys, and signatures as well as +/// the algorithms over these concepts to generate keys, sign messages, and +/// verify signatures. + +pub trait SigScheme: Eq + Ord + Debug + Serialize + Default { + /// Represents the signature for this scheme + type Signature: 'static + Signature; + /// Represents the public key for this scheme + type PublicKey: 'static + PublicKey; + /// Represents the secret key for this scheme + type SecretKey: 'static + SecretKey; + /// The scheme type of this implementation + const TYPE: SchemeType; + /// Generate a keypair. 
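+    ///
+    /// Illustrative sketch (not part of this patch):
+    ///
+    /// ```ignore
+    /// let sk = ed25519::SigScheme::generate(&mut rand::thread_rng());
+    /// let sig = ed25519::SigScheme::sign(&sk, b"msg");
+    /// ```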
+ #[cfg(feature = "rand")] + fn generate(csprng: &mut R) -> Self::SecretKey + where + R: CryptoRng + RngCore; + /// Sign the data with a key. + fn sign( + keypair: &Self::SecretKey, + data: impl AsRef<[u8]>, + ) -> Self::Signature; + /// Check that the public key matches the signature on the given data. + fn verify_signature( + pk: &Self::PublicKey, + data: &T, + sig: &Self::Signature, + ) -> Result<(), VerifySigError>; + /// Check that the public key matches the signature on the given raw data. + fn verify_signature_raw( + pk: &Self::PublicKey, + data: &[u8], + sig: &Self::Signature, + ) -> Result<(), VerifySigError>; +} + +/// Public key hash derived from `common::Key` borsh encoded bytes (hex string +/// of the first 40 chars of sha256 hash) +#[derive( + Debug, + Clone, + BorshSerialize, + BorshDeserialize, + BorshSchema, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + Serialize, + Deserialize, +)] +#[serde(transparent)] +pub struct PublicKeyHash(pub(crate) String); + +const PKH_HASH_LEN: usize = address::HASH_LEN; + +impl From for String { + fn from(pkh: PublicKeyHash) -> Self { + pkh.0 + } +} + +impl Display for PublicKeyHash { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +impl FromStr for PublicKeyHash { + type Err = PkhFromStringError; + + fn from_str(s: &str) -> Result { + if s.len() != PKH_HASH_LEN { + return Err(Self::Err::UnexpectedLen(s.len())); + } + Ok(Self(s.to_owned())) + } +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum PkhFromStringError { + #[error("Wrong PKH len. Expected {PKH_HASH_LEN}, got {0}")] + UnexpectedLen(usize), +} + +impl From<&PK> for PublicKeyHash { + fn from(pk: &PK) -> Self { + let pk_bytes = + pk.try_to_vec().expect("Public key encoding shouldn't fail"); + let mut hasher = Sha256::new(); + hasher.update(pk_bytes); + // hex of the first 40 chars of the hash + PublicKeyHash(format!( + "{:.width$X}", + hasher.finalize(), + width = PKH_HASH_LEN + )) + } +} + +/// Derive Tendermint raw hash from the public key +pub trait PublicKeyTmRawHash { + /// Derive Tendermint raw hash from the public key + fn tm_raw_hash(&self) -> String; +} + +impl PublicKeyTmRawHash for common::PublicKey { + fn tm_raw_hash(&self) -> String { + tm_consensus_key_raw_hash(self) + } +} + +/// Convert validator's consensus key into address raw hash that is compatible +/// with Tendermint +pub fn tm_consensus_key_raw_hash(pk: &common::PublicKey) -> String { + match pk { + common::PublicKey::Ed25519(pk) => { + let pkh = PublicKeyHash::from(pk); + pkh.0 + } + common::PublicKey::Secp256k1(pk) => { + let pkh = PublicKeyHash::from(pk); + pkh.0 + } + } +} + +/// Convert Tendermint validator's raw hash bytes to Anoma raw hash string +pub fn tm_raw_hash_to_string(raw_hash: impl AsRef<[u8]>) -> String { + HEXUPPER.encode(raw_hash.as_ref()) +} + +/// Helpers for testing with keys. 
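+///
+/// Illustrative sketch (not part of this patch):
+///
+/// ```ignore
+/// let sk = testing::gen_keypair::<ed25519::SigScheme>();
+/// let pk = sk.ref_to();
+/// ```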
+
+/// Helpers for testing with keys.
+#[cfg(any(test, feature = "testing"))]
+pub mod testing {
+    use borsh::BorshDeserialize;
+    use proptest::prelude::*;
+    use rand::prelude::{StdRng, ThreadRng};
+    use rand::{thread_rng, SeedableRng};
+
+    use super::SigScheme;
+    use crate::types::key::*;
+
+    /// A keypair for tests
+    pub fn keypair_1() -> <common::SigScheme as SigScheme>::SecretKey {
+        // generated from `cargo test gen_keypair -- --nocapture`
+        let bytes = [
+            33, 82, 91, 186, 100, 168, 220, 158, 185, 140, 63, 172, 3, 88, 52,
+            113, 94, 30, 213, 84, 175, 184, 235, 169, 70, 175, 36, 252, 45,
+            190, 138, 79,
+        ];
+        ed25519::SecretKey::try_from_slice(bytes.as_ref())
+            .unwrap()
+            .try_to_sk()
+            .unwrap()
+    }
+
+    /// A keypair for tests
+    pub fn keypair_2() -> <common::SigScheme as SigScheme>::SecretKey {
+        // generated from `cargo test gen_keypair -- --nocapture`
+        let bytes = [
+            27, 238, 157, 32, 131, 242, 184, 142, 146, 189, 24, 249, 68, 165,
+            205, 71, 213, 158, 25, 253, 52, 217, 87, 52, 171, 225, 110, 131,
+            238, 58, 94, 56,
+        ];
+        ed25519::SecretKey::try_from_slice(bytes.as_ref())
+            .unwrap()
+            .try_to_sk()
+            .unwrap()
+    }
+
+    /// Generate an arbitrary [`super::SecretKey`].
+    pub fn arb_keypair<S: SigScheme>() -> impl Strategy<Value = S::SecretKey> {
+        any::<[u8; 32]>().prop_map(move |seed| {
+            let mut rng = StdRng::from_seed(seed);
+            S::generate(&mut rng)
+        })
+    }
+
+    /// Generate an arbitrary [`common::SecretKey`].
+    pub fn arb_common_keypair() -> impl Strategy<Value = common::SecretKey> {
+        arb_keypair::<ed25519::SigScheme>()
+            .prop_map(|keypair| keypair.try_to_sk().unwrap())
+    }
+
+    /// Generate a new random [`super::SecretKey`].
+    pub fn gen_keypair<S: SigScheme>() -> S::SecretKey {
+        let mut rng: ThreadRng = thread_rng();
+        S::generate(&mut rng)
+    }
+}
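`arb_keypair` derives the key deterministically from an arbitrary 32-byte seed, which keeps proptest failures shrinkable and replayable. A sketch of a property test consuming it (hypothetical test, not part of this diff):

```rust
use proptest::prelude::*;

proptest! {
    /// Any generated keypair should verify its own signatures.
    #[test]
    fn arb_keypair_signs_and_verifies(
        sk in testing::arb_keypair::<ed25519::SigScheme>()
    ) {
        let sig = ed25519::SigScheme::sign(&sk, b"arbitrary message");
        prop_assert!(ed25519::SigScheme::verify_signature_raw(
            &sk.ref_to(),
            b"arbitrary message",
            &sig
        )
        .is_ok());
    }
}
```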
+
+#[cfg(test)]
+macro_rules! sigscheme_test {
+    ($name:ident, $type:ty) => {
+        pub mod $name {
+            use super::*;
+
+            /// Run `cargo test gen_keypair -- --nocapture` to generate a
+            /// keypair.
+            #[test]
+            fn gen_keypair0() {
+                use rand::prelude::ThreadRng;
+                use rand::thread_rng;
+
+                let mut rng: ThreadRng = thread_rng();
+                let keypair = <$type>::generate(&mut rng);
+                println!(
+                    "keypair {:?}",
+                    keypair.try_to_vec().unwrap().as_slice()
+                );
+            }
+
+            /// Run `cargo test gen_keypair -- --nocapture` to generate a
+            /// new keypair.
+            #[test]
+            fn gen_keypair1() {
+                let secret_key = testing::gen_keypair::<$type>();
+                let public_key = secret_key.ref_to();
+                println!("Public key: {}", public_key);
+                println!("Secret key: {}", secret_key);
+            }
+
+            /// Sign a simple message and verify the signature.
+            #[test]
+            fn gen_sign_verify() {
+                use rand::prelude::ThreadRng;
+                use rand::thread_rng;
+
+                let mut rng: ThreadRng = thread_rng();
+                let sk = <$type>::generate(&mut rng);
+                let sig = <$type>::sign(&sk, b"hello");
+                assert!(
+                    <$type>::verify_signature_raw(&sk.ref_to(), b"hello", &sig)
+                        .is_ok()
+                );
+            }
+        }
+    };
+}
+
+#[cfg(test)]
+sigscheme_test! {ed25519_test, ed25519::SigScheme}
+#[cfg(test)]
+sigscheme_test! {secp256k1_test, secp256k1::SigScheme}
+
+#[cfg(test)]
+mod more_tests {
+    use super::*;
+
+    #[test]
+    fn zeroize_keypair_ed25519() {
+        use rand::thread_rng;
+
+        let sk = ed25519::SigScheme::generate(&mut thread_rng());
+        let sk_bytes = sk.0.as_bytes();
+        let len = sk_bytes.len();
+        let ptr = sk_bytes.as_ptr();
+
+        drop(sk);
+
+        assert_eq!(&[0u8; 32], unsafe {
+            core::slice::from_raw_parts(ptr, len)
+        });
+    }
+
+    #[test]
+    fn zeroize_keypair_secp256k1() {
+        use rand::thread_rng;
+
+        let mut sk = secp256k1::SigScheme::generate(&mut thread_rng());
+        let sk_scalar = sk.0.to_scalar_ref();
+        let len = sk_scalar.0.len();
+        let ptr = sk_scalar.0.as_ref().as_ptr();
+
+        let original_data = sk_scalar.0;
+
+        drop(sk);
+
+        assert_ne!(&original_data, unsafe {
+            core::slice::from_raw_parts(ptr, len)
+        });
+    }
+}
diff --git a/shared/src/types/key/secp256k1.rs b/core/src/types/key/secp256k1.rs
similarity index 100%
rename from shared/src/types/key/secp256k1.rs
rename to core/src/types/key/secp256k1.rs
diff --git a/shared/src/types/masp.rs b/core/src/types/masp.rs
similarity index 100%
rename from shared/src/types/masp.rs
rename to core/src/types/masp.rs
diff --git a/core/src/types/mod.rs b/core/src/types/mod.rs
new file mode 100644
index 0000000000..0550060498
--- /dev/null
+++ b/core/src/types/mod.rs
@@ -0,0 +1,15 @@
+//! Types definitions.
+
+pub mod address;
+pub mod chain;
+pub mod governance;
+pub mod hash;
+pub mod ibc;
+pub mod internal;
+pub mod key;
+pub mod masp;
+pub mod storage;
+pub mod time;
+pub mod token;
+pub mod transaction;
+pub mod validity_predicate;
diff --git a/shared/src/types/named_address.rs b/core/src/types/named_address.rs
similarity index 100%
rename from shared/src/types/named_address.rs
rename to core/src/types/named_address.rs
diff --git a/shared/src/types/storage.rs b/core/src/types/storage.rs
similarity index 92%
rename from shared/src/types/storage.rs
rename to core/src/types/storage.rs
index bec847e8aa..bdd68e070e 100644
--- a/shared/src/types/storage.rs
+++ b/core/src/types/storage.rs
@@ -1,27 +1,27 @@
 //! Storage types
 use std::convert::{TryFrom, TryInto};
 use std::fmt::Display;
+use std::io::Write;
 use std::num::ParseIntError;
 use std::ops::{Add, Deref, Div, Mul, Rem, Sub};
 use std::str::FromStr;
 
-use arse_merkle_tree::InternalKey;
+use arse_merkle_tree::traits::Value;
+use arse_merkle_tree::{InternalKey, Key as TreeKey};
 use bit_vec::BitVec;
-use borsh::maybestd::io::Write;
 use borsh::{BorshDeserialize, BorshSchema, BorshSerialize};
 use data_encoding::BASE32HEX_NOPAD;
-use ics23::CommitmentProof;
 use serde::{Deserialize, Deserializer, Serialize, Serializer};
 use thiserror::Error;
 
-#[cfg(feature = "ferveo-tpke")]
-use super::transaction::WrapperTx;
 use crate::bytes::ByteBuf;
-use crate::ledger::storage::IBC_KEY_LIMIT;
 use crate::types::address::{self, Address};
 use crate::types::hash::Hash;
 use crate::types::time::DateTimeUtc;
 
+/// The maximum size of an IBC key (in bytes) allowed in merkle-ized storage
+pub const IBC_KEY_LIMIT: usize = 120;
+
 #[allow(missing_docs)]
 #[derive(Error, Debug)]
 pub enum Error {
@@ -389,6 +389,42 @@ pub struct StringKey {
     pub length: usize,
 }
 
+#[allow(missing_docs)]
+#[derive(Error, Debug)]
+pub enum TreeKeyError {
+    #[error("Invalid key for merkle tree: {0}")]
+    InvalidMerkleKey(String),
+}
+
+impl TreeKey<IBC_KEY_LIMIT> for StringKey {
+    type Error = TreeKeyError;
+
+    fn as_slice(&self) -> &[u8] {
+        &self.original.as_slice()[..self.length]
+    }
+
+    fn try_from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Error> {
+        let mut tree_key = [0u8; IBC_KEY_LIMIT];
+        let mut original = [0u8; IBC_KEY_LIMIT];
+        let mut length = 0;
+        for (i, byte) in bytes.iter().enumerate() {
+            if i >= IBC_KEY_LIMIT {
+                return Err(TreeKeyError::InvalidMerkleKey(
+                    "Input IBC key is too large".into(),
+                ));
+            }
+            original[i] = *byte;
+            tree_key[i] = byte.wrapping_add(1);
+            length += 1;
+        }
+        Ok(Self {
+            original,
+            tree_key: tree_key.into(),
+            length,
+        })
+    }
+}
+
 impl Deref for StringKey {
     type Target = InternalKey<IBC_KEY_LIMIT>;
 
@@ -456,15 +492,13 @@ impl From<TreeBytes> for Vec<u8> {
     }
 }
 
-/// Type of membership proof from a merkle tree
-pub enum MembershipProof {
-    /// ICS23 compliant membership proof
-    ICS23(CommitmentProof),
-}
+impl Value for TreeBytes {
+    fn as_slice(&self) -> &[u8] {
+        self.0.as_slice()
+    }
 
-impl From<CommitmentProof> for MembershipProof {
-    fn from(proof: CommitmentProof) -> Self {
-        Self::ICS23(proof)
+    fn zero() -> Self {
+        TreeBytes::zero()
     }
 }
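`try_from_bytes` copies the input into two fixed `IBC_KEY_LIMIT`-wide buffers, storing the tree key with every byte shifted up by one, and rejects anything longer than 120 bytes while `length` remembers the real key size. A sketch of the boundary behaviour as a hypothetical test (not part of this diff):

```rust
#[test]
fn string_key_respects_ibc_key_limit() {
    use arse_merkle_tree::Key as TreeKey;

    // Exactly at the limit: accepted, `length` records the real key size.
    let max = vec![7u8; IBC_KEY_LIMIT];
    let key = StringKey::try_from_bytes(&max).expect("at the limit is fine");
    assert_eq!(key.length, IBC_KEY_LIMIT);

    // One byte over the limit: rejected with `InvalidMerkleKey`.
    let too_long = vec![7u8; IBC_KEY_LIMIT + 1];
    assert!(StringKey::try_from_bytes(&too_long).is_err());
}
```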
+ #[must_use = "this returns the result of the operation, without modifying \ + the original"] + pub fn sub_or_default(self, rhs: Epoch) -> Self { + self.checked_sub(rhs).unwrap_or_default() + } +} + +impl From for Epoch { + fn from(epoch: u64) -> Self { + Epoch(epoch) + } +} + +impl From for u64 { + fn from(epoch: Epoch) -> Self { + epoch.0 + } +} + +// TODO remove this once it's not being used +impl From for usize { + fn from(epoch: Epoch) -> Self { + epoch.0 as usize + } } impl Add for Epoch { @@ -902,6 +983,15 @@ impl Add for Epoch { } } +// TODO remove this once it's not being used +impl Add for Epoch { + type Output = Self; + + fn add(self, rhs: usize) -> Self::Output { + Epoch(self.0 + rhs as u64) + } +} + impl Sub for Epoch { type Output = Epoch; @@ -910,6 +1000,14 @@ impl Sub for Epoch { } } +impl Sub for Epoch { + type Output = Self; + + fn sub(self, rhs: Epoch) -> Self::Output { + Epoch(self.0 - rhs.0) + } +} + impl Mul for Epoch { type Output = Epoch; @@ -934,14 +1032,6 @@ impl Rem for Epoch { } } -impl Sub for Epoch { - type Output = Epoch; - - fn sub(self, rhs: Self) -> Self::Output { - Self(self.0 - rhs.0) - } -} - impl Add for Epoch { type Output = Epoch; @@ -958,18 +1048,6 @@ impl Mul for Epoch { } } -impl From for u64 { - fn from(epoch: Epoch) -> Self { - epoch.0 - } -} - -impl From for Epoch { - fn from(value: u64) -> Self { - Self(value) - } -} - /// Predecessor block epochs #[derive( Clone, @@ -1050,35 +1128,6 @@ impl Epochs { } } -#[cfg(feature = "ferveo-tpke")] -#[derive(Default, Debug, Clone, BorshDeserialize, BorshSerialize)] -/// Wrapper txs to be decrypted in the next block proposal -pub struct TxQueue(std::collections::VecDeque); - -#[cfg(feature = "ferveo-tpke")] -impl TxQueue { - /// Add a new wrapper at the back of the queue - pub fn push(&mut self, wrapper: WrapperTx) { - self.0.push_back(wrapper); - } - - /// Remove the wrapper at the head of the queue - pub fn pop(&mut self) -> Option { - self.0.pop_front() - } - - /// Get an iterator over the queue - pub fn iter(&self) -> impl std::iter::Iterator { - self.0.iter() - } - - /// Check if there are any txs in the queue - #[allow(dead_code)] - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } -} - /// A value of a storage prefix iterator. #[derive(Debug, Clone, BorshSerialize, BorshDeserialize, BorshSchema)] pub struct PrefixValue { diff --git a/shared/src/types/time.rs b/core/src/types/time.rs similarity index 89% rename from shared/src/types/time.rs rename to core/src/types/time.rs index dfca614c82..a508501d94 100644 --- a/shared/src/types/time.rs +++ b/core/src/types/time.rs @@ -7,10 +7,6 @@ use std::ops::{Add, Sub}; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; pub use chrono::{DateTime, Duration, TimeZone, Utc}; -use crate::tendermint::time::Time; -use crate::tendermint::Error as TendermintError; -use crate::tendermint_proto::google::protobuf; - /// Check if the given `duration` has passed since the given `start. 
diff --git a/shared/src/types/time.rs b/core/src/types/time.rs
similarity index 89%
rename from shared/src/types/time.rs
rename to core/src/types/time.rs
index dfca614c82..a508501d94 100644
--- a/shared/src/types/time.rs
+++ b/core/src/types/time.rs
@@ -7,10 +7,6 @@
 use borsh::{BorshDeserialize, BorshSchema, BorshSerialize};
 pub use chrono::{DateTime, Duration, TimeZone, Utc};
 
-use crate::tendermint::time::Time;
-use crate::tendermint::Error as TendermintError;
-use crate::tendermint_proto::google::protobuf;
-
 /// Check if the given `duration` has passed since the given `start`.
 pub fn duration_passed(
     current: DateTimeUtc,
@@ -198,10 +194,15 @@ impl From<DateTimeUtc> for prost_types::Timestamp {
     }
 }
 
-impl TryFrom<protobuf::Timestamp> for DateTimeUtc {
+#[cfg(any(feature = "tendermint", feature = "tendermint-abcipp"))]
+impl TryFrom<crate::tendermint_proto::google::protobuf::Timestamp>
+    for DateTimeUtc
+{
     type Error = prost_types::TimestampOutOfSystemRangeError;
 
-    fn try_from(timestamp: protobuf::Timestamp) -> Result<Self, Self::Error> {
+    fn try_from(
+        timestamp: crate::tendermint_proto::google::protobuf::Timestamp,
+    ) -> Result<Self, Self::Error> {
         Self::try_from(prost_types::Timestamp {
             seconds: timestamp.seconds,
             nanos: timestamp.nanos,
@@ -230,18 +231,20 @@ impl From<DateTimeUtc> for Rfc3339String {
     }
 }
 
-impl TryFrom<DateTimeUtc> for Time {
-    type Error = TendermintError;
+#[cfg(any(feature = "tendermint", feature = "tendermint-abcipp"))]
+impl TryFrom<DateTimeUtc> for crate::tendermint::time::Time {
+    type Error = crate::tendermint::Error;
 
     fn try_from(dt: DateTimeUtc) -> Result<Self, Self::Error> {
         Self::parse_from_rfc3339(&DateTime::to_rfc3339(&dt.0))
     }
 }
 
-impl TryFrom