Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[draft] Modify the funds of an account in-place #2092

Closed
wants to merge 5 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions apps/src/bin/namada-node/cli.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@

use eyre::{Context, Result};
use namada::types::time::{DateTimeUtc, Utc};
use namada_apps::cli::args::CliToSdk;
use namada_apps::cli::{self, cmds};
use namada_apps::node::ledger;

Expand All @@ -28,6 +29,10 @@ pub fn main() -> Result<()> {
cmds::Ledger::DumpDb(cmds::LedgerDumpDb(args)) => {
ledger::dump_db(ctx.config.ledger, args);
}
cmds::Ledger::SetFunds(cmds::LedgerSetFunds(args)) => {
let args = args.to_sdk(&mut ctx);
ledger::set_funds(ctx.config.ledger, args);
}
cmds::Ledger::RollBack(_) => {
ledger::rollback(ctx.config.ledger)
.wrap_err("Failed to rollback the Namada node")?;
Expand Down
72 changes: 72 additions & 0 deletions apps/src/lib/cli.rs
Original file line number Diff line number Diff line change
Expand Up @@ -945,6 +945,7 @@ pub mod cmds {
RunUntil(LedgerRunUntil),
Reset(LedgerReset),
DumpDb(LedgerDumpDb),
SetFunds(LedgerSetFunds),
RollBack(LedgerRollBack),
}

Expand All @@ -956,10 +957,12 @@ pub mod cmds {
let run = SubCmd::parse(matches).map(Self::Run);
let reset = SubCmd::parse(matches).map(Self::Reset);
let dump_db = SubCmd::parse(matches).map(Self::DumpDb);
let set_funds = SubCmd::parse(matches).map(Self::SetFunds);
let rollback = SubCmd::parse(matches).map(Self::RollBack);
let run_until = SubCmd::parse(matches).map(Self::RunUntil);
run.or(reset)
.or(dump_db)
.or(set_funds)
.or(rollback)
.or(run_until)
// The `run` command is the default if no sub-command given
Expand All @@ -979,6 +982,7 @@ pub mod cmds {
.subcommand(LedgerRunUntil::def())
.subcommand(LedgerReset::def())
.subcommand(LedgerDumpDb::def())
.subcommand(LedgerSetFunds::def())
.subcommand(LedgerRollBack::def())
}
}
Expand Down Expand Up @@ -1061,6 +1065,29 @@ pub mod cmds {
}
}

/// `ledger set-funds` sub-command, wrapping its parsed CLI arguments.
#[derive(Clone, Debug)]
pub struct LedgerSetFunds(pub args::LedgerSetFunds<args::CliTypes>);

impl SubCmd for LedgerSetFunds {
    const CMD: &'static str = "set-funds";

    fn parse(matches: &ArgMatches) -> Option<Self> {
        // Only build the args when this sub-command was actually invoked.
        let sub_matches = matches.subcommand_matches(Self::CMD)?;
        Some(Self(args::LedgerSetFunds::parse(sub_matches)))
    }

    fn def() -> App {
        App::new(Self::CMD)
            .about(
                "Change the funds of an account in-place. Use with \
                 caution, as this modifies state in storage without going \
                 through the consensus protocol.",
            )
            .add_args::<args::LedgerSetFunds<args::CliTypes>>()
    }
}

#[derive(Clone, Debug)]
pub struct LedgerRollBack;

Expand Down Expand Up @@ -2964,6 +2991,51 @@ pub mod args {
}
}

/// CLI/SDK arguments to the `ledger set-funds` sub-command.
#[derive(Clone, Debug)]
pub struct LedgerSetFunds<C: NamadaTypes = SdkTypes> {
    // The account whose token balance will be overwritten
    pub account: C::Address,
    // The token (asset) whose balance entry is modified
    pub token: C::Address,
    // The new balance value to set for `account`
    pub amount: token::Amount,
}

impl CliToSdk<LedgerSetFunds<SdkTypes>> for LedgerSetFunds<CliTypes> {
    /// Resolve CLI address aliases into concrete SDK addresses via the
    /// contextual data; the amount carries over unchanged.
    fn to_sdk(self, ctx: &mut Context) -> LedgerSetFunds<SdkTypes> {
        let Self {
            account,
            token,
            amount,
        } = self;
        LedgerSetFunds {
            account: ctx.get(&account),
            token: ctx.get(&token),
            amount,
        }
    }
}

impl Args for LedgerSetFunds<CliTypes> {
    /// Extract the account, token and amount from parsed CLI matches.
    fn parse(matches: &ArgMatches) -> Self {
        Self {
            account: ADDRESS.parse(matches),
            token: TOKEN.parse(matches),
            amount: AMOUNT.parse(matches).into(),
        }
    }

    /// Declare the CLI arguments accepted by `set-funds`.
    fn def(app: App) -> App {
        let account_arg = ADDRESS
            .def()
            .help("The target account whose funds will be modified.");
        let amount_arg = AMOUNT
            .def()
            .help("The amount of tokens to set for the target account.");
        let token_arg =
            TOKEN.def().help("The asset to be changed in storage.");
        app.arg(account_arg).arg(amount_arg).arg(token_arg)
    }
}

/// Convert CLI args to SDK args, with contextual data.
pub trait CliToSdk<SDK>: Args {
/// Convert CLI args to SDK args, with contextual data.
Expand Down
94 changes: 93 additions & 1 deletion apps/src/lib/node/ledger/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ use byte_unit::Byte;
use futures::future::TryFutureExt;
use namada::core::ledger::governance::storage::keys as governance_storage;
use namada::eth_bridge::ethers::providers::{Http, Provider};
use namada::types::storage::Key;
use namada::types::storage::{BlockHeight, Key};
use once_cell::unsync::Lazy;
use sysinfo::{RefreshKind, System, SystemExt};
use tokio::sync::mpsc;
Expand Down Expand Up @@ -168,6 +168,13 @@ impl Shell {
}
}

/// Determine if the ledger is migrating state.
///
/// Returns the initial block height read from the
/// `NAMADA_INITIAL_HEIGHT` environment variable, or `None` if the
/// variable is unset or does not parse as an unsigned integer.
pub fn migrating_state() -> Option<BlockHeight> {
    const ENV_INITIAL_HEIGHT: &str = "NAMADA_INITIAL_HEIGHT";
    std::env::var(ENV_INITIAL_HEIGHT)
        .ok()
        .and_then(|height| height.parse::<u64>().ok())
        .map(BlockHeight)
}

/// Run the ledger with an async runtime
pub fn run(config: config::Ledger, wasm_dir: PathBuf) {
let logical_cores = num_cpus::get();
Expand Down Expand Up @@ -229,6 +236,91 @@ pub fn dump_db(
db.dump_block(out_file_path, historic, block_height);
}

/// Change the funds of an account in-place. Use with
/// caution, as this modifies state in storage without
/// going through the consensus protocol.
pub fn set_funds(
config: config::Ledger,
args::LedgerSetFunds {
account,
token,
amount,
}: args::LedgerSetFunds,
) {
use namada::ledger::storage::types::{decode, encode};
use namada::ledger::storage::DB;
use namada::types::token;

let cometbft_path = config.cometbft_dir();
let chain_id = config.chain_id;
let db_path = config.shell.db_dir(&chain_id);

let mut db = storage::PersistentDB::open(db_path, None);
let mut batch = Default::default();

let bal_key = token::balance_key(&token, &account);
let minted_key = token::minted_balance_key(&token);

tracing::debug!(
%bal_key,
%minted_key,
%token,
%account,
?amount,
"Changing balance keys"
);

let previous_acc_funds = {
let value: token::Amount = db
.read_subspace_val(&bal_key)
.expect("Failed to read from storage")
.map(|amt| decode(amt).expect("Failed to decode amount"))
.unwrap_or_default();
value
};
let previous_minted_funds = {
let value: token::Amount = db
.read_subspace_val(&minted_key)
.expect("Failed to read from storage")
.map(|amt| decode(amt).expect("Failed to decode amount"))
.unwrap_or_default();
value
};

tracing::debug!(
?previous_acc_funds,
?previous_minted_funds,
"Previous funds in storage"
);

let diff = amount.change() - previous_acc_funds.change();
let new_minted_funds =
token::Amount::from_change(previous_minted_funds.change() + diff);

db.overwrite_entry(&mut batch, None, &bal_key, encode(&amount))
.expect("Failed to overwrite funds in storage");
db.overwrite_entry(
&mut batch,
None,
&minted_key,
encode(&new_minted_funds),
)
.expect("Failed to overwrite funds in storage");

db.exec_batch(batch).expect("Failed to execute write batch");
db.flush(true).expect("Failed to flush data to disk");

// reset CometBFT's state, such that we can resume with a different app hash
tendermint_node::reset_state(cometbft_path)
.expect("Failed to reset CometBFT state");

tracing::debug!(
new_acc_funds = ?amount,
?new_minted_funds,
"New funds in storage"
);
}

/// Roll Namada state back to the previous height
pub fn rollback(config: config::Ledger) -> Result<(), shell::Error> {
shell::rollback(config)
Expand Down
19 changes: 15 additions & 4 deletions apps/src/lib/node/ledger/shell/finalize_block.rs
Original file line number Diff line number Diff line change
Expand Up @@ -120,10 +120,21 @@ where
)?;
}

// Invariant: Has to be applied before `record_slashes_from_evidence`
// because it potentially needs to be able to read validator state from
// previous epoch and jailing validator removes the historical state
self.log_block_rewards(&req.votes, height, current_epoch, new_epoch)?;
// NOTE: this condition is required for hard-forks
// TODO: load block signatures from external sources for
// hard-forks, so we can still distribute PoS rewards
if hints::likely(!req.votes.is_empty()) {
// Invariant: Has to be applied before
// `record_slashes_from_evidence` because it potentially
// needs to be able to read validator state from
// previous epoch and jailing validator removes the historical state
self.log_block_rewards(
&req.votes,
height,
current_epoch,
new_epoch,
)?;
}
if new_epoch {
self.apply_inflation(current_epoch)?;
}
Expand Down
13 changes: 13 additions & 0 deletions apps/src/lib/node/ledger/shell/init_chain.rs
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ use namada_sdk::eth_bridge::EthBridgeStatus;
use super::*;
use crate::facade::tendermint_proto::google::protobuf;
use crate::facade::tower_abci::{request, response};
use crate::node::ledger;
use crate::wasm_loader;

impl<D, H> Shell<D, H>
Expand Down Expand Up @@ -48,6 +49,18 @@ where
current_chain_id, init.chain_id
)));
}
if ledger::migrating_state().is_some() {
let rsp = response::InitChain {
validators: self
.get_abci_validator_updates(true)
.expect("Must be able to set genesis validator set"),
app_hash: self.wl_storage.storage.merkle_root().0.to_vec(),
..Default::default()
};
debug_assert!(!rsp.validators.is_empty());
debug_assert!(!rsp.app_hash.iter().all(|&b| b == 0));
return Ok(rsp);
}
#[cfg(not(any(test, feature = "dev")))]
let genesis =
genesis::genesis(&self.base_dir, &self.wl_storage.storage.chain_id);
Expand Down
8 changes: 8 additions & 0 deletions apps/src/lib/node/ledger/shell/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -82,6 +82,7 @@ use crate::facade::tendermint_proto::abci::{
use crate::facade::tendermint_proto::crypto::public_key;
use crate::facade::tendermint_proto::google::protobuf::Timestamp;
use crate::facade::tower_abci::{request, response};
use crate::node::ledger;
use crate::node::ledger::shims::abcipp_shim_types::shim;
use crate::node::ledger::shims::abcipp_shim_types::shim::response::TxResult;
use crate::node::ledger::{storage, tendermint_node};
Expand Down Expand Up @@ -572,6 +573,13 @@ where
/// Load the Merkle root hash and the height of the last committed block, if
/// any. This is returned when ABCI sends an `info` request.
pub fn last_state(&mut self) -> response::Info {
if ledger::migrating_state().is_some() {
// when migrating state, return a height of 0, such
// that CometBFT calls InitChain and subsequently
// updates the apphash in its state
return response::Info::default();
}

let mut response = response::Info::default();
let result = self.wl_storage.storage.get_state();

Expand Down
54 changes: 54 additions & 0 deletions apps/src/lib/node/ledger/storage/rocksdb.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1432,6 +1432,60 @@ impl DB for RocksDB {

Ok(())
}

#[inline]
fn overwrite_entry(
    &mut self,
    batch: &mut Self::WriteBatch,
    height: Option<BlockHeight>,
    key: &Key,
    new_value: impl AsRef<[u8]>,
) -> Result<()> {
    // Fetch the last committed block height from the state column
    // family; overwriting is currently only supported at this height.
    let last_height: BlockHeight = {
        let state_cf = self.get_column_family(STATE_CF)?;

        types::decode(
            self.0
                .get_cf(state_cf, "height")
                .map_err(|e| Error::DBError(e.to_string()))?
                .ok_or_else(|| {
                    Error::DBError("No block height found".to_string())
                })?,
        )
        .map_err(|e| {
            Error::DBError(format!("Unable to decode block height: {e}"))
        })?
    };
    let desired_height = height.unwrap_or(last_height);

    if desired_height != last_height {
        // NB: fixed typo in the panic message ("hast" -> "has")
        todo!(
            "Overwriting values at heights different than the last \
             committed height has yet to be implemented"
        );
    }
    // NB: the following code only updates values
    // written to at the last committed height

    let val = new_value.as_ref();

    // update subspace value
    let subspace_cf = self.get_column_family(SUBSPACE_CF)?;
    let subspace_key = key.to_string();

    batch.0.put_cf(subspace_cf, subspace_key, val);

    // update value stored in diffs, so readers of the diffs at the last
    // committed height observe the overwritten value as well
    let diffs_cf = self.get_column_family(DIFFS_CF)?;
    let diffs_key = Key::from(last_height.to_db_key())
        .with_segment("new".to_owned())
        .join(key)
        .to_string();

    batch.0.put_cf(diffs_cf, diffs_key, val);

    Ok(())
}
}

impl<'iter> DBIter<'iter> for RocksDB {
Expand Down
Loading