From a0e8d6dc64910a4255cb74d883b8ae90cc6913e8 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 22 Jun 2022 21:30:34 -0500 Subject: [PATCH 01/25] GH-293 Add similar logging for failed and successful scheduled transactions as input transactions. --- plugins/producer_plugin/producer_plugin.cpp | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 8e4a7919cb..a4f4ddd85c 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -581,7 +581,6 @@ class producer_plugin_impl : public std::enable_shared_from_thisget_log_trx_trace( std::get(response) ); } - }; fc::exception_ptr except_ptr; // rejected @@ -2157,6 +2156,14 @@ void producer_plugin_impl::process_scheduled_and_incoming_trxs( const fc::time_p break; } + auto get_first_authorizer = [&](const transaction_trace_ptr& trace) { + for( const auto& a : trace->action_traces ) { + for( const auto& u : a.act.authorization ) + return u.actor; + } + return account_name(); + }; + try { fc::microseconds max_trx_time = fc::milliseconds( _max_transaction_time_ms.load() ); if( max_trx_time.count() < 0 ) max_trx_time = fc::microseconds::maximum(); @@ -2169,11 +2176,23 @@ void producer_plugin_impl::process_scheduled_and_incoming_trxs( const fc::time_p break; } } else { + fc_dlog(_trx_failed_trace_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING scheduled tx: ${txid}, auth: ${a} : ${why} ", + ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) + ("txid", trx_id)("a", get_first_authorizer(trace))("why", trace->except->what())); + fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING scheduled tx: ${entire_trace}", + ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) + ("entire_trace", chain_plug->get_log_trx_trace(trace))); // this failed our configured maximum transaction time, we don't want to replay it add it to a blacklist _blacklisted_transactions.insert(transaction_id_with_expiry{trx_id, sch_expiration}); num_failed++; } } else { + fc_dlog(_trx_successful_trace_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING scheduled tx: ${txid}, auth: ${a}, cpu: ${cpu}", + ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) + ("txid", trx_id)("a", get_first_authorizer(trace))("cpu", trace->receipt ? trace->receipt->cpu_usage_us : 0)); + fc_dlog(_trx_trace_success_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING scheduled tx: ${entire_trace}", + ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) + ("entire_trace", chain_plug->get_log_trx_trace(trace))); num_applied++; } } LOG_AND_DROP(); From a7cc6266dd634f03905aa8cd53ec3e3589bdf5fd Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Sat, 25 Jun 2022 10:18:02 -0500 Subject: [PATCH 02/25] GH-292 Add additional logging to Produced and Received block log messages. 
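Callers of controller::push_block() now supply a controller::block_report that apply_block() fills with the block's accumulated net usage, CPU usage, elapsed time and total apply time. A minimal sketch of the updated call pattern (mirroring the tester changes below; all names are taken from this patch):

    controller::block_report br;
    auto bsf = control->create_block_state_future( block->calculate_id(), block );
    control->push_block( br, bsf, forked_branch_callback{}, trx_meta_cache_lookup{} );
    // br.total_net_usage, br.total_cpu_usage_us, br.total_elapsed_time and br.total_time
    // now describe the applied block and feed the net/cpu/elapsed/time fields of the
    // "Received block ..." log message in producer_plugin.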
--- libraries/chain/controller.cpp | 44 +++++++++++++------ .../chain/include/eosio/chain/controller.hpp | 10 ++++- .../testing/include/eosio/testing/tester.hpp | 9 ++-- libraries/testing/tester.cpp | 6 ++- plugins/producer_plugin/producer_plugin.cpp | 30 +++++++++---- unittests/block_tests.cpp | 6 ++- unittests/forked_tests.cpp | 3 +- 7 files changed, 77 insertions(+), 31 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 384247db91..0dc854dfa1 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -417,7 +417,8 @@ struct controller_impl { for( auto bitr = branch.rbegin(); bitr != branch.rend(); ++bitr ) { if( read_mode == db_read_mode::IRREVERSIBLE ) { - apply_block( *bitr, controller::block_status::complete, trx_meta_cache_lookup{} ); + controller::block_report br; + apply_block( br, *bitr, controller::block_status::complete, trx_meta_cache_lookup{} ); head = (*bitr); fork_db.mark_valid( head ); } @@ -718,7 +719,8 @@ struct controller_impl { pending_head = fork_db.pending_head() ) { wlog( "applying branch from fork database ending with block: ${id}", ("id", pending_head->id) ); - maybe_switch_forks( pending_head, controller::block_status::complete, forked_branch_callback{}, trx_meta_cache_lookup{} ); + controller::block_report br; + maybe_switch_forks( br, pending_head, controller::block_status::complete, forked_branch_callback{}, trx_meta_cache_lookup{} ); } } } @@ -1943,9 +1945,11 @@ struct controller_impl { } - void apply_block( const block_state_ptr& bsp, controller::block_status s, const trx_meta_cache_lookup& trx_lookup ) + void apply_block( controller::block_report& br, const block_state_ptr& bsp, controller::block_status s, + const trx_meta_cache_lookup& trx_lookup ) { try { try { + auto start = fc::time_point::now(); const signed_block_ptr& b = bsp->block; const auto& new_protocol_feature_activations = bsp->get_new_protocol_feature_activations(); @@ -2021,6 +2025,12 @@ struct controller_impl { EOS_ASSERT( r == static_cast(receipt), block_validate_exception, "receipt does not match, ${lhs} != ${rhs}", ("lhs", r)("rhs", static_cast(receipt)) ); + + if( trace ) { + br.total_net_usage += trace->net_usage; + if( trace->receipt ) br.total_cpu_usage_us += trace->receipt->cpu_usage_us; + br.total_elapsed_time += trace->elapsed; + } } // validated in create_block_state_future() @@ -2045,6 +2055,7 @@ struct controller_impl { pending->_block_stage = completed_block{ bsp }; commit_block(false); + br.total_time = fc::time_point::now() - start; return; } catch ( const std::bad_alloc& ) { throw; @@ -2096,7 +2107,8 @@ struct controller_impl { } ); } - void push_block( std::future& block_state_future, + void push_block( controller::block_report& br, + std::future& block_state_future, const forked_branch_callback& forked_branch_cb, const trx_meta_cache_lookup& trx_lookup ) { controller::block_status s = controller::block_status::complete; @@ -2126,7 +2138,7 @@ struct controller_impl { emit( self.accepted_block_header, bsp ); if( read_mode != db_read_mode::IRREVERSIBLE ) { - maybe_switch_forks( fork_db.pending_head(), s, forked_branch_cb, trx_lookup ); + maybe_switch_forks( br, fork_db.pending_head(), s, forked_branch_cb, trx_lookup ); } else { log_irreversible(); } @@ -2170,8 +2182,9 @@ struct controller_impl { emit( self.accepted_block_header, bsp ); + controller::block_report br; if( s == controller::block_status::irreversible ) { - apply_block( bsp, s, trx_meta_cache_lookup{} ); + apply_block( br, bsp, s, 
trx_meta_cache_lookup{} ); head = bsp; // On replay, log_irreversible is not called and so no irreversible_block signal is emitted. @@ -2185,19 +2198,19 @@ struct controller_impl { } else { EOS_ASSERT( read_mode != db_read_mode::IRREVERSIBLE, block_validate_exception, "invariant failure: cannot replay reversible blocks while in irreversible mode" ); - maybe_switch_forks( bsp, s, forked_branch_callback{}, trx_meta_cache_lookup{} ); + maybe_switch_forks( br, bsp, s, forked_branch_callback{}, trx_meta_cache_lookup{} ); } } FC_LOG_AND_RETHROW( ) } - void maybe_switch_forks( const block_state_ptr& new_head, controller::block_status s, + void maybe_switch_forks( controller::block_report& br, const block_state_ptr& new_head, controller::block_status s, const forked_branch_callback& forked_branch_cb, const trx_meta_cache_lookup& trx_lookup ) { bool head_changed = true; if( new_head->header.previous == head->id ) { try { - apply_block( new_head, s, trx_lookup ); + apply_block( br, new_head, s, trx_lookup ); fork_db.mark_valid( new_head ); head = new_head; } catch ( const std::exception& e ) { @@ -2228,8 +2241,9 @@ struct controller_impl { for( auto ritr = branches.first.rbegin(); ritr != branches.first.rend(); ++ritr ) { auto except = std::exception_ptr{}; try { - apply_block( *ritr, (*ritr)->is_valid() ? controller::block_status::validated - : controller::block_status::complete, trx_lookup ); + br = controller::block_report{}; + apply_block( br, *ritr, (*ritr)->is_valid() ? controller::block_status::validated + : controller::block_status::complete, trx_lookup ); fork_db.mark_valid( *ritr ); head = *ritr; } catch ( const std::bad_alloc& ) { @@ -2260,7 +2274,8 @@ struct controller_impl { // re-apply good blocks for( auto ritr = branches.second.rbegin(); ritr != branches.second.rend(); ++ritr ) { - apply_block( *ritr, controller::block_status::validated /* we previously validated these blocks*/, trx_lookup ); + br = controller::block_report{}; + apply_block( br, *ritr, controller::block_status::validated /* we previously validated these blocks*/, trx_lookup ); head = *ritr; } std::rethrow_exception(except); @@ -2820,11 +2835,12 @@ std::future controller::create_block_state_future( const block_ return my->create_block_state_future( id, b ); } -void controller::push_block( std::future& block_state_future, +void controller::push_block( controller::block_report& br, + std::future& block_state_future, const forked_branch_callback& forked_branch_cb, const trx_meta_cache_lookup& trx_lookup ) { validate_db_available_size(); - my->push_block( block_state_future, forked_branch_cb, trx_lookup ); + my->push_block( br, block_state_future, forked_branch_cb, trx_lookup ); } transaction_trace_ptr controller::push_transaction( const transaction_metadata_ptr& trx, diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index b95b9e33ea..67996ccd21 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -170,12 +170,20 @@ namespace eosio { namespace chain { std::future create_block_state_future( const block_id_type& id, const signed_block_ptr& b ); + struct block_report { + size_t total_net_usage = 0; + size_t total_cpu_usage_us = 0; + fc::microseconds total_elapsed_time{}; + fc::microseconds total_time{}; + }; /** + * @param br returns statistics for block * @param block_state_future provide from call to create_block_state_future * @param cb calls cb with forked applied transactions for each 
forked block * @param trx_lookup user provided lookup function for externally cached transaction_metadata */ - void push_block( std::future& block_state_future, + void push_block( block_report& br, + std::future& block_state_future, const forked_branch_callback& cb, const trx_meta_cache_lookup& trx_lookup ); diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp index 67ede1bc49..975220063f 100644 --- a/libraries/testing/include/eosio/testing/tester.hpp +++ b/libraries/testing/include/eosio/testing/tester.hpp @@ -597,7 +597,8 @@ namespace eosio { namespace testing { signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) )override { auto sb = _produce_block(skip_time, false); auto bsf = validating_node->create_block_state_future( sb->calculate_id(), sb ); - validating_node->push_block( bsf, forked_branch_callback{}, trx_meta_cache_lookup{} ); + controller::block_report br; + validating_node->push_block( br, bsf, forked_branch_callback{}, trx_meta_cache_lookup{} ); return sb; } @@ -608,14 +609,16 @@ namespace eosio { namespace testing { void validate_push_block(const signed_block_ptr& sb) { auto bs = validating_node->create_block_state_future( sb->calculate_id(), sb ); - validating_node->push_block( bs, forked_branch_callback{}, trx_meta_cache_lookup{} ); + controller::block_report br; + validating_node->push_block( br, bs, forked_branch_callback{}, trx_meta_cache_lookup{} ); } signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) )override { unapplied_transactions.add_aborted( control->abort_block() ); auto sb = _produce_block(skip_time, true); auto bsf = validating_node->create_block_state_future( sb->calculate_id(), sb ); - validating_node->push_block( bsf, forked_branch_callback{}, trx_meta_cache_lookup{} ); + controller::block_report br; + validating_node->push_block( br, bsf, forked_branch_callback{}, trx_meta_cache_lookup{} ); return sb; } diff --git a/libraries/testing/tester.cpp b/libraries/testing/tester.cpp index 27042d491a..b65102652f 100644 --- a/libraries/testing/tester.cpp +++ b/libraries/testing/tester.cpp @@ -322,7 +322,8 @@ namespace eosio { namespace testing { void base_tester::push_block(signed_block_ptr b) { auto bsf = control->create_block_state_future(b->calculate_id(), b); unapplied_transactions.add_aborted( control->abort_block() ); - control->push_block( bsf, [this]( const branch_type& forked_branch ) { + controller::block_report br; + control->push_block( br, bsf, [this]( const branch_type& forked_branch ) { unapplied_transactions.add_forked( forked_branch ); }, [this]( const transaction_id_type& id ) { return unapplied_transactions.get_trx( id ); @@ -1050,7 +1051,8 @@ namespace eosio { namespace testing { if( block ) { //&& !b.control->is_known_block(block->id()) ) { auto bsf = b.control->create_block_state_future( block->calculate_id(), block ); b.control->abort_block(); - b.control->push_block(bsf, forked_branch_callback{}, trx_meta_cache_lookup{}); //, eosio::chain::validation_steps::created_block); + controller::block_report br; + b.control->push_block(br, bsf, forked_branch_callback{}, trx_meta_cache_lookup{}); //, eosio::chain::validation_steps::created_block); } } }; diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index a4f4ddd85c..5f6a374ebb 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ 
b/plugins/producer_plugin/producer_plugin.cpp @@ -470,8 +470,9 @@ class producer_plugin_impl : public std::enable_shared_from_thistimestamp < fc::minutes(5) || (blk_num % 1000 == 0) ) { - ilog("Received block ${id}... #${n} @ ${t} signed by ${p} [trxs: ${count}, lib: ${lib}, conf: ${confs}, latency: ${latency} ms]", + ilog("Received block ${id}... #${n} @ ${t} signed by ${p} " + "[trxs: ${count}, lib: ${lib}, conf: ${confs}, net: ${net}, cpu: ${cpu}, elapsed: ${elapsed}, time: ${time}, latency: ${latency} ms]", ("p",block->producer)("id",id.str().substr(8,16))("n",blk_num)("t",block->timestamp) ("count",block->transactions.size())("lib",chain.last_irreversible_block_num()) - ("confs", block->confirmed)("latency", (fc::time_point::now() - block->timestamp).count()/1000 ) ); + ("confs", block->confirmed)("net", br.total_net_usage)("cpu", br.total_cpu_usage_us) + ("elapsed", br.total_elapsed_time)("time", br.total_time) + ("latency", (fc::time_point::now() - block->timestamp).count()/1000 ) ); if( chain.get_read_mode() != db_read_mode::IRREVERSIBLE && hbs->id != id && hbs->block != nullptr ) { // not applied to head - ilog("Block not applied to head ${id}... #${n} @ ${t} signed by ${p} [trxs: ${count}, dpos: ${dpos}, conf: ${confs}, latency: ${latency} ms]", + ilog("Block not applied to head ${id}... #${n} @ ${t} signed by ${p} " + "[trxs: ${count}, dpos: ${dpos}, conf: ${confs}, net: ${net}, cpu: ${cpu}, elapsed: ${elapsed}, time: ${time}, latency: ${latency} ms]", ("p",hbs->block->producer)("id",hbs->id.str().substr(8,16))("n",hbs->block_num)("t",hbs->block->timestamp) ("count",hbs->block->transactions.size())("dpos", hbs->dpos_irreversible_blocknum) - ("confs", hbs->block->confirmed)("latency", (fc::time_point::now() - hbs->block->timestamp).count()/1000 ) ); + ("confs", hbs->block->confirmed)("net", br.total_net_usage)("cpu", br.total_cpu_usage_us) + ("elapsed", br.total_elapsed_time)("time", br.total_time) + ("latency", (fc::time_point::now() - hbs->block->timestamp).count()/1000 ) ); } } @@ -2450,11 +2457,18 @@ void producer_plugin_impl::produce_block() { _account_fails.report(); _account_fails.clear(); - ilog("Produced block ${id}... #${n} @ ${t} signed by ${p} [trxs: ${count}, lib: ${lib}, confirmed: ${confs}]", + controller::block_report br; + for( const auto& r : new_bs->block->transactions ) { + br.total_cpu_usage_us += r.cpu_usage_us; + br.total_net_usage += r.net_usage_words * 8; + } + ilog("Produced block ${id}... 
#${n} @ ${t} signed by ${p} " + "[trxs: ${count}, lib: ${lib}, confirmed: ${confs}, net: ${net}, cpu: ${cpu}]", ("p",new_bs->header.producer)("id",new_bs->id.str().substr(8,16)) ("n",new_bs->block_num)("t",new_bs->header.timestamp) - ("count",new_bs->block->transactions.size())("lib",chain.last_irreversible_block_num())("confs", new_bs->header.confirmed)); - + ("count",new_bs->block->transactions.size())("lib",chain.last_irreversible_block_num()) + ("net", br.total_net_usage)("cpu", br.total_cpu_usage_us) + ("confs", new_bs->header.confirmed)); } void producer_plugin::log_failed_transaction(const transaction_id_type& trx_id, const packed_transaction_ptr& packed_trx_ptr, const char* reason) const { diff --git a/unittests/block_tests.cpp b/unittests/block_tests.cpp index d837a13dd2..d3072f133f 100644 --- a/unittests/block_tests.cpp +++ b/unittests/block_tests.cpp @@ -46,7 +46,8 @@ BOOST_AUTO_TEST_CASE(block_with_invalid_tx_test) tester validator; auto bs = validator.control->create_block_state_future( copy_b->calculate_id(), copy_b ); validator.control->abort_block(); - BOOST_REQUIRE_EXCEPTION(validator.control->push_block( bs, forked_branch_callback{}, trx_meta_cache_lookup{} ), fc::exception , + controller::block_report br; + BOOST_REQUIRE_EXCEPTION(validator.control->push_block( br, bs, forked_branch_callback{}, trx_meta_cache_lookup{} ), fc::exception , [] (const fc::exception &e)->bool { return e.code() == account_name_exists_exception::code_value ; }) ; @@ -84,7 +85,8 @@ BOOST_AUTO_TEST_CASE(block_with_invalid_tx_mroot_test) tester validator; auto bs = validator.control->create_block_state_future( copy_b->calculate_id(), copy_b ); validator.control->abort_block(); - BOOST_REQUIRE_EXCEPTION(validator.control->push_block( bs, forked_branch_callback{}, trx_meta_cache_lookup{} ), fc::exception , + controller::block_report br; + BOOST_REQUIRE_EXCEPTION(validator.control->push_block( br, bs, forked_branch_callback{}, trx_meta_cache_lookup{} ), fc::exception , [] (const fc::exception &e)->bool { return e.code() == block_validate_exception::code_value && e.to_detail_string().find("invalid block transaction merkle root") != std::string::npos; diff --git a/unittests/forked_tests.cpp b/unittests/forked_tests.cpp index 6c178d0894..ebb321fc71 100644 --- a/unittests/forked_tests.cpp +++ b/unittests/forked_tests.cpp @@ -269,7 +269,8 @@ BOOST_AUTO_TEST_CASE( forking ) try { auto bad_id = bad_block.calculate_id(); auto bad_block_bs = c.control->create_block_state_future( bad_id, std::make_shared(std::move(bad_block)) ); c.control->abort_block(); - BOOST_REQUIRE_EXCEPTION(c.control->push_block( bad_block_bs, forked_branch_callback{}, trx_meta_cache_lookup{} ), fc::exception, + controller::block_report br; + BOOST_REQUIRE_EXCEPTION(c.control->push_block( br, bad_block_bs, forked_branch_callback{}, trx_meta_cache_lookup{} ), fc::exception, [] (const fc::exception &ex)->bool { return ex.to_detail_string().find("block signed by unexpected key") != std::string::npos; }); From 5f40d51a8c55bc0900d0facfdb5ac97ab81f82d3 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Sat, 25 Jun 2022 11:52:13 -0500 Subject: [PATCH 03/25] GH-294 Add wall clock time to some existing transaction logging. Added log of time between transactions to give some information on time the chain is idle. 
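The new "time" value is wall-clock time measured around chain.push_transaction(), in addition to the transaction's own elapsed execution time already logged. A minimal sketch of the pattern added below:

    auto start = fc::time_point::now();
    auto trace = chain.push_transaction( trx, block_deadline, max_trx_time, prev_billed_cpu_time_us, false, sub_bill );
    // logged as "time ${r}us" with ("r", fc::time_point::now() - start)

_idle_trx_time is also updated to fc::time_point::now() when transaction processing runs, so that (per the summary above) the time the chain sits idle between transactions can be reported.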
--- plugins/producer_plugin/producer_plugin.cpp | 35 +++++++++++++++------ 1 file changed, 25 insertions(+), 10 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 5f6a374ebb..028a448dc0 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -333,6 +333,7 @@ class producer_plugin_impl : public std::enable_shared_from_this _protocol_features_to_activate; bool _protocol_features_signaled = false; // to mark whether it has been signaled in start_block @@ -685,6 +686,7 @@ class producer_plugin_impl : public std::enable_shared_from_thisbilled_cpu_time_us; auto trace = chain.push_transaction( trx, block_deadline, max_trx_time, prev_billed_cpu_time_us, false, sub_bill ); - fc_dlog( _trx_failed_trace_log, "Subjective bill for ${a}: ${b} elapsed ${t}us", ("a",first_auth)("b",sub_bill)("t",trace->elapsed)); if( trace->except ) { if( exception_is_exhausted( *trace->except ) ) { _unapplied_transactions.add_incoming( trx, persist_until_expired, return_failure_traces, next ); @@ -715,8 +716,10 @@ class producer_plugin_impl : public std::enable_shared_from_thiselapsed, fc::time_point::now() ); + fc_dlog( _trx_failed_trace_log, "Subjective bill for failed ${a}: ${b} elapsed ${t}us, time ${r}us", + ("a",first_auth)("b",sub_bill)("t",trace->elapsed)("r", fc::time_point::now() - start)); + if (!disable_subjective_billing) + _subjective_billing.subjective_bill_failure( first_auth, trace->elapsed, fc::time_point::now() ); if( _pending_block_mode == pending_block_mode::producing ) { auto failure_code = trace->except->code(); @@ -736,6 +739,8 @@ class producer_plugin_impl : public std::enable_shared_from_thiselapsed)("r", fc::time_point::now() - start)); if( persist_until_expired && !_disable_persist_until_expired ) { // if this trx didnt fail/soft-fail and the persist flag is set, store its ID so that we can // ensure its applied to all future speculative blocks as well. 
@@ -758,6 +763,7 @@ class producer_plugin_impl : public std::enable_shared_from_thisread_only; auto trace = chain.push_transaction( trx, deadline, max_trx_time, prev_billed_cpu_time_us, false, sub_bill ); - fc_dlog( _trx_failed_trace_log, "Subjective unapplied bill for ${a}: ${b} prev ${t}us", ("a",first_auth)("b",prev_billed_cpu_time_us)("t",trace->elapsed)); if( trace->except ) { if( exception_is_exhausted( *trace->except ) ) { if( block_is_exhausted() ) { @@ -2054,7 +2060,8 @@ bool producer_plugin_impl::process_unapplied_trxs( const fc::time_point& deadlin break; } } else { - fc_dlog( _trx_failed_trace_log, "Subjective unapplied bill for failed ${a}: ${b} prev ${t}us", ("a",first_auth)("b",prev_billed_cpu_time_us)("t",trace->elapsed)); + fc_dlog( _trx_failed_trace_log, "Subjective bill for failed ${a}: ${b} prev ${t}us, time ${r}us", + ("a",first_auth)("b",prev_billed_cpu_time_us)("t",trace->elapsed)("r", fc::time_point::now() - start)); auto failure_code = trace->except->code(); if( failure_code != tx_duplicate::code_value ) { // this failed our configured maximum transaction time, we don't want to replay it @@ -2073,7 +2080,8 @@ bool producer_plugin_impl::process_unapplied_trxs( const fc::time_point& deadlin continue; } } else { - fc_dlog( _trx_successful_trace_log, "Subjective unapplied bill for success ${a}: ${b} prev ${t}us", ("a",first_auth)("b",prev_billed_cpu_time_us)("t",trace->elapsed)); + fc_dlog( _trx_successful_trace_log, "Subjective bill for success ${a}: ${b} prev ${t}us, time ${r}us", + ("a",first_auth)("b",prev_billed_cpu_time_us)("t",trace->elapsed)("r", fc::time_point::now() - start)); // if db_read_mode SPECULATIVE then trx is in the pending block and not immediately reverted if (!disable_subjective_billing) _subjective_billing.subjective_bill( trx->id(), trx->packed_trx()->expiration(), first_auth, trace->elapsed, @@ -2137,6 +2145,7 @@ void producer_plugin_impl::process_scheduled_and_incoming_trxs( const fc::time_p num_processed++; + _idle_trx_time = fc::time_point::now(); // configurable ratio of incoming txns vs deferred txns while (incoming_trx_weight >= 1.0 && pending_incoming_process_limit && itr != end ) { if (deadline <= fc::time_point::now()) { @@ -2172,6 +2181,7 @@ void producer_plugin_impl::process_scheduled_and_incoming_trxs( const fc::time_p }; try { + auto start = fc::time_point::now(); fc::microseconds max_trx_time = fc::milliseconds( _max_transaction_time_ms.load() ); if( max_trx_time.count() < 0 ) max_trx_time = fc::microseconds::maximum(); @@ -2183,9 +2193,11 @@ void producer_plugin_impl::process_scheduled_and_incoming_trxs( const fc::time_p break; } } else { - fc_dlog(_trx_failed_trace_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING scheduled tx: ${txid}, auth: ${a} : ${why} ", + fc_dlog(_trx_failed_trace_log, + "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING scheduled tx: ${txid}, time: ${r}, auth: ${a} : ${why} ", ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) - ("txid", trx_id)("a", get_first_authorizer(trace))("why", trace->except->what())); + ("txid", trx_id)("r", fc::time_point::now() - start)("a", get_first_authorizer(trace)) + ("why", trace->except->what())); fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING scheduled tx: ${entire_trace}", ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) ("entire_trace", chain_plug->get_log_trx_trace(trace))); @@ -2194,9 +2206,11 @@ void 
producer_plugin_impl::process_scheduled_and_incoming_trxs( const fc::time_p num_failed++; } } else { - fc_dlog(_trx_successful_trace_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING scheduled tx: ${txid}, auth: ${a}, cpu: ${cpu}", + fc_dlog(_trx_successful_trace_log, + "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING scheduled tx: ${txid}, time: ${r}, auth: ${a}, cpu: ${cpu}", ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) - ("txid", trx_id)("a", get_first_authorizer(trace))("cpu", trace->receipt ? trace->receipt->cpu_usage_us : 0)); + ("txid", trx_id)("r", fc::time_point::now() - start)("a", get_first_authorizer(trace)) + ("cpu", trace->receipt ? trace->receipt->cpu_usage_us : 0)); fc_dlog(_trx_trace_success_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING scheduled tx: ${entire_trace}", ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) ("entire_trace", chain_plug->get_log_trx_trace(trace))); @@ -2222,6 +2236,7 @@ bool producer_plugin_impl::process_incoming_trxs( const fc::time_point& deadline { bool exhausted = false; if( pending_incoming_process_limit ) { + _idle_trx_time = fc::time_point::now(); size_t processed = 0; fc_dlog( _log, "Processing ${n} pending transactions", ("n", pending_incoming_process_limit) ); auto itr = _unapplied_transactions.incoming_begin(); From e2c73a9228d5135295ad2877bf438dca4ea115a4 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 17 Mar 2020 18:31:49 -0500 Subject: [PATCH 04/25] Start of a cfa in light validation mode test --- unittests/api_tests.cpp | 58 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) diff --git a/unittests/api_tests.cpp b/unittests/api_tests.cpp index 45ab8896e7..b402bab2c0 100644 --- a/unittests/api_tests.cpp +++ b/unittests/api_tests.cpp @@ -850,6 +850,64 @@ BOOST_FIXTURE_TEST_CASE(deferred_cfa_success, TESTER) try { BOOST_REQUIRE_EQUAL( validate(), true ); } FC_LOG_AND_RETHROW() +BOOST_AUTO_TEST_CASE(light_validation_skip_cfa) try { + tester chain; + + std::vector blocks; + blocks.push_back(chain.produce_block()); + + chain.create_account( N(testapi) ); + chain.create_account( N(dummy) ); + blocks.push_back(chain.produce_block()); + chain.set_code( N(testapi), contracts::test_api_wasm() ); + blocks.push_back(chain.produce_block()); + + cf_action cfa; + signed_transaction trx; + action act({}, cfa); + trx.context_free_actions.push_back(act); + trx.context_free_data.emplace_back(fc::raw::pack(100)); // verify payload matches context free data + trx.context_free_data.emplace_back(fc::raw::pack(200)); + // add a normal action along with cfa + dummy_action da = { DUMMY_ACTION_DEFAULT_A, DUMMY_ACTION_DEFAULT_B, DUMMY_ACTION_DEFAULT_C }; + action act1(vector{{N(testapi), config::active_name}}, da); + trx.actions.push_back(act1); + chain.set_transaction_headers(trx); + // run normal passing case + auto sigs = trx.sign(chain.get_private_key(N(testapi), "active"), chain.control->get_chain_id()); + auto trace = chain.push_transaction(trx); + blocks.push_back(chain.produce_block()); + + BOOST_REQUIRE(trace->receipt); + BOOST_CHECK_EQUAL(trace->receipt->status, transaction_receipt::executed); + BOOST_CHECK_EQUAL(2, trace->action_traces.size()); + + flat_set trusted_producers = { N(eosio) }; + validating_tester other(trusted_producers); + other.skip_validate = true; + + transaction_trace_ptr other_trace; + auto cc = other.control->applied_transaction.connect( [&](std::tuple x) { + auto& t = 
std::get<0>(x); + if( t && t->id == trace->id ) { + other_trace = t; + } + } ); + + for (auto& new_block : blocks) { + other.validate_push_block(new_block); + } + blocks.clear(); + + BOOST_REQUIRE(other_trace); + BOOST_REQUIRE(other_trace->receipt); + BOOST_CHECK_EQUAL(other_trace->receipt->status, transaction_receipt::executed); + BOOST_CHECK_EQUAL(1, other_trace->action_traces.size()); // no cfa + + other.close(); + +} FC_LOG_AND_RETHROW() + /************************************************************************************* * checktime_tests test case *************************************************************************************/ From 714c29abd5905cdf9f84f0f05634c0938aca793b Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 17 Mar 2020 18:36:15 -0500 Subject: [PATCH 05/25] Skip context free action account verification for light validation --- libraries/chain/transaction_context.cpp | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp index 8e357777cf..87dd755ec6 100644 --- a/libraries/chain/transaction_context.cpp +++ b/libraries/chain/transaction_context.cpp @@ -723,12 +723,14 @@ namespace eosio { namespace chain { const auto& db = control.db(); const auto& auth_manager = control.get_authorization_manager(); - for( const auto& a : trx.context_free_actions ) { - auto* code = db.find(a.account); - EOS_ASSERT( code != nullptr, transaction_exception, - "action's code account '${account}' does not exist", ("account", a.account) ); - EOS_ASSERT( a.authorization.size() == 0, transaction_exception, - "context-free actions cannot have authorizations" ); + if( trx.context_free_actions.size() > 0 && !control.skip_auth_check() ) { + for( const auto& a : trx.context_free_actions ) { + auto* code = db.find( a.account ); + EOS_ASSERT( code != nullptr, transaction_exception, + "action's code account '${account}' does not exist", ("account", a.account) ); + EOS_ASSERT( a.authorization.size() == 0, transaction_exception, + "context-free actions cannot have authorizations" ); + } } flat_set actors; From 683737a864df2d13103e064066df4d293387d4a3 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 18 Mar 2020 12:33:04 -0500 Subject: [PATCH 06/25] Do not execute cfa in light validation mode --- libraries/chain/apply_context.cpp | 82 ++++++++++++++++--------------- 1 file changed, 42 insertions(+), 40 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 4a9611458c..7d9aeceef8 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -72,51 +72,53 @@ void apply_context::exec_one() try { action_return_value.clear(); receiver_account = &db.get( receiver ); - privileged = receiver_account->is_privileged(); - auto native = control.find_apply_handler( receiver, act->account, act->name ); - if( native ) { - if( trx_context.enforce_whiteblacklist && control.is_producing_block() ) { - control.check_contract_list( receiver ); - control.check_action_list( act->account, act->name ); + if( !context_free || (context_free && !control.skip_auth_check() ) ) { + privileged = receiver_account->is_privileged(); + auto native = control.find_apply_handler( receiver, act->account, act->name ); + if( native ) { + if( trx_context.enforce_whiteblacklist && control.is_producing_block() ) { + control.check_contract_list( receiver ); + control.check_action_list( act->account, act->name ); + } + (*native)( *this ); } - (*native)( 
*this ); - } - if( ( receiver_account->code_hash != digest_type() ) && - ( !( act->account == config::system_account_name - && act->name == "setcode"_n - && receiver == config::system_account_name ) + if( ( receiver_account->code_hash != digest_type() ) && + ( !( act->account == config::system_account_name + && act->name == N( setcode ) + && receiver == config::system_account_name ) || control.is_builtin_activated( builtin_protocol_feature_t::forward_setcode ) - ) - ) { - if( trx_context.enforce_whiteblacklist && control.is_producing_block() ) { - control.check_contract_list( receiver ); - control.check_action_list( act->account, act->name ); + ) + ) { + if( trx_context.enforce_whiteblacklist && control.is_producing_block() ) { + control.check_contract_list( receiver ); + control.check_action_list( act->account, act->name ); + } + try { + control.get_wasm_interface().apply( receiver_account->code_hash, receiver_account->vm_type, receiver_account->vm_version, *this ); + } catch( const wasm_exit& ) {} } - try { - control.get_wasm_interface().apply( receiver_account->code_hash, receiver_account->vm_type, receiver_account->vm_version, *this ); - } catch( const wasm_exit& ) {} - } - if( !privileged && control.is_builtin_activated( builtin_protocol_feature_t::ram_restrictions ) ) { - const size_t checktime_interval = 10; - size_t counter = 0; - bool not_in_notify_context = (receiver == act->account); - const auto end = _account_ram_deltas.end(); - for( auto itr = _account_ram_deltas.begin(); itr != end; ++itr, ++counter ) { - if( counter == checktime_interval ) { - trx_context.checktime(); - counter = 0; - } - if( itr->delta > 0 && itr->account != receiver ) { - EOS_ASSERT( not_in_notify_context, unauthorized_ram_usage_increase, - "unprivileged contract cannot increase RAM usage of another account within a notify context: ${account}", - ("account", itr->account) - ); - EOS_ASSERT( has_authorization( itr->account ), unauthorized_ram_usage_increase, - "unprivileged contract cannot increase RAM usage of another account that has not authorized the action: ${account}", - ("account", itr->account) - ); + if( !privileged && control.is_builtin_activated( builtin_protocol_feature_t::ram_restrictions ) ) { + const size_t checktime_interval = 10; + size_t counter = 0; + bool not_in_notify_context = (receiver == act->account); + const auto end = _account_ram_deltas.end(); + for( auto itr = _account_ram_deltas.begin(); itr != end; ++itr, ++counter ) { + if( counter == checktime_interval ) { + trx_context.checktime(); + counter = 0; + } + if( itr->delta > 0 && itr->account != receiver ) { + EOS_ASSERT( not_in_notify_context, unauthorized_ram_usage_increase, + "unprivileged contract cannot increase RAM usage of another account within a notify context: ${account}", + ("account", itr->account) + ); + EOS_ASSERT( has_authorization( itr->account ), unauthorized_ram_usage_increase, + "unprivileged contract cannot increase RAM usage of another account that has not authorized the action: ${account}", + ("account", itr->account) + ); + } } } } From cca3fb4b6ae19928a4dba0d380d1f41de1fb5d04 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 18 Mar 2020 12:33:36 -0500 Subject: [PATCH 07/25] Add test for cfa not executed in light validation --- unittests/api_tests.cpp | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/unittests/api_tests.cpp b/unittests/api_tests.cpp index b402bab2c0..15321928d4 100644 --- a/unittests/api_tests.cpp +++ b/unittests/api_tests.cpp @@ -851,7 +851,7 
@@ BOOST_FIXTURE_TEST_CASE(deferred_cfa_success, TESTER) try { } FC_LOG_AND_RETHROW() BOOST_AUTO_TEST_CASE(light_validation_skip_cfa) try { - tester chain; + tester chain(setup_policy::full); std::vector blocks; blocks.push_back(chain.produce_block()); @@ -881,10 +881,19 @@ BOOST_AUTO_TEST_CASE(light_validation_skip_cfa) try { BOOST_REQUIRE(trace->receipt); BOOST_CHECK_EQUAL(trace->receipt->status, transaction_receipt::executed); BOOST_CHECK_EQUAL(2, trace->action_traces.size()); + BOOST_CHECK_EQUAL("test\n", trace->action_traces.at(0).console); // cfa executed + BOOST_CHECK_EQUAL("", trace->action_traces.at(1).console); + + + fc::temp_directory tempdir; + auto conf_genesis = tester::default_config( tempdir ); + + auto& cfg = conf_genesis.first; + cfg.trusted_producers = { N(eosio) }; // light validation + + tester other( conf_genesis.first, conf_genesis.second ); + other.execute_setup_policy( setup_policy::full ); - flat_set trusted_producers = { N(eosio) }; - validating_tester other(trusted_producers); - other.skip_validate = true; transaction_trace_ptr other_trace; auto cc = other.control->applied_transaction.connect( [&](std::tuple x) { @@ -895,14 +904,16 @@ BOOST_AUTO_TEST_CASE(light_validation_skip_cfa) try { } ); for (auto& new_block : blocks) { - other.validate_push_block(new_block); + other.push_block(new_block); } blocks.clear(); BOOST_REQUIRE(other_trace); BOOST_REQUIRE(other_trace->receipt); BOOST_CHECK_EQUAL(other_trace->receipt->status, transaction_receipt::executed); - BOOST_CHECK_EQUAL(1, other_trace->action_traces.size()); // no cfa + BOOST_CHECK_EQUAL(2, other_trace->action_traces.size()); + BOOST_CHECK_EQUAL("", other_trace->action_traces.at(0).console); // cfa not executed for light validation (trusted producer) + BOOST_CHECK_EQUAL("", other_trace->action_traces.at(1).console); other.close(); From 1434fb068d589ea8bde8911dd309a58062cdc09e Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 18 Mar 2020 13:27:26 -0500 Subject: [PATCH 08/25] Fix indent --- libraries/chain/apply_context.cpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 7d9aeceef8..d62552d826 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -84,12 +84,12 @@ void apply_context::exec_one() } if( ( receiver_account->code_hash != digest_type() ) && - ( !( act->account == config::system_account_name - && act->name == N( setcode ) - && receiver == config::system_account_name ) - || control.is_builtin_activated( builtin_protocol_feature_t::forward_setcode ) - ) - ) { + ( !( act->account == config::system_account_name + && act->name == N( setcode ) + && receiver == config::system_account_name ) + || control.is_builtin_activated( builtin_protocol_feature_t::forward_setcode ) + ) + ) { if( trx_context.enforce_whiteblacklist && control.is_producing_block() ) { control.check_contract_list( receiver ); control.check_action_list( act->account, act->name ); From 1f34d31374bb3b78dd2dc4cc8f9a35f686dba19e Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 19 Mar 2020 07:47:11 -0500 Subject: [PATCH 09/25] Use skip_trx_checks instead of skip_auth_check --- libraries/chain/apply_context.cpp | 2 +- libraries/chain/transaction_context.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index d62552d826..e3d0a9c646 100644 --- a/libraries/chain/apply_context.cpp +++ 
b/libraries/chain/apply_context.cpp @@ -72,7 +72,7 @@ void apply_context::exec_one() try { action_return_value.clear(); receiver_account = &db.get( receiver ); - if( !context_free || (context_free && !control.skip_auth_check() ) ) { + if( !context_free || (context_free && !control.skip_trx_checks() ) ) { privileged = receiver_account->is_privileged(); auto native = control.find_apply_handler( receiver, act->account, act->name ); if( native ) { diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp index 87dd755ec6..7d28412e5c 100644 --- a/libraries/chain/transaction_context.cpp +++ b/libraries/chain/transaction_context.cpp @@ -723,7 +723,7 @@ namespace eosio { namespace chain { const auto& db = control.db(); const auto& auth_manager = control.get_authorization_manager(); - if( trx.context_free_actions.size() > 0 && !control.skip_auth_check() ) { + if( !trx.context_free_actions.empty() && !control.skip_trx_checks() ) { for( const auto& a : trx.context_free_actions ) { auto* code = db.find( a.account ); EOS_ASSERT( code != nullptr, transaction_exception, From ef17c0bf574b371fab255a7437b0ddefcd65197a Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 19 Mar 2020 10:20:24 -0500 Subject: [PATCH 10/25] Simplified if --- libraries/chain/apply_context.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index e3d0a9c646..62e976ae25 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -72,7 +72,7 @@ void apply_context::exec_one() try { action_return_value.clear(); receiver_account = &db.get( receiver ); - if( !context_free || (context_free && !control.skip_trx_checks() ) ) { + if( !(context_free && control.skip_trx_checks()) ) { privileged = receiver_account->is_privileged(); auto native = control.find_apply_handler( receiver, act->account, act->name ); if( native ) { From 2fa67190284e28ae44fb6cff18f931621ea6b8af Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 19 Mar 2020 10:20:50 -0500 Subject: [PATCH 11/25] Additional checks requested from PR --- unittests/api_tests.cpp | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/unittests/api_tests.cpp b/unittests/api_tests.cpp index 15321928d4..139f076005 100644 --- a/unittests/api_tests.cpp +++ b/unittests/api_tests.cpp @@ -881,7 +881,11 @@ BOOST_AUTO_TEST_CASE(light_validation_skip_cfa) try { BOOST_REQUIRE(trace->receipt); BOOST_CHECK_EQUAL(trace->receipt->status, transaction_receipt::executed); BOOST_CHECK_EQUAL(2, trace->action_traces.size()); + + BOOST_CHECK(trace->action_traces.at(0).context_free); // cfa BOOST_CHECK_EQUAL("test\n", trace->action_traces.at(0).console); // cfa executed + + BOOST_CHECK(!trace->action_traces.at(1).context_free); // non-cfa BOOST_CHECK_EQUAL("", trace->action_traces.at(1).console); @@ -911,9 +915,19 @@ BOOST_AUTO_TEST_CASE(light_validation_skip_cfa) try { BOOST_REQUIRE(other_trace); BOOST_REQUIRE(other_trace->receipt); BOOST_CHECK_EQUAL(other_trace->receipt->status, transaction_receipt::executed); + BOOST_CHECK(*trace->receipt == *other_trace->receipt); BOOST_CHECK_EQUAL(2, other_trace->action_traces.size()); + + BOOST_CHECK(other_trace->action_traces.at(0).context_free); // cfa BOOST_CHECK_EQUAL("", other_trace->action_traces.at(0).console); // cfa not executed for light validation (trusted producer) + BOOST_CHECK_EQUAL(trace->action_traces.at(0).receipt->global_sequence, 
other_trace->action_traces.at(0).receipt->global_sequence); + BOOST_CHECK_EQUAL(trace->action_traces.at(0).receipt->digest(), other_trace->action_traces.at(0).receipt->digest()); + + BOOST_CHECK(!other_trace->action_traces.at(1).context_free); // non-cfa BOOST_CHECK_EQUAL("", other_trace->action_traces.at(1).console); + BOOST_CHECK_EQUAL(trace->action_traces.at(1).receipt->global_sequence, other_trace->action_traces.at(1).receipt->global_sequence); + BOOST_CHECK_EQUAL(trace->action_traces.at(1).receipt->digest(), other_trace->action_traces.at(1).receipt->digest()); + other.close(); From 5e7edd3d480c5594e9226cba5a342e5c5206ce87 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Fri, 15 Nov 2019 12:47:30 -0600 Subject: [PATCH 12/25] Merge pull request #8218 from EOSIO/8199-cleos-transaction-signatures Add option to provide transaction signature keys to cleos --- programs/cleos/main.cpp | 302 +++++++------ tests/Cluster.py | 6 +- tests/Node.py | 87 ++-- ...onsensus-validation-malicious-producers.py | 398 ++++++++++++++++++ tests/nodeos_run_test.py | 10 +- tests/nodeos_under_min_avail_ram.py | 2 +- tests/p2p_network_test.py | 2 +- tests/prod_preactivation_test.py | 6 +- 8 files changed, 635 insertions(+), 178 deletions(-) create mode 100755 tests/consensus-validation-malicious-producers.py diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index 36152bd21c..4e21e77430 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -234,6 +234,49 @@ void add_standard_transaction_options(CLI::App* cmd, string default_permission = cmd->add_option("--retry-num-blocks", tx_retry_num_blocks, localized("Request node to retry transaction until in a block of given height, blocking call")); } +bool is_public_key_str(const std::string& potential_key_str) { + return boost::istarts_with(potential_key_str, "EOS") || boost::istarts_with(potential_key_str, "PUB_R1") || boost::istarts_with(potential_key_str, "PUB_K1") || boost::istarts_with(potential_key_str, "PUB_WA"); +} + +class signing_keys_option { +public: + signing_keys_option() {} + void add_option(CLI::App* cmd) { + cmd->add_option("--sign-with", public_key_json, localized("The public key or json array of public keys to sign with")); + } + + std::vector get_keys() { + std::vector signing_keys; + if (!public_key_json.empty()) { + if (is_public_key_str(public_key_json)) { + try { + signing_keys.push_back(public_key_type(public_key_json)); + } EOS_RETHROW_EXCEPTIONS(public_key_type_exception, "Invalid public key: ${public_key}", ("public_key", public_key_json)) + } else { + fc::variant json_keys; + try { + json_keys = fc::json::from_string(public_key_json, fc::json::relaxed_parser); + } EOS_RETHROW_EXCEPTIONS(json_parse_exception, "Fail to parse JSON from string: ${string}", ("string", public_key_json)); + try { + std::vector keys = json_keys.template as>(); + signing_keys = std::move(keys); + } EOS_RETHROW_EXCEPTIONS(public_key_type_exception, "Invalid public key array format '${data}'", ("data", fc::json::to_string(json_keys))) + } + } + return signing_keys; + } +private: + string public_key_json; +}; + +signing_keys_option signing_keys_opt; + + +void add_standard_transaction_options_plus_signing(CLI::App* cmd, string default_permission = "") { + add_standard_transaction_options(cmd, default_permission); + signing_keys_opt.add_option(cmd); +} + vector get_account_permissions(const vector& permissions) { auto fixedPermissions = permissions | boost::adaptors::transformed([](const string& p) { vector pieces; @@ -325,7 +368,8 @@ void 
sign_transaction(signed_transaction& trx, fc::variant& required_keys, const trx = signed_trx.as(); } -fc::variant push_transaction( signed_transaction& trx, packed_transaction::compression_type compression = packed_transaction::compression_type::none ) { +fc::variant push_transaction( signed_transaction& trx, const std::vector& signing_keys = std::vector(), + packed_transaction::compression_type compression = packed_transaction::compression_type::none ) { auto info = get_info(); if (trx.signatures.size() == 0) { // #5445 can't change txn content if already signed @@ -352,7 +396,13 @@ fc::variant push_transaction( signed_transaction& trx, packed_transaction::compr } if (!tx_skip_sign) { - auto required_keys = determine_required_keys(trx); + fc::variant required_keys; + if (signing_keys.size() > 0) { + required_keys = fc::variant(signing_keys); + } + else { + required_keys = determine_required_keys(trx); + } sign_transaction(trx, required_keys, info.chain_id); } @@ -413,11 +463,11 @@ fc::variant push_transaction( signed_transaction& trx, packed_transaction::compr } } -fc::variant push_actions(std::vector&& actions, packed_transaction::compression_type compression = packed_transaction::compression_type::none ) { +fc::variant push_actions(std::vector&& actions, packed_transaction::compression_type compression = packed_transaction::compression_type::none, const std::vector& signing_keys = std::vector() ) { signed_transaction trx; trx.actions = std::forward(actions); - return push_transaction(trx, compression); + return push_transaction(trx, signing_keys, compression); } void print_return_value( const fc::variant& at ) { @@ -590,37 +640,14 @@ void print_result( const fc::variant& result ) { try { } } FC_CAPTURE_AND_RETHROW( (result) ) } -void send_actions(std::vector&& actions, packed_transaction::compression_type compression = packed_transaction::compression_type::none ) { +using std::cout; +void send_actions(std::vector&& actions, const std::vector& signing_keys = std::vector(), packed_transaction::compression_type compression = packed_transaction::compression_type::none ) { std::ofstream out; if (tx_json_save_file.length()) { out.open(tx_json_save_file); EOSC_ASSERT(!out.fail(), "ERROR: Failed to create file \"${p}\"", ("p", tx_json_save_file)); } - auto result = push_actions( move(actions), compression); - - string jsonstr; - if (tx_json_save_file.length()) { - jsonstr = fc::json::to_pretty_string( result ); - out << jsonstr; - out.close(); - } - if( tx_print_json ) { - if (jsonstr.length() == 0) { - jsonstr = fc::json::to_pretty_string( result ); - } - cout << jsonstr << endl; - } else { - print_result( result ); - } -} - -void send_transaction( signed_transaction& trx, packed_transaction::compression_type compression = packed_transaction::compression_type::none ) { - std::ofstream out; - if (tx_json_save_file.length()) { - out.open(tx_json_save_file); - EOSC_ASSERT(!out.fail(), "ERROR: Failed to create file \"${p}\"", ("p", tx_json_save_file)); - } - auto result = push_transaction(trx, compression); + auto result = push_actions( move(actions), compression, signing_keys); string jsonstr; if (tx_json_save_file.length()) { @@ -773,7 +800,7 @@ authority parse_json_authority(const std::string& authorityJsonOrFile) { } authority parse_json_authority_or_key(const std::string& authorityJsonOrFile) { - if (boost::istarts_with(authorityJsonOrFile, "EOS") || boost::istarts_with(authorityJsonOrFile, "PUB_R1")) { + if (is_public_key_str(authorityJsonOrFile)) { try { return 
authority(public_key_type(authorityJsonOrFile)); } EOS_RETHROW_EXCEPTIONS(public_key_type_exception, "Invalid public key: ${public_key}", ("public_key", authorityJsonOrFile)) @@ -963,7 +990,7 @@ struct set_action_permission_subcommand { permissions->add_option("type", typeStr, localized("The type of the action"))->required(); permissions->add_option("requirement", requirementStr, localized("[delete] NULL, [set/update] The permission name require for executing the given action"))->required(); - add_standard_transaction_options(permissions, "account@active"); + add_standard_transaction_options_plus_signing(permissions, "account@active"); permissions->callback([this] { name account = name(accountStr); @@ -972,10 +999,10 @@ struct set_action_permission_subcommand { bool is_delete = boost::iequals(requirementStr, "null"); if (is_delete) { - send_actions({create_unlinkauth(account, code, type)}); + send_actions({create_unlinkauth(account, code, type)}, signing_keys_opt.get_keys()); } else { name requirement = name(requirementStr); - send_actions({create_linkauth(account, code, type, requirement)}); + send_actions({create_linkauth(account, code, type, requirement)}, signing_keys_opt.get_keys()); } }); } @@ -1084,7 +1111,7 @@ struct register_producer_subcommand { register_producer->add_option("producer_key", producer_key_str, localized("The producer's public key"))->required(); register_producer->add_option("url", url, localized("The URL where info about producer can be found"), true); register_producer->add_option("location", loc, localized("Relative location for purpose of nearest neighbor scheduling"), true); - add_standard_transaction_options(register_producer, "account@active"); + add_standard_transaction_options_plus_signing(register_producer, "account@active"); register_producer->callback([this] { @@ -1095,7 +1122,7 @@ struct register_producer_subcommand { auto regprod_var = regproducer_variant(name(producer_str), producer_key, url, loc ); auto accountPermissions = get_account_permissions(tx_permission, {name(producer_str), config::active_name}); - send_actions({create_action(accountPermissions, config::system_account_name, "regproducer"_n, regprod_var)}); + send_actions({create_action(accountPermissions, config::system_account_name, "regproducer"_n, regprod_var)}, signing_keys_opt.get_keys()); }); } }; @@ -1139,7 +1166,7 @@ struct create_account_subcommand { (localized("Transfer voting power and right to unstake tokens to receiver"))); } - add_standard_transaction_options(createAccount, "creator@active"); + add_standard_transaction_options_plus_signing(createAccount, "creator@active"); createAccount->callback([this] { auth_type owner, active; @@ -1176,12 +1203,12 @@ struct create_account_subcommand { auto cpu = to_asset(stake_cpu); if ( net.get_amount() != 0 || cpu.get_amount() != 0 ) { action delegate = create_delegate( name(creator), name(account_name), net, cpu, transfer); - send_actions( { create, buyram, delegate } ); + send_actions( { create, buyram, delegate }, signing_keys_opt.get_keys()); } else { - send_actions( { create, buyram } ); + send_actions( { create, buyram }, signing_keys_opt.get_keys()); } } else { - send_actions( { create } ); + send_actions( { create }, signing_keys_opt.get_keys()); } }); } @@ -1193,14 +1220,14 @@ struct unregister_producer_subcommand { unregister_producer_subcommand(CLI::App* actionRoot) { auto unregister_producer = actionRoot->add_subcommand("unregprod", localized("Unregister an existing producer")); unregister_producer->add_option("account", 
producer_str, localized("The account to unregister as a producer"))->required(); - add_standard_transaction_options(unregister_producer, "account@active"); + add_standard_transaction_options_plus_signing(unregister_producer, "account@active"); unregister_producer->callback([this] { fc::variant act_payload = fc::mutable_variant_object() ("producer", producer_str); auto accountPermissions = get_account_permissions(tx_permission, {name(producer_str), config::active_name}); - send_actions({create_action(accountPermissions, config::system_account_name, "unregprod"_n, act_payload)}); + send_actions({create_action(accountPermissions, config::system_account_name, "unregprod"_n, act_payload)}, signing_keys_opt.get_keys()); }); } }; @@ -1213,7 +1240,7 @@ struct vote_producer_proxy_subcommand { auto vote_proxy = actionRoot->add_subcommand("proxy", localized("Vote your stake through a proxy")); vote_proxy->add_option("voter", voter_str, localized("The voting account"))->required(); vote_proxy->add_option("proxy", proxy_str, localized("The proxy account"))->required(); - add_standard_transaction_options(vote_proxy, "voter@active"); + add_standard_transaction_options_plus_signing(vote_proxy, "voter@active"); vote_proxy->callback([this] { fc::variant act_payload = fc::mutable_variant_object() @@ -1221,7 +1248,7 @@ struct vote_producer_proxy_subcommand { ("proxy", proxy_str) ("producers", std::vector{}); auto accountPermissions = get_account_permissions(tx_permission, {name(voter_str), config::active_name}); - send_actions({create_action(accountPermissions, config::system_account_name, "voteproducer"_n, act_payload)}); + send_actions({create_action(accountPermissions, config::system_account_name, "voteproducer"_n, act_payload)}, signing_keys_opt.get_keys()); }); } }; @@ -1234,7 +1261,7 @@ struct vote_producers_subcommand { auto vote_producers = actionRoot->add_subcommand("prods", localized("Vote for one or more producers")); vote_producers->add_option("voter", voter_str, localized("The voting account"))->required(); vote_producers->add_option("producers", producer_names, localized("The account(s) to vote for. 
All options from this position and following will be treated as the producer list."))->required(); - add_standard_transaction_options(vote_producers, "voter@active"); + add_standard_transaction_options_plus_signing(vote_producers, "voter@active"); vote_producers->callback([this] { @@ -1245,7 +1272,7 @@ struct vote_producers_subcommand { ("proxy", "") ("producers", producer_names); auto accountPermissions = get_account_permissions(tx_permission, {name(voter_str), config::active_name}); - send_actions({create_action(accountPermissions, config::system_account_name, "voteproducer"_n, act_payload)}); + send_actions({create_action(accountPermissions, config::system_account_name, "voteproducer"_n, act_payload)}, signing_keys_opt.get_keys()); }); } }; @@ -1258,7 +1285,7 @@ struct approve_producer_subcommand { auto approve_producer = actionRoot->add_subcommand("approve", localized("Add one producer to list of voted producers")); approve_producer->add_option("voter", voter, localized("The voting account"))->required(); approve_producer->add_option("producer", producer_name, localized("The account to vote for"))->required(); - add_standard_transaction_options(approve_producer, "voter@active"); + add_standard_transaction_options_plus_signing(approve_producer, "voter@active"); approve_producer->callback([this] { auto result = call(get_table_func, fc::mutable_variant_object("json", true) @@ -1298,7 +1325,7 @@ struct approve_producer_subcommand { ("proxy", "") ("producers", prods); auto accountPermissions = get_account_permissions(tx_permission, {name(voter), config::active_name}); - send_actions({create_action(accountPermissions, config::system_account_name, "voteproducer"_n, act_payload)}); + send_actions({create_action(accountPermissions, config::system_account_name, "voteproducer"_n, act_payload)}, signing_keys_opt.get_keys()); }); } }; @@ -1311,7 +1338,7 @@ struct unapprove_producer_subcommand { auto approve_producer = actionRoot->add_subcommand("unapprove", localized("Remove one producer from list of voted producers")); approve_producer->add_option("voter", voter, localized("The voting account"))->required(); approve_producer->add_option("producer", producer_name, localized("The account to remove from voted producers"))->required(); - add_standard_transaction_options(approve_producer, "voter@active"); + add_standard_transaction_options_plus_signing(approve_producer, "voter@active"); approve_producer->callback([this] { auto result = call(get_table_func, fc::mutable_variant_object("json", true) @@ -1350,7 +1377,7 @@ struct unapprove_producer_subcommand { ("proxy", "") ("producers", prods); auto accountPermissions = get_account_permissions(tx_permission, {name(voter), config::active_name}); - send_actions({create_action(accountPermissions, config::system_account_name, "voteproducer"_n, act_payload)}); + send_actions({create_action(accountPermissions, config::system_account_name, "voteproducer"_n, act_payload)}, signing_keys_opt.get_keys()); }); } }; @@ -1513,7 +1540,7 @@ struct delegate_bandwidth_subcommand { delegate_bandwidth->add_option("--buyram", buy_ram_amount, localized("The amount of tokens to buy RAM with")); delegate_bandwidth->add_option("--buy-ram-bytes", buy_ram_bytes, localized("The amount of RAM to buy in bytes")); delegate_bandwidth->add_flag("--transfer", transfer, localized("Transfer voting power and right to unstake tokens to receiver")); - add_standard_transaction_options(delegate_bandwidth, "from@active"); + add_standard_transaction_options_plus_signing(delegate_bandwidth, 
"from@active"); delegate_bandwidth->callback([this] { fc::variant act_payload = fc::mutable_variant_object() @@ -1530,7 +1557,7 @@ struct delegate_bandwidth_subcommand { } else if (buy_ram_bytes) { acts.push_back( create_buyrambytes(name(from_str), name(receiver_str), buy_ram_bytes) ); } - send_actions(std::move(acts)); + send_actions(std::move(acts), signing_keys_opt.get_keys()); }); } }; @@ -1548,7 +1575,7 @@ struct undelegate_bandwidth_subcommand { undelegate_bandwidth->add_option("receiver", receiver_str, localized("The account to undelegate bandwidth from"))->required(); undelegate_bandwidth->add_option("unstake_net_quantity", unstake_net_amount, localized("The amount of tokens to undelegate for network bandwidth"))->required(); undelegate_bandwidth->add_option("unstake_cpu_quantity", unstake_cpu_amount, localized("The amount of tokens to undelegate for CPU bandwidth"))->required(); - add_standard_transaction_options(undelegate_bandwidth, "from@active"); + add_standard_transaction_options_plus_signing(undelegate_bandwidth, "from@active"); undelegate_bandwidth->callback([this] { fc::variant act_payload = fc::mutable_variant_object() @@ -1557,7 +1584,7 @@ struct undelegate_bandwidth_subcommand { ("unstake_net_quantity", to_asset(unstake_net_amount)) ("unstake_cpu_quantity", to_asset(unstake_cpu_amount)); auto accountPermissions = get_account_permissions(tx_permission, {name(from_str), config::active_name}); - send_actions({create_action(accountPermissions, config::system_account_name, "undelegatebw"_n, act_payload)}); + send_actions({create_action(accountPermissions, config::system_account_name, "undelegatebw"_n, act_payload)}, signing_keys_opt.get_keys()); }); } }; @@ -1571,14 +1598,15 @@ struct bidname_subcommand { bidname->add_option("bidder", bidder_str, localized("The bidding account"))->required(); bidname->add_option("newname", newname_str, localized("The bidding name"))->required(); bidname->add_option("bid", bid_amount, localized("The amount of tokens to bid"))->required(); - add_standard_transaction_options(bidname, "bidder@active"); + add_standard_transaction_options_plus_signing(bidname, "bidder@active"); + bidname->callback([this] { fc::variant act_payload = fc::mutable_variant_object() ("bidder", bidder_str) ("newname", newname_str) ("bid", to_asset(bid_amount)); auto accountPermissions = get_account_permissions(tx_permission, {name(bidder_str), config::active_name}); - send_actions({create_action(accountPermissions, config::system_account_name, "bidname"_n, act_payload)}); + send_actions({create_action(accountPermissions, config::system_account_name, "bidname"_n, act_payload)}, signing_keys_opt.get_keys()); }); } }; @@ -1677,13 +1705,13 @@ struct buyram_subcommand { buyram->add_option("amount", amount, localized("The amount of tokens to pay for RAM, or number of bytes/kibibytes of RAM if --bytes/--kbytes is set"))->required(); buyram->add_flag("--kbytes,-k", kbytes, localized("The amount to buy in kibibytes (KiB)")); buyram->add_flag("--bytes,-b", bytes, localized("The amount to buy in bytes")); - add_standard_transaction_options(buyram, "payer@active"); + add_standard_transaction_options_plus_signing(buyram, "payer@active"); buyram->callback([this] { EOSC_ASSERT( !kbytes || !bytes, "ERROR: --kbytes and --bytes cannot be set at the same time" ); if (kbytes || bytes) { - send_actions( { create_buyrambytes(name(from_str), name(receiver_str), fc::to_uint64(amount) * ((kbytes) ? 
1024ull : 1ull)) } ); + send_actions( { create_buyrambytes(name(from_str), name(receiver_str), fc::to_uint64(amount) * ((kbytes) ? 1024ull : 1ull)) }, signing_keys_opt.get_keys()); } else { - send_actions( { create_buyram(name(from_str), name(receiver_str), to_asset(amount)) } ); + send_actions( { create_buyram(name(from_str), name(receiver_str), to_asset(amount)) }, signing_keys_opt.get_keys()); } }); } @@ -1698,14 +1726,14 @@ struct sellram_subcommand { auto sellram = actionRoot->add_subcommand("sellram", localized("Sell RAM")); sellram->add_option("account", receiver_str, localized("The account to receive tokens for sold RAM"))->required(); sellram->add_option("bytes", amount, localized("The amount of RAM bytes to sell"))->required(); - add_standard_transaction_options(sellram, "account@active"); + add_standard_transaction_options_plus_signing(sellram, "account@active"); sellram->callback([this] { fc::variant act_payload = fc::mutable_variant_object() ("account", receiver_str) ("bytes", amount); auto accountPermissions = get_account_permissions(tx_permission, {name(receiver_str), config::active_name}); - send_actions({create_action(accountPermissions, config::system_account_name, "sellram"_n, act_payload)}); + send_actions({create_action(accountPermissions, config::system_account_name, "sellram"_n, act_payload)}, signing_keys_opt.get_keys()); }); } }; @@ -1716,13 +1744,13 @@ struct claimrewards_subcommand { claimrewards_subcommand(CLI::App* actionRoot) { auto claim_rewards = actionRoot->add_subcommand("claimrewards", localized("Claim producer rewards")); claim_rewards->add_option("owner", owner, localized("The account to claim rewards for"))->required(); - add_standard_transaction_options(claim_rewards, "owner@active"); + add_standard_transaction_options_plus_signing(claim_rewards, "owner@active"); claim_rewards->callback([this] { fc::variant act_payload = fc::mutable_variant_object() ("owner", owner); auto accountPermissions = get_account_permissions(tx_permission, {name(owner), config::active_name}); - send_actions({create_action(accountPermissions, config::system_account_name, "claimrewards"_n, act_payload)}); + send_actions({create_action(accountPermissions, config::system_account_name, "claimrewards"_n, act_payload)}, signing_keys_opt.get_keys()); }); } }; @@ -1733,14 +1761,14 @@ struct regproxy_subcommand { regproxy_subcommand(CLI::App* actionRoot) { auto register_proxy = actionRoot->add_subcommand("regproxy", localized("Register an account as a proxy (for voting)")); register_proxy->add_option("proxy", proxy, localized("The proxy account to register"))->required(); - add_standard_transaction_options(register_proxy, "proxy@active"); + add_standard_transaction_options_plus_signing(register_proxy, "proxy@active"); register_proxy->callback([this] { fc::variant act_payload = fc::mutable_variant_object() ("proxy", proxy) ("isproxy", true); auto accountPermissions = get_account_permissions(tx_permission, {name(proxy), config::active_name}); - send_actions({create_action(accountPermissions, config::system_account_name, "regproxy"_n, act_payload)}); + send_actions({create_action(accountPermissions, config::system_account_name, "regproxy"_n, act_payload)}, signing_keys_opt.get_keys()); }); } }; @@ -1751,14 +1779,14 @@ struct unregproxy_subcommand { unregproxy_subcommand(CLI::App* actionRoot) { auto unregister_proxy = actionRoot->add_subcommand("unregproxy", localized("Unregister an account as a proxy (for voting)")); unregister_proxy->add_option("proxy", proxy, localized("The proxy account 
to unregister"))->required(); - add_standard_transaction_options(unregister_proxy, "proxy@active"); + add_standard_transaction_options_plus_signing(unregister_proxy, "proxy@active"); unregister_proxy->callback([this] { fc::variant act_payload = fc::mutable_variant_object() ("proxy", proxy) ("isproxy", false); auto accountPermissions = get_account_permissions(tx_permission, {name(proxy), config::active_name}); - send_actions({create_action(accountPermissions, config::system_account_name, "regproxy"_n, act_payload)}); + send_actions({create_action(accountPermissions, config::system_account_name, "regproxy"_n, act_payload)}, signing_keys_opt.get_keys()); }); } }; @@ -1773,7 +1801,7 @@ struct canceldelay_subcommand { cancel_delay->add_option("canceling_account", canceling_account, localized("Account from authorization on the original delayed transaction"))->required(); cancel_delay->add_option("canceling_permission", canceling_permission, localized("Permission from authorization on the original delayed transaction"))->required(); cancel_delay->add_option("trx_id", trx_id, localized("The transaction id of the original delayed transaction"))->required(); - add_standard_transaction_options(cancel_delay, "canceling_account@canceling_permission"); + add_standard_transaction_options_plus_signing(cancel_delay, "canceling_account@canceling_permission"); cancel_delay->callback([this] { auto canceling_auth = permission_level{name(canceling_account), name(canceling_permission)}; @@ -1781,7 +1809,7 @@ struct canceldelay_subcommand { ("canceling_auth", canceling_auth) ("trx_id", trx_id); auto accountPermissions = get_account_permissions(tx_permission, canceling_auth); - send_actions({create_action(accountPermissions, config::system_account_name, "canceldelay"_n, act_payload)}); + send_actions({create_action(accountPermissions, config::system_account_name, "canceldelay"_n, act_payload)}, signing_keys_opt.get_keys()); }); } }; @@ -1795,13 +1823,13 @@ struct deposit_subcommand { auto deposit = actionRoot->add_subcommand("deposit", localized("Deposit into owner's REX fund by transfering from owner's liquid token balance")); deposit->add_option("owner", owner_str, localized("Account which owns the REX fund"))->required(); deposit->add_option("amount", amount_str, localized("Amount to be deposited into REX fund"))->required(); - add_standard_transaction_options(deposit, "owner@active"); + add_standard_transaction_options_plus_signing(deposit, "owner@active"); deposit->callback([this] { fc::variant act_payload = fc::mutable_variant_object() ("owner", owner_str) ("amount", amount_str); auto accountPermissions = get_account_permissions(tx_permission, {name(owner_str), config::active_name}); - send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); + send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}, signing_keys_opt.get_keys()); }); } }; @@ -1815,13 +1843,13 @@ struct withdraw_subcommand { auto withdraw = actionRoot->add_subcommand("withdraw", localized("Withdraw from owner's REX fund by transfering to owner's liquid token balance")); withdraw->add_option("owner", owner_str, localized("Account which owns the REX fund"))->required(); withdraw->add_option("amount", amount_str, localized("Amount to be withdrawn from REX fund"))->required(); - add_standard_transaction_options(withdraw, "owner@active"); + add_standard_transaction_options_plus_signing(withdraw, "owner@active"); withdraw->callback([this] { fc::variant 
act_payload = fc::mutable_variant_object() ("owner", owner_str) ("amount", amount_str); auto accountPermissions = get_account_permissions(tx_permission, {name(owner_str), config::active_name}); - send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); + send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}, signing_keys_opt.get_keys()); }); } }; @@ -1835,13 +1863,13 @@ struct buyrex_subcommand { auto buyrex = actionRoot->add_subcommand("buyrex", localized("Buy REX using tokens in owner's REX fund")); buyrex->add_option("from", from_str, localized("Account buying REX tokens"))->required(); buyrex->add_option("amount", amount_str, localized("Amount to be taken from REX fund and used in buying REX"))->required(); - add_standard_transaction_options(buyrex, "from@active"); + add_standard_transaction_options_plus_signing(buyrex, "from@active"); buyrex->callback([this] { fc::variant act_payload = fc::mutable_variant_object() ("from", from_str) ("amount", amount_str); auto accountPermissions = get_account_permissions(tx_permission, {name(from_str), config::active_name}); - send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); + send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}, signing_keys_opt.get_keys()); }); } }; @@ -1856,7 +1884,7 @@ struct lendrex_subcommand { auto lendrex = actionRoot->add_subcommand("lendrex", localized("Deposit tokens to REX fund and use the tokens to buy REX")); lendrex->add_option("from", from_str, localized("Account buying REX tokens"))->required(); lendrex->add_option("amount", amount_str, localized("Amount of liquid tokens to be used in buying REX"))->required(); - add_standard_transaction_options(lendrex, "from@active"); + add_standard_transaction_options_plus_signing(lendrex, "from@active"); lendrex->callback([this] { fc::variant act_payload1 = fc::mutable_variant_object() ("owner", from_str) @@ -1866,7 +1894,7 @@ struct lendrex_subcommand { ("amount", amount_str); auto accountPermissions = get_account_permissions(tx_permission, {name(from_str), config::active_name}); send_actions({create_action(accountPermissions, config::system_account_name, act_name1, act_payload1), - create_action(accountPermissions, config::system_account_name, act_name2, act_payload2)}); + create_action(accountPermissions, config::system_account_name, act_name2, act_payload2)}, signing_keys_opt.get_keys()); }); } }; @@ -1884,7 +1912,7 @@ struct unstaketorex_subcommand { unstaketorex->add_option("receiver", receiver_str, localized("Account that tokens have been staked to"))->required(); unstaketorex->add_option("from_net", from_net_str, localized("Amount to be unstaked from Net resources and used in REX purchase"))->required(); unstaketorex->add_option("from_cpu", from_cpu_str, localized("Amount to be unstaked from CPU resources and used in REX purchase"))->required(); - add_standard_transaction_options(unstaketorex, "owner@active"); + add_standard_transaction_options_plus_signing(unstaketorex, "owner@active"); unstaketorex->callback([this] { fc::variant act_payload = fc::mutable_variant_object() ("owner", owner_str) @@ -1892,7 +1920,7 @@ struct unstaketorex_subcommand { ("from_net", from_net_str) ("from_cpu", from_cpu_str); auto accountPermissions = get_account_permissions(tx_permission, {name(owner_str), config::active_name}); - send_actions({create_action(accountPermissions, 
config::system_account_name, act_name, act_payload)}); + send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}, signing_keys_opt.get_keys()); }); } }; @@ -1906,13 +1934,13 @@ struct sellrex_subcommand { auto sellrex = actionRoot->add_subcommand("sellrex", localized("Sell REX tokens")); sellrex->add_option("from", from_str, localized("Account selling REX tokens"))->required(); sellrex->add_option("rex", rex_str, localized("Amount of REX tokens to be sold"))->required(); - add_standard_transaction_options(sellrex, "from@active"); + add_standard_transaction_options_plus_signing(sellrex, "from@active"); sellrex->callback([this] { fc::variant act_payload = fc::mutable_variant_object() ("from", from_str) ("rex", rex_str); auto accountPermissions = get_account_permissions(tx_permission, {name(from_str), config::active_name}); - send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); + send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}, signing_keys_opt.get_keys()); }); } }; @@ -1924,11 +1952,11 @@ struct cancelrexorder_subcommand { cancelrexorder_subcommand(CLI::App* actionRoot) { auto cancelrexorder = actionRoot->add_subcommand("cancelrexorder", localized("Cancel queued REX sell order if one exists")); cancelrexorder->add_option("owner", owner_str, localized("Owner account of sell order"))->required(); - add_standard_transaction_options(cancelrexorder, "owner@active"); + add_standard_transaction_options_plus_signing(cancelrexorder, "owner@active"); cancelrexorder->callback([this] { fc::variant act_payload = fc::mutable_variant_object()("owner", owner_str); auto accountPermissions = get_account_permissions(tx_permission, {name(owner_str), config::active_name}); - send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); + send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}, signing_keys_opt.get_keys()); }); } }; @@ -1946,7 +1974,7 @@ struct rentcpu_subcommand { rentcpu->add_option("receiver", receiver_str, localized("Account to whom rented CPU bandwidth is staked"))->required(); rentcpu->add_option("loan_payment", loan_payment_str, localized("Loan fee to be paid, used to calculate amount of rented bandwidth"))->required(); rentcpu->add_option("loan_fund", loan_fund_str, localized("Loan fund to be used in automatic renewal, can be 0 tokens"))->required(); - add_standard_transaction_options(rentcpu, "from@active"); + add_standard_transaction_options_plus_signing(rentcpu, "from@active"); rentcpu->callback([this] { fc::variant act_payload = fc::mutable_variant_object() ("from", from_str) @@ -1954,7 +1982,7 @@ struct rentcpu_subcommand { ("loan_payment", loan_payment_str) ("loan_fund", loan_fund_str); auto accountPermissions = get_account_permissions(tx_permission, {name(from_str), config::active_name}); - send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); + send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}, signing_keys_opt.get_keys()); }); } }; @@ -1972,7 +2000,7 @@ struct rentnet_subcommand { rentnet->add_option("receiver", receiver_str, localized("Account to whom rented Network bandwidth is staked"))->required(); rentnet->add_option("loan_payment", loan_payment_str, localized("Loan fee to be paid, used to calculate amount of rented bandwidth"))->required(); 
rentnet->add_option("loan_fund", loan_fund_str, localized("Loan fund to be used in automatic renewal, can be 0 tokens"))->required(); - add_standard_transaction_options(rentnet, "from@active"); + add_standard_transaction_options_plus_signing(rentnet, "from@active"); rentnet->callback([this] { fc::variant act_payload = fc::mutable_variant_object() ("from", from_str) @@ -1980,7 +2008,7 @@ struct rentnet_subcommand { ("loan_payment", loan_payment_str) ("loan_fund", loan_fund_str); auto accountPermissions = get_account_permissions(tx_permission, {name(from_str), config::active_name}); - send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); + send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}, signing_keys_opt.get_keys()); }); } }; @@ -1996,14 +2024,14 @@ struct fundcpuloan_subcommand { fundcpuloan->add_option("from", from_str, localized("Loan owner"))->required(); fundcpuloan->add_option("loan_num", loan_num_str, localized("Loan ID"))->required(); fundcpuloan->add_option("payment", payment_str, localized("Amount to be deposited"))->required(); - add_standard_transaction_options(fundcpuloan, "from@active"); + add_standard_transaction_options_plus_signing(fundcpuloan, "from@active"); fundcpuloan->callback([this] { fc::variant act_payload = fc::mutable_variant_object() ("from", from_str) ("loan_num", loan_num_str) ("payment", payment_str); auto accountPermissions = get_account_permissions(tx_permission, {name(from_str), config::active_name}); - send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); + send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}, signing_keys_opt.get_keys()); }); } }; @@ -2019,14 +2047,14 @@ struct fundnetloan_subcommand { fundnetloan->add_option("from", from_str, localized("Loan owner"))->required(); fundnetloan->add_option("loan_num", loan_num_str, localized("Loan ID"))->required(); fundnetloan->add_option("payment", payment_str, localized("Amount to be deposited"))->required(); - add_standard_transaction_options(fundnetloan, "from@active"); + add_standard_transaction_options_plus_signing(fundnetloan, "from@active"); fundnetloan->callback([this] { fc::variant act_payload = fc::mutable_variant_object() ("from", from_str) ("loan_num", loan_num_str) ("payment", payment_str); auto accountPermissions = get_account_permissions(tx_permission, {name(from_str), config::active_name}); - send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); + send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}, signing_keys_opt.get_keys()); }); } }; @@ -2042,14 +2070,14 @@ struct defcpuloan_subcommand { defcpuloan->add_option("from", from_str, localized("Loan owner"))->required(); defcpuloan->add_option("loan_num", loan_num_str, localized("Loan ID"))->required(); defcpuloan->add_option("amount", amount_str, localized("Amount to be withdrawn"))->required(); - add_standard_transaction_options(defcpuloan, "from@active"); + add_standard_transaction_options_plus_signing(defcpuloan, "from@active"); defcpuloan->callback([this] { fc::variant act_payload = fc::mutable_variant_object() ("from", from_str) ("loan_num", loan_num_str) ("amount", amount_str); auto accountPermissions = get_account_permissions(tx_permission, {name(from_str), config::active_name}); - send_actions({create_action(accountPermissions, 
config::system_account_name, act_name, act_payload)}); + send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}, signing_keys_opt.get_keys()); }); } }; @@ -2065,14 +2093,14 @@ struct defnetloan_subcommand { defnetloan->add_option("from", from_str, localized("Loan owner"))->required(); defnetloan->add_option("loan_num", loan_num_str, localized("Loan ID"))->required(); defnetloan->add_option("amount", amount_str, localized("Amount to be withdrawn"))->required(); - add_standard_transaction_options(defnetloan, "from@active"); + add_standard_transaction_options_plus_signing(defnetloan, "from@active"); defnetloan->callback([this] { fc::variant act_payload = fc::mutable_variant_object() ("from", from_str) ("loan_num", loan_num_str) ("amount", amount_str); auto accountPermissions = get_account_permissions(tx_permission, {name(from_str), config::active_name}); - send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); + send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}, signing_keys_opt.get_keys()); }); } }; @@ -2086,13 +2114,13 @@ struct mvtosavings_subcommand { auto mvtosavings = actionRoot->add_subcommand("mvtosavings", localized("Move REX tokens to savings bucket")); mvtosavings->add_option("owner", owner_str, localized("REX owner"))->required(); mvtosavings->add_option("rex", rex_str, localized("Amount of REX to be moved to savings bucket"))->required(); - add_standard_transaction_options(mvtosavings, "owner@active"); + add_standard_transaction_options_plus_signing(mvtosavings, "owner@active"); mvtosavings->callback([this] { fc::variant act_payload = fc::mutable_variant_object() ("owner", owner_str) ("rex", rex_str); auto accountPermissions = get_account_permissions(tx_permission, {name(owner_str), config::active_name}); - send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); + send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}, signing_keys_opt.get_keys()); }); } }; @@ -2106,13 +2134,13 @@ struct mvfrsavings_subcommand { auto mvfrsavings = actionRoot->add_subcommand("mvfromsavings", localized("Move REX tokens out of savings bucket")); mvfrsavings->add_option("owner", owner_str, localized("REX owner"))->required(); mvfrsavings->add_option("rex", rex_str, localized("Amount of REX to be moved out of savings bucket"))->required(); - add_standard_transaction_options(mvfrsavings, "owner@active"); + add_standard_transaction_options_plus_signing(mvfrsavings, "owner@active"); mvfrsavings->callback([this] { fc::variant act_payload = fc::mutable_variant_object() ("owner", owner_str) ("rex", rex_str); auto accountPermissions = get_account_permissions(tx_permission, {name(owner_str), config::active_name}); - send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); + send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}, signing_keys_opt.get_keys()); }); } }; @@ -2124,11 +2152,11 @@ struct updaterex_subcommand { updaterex_subcommand(CLI::App* actionRoot) { auto updaterex = actionRoot->add_subcommand("updaterex", localized("Update REX owner vote stake and vote weight")); updaterex->add_option("owner", owner_str, localized("REX owner"))->required(); - add_standard_transaction_options(updaterex, "owner@active"); + add_standard_transaction_options_plus_signing(updaterex, 
"owner@active"); updaterex->callback([this] { fc::variant act_payload = fc::mutable_variant_object()("owner", owner_str); auto accountPermissions = get_account_permissions(tx_permission, {name(owner_str), config::active_name}); - send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); + send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}, signing_keys_opt.get_keys()); }); } }; @@ -2140,11 +2168,11 @@ struct consolidate_subcommand { consolidate_subcommand(CLI::App* actionRoot) { auto consolidate = actionRoot->add_subcommand("consolidate", localized("Consolidate REX maturity buckets into one that matures in 4 days")); consolidate->add_option("owner", owner_str, localized("REX owner"))->required(); - add_standard_transaction_options(consolidate, "owner@active"); + add_standard_transaction_options_plus_signing(consolidate, "owner@active"); consolidate->callback([this] { fc::variant act_payload = fc::mutable_variant_object()("owner", owner_str); auto accountPermissions = get_account_permissions(tx_permission, {name(owner_str), config::active_name}); - send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); + send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}, signing_keys_opt.get_keys()); }); } }; @@ -2158,13 +2186,13 @@ struct rexexec_subcommand { auto rexexec = actionRoot->add_subcommand("rexexec", localized("Perform REX maintenance by processing expired loans and unfilled sell orders")); rexexec->add_option("user", user_str, localized("User executing the action"))->required(); rexexec->add_option("max", max_str, localized("Maximum number of CPU loans, Network loans, and sell orders to be processed"))->required(); - add_standard_transaction_options(rexexec, "user@active"); + add_standard_transaction_options_plus_signing(rexexec, "user@active"); rexexec->callback([this] { fc::variant act_payload = fc::mutable_variant_object() ("user", user_str) ("max", max_str); auto accountPermissions = get_account_permissions(tx_permission, {name(user_str), config::active_name}); - send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); + send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}, signing_keys_opt.get_keys()); }); } }; @@ -2176,11 +2204,11 @@ struct closerex_subcommand { closerex_subcommand(CLI::App* actionRoot) { auto closerex = actionRoot->add_subcommand("closerex", localized("Delete unused REX-related user table entries")); closerex->add_option("owner", owner_str, localized("REX owner"))->required(); - add_standard_transaction_options(closerex, "owner@active"); + add_standard_transaction_options_plus_signing(closerex, "owner@active"); closerex->callback([this] { fc::variant act_payload = fc::mutable_variant_object()("owner", owner_str); auto accountPermissions = get_account_permissions(tx_permission, {name(owner_str), config::active_name}); - send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); + send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}, signing_keys_opt.get_keys()); }); } }; @@ -3203,7 +3231,7 @@ int main( int argc, char** argv ) { actions.emplace_back( create_setcode(name(account), code_bytes ) ); if ( shouldSend ) { std::cerr << localized("Setting Code...") << std::endl; - 
send_actions(std::move(actions), packed_transaction::compression_type::zlib); + send_actions(std::move(actions), signing_keys_opt.get_keys(), packed_transaction::compression_type::zlib); } } else { std::cerr << localized("Skipping set code because the new code is the same as the existing code") << std::endl; @@ -3251,16 +3279,16 @@ int main( int argc, char** argv ) { } EOS_RETHROW_EXCEPTIONS(abi_type_exception, "Fail to parse ABI JSON") if ( shouldSend ) { std::cerr << localized("Setting ABI...") << std::endl; - send_actions(std::move(actions), packed_transaction::compression_type::zlib); + send_actions(std::move(actions), signing_keys_opt.get_keys(), packed_transaction::compression_type::zlib); } } else { std::cerr << localized("Skipping set abi because the new abi is the same as the existing abi") << std::endl; } }; - add_standard_transaction_options(contractSubcommand, "account@active"); - add_standard_transaction_options(codeSubcommand, "account@active"); - add_standard_transaction_options(abiSubcommand, "account@active"); + add_standard_transaction_options_plus_signing(contractSubcommand, "account@active"); + add_standard_transaction_options_plus_signing(codeSubcommand, "account@active"); + add_standard_transaction_options_plus_signing(abiSubcommand, "account@active"); contractSubcommand->callback([&] { if(!contract_clear) EOS_ASSERT( !contractPath.empty(), contract_exception, " contract-dir is null ", ("f", contractPath) ); shouldSend = false; @@ -3268,7 +3296,7 @@ int main( int argc, char** argv ) { set_abi_callback(); if (actions.size()) { std::cerr << localized("Publishing contract...") << std::endl; - send_actions(std::move(actions), packed_transaction::compression_type::zlib); + send_actions(std::move(actions), signing_keys_opt.get_keys(), packed_transaction::compression_type::zlib); } else { std::cout << "no transaction is sent" << std::endl; } @@ -3303,7 +3331,7 @@ int main( int argc, char** argv ) { transfer->add_option("--contract,-c", con, localized("The contract that controls the token")); transfer->add_flag("--pay-ram-to-open", pay_ram, localized("Pay RAM to open recipient's token balance row")); - add_standard_transaction_options(transfer, "sender@active"); + add_standard_transaction_options_plus_signing(transfer, "sender@active"); transfer->callback([&] { if (tx_force_unique && memo.size() == 0) { // use the memo to add a nonce @@ -3314,10 +3342,10 @@ int main( int argc, char** argv ) { auto transfer_amount = to_asset(name(con), amount); auto transfer = create_transfer(con, name(sender), name(recipient), transfer_amount, memo); if (!pay_ram) { - send_actions( { transfer }); + send_actions( { transfer }, signing_keys_opt.get_keys()); } else { auto open_ = create_open(con, name(recipient), transfer_amount.get_symbol(), name(sender)); - send_actions( { open_, transfer } ); + send_actions( { open_, transfer }, signing_keys_opt.get_keys()); } }); @@ -3615,7 +3643,7 @@ int main( int argc, char** argv ) { localized("A JSON string or filename defining the action to execute on the contract"), true)->required(); actionsSubcommand->add_option("data", data, localized("The arguments to the contract"))->required(); - add_standard_transaction_options(actionsSubcommand); + add_standard_transaction_options_plus_signing(actionsSubcommand); actionsSubcommand->callback([&] { fc::variant action_args_var; if( !data.empty() ) { @@ -3624,26 +3652,26 @@ int main( int argc, char** argv ) { auto accountPermissions = get_account_permissions(tx_permission); 
send_actions({chain::action{accountPermissions, name(contract_account), name(action), - variant_to_bin( name(contract_account), name(action), action_args_var ) }}); + variant_to_bin( name(contract_account), name(action), action_args_var ) }}, signing_keys_opt.get_keys()); }); // push transaction string trx_to_push; auto trxSubcommand = push->add_subcommand("transaction", localized("Push an arbitrary JSON transaction")); trxSubcommand->add_option("transaction", trx_to_push, localized("The JSON string or filename defining the transaction to push"))->required(); - add_standard_transaction_options(trxSubcommand); + add_standard_transaction_options_plus_signing(trxSubcommand); trxSubcommand->add_flag("-o,--read-only", tx_read_only, localized("Specify a transaction is read-only")); trxSubcommand->callback([&] { fc::variant trx_var = json_from_file_or_string(trx_to_push); try { signed_transaction trx = trx_var.as(); - std::cout << fc::json::to_pretty_string( push_transaction( trx )) << std::endl; + std::cout << fc::json::to_pretty_string( push_transaction( trx, signing_keys_opt.get_keys() )) << std::endl; } catch( const std::exception& ) { // unable to convert so try via abi signed_transaction trx; abi_serializer::from_variant( trx_var, trx, abi_serializer_resolver, abi_serializer::create_yield_function( abi_serializer_max_time ) ); - std::cout << fc::json::to_pretty_string( push_transaction( trx )) << std::endl; + std::cout << fc::json::to_pretty_string( push_transaction( trx, signing_keys_opt.get_keys() )) << std::endl; } }); @@ -3682,7 +3710,7 @@ int main( int argc, char** argv ) { }; auto propose_action = msig->add_subcommand("propose", localized("Propose action")); - add_standard_transaction_options(propose_action, "proposer@active"); + add_standard_transaction_options_plus_signing(propose_action, "proposer@active"); propose_action->add_option("proposal_name", proposal_name, localized("The proposal name (string)"))->required(); propose_action->add_option("requested_permissions", requested_perm, localized("The JSON string or filename defining requested permissions"))->required(); propose_action->add_option("trx_permissions", transaction_perm, localized("The JSON string or filename defining transaction permissions"))->required(); @@ -3743,12 +3771,12 @@ int main( int argc, char** argv ) { ("requested", requested_perm_var) ("trx", trx_var); - send_actions({chain::action{accountPermissions, "eosio.msig"_n, "propose"_n, variant_to_bin( "eosio.msig"_n, "propose"_n, args ) }}); + send_actions({chain::action{accountPermissions, "eosio.msig"_n, "propose"_n, variant_to_bin( "eosio.msig"_n, "propose"_n, args ) }}, signing_keys_opt.get_keys()); }); //multisig propose transaction auto propose_trx = msig->add_subcommand("propose_trx", localized("Propose transaction")); - add_standard_transaction_options(propose_trx, "proposer@active"); + add_standard_transaction_options_plus_signing(propose_trx, "proposer@active"); propose_trx->add_option("proposal_name", proposal_name, localized("The proposal name (string)"))->required(); propose_trx->add_option("requested_permissions", requested_perm, localized("The JSON string or filename defining requested permissions"))->required(); propose_trx->add_option("transaction", trx_to_push, localized("The JSON string or filename defining the transaction to push"))->required(); @@ -3776,7 +3804,7 @@ int main( int argc, char** argv ) { ("requested", requested_perm_var) ("trx", trx_var); - send_actions({chain::action{accountPermissions, "eosio.msig"_n, "propose"_n, 
variant_to_bin( "eosio.msig"_n, "propose"_n, args ) }}); + send_actions({chain::action{accountPermissions, "eosio.msig"_n, "propose"_n, variant_to_bin( "eosio.msig"_n, "propose"_n, args ) }}, signing_keys_opt.get_keys()); }); @@ -3993,12 +4021,12 @@ int main( int argc, char** argv ) { } auto accountPermissions = get_account_permissions(tx_permission, {name(proposer), config::active_name}); - send_actions({chain::action{accountPermissions, "eosio.msig"_n, name(action), variant_to_bin( "eosio.msig"_n, name(action), args ) }}); + send_actions({chain::action{accountPermissions, "eosio.msig"_n, name(action), variant_to_bin( "eosio.msig"_n, name(action), args ) }}, signing_keys_opt.get_keys()); }; // multisig approve auto approve = msig->add_subcommand("approve", localized("Approve proposed transaction")); - add_standard_transaction_options(approve, "proposer@active"); + add_standard_transaction_options_plus_signing(approve, "proposer@active"); approve->add_option("proposer", proposer, localized("The proposer name (string)"))->required(); approve->add_option("proposal_name", proposal_name, localized("The proposal name (string)"))->required(); approve->add_option("permissions", perm, localized("The JSON string of filename defining approving permissions"))->required(); @@ -4007,7 +4035,7 @@ int main( int argc, char** argv ) { // multisig unapprove auto unapprove = msig->add_subcommand("unapprove", localized("Unapprove proposed transaction")); - add_standard_transaction_options(unapprove, "proposer@active"); + add_standard_transaction_options_plus_signing(unapprove, "proposer@active"); unapprove->add_option("proposer", proposer, localized("The proposer name (string)"))->required(); unapprove->add_option("proposal_name", proposal_name, localized("The proposal name (string)"))->required(); unapprove->add_option("permissions", perm, localized("The JSON string of filename defining approving permissions"))->required(); @@ -4016,20 +4044,20 @@ int main( int argc, char** argv ) { // multisig invalidate string invalidator; auto invalidate = msig->add_subcommand("invalidate", localized("Invalidate all multisig approvals of an account")); - add_standard_transaction_options(invalidate, "invalidator@active"); - invalidate->add_option("invalidator", invalidator, localized("invalidator name (string)"))->required(); + add_standard_transaction_options_plus_signing(invalidate, "invalidator@active"); + invalidate->add_option("invalidator", invalidator, localized("Invalidator name (string)"))->required(); invalidate->callback([&] { auto args = fc::mutable_variant_object() ("account", invalidator); auto accountPermissions = get_account_permissions(tx_permission, {name(invalidator), config::active_name}); - send_actions({chain::action{accountPermissions, "eosio.msig"_n, "invalidate"_n, variant_to_bin( "eosio.msig"_n, "invalidate"_n, args ) }}); + send_actions({chain::action{accountPermissions, "eosio.msig"_n, "invalidate"_n, variant_to_bin( "eosio.msig"_n, "invalidate"_n, args ) }}, signing_keys_opt.get_keys()); }); // multisig cancel string canceler; auto cancel = msig->add_subcommand("cancel", localized("Cancel proposed transaction")); - add_standard_transaction_options(cancel, "canceler@active"); + add_standard_transaction_options_plus_signing(cancel, "canceler@active"); cancel->add_option("proposer", proposer, localized("The proposer name (string)"))->required(); cancel->add_option("proposal_name", proposal_name, localized("proposal name (string)"))->required(); cancel->add_option("canceler", canceler, 
localized("The canceler name (string)")); @@ -4050,14 +4078,14 @@ int main( int argc, char** argv ) { ("proposal_name", proposal_name) ("canceler", canceler); - send_actions({chain::action{accountPermissions, "eosio.msig"_n, "cancel"_n, variant_to_bin( "eosio.msig"_n, "cancel"_n, args ) }}); + send_actions({chain::action{accountPermissions, "eosio.msig"_n, "cancel"_n, variant_to_bin( "eosio.msig"_n, "cancel"_n, args ) }}, signing_keys_opt.get_keys()); } ); // multisig exec string executer; auto exec = msig->add_subcommand("exec", localized("Execute proposed transaction")); - add_standard_transaction_options(exec, "executer@active"); + add_standard_transaction_options_plus_signing(exec, "executer@active"); exec->add_option("proposer", proposer, localized("The proposer name (string)"))->required(); exec->add_option("proposal_name", proposal_name, localized("The proposal name (string)"))->required(); exec->add_option("executer", executer, localized("The account paying for execution (string)")); @@ -4079,7 +4107,7 @@ int main( int argc, char** argv ) { ("proposal_name", proposal_name) ("executer", executer); - send_actions({chain::action{accountPermissions, "eosio.msig"_n, "exec"_n, variant_to_bin( "eosio.msig"_n, "exec"_n, args ) }}); + send_actions({chain::action{accountPermissions, "eosio.msig"_n, "exec"_n, variant_to_bin( "eosio.msig"_n, "exec"_n, args ) }}, signing_keys_opt.get_keys()); } ); @@ -4092,7 +4120,7 @@ int main( int argc, char** argv ) { executer = ""; string trx_to_exec; auto wrap_exec = wrap->add_subcommand("exec", localized("Execute a transaction while bypassing authorization checks")); - add_standard_transaction_options(wrap_exec, "executer@active & --contract@active"); + add_standard_transaction_options_plus_signing(wrap_exec, "executer@active & --contract@active"); wrap_exec->add_option("executer", executer, localized("Account executing the transaction and paying for the deferred transaction RAM"))->required(); wrap_exec->add_option("transaction", trx_to_exec, localized("The JSON string or filename defining the transaction to execute"))->required(); wrap_exec->add_option("--contract,-c", wrap_con, localized("The account which controls the wrap contract")); @@ -4109,7 +4137,7 @@ int main( int argc, char** argv ) { ("executer", executer ) ("trx", trx_var); - send_actions({chain::action{accountPermissions, name(wrap_con), "exec"_n, variant_to_bin( name(wrap_con), "exec"_n, args ) }}); + send_actions({chain::action{accountPermissions, name(wrap_con), "exec"_n, variant_to_bin( name(wrap_con), "exec"_n, args ) }}, signing_keys_opt.get_keys()); }); // system subcommand diff --git a/tests/Cluster.py b/tests/Cluster.py index 9c3bbb685c..df1bf1b499 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -1109,7 +1109,7 @@ def bootstrap(self, biosNode, totalNodes, prodCount, totalProducers, pfSetupPoli wasmFile="%s.wasm" % (contract) abiFile="%s.abi" % (contract) Utils.Print("Publish %s contract" % (contract)) - trans=biosNode.publishContract(eosioAccount.name, contractDir, wasmFile, abiFile, waitForTransBlock=True) + trans=biosNode.publishContract(eosioAccount, contractDir, wasmFile, abiFile, waitForTransBlock=True) if trans is None: Utils.Print("ERROR: Failed to publish contract %s." 
% (contract)) return None @@ -1238,7 +1238,7 @@ def bootstrap(self, biosNode, totalNodes, prodCount, totalProducers, pfSetupPoli wasmFile="%s.wasm" % (contract) abiFile="%s.abi" % (contract) Utils.Print("Publish %s contract" % (contract)) - trans=biosNode.publishContract(eosioTokenAccount.name, contractDir, wasmFile, abiFile, waitForTransBlock=True) + trans=biosNode.publishContract(eosioTokenAccount, contractDir, wasmFile, abiFile, waitForTransBlock=True) if trans is None: Utils.Print("ERROR: Failed to publish contract %s." % (contract)) return None @@ -1294,7 +1294,7 @@ def bootstrap(self, biosNode, totalNodes, prodCount, totalProducers, pfSetupPoli wasmFile="%s.wasm" % (contract) abiFile="%s.abi" % (contract) Utils.Print("Publish %s contract" % (contract)) - trans=biosNode.publishContract(eosioAccount.name, contractDir, wasmFile, abiFile, waitForTransBlock=True) + trans=biosNode.publishContract(eosioAccount, contractDir, wasmFile, abiFile, waitForTransBlock=True) if trans is None: Utils.Print("ERROR: Failed to publish contract %s." % (contract)) return None diff --git a/tests/Node.py b/tests/Node.py index bc133bedd1..63a65b1c0e 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -372,10 +372,11 @@ def isTransFinalized(self, transId): # Create & initialize account and return creation transactions. Return transaction json object - def createInitializeAccount(self, account, creatorAccount, stakedDeposit=1000, waitForTransBlock=False, stakeNet=100, stakeCPU=100, buyRAM=10000, exitOnError=False, additionalArgs=''): + def createInitializeAccount(self, account, creatorAccount, stakedDeposit=1000, waitForTransBlock=False, stakeNet=100, stakeCPU=100, buyRAM=10000, exitOnError=False, sign=False, additionalArgs=''): + signStr = Node.__sign_str(sign, [ creatorAccount.activePublicKey ]) cmdDesc="system newaccount" - cmd='%s -j %s %s %s %s --stake-net "%s %s" --stake-cpu "%s %s" --buy-ram "%s %s" %s' % ( - cmdDesc, creatorAccount.name, account.name, account.ownerPublicKey, + cmd='%s -j %s %s %s %s %s --stake-net "%s %s" --stake-cpu "%s %s" --buy-ram "%s %s" %s' % ( + cmdDesc, signStr, creatorAccount.name, account.name, account.ownerPublicKey, account.activePublicKey, stakeNet, CORE_SYMBOL, stakeCPU, CORE_SYMBOL, buyRAM, CORE_SYMBOL, additionalArgs) msg="(creator account=%s, account=%s)" % (creatorAccount.name, account.name); trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError, exitMsg=msg) @@ -389,12 +390,13 @@ def createInitializeAccount(self, account, creatorAccount, stakedDeposit=1000, w return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) - def createAccount(self, account, creatorAccount, stakedDeposit=1000, waitForTransBlock=False, exitOnError=False): + def createAccount(self, account, creatorAccount, stakedDeposit=1000, waitForTransBlock=False, exitOnError=False, sign=False): """Create account and return creation transactions. Return transaction json object. 
waitForTransBlock: wait on creation transaction id to appear in a block.""" + signStr = Node.__sign_str(sign, [ creatorAccount.activePublicKey ]) cmdDesc="create account" - cmd="%s -j %s %s %s %s" % ( - cmdDesc, creatorAccount.name, account.name, account.ownerPublicKey, account.activePublicKey) + cmd="%s -j %s %s %s %s %s" % ( + cmdDesc, signStr, creatorAccount.name, account.name, account.ownerPublicKey, account.activePublicKey) msg="(creator account=%s, account=%s)" % (creatorAccount.name, account.name); trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError, exitMsg=msg) self.trackCmdTransaction(trans) @@ -524,7 +526,7 @@ def __call__(self): def waitForIrreversibleBlock(self, blockNum, timeout=None, reportInterval=None): return self.waitForBlock(blockNum, timeout=timeout, blockType=BlockType.lib, reportInterval=reportInterval) - def __transferFundsCmdArr(self, source, destination, amountStr, memo, force, retry): + def __transferFundsCmdArr(self, source, destination, amountStr, memo, force, retry, sign): assert isinstance(amountStr, str) assert(source) assert(isinstance(source, Account)) @@ -534,6 +536,13 @@ def __transferFundsCmdArr(self, source, destination, amountStr, memo, force, ret cmd="%s %s -v transfer --expiration 90 %s -j %s %s" % ( Utils.EosClientPath, self.eosClientArgs(), self.getRetryCmdArg(retry), source.name, destination.name) cmdArr=cmd.split() + # not using __sign_str, since cmdArr messes up the string + if sign: + cmdArr.append("--sign-with") + cmdArr.append("[ \"%s\" ]" % (source.activePublicKey)) + + cmdArr.append(source.name) + cmdArr.append(destination.name) cmdArr.append(amountStr) cmdArr.append(memo) if force: @@ -543,8 +552,8 @@ def __transferFundsCmdArr(self, source, destination, amountStr, memo, force, ret return cmdArr # Trasfer funds. Returns "transfer" json return object - def transferFunds(self, source, destination, amountStr, memo="memo", force=False, waitForTransBlock=False, exitOnError=True, reportStatus=True, retry=None): - cmdArr = self.__transferFundsCmdArr(source, destination, amountStr, memo, force, retry) + def transferFunds(self, source, destination, amountStr, memo="memo", force=False, waitForTransBlock=False, exitOnError=True, reportStatus=True, retry=None, sign=False): + cmdArr = self.__transferFundsCmdArr(source, destination, amountStr, memo, force, retry, sign) trans=None start=time.perf_counter() try: @@ -569,8 +578,8 @@ def transferFunds(self, source, destination, amountStr, memo="memo", force=False return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) # Trasfer funds. 
Returns (popen, cmdArr) for checkDelayedOutput - def transferFundsAsync(self, source, destination, amountStr, memo="memo", force=False, exitOnError=True, retry=None): - cmdArr = self.__transferFundsCmdArr(source, destination, amountStr, memo, force, retry) + def transferFundsAsync(self, source, destination, amountStr, memo="memo", force=False, exitOnError=True, retry=None, sign=False): + cmdArr = self.__transferFundsCmdArr(source, destination, amountStr, memo, force, retry, sign) start=time.perf_counter() try: popen=Utils.delayedCheckOutput(cmdArr) @@ -744,8 +753,9 @@ def getAccountCodeHash(self, account): return None # publish contract and return transaction as json object - def publishContract(self, account, contractDir, wasmFile, abiFile, waitForTransBlock=False, shouldFail=False): - cmd="%s %s -v set contract -j %s %s" % (Utils.EosClientPath, self.eosClientArgs(), account, contractDir) + def publishContract(self, account, contractDir, wasmFile, abiFile, waitForTransBlock=False, shouldFail=False, sign=False): + signStr = Node.__sign_str(sign, [ account.activePublicKey ]) + cmd="%s %s -v set contract -j %s %s %s" % (Utils.EosClientPath, self.eosClientArgs(), signStr, account.name, contractDir) cmd += "" if wasmFile is None else (" "+ wasmFile) cmd += "" if abiFile is None else (" " + abiFile) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) @@ -846,9 +856,13 @@ def pushTransaction(self, trans, opts="", silentErrors=False, permissions=None): return (False, msg) # returns tuple with transaction execution status and transaction - def pushMessage(self, account, action, data, opts, silentErrors=False): + def pushMessage(self, account, action, data, opts, silentErrors=False, signatures=None): cmd="%s %s push action -j %s %s" % (Utils.EosClientPath, self.eosClientArgs(), account, action) cmdArr=cmd.split() + # not using __sign_str, since cmdArr messes up the string + if signatures is not None: + cmdArr.append("--sign-with") + cmdArr.append("[ \"%s\" ]" % ("\", \"".join(signatures))) if data is not None: cmdArr.append(data) if opts is not None: @@ -870,55 +884,72 @@ def pushMessage(self, account, action, data, opts, silentErrors=False): Utils.Print("ERROR: Exception during push message. cmd Duration=%.3f sec. 
%s" % (end - start, msg)) return (False, msg) - def setPermission(self, account, code, pType, requirement, waitForTransBlock=False, exitOnError=False): + @staticmethod + def __sign_str(sign, keys): + assert(isinstance(sign, bool)) + assert(isinstance(keys, list)) + if not sign: + return "" + + return "--sign-with '[ \"" + "\", \"".join(keys) + "\" ]'" + + def setPermission(self, account, code, pType, requirement, waitForTransBlock=False, exitOnError=False, sign=False): + assert(isinstance(account, Account)) + assert(isinstance(code, Account)) + signStr = Node.__sign_str(sign, [ account.activePublicKey ]) + Utils.Print("REMOVE signStr: <%s>" % (signStr)) cmdDesc="set action permission" - cmd="%s -j %s %s %s %s" % (cmdDesc, account, code, pType, requirement) + cmd="%s -j %s %s %s %s %s" % (cmdDesc, signStr, account.name, code.name, pType, requirement) trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError) self.trackCmdTransaction(trans) return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) - def delegatebw(self, fromAccount, netQuantity, cpuQuantity, toAccount=None, transferTo=False, waitForTransBlock=False, exitOnError=False, reportStatus=True): + def delegatebw(self, fromAccount, netQuantity, cpuQuantity, toAccount=None, transferTo=False, waitForTransBlock=False, exitOnError=False, reportStatus=True, sign=False): if toAccount is None: toAccount=fromAccount + signStr = Node.__sign_str(sign, [ fromAccount.activePublicKey ]) cmdDesc="system delegatebw" transferStr="--transfer" if transferTo else "" - cmd="%s -j %s %s \"%s %s\" \"%s %s\" %s" % ( - cmdDesc, fromAccount.name, toAccount.name, netQuantity, CORE_SYMBOL, cpuQuantity, CORE_SYMBOL, transferStr) + cmd="%s -j %s %s %s \"%s %s\" \"%s %s\" %s" % ( + cmdDesc, signStr, fromAccount.name, toAccount.name, netQuantity, CORE_SYMBOL, cpuQuantity, CORE_SYMBOL, transferStr) msg="fromAccount=%s, toAccount=%s" % (fromAccount.name, toAccount.name); trans=self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) self.trackCmdTransaction(trans, reportStatus=reportStatus) return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) - def undelegatebw(self, fromAccount, netQuantity, cpuQuantity, toAccount=None, waitForTransBlock=False, exitOnError=False): + def undelegatebw(self, fromAccount, netQuantity, cpuQuantity, toAccount=None, waitForTransBlock=False, exitOnError=False, sign=False): if toAccount is None: toAccount=fromAccount + signStr = Node.__sign_str(sign, [ fromAccount.activePublicKey ]) cmdDesc="system undelegatebw" - cmd="%s -j %s %s \"%s %s\" \"%s %s\"" % ( - cmdDesc, fromAccount.name, toAccount.name, netQuantity, CORE_SYMBOL, cpuQuantity, CORE_SYMBOL) + cmd="%s -j %s %s %s \"%s %s\" \"%s %s\"" % ( + cmdDesc, signStr, fromAccount.name, toAccount.name, netQuantity, CORE_SYMBOL, cpuQuantity, CORE_SYMBOL) msg="fromAccount=%s, toAccount=%s" % (fromAccount.name, toAccount.name); trans=self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) self.trackCmdTransaction(trans) return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) - def regproducer(self, producer, url, location, waitForTransBlock=False, exitOnError=False): + def regproducer(self, producer, url, location, waitForTransBlock=False, exitOnError=False, sign=False): + signStr = Node.__sign_str(sign, [ producer.activePublicKey ]) cmdDesc="system regproducer" - cmd="%s -j %s %s %s %s" % ( - cmdDesc, producer.name, 
producer.activePublicKey, url, location) + cmd="%s -j %s %s %s %s %s" % ( + cmdDesc, signStr, producer.name, producer.activePublicKey, url, location) msg="producer=%s" % (producer.name); trans=self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) self.trackCmdTransaction(trans) return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) - def vote(self, account, producers, waitForTransBlock=False, exitOnError=False): + def vote(self, account, producers, waitForTransBlock=False, exitOnError=False, sign=False): + signStr = Node.__sign_str(sign, [ account.activePublicKey ]) cmdDesc = "system voteproducer prods" - cmd="%s -j %s %s" % ( - cmdDesc, account.name, " ".join(producers)) + cmd="%s -j %s %s %s" % ( + cmdDesc, signStr, account.name, " ".join(producers)) msg="account=%s, producers=[ %s ]" % (account.name, ", ".join(producers)); trans=self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) self.trackCmdTransaction(trans) diff --git a/tests/consensus-validation-malicious-producers.py b/tests/consensus-validation-malicious-producers.py new file mode 100755 index 0000000000..16b98f8408 --- /dev/null +++ b/tests/consensus-validation-malicious-producers.py @@ -0,0 +1,398 @@ +#!/usr/bin/env python3 + +import testUtils + +import argparse +import signal +from collections import namedtuple +import os +import shutil + +############################################################### +# Test for validating consensus based block production. We introduce malicious producers which +# reject all transactions. +# We have three test scenarios: +# - No malicious producers. Transactions should be incorporated into the chain. +# - Minority malicious producers (less than a third producer count). Transactions will get incorporated +# into the chain as the majority approves the transactions. +# - Majority malicious producer count (greater than a third producer count). Transactions won't get +# incorporated into the chain as the majority rejects the transactions. 
+############################################################### + + +Print=testUtils.Utils.Print +errorExit=testUtils.Utils.errorExit + +StagedNodeInfo=namedtuple("StagedNodeInfo", "config logging") + + +logging00="""{ + "includes": [], + "appenders": [{ + "name": "stderr", + "type": "console", + "args": { + "stream": "std_error", + "level_colors": [{ + "level": "debug", + "color": "green" + },{ + "level": "warn", + "color": "brown" + },{ + "level": "error", + "color": "red" + } + ] + }, + "enabled": true + },{ + "name": "stdout", + "type": "console", + "args": { + "stream": "std_out", + "level_colors": [{ + "level": "debug", + "color": "green" + },{ + "level": "warn", + "color": "brown" + },{ + "level": "error", + "color": "red" + } + ] + }, + "enabled": true + },{ + "name": "net", + "type": "gelf", + "args": { + "endpoint": "10.160.11.21:12201", + "host": "testnet_00" + }, + "enabled": true + } + ], + "loggers": [{ + "name": "default", + "level": "debug", + "enabled": true, + "additivity": false, + "appenders": [ + "stderr", + "net" + ] + } + ] +}""" + +config00="""genesis-json = ./genesis.json +block-log-dir = blocks +readonly = 0 +send-whole-blocks = true +shared-file-dir = blockchain +shared-file-size = 8192 +http-server-address = 127.0.0.1:8888 +p2p-listen-endpoint = 0.0.0.0:9876 +p2p-server-address = localhost:9876 +allowed-connection = any +p2p-peer-address = localhost:9877 +required-participation = true +private-key = ["EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV","5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"] +producer-name = initu +plugin = eosio::producer_plugin +plugin = eosio::chain_api_plugin +plugin = eosio::history_plugin +plugin = eosio::history_api_plugin""" + + +config01="""genesis-json = ./genesis.json +block-log-dir = blocks +readonly = 0 +send-whole-blocks = true +shared-file-dir = blockchain +shared-file-size = 8192 +http-server-address = 127.0.0.1:8889 +p2p-listen-endpoint = 0.0.0.0:9877 +p2p-server-address = localhost:9877 +allowed-connection = any +p2p-peer-address = localhost:9876 +required-participation = true +private-key = ["EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV","5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"] +producer-name = defproducerb +plugin = eosio::producer_plugin +plugin = eosio::chain_api_plugin +plugin = eosio::history_plugin +plugin = eosio::history_api_plugin""" + + +producers="""producer-name = defproducerd +producer-name = defproducerf +producer-name = defproducerh +producer-name = defproducerj +producer-name = defproducerl +producer-name = defproducern +producer-name = defproducerp +producer-name = defproducerr +producer-name = defproducert +producer-name = defproducera +producer-name = defproducerc +producer-name = defproducere +producer-name = defproducerg +producer-name = defproduceri +producer-name = defproducerk +producer-name = defproducerm +producer-name = defproducero +producer-name = defproducerq +producer-name = defproducers""" + +zeroExecTime="trans-execution-time = 0" + +def getNoMaliciousStagedNodesInfo(): + stagedNodesInfo=[] + myConfig00=config00 + stagedNodesInfo.append(StagedNodeInfo(myConfig00, logging00)) + myConfig01=config01+"\n"+producers + stagedNodesInfo.append(StagedNodeInfo(myConfig01, logging00)) + return stagedNodesInfo + +def getMinorityMaliciousProducerStagedNodesInfo(): + stagedNodesInfo=[] + myConfig00=config00+"\n"+producers + stagedNodesInfo.append(StagedNodeInfo(myConfig00, logging00)) + myConfig01=config01+"\n"+zeroExecTime + stagedNodesInfo.append(StagedNodeInfo(myConfig01, 
logging00)) + return stagedNodesInfo + +def getMajorityMaliciousProducerStagedNodesInfo(): + stagedNodesInfo=[] + myConfig00=config00 + stagedNodesInfo.append(StagedNodeInfo(myConfig00, logging00)) + myConfig01=config01+"\n"+producers+"\n"+zeroExecTime + stagedNodesInfo.append(StagedNodeInfo(myConfig01, logging00)) + return stagedNodesInfo + +stagingDir="staging" +def stageScenario(stagedNodeInfos): + assert(stagedNodeInfos != None) + assert(len(stagedNodeInfos) > 1) + + os.makedirs(stagingDir) + count=0 + for stagedNodeInfo in stagedNodeInfos: + configPath=os.path.join(stagingDir, "etc/eosio/node_%02d" % (count)) + os.makedirs(configPath) + with open(os.path.join(configPath, "config.ini"), "w") as textFile: + print(stagedNodeInfo.config,file=textFile) + with open(os.path.join(configPath, "logging.json"), "w") as textFile: + print(stagedNodeInfo.logging,file=textFile) + count += 1 + return + +def cleanStaging(): + os.path.exists(stagingDir) and shutil.rmtree(stagingDir) + +def error(msg="", errorCode=1): + Print("ERROR:", msg) + +parser = argparse.ArgumentParser() +tests=[1,2,3] + +parser.add_argument("-t", "--tests", type=str, help="1|2|3 1=run no malicious producers test, 2=minority malicious, 3=majority malicious.", default=None) +parser.add_argument("-w", type=int, help="system wait time", default=testUtils.Utils.systemWaitTimeout) +parser.add_argument("-v", help="verbose logging", action='store_true') +parser.add_argument("--dump-error-details", + help="Upon error print etc/eosio/node_*/config.ini and var/lib/node_*/stderr.log to stdout", + action='store_true') +parser.add_argument("--keep-logs", help="Don't delete var/lib/node_* folders upon test completion", + action='store_true') +parser.add_argument("--not-noon", help="This is not the Noon branch.", action='store_true') +parser.add_argument("--dont-kill", help="Leave cluster running after test finishes", action='store_true') + +args = parser.parse_args() +testsArg=args.tests +debug=args.v +waitTimeout=args.w +dumpErrorDetails=args.dump-error-details +keepLogs=args.keep-logs +amINoon=not args.not_noon +killEosInstances= not args.dont-kill +killWallet= not args.dont-kill + +testUtils.Utils.Debug=debug + +assert (testsArg is None or testsArg == "1" or testsArg == "2" or testsArg == "3") +if testsArg is not None: + tests=[int(testsArg)] + +testUtils.Utils.setSystemWaitTimeout(waitTimeout) +testUtils.Utils.iAmNotNoon() + +def myTest(transWillEnterBlock): + testSuccessful=False + + cluster=testUtils.Cluster(walletd=True, staging=True) + walletMgr=testUtils.WalletMgr(True) + + try: + cluster.killall() + cluster.cleanup() + walletMgr.killall() + walletMgr.cleanup() + + pnodes=2 + total_nodes=pnodes + topo="mesh" + delay=0 + Print("Stand up cluster") + if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo=topo, delay=delay) is False: + error("Failed to stand up eos cluster.") + return False + + accounts=testUtils.Cluster.createAccountKeys(1) + if accounts is None: + error("FAILURE - create keys") + return False + currencyAccount=accounts[0] + currencyAccount.name="currency0000" + + testWalletName="test" + Print("Creating wallet \"%s\"." % (testWalletName)) + testWallet=walletMgr.create(testWalletName) + + for account in accounts: + Print("Importing keys for account %s into wallet %s." 
% (account.name, testWallet.name)) + if not walletMgr.importKey(account, testWallet): + error("Failed to import key for account %s" % (account.name)) + return False + + node=cluster.getNode(0) + node2=cluster.getNode(1) + + defproduceraAccount=testUtils.Cluster.defproduceraAccount + + Print("Importing keys for account %s into wallet %s." % (defproduceraAccount.name, testWallet.name)) + if not walletMgr.importKey(defproduceraAccount, testWallet): + error("Failed to import key for account %s" % (defproduceraAccount.name)) + return False + + Print("Create new account %s via %s" % (currencyAccount.name, defproduceraAccount.name)) + transId=node.createAccount(currencyAccount, defproduceraAccount, stakedDeposit=5000, waitForTransBlock=True) + if transId is None: + error("Failed to create account %s" % (currencyAccount.name)) + return False + + wasmFile="currency.wasm" + abiFile="currency.abi" + Print("Publish contract") + trans=node.publishContract(currencyAccount, wasmFile, abiFile, waitForTransBlock=True) + if trans is None: + error("Failed to publish contract.") + return False + + Print("push transfer action to currency0000 contract") + contract="currency0000" + action="transfer" + data="{\"from\":\"currency0000\",\"to\":\"defproducera\",\"quantity\":" + if amINoon: + data +="\"00.0050 CUR\",\"memo\":\"test\"}" + else: + data +="50}" + opts="--permission currency0000@active" + if not amINoon: + opts += " --scope currency0000,defproducera" + + trans=node.pushMessage(contract, action, data, opts, silentErrors=True) + transInBlock=False + if not trans[0]: + # On slower systems e.g Travis the transaction rejection can happen immediately + # We want to handle fast and slow failures. + if "allocated processing time was exceeded" in trans[1]: + Print("Push message transaction immediately failed.") + else: + error("Exception in push message. %s" % (trans[1])) + return False + + else: + transId=testUtils.Node.getTransId(trans[1]) + + Print("verify transaction exists") + if not node2.waitForTransInBlock(transId): + error("Transaction never made it to node2") + return False + + Print("Get details for transaction %s" % (transId)) + transaction=node2.getTransaction(transId, exitOnError=True) + signature=transaction["transaction"]["signatures"][0] + + blockNum=int(transaction["transaction"]["ref_block_num"]) + blockNum += 1 + Print("Our transaction is in block %d" % (blockNum)) + + block=node2.getBlock(blockNum, exitOnError=True) + cycles=block["cycles"] + if len(cycles) > 0: + blockTransSignature=cycles[0][0]["user_input"][0]["signatures"][0] + # Print("Transaction signature: %s\nBlock transaction signature: %s" % + # (signature, blockTransSignature)) + transInBlock=(signature == blockTransSignature) + + if transWillEnterBlock: + if not transInBlock: + error("Transaction did not enter the chain.") + return False + else: + Print("SUCCESS: Transaction1 entered in the chain.") + elif not transWillEnterBlock: + if transInBlock: + error("Transaction entered the chain.") + return False + else: + Print("SUCCESS: Transaction2 did not enter the chain.") + + testSuccessful=True + finally: + if not testSuccessful and dumpErrorDetails: + cluster.dumpErrorDetails() + walletMgr.dumpErrorDetails() + Print("== Errors see above ==") + + if killEosInstances: + Print("Shut down the cluster%s" % (" and cleanup." 
if (testSuccessful and not keepLogs) else ".")) + cluster.killall() + walletMgr.killall() + if testSuccessful and not keepLogs: + Print("Cleanup cluster and wallet data.") + cluster.cleanup() + walletMgr.cleanup() + + return True + + +try: + if 1 in tests: + Print("Cluster with no malicious producers. All producers expected to approve transaction. Hence transaction is expected to enter the chain.") + cleanStaging() + stageScenario(getNoMaliciousStagedNodesInfo()) + if not myTest(True): + exit(1) + + if 2 in tests: + Print("\nCluster with minority(1) malicious nodes. Majority producers expected to approve transaction. Hence transaction is expected to enter the chain.") + cleanStaging() + stageScenario(getMinorityMaliciousProducerStagedNodesInfo()) + if not myTest(True): + exit(1) + + if 3 in tests: + Print("\nCluster with majority(20) malicious nodes. Majority producers expected to block transaction. Hence transaction is not expected to enter the chain.") + cleanStaging() + stageScenario(getMajorityMaliciousProducerStagedNodesInfo()) + if not myTest(False): + exit(1) + +finally: + cleanStaging() + +exit(0) diff --git a/tests/nodeos_run_test.py b/tests/nodeos_run_test.py index 7324891347..4f96715577 100755 --- a/tests/nodeos_run_test.py +++ b/tests/nodeos_run_test.py @@ -309,7 +309,7 @@ wasmFile="eosio.token.wasm" abiFile="eosio.token.abi" Print("Publish contract") - trans=node.publishContract(currencyAccount.name, contractDir, wasmFile, abiFile, waitForTransBlock=True) + trans=node.publishContract(currencyAccount, contractDir, wasmFile, abiFile, waitForTransBlock=True) if trans is None: cmdError("%s set contract currency1111" % (ClientName)) errorExit("Failed to publish contract.") @@ -576,12 +576,13 @@ errorExit("Failed to lock wallet %s" % (defproduceraWallet.name)) + simpleDB = Account("simpledb") contractDir="contracts/simpledb" wasmFile="simpledb.wasm" abiFile="simpledb.abi" Print("Setting simpledb contract without simpledb account was causing core dump in %s." % (ClientName)) Print("Verify %s generates an error, but does not core dump." 
% (ClientName)) - retMap=node.publishContract("simpledb", contractDir, wasmFile, abiFile, shouldFail=True) + retMap=node.publishContract(simpleDB, contractDir, wasmFile, abiFile, shouldFail=True) if retMap is None: errorExit("Failed to publish, but should have returned a details map") if retMap["returncode"] == 0 or retMap["returncode"] == 139: # 139 SIGSEGV @@ -590,14 +591,13 @@ Print("Test successful, %s returned error code: %d" % (ClientName, retMap["returncode"])) Print("set permission") - code="currency1111" pType="transfer" requirement="active" - trans=node.setPermission(testeraAccount.name, code, pType, requirement, waitForTransBlock=True, exitOnError=True) + trans=node.setPermission(testeraAccount, currencyAccount, pType, requirement, waitForTransBlock=True, exitOnError=True) Print("remove permission") requirement="null" - trans=node.setPermission(testeraAccount.name, code, pType, requirement, waitForTransBlock=True, exitOnError=True) + trans=node.setPermission(testeraAccount, currencyAccount, pType, requirement, waitForTransBlock=True, exitOnError=True) Print("Locking all wallets.") if not walletMgr.lockAllWallets(): diff --git a/tests/nodeos_under_min_avail_ram.py b/tests/nodeos_under_min_avail_ram.py index 04c85d0cf9..42603e25a9 100755 --- a/tests/nodeos_under_min_avail_ram.py +++ b/tests/nodeos_under_min_avail_ram.py @@ -112,7 +112,7 @@ wasmFile="integration_test.wasm" abiFile="integration_test.abi" Print("Publish contract") - trans=nodes[0].publishContract(contractAccount.name, contractDir, wasmFile, abiFile, waitForTransBlock=True) + trans=nodes[0].publishContract(contractAccount, contractDir, wasmFile, abiFile, waitForTransBlock=True) if trans is None: Utils.cmdError("%s set contract %s" % (ClientName, contractAccount.name)) errorExit("Failed to publish contract.") diff --git a/tests/p2p_network_test.py b/tests/p2p_network_test.py index 2205a4aa0a..7df694523f 100755 --- a/tests/p2p_network_test.py +++ b/tests/p2p_network_test.py @@ -149,7 +149,7 @@ wasmFile="eosio.system.wasm" abiFile="eosio.system.abi" Print("\nPush system contract %s %s" % (wasmFile, abiFile)) -trans=node0.publishContract(eosio.name, wasmFile, abiFile, waitForTransBlock=True) +trans=node0.publishContract(eosio, wasmFile, abiFile, waitForTransBlock=True) if trans is None: Utils.errorExit("Failed to publish eosio.system.") else: diff --git a/tests/prod_preactivation_test.py b/tests/prod_preactivation_test.py index 406366dacd..2fb906935e 100755 --- a/tests/prod_preactivation_test.py +++ b/tests/prod_preactivation_test.py @@ -110,7 +110,7 @@ abiFile="%s.abi" % (contract) Print("publish a new bios contract %s should fails because env.is_feature_activated unresolveable" % (contractDir)) - retMap = node0.publishContract("eosio", contractDir, wasmFile, abiFile, True, shouldFail=True) + retMap = node0.publishContract(cluster.eosioAccount, contractDir, wasmFile, abiFile, True, shouldFail=True) if retMap["output"].decode("utf-8").find("unresolveable") < 0: errorExit("bios contract not result in expected unresolveable error") @@ -150,7 +150,7 @@ time.sleep(0.6) Print("publish a new bios contract %s should fails because node1 is not producing block yet" % (contractDir)) - retMap = node0.publishContract("eosio", contractDir, wasmFile, abiFile, True, shouldFail=True) + retMap = node0.publishContract(cluster.eosioAccount, contractDir, wasmFile, abiFile, True, shouldFail=True) if retMap["output"].decode("utf-8").find("unresolveable") < 0: errorExit("bios contract not result in expected unresolveable error") @@ -167,7 
+167,7 @@ errorExit("No blocks produced by node 1") time.sleep(0.6) - retMap = node0.publishContract("eosio", contractDir, wasmFile, abiFile, True) + retMap = node0.publishContract(cluster.eosioAccount, contractDir, wasmFile, abiFile, True) Print("sucessfully set new contract with new intrinsic!!!") testSuccessful=True From e66feb63c4e8794ceb6411d255371b79c645490d Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 27 Jun 2022 14:48:10 -0500 Subject: [PATCH 13/25] GH-403 Fix a couple compilation errors from the backport merge. --- programs/cleos/main.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index 4e21e77430..18a1e87d49 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -255,12 +255,12 @@ class signing_keys_option { } else { fc::variant json_keys; try { - json_keys = fc::json::from_string(public_key_json, fc::json::relaxed_parser); + json_keys = fc::json::from_string(public_key_json, fc::json::parse_type::relaxed_parser); } EOS_RETHROW_EXCEPTIONS(json_parse_exception, "Fail to parse JSON from string: ${string}", ("string", public_key_json)); try { std::vector keys = json_keys.template as>(); signing_keys = std::move(keys); - } EOS_RETHROW_EXCEPTIONS(public_key_type_exception, "Invalid public key array format '${data}'", ("data", fc::json::to_string(json_keys))) + } EOS_RETHROW_EXCEPTIONS(public_key_type_exception, "Invalid public key array format '${data}'", ("data", fc::json::to_string(json_keys, fc::time_point::maximum()))) } } return signing_keys; From 4e2eab1c28ff71dcf40ec921ca5a3737bcafc8c9 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 27 Jun 2022 15:00:25 -0500 Subject: [PATCH 14/25] GH-403 - Delete inadvertently reintroduced test. Per GH-211 should be deleted. --- ...onsensus-validation-malicious-producers.py | 398 ------------------ 1 file changed, 398 deletions(-) delete mode 100755 tests/consensus-validation-malicious-producers.py diff --git a/tests/consensus-validation-malicious-producers.py b/tests/consensus-validation-malicious-producers.py deleted file mode 100755 index 16b98f8408..0000000000 --- a/tests/consensus-validation-malicious-producers.py +++ /dev/null @@ -1,398 +0,0 @@ -#!/usr/bin/env python3 - -import testUtils - -import argparse -import signal -from collections import namedtuple -import os -import shutil - -############################################################### -# Test for validating consensus based block production. We introduce malicious producers which -# reject all transactions. -# We have three test scenarios: -# - No malicious producers. Transactions should be incorporated into the chain. -# - Minority malicious producers (less than a third producer count). Transactions will get incorporated -# into the chain as majority appoves the transactions. -# - Majority malicious producer count (greater than a third producer count). Transactions won't get -# incorporated into the chain as majority rejects the transactions. 
-############################################################### - - -Print=testUtils.Utils.Print -errorExit=Utils.errorExit - -StagedNodeInfo=namedtuple("StagedNodeInfo", "config logging") - - -logging00="""{ - "includes": [], - "appenders": [{ - "name": "stderr", - "type": "console", - "args": { - "stream": "std_error", - "level_colors": [{ - "level": "debug", - "color": "green" - },{ - "level": "warn", - "color": "brown" - },{ - "level": "error", - "color": "red" - } - ] - }, - "enabled": true - },{ - "name": "stdout", - "type": "console", - "args": { - "stream": "std_out", - "level_colors": [{ - "level": "debug", - "color": "green" - },{ - "level": "warn", - "color": "brown" - },{ - "level": "error", - "color": "red" - } - ] - }, - "enabled": true - },{ - "name": "net", - "type": "gelf", - "args": { - "endpoint": "10.160.11.21:12201", - "host": "testnet_00" - }, - "enabled": true - } - ], - "loggers": [{ - "name": "default", - "level": "debug", - "enabled": true, - "additivity": false, - "appenders": [ - "stderr", - "net" - ] - } - ] -}""" - -config00="""genesis-json = ./genesis.json -block-log-dir = blocks -readonly = 0 -send-whole-blocks = true -shared-file-dir = blockchain -shared-file-size = 8192 -http-server-address = 127.0.0.1:8888 -p2p-listen-endpoint = 0.0.0.0:9876 -p2p-server-address = localhost:9876 -allowed-connection = any -p2p-peer-address = localhost:9877 -required-participation = true -private-key = ["EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV","5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"] -producer-name = initu -plugin = eosio::producer_plugin -plugin = eosio::chain_api_plugin -plugin = eosio::history_plugin -plugin = eosio::history_api_plugin""" - - -config01="""genesis-json = ./genesis.json -block-log-dir = blocks -readonly = 0 -send-whole-blocks = true -shared-file-dir = blockchain -shared-file-size = 8192 -http-server-address = 127.0.0.1:8889 -p2p-listen-endpoint = 0.0.0.0:9877 -p2p-server-address = localhost:9877 -allowed-connection = any -p2p-peer-address = localhost:9876 -required-participation = true -private-key = ["EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV","5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"] -producer-name = defproducerb -plugin = eosio::producer_plugin -plugin = eosio::chain_api_plugin -plugin = eosio::history_plugin -plugin = eosio::history_api_plugin""" - - -producers="""producer-name = defproducerd -producer-name = defproducerf -producer-name = defproducerh -producer-name = defproducerj -producer-name = defproducerl -producer-name = defproducern -producer-name = defproducerp -producer-name = defproducerr -producer-name = defproducert -producer-name = defproducera -producer-name = defproducerc -producer-name = defproducere -producer-name = defproducerg -producer-name = defproduceri -producer-name = defproducerk -producer-name = defproducerm -producer-name = defproducero -producer-name = defproducerq -producer-name = defproducers""" - -zeroExecTime="trans-execution-time = 0" - -def getNoMaliciousStagedNodesInfo(): - stagedNodesInfo=[] - myConfig00=config00 - stagedNodesInfo.append(StagedNodeInfo(myConfig00, logging00)) - myConfig01=config01+"\n"+producers - stagedNodesInfo.append(StagedNodeInfo(myConfig01, logging00)) - return stagedNodesInfo - -def getMinorityMaliciousProducerStagedNodesInfo(): - stagedNodesInfo=[] - myConfig00=config00+"\n"+producers - stagedNodesInfo.append(StagedNodeInfo(myConfig00, logging00)) - myConfig01=config01+"\n"+zeroExecTime - stagedNodesInfo.append(StagedNodeInfo(myConfig01, 
logging00)) - return stagedNodesInfo - -def getMajorityMaliciousProducerStagedNodesInfo(): - stagedNodesInfo=[] - myConfig00=config00 - stagedNodesInfo.append(StagedNodeInfo(myConfig00, logging00)) - myConfig01=config01+"\n"+producers+"\n"+zeroExecTime - stagedNodesInfo.append(StagedNodeInfo(myConfig01, logging00)) - return stagedNodesInfo - -stagingDir="staging" -def stageScenario(stagedNodeInfos): - assert(stagedNodeInfos != None) - assert(len(stagedNodeInfos) > 1) - - os.makedirs(stagingDir) - count=0 - for stagedNodeInfo in stagedNodeInfos: - configPath=os.path.join(stagingDir, "etc/eosio/node_%02d" % (count)) - os.makedirs(configPath) - with open(os.path.join(configPath, "config.ini"), "w") as textFile: - print(stagedNodeInfo.config,file=textFile) - with open(os.path.join(configPath, "logging.json"), "w") as textFile: - print(stagedNodeInfo.logging,file=textFile) - count += 1 - return - -def cleanStaging(): - os.path.exists(stagingDir) and shutil.rmtree(stagingDir) - -def error(msg="", errorCode=1): - Print("ERROR:", msg) - -parser = argparse.ArgumentParser() -tests=[1,2,3] - -parser.add_argument("-t", "--tests", type=str, help="1|2|3 1=run no malicious producers test, 2=minority malicious, 3=majority malicious.", default=None) -parser.add_argument("-w", type=int, help="system wait time", default=testUtils.Utils.systemWaitTimeout) -parser.add_argument("-v", help="verbose logging", action='store_true') -parser.add_argument("--dump-error-details", - help="Upon error print etc/eosio/node_*/config.ini and var/lib/node_*/stderr.log to stdout", - action='store_true') -parser.add_argument("--keep-logs", help="Don't delete var/lib/node_* folders upon test completion", - action='store_true') -parser.add_argument("--not-noon", help="This is not the Noon branch.", action='store_true') -parser.add_argument("--dont-kill", help="Leave cluster running after test finishes", action='store_true') - -args = parser.parse_args() -testsArg=args.tests -debug=args.v -waitTimeout=args.w -dumpErrorDetails=args.dump-error-details -keepLogs=args.keep-logs -amINoon=not args.not_noon -killEosInstances= not args.dont-kill -killWallet= not args.dont-kill - -testUtils.Utils.Debug=debug - -assert (testsArg is None or testsArg == "1" or testsArg == "2" or testsArg == "3") -if testsArg is not None: - tests=[int(testsArg)] - -testUtils.Utils.setSystemWaitTimeout(waitTimeout) -testUtils.Utils.iAmNotNoon() - -def myTest(transWillEnterBlock): - testSuccessful=False - - cluster=testUtils.Cluster(walletd=True, staging=True) - walletMgr=testUtils.WalletMgr(True) - - try: - cluster.killall() - cluster.cleanup() - walletMgr.killall() - walletMgr.cleanup() - - pnodes=2 - total_nodes=pnodes - topo="mesh" - delay=0 - Print("Stand up cluster") - if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo=topo, delay=delay) is False: - error("Failed to stand up eos cluster.") - return False - - accounts=testUtils.Cluster.createAccountKeys(1) - if accounts is None: - error("FAILURE - create keys") - return False - currencyAccount=accounts[0] - currencyAccount.name="currency0000" - - testWalletName="test" - Print("Creating wallet \"%s\"." % (testWalletName)) - testWallet=walletMgr.create(testWalletName) - - for account in accounts: - Print("Importing keys for account %s into wallet %s." 
% (account.name, testWallet.name)) - if not walletMgr.importKey(account, testWallet): - error("Failed to import key for account %s" % (account.name)) - return False - - node=cluster.getNode(0) - node2=cluster.getNode(1) - - defproduceraAccount=testUtils.Cluster.defproduceraAccount - - Print("Importing keys for account %s into wallet %s." % (defproduceraAccount.name, testWallet.name)) - if not walletMgr.importKey(defproduceraAccount, testWallet): - error("Failed to import key for account %s" % (defproduceraAccount.name)) - return False - - Print("Create new account %s via %s" % (currencyAccount.name, defproduceraAccount.name)) - transId=node.createAccount(currencyAccount, defproduceraAccount, stakedDeposit=5000, waitForTransBlock=True) - if transId is None: - error("Failed to create account %s" % (currencyAccount.name)) - return False - - wasmFile="currency.wasm" - abiFile="currency.abi" - Print("Publish contract") - trans=node.publishContract(currencyAccount, wasmFile, abiFile, waitForTransBlock=True) - if trans is None: - error("Failed to publish contract.") - return False - - Print("push transfer action to currency0000 contract") - contract="currency0000" - action="transfer" - data="{\"from\":\"currency0000\",\"to\":\"defproducera\",\"quantity\":" - if amINoon: - data +="\"00.0050 CUR\",\"memo\":\"test\"}" - else: - data +="50}" - opts="--permission currency0000@active" - if not amINoon: - opts += " --scope currency0000,defproducera" - - trans=node.pushMessage(contract, action, data, opts, silentErrors=True) - transInBlock=False - if not trans[0]: - # On slower systems e.g Travis the transaction rejection can happen immediately - # We want to handle fast and slow failures. - if "allocated processing time was exceeded" in trans[1]: - Print("Push message transaction immediately failed.") - else: - error("Exception in push message. %s" % (trans[1])) - return False - - else: - transId=testUtils.Node.getTransId(trans[1]) - - Print("verify transaction exists") - if not node2.waitForTransInBlock(transId): - error("Transaction never made it to node2") - return False - - Print("Get details for transaction %s" % (transId)) - transaction=node2.getTransaction(transId, exitOnError=True) - signature=transaction["transaction"]["signatures"][0] - - blockNum=int(transaction["transaction"]["ref_block_num"]) - blockNum += 1 - Print("Our transaction is in block %d" % (blockNum)) - - block=node2.getBlock(blockNum, exitOnError=True) - cycles=block["cycles"] - if len(cycles) > 0: - blockTransSignature=cycles[0][0]["user_input"][0]["signatures"][0] - # Print("Transaction signature: %s\nBlock transaction signature: %s" % - # (signature, blockTransSignature)) - transInBlock=(signature == blockTransSignature) - - if transWillEnterBlock: - if not transInBlock: - error("Transaction did not enter the chain.") - return False - else: - Print("SUCCESS: Transaction1 entered in the chain.") - elif not transWillEnterBlock: - if transInBlock: - error("Transaction entered the chain.") - return False - else: - Print("SUCCESS: Transaction2 did not enter the chain.") - - testSuccessful=True - finally: - if not testSuccessful and dumpErrorDetails: - cluster.dumpErrorDetails() - walletMgr.dumpErrorDetails() - Print("== Errors see above ==") - - if killEosInstances: - Print("Shut down the cluster%s" % (" and cleanup." 
if (testSuccessful and not keepLogs) else ".")) - cluster.killall() - walletMgr.killall() - if testSuccessful and not keepLogs: - Print("Cleanup cluster and wallet data.") - cluster.cleanup() - walletMgr.cleanup() - - return True - - -try: - if 1 in tests: - Print("Cluster with no malicious producers. All producers expected to approve transaction. Hence transaction is expected to enter the chain.") - cleanStaging() - stageScenario(getNoMaliciousStagedNodesInfo()) - if not myTest(True): - exit(1) - - if 2 in tests: - Print("\nCluster with minority(1) malicious nodes. Majority producers expected to approve transaction. Hence transaction is expected to enter the chain.") - cleanStaging() - stageScenario(getMinorityMaliciousProducerStagedNodesInfo()) - if not myTest(True): - exit(1) - - if 3 in tests: - Print("\nCluster with majority(20) malicious nodes. Majority producers expected to block transaction. Hence transaction is not expected to enter the chain.") - cleanStaging() - stageScenario(getMajorityMaliciousProducerStagedNodesInfo()) - if not myTest(False): - exit(1) - -finally: - cleanStaging() - -exit(0) From 2be38776f7157ff319d3ae9e5ea4abbc21b76b9e Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 27 Jun 2022 16:59:02 -0500 Subject: [PATCH 15/25] GH-403 Fix failing tests due to duplicated command args from backport merge. --- tests/Node.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/Node.py b/tests/Node.py index 63a65b1c0e..de0d6ea7f3 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -533,8 +533,8 @@ def __transferFundsCmdArr(self, source, destination, amountStr, memo, force, ret assert(destination) assert(isinstance(destination, Account)) - cmd="%s %s -v transfer --expiration 90 %s -j %s %s" % ( - Utils.EosClientPath, self.eosClientArgs(), self.getRetryCmdArg(retry), source.name, destination.name) + cmd="%s %s -v transfer --expiration 90 %s -j" % ( + Utils.EosClientPath, self.eosClientArgs(), self.getRetryCmdArg(retry)) cmdArr=cmd.split() # not using __sign_str, since cmdArr messes up the string if sign: From dcb80fb15baff3d611bd58e87cbec5baf3dee36a Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 28 Jun 2022 09:09:17 -0500 Subject: [PATCH 16/25] GH-403 Fix failing tests due to publishContract param changes. --- tests/plugin_http_api_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/plugin_http_api_test.py b/tests/plugin_http_api_test.py index e77a1cb6cc..f10ab94773 100755 --- a/tests/plugin_http_api_test.py +++ b/tests/plugin_http_api_test.py @@ -78,7 +78,7 @@ def activateAllBuiltinProtocolFeatures(self): walletAccounts = [eosioAccount] self.keosd.create(testWalletName, walletAccounts) - retMap = self.nodeos.publishContract(eosioAccount.name, contractDir, wasmFile, abiFile, waitForTransBlock=True) + retMap = self.nodeos.publishContract(eosioAccount, contractDir, wasmFile, abiFile, waitForTransBlock=True) self.nodeos.preactivateAllBuiltinProtocolFeature() From 6be26f3ac8b9d1327a4872472dfbf49966fd5239 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Tue, 28 Jun 2022 10:57:28 -0500 Subject: [PATCH 17/25] fix cherry-pick build failures. 
--- libraries/chain/apply_context.cpp | 2 +- unittests/api_tests.cpp | 15 +++++++-------- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 62e976ae25..2cdd7b5a91 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -85,7 +85,7 @@ void apply_context::exec_one() if( ( receiver_account->code_hash != digest_type() ) && ( !( act->account == config::system_account_name - && act->name == N( setcode ) + && act->name == "setcode"_n && receiver == config::system_account_name ) || control.is_builtin_activated( builtin_protocol_feature_t::forward_setcode ) ) diff --git a/unittests/api_tests.cpp b/unittests/api_tests.cpp index 139f076005..ffe9a87211 100644 --- a/unittests/api_tests.cpp +++ b/unittests/api_tests.cpp @@ -856,10 +856,10 @@ BOOST_AUTO_TEST_CASE(light_validation_skip_cfa) try { std::vector blocks; blocks.push_back(chain.produce_block()); - chain.create_account( N(testapi) ); - chain.create_account( N(dummy) ); + chain.create_account( "testapi"_n ); + chain.create_account( "dummy"_n ); blocks.push_back(chain.produce_block()); - chain.set_code( N(testapi), contracts::test_api_wasm() ); + chain.set_code( "testapi"_n, contracts::test_api_wasm() ); blocks.push_back(chain.produce_block()); cf_action cfa; @@ -870,11 +870,11 @@ BOOST_AUTO_TEST_CASE(light_validation_skip_cfa) try { trx.context_free_data.emplace_back(fc::raw::pack(200)); // add a normal action along with cfa dummy_action da = { DUMMY_ACTION_DEFAULT_A, DUMMY_ACTION_DEFAULT_B, DUMMY_ACTION_DEFAULT_C }; - action act1(vector{{N(testapi), config::active_name}}, da); + action act1(vector{{"testapi"_n, config::active_name}}, da); trx.actions.push_back(act1); chain.set_transaction_headers(trx); // run normal passing case - auto sigs = trx.sign(chain.get_private_key(N(testapi), "active"), chain.control->get_chain_id()); + auto sigs = trx.sign(chain.get_private_key("testapi"_n, "active"), chain.control->get_chain_id()); auto trace = chain.push_transaction(trx); blocks.push_back(chain.produce_block()); @@ -893,14 +893,13 @@ BOOST_AUTO_TEST_CASE(light_validation_skip_cfa) try { auto conf_genesis = tester::default_config( tempdir ); auto& cfg = conf_genesis.first; - cfg.trusted_producers = { N(eosio) }; // light validation + cfg.trusted_producers = { "eosio"_n }; // light validation tester other( conf_genesis.first, conf_genesis.second ); other.execute_setup_policy( setup_policy::full ); - transaction_trace_ptr other_trace; - auto cc = other.control->applied_transaction.connect( [&](std::tuple x) { + auto cc = other.control->applied_transaction.connect( [&](std::tuple x) { auto& t = std::get<0>(x); if( t && t->id == trace->id ) { other_trace = t; From ab002a67e24814adb042def5860fbdaeaab0944a Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Wed, 27 Nov 2019 15:18:28 -0600 Subject: [PATCH 18/25] GH-422 Merge pull request #8232 from EOSIO/8224-repeat-transaction-lr-test Create integration test for sending copies of the same transaction into the network --- tests/CMakeLists.txt | 4 ++ tests/Node.py | 35 ++++++++++----- tests/TestHelper.py | 12 +++-- tests/nodeos_high_transaction_test.py | 64 ++++++++++++++++++++++----- 4 files changed, 89 insertions(+), 26 deletions(-) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index d2523482ee..91a456a14d 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -223,6 +223,10 @@ set_property(TEST plugin_http_api_test PROPERTY LABELS nonparallelizable_tests) 
add_test(NAME resource_monitor_plugin_test COMMAND tests/resource_monitor_plugin_test.py WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST resource_monitor_plugin_test PROPERTY LABELS long_running_tests) +add_test(NAME nodeos_repeat_transaction_lr_test COMMAND tests/nodeos_high_transaction_test.py -v --clean-run --dump-error-detail -p 4 -n 8 --num-transactions 1000 --max-transactions-per-second 500 --send-duplicates WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST nodeos_repeat_transaction_lr_test PROPERTY LABELS long_running_tests) + + if(ENABLE_COVERAGE_TESTING) set(Coverage_NAME ${PROJECT_NAME}_coverage) diff --git a/tests/Node.py b/tests/Node.py index de0d6ea7f3..8bdebf1290 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -526,15 +526,27 @@ def __call__(self): def waitForIrreversibleBlock(self, blockNum, timeout=None, reportInterval=None): return self.waitForBlock(blockNum, timeout=timeout, blockType=BlockType.lib, reportInterval=reportInterval) - def __transferFundsCmdArr(self, source, destination, amountStr, memo, force, retry, sign): + def __transferFundsCmdArr(self, source, destination, amountStr, memo, force, retry, sign, dontSend, expiration): assert isinstance(amountStr, str) assert(source) assert(isinstance(source, Account)) assert(destination) assert(isinstance(destination, Account)) + assert(expiration is None or isinstance(expiration, int)) - cmd="%s %s -v transfer --expiration 90 %s -j" % ( - Utils.EosClientPath, self.eosClientArgs(), self.getRetryCmdArg(retry)) + dontSendStr = "" + if dontSend: + dontSendStr = "--dont-broadcast " + if expiration is None: + # default transaction expiration to be 4 minutes in the future + expiration = 240 + + expirationStr = "" + if expiration is not None: + expirationStr = "--expiration %d " % (expiration) + + cmd="%s %s -v transfer %s -j %s %s" % ( + Utils.EosClientPath, self.eosClientArgs(), self.getRetryCmdArg(retry), dontSendStr, expirationStr) cmdArr=cmd.split() # not using __sign_str, since cmdArr messes up the string if sign: @@ -552,8 +564,8 @@ def __transferFundsCmdArr(self, source, destination, amountStr, memo, force, ret return cmdArr # Trasfer funds. Returns "transfer" json return object - def transferFunds(self, source, destination, amountStr, memo="memo", force=False, waitForTransBlock=False, exitOnError=True, reportStatus=True, retry=None, sign=False): - cmdArr = self.__transferFundsCmdArr(source, destination, amountStr, memo, force, retry, sign) + def transferFunds(self, source, destination, amountStr, memo="memo", force=False, waitForTransBlock=False, exitOnError=True, reportStatus=True, retry=None, sign=False, dontSend=False, expiration=None): + cmdArr = self.__transferFundsCmdArr(source, destination, amountStr, memo, force, retry, sign, dontSend, expiration) trans=None start=time.perf_counter() try: @@ -561,7 +573,8 @@ def transferFunds(self, source, destination, amountStr, memo="memo", force=False if Utils.Debug: end=time.perf_counter() Utils.Print("cmd Duration: %.3f sec" % (end-start)) - self.trackCmdTransaction(trans, reportStatus=reportStatus) + if not dontSend: + self.trackCmdTransaction(trans, reportStatus=reportStatus) except subprocess.CalledProcessError as ex: end=time.perf_counter() msg=ex.output.decode("utf-8") @@ -578,8 +591,8 @@ def transferFunds(self, source, destination, amountStr, memo="memo", force=False return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) # Trasfer funds. 
Returns (popen, cmdArr) for checkDelayedOutput - def transferFundsAsync(self, source, destination, amountStr, memo="memo", force=False, exitOnError=True, retry=None, sign=False): - cmdArr = self.__transferFundsCmdArr(source, destination, amountStr, memo, force, retry, sign) + def transferFundsAsync(self, source, destination, amountStr, memo="memo", force=False, exitOnError=True, retry=None, sign=False, dontSend=False, expiration=None): + cmdArr = self.__transferFundsCmdArr(source, destination, amountStr, memo, force, retry, sign, dontSend, expiration) start=time.perf_counter() try: popen=Utils.delayedCheckOutput(cmdArr) @@ -820,11 +833,11 @@ def getTableColumns(self, contract, scope, table): keys=list(row.keys()) return keys + # returns tuple with indication if transaction was successfully sent and either the transaction or else the exception output def pushTransaction(self, trans, opts="", silentErrors=False, permissions=None): assert(isinstance(trans, dict)) if isinstance(permissions, str): permissions=[permissions] - reportStatus = True cmd="%s %s push transaction -j" % (Utils.EosClientPath, self.eosClientArgs()) cmdArr=cmd.split() @@ -852,7 +865,7 @@ def pushTransaction(self, trans, opts="", silentErrors=False, permissions=None): msg=ex.output.decode("utf-8") if not silentErrors: end=time.perf_counter() - Utils.Print("ERROR: Exception during push transaction. cmd Duration=%.3f sec. %s" % (end - start, msg)) + Utils.Print("ERROR: Exception during push message. cmd Duration=%.3f sec. %s" % (end - start, msg)) return (False, msg) # returns tuple with transaction execution status and transaction @@ -867,7 +880,6 @@ def pushMessage(self, account, action, data, opts, silentErrors=False, signature cmdArr.append(data) if opts is not None: cmdArr += opts.split() - s=" ".join(cmdArr) if Utils.Debug: Utils.Print("cmd: %s" % (cmdArr)) start=time.perf_counter() try: @@ -897,7 +909,6 @@ def setPermission(self, account, code, pType, requirement, waitForTransBlock=Fal assert(isinstance(account, Account)) assert(isinstance(code, Account)) signStr = Node.__sign_str(sign, [ account.activePublicKey ]) - Utils.Print("REMOVE signStr: <%s>" % (signStr)) cmdDesc="set action permission" cmd="%s -j %s %s %s %s %s" % (cmdDesc, signStr, account.name, code.name, pType, requirement) trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError) diff --git a/tests/TestHelper.py b/tests/TestHelper.py index 9cf3099493..7643f7add9 100644 --- a/tests/TestHelper.py +++ b/tests/TestHelper.py @@ -11,20 +11,21 @@ def __init__(self): self.args=[] class AppArg: - def __init__(self, flag, type, help, default, choices=None): + def __init__(self, flag, help, type=None, default=None, choices=None, action=None): self.flag=flag self.type=type self.help=help self.default=default self.choices=choices + self.action=action def add(self, flag, type, help, default, choices=None): - arg=self.AppArg(flag, type, help, default, choices) + arg=self.AppArg(flag, help, type=type, default=default, choices=choices) self.args.append(arg) def add_bool(self, flag, help, action='store_true'): - arg=self.AppArg(flag=flag, help=help, action=action) + arg=self.AppArg(flag, help, action=action) self.args.append(arg) # pylint: disable=too-many-instance-attributes @@ -112,7 +113,10 @@ def parse_args(includeArgs, applicationSpecificArgs=AppArgs()): parser.add_argument("--alternate-version-labels-file", type=str, help="Provide a file to define the labels that can be used in the test and the path to the version installation associated 
with that.") for arg in applicationSpecificArgs.args: - parser.add_argument(arg.flag, type=arg.type, help=arg.help, choices=arg.choices, default=arg.default) + if arg.type is not None: + parser.add_argument(arg.flag, type=arg.type, help=arg.help, choices=arg.choices, default=arg.default) + else: + parser.add_argument(arg.flag, help=arg.help, action=arg.action) args = parser.parse_args() return args diff --git a/tests/nodeos_high_transaction_test.py b/tests/nodeos_high_transaction_test.py index d8e14fe925..062c0bafcf 100755 --- a/tests/nodeos_high_transaction_test.py +++ b/tests/nodeos_high_transaction_test.py @@ -1,6 +1,7 @@ #!/usr/bin/env python3 from testUtils import Utils +import signal import time from Cluster import Cluster from Cluster import NamedAccounts @@ -30,6 +31,7 @@ extraArgs = appArgs.add(flag="--num-transactions", type=int, help="How many total transactions should be sent", default=10000) extraArgs = appArgs.add(flag="--max-transactions-per-second", type=int, help="How many transactions per second should be sent", default=500) extraArgs = appArgs.add(flag="--total-accounts", type=int, help="How many accounts should be involved in sending transfers. Must be greater than %d" % (minTotalAccounts), default=100) +extraArgs = appArgs.add_bool(flag="--send-duplicates", help="If identical transactions should be sent to all nodes") args = TestHelper.parse_args({"-p", "-n","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run"}, applicationSpecificArgs=appArgs) Utils.Debug=args.v @@ -67,6 +69,9 @@ WalletdName=Utils.EosWalletName ClientName="cleos" +maxTransactionAttempts = 2 # max number of attempts to try to send a transaction +maxTransactionAttemptsNoSend = 1 # max number of attempts to try to create a transaction to be sent as a duplicate + try: TestHelper.printSystemInfo("BEGIN") @@ -79,7 +84,7 @@ if cluster.launch(pnodes=totalProducerNodes, totalNodes=totalNodes, totalProducers=totalProducers, useBiosBootFile=False, - extraNodeosArgs=traceNodeosArgs) is False: + extraNodeosArgs=traceNodeosArgs, topo="ring") is False: Utils.cmdError("launcher") Utils.errorExit("Failed to stand up eos cluster.") @@ -110,8 +115,9 @@ nonProdNodes=[] prodNodes=[] + allNodes=cluster.getNodes() for i in range(0, totalNodes): - node=cluster.getNode(i) + node=allNodes[i] nodeProducers=Cluster.parseProducers(i) numProducers=len(nodeProducers) Print("node has producers=%s" % (nodeProducers)) @@ -185,6 +191,7 @@ def cacheTransIdInBlock(transId, transToBlock, node): assert btrans is not None, Print("ERROR: could not retrieve \"trx\" from transaction_receipt: %s, from transId: %s that led to block: %s" % (json.dumps(trans_receipt, indent=2), transId, json.dumps(block, indent=2))) btransId = btrans["id"] assert btransId is not None, Print("ERROR: could not retrieve \"id\" from \"trx\": %s, from transId: %s that led to block: %s" % (json.dumps(btrans, indent=2), transId, json.dumps(block, indent=2))) + assert btransId not in transToBlock, Print("ERROR: transaction_id: %s found in block: %d, but originally seen in block number: %d" % (btransId, blockNum, transToBlock[btransId]["block_num"])) transToBlock[btransId] = block break @@ -219,6 +226,20 @@ def findTransInBlock(transId, transToBlock, node): #verify nodes are in sync and advancing cluster.waitOnClusterSync(blockAdvancing=5) + nodeOrder = [] + if args.send_duplicates: + # kill bios, since it prevents the ring topography from really being a ring + cluster.biosNode.kill(signal.SIGTERM) + nodeOrder.append(0) + # jump to node furthest in 
ring from node 0 + next = int((totalNodes + 1) / 2) + nodeOrder.append(next) + # then just fill in the rest of the nodes + for i in range(1, next): + nodeOrder.append(i) + for i in range(next + 1, totalNodes): + nodeOrder.append(i) + Print("Sending %d transfers" % (numTransactions)) delayAfterRounds = int(maxTransactionsPerSecond / args.total_accounts) history = [] @@ -238,22 +259,45 @@ def findTransInBlock(transId, transToBlock, node): time.sleep(delayTime) transferAmount = Node.currencyIntToStr(round + 1, CORE_SYMBOL) + Print("Sending round %d, transfer: %s" % (round, transferAmount)) for accountIndex in range(0, args.total_accounts): fromAccount = accounts[accountIndex] toAccountIndex = accountIndex + 1 if accountIndex + 1 < args.total_accounts else 0 toAccount = accounts[toAccountIndex] node = nonProdNodes[accountIndex % nonProdNodeCount] - trans=node.transferFunds(fromAccount, toAccount, transferAmount, "transfer round %d" % (round), exitOnError=False, reportStatus=False) - if trans is None: - # delay and see if transfer is accepted now - Utils.Print("Transfer rejected, delay 1 second and see if it is then accepted") - time.sleep(1) - trans=node.transferFunds(fromAccount, toAccount, transferAmount, "transfer round %d" % (round), exitOnError=False, reportStatus=False) + trans = None + attempts = 0 + maxAttempts = maxTransactionAttempts if not args.send_duplicates else maxTransactionAttemptsNoSend # for send_duplicates we are just constructing a transaction, so should never require a second attempt + # can try up to maxAttempts times to send the transfer + while trans is None and attempts < maxAttempts: + if attempts > 0: + # delay and see if transfer is accepted now + Utils.Print("Transfer rejected, delay 1 second and see if it is then accepted") + time.sleep(1) + trans=node.transferFunds(fromAccount, toAccount, transferAmount, "transfer round %d" % (round), exitOnError=False, reportStatus=False, sign = True, dontSend = args.send_duplicates) + attempts += 1 + + if args.send_duplicates: + sendTrans = trans + trans = None + numAccepted = 0 + attempts = 0 + while trans is None and attempts < maxTransactionAttempts: + for node in map(lambda ordinal: allNodes[ordinal], nodeOrder): + repeatTrans = node.pushTransaction(sendTrans, silentErrors=True) + if repeatTrans is not None: + if trans is None and repeatTrans[0]: + trans = repeatTrans[1] + transId = Node.getTransId(trans) + + numAccepted += 1 + + attempts += 1 assert trans is not None, Print("ERROR: failed round: %d, fromAccount: %s, toAccount: %s" % (round, accountIndex, toAccountIndex)) - # store off the transaction id, which we can use with the node.transCache - history.append(Node.getTransId(trans)) + transId = Node.getTransId(trans) + history.append(transId) nextTime = time.perf_counter() Print("Sending transfers took %s sec" % (nextTime - startTransferTime)) From e8260d61a9269f6dfc9c88e701b010b1084d8f14 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 28 Jun 2022 15:03:27 -0500 Subject: [PATCH 19/25] GH-422 Fix log message. --- tests/Node.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/Node.py b/tests/Node.py index 8bdebf1290..43473819c7 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -865,7 +865,7 @@ def pushTransaction(self, trans, opts="", silentErrors=False, permissions=None): msg=ex.output.decode("utf-8") if not silentErrors: end=time.perf_counter() - Utils.Print("ERROR: Exception during push message. cmd Duration=%.3f sec. 
%s" % (end - start, msg)) + Utils.Print("ERROR: Exception during push transaction. cmd Duration=%.3f sec. %s" % (end - start, msg)) return (False, msg) # returns tuple with transaction execution status and transaction From 4acbfccd1ca8d5467e1189f61a9e14dc7f54eabf Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 28 Jun 2022 15:05:12 -0500 Subject: [PATCH 20/25] GH-422 Set default expiration back to 90 to allow for tests to finish. --- tests/Node.py | 4 ++-- tests/nodeos_high_transaction_test.py | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/Node.py b/tests/Node.py index 43473819c7..6446e01771 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -564,7 +564,7 @@ def __transferFundsCmdArr(self, source, destination, amountStr, memo, force, ret return cmdArr # Trasfer funds. Returns "transfer" json return object - def transferFunds(self, source, destination, amountStr, memo="memo", force=False, waitForTransBlock=False, exitOnError=True, reportStatus=True, retry=None, sign=False, dontSend=False, expiration=None): + def transferFunds(self, source, destination, amountStr, memo="memo", force=False, waitForTransBlock=False, exitOnError=True, reportStatus=True, retry=None, sign=False, dontSend=False, expiration=90): cmdArr = self.__transferFundsCmdArr(source, destination, amountStr, memo, force, retry, sign, dontSend, expiration) trans=None start=time.perf_counter() @@ -591,7 +591,7 @@ def transferFunds(self, source, destination, amountStr, memo="memo", force=False return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) # Trasfer funds. Returns (popen, cmdArr) for checkDelayedOutput - def transferFundsAsync(self, source, destination, amountStr, memo="memo", force=False, exitOnError=True, retry=None, sign=False, dontSend=False, expiration=None): + def transferFundsAsync(self, source, destination, amountStr, memo="memo", force=False, exitOnError=True, retry=None, sign=False, dontSend=False, expiration=90): cmdArr = self.__transferFundsCmdArr(source, destination, amountStr, memo, force, retry, sign, dontSend, expiration) start=time.perf_counter() try: diff --git a/tests/nodeos_high_transaction_test.py b/tests/nodeos_high_transaction_test.py index 062c0bafcf..46882d905d 100755 --- a/tests/nodeos_high_transaction_test.py +++ b/tests/nodeos_high_transaction_test.py @@ -275,7 +275,8 @@ def findTransInBlock(transId, transToBlock, node): # delay and see if transfer is accepted now Utils.Print("Transfer rejected, delay 1 second and see if it is then accepted") time.sleep(1) - trans=node.transferFunds(fromAccount, toAccount, transferAmount, "transfer round %d" % (round), exitOnError=False, reportStatus=False, sign = True, dontSend = args.send_duplicates) + expiration=None if args.send_duplicates else 90 + trans=node.transferFunds(fromAccount, toAccount, transferAmount, "transfer round %d" % (round), exitOnError=False, reportStatus=False, sign = True, dontSend = args.send_duplicates, expiration=expiration) attempts += 1 if args.send_duplicates: From b3dd31145f9a18adfce5050274e0eea795ecd9cf Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 29 Jun 2022 08:47:16 -0500 Subject: [PATCH 21/25] GH-422 Fix error reporting to be in the assert printout. 
--- tests/nodeos_high_transaction_test.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/tests/nodeos_high_transaction_test.py b/tests/nodeos_high_transaction_test.py index 46882d905d..a200020dd1 100755 --- a/tests/nodeos_high_transaction_test.py +++ b/tests/nodeos_high_transaction_test.py @@ -52,7 +52,7 @@ transBlocksBehind=args.transaction_time_delta * blocksPerSec numTransactions = args.num_transactions maxTransactionsPerSecond = args.max_transactions_per_second -assert args.total_accounts >= minTotalAccounts, Print("ERROR: Only %d was selected for --total-accounts, must have at least %d" % (args.total_accounts, minTotalAccounts)) +assert args.total_accounts >= minTotalAccounts, "ERROR: Only %d was selected for --total-accounts, must have at least %d" % (args.total_accounts, minTotalAccounts) if numTransactions % args.total_accounts > 0: oldNumTransactions = numTransactions numTransactions = int((oldNumTransactions + args.total_accounts - 1)/args.total_accounts) * args.total_accounts @@ -182,16 +182,16 @@ def cacheTransIdInBlock(transId, transToBlock, node): lastIrreversibleBlockNum = node.getIrreversibleBlockNum() blockNum = Node.getTransBlockNum(trans) - assert blockNum is not None, Print("ERROR: could not retrieve block num from transId: %s, trans: %s" % (transId, json.dumps(trans, indent=2))) + assert blockNum is not None, "ERROR: could not retrieve block num from transId: %s, trans: %s" % (transId, json.dumps(trans, indent=2)) block = node.getBlock(blockNum) if block is not None: transactions = block["transactions"] for trans_receipt in transactions: btrans = trans_receipt["trx"] - assert btrans is not None, Print("ERROR: could not retrieve \"trx\" from transaction_receipt: %s, from transId: %s that led to block: %s" % (json.dumps(trans_receipt, indent=2), transId, json.dumps(block, indent=2))) + assert btrans is not None, "ERROR: could not retrieve \"trx\" from transaction_receipt: %s, from transId: %s that led to block: %s" % (json.dumps(trans_receipt, indent=2), transId, json.dumps(block, indent=2)) btransId = btrans["id"] - assert btransId is not None, Print("ERROR: could not retrieve \"id\" from \"trx\": %s, from transId: %s that led to block: %s" % (json.dumps(btrans, indent=2), transId, json.dumps(block, indent=2))) - assert btransId not in transToBlock, Print("ERROR: transaction_id: %s found in block: %d, but originally seen in block number: %d" % (btransId, blockNum, transToBlock[btransId]["block_num"])) + assert btransId is not None, "ERROR: could not retrieve \"id\" from \"trx\": %s, from transId: %s that led to block: %s" % (json.dumps(btrans, indent=2), transId, json.dumps(block, indent=2)) + assert btransId not in transToBlock, "ERROR: transaction_id: %s found in block: %d, but originally seen in block number: %d" % (btransId, blockNum, transToBlock[btransId]["block_num"]) transToBlock[btransId] = block break @@ -212,8 +212,8 @@ def findTransInBlock(transId, transToBlock, node): if transId in transToBlock: return (block, trans) = cacheTransIdInBlock(transId, transToBlock, node) - assert trans is not None, Print("ERROR: could not find transaction for transId: %s" % (transId)) - assert block is not None, Print("ERROR: could not retrieve block with block num: %d, from transId: %s, trans: %s" % (blockNum, transId, json.dumps(trans, indent=2))) + assert trans is not None, "ERROR: could not find transaction for transId: %s" % (transId) + assert block is not None, "ERROR: could not retrieve block with block num: %d, from transId: %s, 
trans: %s" % (blockNum, transId, json.dumps(trans, indent=2)) transToBlock = {} for transId in checkTransIds: @@ -296,7 +296,7 @@ def findTransInBlock(transId, transToBlock, node): attempts += 1 - assert trans is not None, Print("ERROR: failed round: %d, fromAccount: %s, toAccount: %s" % (round, accountIndex, toAccountIndex)) + assert trans is not None, "ERROR: failed round: %d, fromAccount: %s, toAccount: %s" % (round, accountIndex, toAccountIndex) transId = Node.getTransId(trans) history.append(transId) @@ -333,11 +333,11 @@ def findTransInBlock(transId, transToBlock, node): if newestBlockNum > lastBlockNum: missingTransactions[-1]["highest_block_seen"] = newestBlockNum blockNum = Node.getTransBlockNum(trans) - assert blockNum is not None, Print("ERROR: could not retrieve block num from transId: %s, trans: %s" % (transId, json.dumps(trans, indent=2))) + assert blockNum is not None, "ERROR: could not retrieve block num from transId: %s, trans: %s" % (transId, json.dumps(trans, indent=2)) else: block = transToBlock[transId] blockNum = block["block_num"] - assert blockNum is not None, Print("ERROR: could not retrieve block num for block retrieved for transId: %s, block: %s" % (transId, json.dumps(block, indent=2))) + assert blockNum is not None, "ERROR: could not retrieve block num for block retrieved for transId: %s, block: %s" % (transId, json.dumps(block, indent=2)) if lastBlockNum is not None: if blockNum > lastBlockNum + transBlocksBehind or blockNum + transBlocksBehind < lastBlockNum: From 5a099a0fb6c526f01654474310f6ff0628529ab6 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 29 Jun 2022 08:47:53 -0500 Subject: [PATCH 22/25] GH-422 Remove spaces in arguments. --- tests/nodeos_high_transaction_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/nodeos_high_transaction_test.py b/tests/nodeos_high_transaction_test.py index a200020dd1..3fe447f5ff 100755 --- a/tests/nodeos_high_transaction_test.py +++ b/tests/nodeos_high_transaction_test.py @@ -276,7 +276,7 @@ def findTransInBlock(transId, transToBlock, node): Utils.Print("Transfer rejected, delay 1 second and see if it is then accepted") time.sleep(1) expiration=None if args.send_duplicates else 90 - trans=node.transferFunds(fromAccount, toAccount, transferAmount, "transfer round %d" % (round), exitOnError=False, reportStatus=False, sign = True, dontSend = args.send_duplicates, expiration=expiration) + trans=node.transferFunds(fromAccount, toAccount, transferAmount, "transfer round %d" % (round), exitOnError=False, reportStatus=False, sign=True, dontSend=args.send_duplicates, expiration=expiration) attempts += 1 if args.send_duplicates: From 7661a16cc70675d41ea6ec20814c197590971f68 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 29 Jun 2022 08:48:39 -0500 Subject: [PATCH 23/25] GH-422 Add opts back in since default was removed in Node.py. Needed for test to pass. 
--- tests/nodeos_high_transaction_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/nodeos_high_transaction_test.py b/tests/nodeos_high_transaction_test.py index 3fe447f5ff..d40379431c 100755 --- a/tests/nodeos_high_transaction_test.py +++ b/tests/nodeos_high_transaction_test.py @@ -286,7 +286,7 @@ def findTransInBlock(transId, transToBlock, node): attempts = 0 while trans is None and attempts < maxTransactionAttempts: for node in map(lambda ordinal: allNodes[ordinal], nodeOrder): - repeatTrans = node.pushTransaction(sendTrans, silentErrors=True) + repeatTrans = node.pushTransaction(sendTrans, opts="--skip-sign", silentErrors=True) if repeatTrans is not None: if trans is None and repeatTrans[0]: trans = repeatTrans[1] From d14a5f36353d561ee3e914e40d2f6561c2791058 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 6 May 2020 15:11:49 -0500 Subject: [PATCH 24/25] GH-423 Merge pull request #9050 from EOSIO/cleos-set-code-compression-dev Add cleos --compression option for transactions --- programs/cleos/main.cpp | 45 ++++++++++++++++++++++++++++++++--------- 1 file changed, 36 insertions(+), 9 deletions(-) diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index 18a1e87d49..9caf59baf1 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -198,6 +198,24 @@ vector tx_permission; eosio::client::http::http_context context; +enum class tx_compression_type { + none, + zlib, + default_compression +}; +static std::map compression_type_map{ + {"none", tx_compression_type::none }, + {"zlib", tx_compression_type::zlib } +}; +tx_compression_type tx_compression = tx_compression_type::default_compression; +packed_transaction::compression_type to_compression_type( tx_compression_type t ) { + switch( t ) { + case tx_compression_type::none: return packed_transaction::compression_type::none; + case tx_compression_type::zlib: return packed_transaction::compression_type::zlib; + case tx_compression_type::default_compression: return packed_transaction::compression_type::none; + } +} + void add_standard_transaction_options(CLI::App* cmd, string default_permission = "") { CLI::callback_t parse_expiration = [](CLI::results_t res) -> bool { double value_s; @@ -219,6 +237,8 @@ void add_standard_transaction_options(CLI::App* cmd, string default_permission = cmd->add_option("-r,--ref-block", tx_ref_block_num_or_id, (localized("Set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake)"))); cmd->add_flag("--use-old-rpc", tx_use_old_rpc, localized("Use old RPC push_transaction, rather than new RPC send_transaction")); cmd->add_flag("--use-old-send-rpc", tx_use_old_send_rpc, localized("Use old RPC send_transaction, rather than new RPC /v1/chain/send_transaction2")); + cmd->add_option("--compression", tx_compression, localized("Compression for transaction 'none' or 'zlib'"))->transform( + CLI::CheckedTransformer(compression_type_map, CLI::ignore_case)); string msg = "An account and permission level to authorize, as in 'account@permission'"; if(!default_permission.empty()) @@ -368,8 +388,8 @@ void sign_transaction(signed_transaction& trx, fc::variant& required_keys, const trx = signed_trx.as(); } -fc::variant push_transaction( signed_transaction& trx, const std::vector& signing_keys = std::vector(), - packed_transaction::compression_type compression = packed_transaction::compression_type::none ) { +fc::variant push_transaction( signed_transaction& trx, const std::vector& signing_keys = std::vector() ) +{ auto info = get_info(); if 
    if (trx.signatures.size() == 0) { // #5445 can't change txn content if already signed
@@ -406,6 +426,7 @@ fc::variant push_transaction( signed_transaction& trx, const std::vector
 0), "ERROR: --retry-irreversible and --retry-num-blocks are mutually exclusive" );
@@ -463,11 +484,11 @@ fc::variant push_transaction( signed_transaction& trx, const std::vector
-fc::variant push_actions(std::vector&& actions, packed_transaction::compression_type compression = packed_transaction::compression_type::none, const std::vector& signing_keys = std::vector() ) {
+fc::variant push_actions(std::vector&& actions, const std::vector& signing_keys = std::vector() ) {
    signed_transaction trx;
    trx.actions = std::forward(actions);

-   return push_transaction(trx, signing_keys, compression);
+   return push_transaction(trx, signing_keys);
 }

 void print_return_value( const fc::variant& at ) {
@@ -641,13 +662,13 @@ void print_result( const fc::variant& result ) { try {
 } FC_CAPTURE_AND_RETHROW( (result) ) }

 using std::cout;

-void send_actions(std::vector&& actions, const std::vector& signing_keys = std::vector(), packed_transaction::compression_type compression = packed_transaction::compression_type::none ) {
+void send_actions(std::vector&& actions, const std::vector& signing_keys = std::vector() ) {
    std::ofstream out;
    if (tx_json_save_file.length()) {
       out.open(tx_json_save_file);
       EOSC_ASSERT(!out.fail(), "ERROR: Failed to create file \"${p}\"", ("p", tx_json_save_file));
    }
-   auto result = push_actions( move(actions), compression, signing_keys);
+   auto result = push_actions( move(actions), signing_keys);

    string jsonstr;
    if (tx_json_save_file.length()) {
@@ -3231,7 +3252,9 @@ int main( int argc, char** argv ) {
          actions.emplace_back( create_setcode(name(account), code_bytes ) );
          if ( shouldSend ) {
             std::cerr << localized("Setting Code...") << std::endl;
-            send_actions(std::move(actions), signing_keys_opt.get_keys(), packed_transaction::compression_type::zlib);
+            if( tx_compression == tx_compression_type::default_compression )
+               tx_compression = tx_compression_type::zlib;
+            send_actions(std::move(actions), signing_keys_opt.get_keys());
          }
       } else {
          std::cerr << localized("Skipping set code because the new code is the same as the existing code") << std::endl;
@@ -3279,7 +3302,9 @@ int main( int argc, char** argv ) {
      } EOS_RETHROW_EXCEPTIONS(abi_type_exception, "Fail to parse ABI JSON")
      if ( shouldSend ) {
         std::cerr << localized("Setting ABI...") << std::endl;
-        send_actions(std::move(actions), signing_keys_opt.get_keys(), packed_transaction::compression_type::zlib);
+        if( tx_compression == tx_compression_type::default_compression )
+           tx_compression = tx_compression_type::zlib;
+        send_actions(std::move(actions), signing_keys_opt.get_keys());
      }
   } else {
      std::cerr << localized("Skipping set abi because the new abi is the same as the existing abi") << std::endl;
@@ -3296,7 +3321,9 @@ int main( int argc, char** argv ) {
      set_abi_callback();
      if (actions.size()) {
         std::cerr << localized("Publishing contract...") << std::endl;
-        send_actions(std::move(actions), signing_keys_opt.get_keys(), packed_transaction::compression_type::zlib);
+        if( tx_compression == tx_compression_type::default_compression )
+           tx_compression = tx_compression_type::zlib;
+        send_actions(std::move(actions), signing_keys_opt.get_keys());
      } else {
         std::cout << "no transaction is sent" << std::endl;
      }
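
[Editor's note] PATCH 24's cleos diff is split across several hunks and is partially garbled above, so here is a small, self-contained C++ sketch of the pattern it introduces: a string-to-enum map fed to CLI11's CLI::CheckedTransformer for the --compression option, a default_compression sentinel, and a zlib default that applies only to code/ABI uploads when the user has not chosen a compression explicitly. Only the enum, the map, and the add_option/transform call mirror the patch; wire_compression, resolve_compression, the --set-code flag, and main are illustrative stand-ins, not cleos code.

// Sketch only: mirrors the --compression handling added in PATCH 24 (assumes CLI11 is available).
#include <CLI/CLI.hpp>
#include <iostream>
#include <map>
#include <string>

enum class tx_compression_type { none, zlib, default_compression };

static const std::map<std::string, tx_compression_type> compression_type_map{
   { "none", tx_compression_type::none },
   { "zlib", tx_compression_type::zlib }
};

// Stand-in for packed_transaction::compression_type in the real code base.
enum class wire_compression { none, zlib };

// default_compression resolves to the command's preferred default;
// an explicit choice on the command line always wins.
wire_compression resolve_compression( tx_compression_type t, wire_compression command_default ) {
   switch( t ) {
      case tx_compression_type::none: return wire_compression::none;
      case tx_compression_type::zlib: return wire_compression::zlib;
      case tx_compression_type::default_compression: return command_default;
   }
   return command_default; // unreachable; silences "missing return" warnings
}

int main( int argc, char** argv ) {
   CLI::App app{ "--compression option sketch" };
   tx_compression_type tx_compression = tx_compression_type::default_compression;

   // Same pattern as the patch: only 'none' or 'zlib' are accepted, case-insensitively.
   app.add_option( "--compression", tx_compression, "Compression for transaction 'none' or 'zlib'" )
      ->transform( CLI::CheckedTransformer( compression_type_map, CLI::ignore_case ) );

   bool set_code = false;
   app.add_flag( "--set-code", set_code, "Pretend this invocation is 'cleos set code'" );

   CLI11_PARSE( app, argc, argv );

   // 'set code' / 'set abi' prefer zlib; everything else defaults to none.
   const wire_compression command_default = set_code ? wire_compression::zlib : wire_compression::none;
   const wire_compression used = resolve_compression( tx_compression, command_default );

   std::cout << "packing transaction with "
             << ( used == wire_compression::zlib ? "zlib" : "none" ) << " compression\n";
   return 0;
}

Run as "sketch --set-code" this reports zlib, while "sketch --set-code --compression none" keeps the caller's explicit choice; the actual patch achieves the same effect by overwriting the global tx_compression with zlib before calling send_actions whenever it is still default_compression.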

From ce31ba0eb251f5d55b3592d9b35ce98273334156 Mon Sep 17 00:00:00 2001
From: Lin Huang
Date: Wed, 29 Jun 2022 12:57:26 -0400
Subject: [PATCH 25/25] Fix build when build path has spaces for mandel and
 appbase

---
 libraries/appbase        | 2 +-
 unittests/CMakeLists.txt | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/libraries/appbase b/libraries/appbase
index f99c9bc7ed..80763b678a 160000
--- a/libraries/appbase
+++ b/libraries/appbase
@@ -1 +1 @@
-Subproject commit f99c9bc7ed13f4235d013d257e65145eea00030e
+Subproject commit 80763b678abfcdab4199fab23d15966b194e99b2
diff --git a/unittests/CMakeLists.txt b/unittests/CMakeLists.txt
index af0b7e064a..6d64ceec97 100644
--- a/unittests/CMakeLists.txt
+++ b/unittests/CMakeLists.txt
@@ -55,7 +55,7 @@ target_include_directories( unit_test PUBLIC
 add_test(NAME protocol_feature_digest_unit_test COMMAND unit_test --run_test=protocol_feature_digest_tests --report_level=detailed --color_output --catch_system_errors=no)
 set(ctest_tests "protocol_feature_digest_tests")
 foreach(TEST_SUITE ${UNIT_TESTS}) # create an independent target for each test suite
-  execute_process(COMMAND sh -c "grep -E 'BOOST_AUTO_TEST_SUITE\\s*[(]' ${TEST_SUITE} | grep -vE '//.*BOOST_AUTO_TEST_SUITE\\s*[(]' | cut -d ')' -f 1 | cut -d '(' -f 2" OUTPUT_VARIABLE SUITE_NAME OUTPUT_STRIP_TRAILING_WHITESPACE) # get the test suite name from the *.cpp file
+  execute_process(COMMAND sh -c "grep -E 'BOOST_AUTO_TEST_SUITE\\s*[(]' '${TEST_SUITE}' | grep -vE '//.*BOOST_AUTO_TEST_SUITE\\s*[(]' | cut -d ')' -f 1 | cut -d '(' -f 2" OUTPUT_VARIABLE SUITE_NAME OUTPUT_STRIP_TRAILING_WHITESPACE) # get the test suite name from the *.cpp file
   if (NOT "" STREQUAL "${SUITE_NAME}") # ignore empty lines
     execute_process(COMMAND sh -c "echo ${SUITE_NAME} | sed -e 's/s$//' | sed -e 's/_test$//'" OUTPUT_VARIABLE TRIMMED_SUITE_NAME OUTPUT_STRIP_TRAILING_WHITESPACE) # trim "_test" or "_tests" from the end of ${SUITE_NAME}
     # to run unit_test with all log from blockchain displayed, put "--verbose" after "--", i.e. "unit_test -- --verbose"