From 2952bfb2647176d3ee92dd15e23cecc8b1b0f72d Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Thu, 25 May 2023 11:16:26 -0400 Subject: [PATCH 01/16] Cleanup use of `my->` in `plugin_startup()` and `plugin_shutdown()`. --- plugins/producer_plugin/producer_plugin.cpp | 75 +++++++++++---------- 1 file changed, 38 insertions(+), 37 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 833a0bb992..64b88b142f 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -332,6 +332,9 @@ class producer_plugin_impl : public std::enable_shared_from_this_thread_pool.start( my->_thread_pool_size, []( const fc::exception& e ) { + _thread_pool.start(_thread_pool_size, [](const fc::exception& e) { fc_elog( _log, "Exception in producer thread pool, exiting: ${e}", ("e", e.to_detail_string()) ); app().quit(); } ); - chain::controller& chain = my->chain_plug->chain(); - EOS_ASSERT( my->_producers.empty() || chain.get_read_mode() != chain::db_read_mode::IRREVERSIBLE, plugin_config_exception, + chain::controller& chain = chain_plug->chain(); + EOS_ASSERT( _producers.empty() || chain.get_read_mode() != chain::db_read_mode::IRREVERSIBLE, plugin_config_exception, "node cannot have any producer-name configured because block production is impossible when read_mode is \"irreversible\"" ); - EOS_ASSERT( my->_producers.empty() || chain.get_validation_mode() == chain::validation_mode::FULL, plugin_config_exception, + EOS_ASSERT( _producers.empty() || chain.get_validation_mode() == chain::validation_mode::FULL, plugin_config_exception, "node cannot have any producer-name configured because block production is not safe when validation_mode is not \"full\"" ); - EOS_ASSERT( my->_producers.empty() || my->chain_plug->accept_transactions(), plugin_config_exception, + EOS_ASSERT( _producers.empty() || chain_plug->accept_transactions(), plugin_config_exception, "node cannot have any producer-name configured because no block production is possible with no [api|p2p]-accepted-transactions" ); - my->_accepted_block_connection.emplace(chain.accepted_block.connect( [this]( const auto& bsp ){ my->on_block( bsp ); } )); - my->_accepted_block_header_connection.emplace(chain.accepted_block_header.connect( [this]( const auto& bsp ){ my->on_block_header( bsp ); } )); - my->_irreversible_block_connection.emplace(chain.irreversible_block.connect( [this]( const auto& bsp ){ my->on_irreversible_block( bsp->block ); } )); + _accepted_block_connection.emplace(chain.accepted_block.connect( [this]( const auto& bsp ){ on_block( bsp ); } )); + _accepted_block_header_connection.emplace(chain.accepted_block_header.connect( [this]( const auto& bsp ){ on_block_header( bsp ); } )); + _irreversible_block_connection.emplace(chain.irreversible_block.connect( [this]( const auto& bsp ){ on_irreversible_block( bsp->block ); } )); - my->_block_start_connection.emplace(chain.block_start.connect( [this, &chain]( uint32_t bs ) { + _block_start_connection.emplace(chain.block_start.connect( [this, &chain]( uint32_t bs ) { try { - my->_snapshot_scheduler.on_start_block(bs, chain); + _snapshot_scheduler.on_start_block(bs, chain); } catch (const snapshot_execution_exception & e) { fc_elog( _log, "Exception during snapshot execution: ${e}", ("e", e.to_detail_string()) ); @@ -1179,24 +1182,24 @@ void producer_plugin::plugin_startup() const auto lib_num = chain.last_irreversible_block_num(); const auto lib = chain.fetch_block_by_number(lib_num); if (lib) { 
- my->on_irreversible_block(lib); + on_irreversible_block(lib); } else { - my->_irreversible_block_time = fc::time_point::maximum(); + _irreversible_block_time = fc::time_point::maximum(); } - if (!my->_producers.empty()) { - ilog("Launching block production for ${n} producers at ${time}.", ("n", my->_producers.size())("time",fc::time_point::now())); + if (!_producers.empty()) { + ilog("Launching block production for ${n} producers at ${time}.", ("n", _producers.size())("time",fc::time_point::now())); - if (my->_production_enabled) { + if (_production_enabled) { if (chain.head_block_num() == 0) { new_chain_banner(chain); } } } - if ( my->_ro_thread_pool_size > 0 ) { + if ( _ro_thread_pool_size > 0 ) { std::atomic num_threads_started = 0; - my->_ro_thread_pool.start( my->_ro_thread_pool_size, + _ro_thread_pool.start( _ro_thread_pool_size, []( const fc::exception& e ) { fc_elog( _log, "Exception in read-only thread pool, exiting: ${e}", ("e", e.to_detail_string()) ); app().quit(); @@ -1210,16 +1213,16 @@ void producer_plugin::plugin_startup() // when C++20 is used. auto time_slept_ms = 0; constexpr auto max_time_slept_ms = 1000; - while ( num_threads_started.load() < my->_ro_thread_pool_size && time_slept_ms < max_time_slept_ms ) { + while ( num_threads_started.load() < _ro_thread_pool_size && time_slept_ms < max_time_slept_ms ) { std::this_thread::sleep_for( 1ms ); ++time_slept_ms; } - EOS_ASSERT(num_threads_started.load() == my->_ro_thread_pool_size, producer_exception, "read-only threads failed to start. num_threads_started: ${n}, time_slept_ms: ${t}ms", ("n", num_threads_started.load())("t", time_slept_ms)); + EOS_ASSERT(num_threads_started.load() == _ro_thread_pool_size, producer_exception, "read-only threads failed to start. num_threads_started: ${n}, time_slept_ms: ${t}ms", ("n", num_threads_started.load())("t", time_slept_ms)); - my->start_write_window(); + start_write_window(); } - my->schedule_production_loop(); + schedule_production_loop(); ilog("producer plugin: plugin_startup() end"); } catch( ... 
) { @@ -1229,27 +1232,25 @@ void producer_plugin::plugin_startup() } } FC_CAPTURE_AND_RETHROW() } -void producer_plugin::plugin_shutdown() { - try { - my->_timer.cancel(); - } catch ( const std::bad_alloc& ) { - chain_apis::api_base::handle_bad_alloc(); - } catch ( const boost::interprocess::bad_alloc& ) { - chain_apis::api_base::handle_bad_alloc(); - } catch(const fc::exception& e) { - edump((e.to_detail_string())); - } catch(const std::exception& e) { - edump((fc::std_exception_wrapper::from_current_exception(e).to_detail_string())); - } +void producer_plugin::plugin_startup() { + my->plugin_startup(); +} - my->_thread_pool.stop(); +void producer_plugin_impl::plugin_shutdown() { + boost::system::error_code ec; + _timer.cancel(ec); + _thread_pool.stop(); + _unapplied_transactions.clear(); - my->_unapplied_transactions.clear(); + app().executor().post( 0, [me = shared_from_this()](){} ); // keep my pointer alive until queue is drained - app().executor().post( 0, [me = my](){} ); // keep my pointer alive until queue is drained fc_ilog(_log, "exit shutdown"); } +void producer_plugin::plugin_shutdown() { + my->plugin_shutdown(); +} + void producer_plugin::handle_sighup() { fc::logger::update( logger_name, _log ); fc::logger::update(trx_successful_trace_logger_name, _trx_successful_trace_log); From b3b131f991b9a61066b378d02343b34f7b5225f2 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Thu, 25 May 2023 16:11:29 -0400 Subject: [PATCH 02/16] cleanup `plugin_initialize()` --- .../eosio/producer_plugin/producer_plugin.hpp | 2 +- plugins/producer_plugin/producer_plugin.cpp | 154 ++++++++++-------- 2 files changed, 84 insertions(+), 72 deletions(-) diff --git a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp index 1587a3d29b..84fa2a9f2b 100644 --- a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp +++ b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp @@ -171,9 +171,9 @@ class producer_plugin : public appbase::plugin { void register_update_produced_block_metrics(std::function&&); void register_update_incoming_block_metrics(std::function&&); - private: inline static bool test_mode_{false}; // to be moved into appbase (application_base) + private: std::shared_ptr my; }; diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 64b88b142f..52c992a99a 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -331,9 +331,11 @@ class producer_plugin_impl : public std::enable_shared_from_thischain_plug = app().find_plugin(); - EOS_ASSERT( my->chain_plug, plugin_config_exception, "chain_plugin not found" ); - my->_options = &options; - LOAD_VALUE_SET(options, "producer-name", my->_producers) +void producer_plugin_impl::plugin_initialize(const boost::program_options::variables_map& options) +{ + chain_plug = app().find_plugin(); + EOS_ASSERT( chain_plug, plugin_config_exception, "chain_plugin not found" ); + _options = &options; + LOAD_VALUE_SET(options, "producer-name", _producers) - chain::controller& chain = my->chain_plug->chain(); + chain::controller& chain = chain_plug->chain(); if( options.count("signature-provider") ) { const std::vector key_spec_pairs = options["signature-provider"].as>(); for (const auto& key_spec_pair : key_spec_pairs) { try { const auto& [pubkey, provider] = app().get_plugin().signature_provider_for_specification(key_spec_pair); - 
my->_signature_providers[pubkey] = provider; + _signature_providers[pubkey] = provider; } catch(secure_enclave_exception& e) { elog("Error with Secure Enclave signature provider: ${e}; ignoring ${val}", ("e", e.top_message())("val", key_spec_pair)); } catch (fc::exception& e) { @@ -952,7 +952,7 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ EOS_ASSERT( subjective_account_max_failures_window_size > 0, plugin_config_exception, "subjective-account-max-failures-window-size ${s} must be greater than 0", ("s", subjective_account_max_failures_window_size) ); - my->_account_fails.set_max_failures_per_account( options.at("subjective-account-max-failures").as(), + _account_fails.set_max_failures_per_account( options.at("subjective-account-max-failures").as(), subjective_account_max_failures_window_size ); uint32_t cpu_effort_pct = options.at("cpu-effort-percent").as(); @@ -960,15 +960,15 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ "cpu-effort-percent ${pct} must be 0 - 100", ("pct", cpu_effort_pct) ); cpu_effort_pct *= config::percent_1; - my->_cpu_effort_us = EOS_PERCENT( config::block_interval_us, cpu_effort_pct ); + _cpu_effort_us = EOS_PERCENT( config::block_interval_us, cpu_effort_pct ); - my->_max_block_cpu_usage_threshold_us = options.at( "max-block-cpu-usage-threshold-us" ).as(); - EOS_ASSERT( my->_max_block_cpu_usage_threshold_us < config::block_interval_us, plugin_config_exception, - "max-block-cpu-usage-threshold-us ${t} must be 0 .. ${bi}", ("bi", config::block_interval_us)("t", my->_max_block_cpu_usage_threshold_us) ); + _max_block_cpu_usage_threshold_us = options.at( "max-block-cpu-usage-threshold-us" ).as(); + EOS_ASSERT( _max_block_cpu_usage_threshold_us < config::block_interval_us, plugin_config_exception, + "max-block-cpu-usage-threshold-us ${t} must be 0 .. 
${bi}", ("bi", config::block_interval_us)("t", _max_block_cpu_usage_threshold_us) ); - my->_max_block_net_usage_threshold_bytes = options.at( "max-block-net-usage-threshold-bytes" ).as(); + _max_block_net_usage_threshold_bytes = options.at( "max-block-net-usage-threshold-bytes" ).as(); - my->_max_scheduled_transaction_time_per_block_ms = options.at("max-scheduled-transaction-time-per-block-ms").as(); + _max_scheduled_transaction_time_per_block_ms = options.at("max-scheduled-transaction-time-per-block-ms").as(); if( options.at( "subjective-cpu-leeway-us" ).as() != config::default_subjective_cpu_leeway_us ) { chain.set_subjective_cpu_leeway( fc::microseconds( options.at( "subjective-cpu-leeway-us" ).as() ) ); @@ -979,79 +979,79 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ "subjective-account-decay-time-minutes ${dt} must be greater than 0", ("dt", subjective_account_decay_time.to_seconds() / 60)); chain.get_mutable_subjective_billing().set_expired_accumulator_average_window( subjective_account_decay_time ); - my->_max_transaction_time_ms = options.at("max-transaction-time").as(); + _max_transaction_time_ms = options.at("max-transaction-time").as(); - my->_max_irreversible_block_age_us = fc::seconds(options.at("max-irreversible-block-age").as()); + _max_irreversible_block_age_us = fc::seconds(options.at("max-irreversible-block-age").as()); auto max_incoming_transaction_queue_size = options.at("incoming-transaction-queue-size-mb").as() * 1024*1024; EOS_ASSERT( max_incoming_transaction_queue_size > 0, plugin_config_exception, "incoming-transaction-queue-size-mb ${mb} must be greater than 0", ("mb", max_incoming_transaction_queue_size) ); - my->_unapplied_transactions.set_max_transaction_queue_size( max_incoming_transaction_queue_size ); + _unapplied_transactions.set_max_transaction_queue_size( max_incoming_transaction_queue_size ); - my->_incoming_defer_ratio = options.at("incoming-defer-ratio").as(); + _incoming_defer_ratio = options.at("incoming-defer-ratio").as(); bool disable_subjective_billing = options.at("disable-subjective-billing").as(); - my->_disable_subjective_p2p_billing = options.at("disable-subjective-p2p-billing").as(); - my->_disable_subjective_api_billing = options.at("disable-subjective-api-billing").as(); + _disable_subjective_p2p_billing = options.at("disable-subjective-p2p-billing").as(); + _disable_subjective_api_billing = options.at("disable-subjective-api-billing").as(); dlog( "disable-subjective-billing: ${s}, disable-subjective-p2p-billing: ${p2p}, disable-subjective-api-billing: ${api}", - ("s", disable_subjective_billing)("p2p", my->_disable_subjective_p2p_billing)("api", my->_disable_subjective_api_billing) ); + ("s", disable_subjective_billing)("p2p", _disable_subjective_p2p_billing)("api", _disable_subjective_api_billing) ); if( !disable_subjective_billing ) { - my->_disable_subjective_p2p_billing = my->_disable_subjective_api_billing = false; - } else if( !my->_disable_subjective_p2p_billing || !my->_disable_subjective_api_billing ) { + _disable_subjective_p2p_billing = _disable_subjective_api_billing = false; + } else if( !_disable_subjective_p2p_billing || !_disable_subjective_api_billing ) { disable_subjective_billing = false; } if( disable_subjective_billing ) { chain.get_mutable_subjective_billing().disable(); ilog( "Subjective CPU billing disabled" ); - } else if( !my->_disable_subjective_p2p_billing && !my->_disable_subjective_api_billing ) { + } else if( !_disable_subjective_p2p_billing && 
!_disable_subjective_api_billing ) { ilog( "Subjective CPU billing enabled" ); } else { - if( my->_disable_subjective_p2p_billing ) ilog( "Subjective CPU billing of P2P trxs disabled " ); - if( my->_disable_subjective_api_billing ) ilog( "Subjective CPU billing of API trxs disabled " ); + if( _disable_subjective_p2p_billing ) ilog( "Subjective CPU billing of P2P trxs disabled " ); + if( _disable_subjective_api_billing ) ilog( "Subjective CPU billing of API trxs disabled " ); } - my->_thread_pool_size = options.at( "producer-threads" ).as(); - EOS_ASSERT( my->_thread_pool_size > 0, plugin_config_exception, - "producer-threads ${num} must be greater than 0", ("num", my->_thread_pool_size)); + _thread_pool_size = options.at( "producer-threads" ).as(); + EOS_ASSERT( _thread_pool_size > 0, plugin_config_exception, + "producer-threads ${num} must be greater than 0", ("num", _thread_pool_size)); if( options.count( "snapshots-dir" )) { auto sd = options.at( "snapshots-dir" ).as(); if( sd.is_relative()) { - my->_snapshots_dir = app().data_dir() / sd; - if (!std::filesystem::exists(my->_snapshots_dir)) { - std::filesystem::create_directories(my->_snapshots_dir); + _snapshots_dir = app().data_dir() / sd; + if (!std::filesystem::exists(_snapshots_dir)) { + std::filesystem::create_directories(_snapshots_dir); } } else { - my->_snapshots_dir = sd; + _snapshots_dir = sd; } - EOS_ASSERT( std::filesystem::is_directory(my->_snapshots_dir), snapshot_directory_not_found_exception, - "No such directory '${dir}'", ("dir", my->_snapshots_dir) ); + EOS_ASSERT( std::filesystem::is_directory(_snapshots_dir), snapshot_directory_not_found_exception, + "No such directory '${dir}'", ("dir", _snapshots_dir) ); if (auto resmon_plugin = app().find_plugin()) { - resmon_plugin->monitor_directory(my->_snapshots_dir); + resmon_plugin->monitor_directory(_snapshots_dir); } } if ( options.count( "read-only-threads" ) ) { - my->_ro_thread_pool_size = options.at( "read-only-threads" ).as(); - } else if ( my->_producers.empty() ) { + _ro_thread_pool_size = options.at( "read-only-threads" ).as(); + } else if ( _producers.empty() ) { if( options.count( "plugin" ) ) { const auto& v = options.at( "plugin" ).as>(); auto i = std::find_if( v.cbegin(), v.cend(), []( const std::string& p ) { return p == "eosio::chain_api_plugin"; } ); if( i != v.cend() ) { // default to 3 threads for non producer nodes running chain_api_plugin if not specified - my->_ro_thread_pool_size = 3; - ilog( "chain_api_plugin configured, defaulting read-only-threads to ${t}", ("t", my->_ro_thread_pool_size) ); + _ro_thread_pool_size = 3; + ilog( "chain_api_plugin configured, defaulting read-only-threads to ${t}", ("t", _ro_thread_pool_size) ); } } } - EOS_ASSERT( test_mode_ || my->_ro_thread_pool_size == 0 || my->_producers.empty(), plugin_config_exception, "read-only-threads not allowed on producer node" ); + EOS_ASSERT( producer_plugin::test_mode_ || _ro_thread_pool_size == 0 || _producers.empty(), plugin_config_exception, "read-only-threads not allowed on producer node" ); // only initialize other read-only options when read-only thread pool is enabled - if ( my->_ro_thread_pool_size > 0 ) { + if ( _ro_thread_pool_size > 0 ) { #ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED if (chain.is_eos_vm_oc_enabled()) { // EOS VM OC requires 4.2TB Virtual for each executing thread. 
Make sure the memory @@ -1077,50 +1077,50 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ // reserve 1 for the app thread, 1 for anything else which might use VM EOS_ASSERT( num_threads_supported > 2, plugin_config_exception, "With the EOS VM OC configured, there is not enough system virtual memory to support the required minimum of 3 threads (1 for main thread, 1 for read-only, and 1 for anything else), vm total: ${t}, vm used: ${u}", ("t", vm_total_kb)("u", vm_used_kb)); num_threads_supported -= 2; - auto actual_threads_allowed = std::min(my->_ro_max_threads_allowed, num_threads_supported); - ilog("vm total in kb: ${total}, vm used in kb: ${used}, number of EOS VM OC threads supported ((vm total - vm used)/4.2 TB - 2): ${supp}, max allowed: ${max}, actual allowed: ${actual}", ("total", vm_total_kb) ("used", vm_used_kb) ("supp", num_threads_supported) ("max", my->_ro_max_threads_allowed)("actual", actual_threads_allowed)); - EOS_ASSERT( my->_ro_thread_pool_size <= actual_threads_allowed, plugin_config_exception, "read-only-threads (${th}) greater than number of threads allowed for EOS VM OC (${allowed})", ("th", my->_ro_thread_pool_size) ("allowed", actual_threads_allowed) ); + auto actual_threads_allowed = std::min(_ro_max_threads_allowed, num_threads_supported); + ilog("vm total in kb: ${total}, vm used in kb: ${used}, number of EOS VM OC threads supported ((vm total - vm used)/4.2 TB - 2): ${supp}, max allowed: ${max}, actual allowed: ${actual}", ("total", vm_total_kb) ("used", vm_used_kb) ("supp", num_threads_supported) ("max", _ro_max_threads_allowed)("actual", actual_threads_allowed)); + EOS_ASSERT( _ro_thread_pool_size <= actual_threads_allowed, plugin_config_exception, "read-only-threads (${th}) greater than number of threads allowed for EOS VM OC (${allowed})", ("th", _ro_thread_pool_size) ("allowed", actual_threads_allowed) ); } #endif - EOS_ASSERT( my->_ro_thread_pool_size <= my->_ro_max_threads_allowed, plugin_config_exception, "read-only-threads (${th}) greater than the number of threads allowed (${allowed})", ("th", my->_ro_thread_pool_size) ("allowed", my->_ro_max_threads_allowed) ); + EOS_ASSERT( _ro_thread_pool_size <= _ro_max_threads_allowed, plugin_config_exception, "read-only-threads (${th}) greater than the number of threads allowed (${allowed})", ("th", _ro_thread_pool_size) ("allowed", _ro_max_threads_allowed) ); - my->_ro_write_window_time_us = fc::microseconds( options.at( "read-only-write-window-time-us" ).as() ); - my->_ro_read_window_time_us = fc::microseconds( options.at( "read-only-read-window-time-us" ).as() ); - EOS_ASSERT( my->_ro_read_window_time_us > my->_ro_read_window_minimum_time_us, plugin_config_exception, "read-only-read-window-time-us (${read}) must be at least greater than ${min} us", ("read", my->_ro_read_window_time_us) ("min", my->_ro_read_window_minimum_time_us) ); - my->_ro_read_window_effective_time_us = my->_ro_read_window_time_us - my->_ro_read_window_minimum_time_us; + _ro_write_window_time_us = fc::microseconds( options.at( "read-only-write-window-time-us" ).as() ); + _ro_read_window_time_us = fc::microseconds( options.at( "read-only-read-window-time-us" ).as() ); + EOS_ASSERT( _ro_read_window_time_us > _ro_read_window_minimum_time_us, plugin_config_exception, "read-only-read-window-time-us (${read}) must be at least greater than ${min} us", ("read", _ro_read_window_time_us) ("min", _ro_read_window_minimum_time_us) ); + _ro_read_window_effective_time_us = _ro_read_window_time_us - 
_ro_read_window_minimum_time_us; // Make sure a read-only transaction can finish within the read // window if scheduled at the very beginning of the window. // Add _ro_read_window_minimum_time_us for safety margin. - if ( my->_max_transaction_time_ms.load() > 0 ) { - EOS_ASSERT( my->_ro_read_window_time_us > ( fc::milliseconds(my->_max_transaction_time_ms.load()) + my->_ro_read_window_minimum_time_us ), plugin_config_exception, "read-only-read-window-time-us (${read} us) must be greater than max-transaction-time (${trx_time} us) plus ${min} us, required: ${read} us > (${trx_time} us + ${min} us).", ("read", my->_ro_read_window_time_us) ("trx_time", my->_max_transaction_time_ms.load() * 1000) ("min", my->_ro_read_window_minimum_time_us) ); + if ( _max_transaction_time_ms.load() > 0 ) { + EOS_ASSERT( _ro_read_window_time_us > ( fc::milliseconds(_max_transaction_time_ms.load()) + _ro_read_window_minimum_time_us ), plugin_config_exception, "read-only-read-window-time-us (${read} us) must be greater than max-transaction-time (${trx_time} us) plus ${min} us, required: ${read} us > (${trx_time} us + ${min} us).", ("read", _ro_read_window_time_us) ("trx_time", _max_transaction_time_ms.load() * 1000) ("min", _ro_read_window_minimum_time_us) ); } ilog("read-only-write-window-time-us: ${ww} us, read-only-read-window-time-us: ${rw} us, effective read window time to be used: ${w} us", - ("ww", my->_ro_write_window_time_us)("rw", my->_ro_read_window_time_us)("w", my->_ro_read_window_effective_time_us)); + ("ww", _ro_write_window_time_us)("rw", _ro_read_window_time_us)("w", _ro_read_window_effective_time_us)); } // Make sure _ro_max_trx_time_us is alwasys set. - if ( my->_max_transaction_time_ms.load() > 0 ) { - my->_ro_max_trx_time_us = fc::milliseconds(my->_max_transaction_time_ms.load()); + if ( _max_transaction_time_ms.load() > 0 ) { + _ro_max_trx_time_us = fc::milliseconds(_max_transaction_time_ms.load()); } else { // max-transaction-time can be set to negative for unlimited time - my->_ro_max_trx_time_us = fc::microseconds::maximum(); + _ro_max_trx_time_us = fc::microseconds::maximum(); } - ilog("read-only-threads ${s}, max read-only trx time to be enforced: ${t} us}", ("s", my->_ro_thread_pool_size)("t", my->_ro_max_trx_time_us)); + ilog("read-only-threads ${s}, max read-only trx time to be enforced: ${t} us}", ("s", _ro_thread_pool_size)("t", _ro_max_trx_time_us)); - my->_incoming_block_sync_provider = app().get_method().register_provider( + _incoming_block_sync_provider = app().get_method().register_provider( [this](const signed_block_ptr& block, const std::optional& block_id, const block_state_ptr& bsp) { - return my->on_incoming_block(block, block_id, bsp); + return on_incoming_block(block, block_id, bsp); }); - my->_incoming_transaction_async_provider = app().get_method().register_provider( + _incoming_transaction_async_provider = app().get_method().register_provider( [this](const packed_transaction_ptr& trx, bool api_trx, transaction_metadata::trx_type trx_type, bool return_failure_traces, next_function next) -> void { - return my->on_incoming_transaction_async(trx, api_trx, trx_type, return_failure_traces, next ); + return on_incoming_transaction_async(trx, api_trx, trx_type, return_failure_traces, next ); }); if (options.count("greylist-account")) { std::vector greylist = options["greylist-account"].as>(); - greylist_params param; + producer_plugin::greylist_params param; for (auto &a : greylist) { param.accounts.push_back(account_name(a)); } @@ -1139,9 +1139,17 @@ void 
producer_plugin::plugin_initialize(const boost::program_options::variables_ } } - my->_snapshot_scheduler.set_db_path(my->_snapshots_dir); - my->_snapshot_scheduler.set_snapshots_path(my->_snapshots_dir); -} FC_LOG_AND_RETHROW() } + _snapshot_scheduler.set_db_path(_snapshots_dir); + _snapshot_scheduler.set_snapshots_path(_snapshots_dir); +} + +void producer_plugin::plugin_initialize(const boost::program_options::variables_map& options) +{ + try { + handle_sighup(); // Sets loggers + my->plugin_initialize(options); + } FC_LOG_AND_RETHROW() +} using namespace std::chrono_literals; void producer_plugin_impl::plugin_startup() @@ -1338,15 +1346,19 @@ producer_plugin::runtime_options producer_plugin::get_runtime_options() const { }; } -void producer_plugin::add_greylist_accounts(const greylist_params& params) { +void producer_plugin_impl::add_greylist_accounts(const producer_plugin::greylist_params& params) { EOS_ASSERT(params.accounts.size() > 0, chain::invalid_http_request, "At least one account is required"); - chain::controller& chain = my->chain_plug->chain(); + chain::controller& chain = chain_plug->chain(); for (auto &acc : params.accounts) { chain.add_resource_greylist(acc); } } +void producer_plugin::add_greylist_accounts(const greylist_params& params) { + my->add_greylist_accounts(params); +} + void producer_plugin::remove_greylist_accounts(const greylist_params& params) { EOS_ASSERT(params.accounts.size() > 0, chain::invalid_http_request, "At least one account is required"); From f4d067e9d8f912981d9f723c04a7ab51966bf11c Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Thu, 25 May 2023 16:17:50 -0400 Subject: [PATCH 03/16] Fix incorrect indentation in producer_plugin::plugin_initialize --- plugins/producer_plugin/producer_plugin.cpp | 60 ++++++++++++++------- 1 file changed, 41 insertions(+), 19 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 52c992a99a..ba307c7622 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -950,16 +950,17 @@ void producer_plugin_impl::plugin_initialize(const boost::program_options::varia auto subjective_account_max_failures_window_size = options.at("subjective-account-max-failures-window-size").as(); EOS_ASSERT( subjective_account_max_failures_window_size > 0, plugin_config_exception, - "subjective-account-max-failures-window-size ${s} must be greater than 0", ("s", subjective_account_max_failures_window_size) ); + "subjective-account-max-failures-window-size ${s} must be greater than 0", + ("s", subjective_account_max_failures_window_size) ); _account_fails.set_max_failures_per_account( options.at("subjective-account-max-failures").as(), - subjective_account_max_failures_window_size ); + subjective_account_max_failures_window_size ); uint32_t cpu_effort_pct = options.at("cpu-effort-percent").as(); EOS_ASSERT( cpu_effort_pct >= 0 && cpu_effort_pct <= 100, plugin_config_exception, "cpu-effort-percent ${pct} must be 0 - 100", ("pct", cpu_effort_pct) ); - cpu_effort_pct *= config::percent_1; - + cpu_effort_pct *= config::percent_1; + _cpu_effort_us = EOS_PERCENT( config::block_interval_us, cpu_effort_pct ); _max_block_cpu_usage_threshold_us = options.at( "max-block-cpu-usage-threshold-us" ).as(); @@ -1008,7 +1009,7 @@ void producer_plugin_impl::plugin_initialize(const boost::program_options::varia } else if( !_disable_subjective_p2p_billing && !_disable_subjective_api_billing ) { ilog( "Subjective CPU billing enabled" ); } else { - if( 
_disable_subjective_p2p_billing ) ilog( "Subjective CPU billing of P2P trxs disabled " ); + if( _disable_subjective_p2p_billing ) ilog( "Subjective CPU billing of P2P trxs disabled " ); if( _disable_subjective_api_billing ) ilog( "Subjective CPU billing of API trxs disabled " ); } @@ -1071,29 +1072,49 @@ void producer_plugin_impl::plugin_initialize(const boost::program_options::varia meminfo_file.ignore(std::numeric_limits::max(), '\n'); } - EOS_ASSERT( vm_total_kb > 0, plugin_config_exception, "Unable to get system virtual memory size (not a Linux?), therefore cannot determine if the system has enough virtual memory for multi-threaded read-only transactions on EOS VM OC"); - EOS_ASSERT( vm_total_kb > vm_used_kb, plugin_config_exception, "vm total (${t}) must be greater than vm used (${u})", ("t", vm_total_kb)("u", vm_used_kb)); + EOS_ASSERT( vm_total_kb > 0, plugin_config_exception, + "Unable to get system virtual memory size (not a Linux?), therefore cannot determine if the system has enough " + "virtual memory for multi-threaded read-only transactions on EOS VM OC"); + EOS_ASSERT( vm_total_kb > vm_used_kb, plugin_config_exception, + "vm total (${t}) must be greater than vm used (${u})", ("t", vm_total_kb)("u", vm_used_kb)); uint32_t num_threads_supported = (vm_total_kb - vm_used_kb) / 4200000000; // reserve 1 for the app thread, 1 for anything else which might use VM - EOS_ASSERT( num_threads_supported > 2, plugin_config_exception, "With the EOS VM OC configured, there is not enough system virtual memory to support the required minimum of 3 threads (1 for main thread, 1 for read-only, and 1 for anything else), vm total: ${t}, vm used: ${u}", ("t", vm_total_kb)("u", vm_used_kb)); + EOS_ASSERT( num_threads_supported > 2, plugin_config_exception, + "With the EOS VM OC configured, there is not enough system virtual memory to support the required minimum of " + "3 threads (1 for main thread, 1 for read-only, and 1 for anything else), vm total: ${t}, vm used: ${u}", + ("t", vm_total_kb)("u", vm_used_kb)); num_threads_supported -= 2; auto actual_threads_allowed = std::min(_ro_max_threads_allowed, num_threads_supported); - ilog("vm total in kb: ${total}, vm used in kb: ${used}, number of EOS VM OC threads supported ((vm total - vm used)/4.2 TB - 2): ${supp}, max allowed: ${max}, actual allowed: ${actual}", ("total", vm_total_kb) ("used", vm_used_kb) ("supp", num_threads_supported) ("max", _ro_max_threads_allowed)("actual", actual_threads_allowed)); - EOS_ASSERT( _ro_thread_pool_size <= actual_threads_allowed, plugin_config_exception, "read-only-threads (${th}) greater than number of threads allowed for EOS VM OC (${allowed})", ("th", _ro_thread_pool_size) ("allowed", actual_threads_allowed) ); + ilog("vm total in kb: ${total}, vm used in kb: ${used}, number of EOS VM OC threads supported " + "((vm total - vm used)/4.2 TB - 2): ${supp}, max allowed: ${max}, actual allowed: ${actual}", + ("total", vm_total_kb) ("used", vm_used_kb) ("supp", num_threads_supported) + ("max", _ro_max_threads_allowed)("actual", actual_threads_allowed)); + EOS_ASSERT( _ro_thread_pool_size <= actual_threads_allowed, plugin_config_exception, + "read-only-threads (${th}) greater than number of threads allowed for EOS VM OC (${allowed})", + ("th", _ro_thread_pool_size) ("allowed", actual_threads_allowed) ); } #endif - EOS_ASSERT( _ro_thread_pool_size <= _ro_max_threads_allowed, plugin_config_exception, "read-only-threads (${th}) greater than the number of threads allowed (${allowed})", ("th", _ro_thread_pool_size) 
("allowed", _ro_max_threads_allowed) ); + EOS_ASSERT( _ro_thread_pool_size <= _ro_max_threads_allowed, plugin_config_exception, + "read-only-threads (${th}) greater than the number of threads allowed (${allowed})", + ("th", _ro_thread_pool_size) ("allowed", _ro_max_threads_allowed) ); _ro_write_window_time_us = fc::microseconds( options.at( "read-only-write-window-time-us" ).as() ); _ro_read_window_time_us = fc::microseconds( options.at( "read-only-read-window-time-us" ).as() ); - EOS_ASSERT( _ro_read_window_time_us > _ro_read_window_minimum_time_us, plugin_config_exception, "read-only-read-window-time-us (${read}) must be at least greater than ${min} us", ("read", _ro_read_window_time_us) ("min", _ro_read_window_minimum_time_us) ); + EOS_ASSERT( _ro_read_window_time_us > _ro_read_window_minimum_time_us, plugin_config_exception, + "read-only-read-window-time-us (${read}) must be at least greater than ${min} us", + ("read", _ro_read_window_time_us) ("min", _ro_read_window_minimum_time_us) ); _ro_read_window_effective_time_us = _ro_read_window_time_us - _ro_read_window_minimum_time_us; // Make sure a read-only transaction can finish within the read // window if scheduled at the very beginning of the window. // Add _ro_read_window_minimum_time_us for safety margin. if ( _max_transaction_time_ms.load() > 0 ) { - EOS_ASSERT( _ro_read_window_time_us > ( fc::milliseconds(_max_transaction_time_ms.load()) + _ro_read_window_minimum_time_us ), plugin_config_exception, "read-only-read-window-time-us (${read} us) must be greater than max-transaction-time (${trx_time} us) plus ${min} us, required: ${read} us > (${trx_time} us + ${min} us).", ("read", _ro_read_window_time_us) ("trx_time", _max_transaction_time_ms.load() * 1000) ("min", _ro_read_window_minimum_time_us) ); + EOS_ASSERT( _ro_read_window_time_us > ( fc::milliseconds(_max_transaction_time_ms.load()) + _ro_read_window_minimum_time_us ), + plugin_config_exception, + "read-only-read-window-time-us (${read} us) must be greater than max-transaction-time (${trx_time} us) " + "plus ${min} us, required: ${read} us > (${trx_time} us + ${min} us).", + ("read", _ro_read_window_time_us) ("trx_time", _max_transaction_time_ms.load() * 1000) + ("min", _ro_read_window_minimum_time_us) ); } ilog("read-only-write-window-time-us: ${ww} us, read-only-read-window-time-us: ${rw} us, effective read window time to be used: ${w} us", ("ww", _ro_write_window_time_us)("rw", _ro_read_window_time_us)("w", _ro_read_window_effective_time_us)); @@ -1109,14 +1130,15 @@ void producer_plugin_impl::plugin_initialize(const boost::program_options::varia ilog("read-only-threads ${s}, max read-only trx time to be enforced: ${t} us}", ("s", _ro_thread_pool_size)("t", _ro_max_trx_time_us)); _incoming_block_sync_provider = app().get_method().register_provider( - [this](const signed_block_ptr& block, const std::optional& block_id, const block_state_ptr& bsp) { - return on_incoming_block(block, block_id, bsp); - }); + [this](const signed_block_ptr& block, const std::optional& block_id, const block_state_ptr& bsp) { + return on_incoming_block(block, block_id, bsp); + }); _incoming_transaction_async_provider = app().get_method().register_provider( - [this](const packed_transaction_ptr& trx, bool api_trx, transaction_metadata::trx_type trx_type, bool return_failure_traces, next_function next) -> void { - return on_incoming_transaction_async(trx, api_trx, trx_type, return_failure_traces, next ); - }); + [this](const packed_transaction_ptr& trx, bool api_trx, transaction_metadata::trx_type 
trx_type, + bool return_failure_traces, next_function next) -> void { + return on_incoming_transaction_async(trx, api_trx, trx_type, return_failure_traces, next ); + }); if (options.count("greylist-account")) { std::vector greylist = options["greylist-account"].as>(); From fd3d54f42ad9ee44c4e8d9c8a196d2f3dcb706f0 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Thu, 25 May 2023 16:49:10 -0400 Subject: [PATCH 04/16] More producer_plugin cleanup towards proper pimpl idiom --- plugins/producer_plugin/producer_plugin.cpp | 1064 ++++++++++--------- 1 file changed, 562 insertions(+), 502 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index ba307c7622..ddeba1a53e 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -288,507 +288,615 @@ struct block_time_tracker { } // anonymous namespace class producer_plugin_impl : public std::enable_shared_from_this { - public: - producer_plugin_impl(boost::asio::io_service& io) +public: + producer_plugin_impl(boost::asio::io_service& io) :_timer(io) ,_transaction_ack_channel(app().get_channel()) ,_ro_timer(io) - { - } + { + } - uint32_t calculate_next_block_slot(const account_name& producer_name, uint32_t current_block_slot) const; - void schedule_production_loop(); - void schedule_maybe_produce_block( bool exhausted ); - void produce_block(); - bool maybe_produce_block(); - bool block_is_exhausted() const; - bool remove_expired_trxs( const fc::time_point& deadline ); - bool remove_expired_blacklisted_trxs( const fc::time_point& deadline ); - bool process_unapplied_trxs( const fc::time_point& deadline ); - void process_scheduled_and_incoming_trxs( const fc::time_point& deadline, unapplied_transaction_queue::iterator& itr ); - bool process_incoming_trxs( const fc::time_point& deadline, unapplied_transaction_queue::iterator& itr ); - - struct push_result { - bool block_exhausted = false; - bool trx_exhausted = false; - bool failed = false; - }; - push_result push_transaction( const fc::time_point& block_deadline, - const transaction_metadata_ptr& trx, - bool api_trx, bool return_failure_trace, - const next_function& next ); - push_result handle_push_result( const transaction_metadata_ptr& trx, - const next_function& next, - const fc::time_point& start, - chain::controller& chain, - const transaction_trace_ptr& trace, - bool return_failure_trace, - bool disable_subjective_enforcement, - account_name first_auth, - int64_t sub_bill, - uint32_t prev_billed_cpu_time_us ); - void log_trx_results( const transaction_metadata_ptr& trx, const transaction_trace_ptr& trace, const fc::time_point& start ); - void log_trx_results( const transaction_metadata_ptr& trx, const fc::exception_ptr& except_ptr ); - void log_trx_results( const packed_transaction_ptr& trx, const transaction_trace_ptr& trace, - const fc::exception_ptr& except_ptr, uint32_t billed_cpu_us, const fc::time_point& start, bool is_transient ); - void add_greylist_accounts(const producer_plugin::greylist_params& params); - - void plugin_shutdown(); - void plugin_startup(); - void plugin_initialize(const boost::program_options::variables_map& options); + uint32_t calculate_next_block_slot(const account_name& producer_name, uint32_t current_block_slot) const; + void schedule_production_loop(); + void schedule_maybe_produce_block( bool exhausted ); + void produce_block(); + bool maybe_produce_block(); + bool block_is_exhausted() const; + bool remove_expired_trxs( const fc::time_point& deadline 
); + bool remove_expired_blacklisted_trxs( const fc::time_point& deadline ); + bool process_unapplied_trxs( const fc::time_point& deadline ); + void process_scheduled_and_incoming_trxs( const fc::time_point& deadline, unapplied_transaction_queue::iterator& itr ); + bool process_incoming_trxs( const fc::time_point& deadline, unapplied_transaction_queue::iterator& itr ); + + struct push_result { + bool block_exhausted = false; + bool trx_exhausted = false; + bool failed = false; + }; + push_result push_transaction( const fc::time_point& block_deadline, + const transaction_metadata_ptr& trx, + bool api_trx, bool return_failure_trace, + const next_function& next ); + push_result handle_push_result( const transaction_metadata_ptr& trx, + const next_function& next, + const fc::time_point& start, + chain::controller& chain, + const transaction_trace_ptr& trace, + bool return_failure_trace, + bool disable_subjective_enforcement, + account_name first_auth, + int64_t sub_bill, + uint32_t prev_billed_cpu_time_us ); + void log_trx_results( const transaction_metadata_ptr& trx, const transaction_trace_ptr& trace, const fc::time_point& start ); + void log_trx_results( const transaction_metadata_ptr& trx, const fc::exception_ptr& except_ptr ); + void log_trx_results( const packed_transaction_ptr& trx, const transaction_trace_ptr& trace, + const fc::exception_ptr& except_ptr, uint32_t billed_cpu_us, const fc::time_point& start, bool is_transient ); + void add_greylist_accounts(const producer_plugin::greylist_params& params) { + EOS_ASSERT(params.accounts.size() > 0, chain::invalid_http_request, "At least one account is required"); + + chain::controller& chain = chain_plug->chain(); + for (auto &acc : params.accounts) { + chain.add_resource_greylist(acc); + } + } - boost::program_options::variables_map _options; - bool _production_enabled = false; - bool _pause_production = false; - - using signature_provider_type = signature_provider_plugin::signature_provider_type; - std::map _signature_providers; - std::set _producers; - boost::asio::deadline_timer _timer; - using producer_watermark = std::pair; - std::map _producer_watermarks; - pending_block_mode _pending_block_mode = pending_block_mode::speculating; - unapplied_transaction_queue _unapplied_transactions; - size_t _thread_pool_size = config::default_controller_thread_pool_size; - named_thread_pool _thread_pool; - - std::atomic _max_transaction_time_ms; // modified by app thread, read by net_plugin thread pool - std::atomic _received_block{0}; // modified by net_plugin thread pool - fc::microseconds _max_irreversible_block_age_us; - int32_t _cpu_effort_us = 0; - fc::time_point _pending_block_deadline; - uint32_t _max_block_cpu_usage_threshold_us = 0; - uint32_t _max_block_net_usage_threshold_bytes = 0; - int32_t _max_scheduled_transaction_time_per_block_ms = 0; - bool _disable_subjective_p2p_billing = true; - bool _disable_subjective_api_billing = true; - fc::time_point _irreversible_block_time; - fc::time_point _idle_trx_time{fc::time_point::now()}; - - std::vector _protocol_features_to_activate; - bool _protocol_features_signaled = false; // to mark whether it has been signaled in start_block - - chain_plugin* chain_plug = nullptr; - - compat::channels::transaction_ack::channel_type& _transaction_ack_channel; - - incoming::methods::block_sync::method_type::handle _incoming_block_sync_provider; - incoming::methods::transaction_async::method_type::handle _incoming_transaction_async_provider; - - transaction_id_with_expiry_index _blacklisted_transactions; 
- account_failures _account_fails; - block_time_tracker _time_tracker; - - std::optional _accepted_block_connection; - std::optional _accepted_block_header_connection; - std::optional _irreversible_block_connection; - std::optional _block_start_connection; - - /* - * HACK ALERT - * Boost timers can be in a state where a handler has not yet executed but is not abortable. - * As this method needs to mutate state handlers depend on for proper functioning to maintain - * invariants for other code (namely accepting incoming transactions in a nearly full block) - * the handlers capture a corelation ID at the time they are set. When they are executed - * they must check that correlation_id against the global ordinal. If it does not match that - * implies that this method has been called with the handler in the state where it should be - * cancelled but wasn't able to be. - */ - uint32_t _timer_corelation_id = 0; - - // keep a expected ratio between defer txn and incoming txn - double _incoming_defer_ratio = 1.0; // 1:1 - - // path to write the snapshots to - std::filesystem::path _snapshots_dir; - - // async snapshot scheduler - snapshot_scheduler _snapshot_scheduler; - - std::function _update_produced_block_metrics; - std::function _update_incoming_block_metrics; - - // ro for read-only - struct ro_trx_t { - transaction_metadata_ptr trx; - next_func_t next; - }; - // The queue storing previously exhausted read-only transactions to be re-executed by read-only threads - // thread-safe - class ro_trx_queue_t { - public: - void push_front(ro_trx_t&& t) { - std::lock_guard g(mtx); - queue.push_front(std::move(t)); - } + void remove_greylist_accounts(const producer_plugin::greylist_params& params) { + EOS_ASSERT(params.accounts.size() > 0, chain::invalid_http_request, "At least one account is required"); - bool empty() const { - std::lock_guard g(mtx); - return queue.empty(); - } + chain::controller& chain = chain_plug->chain(); + for (auto &acc : params.accounts) { + chain.remove_resource_greylist(acc); + } + } - bool pop_front(ro_trx_t& t) { - std::unique_lock g(mtx); - if (queue.empty()) - return false; - t = queue.front(); - queue.pop_front(); - return true; - } + producer_plugin::greylist_params get_greylist() const { + chain::controller& chain = chain_plug->chain(); + producer_plugin::greylist_params result; + const auto& list = chain.get_resource_greylist(); + result.accounts.reserve(list.size()); + for (auto &acc: list) { + result.accounts.push_back(acc); + } + return result; + } + + producer_plugin::integrity_hash_information get_integrity_hash() { + chain::controller& chain = chain_plug->chain(); - private: - mutable std::mutex mtx; - deque queue; // boost deque which is faster than std::deque - }; + auto reschedule = fc::make_scoped_exit([this]() { + schedule_production_loop(); + }); - uint32_t _ro_thread_pool_size{ 0 }; - // Due to uncertainty to get total virtual memory size on a 5-level paging system for eos-vm-oc and - // possible memory exhuastion for large number of contract usage for non-eos-vm-oc, set a hard limit - static constexpr uint32_t _ro_max_threads_allowed{ 8 }; - named_thread_pool _ro_thread_pool; - fc::microseconds _ro_write_window_time_us{ 200000 }; - fc::microseconds _ro_read_window_time_us{ 60000 }; - static constexpr fc::microseconds _ro_read_window_minimum_time_us{ 10000 }; - fc::microseconds _ro_read_window_effective_time_us{ 0 }; // calculated during option initialization - std::atomic _ro_all_threads_exec_time_us; // total time spent by all threads executing 
transactions. use atomic for simplicity and performance - fc::time_point _ro_read_window_start_time; - fc::time_point _ro_window_deadline; // only modified on app thread, read-window deadline or write-window deadline - boost::asio::deadline_timer _ro_timer; // only accessible from the main thread - fc::microseconds _ro_max_trx_time_us{ 0 }; // calculated during option initialization - ro_trx_queue_t _ro_exhausted_trx_queue; - std::atomic _ro_num_active_exec_tasks{ 0 }; - std::vector> _ro_exec_tasks_fut; - - void start_write_window(); - void switch_to_write_window(); - void switch_to_read_window(); - bool read_only_execution_task(uint32_t pending_block_num); - void repost_exhausted_transactions(const fc::time_point& deadline); - bool push_read_only_transaction(transaction_metadata_ptr trx, next_function next); - - void consider_new_watermark( account_name producer, uint32_t block_num, block_timestamp_type timestamp) { - auto itr = _producer_watermarks.find( producer ); - if( itr != _producer_watermarks.end() ) { - itr->second.first = std::max( itr->second.first, block_num ); - itr->second.second = std::max( itr->second.second, timestamp ); - } else if( _producers.count( producer ) > 0 ) { - _producer_watermarks.emplace( producer, std::make_pair(block_num, timestamp) ); - } + if (chain.is_building_block()) { + // abort the pending block + abort_block(); + } else { + reschedule.cancel(); } - std::optional get_watermark( account_name producer ) const { - auto itr = _producer_watermarks.find( producer ); + return {chain.head_block_id(), chain.calculate_integrity_hash()}; + } - if( itr == _producer_watermarks.end() ) return {}; + void create_snapshot(producer_plugin::next_function next) { + chain::controller& chain = chain_plug->chain(); + + auto reschedule = fc::make_scoped_exit([this](){ + schedule_production_loop(); + }); - return itr->second; + auto predicate = [&]() -> void { + if (chain.is_building_block()) { + // abort the pending block + abort_block(); + } else { + reschedule.cancel(); + } + }; + + _snapshot_scheduler.create_snapshot(std::move(next), chain, predicate); + } + + void update_runtime_options(const producer_plugin::runtime_options& options); + producer_plugin::runtime_options get_runtime_options() const { + return { + _max_transaction_time_ms, + _max_irreversible_block_age_us.count() < 0 ? -1 : _max_irreversible_block_age_us.count() / 1'000'000, + _cpu_effort_us, + _max_scheduled_transaction_time_per_block_ms, + chain_plug->chain().get_subjective_cpu_leeway() ? 
+ chain_plug->chain().get_subjective_cpu_leeway()->count() : + std::optional(), + _incoming_defer_ratio, + chain_plug->chain().get_greylist_limit() + }; + } + void plugin_shutdown(); + void plugin_startup(); + void plugin_initialize(const boost::program_options::variables_map& options); + + boost::program_options::variables_map _options; + bool _production_enabled = false; + bool _pause_production = false; + + using signature_provider_type = signature_provider_plugin::signature_provider_type; + std::map _signature_providers; + std::set _producers; + boost::asio::deadline_timer _timer; + using producer_watermark = std::pair; + std::map _producer_watermarks; + pending_block_mode _pending_block_mode = pending_block_mode::speculating; + unapplied_transaction_queue _unapplied_transactions; + size_t _thread_pool_size = config::default_controller_thread_pool_size; + named_thread_pool _thread_pool; + + std::atomic _max_transaction_time_ms; // modified by app thread, read by net_plugin thread pool + std::atomic _received_block{0}; // modified by net_plugin thread pool + fc::microseconds _max_irreversible_block_age_us; + int32_t _cpu_effort_us = 0; + fc::time_point _pending_block_deadline; + uint32_t _max_block_cpu_usage_threshold_us = 0; + uint32_t _max_block_net_usage_threshold_bytes = 0; + int32_t _max_scheduled_transaction_time_per_block_ms = 0; + bool _disable_subjective_p2p_billing = true; + bool _disable_subjective_api_billing = true; + fc::time_point _irreversible_block_time; + fc::time_point _idle_trx_time{fc::time_point::now()}; + + std::vector _protocol_features_to_activate; + bool _protocol_features_signaled = false; // to mark whether it has been signaled in start_block + + chain_plugin* chain_plug = nullptr; + + compat::channels::transaction_ack::channel_type& _transaction_ack_channel; + + incoming::methods::block_sync::method_type::handle _incoming_block_sync_provider; + incoming::methods::transaction_async::method_type::handle _incoming_transaction_async_provider; + + transaction_id_with_expiry_index _blacklisted_transactions; + account_failures _account_fails; + block_time_tracker _time_tracker; + + std::optional _accepted_block_connection; + std::optional _accepted_block_header_connection; + std::optional _irreversible_block_connection; + std::optional _block_start_connection; + + /* + * HACK ALERT + * Boost timers can be in a state where a handler has not yet executed but is not abortable. + * As this method needs to mutate state handlers depend on for proper functioning to maintain + * invariants for other code (namely accepting incoming transactions in a nearly full block) + * the handlers capture a corelation ID at the time they are set. When they are executed + * they must check that correlation_id against the global ordinal. If it does not match that + * implies that this method has been called with the handler in the state where it should be + * cancelled but wasn't able to be. 
+ */ + uint32_t _timer_corelation_id = 0; + + // keep a expected ratio between defer txn and incoming txn + double _incoming_defer_ratio = 1.0; // 1:1 + + // path to write the snapshots to + std::filesystem::path _snapshots_dir; + + // async snapshot scheduler + snapshot_scheduler _snapshot_scheduler; + + std::function _update_produced_block_metrics; + std::function _update_incoming_block_metrics; + + // ro for read-only + struct ro_trx_t { + transaction_metadata_ptr trx; + next_func_t next; + }; + // The queue storing previously exhausted read-only transactions to be re-executed by read-only threads + // thread-safe + class ro_trx_queue_t { + public: + void push_front(ro_trx_t&& t) { + std::lock_guard g(mtx); + queue.push_front(std::move(t)); } - void on_block( const block_state_ptr& bsp ) { - auto& chain = chain_plug->chain(); - auto before = _unapplied_transactions.size(); - _unapplied_transactions.clear_applied( bsp ); - chain.get_mutable_subjective_billing().on_block( _log, bsp, fc::time_point::now() ); - if (before > 0) { - fc_dlog( _log, "Removed applied transactions before: ${before}, after: ${after}", - ("before", before)("after", _unapplied_transactions.size()) ); - } + bool empty() const { + std::lock_guard g(mtx); + return queue.empty(); } - void on_block_header( const block_state_ptr& bsp ) { - consider_new_watermark( bsp->header.producer, bsp->block_num, bsp->block->timestamp ); + bool pop_front(ro_trx_t& t) { + std::unique_lock g(mtx); + if (queue.empty()) + return false; + t = queue.front(); + queue.pop_front(); + return true; } - void on_irreversible_block( const signed_block_ptr& lib ) { - const chain::controller& chain = chain_plug->chain(); - EOS_ASSERT(chain.is_write_window(), producer_exception, "write window is expected for on_irreversible_block signal"); - _irreversible_block_time = lib->timestamp.to_time_point(); - _snapshot_scheduler.on_irreversible_block(lib, chain); + private: + mutable std::mutex mtx; + deque queue; // boost deque which is faster than std::deque + }; + + uint32_t _ro_thread_pool_size{ 0 }; + // Due to uncertainty to get total virtual memory size on a 5-level paging system for eos-vm-oc and + // possible memory exhuastion for large number of contract usage for non-eos-vm-oc, set a hard limit + static constexpr uint32_t _ro_max_threads_allowed{ 8 }; + named_thread_pool _ro_thread_pool; + fc::microseconds _ro_write_window_time_us{ 200000 }; + fc::microseconds _ro_read_window_time_us{ 60000 }; + static constexpr fc::microseconds _ro_read_window_minimum_time_us{ 10000 }; + fc::microseconds _ro_read_window_effective_time_us{ 0 }; // calculated during option initialization + std::atomic _ro_all_threads_exec_time_us; // total time spent by all threads executing transactions. 
use atomic for simplicity and performance + fc::time_point _ro_read_window_start_time; + fc::time_point _ro_window_deadline; // only modified on app thread, read-window deadline or write-window deadline + boost::asio::deadline_timer _ro_timer; // only accessible from the main thread + fc::microseconds _ro_max_trx_time_us{ 0 }; // calculated during option initialization + ro_trx_queue_t _ro_exhausted_trx_queue; + std::atomic _ro_num_active_exec_tasks{ 0 }; + std::vector> _ro_exec_tasks_fut; + + void start_write_window(); + void switch_to_write_window(); + void switch_to_read_window(); + bool read_only_execution_task(uint32_t pending_block_num); + void repost_exhausted_transactions(const fc::time_point& deadline); + bool push_read_only_transaction(transaction_metadata_ptr trx, next_function next); + + void consider_new_watermark( account_name producer, uint32_t block_num, block_timestamp_type timestamp) { + auto itr = _producer_watermarks.find( producer ); + if( itr != _producer_watermarks.end() ) { + itr->second.first = std::max( itr->second.first, block_num ); + itr->second.second = std::max( itr->second.second, timestamp ); + } else if( _producers.count( producer ) > 0 ) { + _producer_watermarks.emplace( producer, std::make_pair(block_num, timestamp) ); } + } - void abort_block() { - auto& chain = chain_plug->chain(); + std::optional get_watermark( account_name producer ) const { + auto itr = _producer_watermarks.find( producer ); - if( chain.is_building_block() ) { - _time_tracker.report( _idle_trx_time, chain.pending_block_num() ); - } - _unapplied_transactions.add_aborted( chain.abort_block() ); - _idle_trx_time = fc::time_point::now(); + if( itr == _producer_watermarks.end() ) return {}; + + return itr->second; + } + + void on_block( const block_state_ptr& bsp ) { + auto& chain = chain_plug->chain(); + auto before = _unapplied_transactions.size(); + _unapplied_transactions.clear_applied( bsp ); + chain.get_mutable_subjective_billing().on_block( _log, bsp, fc::time_point::now() ); + if (before > 0) { + fc_dlog( _log, "Removed applied transactions before: ${before}, after: ${after}", + ("before", before)("after", _unapplied_transactions.size()) ); } + } - bool on_incoming_block(const signed_block_ptr& block, const std::optional& block_id, const block_state_ptr& bsp) { - auto& chain = chain_plug->chain(); - if ( _pending_block_mode == pending_block_mode::producing ) { - fc_wlog( _log, "dropped incoming block #${num} id: ${id}", - ("num", block->block_num())("id", block_id ? (*block_id).str() : "UNKNOWN") ); - return false; - } + void on_block_header( const block_state_ptr& bsp ) { + consider_new_watermark( bsp->header.producer, bsp->block_num, bsp->block->timestamp ); + } - // start a new speculative block, speculative start_block may have been interrupted - auto ensure = fc::make_scoped_exit([this](){ - schedule_production_loop(); - }); + void on_irreversible_block( const signed_block_ptr& lib ) { + const chain::controller& chain = chain_plug->chain(); + EOS_ASSERT(chain.is_write_window(), producer_exception, "write window is expected for on_irreversible_block signal"); + _irreversible_block_time = lib->timestamp.to_time_point(); + _snapshot_scheduler.on_irreversible_block(lib, chain); + } - const auto& id = block_id ? 
*block_id : block->calculate_id(); - auto blk_num = block->block_num(); + void abort_block() { + auto& chain = chain_plug->chain(); - auto now = fc::time_point::now(); - if (now - block->timestamp < fc::minutes(5) || (blk_num % 1000 == 0)) // only log every 1000 during sync - fc_dlog(_log, "received incoming block ${n} ${id}", ("n", blk_num)("id", id)); + if( chain.is_building_block() ) { + _time_tracker.report( _idle_trx_time, chain.pending_block_num() ); + } + _unapplied_transactions.add_aborted( chain.abort_block() ); + _idle_trx_time = fc::time_point::now(); + } - EOS_ASSERT( block->timestamp < (now + fc::seconds( 7 )), block_from_the_future, - "received a block from the future, ignoring it: ${id}", ("id", id) ); + bool on_incoming_block(const signed_block_ptr& block, const std::optional& block_id, const block_state_ptr& bsp) { + auto& chain = chain_plug->chain(); + if ( _pending_block_mode == pending_block_mode::producing ) { + fc_wlog( _log, "dropped incoming block #${num} id: ${id}", + ("num", block->block_num())("id", block_id ? (*block_id).str() : "UNKNOWN") ); + return false; + } - /* de-dupe here... no point in aborting block if we already know the block */ - auto existing = chain.fetch_block_by_id( id ); - if( existing ) { return true; } // return true because the block is valid + // start a new speculative block, speculative start_block may have been interrupted + auto ensure = fc::make_scoped_exit([this](){ + schedule_production_loop(); + }); - // start processing of block - std::future bsf; - if( !bsp ) { - bsf = chain.create_block_state_future( id, block ); - } + const auto& id = block_id ? *block_id : block->calculate_id(); + auto blk_num = block->block_num(); - // abort the pending block - abort_block(); + auto now = fc::time_point::now(); + if (now - block->timestamp < fc::minutes(5) || (blk_num % 1000 == 0)) // only log every 1000 during sync + fc_dlog(_log, "received incoming block ${n} ${id}", ("n", blk_num)("id", id)); - // push the new block - auto handle_error = [&](const auto& e) - { - elog((e.to_detail_string())); - app().get_channel().publish( priority::medium, block ); - throw; - }; + EOS_ASSERT( block->timestamp < (now + fc::seconds( 7 )), block_from_the_future, + "received a block from the future, ignoring it: ${id}", ("id", id) ); - controller::block_report br; - try { - const block_state_ptr& bspr = bsp ? bsp : bsf.get(); - chain.push_block( br, bspr, [this]( const branch_type& forked_branch ) { - _unapplied_transactions.add_forked( forked_branch ); - }, [this]( const transaction_id_type& id ) { - return _unapplied_transactions.get_trx( id ); - } ); - } catch ( const guard_exception& e ) { - chain_plugin::handle_guard_exception(e); - return false; - } catch ( const std::bad_alloc& ) { - chain_apis::api_base::handle_bad_alloc(); - } catch ( boost::interprocess::bad_alloc& ) { - chain_apis::api_base::handle_db_exhaustion(); - } catch ( const fork_database_exception& e ) { - elog("Cannot recover from ${e}. Shutting down.", ("e", e.to_detail_string())); - appbase::app().quit(); - return false; - } catch( const fc::exception& e ) { - handle_error(e); - } catch (const std::exception& e) { - handle_error(fc::std_exception_wrapper::from_current_exception(e)); - } + /* de-dupe here... 
no point in aborting block if we already know the block */ + auto existing = chain.fetch_block_by_id( id ); + if( existing ) { return true; } // return true because the block is valid - const auto& hbs = chain.head_block_state(); - now = fc::time_point::now(); - if( hbs->header.timestamp.next().to_time_point() >= now ) { - _production_enabled = true; - } + // start processing of block + std::future bsf; + if( !bsp ) { + bsf = chain.create_block_state_future( id, block ); + } - if( now - block->timestamp < fc::minutes(5) || (blk_num % 1000 == 0) ) { - ilog("Received block ${id}... #${n} @ ${t} signed by ${p} " - "[trxs: ${count}, lib: ${lib}, confirmed: ${confs}, net: ${net}, cpu: ${cpu}, elapsed: ${elapsed}, time: ${time}, latency: ${latency} ms]", - ("p",block->producer)("id",id.str().substr(8,16))("n",blk_num)("t",block->timestamp) - ("count",block->transactions.size())("lib",chain.last_irreversible_block_num()) - ("confs", block->confirmed)("net", br.total_net_usage)("cpu", br.total_cpu_usage_us) + // abort the pending block + abort_block(); + + // push the new block + auto handle_error = [&](const auto& e) + { + elog((e.to_detail_string())); + app().get_channel().publish( priority::medium, block ); + throw; + }; + + controller::block_report br; + try { + const block_state_ptr& bspr = bsp ? bsp : bsf.get(); + chain.push_block( br, bspr, [this]( const branch_type& forked_branch ) { + _unapplied_transactions.add_forked( forked_branch ); + }, [this]( const transaction_id_type& id ) { + return _unapplied_transactions.get_trx( id ); + } ); + } catch ( const guard_exception& e ) { + chain_plugin::handle_guard_exception(e); + return false; + } catch ( const std::bad_alloc& ) { + chain_apis::api_base::handle_bad_alloc(); + } catch ( boost::interprocess::bad_alloc& ) { + chain_apis::api_base::handle_db_exhaustion(); + } catch ( const fork_database_exception& e ) { + elog("Cannot recover from ${e}. Shutting down.", ("e", e.to_detail_string())); + appbase::app().quit(); + return false; + } catch( const fc::exception& e ) { + handle_error(e); + } catch (const std::exception& e) { + handle_error(fc::std_exception_wrapper::from_current_exception(e)); + } + + const auto& hbs = chain.head_block_state(); + now = fc::time_point::now(); + if( hbs->header.timestamp.next().to_time_point() >= now ) { + _production_enabled = true; + } + + if( now - block->timestamp < fc::minutes(5) || (blk_num % 1000 == 0) ) { + ilog("Received block ${id}... #${n} @ ${t} signed by ${p} " + "[trxs: ${count}, lib: ${lib}, confirmed: ${confs}, net: ${net}, cpu: ${cpu}, elapsed: ${elapsed}, time: ${time}, latency: ${latency} ms]", + ("p",block->producer)("id",id.str().substr(8,16))("n",blk_num)("t",block->timestamp) + ("count",block->transactions.size())("lib",chain.last_irreversible_block_num()) + ("confs", block->confirmed)("net", br.total_net_usage)("cpu", br.total_cpu_usage_us) + ("elapsed", br.total_elapsed_time)("time", br.total_time) + ("latency", (now - block->timestamp).count()/1000 ) ); + if( chain.get_read_mode() != db_read_mode::IRREVERSIBLE && hbs->id != id && hbs->block != nullptr ) { // not applied to head + ilog("Block not applied to head ${id}... 
#${n} @ ${t} signed by ${p} " + "[trxs: ${count}, dpos: ${dpos}, confirmed: ${confs}, net: ${net}, cpu: ${cpu}, elapsed: ${elapsed}, time: ${time}, latency: ${latency} ms]", + ("p",hbs->block->producer)("id",hbs->id.str().substr(8,16))("n",hbs->block_num)("t",hbs->block->timestamp) + ("count",hbs->block->transactions.size())("dpos", hbs->dpos_irreversible_blocknum) + ("confs", hbs->block->confirmed)("net", br.total_net_usage)("cpu", br.total_cpu_usage_us) ("elapsed", br.total_elapsed_time)("time", br.total_time) - ("latency", (now - block->timestamp).count()/1000 ) ); - if( chain.get_read_mode() != db_read_mode::IRREVERSIBLE && hbs->id != id && hbs->block != nullptr ) { // not applied to head - ilog("Block not applied to head ${id}... #${n} @ ${t} signed by ${p} " - "[trxs: ${count}, dpos: ${dpos}, confirmed: ${confs}, net: ${net}, cpu: ${cpu}, elapsed: ${elapsed}, time: ${time}, latency: ${latency} ms]", - ("p",hbs->block->producer)("id",hbs->id.str().substr(8,16))("n",hbs->block_num)("t",hbs->block->timestamp) - ("count",hbs->block->transactions.size())("dpos", hbs->dpos_irreversible_blocknum) - ("confs", hbs->block->confirmed)("net", br.total_net_usage)("cpu", br.total_cpu_usage_us) - ("elapsed", br.total_elapsed_time)("time", br.total_time) - ("latency", (now - hbs->block->timestamp).count()/1000 ) ); - } - } - if (_update_incoming_block_metrics) { - _update_incoming_block_metrics({.trxs_incoming_total = block->transactions.size(), - .cpu_usage_us = br.total_cpu_usage_us, - .net_usage_us = br.total_net_usage, - .last_irreversible = chain.last_irreversible_block_num(), - .head_block_num = chain.head_block_num()}); + ("latency", (now - hbs->block->timestamp).count()/1000 ) ); } + } + if (_update_incoming_block_metrics) { + _update_incoming_block_metrics({.trxs_incoming_total = block->transactions.size(), + .cpu_usage_us = br.total_cpu_usage_us, + .net_usage_us = br.total_net_usage, + .last_irreversible = chain.last_irreversible_block_num(), + .head_block_num = chain.head_block_num()}); + } - return true; + return true; + } + + void restart_speculative_block() { + // abort the pending block + abort_block(); + + schedule_production_loop(); + } + + void on_incoming_transaction_async(const packed_transaction_ptr& trx, + bool api_trx, + transaction_metadata::trx_type trx_type, + bool return_failure_traces, + next_function next) { + if ( trx_type == transaction_metadata::trx_type::read_only ) { + // Post all read only trxs to read_only queue for execution. + auto trx_metadata = transaction_metadata::create_no_recover_keys( trx, transaction_metadata::trx_type::read_only ); + app().executor().post(priority::low, exec_queue::read_only, [this, trx{std::move(trx_metadata)}, next{std::move(next)}]() mutable { + push_read_only_transaction( std::move(trx), std::move(next) ); + } ); + return; } - void restart_speculative_block() { - // abort the pending block - abort_block(); + chain::controller& chain = chain_plug->chain(); + const auto max_trx_time_ms = ( trx_type == transaction_metadata::trx_type::read_only ) ? -1 : _max_transaction_time_ms.load(); + fc::microseconds max_trx_cpu_usage = max_trx_time_ms < 0 ? 
fc::microseconds::maximum() : fc::milliseconds( max_trx_time_ms ); + + auto future = transaction_metadata::start_recover_keys( trx, _thread_pool.get_executor(), + chain.get_chain_id(), fc::microseconds( max_trx_cpu_usage ), + trx_type, + chain.configured_subjective_signature_length_limit() ); + + auto is_transient = (trx_type == transaction_metadata::trx_type::read_only || trx_type == transaction_metadata::trx_type::dry_run); + if( !is_transient ) { + next = [this, trx, next{std::move(next)}]( const next_function_variant& response ) { + next( response ); + + fc::exception_ptr except_ptr; // rejected + if( std::holds_alternative( response ) ) { + except_ptr = std::get( response ); + } else if( std::get( response )->except ) { + except_ptr = std::get( response )->except->dynamic_copy_exception(); + } - schedule_production_loop(); + _transaction_ack_channel.publish( priority::low, std::pair( except_ptr, trx ) ); + }; } - void on_incoming_transaction_async(const packed_transaction_ptr& trx, - bool api_trx, - transaction_metadata::trx_type trx_type, - bool return_failure_traces, - next_function next) { - if ( trx_type == transaction_metadata::trx_type::read_only ) { - // Post all read only trxs to read_only queue for execution. - auto trx_metadata = transaction_metadata::create_no_recover_keys( trx, transaction_metadata::trx_type::read_only ); - app().executor().post(priority::low, exec_queue::read_only, [this, trx{std::move(trx_metadata)}, next{std::move(next)}]() mutable { - push_read_only_transaction( std::move(trx), std::move(next) ); + boost::asio::post(_thread_pool.get_executor(), [self = this, future{std::move(future)}, api_trx, is_transient, return_failure_traces, + next{std::move(next)}, trx=trx]() mutable { + if( future.valid() ) { + future.wait(); + app().executor().post( priority::low, exec_queue::read_write, [self, future{std::move(future)}, api_trx, is_transient, next{std::move( next )}, trx{std::move(trx)}, return_failure_traces]() mutable { + auto start = fc::time_point::now(); + auto idle_time = start - self->_idle_trx_time; + self->_time_tracker.add_idle_time( idle_time ); + fc_tlog( _log, "Time since last trx: ${t}us", ("t", idle_time) ); + + auto exception_handler = [self, is_transient, &next, trx{std::move(trx)}, &start](fc::exception_ptr ex) { + self->_time_tracker.add_idle_time( start - self->_idle_trx_time ); + self->log_trx_results( trx, nullptr, ex, 0, start, is_transient ); + next( std::move(ex) ); + self->_idle_trx_time = fc::time_point::now(); + auto dur = self->_idle_trx_time - start; + self->_time_tracker.add_fail_time(dur, is_transient); + }; + try { + auto result = future.get(); + if( !self->process_incoming_transaction_async( result, api_trx, return_failure_traces, next) ) { + if( self->_pending_block_mode == pending_block_mode::producing ) { + self->schedule_maybe_produce_block( true ); + } else { + self->restart_speculative_block(); + } + } + self->_idle_trx_time = fc::time_point::now(); + } CATCH_AND_CALL(exception_handler); } ); - return; } + }); + } - chain::controller& chain = chain_plug->chain(); - const auto max_trx_time_ms = ( trx_type == transaction_metadata::trx_type::read_only ) ? -1 : _max_transaction_time_ms.load(); - fc::microseconds max_trx_cpu_usage = max_trx_time_ms < 0 ? 
fc::microseconds::maximum() : fc::milliseconds( max_trx_time_ms ); - - auto future = transaction_metadata::start_recover_keys( trx, _thread_pool.get_executor(), - chain.get_chain_id(), fc::microseconds( max_trx_cpu_usage ), - trx_type, - chain.configured_subjective_signature_length_limit() ); - - auto is_transient = (trx_type == transaction_metadata::trx_type::read_only || trx_type == transaction_metadata::trx_type::dry_run); - if( !is_transient ) { - next = [this, trx, next{std::move(next)}]( const next_function_variant& response ) { - next( response ); - - fc::exception_ptr except_ptr; // rejected - if( std::holds_alternative( response ) ) { - except_ptr = std::get( response ); - } else if( std::get( response )->except ) { - except_ptr = std::get( response )->except->dynamic_copy_exception(); - } + bool process_incoming_transaction_async(const transaction_metadata_ptr& trx, + bool api_trx, + bool return_failure_trace, + const next_function& next) { + bool exhausted = false; + chain::controller& chain = chain_plug->chain(); + try { + const auto& id = trx->id(); + + fc::time_point bt = chain.is_building_block() ? chain.pending_block_time() : chain.head_block_time(); + const fc::time_point expire = trx->packed_trx()->expiration().to_time_point(); + if( expire < bt ) { + auto except_ptr = std::static_pointer_cast( + std::make_shared( + FC_LOG_MESSAGE( error, "expired transaction ${id}, expiration ${e}, block time ${bt}", + ("id", id)("e", expire)("bt", bt)))); + log_trx_results( trx, except_ptr ); + next( std::move(except_ptr) ); + return true; + } - _transaction_ack_channel.publish( priority::low, std::pair( except_ptr, trx ) ); - }; + if( chain.is_known_unexpired_transaction( id )) { + auto except_ptr = std::static_pointer_cast( std::make_shared( + FC_LOG_MESSAGE( error, "duplicate transaction ${id}", ("id", id)))); + next( std::move(except_ptr) ); + return true; } - boost::asio::post(_thread_pool.get_executor(), [self = this, future{std::move(future)}, api_trx, is_transient, return_failure_traces, - next{std::move(next)}, trx=trx]() mutable { - if( future.valid() ) { - future.wait(); - app().executor().post( priority::low, exec_queue::read_write, [self, future{std::move(future)}, api_trx, is_transient, next{std::move( next )}, trx{std::move(trx)}, return_failure_traces]() mutable { - auto start = fc::time_point::now(); - auto idle_time = start - self->_idle_trx_time; - self->_time_tracker.add_idle_time( idle_time ); - fc_tlog( _log, "Time since last trx: ${t}us", ("t", idle_time) ); - - auto exception_handler = [self, is_transient, &next, trx{std::move(trx)}, &start](fc::exception_ptr ex) { - self->_time_tracker.add_idle_time( start - self->_idle_trx_time ); - self->log_trx_results( trx, nullptr, ex, 0, start, is_transient ); - next( std::move(ex) ); - self->_idle_trx_time = fc::time_point::now(); - auto dur = self->_idle_trx_time - start; - self->_time_tracker.add_fail_time(dur, is_transient); - }; - try { - auto result = future.get(); - if( !self->process_incoming_transaction_async( result, api_trx, return_failure_traces, next) ) { - if( self->_pending_block_mode == pending_block_mode::producing ) { - self->schedule_maybe_produce_block( true ); - } else { - self->restart_speculative_block(); - } - } - self->_idle_trx_time = fc::time_point::now(); - } CATCH_AND_CALL(exception_handler); - } ); - } - }); - } + if( !chain.is_building_block()) { + _unapplied_transactions.add_incoming( trx, api_trx, return_failure_trace, next ); + return true; + } - bool 
process_incoming_transaction_async(const transaction_metadata_ptr& trx, - bool api_trx, - bool return_failure_trace, - const next_function& next) { - bool exhausted = false; - chain::controller& chain = chain_plug->chain(); - try { - const auto& id = trx->id(); - - fc::time_point bt = chain.is_building_block() ? chain.pending_block_time() : chain.head_block_time(); - const fc::time_point expire = trx->packed_trx()->expiration().to_time_point(); - if( expire < bt ) { - auto except_ptr = std::static_pointer_cast( - std::make_shared( - FC_LOG_MESSAGE( error, "expired transaction ${id}, expiration ${e}, block time ${bt}", - ("id", id)("e", expire)("bt", bt)))); - log_trx_results( trx, except_ptr ); - next( std::move(except_ptr) ); - return true; - } + const auto block_deadline = _pending_block_deadline; + push_result pr = push_transaction( block_deadline, trx, api_trx, return_failure_trace, next ); - if( chain.is_known_unexpired_transaction( id )) { - auto except_ptr = std::static_pointer_cast( std::make_shared( - FC_LOG_MESSAGE( error, "duplicate transaction ${id}", ("id", id)))); - next( std::move(except_ptr) ); - return true; - } + exhausted = pr.block_exhausted; + if( pr.trx_exhausted ) { + _unapplied_transactions.add_incoming( trx, api_trx, return_failure_trace, next ); + } - if( !chain.is_building_block()) { - _unapplied_transactions.add_incoming( trx, api_trx, return_failure_trace, next ); - return true; - } + } catch ( const guard_exception& e ) { + chain_plugin::handle_guard_exception(e); + } catch ( boost::interprocess::bad_alloc& ) { + chain_apis::api_base::handle_db_exhaustion(); + } catch ( std::bad_alloc& ) { + chain_apis::api_base::handle_bad_alloc(); + } CATCH_AND_CALL(next); - const auto block_deadline = _pending_block_deadline; - push_result pr = push_transaction( block_deadline, trx, api_trx, return_failure_trace, next ); + return !exhausted; + } - exhausted = pr.block_exhausted; - if( pr.trx_exhausted ) { - _unapplied_transactions.add_incoming( trx, api_trx, return_failure_trace, next ); - } - } catch ( const guard_exception& e ) { - chain_plugin::handle_guard_exception(e); - } catch ( boost::interprocess::bad_alloc& ) { - chain_apis::api_base::handle_db_exhaustion(); - } catch ( std::bad_alloc& ) { - chain_apis::api_base::handle_bad_alloc(); - } CATCH_AND_CALL(next); + fc::microseconds get_irreversible_block_age() { + auto now = fc::time_point::now(); + if (now < _irreversible_block_time) { + return fc::microseconds(0); + } else { + return now - _irreversible_block_time; + } + } - return !exhausted; + account_name get_pending_block_producer() { + auto& chain = chain_plug->chain(); + if (chain.is_building_block()) { + return chain.pending_block_producer(); + } else { + return {}; } + } + bool production_disabled_by_policy() { + return !_production_enabled || _pause_production || (_max_irreversible_block_age_us.count() >= 0 && get_irreversible_block_age() >= _max_irreversible_block_age_us); + } - fc::microseconds get_irreversible_block_age() { - auto now = fc::time_point::now(); - if (now < _irreversible_block_time) { - return fc::microseconds(0); - } else { - return now - _irreversible_block_time; - } - } + bool is_producer_key(const chain::public_key_type& key) const { + return _signature_providers.find(key) != _signature_providers.end(); + } - account_name get_pending_block_producer() { - auto& chain = chain_plug->chain(); - if (chain.is_building_block()) { - return chain.pending_block_producer(); - } else { - return {}; - } + chain::signature_type sign_compact(const 
chain::public_key_type& key, const fc::sha256& digest) const { + if(key != chain::public_key_type()) { + auto private_key_itr = _signature_providers.find(key); + EOS_ASSERT(private_key_itr != _signature_providers.end(), producer_priv_key_not_found, + "Local producer has no private key in config.ini corresponding to public key ${key}", ("key", key)); + + return private_key_itr->second(digest); + } + else { + return chain::signature_type(); } + } - bool production_disabled_by_policy() { - return !_production_enabled || _pause_production || (_max_irreversible_block_age_us.count() >= 0 && get_irreversible_block_age() >= _max_irreversible_block_age_us); + void resume() { + _pause_production = false; + // it is possible that we are only speculating because of this policy which we have now changed + // re-evaluate that now + // + if (_pending_block_mode == pending_block_mode::speculating) { + abort_block(); + fc_ilog(_log, "Producer resumed. Scheduling production."); + schedule_production_loop(); + } else { + fc_ilog(_log, "Producer resumed."); } + } - enum class start_block_result { - succeeded, - failed, - waiting_for_block, - waiting_for_production, - exhausted - }; + enum class start_block_result { + succeeded, + failed, + waiting_for_block, + waiting_for_production, + exhausted + }; - inline bool should_interrupt_start_block( const fc::time_point& deadline, uint32_t pending_block_num ) const; - start_block_result start_block(); + inline bool should_interrupt_start_block( const fc::time_point& deadline, uint32_t pending_block_num ) const; + start_block_result start_block(); - block_timestamp_type calculate_pending_block_time() const; - void schedule_delayed_production_loop(const std::weak_ptr& weak_this, std::optional wake_up_time); - std::optional calculate_producer_wake_up_time( const block_timestamp_type& ref_block_time ) const; + block_timestamp_type calculate_pending_block_time() const; + void schedule_delayed_production_loop(const std::weak_ptr& weak_this, std::optional wake_up_time); + std::optional calculate_producer_wake_up_time( const block_timestamp_type& ref_block_time ) const; }; @@ -891,23 +999,12 @@ void producer_plugin::set_program_options( bool producer_plugin::is_producer_key(const chain::public_key_type& key) const { - auto private_key_itr = my->_signature_providers.find(key); - if(private_key_itr != my->_signature_providers.end()) - return true; - return false; + return my->is_producer_key(key); } chain::signature_type producer_plugin::sign_compact(const chain::public_key_type& key, const fc::sha256& digest) const { - if(key != chain::public_key_type()) { - auto private_key_itr = my->_signature_providers.find(key); - EOS_ASSERT(private_key_itr != my->_signature_providers.end(), producer_priv_key_not_found, "Local producer has no private key in config.ini corresponding to public key ${key}", ("key", key)); - - return private_key_itr->second(digest); - } - else { - return chain::signature_type(); - } + return my->sign_compact(key, digest); } template @@ -1298,51 +1395,41 @@ void producer_plugin::pause() { } void producer_plugin::resume() { - my->_pause_production = false; - // it is possible that we are only speculating because of this policy which we have now changed - // re-evaluate that now - // - if (my->_pending_block_mode == pending_block_mode::speculating) { - my->abort_block(); - fc_ilog(_log, "Producer resumed. 
Scheduling production."); - my->schedule_production_loop(); - } else { - fc_ilog(_log, "Producer resumed."); - } + my->resume(); } bool producer_plugin::paused() const { return my->_pause_production; } -void producer_plugin::update_runtime_options(const runtime_options& options) { - chain::controller& chain = my->chain_plug->chain(); +void producer_plugin_impl::update_runtime_options(const producer_plugin::runtime_options& options) { + chain::controller& chain = chain_plug->chain(); bool check_speculating = false; if (options.max_transaction_time) { - my->_max_transaction_time_ms = *options.max_transaction_time; + _max_transaction_time_ms = *options.max_transaction_time; } if (options.max_irreversible_block_age) { - my->_max_irreversible_block_age_us = fc::seconds(*options.max_irreversible_block_age); + _max_irreversible_block_age_us = fc::seconds(*options.max_irreversible_block_age); check_speculating = true; } if (options.cpu_effort_us) { - my->_cpu_effort_us = *options.cpu_effort_us; + _cpu_effort_us = *options.cpu_effort_us; } if (options.max_scheduled_transaction_time_per_block_ms) { - my->_max_scheduled_transaction_time_per_block_ms = *options.max_scheduled_transaction_time_per_block_ms; + _max_scheduled_transaction_time_per_block_ms = *options.max_scheduled_transaction_time_per_block_ms; } if (options.incoming_defer_ratio) { - my->_incoming_defer_ratio = *options.incoming_defer_ratio; + _incoming_defer_ratio = *options.incoming_defer_ratio; } - if (check_speculating && my->_pending_block_mode == pending_block_mode::speculating) { - my->abort_block(); - my->schedule_production_loop(); + if (check_speculating && _pending_block_mode == pending_block_mode::speculating) { + abort_block(); + schedule_production_loop(); } if (options.subjective_cpu_leeway_us) { @@ -1354,27 +1441,12 @@ void producer_plugin::update_runtime_options(const runtime_options& options) { } } -producer_plugin::runtime_options producer_plugin::get_runtime_options() const { - return { - my->_max_transaction_time_ms, - my->_max_irreversible_block_age_us.count() < 0 ? -1 : my->_max_irreversible_block_age_us.count() / 1'000'000, - my->_cpu_effort_us, - my->_max_scheduled_transaction_time_per_block_ms, - my->chain_plug->chain().get_subjective_cpu_leeway() ? 
- my->chain_plug->chain().get_subjective_cpu_leeway()->count() : - std::optional(), - my->_incoming_defer_ratio, - my->chain_plug->chain().get_greylist_limit() - }; +void producer_plugin::update_runtime_options(const runtime_options& options) { + my->update_runtime_options(options); } -void producer_plugin_impl::add_greylist_accounts(const producer_plugin::greylist_params& params) { - EOS_ASSERT(params.accounts.size() > 0, chain::invalid_http_request, "At least one account is required"); - - chain::controller& chain = chain_plug->chain(); - for (auto &acc : params.accounts) { - chain.add_resource_greylist(acc); - } +producer_plugin::runtime_options producer_plugin::get_runtime_options() const { + return my->get_runtime_options(); } void producer_plugin::add_greylist_accounts(const greylist_params& params) { @@ -1382,23 +1454,11 @@ void producer_plugin::add_greylist_accounts(const greylist_params& params) { } void producer_plugin::remove_greylist_accounts(const greylist_params& params) { - EOS_ASSERT(params.accounts.size() > 0, chain::invalid_http_request, "At least one account is required"); - - chain::controller& chain = my->chain_plug->chain(); - for (auto &acc : params.accounts) { - chain.remove_resource_greylist(acc); - } + my->remove_greylist_accounts(params); } producer_plugin::greylist_params producer_plugin::get_greylist() const { - chain::controller& chain = my->chain_plug->chain(); - greylist_params result; - const auto& list = chain.get_resource_greylist(); - result.accounts.reserve(list.size()); - for (auto &acc: list) { - result.accounts.push_back(acc); - } - return result; + return my->get_greylist(); } producer_plugin::whitelist_blacklist producer_plugin::get_whitelist_blacklist() const { From 04eab3feaef13ce4d7b7dff95c18bc8b8e230b73 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Thu, 25 May 2023 16:57:01 -0400 Subject: [PATCH 05/16] More producer_plugin cleanup towards proper pimpl idiom --- plugins/producer_plugin/producer_plugin.cpp | 53 +++++++-------------- 1 file changed, 17 insertions(+), 36 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index ddeba1a53e..e218878585 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -410,6 +410,9 @@ class producer_plugin_impl : public std::enable_shared_from_thischain().get_greylist_limit() }; } + + void schedule_protocol_feature_activations(const producer_plugin::scheduled_protocol_feature_activations& schedule); + void plugin_shutdown(); void plugin_startup(); void plugin_initialize(const boost::program_options::variables_map& options); @@ -1489,47 +1492,21 @@ void producer_plugin::set_whitelist_blacklist(const producer_plugin::whitelist_b } producer_plugin::integrity_hash_information producer_plugin::get_integrity_hash() const { - chain::controller& chain = my->chain_plug->chain(); - - auto reschedule = fc::make_scoped_exit([this](){ - my->schedule_production_loop(); - }); - - if (chain.is_building_block()) { - // abort the pending block - my->abort_block(); - } else { - reschedule.cancel(); - } - - return {chain.head_block_id(), chain.calculate_integrity_hash()}; + return my->get_integrity_hash(); } void producer_plugin::create_snapshot(producer_plugin::next_function next) { - chain::controller& chain = my->chain_plug->chain(); - - auto reschedule = fc::make_scoped_exit([this](){ - my->schedule_production_loop(); - }); - - auto predicate = [&]() -> void { - if (chain.is_building_block()) { - // abort the 
pending block - my->abort_block(); - } else { - reschedule.cancel(); - } - }; - - my->_snapshot_scheduler.create_snapshot(next, chain, predicate); + my->create_snapshot(std::move(next)); } -chain::snapshot_scheduler::snapshot_schedule_result producer_plugin::schedule_snapshot(const chain::snapshot_scheduler::snapshot_request_information& sri) +chain::snapshot_scheduler::snapshot_schedule_result +producer_plugin::schedule_snapshot(const chain::snapshot_scheduler::snapshot_request_information& sri) { return my->_snapshot_scheduler.schedule_snapshot(sri); } -chain::snapshot_scheduler::snapshot_schedule_result producer_plugin::unschedule_snapshot(const chain::snapshot_scheduler::snapshot_request_id_information& sri) +chain::snapshot_scheduler::snapshot_schedule_result +producer_plugin::unschedule_snapshot(const chain::snapshot_scheduler::snapshot_request_id_information& sri) { return my->_snapshot_scheduler.unschedule_snapshot(sri.snapshot_request_id); } @@ -1544,8 +1521,8 @@ producer_plugin::get_scheduled_protocol_feature_activations()const { return {my->_protocol_features_to_activate}; } -void producer_plugin::schedule_protocol_feature_activations( const scheduled_protocol_feature_activations& schedule ) { - const chain::controller& chain = my->chain_plug->chain(); +void producer_plugin_impl::schedule_protocol_feature_activations(const producer_plugin::scheduled_protocol_feature_activations& schedule) { + const chain::controller& chain = chain_plug->chain(); std::set set_of_features_to_activate( schedule.protocol_features_to_activate.begin(), schedule.protocol_features_to_activate.end() ); EOS_ASSERT( set_of_features_to_activate.size() == schedule.protocol_features_to_activate.size(), @@ -1558,8 +1535,12 @@ void producer_plugin::schedule_protocol_feature_activations( const scheduled_pro "protocol feature requires preactivation: ${digest}", ("digest", feature_digest)); } - my->_protocol_features_to_activate = schedule.protocol_features_to_activate; - my->_protocol_features_signaled = false; + _protocol_features_to_activate = schedule.protocol_features_to_activate; + _protocol_features_signaled = false; +} + +void producer_plugin::schedule_protocol_feature_activations( const scheduled_protocol_feature_activations& schedule ) { + my->schedule_protocol_feature_activations(schedule); } fc::variants producer_plugin::get_supported_protocol_features( const get_supported_protocol_features_params& params ) const { From eaff6c165eb198c07d75553995a995ac9b628e42 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 26 May 2023 09:05:51 -0400 Subject: [PATCH 06/16] Address PR comments, fix indentation/spacing in plugin_initialize. --- plugins/producer_plugin/producer_plugin.cpp | 196 ++++++++++---------- 1 file changed, 100 insertions(+), 96 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 3b555e7fd7..d3913d2d51 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -397,15 +397,14 @@ class producer_plugin_impl : public std::enable_shared_from_thischain().get_subjective_cpu_leeway() ? - chain_plug->chain().get_subjective_cpu_leeway()->count() : - std::optional(), + chain_plug->chain().get_subjective_cpu_leeway() ? 
chain_plug->chain().get_subjective_cpu_leeway()->count() : std::optional(), _incoming_defer_ratio, chain_plug->chain().get_greylist_limit() }; @@ -1024,13 +1023,13 @@ if( options.count(op_name) ) { \ void producer_plugin_impl::plugin_initialize(const boost::program_options::variables_map& options) { chain_plug = app().find_plugin(); - EOS_ASSERT( chain_plug, plugin_config_exception, "chain_plugin not found" ); + EOS_ASSERT(chain_plug, plugin_config_exception, "chain_plugin not found" ); _options = &options; LOAD_VALUE_SET(options, "producer-name", _producers) - chain::controller& chain = chain_plug->chain(); + chain::controller& chain = chain_plug->chain(); - if( options.count("signature-provider") ) { + if (options.count("signature-provider")) { const std::vector key_spec_pairs = options["signature-provider"].as>(); for (const auto& key_spec_pair : key_spec_pairs) { try { @@ -1047,36 +1046,38 @@ void producer_plugin_impl::plugin_initialize(const boost::program_options::varia } auto subjective_account_max_failures_window_size = options.at("subjective-account-max-failures-window-size").as(); - EOS_ASSERT( subjective_account_max_failures_window_size > 0, plugin_config_exception, - "subjective-account-max-failures-window-size ${s} must be greater than 0", - ("s", subjective_account_max_failures_window_size) ); + EOS_ASSERT(subjective_account_max_failures_window_size > 0, plugin_config_exception, + "subjective-account-max-failures-window-size ${s} must be greater than 0", + ("s", subjective_account_max_failures_window_size)); - _account_fails.set_max_failures_per_account( options.at("subjective-account-max-failures").as(), - subjective_account_max_failures_window_size ); + _account_fails.set_max_failures_per_account(options.at("subjective-account-max-failures").as(), + subjective_account_max_failures_window_size); uint32_t cpu_effort_pct = options.at("cpu-effort-percent").as(); - EOS_ASSERT( cpu_effort_pct >= 0 && cpu_effort_pct <= 100, plugin_config_exception, - "cpu-effort-percent ${pct} must be 0 - 100", ("pct", cpu_effort_pct) ); + EOS_ASSERT(cpu_effort_pct >= 0 && cpu_effort_pct <= 100, plugin_config_exception, + "cpu-effort-percent ${pct} must be 0 - 100", ("pct", cpu_effort_pct)); cpu_effort_pct *= config::percent_1; - _cpu_effort_us = EOS_PERCENT( config::block_interval_us, cpu_effort_pct ); + _cpu_effort_us = EOS_PERCENT(config::block_interval_us, cpu_effort_pct); - _max_block_cpu_usage_threshold_us = options.at( "max-block-cpu-usage-threshold-us" ).as(); - EOS_ASSERT( _max_block_cpu_usage_threshold_us < config::block_interval_us, plugin_config_exception, - "max-block-cpu-usage-threshold-us ${t} must be 0 .. ${bi}", ("bi", config::block_interval_us)("t", _max_block_cpu_usage_threshold_us) ); + _max_block_cpu_usage_threshold_us = options.at("max-block-cpu-usage-threshold-us").as(); + EOS_ASSERT(_max_block_cpu_usage_threshold_us < config::block_interval_us, plugin_config_exception, + "max-block-cpu-usage-threshold-us ${t} must be 0 .. 
${bi}", + ("bi", config::block_interval_us)("t", _max_block_cpu_usage_threshold_us)); - _max_block_net_usage_threshold_bytes = options.at( "max-block-net-usage-threshold-bytes" ).as(); + _max_block_net_usage_threshold_bytes = options.at("max-block-net-usage-threshold-bytes").as(); _max_scheduled_transaction_time_per_block_ms = options.at("max-scheduled-transaction-time-per-block-ms").as(); - if( options.at( "subjective-cpu-leeway-us" ).as() != config::default_subjective_cpu_leeway_us ) { - chain.set_subjective_cpu_leeway( fc::microseconds( options.at( "subjective-cpu-leeway-us" ).as() ) ); + if (options.at("subjective-cpu-leeway-us").as() != config::default_subjective_cpu_leeway_us) { + chain.set_subjective_cpu_leeway(fc::microseconds(options.at("subjective-cpu-leeway-us").as())); } - fc::microseconds subjective_account_decay_time = fc::minutes(options.at( "subjective-account-decay-time-minutes" ).as()); - EOS_ASSERT( subjective_account_decay_time.count() > 0, plugin_config_exception, - "subjective-account-decay-time-minutes ${dt} must be greater than 0", ("dt", subjective_account_decay_time.to_seconds() / 60)); - chain.get_mutable_subjective_billing().set_expired_accumulator_average_window( subjective_account_decay_time ); + fc::microseconds subjective_account_decay_time = fc::minutes(options.at("subjective-account-decay-time-minutes").as()); + EOS_ASSERT(subjective_account_decay_time.count() > 0, plugin_config_exception, + "subjective-account-decay-time-minutes ${dt} must be greater than 0", + ("dt", subjective_account_decay_time.to_seconds() / 60)); + chain.get_mutable_subjective_billing().set_expired_accumulator_average_window(subjective_account_decay_time); _max_transaction_time_ms = options.at("max-transaction-time").as(); @@ -1084,34 +1085,36 @@ void producer_plugin_impl::plugin_initialize(const boost::program_options::varia auto max_incoming_transaction_queue_size = options.at("incoming-transaction-queue-size-mb").as() * 1024*1024; - EOS_ASSERT( max_incoming_transaction_queue_size > 0, plugin_config_exception, - "incoming-transaction-queue-size-mb ${mb} must be greater than 0", ("mb", max_incoming_transaction_queue_size) ); + EOS_ASSERT(max_incoming_transaction_queue_size > 0, plugin_config_exception, + "incoming-transaction-queue-size-mb ${mb} must be greater than 0", + ("mb", max_incoming_transaction_queue_size)); - _unapplied_transactions.set_max_transaction_queue_size( max_incoming_transaction_queue_size ); + _unapplied_transactions.set_max_transaction_queue_size(max_incoming_transaction_queue_size); _incoming_defer_ratio = options.at("incoming-defer-ratio").as(); _disable_subjective_p2p_billing = options.at("disable-subjective-p2p-billing").as(); _disable_subjective_api_billing = options.at("disable-subjective-api-billing").as(); - dlog( "disable-subjective-p2p-billing: ${p2p}, disable-subjective-api-billing: ${api}", - ("p2p", _disable_subjective_p2p_billing)("api", _disable_subjective_api_billing) ); - if( _disable_subjective_p2p_billing && _disable_subjective_api_billing ) { + dlog("disable-subjective-p2p-billing: ${p2p}, disable-subjective-api-billing: ${api}", + ("p2p", _disable_subjective_p2p_billing)("api", _disable_subjective_api_billing)); + if (_disable_subjective_p2p_billing && _disable_subjective_api_billing) { chain.get_mutable_subjective_billing().disable(); - ilog( "Subjective CPU billing disabled" ); - } else if( !_disable_subjective_p2p_billing && !_disable_subjective_api_billing ) { - ilog( "Subjective CPU billing enabled" ); + ilog("Subjective CPU billing 
disabled"); + } else if (!_disable_subjective_p2p_billing && !_disable_subjective_api_billing) { + ilog("Subjective CPU billing enabled"); } else { - if( _disable_subjective_p2p_billing ) ilog( "Subjective CPU billing of P2P trxs disabled " ); - if( _disable_subjective_api_billing ) ilog( "Subjective CPU billing of API trxs disabled " ); + if (_disable_subjective_p2p_billing) ilog("Subjective CPU billing of P2P trxs disabled "); + if (_disable_subjective_api_billing) ilog("Subjective CPU billing of API trxs disabled "); } - _thread_pool_size = options.at( "producer-threads" ).as(); - EOS_ASSERT( _thread_pool_size > 0, plugin_config_exception, - "producer-threads ${num} must be greater than 0", ("num", _thread_pool_size)); + _thread_pool_size = options.at("producer-threads").as(); + EOS_ASSERT(_thread_pool_size > 0, plugin_config_exception, + "producer-threads ${num} must be greater than 0", + ("num", _thread_pool_size)); - if( options.count( "snapshots-dir" )) { - auto sd = options.at( "snapshots-dir" ).as(); - if( sd.is_relative()) { + if (options.count("snapshots-dir")) { + auto sd = options.at("snapshots-dir").as(); + if (sd.is_relative()) { _snapshots_dir = app().data_dir() / sd; if (!std::filesystem::exists(_snapshots_dir)) { std::filesystem::create_directories(_snapshots_dir); @@ -1120,31 +1123,32 @@ void producer_plugin_impl::plugin_initialize(const boost::program_options::varia _snapshots_dir = sd; } - EOS_ASSERT( std::filesystem::is_directory(_snapshots_dir), snapshot_directory_not_found_exception, - "No such directory '${dir}'", ("dir", _snapshots_dir) ); + EOS_ASSERT(std::filesystem::is_directory(_snapshots_dir), snapshot_directory_not_found_exception, + "No such directory '${dir}'", ("dir", _snapshots_dir)); if (auto resmon_plugin = app().find_plugin()) { resmon_plugin->monitor_directory(_snapshots_dir); } } - if ( options.count( "read-only-threads" ) ) { - _ro_thread_pool_size = options.at( "read-only-threads" ).as(); - } else if ( _producers.empty() ) { - if( options.count( "plugin" ) ) { - const auto& v = options.at( "plugin" ).as>(); - auto i = std::find_if( v.cbegin(), v.cend(), []( const std::string& p ) { return p == "eosio::chain_api_plugin"; } ); - if( i != v.cend() ) { + if (options.count("read-only-threads")) { + _ro_thread_pool_size = options.at("read-only-threads").as(); + } else if (_producers.empty()) { + if (options.count("plugin")) { + const auto& v = options.at("plugin").as>(); + auto i = std::find_if (v.cbegin(), v.cend(), [](const std::string& p) { return p == "eosio::chain_api_plugin"; }); + if (i != v.cend()) { // default to 3 threads for non producer nodes running chain_api_plugin if not specified _ro_thread_pool_size = 3; - ilog( "chain_api_plugin configured, defaulting read-only-threads to ${t}", ("t", _ro_thread_pool_size) ); + ilog("chain_api_plugin configured, defaulting read-only-threads to ${t}", ("t", _ro_thread_pool_size)); } } } - EOS_ASSERT( producer_plugin::test_mode_ || _ro_thread_pool_size == 0 || _producers.empty(), plugin_config_exception, "read-only-threads not allowed on producer node" ); + EOS_ASSERT(producer_plugin::test_mode_ || _ro_thread_pool_size == 0 || _producers.empty(), plugin_config_exception, + "read-only-threads not allowed on producer node"); // only initialize other read-only options when read-only thread pool is enabled - if ( _ro_thread_pool_size > 0 ) { + if (_ro_thread_pool_size > 0) { #ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED if (chain.is_eos_vm_oc_enabled()) { // EOS VM OC requires 4.2TB Virtual for each executing 
thread. Make sure the memory @@ -1155,69 +1159,69 @@ void producer_plugin_impl::plugin_initialize(const boost::program_options::varia std::ifstream meminfo_file("/proc/meminfo"); while (meminfo_file >> attr_name) { if (attr_name == "VmallocTotal:") { - if ( !(meminfo_file >> vm_total_kb) ) + if (!(meminfo_file >> vm_total_kb)) break; } else if (attr_name == "VmallocUsed:") { - if ( !(meminfo_file >> vm_used_kb) ) + if (!(meminfo_file >> vm_used_kb)) break; } meminfo_file.ignore(std::numeric_limits::max(), '\n'); } - EOS_ASSERT( vm_total_kb > 0, plugin_config_exception, - "Unable to get system virtual memory size (not a Linux?), therefore cannot determine if the system has enough " - "virtual memory for multi-threaded read-only transactions on EOS VM OC"); - EOS_ASSERT( vm_total_kb > vm_used_kb, plugin_config_exception, - "vm total (${t}) must be greater than vm used (${u})", ("t", vm_total_kb)("u", vm_used_kb)); + EOS_ASSERT(vm_total_kb > 0, plugin_config_exception, + "Unable to get system virtual memory size (not a Linux?), therefore cannot determine if the system has enough " + "virtual memory for multi-threaded read-only transactions on EOS VM OC"); + EOS_ASSERT(vm_total_kb > vm_used_kb, plugin_config_exception, + "vm total (${t}) must be greater than vm used (${u})", ("t", vm_total_kb)("u", vm_used_kb)); uint32_t num_threads_supported = (vm_total_kb - vm_used_kb) / 4200000000; // reserve 1 for the app thread, 1 for anything else which might use VM - EOS_ASSERT( num_threads_supported > 2, plugin_config_exception, - "With the EOS VM OC configured, there is not enough system virtual memory to support the required minimum of " - "3 threads (1 for main thread, 1 for read-only, and 1 for anything else), vm total: ${t}, vm used: ${u}", - ("t", vm_total_kb)("u", vm_used_kb)); + EOS_ASSERT(num_threads_supported > 2, plugin_config_exception, + "With the EOS VM OC configured, there is not enough system virtual memory to support the required minimum of " + "3 threads (1 for main thread, 1 for read-only, and 1 for anything else), vm total: ${t}, vm used: ${u}", + ("t", vm_total_kb)("u", vm_used_kb)); num_threads_supported -= 2; auto actual_threads_allowed = std::min(_ro_max_threads_allowed, num_threads_supported); ilog("vm total in kb: ${total}, vm used in kb: ${used}, number of EOS VM OC threads supported " "((vm total - vm used)/4.2 TB - 2): ${supp}, max allowed: ${max}, actual allowed: ${actual}", ("total", vm_total_kb) ("used", vm_used_kb) ("supp", num_threads_supported) ("max", _ro_max_threads_allowed)("actual", actual_threads_allowed)); - EOS_ASSERT( _ro_thread_pool_size <= actual_threads_allowed, plugin_config_exception, - "read-only-threads (${th}) greater than number of threads allowed for EOS VM OC (${allowed})", - ("th", _ro_thread_pool_size) ("allowed", actual_threads_allowed) ); + EOS_ASSERT(_ro_thread_pool_size <= actual_threads_allowed, plugin_config_exception, + "read-only-threads (${th}) greater than number of threads allowed for EOS VM OC (${allowed})", + ("th", _ro_thread_pool_size) ("allowed", actual_threads_allowed)); } #endif - EOS_ASSERT( _ro_thread_pool_size <= _ro_max_threads_allowed, plugin_config_exception, - "read-only-threads (${th}) greater than the number of threads allowed (${allowed})", - ("th", _ro_thread_pool_size) ("allowed", _ro_max_threads_allowed) ); - - _ro_write_window_time_us = fc::microseconds( options.at( "read-only-write-window-time-us" ).as() ); - _ro_read_window_time_us = fc::microseconds( options.at( "read-only-read-window-time-us" ).as() ); - 
EOS_ASSERT( _ro_read_window_time_us > _ro_read_window_minimum_time_us, plugin_config_exception, - "read-only-read-window-time-us (${read}) must be at least greater than ${min} us", - ("read", _ro_read_window_time_us) ("min", _ro_read_window_minimum_time_us) ); + EOS_ASSERT(_ro_thread_pool_size <= _ro_max_threads_allowed, plugin_config_exception, + "read-only-threads (${th}) greater than the number of threads allowed (${allowed})", + ("th", _ro_thread_pool_size) ("allowed", _ro_max_threads_allowed)); + + _ro_write_window_time_us = fc::microseconds(options.at("read-only-write-window-time-us").as()); + _ro_read_window_time_us = fc::microseconds(options.at("read-only-read-window-time-us").as()); + EOS_ASSERT(_ro_read_window_time_us > _ro_read_window_minimum_time_us, plugin_config_exception, + "read-only-read-window-time-us (${read}) must be at least greater than ${min} us", + ("read", _ro_read_window_time_us) ("min", _ro_read_window_minimum_time_us)); _ro_read_window_effective_time_us = _ro_read_window_time_us - _ro_read_window_minimum_time_us; // Make sure a read-only transaction can finish within the read // window if scheduled at the very beginning of the window. // Add _ro_read_window_minimum_time_us for safety margin. - if ( _max_transaction_time_ms.load() > 0 ) { - EOS_ASSERT( _ro_read_window_time_us > ( fc::milliseconds(_max_transaction_time_ms.load()) + _ro_read_window_minimum_time_us ), - plugin_config_exception, - "read-only-read-window-time-us (${read} us) must be greater than max-transaction-time (${trx_time} us) " - "plus ${min} us, required: ${read} us > (${trx_time} us + ${min} us).", - ("read", _ro_read_window_time_us) ("trx_time", _max_transaction_time_ms.load() * 1000) - ("min", _ro_read_window_minimum_time_us) ); + if (_max_transaction_time_ms.load() > 0) { + EOS_ASSERT(_ro_read_window_time_us > (fc::milliseconds(_max_transaction_time_ms.load()) + _ro_read_window_minimum_time_us), + plugin_config_exception, + "read-only-read-window-time-us (${read} us) must be greater than max-transaction-time (${trx_time} us) " + "plus ${min} us, required: ${read} us > (${trx_time} us + ${min} us).", + ("read", _ro_read_window_time_us) ("trx_time", _max_transaction_time_ms.load() * 1000) + ("min", _ro_read_window_minimum_time_us)); } ilog("read-only-write-window-time-us: ${ww} us, read-only-read-window-time-us: ${rw} us, effective read window time to be used: ${w} us", ("ww", _ro_write_window_time_us)("rw", _ro_read_window_time_us)("w", _ro_read_window_effective_time_us)); } // Make sure _ro_max_trx_time_us is alwasys set. 
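For a concrete sense of the timing arithmetic enforced above and by the fallback that follows, here is a small standalone sketch. It is not part of the patch: it reuses the values visible in this diff (a 60,000 us read window and a 10,000 us minimum) and assumes a 30 ms max-transaction-time purely for illustration.

   #include <cassert>
   #include <cstdint>
   #include <limits>

   int main() {
      const int64_t read_window_us    = 60'000;  // read-only-read-window-time-us, default shown above
      const int64_t window_minimum_us = 10'000;  // _ro_read_window_minimum_time_us
      const int64_t max_trx_time_ms   = 30;      // assumed max-transaction-time for this example

      // effective window actually handed to read-only execution tasks
      const int64_t effective_window_us = read_window_us - window_minimum_us;      // 50'000 us

      // a read-only transaction scheduled at the very start of the read window must be able
      // to finish inside it, keeping the minimum as a safety margin (the EOS_ASSERT above):
      assert(read_window_us > max_trx_time_ms * 1000 + window_minimum_us);         // 60'000 > 40'000

      // the branch that follows in the patch keeps _ro_max_trx_time_us always set: the configured
      // value when positive, otherwise effectively unlimited for a negative max-transaction-time
      const int64_t ro_max_trx_time_us =
         (max_trx_time_ms > 0) ? max_trx_time_ms * 1000 : std::numeric_limits<int64_t>::max();

      return (effective_window_us > 0 && ro_max_trx_time_us > 0) ? 0 : 1;
   }

With these numbers the startup check passes with 20,000 us to spare; raising max-transaction-time past 50 ms would trip it, since a transaction started at the top of the window could no longer finish before the window closes.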
- if ( _max_transaction_time_ms.load() > 0 ) { + if (_max_transaction_time_ms.load() > 0) { _ro_max_trx_time_us = fc::milliseconds(_max_transaction_time_ms.load()); } else { // max-transaction-time can be set to negative for unlimited time - _ro_max_trx_time_us = fc::microseconds::maximum(); + _ro_max_trx_time_us = fc::microseconds::maximum(); } ilog("read-only-threads ${s}, max read-only trx time to be enforced: ${t} us", ("s", _ro_thread_pool_size)("t", _ro_max_trx_time_us)); @@ -1229,7 +1233,7 @@ void producer_plugin_impl::plugin_initialize(const boost::program_options::varia _incoming_transaction_async_provider = app().get_method().register_provider( [this](const packed_transaction_ptr& trx, bool api_trx, transaction_metadata::trx_type trx_type, bool return_failure_traces, next_function next) -> void { - return on_incoming_transaction_async(trx, api_trx, trx_type, return_failure_traces, next ); + return on_incoming_transaction_async(trx, api_trx, trx_type, return_failure_traces, next); }); if (options.count("greylist-account")) { @@ -1243,13 +1247,13 @@ void producer_plugin_impl::plugin_initialize(const boost::program_options::varia { uint32_t greylist_limit = options.at("greylist-limit").as(); - chain.set_greylist_limit( greylist_limit ); + chain.set_greylist_limit(greylist_limit); } - if( options.count("disable-subjective-account-billing") ) { + if (options.count("disable-subjective-account-billing")) { std::vector accounts = options["disable-subjective-account-billing"].as>(); - for( const auto& a : accounts ) { - chain.get_mutable_subjective_billing().disable_account( account_name(a) ); + for(const auto& a : accounts) { + chain.get_mutable_subjective_billing().disable_account(account_name(a)); } } @@ -1475,12 +1479,12 @@ void producer_plugin::set_whitelist_blacklist(const producer_plugin::whitelist_b ); chain::controller& chain = my->chain_plug->chain(); - if(params.actor_whitelist) chain.set_actor_whitelist(*params.actor_whitelist); - if(params.actor_blacklist) chain.set_actor_blacklist(*params.actor_blacklist); - if(params.contract_whitelist) chain.set_contract_whitelist(*params.contract_whitelist); - if(params.contract_blacklist) chain.set_contract_blacklist(*params.contract_blacklist); - if(params.action_blacklist) chain.set_action_blacklist(*params.action_blacklist); - if(params.key_blacklist) chain.set_key_blacklist(*params.key_blacklist); + if (params.actor_whitelist) chain.set_actor_whitelist(*params.actor_whitelist); + if (params.actor_blacklist) chain.set_actor_blacklist(*params.actor_blacklist); + if (params.contract_whitelist) chain.set_contract_whitelist(*params.contract_whitelist); + if (params.contract_blacklist) chain.set_contract_blacklist(*params.contract_blacklist); + if (params.action_blacklist) chain.set_action_blacklist(*params.action_blacklist); + if (params.key_blacklist) chain.set_key_blacklist(*params.key_blacklist); } producer_plugin::integrity_hash_information producer_plugin::get_integrity_hash() const { From ceab818352faa9d572a8d40406d7788b58da92ac Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 26 May 2023 09:23:37 -0400 Subject: [PATCH 07/16] More whitespace cleanup --- plugins/producer_plugin/producer_plugin.cpp | 118 ++++++++++---------- 1 file changed, 62 insertions(+), 56 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index d3913d2d51..bb4b56a622 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -145,7 +145,7 @@ 
class account_failures { // return true if exceeds max_failures_per_account and should be dropped bool failure_limit( const account_name& n ) { auto fitr = failed_accounts.find( n ); - if( fitr != failed_accounts.end() && fitr->second.num_failures >= max_failures_per_account ) { + if (fitr != failed_accounts.end() && fitr->second.num_failures >= max_failures_per_account) { ++fitr->second.num_failures; return true; } @@ -167,26 +167,25 @@ class account_failures { private: void report(uint32_t block_num, const chain::subjective_billing& sub_bill) const { - if( _log.is_enabled(fc::log_level::debug)) { + if (_log.is_enabled(fc::log_level::debug)) { auto now = fc::time_point::now(); - for ( const auto& e : failed_accounts ) { + for (const auto& e : failed_accounts) { std::string reason; - if( e.second.is_deadline() ) reason += "deadline"; - if( e.second.is_tx_cpu_usage() ) { - if( !reason.empty() ) reason += ", "; + if (e.second.is_deadline()) reason += "deadline"; + if (e.second.is_tx_cpu_usage()) { + if (!reason.empty()) reason += ", "; reason += "tx_cpu_usage"; } - if( e.second.is_eosio_assert() ) { - if( !reason.empty() ) reason += ", "; + if (e.second.is_eosio_assert()) { + if (!reason.empty()) reason += ", "; reason += "assert"; } - if( e.second.is_other() ) { - if( !reason.empty() ) reason += ", "; + if (e.second.is_other()) { + if (!reason.empty()) reason += ", "; reason += "other"; } - fc_dlog( _log, "Failed ${n} trxs, account: ${a}, sub bill: ${b}us, reason: ${r}", - ("n", e.second.num_failures)("b", sub_bill.get_subjective_bill(e.first, now)) - ("a", e.first)("r", reason) ); + fc_dlog(_log, "Failed ${n} trxs, account: ${a}, sub bill: ${b}us, reason: ${r}", + ("n", e.second.num_failures)("b", sub_bill.get_subjective_bill(e.first, now))("a", e.first)("r", reason)); } } } @@ -198,19 +197,18 @@ class account_failures { ex_other_exception = 8 }; - void add( const account_name& n, const fc::exception& e ) { + void add(const account_name& n, const fc::exception& e) { auto exception_code = e.code(); - if( exception_code == tx_cpu_usage_exceeded::code_value ) { - ex_flags = set_field( ex_flags, ex_fields::ex_tx_cpu_usage_exceeded ); - } else if( exception_code == deadline_exception::code_value ) { - ex_flags = set_field( ex_flags, ex_fields::ex_deadline_exception ); - } else if( exception_code == eosio_assert_message_exception::code_value || - exception_code == eosio_assert_code_exception::code_value ) { - ex_flags = set_field( ex_flags, ex_fields::ex_eosio_assert_exception ); + if (exception_code == tx_cpu_usage_exceeded::code_value) { + ex_flags = set_field(ex_flags, ex_fields::ex_tx_cpu_usage_exceeded); + } else if (exception_code == deadline_exception::code_value) { + ex_flags = set_field(ex_flags, ex_fields::ex_deadline_exception); + } else if (exception_code == eosio_assert_message_exception::code_value || + exception_code == eosio_assert_code_exception::code_value) { + ex_flags = set_field(ex_flags, ex_fields::ex_eosio_assert_exception); } else { - ex_flags = set_field( ex_flags, ex_fields::ex_other_exception ); - fc_dlog( _log, "Failed trx, account: ${a}, reason: ${r}, except: ${e}", - ("a", n)("r", exception_code)("e", e) ); + ex_flags = set_field(ex_flags, ex_fields::ex_other_exception); + fc_dlog(_log, "Failed trx, account: ${a}, reason: ${r}, except: ${e}", ("a", n)("r", exception_code)("e", e)); } } @@ -235,8 +233,8 @@ struct block_time_tracker { block_idle_time += idle; } - void add_fail_time( const fc::microseconds& fail_time, bool is_transient ) { - if( is_transient ) { + void 
add_fail_time(const fc::microseconds& fail_time, bool is_transient) { + if (is_transient) { // transient time includes both success and fail time transient_trx_time += fail_time; ++transient_trx_num; @@ -246,8 +244,8 @@ struct block_time_tracker { } } - void add_success_time( const fc::microseconds& time, bool is_transient ) { - if( is_transient ) { + void add_success_time(const fc::microseconds& time, bool is_transient) { + if (is_transient) { transient_trx_time += time; ++transient_trx_num; } else { @@ -257,15 +255,16 @@ struct block_time_tracker { } void report( const fc::time_point& idle_trx_time, uint32_t block_num ) { - if( _log.is_enabled( fc::log_level::debug ) ) { + if (_log.is_enabled(fc::log_level::debug)) { auto now = fc::time_point::now(); - add_idle_time( now - idle_trx_time ); - fc_dlog( _log, "Block #${n} trx idle: ${i}us out of ${t}us, success: ${sn}, ${s}us, fail: ${fn}, ${f}us, transient: ${trans_trx_num}, ${trans_trx_time}us, other: ${o}us", - ("n", block_num) - ("i", block_idle_time)("t", now - clear_time)("sn", trx_success_num)("s", trx_success_time) - ("fn", trx_fail_num)("f", trx_fail_time) - ("trans_trx_num", transient_trx_num)("trans_trx_time", transient_trx_time) - ("o", (now - clear_time) - block_idle_time - trx_success_time - trx_fail_time - transient_trx_time) ); + add_idle_time(now - idle_trx_time); + fc_dlog(_log, "Block #${n} trx idle: ${i}us out of ${t}us, success: ${sn}, ${s}us, fail: ${fn}, ${f}us, " + "transient: ${trans_trx_num}, ${trans_trx_time}us, other: ${o}us", + ("n", block_num) + ("i", block_idle_time)("t", now - clear_time)("sn", trx_success_num)("s", trx_success_time) + ("fn", trx_fail_num)("f", trx_fail_time) + ("trans_trx_num", transient_trx_num)("trans_trx_time", transient_trx_time) + ("o", (now - clear_time) - block_idle_time - trx_success_time - trx_fail_time - transient_trx_time) ); } } @@ -313,33 +312,40 @@ class producer_plugin_impl : public std::enable_shared_from_this& next ); - push_result handle_push_result( const transaction_metadata_ptr& trx, - const next_function& next, - const fc::time_point& start, - chain::controller& chain, - const transaction_trace_ptr& trace, - bool return_failure_trace, - bool disable_subjective_enforcement, - account_name first_auth, - int64_t sub_bill, - uint32_t prev_billed_cpu_time_us ); - void log_trx_results( const transaction_metadata_ptr& trx, const transaction_trace_ptr& trace, const fc::time_point& start ); - void log_trx_results( const transaction_metadata_ptr& trx, const fc::exception_ptr& except_ptr ); - void log_trx_results( const packed_transaction_ptr& trx, const transaction_trace_ptr& trace, - const fc::exception_ptr& except_ptr, uint32_t billed_cpu_us, const fc::time_point& start, bool is_transient ); + push_result push_transaction(const fc::time_point& block_deadline, + const transaction_metadata_ptr& trx, + bool api_trx, + bool return_failure_trace, + const next_function& next); + push_result handle_push_result(const transaction_metadata_ptr& trx, + const next_function& next, + const fc::time_point& start, + chain::controller& chain, + const transaction_trace_ptr& trace, + bool return_failure_trace, + bool disable_subjective_enforcement, + account_name first_auth, + int64_t sub_bill, + uint32_t prev_billed_cpu_time_us); + void log_trx_results(const transaction_metadata_ptr& trx, + const transaction_trace_ptr& trace, + const fc::time_point& start); + void log_trx_results(const transaction_metadata_ptr& trx, const fc::exception_ptr& except_ptr); + void log_trx_results(const 
packed_transaction_ptr& trx, + const transaction_trace_ptr& trace, + const fc::exception_ptr& except_ptr, + uint32_t billed_cpu_us, + const fc::time_point& start, + bool is_transient); void add_greylist_accounts(const producer_plugin::greylist_params& params) { EOS_ASSERT(params.accounts.size() > 0, chain::invalid_http_request, "At least one account is required"); chain::controller& chain = chain_plug->chain(); - for (auto &acc : params.accounts) { + for (auto& acc : params.accounts) { chain.add_resource_greylist(acc); } } - + void remove_greylist_accounts(const producer_plugin::greylist_params& params) { EOS_ASSERT(params.accounts.size() > 0, chain::invalid_http_request, "At least one account is required"); From a423d4055470f31bd12524c290f9c23bec8e362b Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 26 May 2023 09:38:46 -0400 Subject: [PATCH 08/16] run clang-format and do some manual cleanup (mostly fc_dlog) --- plugins/producer_plugin/producer_plugin.cpp | 2265 ++++++++++--------- 1 file changed, 1174 insertions(+), 1091 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index bb4b56a622..6a06d016b2 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -31,86 +31,86 @@ #include namespace bmi = boost::multi_index; +using bmi::hashed_unique; using bmi::indexed_by; -using bmi::ordered_non_unique; using bmi::member; +using bmi::ordered_non_unique; using bmi::tag; -using bmi::hashed_unique; using boost::multi_index_container; +using boost::signals2::scoped_connection; using std::string; using std::vector; -using boost::signals2::scoped_connection; #undef FC_LOG_AND_DROP -#define LOG_AND_DROP() \ - catch ( const guard_exception& e ) { \ - chain_plugin::handle_guard_exception(e); \ - } catch ( const std::bad_alloc& ) { \ - chain_apis::api_base::handle_bad_alloc(); \ - } catch ( boost::interprocess::bad_alloc& ) { \ - chain_apis::api_base::handle_db_exhaustion(); \ - } catch( fc::exception& er ) { \ - wlog( "${details}", ("details",er.to_detail_string()) ); \ - } catch( const std::exception& e ) { \ - fc::exception fce( \ - FC_LOG_MESSAGE( warn, "std::exception: ${what}: ",("what",e.what()) ), \ - fc::std_exception_code,\ - BOOST_CORE_TYPEID(e).name(), \ - e.what() ) ; \ - wlog( "${details}", ("details",fce.to_detail_string()) ); \ - } catch( ... ) { \ - fc::unhandled_exception e( \ - FC_LOG_MESSAGE( warn, "unknown: ", ), \ - std::current_exception() ); \ - wlog( "${details}", ("details",e.to_detail_string()) ); \ +#define LOG_AND_DROP() \ + catch (const guard_exception& e) { \ + chain_plugin::handle_guard_exception(e); \ + } \ + catch (const std::bad_alloc&) { \ + chain_apis::api_base::handle_bad_alloc(); \ + } \ + catch (boost::interprocess::bad_alloc&) { \ + chain_apis::api_base::handle_db_exhaustion(); \ + } \ + catch (fc::exception & er) { \ + wlog("${details}", ("details", er.to_detail_string())); \ + } \ + catch (const std::exception& e) { \ + fc::exception fce(FC_LOG_MESSAGE(warn, "std::exception: ${what}: ", ("what", e.what())), \ + fc::std_exception_code, \ + BOOST_CORE_TYPEID(e).name(), \ + e.what()); \ + wlog("${details}", ("details", fce.to_detail_string())); \ + } \ + catch (...) 
{ \ + fc::unhandled_exception e(FC_LOG_MESSAGE(warn, "unknown: ", ), std::current_exception()); \ + wlog("${details}", ("details", e.to_detail_string())); \ } const std::string logger_name("producer_plugin"); -fc::logger _log; +fc::logger _log; const std::string trx_successful_trace_logger_name("transaction_success_tracing"); -fc::logger _trx_successful_trace_log; +fc::logger _trx_successful_trace_log; const std::string trx_failed_trace_logger_name("transaction_failure_tracing"); -fc::logger _trx_failed_trace_log; +fc::logger _trx_failed_trace_log; const std::string trx_trace_success_logger_name("transaction_trace_success"); -fc::logger _trx_trace_success_log; +fc::logger _trx_trace_success_log; const std::string trx_trace_failure_logger_name("transaction_trace_failure"); -fc::logger _trx_trace_failure_log; +fc::logger _trx_trace_failure_log; const std::string trx_logger_name("transaction"); -fc::logger _trx_log; +fc::logger _trx_log; const std::string transient_trx_successful_trace_logger_name("transient_trx_success_tracing"); -fc::logger _transient_trx_successful_trace_log; +fc::logger _transient_trx_successful_trace_log; const std::string transient_trx_failed_trace_logger_name("transient_trx_failure_tracing"); -fc::logger _transient_trx_failed_trace_log; +fc::logger _transient_trx_failed_trace_log; namespace eosio { - static auto _producer_plugin = application::register_plugin(); +static auto _producer_plugin = application::register_plugin(); using namespace eosio::chain; using namespace eosio::chain::plugin_interface; namespace { - bool exception_is_exhausted(const fc::exception& e) { - auto code = e.code(); - return (code == block_cpu_usage_exceeded::code_value) || - (code == block_net_usage_exceeded::code_value) || - (code == deadline_exception::code_value) || - (code == ro_trx_vm_oc_compile_temporary_failure::code_value); - } +bool exception_is_exhausted(const fc::exception& e) { + auto code = e.code(); + return (code == block_cpu_usage_exceeded::code_value) || (code == block_net_usage_exceeded::code_value) || + (code == deadline_exception::code_value) || (code == ro_trx_vm_oc_compile_temporary_failure::code_value); } +} // namespace struct transaction_id_with_expiry { - transaction_id_type trx_id; - fc::time_point expiry; + transaction_id_type trx_id; + fc::time_point expiry; }; struct by_id; @@ -118,11 +118,8 @@ struct by_expiry; using transaction_id_with_expiry_index = multi_index_container< transaction_id_with_expiry, - indexed_by< - hashed_unique, BOOST_MULTI_INDEX_MEMBER(transaction_id_with_expiry, transaction_id_type, trx_id)>, - ordered_non_unique, BOOST_MULTI_INDEX_MEMBER(transaction_id_with_expiry, fc::time_point, expiry)> - > ->; + indexed_by, BOOST_MULTI_INDEX_MEMBER(transaction_id_with_expiry, transaction_id_type, trx_id)>, + ordered_non_unique, BOOST_MULTI_INDEX_MEMBER(transaction_id_with_expiry, fc::time_point, expiry)>>>; namespace { @@ -131,20 +128,20 @@ class account_failures { public: account_failures() = default; - void set_max_failures_per_account( uint32_t max_failures, uint32_t size ) { - max_failures_per_account = max_failures; + void set_max_failures_per_account(uint32_t max_failures, uint32_t size) { + max_failures_per_account = max_failures; reset_window_size_in_num_blocks = size; } - void add( const account_name& n, const fc::exception& e ) { + void add(const account_name& n, const fc::exception& e) { auto& fa = failed_accounts[n]; ++fa.num_failures; - fa.add( n, e ); + fa.add(n, e); } // return true if exceeds max_failures_per_account and should be 
dropped - bool failure_limit( const account_name& n ) { - auto fitr = failed_accounts.find( n ); + bool failure_limit(const account_name& n) { + auto fitr = failed_accounts.find(n); if (fitr != failed_accounts.end() && fitr->second.num_failures >= max_failures_per_account) { ++fitr->second.num_failures; return true; @@ -153,7 +150,7 @@ class account_failures { } void report_and_clear(uint32_t block_num, const chain::subjective_billing& sub_bill) { - if (last_reset_block_num != block_num && (block_num % reset_window_size_in_num_blocks == 0) ) { + if (last_reset_block_num != block_num && (block_num % reset_window_size_in_num_blocks == 0)) { report(block_num, sub_bill); failed_accounts.clear(); last_reset_block_num = block_num; @@ -171,30 +168,35 @@ class account_failures { auto now = fc::time_point::now(); for (const auto& e : failed_accounts) { std::string reason; - if (e.second.is_deadline()) reason += "deadline"; + if (e.second.is_deadline()) + reason += "deadline"; if (e.second.is_tx_cpu_usage()) { - if (!reason.empty()) reason += ", "; + if (!reason.empty()) + reason += ", "; reason += "tx_cpu_usage"; } if (e.second.is_eosio_assert()) { - if (!reason.empty()) reason += ", "; + if (!reason.empty()) + reason += ", "; reason += "assert"; } if (e.second.is_other()) { - if (!reason.empty()) reason += ", "; + if (!reason.empty()) + reason += ", "; reason += "other"; } - fc_dlog(_log, "Failed ${n} trxs, account: ${a}, sub bill: ${b}us, reason: ${r}", + fc_dlog(_log, + "Failed ${n} trxs, account: ${a}, sub bill: ${b}us, reason: ${r}", ("n", e.second.num_failures)("b", sub_bill.get_subjective_bill(e.first, now))("a", e.first)("r", reason)); } } } struct account_failure { enum class ex_fields : uint8_t { - ex_deadline_exception = 1, - ex_tx_cpu_usage_exceeded = 2, + ex_deadline_exception = 1, + ex_tx_cpu_usage_exceeded = 2, ex_eosio_assert_exception = 4, - ex_other_exception = 8 + ex_other_exception = 8 }; void add(const account_name& n, const fc::exception& e) { @@ -212,26 +214,24 @@ class account_failures { } } - bool is_deadline() const { return has_field( ex_flags, ex_fields::ex_deadline_exception ); } - bool is_tx_cpu_usage() const { return has_field( ex_flags, ex_fields::ex_tx_cpu_usage_exceeded ); } - bool is_eosio_assert() const { return has_field( ex_flags, ex_fields::ex_eosio_assert_exception ); } - bool is_other() const { return has_field( ex_flags, ex_fields::ex_other_exception ); } + bool is_deadline() const { return has_field(ex_flags, ex_fields::ex_deadline_exception); } + bool is_tx_cpu_usage() const { return has_field(ex_flags, ex_fields::ex_tx_cpu_usage_exceeded); } + bool is_eosio_assert() const { return has_field(ex_flags, ex_fields::ex_eosio_assert_exception); } + bool is_other() const { return has_field(ex_flags, ex_fields::ex_other_exception); } uint32_t num_failures = 0; - uint8_t ex_flags = 0; + uint8_t ex_flags = 0; }; std::map failed_accounts; - uint32_t max_failures_per_account = 3; - uint32_t last_reset_block_num = 0; - uint32_t reset_window_size_in_num_blocks = 1; + uint32_t max_failures_per_account = 3; + uint32_t last_reset_block_num = 0; + uint32_t reset_window_size_in_num_blocks = 1; }; struct block_time_tracker { - void add_idle_time( const fc::microseconds& idle ) { - block_idle_time += idle; - } + void add_idle_time(const fc::microseconds& idle) { block_idle_time += idle; } void add_fail_time(const fc::microseconds& fail_time, bool is_transient) { if (is_transient) { @@ -254,34 +254,33 @@ struct block_time_tracker { } } - void report( const fc::time_point& 
idle_trx_time, uint32_t block_num ) { + void report(const fc::time_point& idle_trx_time, uint32_t block_num) { if (_log.is_enabled(fc::log_level::debug)) { auto now = fc::time_point::now(); add_idle_time(now - idle_trx_time); - fc_dlog(_log, "Block #${n} trx idle: ${i}us out of ${t}us, success: ${sn}, ${s}us, fail: ${fn}, ${f}us, " + fc_dlog(_log, + "Block #${n} trx idle: ${i}us out of ${t}us, success: ${sn}, ${s}us, fail: ${fn}, ${f}us, " "transient: ${trans_trx_num}, ${trans_trx_time}us, other: ${o}us", - ("n", block_num) - ("i", block_idle_time)("t", now - clear_time)("sn", trx_success_num)("s", trx_success_time) - ("fn", trx_fail_num)("f", trx_fail_time) - ("trans_trx_num", transient_trx_num)("trans_trx_time", transient_trx_time) - ("o", (now - clear_time) - block_idle_time - trx_success_time - trx_fail_time - transient_trx_time) ); + ("n", block_num)("i", block_idle_time)("t", now - clear_time)("sn", trx_success_num)("s", trx_success_time) + ("fn", trx_fail_num)("f", trx_fail_time)("trans_trx_num", transient_trx_num)("trans_trx_time", transient_trx_time) + ("o", (now - clear_time) - block_idle_time - trx_success_time - trx_fail_time - transient_trx_time)); } } void clear() { block_idle_time = trx_fail_time = trx_success_time = transient_trx_time = fc::microseconds{}; trx_fail_num = trx_success_num = transient_trx_num = 0; - clear_time = fc::time_point::now(); + clear_time = fc::time_point::now(); } fc::microseconds block_idle_time; - uint32_t trx_success_num = 0; - uint32_t trx_fail_num = 0; - uint32_t transient_trx_num = 0; + uint32_t trx_success_num = 0; + uint32_t trx_fail_num = 0; + uint32_t transient_trx_num = 0; fc::microseconds trx_success_time; fc::microseconds trx_fail_time; fc::microseconds transient_trx_time; - fc::time_point clear_time{fc::time_point::now()}; + fc::time_point clear_time{fc::time_point::now()}; }; } // anonymous namespace @@ -289,55 +288,51 @@ struct block_time_tracker { class producer_plugin_impl : public std::enable_shared_from_this { public: producer_plugin_impl(boost::asio::io_service& io) - :_timer(io) - ,_transaction_ack_channel(app().get_channel()) - ,_ro_timer(io) - { - } + : _timer(io) + , _transaction_ack_channel(app().get_channel()) + , _ro_timer(io) {} uint32_t calculate_next_block_slot(const account_name& producer_name, uint32_t current_block_slot) const; - void schedule_production_loop(); - void schedule_maybe_produce_block( bool exhausted ); - void produce_block(); - bool maybe_produce_block(); - bool block_is_exhausted() const; - bool remove_expired_trxs( const fc::time_point& deadline ); - bool remove_expired_blacklisted_trxs( const fc::time_point& deadline ); - bool process_unapplied_trxs( const fc::time_point& deadline ); - void process_scheduled_and_incoming_trxs( const fc::time_point& deadline, unapplied_transaction_queue::iterator& itr ); - bool process_incoming_trxs( const fc::time_point& deadline, unapplied_transaction_queue::iterator& itr ); + void schedule_production_loop(); + void schedule_maybe_produce_block(bool exhausted); + void produce_block(); + bool maybe_produce_block(); + bool block_is_exhausted() const; + bool remove_expired_trxs(const fc::time_point& deadline); + bool remove_expired_blacklisted_trxs(const fc::time_point& deadline); + bool process_unapplied_trxs(const fc::time_point& deadline); + void process_scheduled_and_incoming_trxs(const fc::time_point& deadline, unapplied_transaction_queue::iterator& itr); + bool process_incoming_trxs(const fc::time_point& deadline, unapplied_transaction_queue::iterator& itr); 
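The `account_failure` bookkeeping shown a few hunks above reduces to a small bit-flag enum, set/test helpers, and a per-account failure counter that `report()` later turns into a comma-separated reason string. A minimal stand-alone sketch of that pattern follows; the `set_field`/`has_field` helpers and the numeric exception codes here are local stand-ins for illustration, not the chain library's definitions.

    #include <cstdint>
    #include <string>

    // Stand-in helpers; the plugin uses the chain library's set_field/has_field.
    enum class ex_fields : uint8_t { deadline = 1, tx_cpu_usage = 2, eosio_assert = 4, other = 8 };

    static uint8_t set_field(uint8_t flags, ex_fields f) { return flags | static_cast<uint8_t>(f); }
    static bool    has_field(uint8_t flags, ex_fields f) { return (flags & static_cast<uint8_t>(f)) != 0; }

    struct account_failure_sketch {
       uint32_t num_failures = 0;
       uint8_t  ex_flags     = 0;

       // classify an exception code into the flag set (codes below are illustrative placeholders)
       void add(uint64_t exception_code) {
          ++num_failures;
          if (exception_code == 1 /* tx_cpu_usage_exceeded */)
             ex_flags = set_field(ex_flags, ex_fields::tx_cpu_usage);
          else if (exception_code == 2 /* deadline_exception */)
             ex_flags = set_field(ex_flags, ex_fields::deadline);
          else if (exception_code == 3 /* eosio_assert_* */)
             ex_flags = set_field(ex_flags, ex_fields::eosio_assert);
          else
             ex_flags = set_field(ex_flags, ex_fields::other);
       }

       // build the same kind of comma-separated reason string that report() logs
       std::string reason() const {
          std::string r;
          auto append = [&r](const char* s) { if (!r.empty()) r += ", "; r += s; };
          if (has_field(ex_flags, ex_fields::deadline))     append("deadline");
          if (has_field(ex_flags, ex_fields::tx_cpu_usage)) append("tx_cpu_usage");
          if (has_field(ex_flags, ex_fields::eosio_assert)) append("assert");
          if (has_field(ex_flags, ex_fields::other))        append("other");
          return r;
       }
    };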
struct push_result { bool block_exhausted = false; - bool trx_exhausted = false; - bool failed = false; + bool trx_exhausted = false; + bool failed = false; }; - push_result push_transaction(const fc::time_point& block_deadline, - const transaction_metadata_ptr& trx, - bool api_trx, - bool return_failure_trace, + push_result push_transaction(const fc::time_point& block_deadline, + const transaction_metadata_ptr& trx, + bool api_trx, + bool return_failure_trace, const next_function& next); - push_result handle_push_result(const transaction_metadata_ptr& trx, + push_result handle_push_result(const transaction_metadata_ptr& trx, const next_function& next, - const fc::time_point& start, - chain::controller& chain, - const transaction_trace_ptr& trace, - bool return_failure_trace, - bool disable_subjective_enforcement, - account_name first_auth, - int64_t sub_bill, - uint32_t prev_billed_cpu_time_us); - void log_trx_results(const transaction_metadata_ptr& trx, - const transaction_trace_ptr& trace, - const fc::time_point& start); - void log_trx_results(const transaction_metadata_ptr& trx, const fc::exception_ptr& except_ptr); - void log_trx_results(const packed_transaction_ptr& trx, - const transaction_trace_ptr& trace, - const fc::exception_ptr& except_ptr, - uint32_t billed_cpu_us, - const fc::time_point& start, - bool is_transient); - void add_greylist_accounts(const producer_plugin::greylist_params& params) { + const fc::time_point& start, + chain::controller& chain, + const transaction_trace_ptr& trace, + bool return_failure_trace, + bool disable_subjective_enforcement, + account_name first_auth, + int64_t sub_bill, + uint32_t prev_billed_cpu_time_us); + void log_trx_results(const transaction_metadata_ptr& trx, const transaction_trace_ptr& trace, const fc::time_point& start); + void log_trx_results(const transaction_metadata_ptr& trx, const fc::exception_ptr& except_ptr); + void log_trx_results(const packed_transaction_ptr& trx, + const transaction_trace_ptr& trace, + const fc::exception_ptr& except_ptr, + uint32_t billed_cpu_us, + const fc::time_point& start, + bool is_transient); + void add_greylist_accounts(const producer_plugin::greylist_params& params) { EOS_ASSERT(params.accounts.size() > 0, chain::invalid_http_request, "At least one account is required"); chain::controller& chain = chain_plug->chain(); @@ -350,28 +345,26 @@ class producer_plugin_impl : public std::enable_shared_from_this 0, chain::invalid_http_request, "At least one account is required"); chain::controller& chain = chain_plug->chain(); - for (auto &acc : params.accounts) { + for (auto& acc : params.accounts) { chain.remove_resource_greylist(acc); } } producer_plugin::greylist_params get_greylist() const { - chain::controller& chain = chain_plug->chain(); + chain::controller& chain = chain_plug->chain(); producer_plugin::greylist_params result; - const auto& list = chain.get_resource_greylist(); + const auto& list = chain.get_resource_greylist(); result.accounts.reserve(list.size()); - for (auto &acc: list) { + for (auto& acc : list) { result.accounts.push_back(acc); } return result; } - + producer_plugin::integrity_hash_information get_integrity_hash() { chain::controller& chain = chain_plug->chain(); - auto reschedule = fc::make_scoped_exit([this]() { - schedule_production_loop(); - }); + auto reschedule = fc::make_scoped_exit([this]() { schedule_production_loop(); }); if (chain.is_building_block()) { // abort the pending block @@ -385,10 +378,8 @@ class producer_plugin_impl : public std::enable_shared_from_this 
next) { chain::controller& chain = chain_plug->chain(); - - auto reschedule = fc::make_scoped_exit([this](){ - schedule_production_loop(); - }); + + auto reschedule = fc::make_scoped_exit([this]() { schedule_production_loop(); }); auto predicate = [&]() -> void { if (chain.is_building_block()) { @@ -398,76 +389,74 @@ class producer_plugin_impl : public std::enable_shared_from_thischain().get_subjective_cpu_leeway() ? chain_plug->chain().get_subjective_cpu_leeway()->count() : std::optional(), - _incoming_defer_ratio, - chain_plug->chain().get_greylist_limit() - }; + return {_max_transaction_time_ms, + _max_irreversible_block_age_us.count() < 0 ? -1 : _max_irreversible_block_age_us.count() / 1'000'000, + _cpu_effort_us, + _max_scheduled_transaction_time_per_block_ms, + chain_plug->chain().get_subjective_cpu_leeway() ? chain_plug->chain().get_subjective_cpu_leeway()->count() + : std::optional(), + _incoming_defer_ratio, + chain_plug->chain().get_greylist_limit()}; } void schedule_protocol_feature_activations(const producer_plugin::scheduled_protocol_feature_activations& schedule); - + void plugin_shutdown(); void plugin_startup(); void plugin_initialize(const boost::program_options::variables_map& options); - + boost::program_options::variables_map _options; - bool _production_enabled = false; - bool _pause_production = false; + bool _production_enabled = false; + bool _pause_production = false; using signature_provider_type = signature_provider_plugin::signature_provider_type; std::map _signature_providers; std::set _producers; boost::asio::deadline_timer _timer; using producer_watermark = std::pair; - std::map _producer_watermarks; - pending_block_mode _pending_block_mode = pending_block_mode::speculating; - unapplied_transaction_queue _unapplied_transactions; - size_t _thread_pool_size = config::default_controller_thread_pool_size; - named_thread_pool _thread_pool; - - std::atomic _max_transaction_time_ms; // modified by app thread, read by net_plugin thread pool - std::atomic _received_block{0}; // modified by net_plugin thread pool - fc::microseconds _max_irreversible_block_age_us; - int32_t _cpu_effort_us = 0; - fc::time_point _pending_block_deadline; - uint32_t _max_block_cpu_usage_threshold_us = 0; - uint32_t _max_block_net_usage_threshold_bytes = 0; - int32_t _max_scheduled_transaction_time_per_block_ms = 0; - bool _disable_subjective_p2p_billing = true; - bool _disable_subjective_api_billing = true; - fc::time_point _irreversible_block_time; - fc::time_point _idle_trx_time{fc::time_point::now()}; - - std::vector _protocol_features_to_activate; - bool _protocol_features_signaled = false; // to mark whether it has been signaled in start_block + std::map _producer_watermarks; + pending_block_mode _pending_block_mode = pending_block_mode::speculating; + unapplied_transaction_queue _unapplied_transactions; + size_t _thread_pool_size = config::default_controller_thread_pool_size; + named_thread_pool _thread_pool; + std::atomic _max_transaction_time_ms; // modified by app thread, read by net_plugin thread pool + std::atomic _received_block{0}; // modified by net_plugin thread pool + fc::microseconds _max_irreversible_block_age_us; + int32_t _cpu_effort_us = 0; + fc::time_point _pending_block_deadline; + uint32_t _max_block_cpu_usage_threshold_us = 0; + uint32_t _max_block_net_usage_threshold_bytes = 0; + int32_t _max_scheduled_transaction_time_per_block_ms = 0; + bool _disable_subjective_p2p_billing = true; + bool _disable_subjective_api_billing = true; + fc::time_point 
_irreversible_block_time; + fc::time_point _idle_trx_time{fc::time_point::now()}; + + std::vector _protocol_features_to_activate; + bool _protocol_features_signaled = false; // to mark whether it has been signaled in start_block chain_plugin* chain_plug = nullptr; - compat::channels::transaction_ack::channel_type& _transaction_ack_channel; + compat::channels::transaction_ack::channel_type& _transaction_ack_channel; incoming::methods::block_sync::method_type::handle _incoming_block_sync_provider; incoming::methods::transaction_async::method_type::handle _incoming_transaction_async_provider; - transaction_id_with_expiry_index _blacklisted_transactions; - account_failures _account_fails; - block_time_tracker _time_tracker; + transaction_id_with_expiry_index _blacklisted_transactions; + account_failures _account_fails; + block_time_tracker _time_tracker; - std::optional _accepted_block_connection; - std::optional _accepted_block_header_connection; - std::optional _irreversible_block_connection; - std::optional _block_start_connection; + std::optional _accepted_block_connection; + std::optional _accepted_block_header_connection; + std::optional _irreversible_block_connection; + std::optional _block_start_connection; /* * HACK ALERT @@ -522,27 +511,28 @@ class producer_plugin_impl : public std::enable_shared_from_this queue; // boost deque which is faster than std::deque + mutable std::mutex mtx; + deque queue; // boost deque which is faster than std::deque }; - uint32_t _ro_thread_pool_size{ 0 }; + uint32_t _ro_thread_pool_size{0}; // Due to uncertainty to get total virtual memory size on a 5-level paging system for eos-vm-oc and // possible memory exhuastion for large number of contract usage for non-eos-vm-oc, set a hard limit - static constexpr uint32_t _ro_max_threads_allowed{ 8 }; - named_thread_pool _ro_thread_pool; - fc::microseconds _ro_write_window_time_us{ 200000 }; - fc::microseconds _ro_read_window_time_us{ 60000 }; - static constexpr fc::microseconds _ro_read_window_minimum_time_us{ 10000 }; - fc::microseconds _ro_read_window_effective_time_us{ 0 }; // calculated during option initialization - std::atomic _ro_all_threads_exec_time_us; // total time spent by all threads executing transactions. use atomic for simplicity and performance - fc::time_point _ro_read_window_start_time; - fc::time_point _ro_window_deadline; // only modified on app thread, read-window deadline or write-window deadline - boost::asio::deadline_timer _ro_timer; // only accessible from the main thread - fc::microseconds _ro_max_trx_time_us{ 0 }; // calculated during option initialization - ro_trx_queue_t _ro_exhausted_trx_queue; - std::atomic _ro_num_active_exec_tasks{ 0 }; - std::vector> _ro_exec_tasks_fut; + static constexpr uint32_t _ro_max_threads_allowed{8}; + named_thread_pool _ro_thread_pool; + fc::microseconds _ro_write_window_time_us{200000}; + fc::microseconds _ro_read_window_time_us{60000}; + static constexpr fc::microseconds _ro_read_window_minimum_time_us{10000}; + fc::microseconds _ro_read_window_effective_time_us{0}; // calculated during option initialization + std::atomic + _ro_all_threads_exec_time_us; // total time spent by all threads executing transactions. 
use atomic for simplicity and performance + fc::time_point _ro_read_window_start_time; + fc::time_point _ro_window_deadline; // only modified on app thread, read-window deadline or write-window deadline + boost::asio::deadline_timer _ro_timer; // only accessible from the main thread + fc::microseconds _ro_max_trx_time_us{0}; // calculated during option initialization + ro_trx_queue_t _ro_exhausted_trx_queue; + std::atomic _ro_num_active_exec_tasks{0}; + std::vector> _ro_exec_tasks_fut; void start_write_window(); void switch_to_write_window(); @@ -551,40 +541,40 @@ class producer_plugin_impl : public std::enable_shared_from_this next); - void consider_new_watermark( account_name producer, uint32_t block_num, block_timestamp_type timestamp) { - auto itr = _producer_watermarks.find( producer ); - if( itr != _producer_watermarks.end() ) { - itr->second.first = std::max( itr->second.first, block_num ); - itr->second.second = std::max( itr->second.second, timestamp ); - } else if( _producers.count( producer ) > 0 ) { - _producer_watermarks.emplace( producer, std::make_pair(block_num, timestamp) ); + void consider_new_watermark(account_name producer, uint32_t block_num, block_timestamp_type timestamp) { + auto itr = _producer_watermarks.find(producer); + if (itr != _producer_watermarks.end()) { + itr->second.first = std::max(itr->second.first, block_num); + itr->second.second = std::max(itr->second.second, timestamp); + } else if (_producers.count(producer) > 0) { + _producer_watermarks.emplace(producer, std::make_pair(block_num, timestamp)); } } - std::optional get_watermark( account_name producer ) const { - auto itr = _producer_watermarks.find( producer ); + std::optional get_watermark(account_name producer) const { + auto itr = _producer_watermarks.find(producer); - if( itr == _producer_watermarks.end() ) return {}; + if (itr == _producer_watermarks.end()) + return {}; return itr->second; } - void on_block( const block_state_ptr& bsp ) { - auto& chain = chain_plug->chain(); - auto before = _unapplied_transactions.size(); - _unapplied_transactions.clear_applied( bsp ); - chain.get_mutable_subjective_billing().on_block( _log, bsp, fc::time_point::now() ); + void on_block(const block_state_ptr& bsp) { + auto& chain = chain_plug->chain(); + auto before = _unapplied_transactions.size(); + _unapplied_transactions.clear_applied(bsp); + chain.get_mutable_subjective_billing().on_block(_log, bsp, fc::time_point::now()); if (before > 0) { - fc_dlog( _log, "Removed applied transactions before: ${before}, after: ${after}", - ("before", before)("after", _unapplied_transactions.size()) ); + fc_dlog(_log, + "Removed applied transactions before: ${before}, after: ${after}", + ("before", before)("after", _unapplied_transactions.size())); } } - void on_block_header( const block_state_ptr& bsp ) { - consider_new_watermark( bsp->header.producer, bsp->block_num, bsp->block->timestamp ); - } + void on_block_header(const block_state_ptr& bsp) { consider_new_watermark(bsp->header.producer, bsp->block_num, bsp->block->timestamp); } - void on_irreversible_block( const signed_block_ptr& lib ) { + void on_irreversible_block(const signed_block_ptr& lib) { const chain::controller& chain = chain_plug->chain(); EOS_ASSERT(chain.is_write_window(), producer_exception, "write window is expected for on_irreversible_block signal"); _irreversible_block_time = lib->timestamp.to_time_point(); @@ -594,112 +584,113 @@ class producer_plugin_impl : public std::enable_shared_from_thischain(); - if( chain.is_building_block() ) { - 
_time_tracker.report( _idle_trx_time, chain.pending_block_num() ); + if (chain.is_building_block()) { + _time_tracker.report(_idle_trx_time, chain.pending_block_num()); } - _unapplied_transactions.add_aborted( chain.abort_block() ); + _unapplied_transactions.add_aborted(chain.abort_block()); _idle_trx_time = fc::time_point::now(); } bool on_incoming_block(const signed_block_ptr& block, const std::optional& block_id, const block_state_ptr& bsp) { auto& chain = chain_plug->chain(); - if ( _pending_block_mode == pending_block_mode::producing ) { - fc_wlog( _log, "dropped incoming block #${num} id: ${id}", - ("num", block->block_num())("id", block_id ? (*block_id).str() : "UNKNOWN") ); + if (_pending_block_mode == pending_block_mode::producing) { + fc_wlog( + _log, "dropped incoming block #${num} id: ${id}", ("num", block->block_num())("id", block_id ? (*block_id).str() : "UNKNOWN")); return false; } // start a new speculative block, speculative start_block may have been interrupted - auto ensure = fc::make_scoped_exit([this](){ - schedule_production_loop(); - }); + auto ensure = fc::make_scoped_exit([this]() { schedule_production_loop(); }); - const auto& id = block_id ? *block_id : block->calculate_id(); - auto blk_num = block->block_num(); + const auto& id = block_id ? *block_id : block->calculate_id(); + auto blk_num = block->block_num(); auto now = fc::time_point::now(); if (now - block->timestamp < fc::minutes(5) || (blk_num % 1000 == 0)) // only log every 1000 during sync fc_dlog(_log, "received incoming block ${n} ${id}", ("n", blk_num)("id", id)); - EOS_ASSERT( block->timestamp < (now + fc::seconds( 7 )), block_from_the_future, - "received a block from the future, ignoring it: ${id}", ("id", id) ); + EOS_ASSERT(block->timestamp < (now + fc::seconds(7)), + block_from_the_future, + "received a block from the future, ignoring it: ${id}", + ("id", id)); /* de-dupe here... no point in aborting block if we already know the block */ - auto existing = chain.fetch_block_by_id( id ); - if( existing ) { return true; } // return true because the block is valid + auto existing = chain.fetch_block_by_id(id); + if (existing) { + return true; + } // return true because the block is valid // start processing of block std::future bsf; - if( !bsp ) { - bsf = chain.create_block_state_future( id, block ); + if (!bsp) { + bsf = chain.create_block_state_future(id, block); } // abort the pending block abort_block(); // push the new block - auto handle_error = [&](const auto& e) - { + auto handle_error = [&](const auto& e) { elog((e.to_detail_string())); - app().get_channel().publish( priority::medium, block ); + app().get_channel().publish(priority::medium, block); throw; }; controller::block_report br; try { const block_state_ptr& bspr = bsp ? 
bsp : bsf.get(); - chain.push_block( br, bspr, [this]( const branch_type& forked_branch ) { - _unapplied_transactions.add_forked( forked_branch ); - }, [this]( const transaction_id_type& id ) { - return _unapplied_transactions.get_trx( id ); - } ); - } catch ( const guard_exception& e ) { + chain.push_block( + br, + bspr, + [this](const branch_type& forked_branch) { _unapplied_transactions.add_forked(forked_branch); }, + [this](const transaction_id_type& id) { return _unapplied_transactions.get_trx(id); }); + } catch (const guard_exception& e) { chain_plugin::handle_guard_exception(e); return false; - } catch ( const std::bad_alloc& ) { + } catch (const std::bad_alloc&) { chain_apis::api_base::handle_bad_alloc(); - } catch ( boost::interprocess::bad_alloc& ) { + } catch (boost::interprocess::bad_alloc&) { chain_apis::api_base::handle_db_exhaustion(); - } catch ( const fork_database_exception& e ) { + } catch (const fork_database_exception& e) { elog("Cannot recover from ${e}. Shutting down.", ("e", e.to_detail_string())); appbase::app().quit(); return false; - } catch( const fc::exception& e ) { + } catch (const fc::exception& e) { handle_error(e); } catch (const std::exception& e) { handle_error(fc::std_exception_wrapper::from_current_exception(e)); } const auto& hbs = chain.head_block_state(); - now = fc::time_point::now(); - if( hbs->header.timestamp.next().to_time_point() >= now ) { + now = fc::time_point::now(); + if (hbs->header.timestamp.next().to_time_point() >= now) { _production_enabled = true; } - if( now - block->timestamp < fc::minutes(5) || (blk_num % 1000 == 0) ) { + if (now - block->timestamp < fc::minutes(5) || (blk_num % 1000 == 0)) { ilog("Received block ${id}... #${n} @ ${t} signed by ${p} " - "[trxs: ${count}, lib: ${lib}, confirmed: ${confs}, net: ${net}, cpu: ${cpu}, elapsed: ${elapsed}, time: ${time}, latency: ${latency} ms]", - ("p",block->producer)("id",id.str().substr(8,16))("n",blk_num)("t",block->timestamp) - ("count",block->transactions.size())("lib",chain.last_irreversible_block_num()) - ("confs", block->confirmed)("net", br.total_net_usage)("cpu", br.total_cpu_usage_us) - ("elapsed", br.total_elapsed_time)("time", br.total_time) - ("latency", (now - block->timestamp).count()/1000 ) ); - if( chain.get_read_mode() != db_read_mode::IRREVERSIBLE && hbs->id != id && hbs->block != nullptr ) { // not applied to head + "[trxs: ${count}, lib: ${lib}, confirmed: ${confs}, net: ${net}, cpu: ${cpu}, elapsed: ${elapsed}, time: ${time}, latency: " + "${latency} ms]", + ("p", block->producer)("id", id.str().substr(8, 16))("n", blk_num)("t", block->timestamp)( + "count", block->transactions.size())("lib", chain.last_irreversible_block_num())("confs", block->confirmed)( + "net", br.total_net_usage)("cpu", br.total_cpu_usage_us)("elapsed", br.total_elapsed_time)("time", br.total_time)( + "latency", (now - block->timestamp).count() / 1000)); + if (chain.get_read_mode() != db_read_mode::IRREVERSIBLE && hbs->id != id && hbs->block != nullptr) { // not applied to head ilog("Block not applied to head ${id}... 
#${n} @ ${t} signed by ${p} " - "[trxs: ${count}, dpos: ${dpos}, confirmed: ${confs}, net: ${net}, cpu: ${cpu}, elapsed: ${elapsed}, time: ${time}, latency: ${latency} ms]", - ("p",hbs->block->producer)("id",hbs->id.str().substr(8,16))("n",hbs->block_num)("t",hbs->block->timestamp) - ("count",hbs->block->transactions.size())("dpos", hbs->dpos_irreversible_blocknum) - ("confs", hbs->block->confirmed)("net", br.total_net_usage)("cpu", br.total_cpu_usage_us) - ("elapsed", br.total_elapsed_time)("time", br.total_time) - ("latency", (now - hbs->block->timestamp).count()/1000 ) ); + "[trxs: ${count}, dpos: ${dpos}, confirmed: ${confs}, net: ${net}, cpu: ${cpu}, elapsed: ${elapsed}, time: ${time}, " + "latency: ${latency} ms]", + ("p", hbs->block->producer)("id", hbs->id.str().substr(8, 16))("n", hbs->block_num)("t", hbs->block->timestamp)( + "count", hbs->block->transactions.size())("dpos", hbs->dpos_irreversible_blocknum)("confs", hbs->block->confirmed)( + "net", br.total_net_usage)("cpu", br.total_cpu_usage_us)("elapsed", br.total_elapsed_time)("time", br.total_time)( + "latency", (now - hbs->block->timestamp).count() / 1000)); } } if (_update_incoming_block_metrics) { _update_incoming_block_metrics({.trxs_incoming_total = block->transactions.size(), - .cpu_usage_us = br.total_cpu_usage_us, - .net_usage_us = br.total_net_usage, - .last_irreversible = chain.last_irreversible_block_num(), - .head_block_num = chain.head_block_num()}); + .cpu_usage_us = br.total_cpu_usage_us, + .net_usage_us = br.total_net_usage, + .last_irreversible = chain.last_irreversible_block_num(), + .head_block_num = chain.head_block_num()}); } return true; @@ -712,127 +703,145 @@ class producer_plugin_impl : public std::enable_shared_from_this next) { - if ( trx_type == transaction_metadata::trx_type::read_only ) { + if (trx_type == transaction_metadata::trx_type::read_only) { // Post all read only trxs to read_only queue for execution. - auto trx_metadata = transaction_metadata::create_no_recover_keys( trx, transaction_metadata::trx_type::read_only ); + auto trx_metadata = transaction_metadata::create_no_recover_keys(trx, transaction_metadata::trx_type::read_only); app().executor().post(priority::low, exec_queue::read_only, [this, trx{std::move(trx_metadata)}, next{std::move(next)}]() mutable { - push_read_only_transaction( std::move(trx), std::move(next) ); - } ); + push_read_only_transaction(std::move(trx), std::move(next)); + }); return; } - chain::controller& chain = chain_plug->chain(); - const auto max_trx_time_ms = ( trx_type == transaction_metadata::trx_type::read_only ) ? -1 : _max_transaction_time_ms.load(); - fc::microseconds max_trx_cpu_usage = max_trx_time_ms < 0 ? fc::microseconds::maximum() : fc::milliseconds( max_trx_time_ms ); + chain::controller& chain = chain_plug->chain(); + const auto max_trx_time_ms = (trx_type == transaction_metadata::trx_type::read_only) ? -1 : _max_transaction_time_ms.load(); + fc::microseconds max_trx_cpu_usage = max_trx_time_ms < 0 ? 
fc::microseconds::maximum() : fc::milliseconds(max_trx_time_ms); - auto future = transaction_metadata::start_recover_keys( trx, _thread_pool.get_executor(), - chain.get_chain_id(), fc::microseconds( max_trx_cpu_usage ), - trx_type, - chain.configured_subjective_signature_length_limit() ); + auto future = transaction_metadata::start_recover_keys(trx, + _thread_pool.get_executor(), + chain.get_chain_id(), + fc::microseconds(max_trx_cpu_usage), + trx_type, + chain.configured_subjective_signature_length_limit()); auto is_transient = (trx_type == transaction_metadata::trx_type::read_only || trx_type == transaction_metadata::trx_type::dry_run); - if( !is_transient ) { - next = [this, trx, next{std::move(next)}]( const next_function_variant& response ) { - next( response ); + if (!is_transient) { + next = [this, trx, next{std::move(next)}](const next_function_variant& response) { + next(response); fc::exception_ptr except_ptr; // rejected - if( std::holds_alternative( response ) ) { - except_ptr = std::get( response ); - } else if( std::get( response )->except ) { - except_ptr = std::get( response )->except->dynamic_copy_exception(); + if (std::holds_alternative(response)) { + except_ptr = std::get(response); + } else if (std::get(response)->except) { + except_ptr = std::get(response)->except->dynamic_copy_exception(); } - _transaction_ack_channel.publish( priority::low, std::pair( except_ptr, trx ) ); + _transaction_ack_channel.publish(priority::low, std::pair(except_ptr, trx)); }; } - boost::asio::post(_thread_pool.get_executor(), [self = this, future{std::move(future)}, api_trx, is_transient, return_failure_traces, - next{std::move(next)}, trx=trx]() mutable { - if( future.valid() ) { - future.wait(); - app().executor().post( priority::low, exec_queue::read_write, [self, future{std::move(future)}, api_trx, is_transient, next{std::move( next )}, trx{std::move(trx)}, return_failure_traces]() mutable { - auto start = fc::time_point::now(); - auto idle_time = start - self->_idle_trx_time; - self->_time_tracker.add_idle_time( idle_time ); - fc_tlog( _log, "Time since last trx: ${t}us", ("t", idle_time) ); - - auto exception_handler = [self, is_transient, &next, trx{std::move(trx)}, &start](fc::exception_ptr ex) { - self->_time_tracker.add_idle_time( start - self->_idle_trx_time ); - self->log_trx_results( trx, nullptr, ex, 0, start, is_transient ); - next( std::move(ex) ); - self->_idle_trx_time = fc::time_point::now(); - auto dur = self->_idle_trx_time - start; - self->_time_tracker.add_fail_time(dur, is_transient); - }; - try { - auto result = future.get(); - if( !self->process_incoming_transaction_async( result, api_trx, return_failure_traces, next) ) { - if( self->_pending_block_mode == pending_block_mode::producing ) { - self->schedule_maybe_produce_block( true ); - } else { - self->restart_speculative_block(); - } - } - self->_idle_trx_time = fc::time_point::now(); - } CATCH_AND_CALL(exception_handler); - } ); - } - }); - } - - bool process_incoming_transaction_async(const transaction_metadata_ptr& trx, - bool api_trx, - bool return_failure_trace, + boost::asio::post(_thread_pool.get_executor(), + [self = this, + future{std::move(future)}, + api_trx, + is_transient, + return_failure_traces, + next{std::move(next)}, + trx = trx]() mutable { + if (future.valid()) { + future.wait(); + app().executor().post( + priority::low, + exec_queue::read_write, + [self, + future{std::move(future)}, + api_trx, + is_transient, + next{std::move(next)}, + trx{std::move(trx)}, + return_failure_traces]() mutable 
{ + auto start = fc::time_point::now(); + auto idle_time = start - self->_idle_trx_time; + self->_time_tracker.add_idle_time(idle_time); + fc_tlog(_log, "Time since last trx: ${t}us", ("t", idle_time)); + + auto exception_handler = + [self, is_transient, &next, trx{std::move(trx)}, &start](fc::exception_ptr ex) { + self->_time_tracker.add_idle_time(start - self->_idle_trx_time); + self->log_trx_results(trx, nullptr, ex, 0, start, is_transient); + next(std::move(ex)); + self->_idle_trx_time = fc::time_point::now(); + auto dur = self->_idle_trx_time - start; + self->_time_tracker.add_fail_time(dur, is_transient); + }; + try { + auto result = future.get(); + if (!self->process_incoming_transaction_async(result, api_trx, return_failure_traces, next)) { + if (self->_pending_block_mode == pending_block_mode::producing) { + self->schedule_maybe_produce_block(true); + } else { + self->restart_speculative_block(); + } + } + self->_idle_trx_time = fc::time_point::now(); + } + CATCH_AND_CALL(exception_handler); + }); + } + }); + } + + bool process_incoming_transaction_async(const transaction_metadata_ptr& trx, + bool api_trx, + bool return_failure_trace, const next_function& next) { - bool exhausted = false; - chain::controller& chain = chain_plug->chain(); + bool exhausted = false; + chain::controller& chain = chain_plug->chain(); try { const auto& id = trx->id(); - fc::time_point bt = chain.is_building_block() ? chain.pending_block_time() : chain.head_block_time(); + fc::time_point bt = chain.is_building_block() ? chain.pending_block_time() : chain.head_block_time(); const fc::time_point expire = trx->packed_trx()->expiration().to_time_point(); - if( expire < bt ) { - auto except_ptr = std::static_pointer_cast( - std::make_shared( - FC_LOG_MESSAGE( error, "expired transaction ${id}, expiration ${e}, block time ${bt}", - ("id", id)("e", expire)("bt", bt)))); - log_trx_results( trx, except_ptr ); - next( std::move(except_ptr) ); + if (expire < bt) { + auto except_ptr = std::static_pointer_cast(std::make_shared( + FC_LOG_MESSAGE(error, "expired transaction ${id}, expiration ${e}, block time ${bt}", ("id", id)("e", expire)("bt", bt)))); + log_trx_results(trx, except_ptr); + next(std::move(except_ptr)); return true; } - if( chain.is_known_unexpired_transaction( id )) { - auto except_ptr = std::static_pointer_cast( std::make_shared( - FC_LOG_MESSAGE( error, "duplicate transaction ${id}", ("id", id)))); - next( std::move(except_ptr) ); + if (chain.is_known_unexpired_transaction(id)) { + auto except_ptr = std::static_pointer_cast( + std::make_shared(FC_LOG_MESSAGE(error, "duplicate transaction ${id}", ("id", id)))); + next(std::move(except_ptr)); return true; } - if( !chain.is_building_block()) { - _unapplied_transactions.add_incoming( trx, api_trx, return_failure_trace, next ); + if (!chain.is_building_block()) { + _unapplied_transactions.add_incoming(trx, api_trx, return_failure_trace, next); return true; } - const auto block_deadline = _pending_block_deadline; - push_result pr = push_transaction( block_deadline, trx, api_trx, return_failure_trace, next ); + const auto block_deadline = _pending_block_deadline; + push_result pr = push_transaction(block_deadline, trx, api_trx, return_failure_trace, next); exhausted = pr.block_exhausted; - if( pr.trx_exhausted ) { - _unapplied_transactions.add_incoming( trx, api_trx, return_failure_trace, next ); + if (pr.trx_exhausted) { + _unapplied_transactions.add_incoming(trx, api_trx, return_failure_trace, next); } - } catch ( const guard_exception& e ) { + } catch 
(const guard_exception& e) { chain_plugin::handle_guard_exception(e); - } catch ( boost::interprocess::bad_alloc& ) { + } catch (boost::interprocess::bad_alloc&) { chain_apis::api_base::handle_db_exhaustion(); - } catch ( std::bad_alloc& ) { + } catch (std::bad_alloc&) { chain_apis::api_base::handle_bad_alloc(); - } CATCH_AND_CALL(next); + } + CATCH_AND_CALL(next); return !exhausted; } @@ -857,22 +866,22 @@ class producer_plugin_impl : public std::enable_shared_from_this= 0 && get_irreversible_block_age() >= _max_irreversible_block_age_us); + return !_production_enabled || _pause_production || + (_max_irreversible_block_age_us.count() >= 0 && get_irreversible_block_age() >= _max_irreversible_block_age_us); } - bool is_producer_key(const chain::public_key_type& key) const { - return _signature_providers.find(key) != _signature_providers.end(); - } + bool is_producer_key(const chain::public_key_type& key) const { return _signature_providers.find(key) != _signature_providers.end(); } chain::signature_type sign_compact(const chain::public_key_type& key, const fc::sha256& digest) const { - if(key != chain::public_key_type()) { + if (key != chain::public_key_type()) { auto private_key_itr = _signature_providers.find(key); - EOS_ASSERT(private_key_itr != _signature_providers.end(), producer_priv_key_not_found, - "Local producer has no private key in config.ini corresponding to public key ${key}", ("key", key)); + EOS_ASSERT(private_key_itr != _signature_providers.end(), + producer_priv_key_not_found, + "Local producer has no private key in config.ini corresponding to public key ${key}", + ("key", key)); return private_key_itr->second(digest); - } - else { + } else { return chain::signature_type(); } } @@ -1052,7 +1061,8 @@ void producer_plugin_impl::plugin_initialize(const boost::program_options::varia } auto subjective_account_max_failures_window_size = options.at("subjective-account-max-failures-window-size").as(); - EOS_ASSERT(subjective_account_max_failures_window_size > 0, plugin_config_exception, + EOS_ASSERT(subjective_account_max_failures_window_size > 0, + plugin_config_exception, "subjective-account-max-failures-window-size ${s} must be greater than 0", ("s", subjective_account_max_failures_window_size)); @@ -1060,14 +1070,17 @@ void producer_plugin_impl::plugin_initialize(const boost::program_options::varia subjective_account_max_failures_window_size); uint32_t cpu_effort_pct = options.at("cpu-effort-percent").as(); - EOS_ASSERT(cpu_effort_pct >= 0 && cpu_effort_pct <= 100, plugin_config_exception, - "cpu-effort-percent ${pct} must be 0 - 100", ("pct", cpu_effort_pct)); + EOS_ASSERT(cpu_effort_pct >= 0 && cpu_effort_pct <= 100, + plugin_config_exception, + "cpu-effort-percent ${pct} must be 0 - 100", + ("pct", cpu_effort_pct)); cpu_effort_pct *= config::percent_1; - + _cpu_effort_us = EOS_PERCENT(config::block_interval_us, cpu_effort_pct); _max_block_cpu_usage_threshold_us = options.at("max-block-cpu-usage-threshold-us").as(); - EOS_ASSERT(_max_block_cpu_usage_threshold_us < config::block_interval_us, plugin_config_exception, + EOS_ASSERT(_max_block_cpu_usage_threshold_us < config::block_interval_us, + plugin_config_exception, "max-block-cpu-usage-threshold-us ${t} must be 0 .. 
${bi}", ("bi", config::block_interval_us)("t", _max_block_cpu_usage_threshold_us)); @@ -1080,7 +1093,8 @@ void producer_plugin_impl::plugin_initialize(const boost::program_options::varia } fc::microseconds subjective_account_decay_time = fc::minutes(options.at("subjective-account-decay-time-minutes").as()); - EOS_ASSERT(subjective_account_decay_time.count() > 0, plugin_config_exception, + EOS_ASSERT(subjective_account_decay_time.count() > 0, + plugin_config_exception, "subjective-account-decay-time-minutes ${dt} must be greater than 0", ("dt", subjective_account_decay_time.to_seconds() / 60)); chain.get_mutable_subjective_billing().set_expired_accumulator_average_window(subjective_account_decay_time); @@ -1089,9 +1103,10 @@ void producer_plugin_impl::plugin_initialize(const boost::program_options::varia _max_irreversible_block_age_us = fc::seconds(options.at("max-irreversible-block-age").as()); - auto max_incoming_transaction_queue_size = options.at("incoming-transaction-queue-size-mb").as() * 1024*1024; + auto max_incoming_transaction_queue_size = options.at("incoming-transaction-queue-size-mb").as() * 1024 * 1024; - EOS_ASSERT(max_incoming_transaction_queue_size > 0, plugin_config_exception, + EOS_ASSERT(max_incoming_transaction_queue_size > 0, + plugin_config_exception, "incoming-transaction-queue-size-mb ${mb} must be greater than 0", ("mb", max_incoming_transaction_queue_size)); @@ -1109,14 +1124,14 @@ void producer_plugin_impl::plugin_initialize(const boost::program_options::varia } else if (!_disable_subjective_p2p_billing && !_disable_subjective_api_billing) { ilog("Subjective CPU billing enabled"); } else { - if (_disable_subjective_p2p_billing) ilog("Subjective CPU billing of P2P trxs disabled "); - if (_disable_subjective_api_billing) ilog("Subjective CPU billing of API trxs disabled "); + if (_disable_subjective_p2p_billing) + ilog("Subjective CPU billing of P2P trxs disabled "); + if (_disable_subjective_api_billing) + ilog("Subjective CPU billing of API trxs disabled "); } _thread_pool_size = options.at("producer-threads").as(); - EOS_ASSERT(_thread_pool_size > 0, plugin_config_exception, - "producer-threads ${num} must be greater than 0", - ("num", _thread_pool_size)); + EOS_ASSERT(_thread_pool_size > 0, plugin_config_exception, "producer-threads ${num} must be greater than 0", ("num", _thread_pool_size)); if (options.count("snapshots-dir")) { auto sd = options.at("snapshots-dir").as(); @@ -1129,8 +1144,10 @@ void producer_plugin_impl::plugin_initialize(const boost::program_options::varia _snapshots_dir = sd; } - EOS_ASSERT(std::filesystem::is_directory(_snapshots_dir), snapshot_directory_not_found_exception, - "No such directory '${dir}'", ("dir", _snapshots_dir)); + EOS_ASSERT(std::filesystem::is_directory(_snapshots_dir), + snapshot_directory_not_found_exception, + "No such directory '${dir}'", + ("dir", _snapshots_dir)); if (auto resmon_plugin = app().find_plugin()) { resmon_plugin->monitor_directory(_snapshots_dir); @@ -1142,7 +1159,7 @@ void producer_plugin_impl::plugin_initialize(const boost::program_options::varia } else if (_producers.empty()) { if (options.count("plugin")) { const auto& v = options.at("plugin").as>(); - auto i = std::find_if (v.cbegin(), v.cend(), [](const std::string& p) { return p == "eosio::chain_api_plugin"; }); + auto i = std::find_if(v.cbegin(), v.cend(), [](const std::string& p) { return p == "eosio::chain_api_plugin"; }); if (i != v.cend()) { // default to 3 threads for non producer nodes running chain_api_plugin if not specified 
_ro_thread_pool_size = 3; @@ -1150,7 +1167,8 @@ void producer_plugin_impl::plugin_initialize(const boost::program_options::varia } } } - EOS_ASSERT(producer_plugin::test_mode_ || _ro_thread_pool_size == 0 || _producers.empty(), plugin_config_exception, + EOS_ASSERT(producer_plugin::test_mode_ || _ro_thread_pool_size == 0 || _producers.empty(), + plugin_config_exception, "read-only-threads not allowed on producer node"); // only initialize other read-only options when read-only thread pool is enabled @@ -1159,9 +1177,9 @@ void producer_plugin_impl::plugin_initialize(const boost::program_options::varia if (chain.is_eos_vm_oc_enabled()) { // EOS VM OC requires 4.2TB Virtual for each executing thread. Make sure the memory // required by configured read-only threads does not exceed the total system virtual memory. - std::string attr_name; - size_t vm_total_kb { 0 }; - size_t vm_used_kb { 0 }; + std::string attr_name; + size_t vm_total_kb{0}; + size_t vm_used_kb{0}; std::ifstream meminfo_file("/proc/meminfo"); while (meminfo_file >> attr_name) { if (attr_name == "VmallocTotal:") { @@ -1174,14 +1192,18 @@ void producer_plugin_impl::plugin_initialize(const boost::program_options::varia meminfo_file.ignore(std::numeric_limits::max(), '\n'); } - EOS_ASSERT(vm_total_kb > 0, plugin_config_exception, + EOS_ASSERT(vm_total_kb > 0, + plugin_config_exception, "Unable to get system virtual memory size (not a Linux?), therefore cannot determine if the system has enough " "virtual memory for multi-threaded read-only transactions on EOS VM OC"); - EOS_ASSERT(vm_total_kb > vm_used_kb, plugin_config_exception, - "vm total (${t}) must be greater than vm used (${u})", ("t", vm_total_kb)("u", vm_used_kb)); + EOS_ASSERT(vm_total_kb > vm_used_kb, + plugin_config_exception, + "vm total (${t}) must be greater than vm used (${u})", + ("t", vm_total_kb)("u", vm_used_kb)); uint32_t num_threads_supported = (vm_total_kb - vm_used_kb) / 4200000000; // reserve 1 for the app thread, 1 for anything else which might use VM - EOS_ASSERT(num_threads_supported > 2, plugin_config_exception, + EOS_ASSERT(num_threads_supported > 2, + plugin_config_exception, "With the EOS VM OC configured, there is not enough system virtual memory to support the required minimum of " "3 threads (1 for main thread, 1 for read-only, and 1 for anything else), vm total: ${t}, vm used: ${u}", ("t", vm_total_kb)("u", vm_used_kb)); @@ -1189,36 +1211,40 @@ void producer_plugin_impl::plugin_initialize(const boost::program_options::varia auto actual_threads_allowed = std::min(_ro_max_threads_allowed, num_threads_supported); ilog("vm total in kb: ${total}, vm used in kb: ${used}, number of EOS VM OC threads supported " "((vm total - vm used)/4.2 TB - 2): ${supp}, max allowed: ${max}, actual allowed: ${actual}", - ("total", vm_total_kb) ("used", vm_used_kb) ("supp", num_threads_supported) - ("max", _ro_max_threads_allowed)("actual", actual_threads_allowed)); - EOS_ASSERT(_ro_thread_pool_size <= actual_threads_allowed, plugin_config_exception, + ("total", vm_total_kb)("used", vm_used_kb)("supp", num_threads_supported)("max", _ro_max_threads_allowed)( + "actual", actual_threads_allowed)); + EOS_ASSERT(_ro_thread_pool_size <= actual_threads_allowed, + plugin_config_exception, "read-only-threads (${th}) greater than number of threads allowed for EOS VM OC (${allowed})", - ("th", _ro_thread_pool_size) ("allowed", actual_threads_allowed)); + ("th", _ro_thread_pool_size)("allowed", actual_threads_allowed)); } #endif - EOS_ASSERT(_ro_thread_pool_size <= 
_ro_max_threads_allowed, plugin_config_exception, + EOS_ASSERT(_ro_thread_pool_size <= _ro_max_threads_allowed, + plugin_config_exception, "read-only-threads (${th}) greater than the number of threads allowed (${allowed})", - ("th", _ro_thread_pool_size) ("allowed", _ro_max_threads_allowed)); + ("th", _ro_thread_pool_size)("allowed", _ro_max_threads_allowed)); _ro_write_window_time_us = fc::microseconds(options.at("read-only-write-window-time-us").as()); - _ro_read_window_time_us = fc::microseconds(options.at("read-only-read-window-time-us").as()); - EOS_ASSERT(_ro_read_window_time_us > _ro_read_window_minimum_time_us, plugin_config_exception, + _ro_read_window_time_us = fc::microseconds(options.at("read-only-read-window-time-us").as()); + EOS_ASSERT(_ro_read_window_time_us > _ro_read_window_minimum_time_us, + plugin_config_exception, "read-only-read-window-time-us (${read}) must be at least greater than ${min} us", - ("read", _ro_read_window_time_us) ("min", _ro_read_window_minimum_time_us)); + ("read", _ro_read_window_time_us)("min", _ro_read_window_minimum_time_us)); _ro_read_window_effective_time_us = _ro_read_window_time_us - _ro_read_window_minimum_time_us; // Make sure a read-only transaction can finish within the read // window if scheduled at the very beginning of the window. // Add _ro_read_window_minimum_time_us for safety margin. if (_max_transaction_time_ms.load() > 0) { - EOS_ASSERT(_ro_read_window_time_us > (fc::milliseconds(_max_transaction_time_ms.load()) + _ro_read_window_minimum_time_us), - plugin_config_exception, - "read-only-read-window-time-us (${read} us) must be greater than max-transaction-time (${trx_time} us) " - "plus ${min} us, required: ${read} us > (${trx_time} us + ${min} us).", - ("read", _ro_read_window_time_us) ("trx_time", _max_transaction_time_ms.load() * 1000) - ("min", _ro_read_window_minimum_time_us)); - } - ilog("read-only-write-window-time-us: ${ww} us, read-only-read-window-time-us: ${rw} us, effective read window time to be used: ${w} us", + EOS_ASSERT( + _ro_read_window_time_us > (fc::milliseconds(_max_transaction_time_ms.load()) + _ro_read_window_minimum_time_us), + plugin_config_exception, + "read-only-read-window-time-us (${read} us) must be greater than max-transaction-time (${trx_time} us) " + "plus ${min} us, required: ${read} us > (${trx_time} us + ${min} us).", + ("read", _ro_read_window_time_us)("trx_time", _max_transaction_time_ms.load() * 1000)("min", _ro_read_window_minimum_time_us)); + } + ilog("read-only-write-window-time-us: ${ww} us, read-only-read-window-time-us: ${rw} us, effective read window time to be used: ${w} " + "us", ("ww", _ro_write_window_time_us)("rw", _ro_read_window_time_us)("w", _ro_read_window_effective_time_us)); } @@ -1236,16 +1262,19 @@ void producer_plugin_impl::plugin_initialize(const boost::program_options::varia return on_incoming_block(block, block_id, bsp); }); - _incoming_transaction_async_provider = app().get_method().register_provider( - [this](const packed_transaction_ptr& trx, bool api_trx, transaction_metadata::trx_type trx_type, - bool return_failure_traces, next_function next) -> void { + _incoming_transaction_async_provider = + app().get_method().register_provider([this](const packed_transaction_ptr& trx, + bool api_trx, + transaction_metadata::trx_type trx_type, + bool return_failure_traces, + next_function next) -> void { return on_incoming_transaction_async(trx, api_trx, trx_type, return_failure_traces, next); }); if (options.count("greylist-account")) { - std::vector greylist = 
options["greylist-account"].as>(); + std::vector greylist = options["greylist-account"].as>(); producer_plugin::greylist_params param; - for (auto &a : greylist) { + for (auto& a : greylist) { param.accounts.push_back(account_name(a)); } add_greylist_accounts(param); @@ -1258,111 +1287,123 @@ void producer_plugin_impl::plugin_initialize(const boost::program_options::varia if (options.count("disable-subjective-account-billing")) { std::vector accounts = options["disable-subjective-account-billing"].as>(); - for(const auto& a : accounts) { + for (const auto& a : accounts) { chain.get_mutable_subjective_billing().disable_account(account_name(a)); } } _snapshot_scheduler.set_db_path(_snapshots_dir); - _snapshot_scheduler.set_snapshots_path(_snapshots_dir); -} + _snapshot_scheduler.set_snapshots_path(_snapshots_dir); +} -void producer_plugin::plugin_initialize(const boost::program_options::variables_map& options) -{ +void producer_plugin::plugin_initialize(const boost::program_options::variables_map& options) { try { handle_sighup(); // Sets loggers my->plugin_initialize(options); - } FC_LOG_AND_RETHROW() + } + FC_LOG_AND_RETHROW() } using namespace std::chrono_literals; -void producer_plugin_impl::plugin_startup() -{ try { +void producer_plugin_impl::plugin_startup() { try { - ilog("producer plugin: plugin_startup() begin"); + try { + ilog("producer plugin: plugin_startup() begin"); - _thread_pool.start(_thread_pool_size, [](const fc::exception& e) { - fc_elog( _log, "Exception in producer thread pool, exiting: ${e}", ("e", e.to_detail_string()) ); - app().quit(); - } ); + _thread_pool.start(_thread_pool_size, [](const fc::exception& e) { + fc_elog(_log, "Exception in producer thread pool, exiting: ${e}", ("e", e.to_detail_string())); + app().quit(); + }); - chain::controller& chain = chain_plug->chain(); - EOS_ASSERT( _producers.empty() || chain.get_read_mode() != chain::db_read_mode::IRREVERSIBLE, plugin_config_exception, - "node cannot have any producer-name configured because block production is impossible when read_mode is \"irreversible\"" ); + chain::controller& chain = chain_plug->chain(); + EOS_ASSERT( + _producers.empty() || chain.get_read_mode() != chain::db_read_mode::IRREVERSIBLE, + plugin_config_exception, + "node cannot have any producer-name configured because block production is impossible when read_mode is \"irreversible\""); - EOS_ASSERT( _producers.empty() || chain.get_validation_mode() == chain::validation_mode::FULL, plugin_config_exception, - "node cannot have any producer-name configured because block production is not safe when validation_mode is not \"full\"" ); + EOS_ASSERT( + _producers.empty() || chain.get_validation_mode() == chain::validation_mode::FULL, + plugin_config_exception, + "node cannot have any producer-name configured because block production is not safe when validation_mode is not \"full\""); - EOS_ASSERT( _producers.empty() || chain_plug->accept_transactions(), plugin_config_exception, - "node cannot have any producer-name configured because no block production is possible with no [api|p2p]-accepted-transactions" ); + EOS_ASSERT(_producers.empty() || chain_plug->accept_transactions(), + plugin_config_exception, + "node cannot have any producer-name configured because no block production is possible with no " + "[api|p2p]-accepted-transactions"); + + _accepted_block_connection.emplace(chain.accepted_block.connect([this](const auto& bsp) { on_block(bsp); })); + _accepted_block_header_connection.emplace(chain.accepted_block_header.connect([this](const 
auto& bsp) { on_block_header(bsp); })); + _irreversible_block_connection.emplace( + chain.irreversible_block.connect([this](const auto& bsp) { on_irreversible_block(bsp->block); })); + + _block_start_connection.emplace(chain.block_start.connect([this, &chain](uint32_t bs) { + try { + _snapshot_scheduler.on_start_block(bs, chain); + } catch (const snapshot_execution_exception& e) { + fc_elog(_log, "Exception during snapshot execution: ${e}", ("e", e.to_detail_string())); + app().quit(); + } + })); - _accepted_block_connection.emplace(chain.accepted_block.connect( [this]( const auto& bsp ){ on_block( bsp ); } )); - _accepted_block_header_connection.emplace(chain.accepted_block_header.connect( [this]( const auto& bsp ){ on_block_header( bsp ); } )); - _irreversible_block_connection.emplace(chain.irreversible_block.connect( [this]( const auto& bsp ){ on_irreversible_block( bsp->block ); } )); - - _block_start_connection.emplace(chain.block_start.connect( [this, &chain]( uint32_t bs ) { - try { - _snapshot_scheduler.on_start_block(bs, chain); - } - catch (const snapshot_execution_exception & e) { - fc_elog( _log, "Exception during snapshot execution: ${e}", ("e", e.to_detail_string()) ); - app().quit(); - } - } )); - - const auto lib_num = chain.last_irreversible_block_num(); - const auto lib = chain.fetch_block_by_number(lib_num); - if (lib) { - on_irreversible_block(lib); - } else { - _irreversible_block_time = fc::time_point::maximum(); - } + const auto lib_num = chain.last_irreversible_block_num(); + const auto lib = chain.fetch_block_by_number(lib_num); + if (lib) { + on_irreversible_block(lib); + } else { + _irreversible_block_time = fc::time_point::maximum(); + } - if (!_producers.empty()) { - ilog("Launching block production for ${n} producers at ${time}.", ("n", _producers.size())("time",fc::time_point::now())); + if (!_producers.empty()) { + ilog("Launching block production for ${n} producers at ${time}.", ("n", _producers.size())("time", fc::time_point::now())); - if (_production_enabled) { - if (chain.head_block_num() == 0) { - new_chain_banner(chain); + if (_production_enabled) { + if (chain.head_block_num() == 0) { + new_chain_banner(chain); + } + } } - } - } - if ( _ro_thread_pool_size > 0 ) { - std::atomic num_threads_started = 0; - _ro_thread_pool.start( _ro_thread_pool_size, - []( const fc::exception& e ) { - fc_elog( _log, "Exception in read-only thread pool, exiting: ${e}", ("e", e.to_detail_string()) ); - app().quit(); - }, - [&]() { - chain.init_thread_local_data(); - ++num_threads_started; - }); - - // This will be changed with std::latch or std::atomic<>::wait - // when C++20 is used. - auto time_slept_ms = 0; - constexpr auto max_time_slept_ms = 1000; - while ( num_threads_started.load() < _ro_thread_pool_size && time_slept_ms < max_time_slept_ms ) { - std::this_thread::sleep_for( 1ms ); - ++time_slept_ms; - } - EOS_ASSERT(num_threads_started.load() == _ro_thread_pool_size, producer_exception, "read-only threads failed to start. num_threads_started: ${n}, time_slept_ms: ${t}ms", ("n", num_threads_started.load())("t", time_slept_ms)); + if (_ro_thread_pool_size > 0) { + std::atomic num_threads_started = 0; + _ro_thread_pool.start( + _ro_thread_pool_size, + [](const fc::exception& e) { + fc_elog(_log, "Exception in read-only thread pool, exiting: ${e}", ("e", e.to_detail_string())); + app().quit(); + }, + [&]() { + chain.init_thread_local_data(); + ++num_threads_started; + }); + + // This will be changed with std::latch or std::atomic<>::wait + // when C++20 is used. 
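Since the comment above anticipates switching to std::latch or std::atomic<>::wait once C++20 is in use, here is a minimal sketch, under that assumption, of what the startup synchronization could look like; std::jthread stands in for the plugin's thread pool so the example is self-contained.

// Illustrative C++20 sketch only, not part of the patch.
#include <latch>
#include <thread>
#include <vector>

void start_workers(unsigned thread_count) {
   std::latch started(thread_count);
   std::vector<std::jthread> workers;
   workers.reserve(thread_count);
   for (unsigned i = 0; i < thread_count; ++i) {
      workers.emplace_back([&started] {
         // per-thread initialization (the plugin calls chain.init_thread_local_data() here)
         started.count_down();
         // ... thread body would run here ...
      });
   }
   started.wait(); // replaces the 1 ms sleep/poll loop that follows in the current code
}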
+ auto time_slept_ms = 0; + constexpr auto max_time_slept_ms = 1000; + while (num_threads_started.load() < _ro_thread_pool_size && time_slept_ms < max_time_slept_ms) { + std::this_thread::sleep_for(1ms); + ++time_slept_ms; + } + EOS_ASSERT(num_threads_started.load() == _ro_thread_pool_size, + producer_exception, + "read-only threads failed to start. num_threads_started: ${n}, time_slept_ms: ${t}ms", + ("n", num_threads_started.load())("t", time_slept_ms)); - start_write_window(); - } + start_write_window(); + } - schedule_production_loop(); + schedule_production_loop(); - ilog("producer plugin: plugin_startup() end"); - } catch( ... ) { - // always call plugin_shutdown, even on exception - plugin_shutdown(); - throw; + ilog("producer plugin: plugin_startup() end"); + } catch (...) { + // always call plugin_shutdown, even on exception + plugin_shutdown(); + throw; + } } -} FC_CAPTURE_AND_RETHROW() } + FC_CAPTURE_AND_RETHROW() +} void producer_plugin::plugin_startup() { my->plugin_startup(); @@ -1374,7 +1415,7 @@ void producer_plugin_impl::plugin_shutdown() { _thread_pool.stop(); _unapplied_transactions.clear(); - app().executor().post( 0, [me = shared_from_this()](){} ); // keep my pointer alive until queue is drained + app().executor().post(0, [me = shared_from_this()]() {}); // keep my pointer alive until queue is drained fc_ilog(_log, "exit shutdown"); } @@ -1384,7 +1425,7 @@ void producer_plugin::plugin_shutdown() { } void producer_plugin::handle_sighup() { - fc::logger::update( logger_name, _log ); + fc::logger::update(logger_name, _log); fc::logger::update(trx_successful_trace_logger_name, _trx_successful_trace_log); fc::logger::update(trx_failed_trace_logger_name, _trx_failed_trace_log); fc::logger::update(trx_trace_success_logger_name, _trx_trace_success_log); @@ -1408,16 +1449,16 @@ bool producer_plugin::paused() const { } void producer_plugin_impl::update_runtime_options(const producer_plugin::runtime_options& options) { - chain::controller& chain = chain_plug->chain(); - bool check_speculating = false; + chain::controller& chain = chain_plug->chain(); + bool check_speculating = false; if (options.max_transaction_time) { _max_transaction_time_ms = *options.max_transaction_time; } if (options.max_irreversible_block_age) { - _max_irreversible_block_age_us = fc::seconds(*options.max_irreversible_block_age); - check_speculating = true; + _max_irreversible_block_age_us = fc::seconds(*options.max_irreversible_block_age); + check_speculating = true; } if (options.cpu_effort_us) { @@ -1468,29 +1509,34 @@ producer_plugin::greylist_params producer_plugin::get_greylist() const { producer_plugin::whitelist_blacklist producer_plugin::get_whitelist_blacklist() const { chain::controller& chain = my->chain_plug->chain(); - return { - chain.get_actor_whitelist(), - chain.get_actor_blacklist(), - chain.get_contract_whitelist(), - chain.get_contract_blacklist(), - chain.get_action_blacklist(), - chain.get_key_blacklist() - }; + return {chain.get_actor_whitelist(), + chain.get_actor_blacklist(), + chain.get_contract_whitelist(), + chain.get_contract_blacklist(), + chain.get_action_blacklist(), + chain.get_key_blacklist()}; } void producer_plugin::set_whitelist_blacklist(const producer_plugin::whitelist_blacklist& params) { - EOS_ASSERT(params.actor_whitelist || params.actor_blacklist || params.contract_whitelist || params.contract_blacklist || params.action_blacklist || params.key_blacklist, + EOS_ASSERT(params.actor_whitelist || params.actor_blacklist || params.contract_whitelist || 
params.contract_blacklist || + params.action_blacklist || params.key_blacklist, chain::invalid_http_request, - "At least one of actor_whitelist, actor_blacklist, contract_whitelist, contract_blacklist, action_blacklist, and key_blacklist is required" - ); + "At least one of actor_whitelist, actor_blacklist, contract_whitelist, contract_blacklist, action_blacklist, and " + "key_blacklist is required"); chain::controller& chain = my->chain_plug->chain(); - if (params.actor_whitelist) chain.set_actor_whitelist(*params.actor_whitelist); - if (params.actor_blacklist) chain.set_actor_blacklist(*params.actor_blacklist); - if (params.contract_whitelist) chain.set_contract_whitelist(*params.contract_whitelist); - if (params.contract_blacklist) chain.set_contract_blacklist(*params.contract_blacklist); - if (params.action_blacklist) chain.set_action_blacklist(*params.action_blacklist); - if (params.key_blacklist) chain.set_key_blacklist(*params.key_blacklist); + if (params.actor_whitelist) + chain.set_actor_whitelist(*params.actor_whitelist); + if (params.actor_blacklist) + chain.set_actor_blacklist(*params.actor_blacklist); + if (params.contract_whitelist) + chain.set_contract_whitelist(*params.contract_whitelist); + if (params.contract_blacklist) + chain.set_contract_blacklist(*params.contract_blacklist); + if (params.action_blacklist) + chain.set_action_blacklist(*params.action_blacklist); + if (params.key_blacklist) + chain.set_key_blacklist(*params.key_blacklist); } producer_plugin::integrity_hash_information producer_plugin::get_integrity_hash() const { @@ -1502,144 +1548,142 @@ void producer_plugin::create_snapshot(producer_plugin::next_function_snapshot_scheduler.schedule_snapshot(sri); } chain::snapshot_scheduler::snapshot_schedule_result -producer_plugin::unschedule_snapshot(const chain::snapshot_scheduler::snapshot_request_id_information& sri) -{ +producer_plugin::unschedule_snapshot(const chain::snapshot_scheduler::snapshot_request_id_information& sri) { return my->_snapshot_scheduler.unschedule_snapshot(sri.snapshot_request_id); } -chain::snapshot_scheduler::get_snapshot_requests_result producer_plugin::get_snapshot_requests() const -{ +chain::snapshot_scheduler::get_snapshot_requests_result producer_plugin::get_snapshot_requests() const { return my->_snapshot_scheduler.get_snapshot_requests(); } -producer_plugin::scheduled_protocol_feature_activations -producer_plugin::get_scheduled_protocol_feature_activations()const { +producer_plugin::scheduled_protocol_feature_activations producer_plugin::get_scheduled_protocol_feature_activations() const { return {my->_protocol_features_to_activate}; } void producer_plugin_impl::schedule_protocol_feature_activations(const producer_plugin::scheduled_protocol_feature_activations& schedule) { const chain::controller& chain = chain_plug->chain(); - std::set set_of_features_to_activate( schedule.protocol_features_to_activate.begin(), - schedule.protocol_features_to_activate.end() ); - EOS_ASSERT( set_of_features_to_activate.size() == schedule.protocol_features_to_activate.size(), - invalid_protocol_features_to_activate, "duplicate digests" ); - chain.validate_protocol_features( schedule.protocol_features_to_activate ); + std::set set_of_features_to_activate(schedule.protocol_features_to_activate.begin(), + schedule.protocol_features_to_activate.end()); + EOS_ASSERT(set_of_features_to_activate.size() == schedule.protocol_features_to_activate.size(), + invalid_protocol_features_to_activate, + "duplicate digests"); + 
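For reference, the duplicate-digest check above boils down to building a std::set from the requested digests and comparing sizes; a minimal, self-contained sketch (not part of the patch), templated over an arbitrary digest type:

// Illustrative sketch only, not part of the patch.
#include <set>
#include <vector>

template <typename Digest>
bool has_duplicates(const std::vector<Digest>& digests) {
   std::set<Digest> unique(digests.begin(), digests.end());
   return unique.size() != digests.size(); // any shrinkage means at least one repeated digest
}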
chain.validate_protocol_features(schedule.protocol_features_to_activate); const auto& pfs = chain.get_protocol_feature_manager().get_protocol_feature_set(); - for (auto &feature_digest : set_of_features_to_activate) { + for (auto& feature_digest : set_of_features_to_activate) { const auto& pf = pfs.get_protocol_feature(feature_digest); - EOS_ASSERT( !pf.preactivation_required, protocol_feature_exception, - "protocol feature requires preactivation: ${digest}", - ("digest", feature_digest)); + EOS_ASSERT(!pf.preactivation_required, + protocol_feature_exception, + "protocol feature requires preactivation: ${digest}", + ("digest", feature_digest)); } _protocol_features_to_activate = schedule.protocol_features_to_activate; - _protocol_features_signaled = false; + _protocol_features_signaled = false; } -void producer_plugin::schedule_protocol_feature_activations( const scheduled_protocol_feature_activations& schedule ) { +void producer_plugin::schedule_protocol_feature_activations(const scheduled_protocol_feature_activations& schedule) { my->schedule_protocol_feature_activations(schedule); } -fc::variants producer_plugin::get_supported_protocol_features( const get_supported_protocol_features_params& params ) const { - fc::variants results; - const chain::controller& chain = my->chain_plug->chain(); - const auto& pfs = chain.get_protocol_feature_manager().get_protocol_feature_set(); - const auto next_block_time = chain.head_block_time() + fc::milliseconds(config::block_interval_ms); +fc::variants producer_plugin::get_supported_protocol_features(const get_supported_protocol_features_params& params) const { + fc::variants results; + const chain::controller& chain = my->chain_plug->chain(); + const auto& pfs = chain.get_protocol_feature_manager().get_protocol_feature_set(); + const auto next_block_time = chain.head_block_time() + fc::milliseconds(config::block_interval_ms); - flat_map visited_protocol_features; - visited_protocol_features.reserve( pfs.size() ); + flat_map visited_protocol_features; + visited_protocol_features.reserve(pfs.size()); std::function add_feature = - [&results, &pfs, ¶ms, next_block_time, &visited_protocol_features, &add_feature] - ( const protocol_feature& pf ) -> bool { - if( ( params.exclude_disabled || params.exclude_unactivatable ) && !pf.enabled ) return false; - if( params.exclude_unactivatable && ( next_block_time < pf.earliest_allowed_activation_time ) ) return false; + [&results, &pfs, ¶ms, next_block_time, &visited_protocol_features, &add_feature](const protocol_feature& pf) -> bool { + if ((params.exclude_disabled || params.exclude_unactivatable) && !pf.enabled) + return false; + if (params.exclude_unactivatable && (next_block_time < pf.earliest_allowed_activation_time)) + return false; - auto res = visited_protocol_features.emplace( pf.feature_digest, false ); - if( !res.second ) return res.first->second; + auto res = visited_protocol_features.emplace(pf.feature_digest, false); + if (!res.second) + return res.first->second; const auto original_size = results.size(); - for( const auto& dependency : pf.dependencies ) { - if( !add_feature( pfs.get_protocol_feature( dependency ) ) ) { - results.resize( original_size ); + for (const auto& dependency : pf.dependencies) { + if (!add_feature(pfs.get_protocol_feature(dependency))) { + results.resize(original_size); return false; } } res.first->second = true; - results.emplace_back( pf.to_variant(true) ); + results.emplace_back(pf.to_variant(true)); return true; }; - for( const auto& pf : pfs ) { - add_feature( pf ); + 
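The add_feature lambda above is a memoized depth-first walk over feature dependencies that rolls back partially emitted output when a dependency is rejected. A self-contained sketch of that pattern (not part of the patch), using strings in place of digests and a plain map in place of the protocol feature set:

// Illustrative sketch only, not part of the patch.
#include <functional>
#include <map>
#include <string>
#include <vector>

struct feature_info {
   std::string              digest;
   std::vector<std::string> dependencies;
   bool                     enabled = true;
};

// returns digests in dependency order, skipping disabled features and rolling
// back partially-added chains, mirroring the add_feature lambda above
std::vector<std::string> supported_features(const std::map<std::string, feature_info>& all) {
   std::vector<std::string>    results;
   std::map<std::string, bool> visited; // digest -> was it ultimately accepted
   std::function<bool(const feature_info&)> add = [&](const feature_info& f) -> bool {
      if (!f.enabled)
         return false;
      auto res = visited.emplace(f.digest, false);
      if (!res.second)
         return res.first->second;        // already decided (also breaks dependency cycles)
      const auto original_size = results.size();
      for (const auto& dep : f.dependencies) {
         auto it = all.find(dep);
         if (it == all.end() || !add(it->second)) {
            results.resize(original_size); // drop anything added for this feature's chain
            return false;
         }
      }
      res.first->second = true;
      results.push_back(f.digest);
      return true;
   };
   for (const auto& entry : all)
      add(entry.second);
   return results;
}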
for (const auto& pf : pfs) { + add_feature(pf); } return results; } producer_plugin::get_account_ram_corrections_result -producer_plugin::get_account_ram_corrections( const get_account_ram_corrections_params& params ) const { +producer_plugin::get_account_ram_corrections(const get_account_ram_corrections_params& params) const { get_account_ram_corrections_result result; - const auto& db = my->chain_plug->chain().db(); + const auto& db = my->chain_plug->chain().db(); - const auto& idx = db.get_index(); - account_name lower_bound_value{ std::numeric_limits::lowest() }; - account_name upper_bound_value{ std::numeric_limits::max() }; + const auto& idx = db.get_index(); + account_name lower_bound_value{std::numeric_limits::lowest()}; + account_name upper_bound_value{std::numeric_limits::max()}; - if( params.lower_bound ) { + if (params.lower_bound) { lower_bound_value = *params.lower_bound; } - if( params.upper_bound ) { + if (params.upper_bound) { upper_bound_value = *params.upper_bound; } - if( upper_bound_value < lower_bound_value ) + if (upper_bound_value < lower_bound_value) return result; - auto walk_range = [&]( auto itr, auto end_itr ) { - for( unsigned int count = 0; - count < params.limit && itr != end_itr; - ++itr ) - { - result.rows.push_back( fc::variant( *itr ) ); + auto walk_range = [&](auto itr, auto end_itr) { + for (unsigned int count = 0; count < params.limit && itr != end_itr; ++itr) { + result.rows.push_back(fc::variant(*itr)); ++count; } - if( itr != end_itr ) { + if (itr != end_itr) { result.more = itr->name; } }; - auto lower = idx.lower_bound( lower_bound_value ); - auto upper = idx.upper_bound( upper_bound_value ); - if( params.reverse ) { - walk_range( boost::make_reverse_iterator(upper), boost::make_reverse_iterator(lower) ); + auto lower = idx.lower_bound(lower_bound_value); + auto upper = idx.upper_bound(upper_bound_value); + if (params.reverse) { + walk_range(boost::make_reverse_iterator(upper), boost::make_reverse_iterator(lower)); } else { - walk_range( lower, upper ); + walk_range(lower, upper); } return result; } -producer_plugin::get_unapplied_transactions_result -producer_plugin::get_unapplied_transactions( const get_unapplied_transactions_params& p, const fc::time_point& deadline ) const { +producer_plugin::get_unapplied_transactions_result producer_plugin::get_unapplied_transactions(const get_unapplied_transactions_params& p, + const fc::time_point& deadline) const { - fc::time_point params_deadline = p.time_limit_ms ? std::min(fc::time_point::now().safe_add(fc::milliseconds(*p.time_limit_ms)), deadline) : deadline; + fc::time_point params_deadline = + p.time_limit_ms ? std::min(fc::time_point::now().safe_add(fc::milliseconds(*p.time_limit_ms)), deadline) : deadline; auto& ua = my->_unapplied_transactions; - auto itr = ([&](){ + auto itr = ([&]() { if (!p.lower_bound.empty()) { try { - auto trx_id = transaction_id_type( p.lower_bound ); - return ua.lower_bound( trx_id ); - } catch( ... ) { + auto trx_id = transaction_id_type(p.lower_bound); + return ua.lower_bound(trx_id); + } catch (...) 
{ return ua.end(); } } else { @@ -1648,45 +1692,47 @@ producer_plugin::get_unapplied_transactions( const get_unapplied_transactions_pa })(); auto get_trx_type = [&](trx_enum_type t, transaction_metadata::trx_type type) { - if( type == transaction_metadata::trx_type::dry_run ) return "dry_run"; - if( type == transaction_metadata::trx_type::read_only ) return "read_only"; - switch( t ) { - case trx_enum_type::unknown: - return "unknown"; - case trx_enum_type::forked: - return "forked"; - case trx_enum_type::aborted: - return "aborted"; - case trx_enum_type::incoming_api: - return "incoming_api"; - case trx_enum_type::incoming_p2p: - return "incoming_p2p"; + if (type == transaction_metadata::trx_type::dry_run) + return "dry_run"; + if (type == transaction_metadata::trx_type::read_only) + return "read_only"; + switch (t) { + case trx_enum_type::unknown: + return "unknown"; + case trx_enum_type::forked: + return "forked"; + case trx_enum_type::aborted: + return "aborted"; + case trx_enum_type::incoming_api: + return "incoming_api"; + case trx_enum_type::incoming_p2p: + return "incoming_p2p"; } return "unknown type"; }; get_unapplied_transactions_result result; - result.size = ua.size(); + result.size = ua.size(); result.incoming_size = ua.incoming_size(); uint32_t remaining = p.limit ? *p.limit : std::numeric_limits::max(); if (deadline != fc::time_point::maximum() && remaining > 1000) remaining = 1000; while (itr != ua.end() && remaining > 0) { - auto& r = result.trxs.emplace_back(); - r.trx_id = itr->id(); - r.expiration = itr->expiration(); - const auto& pt = itr->trx_meta->packed_trx(); - r.trx_type = get_trx_type( itr->trx_type, itr->trx_meta->get_trx_type() ); - r.first_auth = pt->get_transaction().first_authorizer(); + auto& r = result.trxs.emplace_back(); + r.trx_id = itr->id(); + r.expiration = itr->expiration(); + const auto& pt = itr->trx_meta->packed_trx(); + r.trx_type = get_trx_type(itr->trx_type, itr->trx_meta->get_trx_type()); + r.first_auth = pt->get_transaction().first_authorizer(); const auto& actions = pt->get_transaction().actions; - if( !actions.empty() ) { + if (!actions.empty()) { r.first_receiver = actions[0].account; - r.first_action = actions[0].name; + r.first_action = actions[0].name; } - r.total_actions = pt->get_transaction().total_actions(); + r.total_actions = pt->get_transaction().total_actions(); r.billed_cpu_time_us = itr->trx_meta->billed_cpu_time_us; - r.size = pt->get_estimated_size(); + r.size = pt->get_estimated_size(); ++itr; remaining--; @@ -1703,18 +1749,19 @@ producer_plugin::get_unapplied_transactions( const get_unapplied_transactions_pa uint32_t producer_plugin_impl::calculate_next_block_slot(const account_name& producer_name, uint32_t current_block_slot) const { - chain::controller& chain = chain_plug->chain(); - const auto& hbs = chain.head_block_state(); - const auto& active_schedule = hbs->active_schedule.producers; + chain::controller& chain = chain_plug->chain(); + const auto& hbs = chain.head_block_state(); + const auto& active_schedule = hbs->active_schedule.producers; // determine if this producer is in the active schedule and if so, where - auto itr = std::find_if(active_schedule.begin(), active_schedule.end(), [&](const auto& asp){ return asp.producer_name == producer_name; }); + auto itr = + std::find_if(active_schedule.begin(), active_schedule.end(), [&](const auto& asp) { return asp.producer_name == producer_name; }); if (itr == active_schedule.end()) { // this producer is not in the active producer set return UINT32_MAX; } - size_t 
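The walk_range helper above implements limit-plus-cursor pagination over an index range, optionally in reverse order. A self-contained sketch of the same pattern over a std::map (not part of the patch):

// Illustrative sketch only, not part of the patch.
#include <iterator>
#include <map>
#include <optional>
#include <string>
#include <vector>

struct page {
   std::vector<int>           rows;
   std::optional<std::string> more; // cursor to resume from on the next request
};

page walk_range(const std::map<std::string, int>& idx,
                const std::string& lower, const std::string& upper,
                unsigned limit, bool reverse) {
   page result;
   auto emit = [&](auto itr, auto end_itr) {
      for (unsigned count = 0; count < limit && itr != end_itr; ++itr, ++count)
         result.rows.push_back(itr->second);
      if (itr != end_itr)
         result.more = itr->first; // more rows remain beyond the limit
   };
   auto lower_itr = idx.lower_bound(lower);
   auto upper_itr = idx.upper_bound(upper);
   if (reverse)
      emit(std::make_reverse_iterator(upper_itr), std::make_reverse_iterator(lower_itr));
   else
      emit(lower_itr, upper_itr);
   return result;
}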
producer_index = itr - active_schedule.begin(); + size_t producer_index = itr - active_schedule.begin(); uint32_t minimum_offset = 1; // must at least be the "next" block // account for a watermark in the future which is disqualifying this producer for now @@ -1725,7 +1772,7 @@ uint32_t producer_plugin_impl::calculate_next_block_slot(const account_name& pro auto current_watermark = get_watermark(producer_name); if (current_watermark) { const auto watermark = *current_watermark; - auto block_num = chain.head_block_state()->block_num; + auto block_num = chain.head_block_state()->block_num; if (chain.is_building_block()) { ++block_num; } @@ -1734,15 +1781,16 @@ uint32_t producer_plugin_impl::calculate_next_block_slot(const account_name& pro minimum_offset = watermark.first - block_num + 1; } if (watermark.second.slot > current_block_slot) { - // if I have a watermark block timestamp then I need to wait until after that watermark timestamp - minimum_offset = std::max(minimum_offset, watermark.second.slot - current_block_slot + 1); + // if I have a watermark block timestamp then I need to wait until after that watermark timestamp + minimum_offset = std::max(minimum_offset, watermark.second.slot - current_block_slot + 1); } } // this producers next opportunity to produce is the next time its slot arrives after or at the calculated minimum uint32_t minimum_slot = current_block_slot + minimum_offset; - size_t minimum_slot_producer_index = (minimum_slot % (active_schedule.size() * config::producer_repetitions)) / config::producer_repetitions; - if ( producer_index == minimum_slot_producer_index ) { + size_t minimum_slot_producer_index = + (minimum_slot % (active_schedule.size() * config::producer_repetitions)) / config::producer_repetitions; + if (producer_index == minimum_slot_producer_index) { // this is the producer for the minimum slot, go with that return minimum_slot; } else { @@ -1757,20 +1805,20 @@ uint32_t producer_plugin_impl::calculate_next_block_slot(const account_name& pro uint32_t first_minimum_producer_slot = minimum_slot - (minimum_slot % config::producer_repetitions); // offset the aligned minimum to the *earliest* next set of slots for this producer - uint32_t next_block_slot = first_minimum_producer_slot + (producer_distance * config::producer_repetitions); + uint32_t next_block_slot = first_minimum_producer_slot + (producer_distance * config::producer_repetitions); return next_block_slot; } } block_timestamp_type producer_plugin_impl::calculate_pending_block_time() const { const chain::controller& chain = chain_plug->chain(); - const fc::time_point now = fc::time_point::now(); - const fc::time_point base = std::max(now, chain.head_block_time()); + const fc::time_point now = fc::time_point::now(); + const fc::time_point base = std::max(now, chain.head_block_time()); return block_timestamp_type(base).next(); } -bool producer_plugin_impl::should_interrupt_start_block( const fc::time_point& deadline, uint32_t pending_block_num ) const { - if( _pending_block_mode == pending_block_mode::producing ) { +bool producer_plugin_impl::should_interrupt_start_block(const fc::time_point& deadline, uint32_t pending_block_num) const { + if (_pending_block_mode == pending_block_mode::producing) { return deadline <= fc::time_point::now(); } // if we can produce then honor deadline so production starts on time @@ -1780,20 +1828,20 @@ bool producer_plugin_impl::should_interrupt_start_block( const fc::time_point& d producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { 
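The slot arithmetic reformatted above can be summarized as: find which producer owns the minimum acceptable slot, and if it is not us, jump ahead by the wrapped distance between the two producers in whole windows of config::producer_repetitions blocks. A minimal sketch (not part of the patch), with the repetition count hard-coded to 12 and the schedule size passed in:

// Illustrative sketch only, not part of the patch.
#include <cstddef>
#include <cstdint>

constexpr uint32_t producer_repetitions = 12;

uint32_t next_slot_for_producer(std::size_t producer_index, std::size_t schedule_size, uint32_t minimum_slot) {
   std::size_t min_slot_index = (minimum_slot % (schedule_size * producer_repetitions)) / producer_repetitions;
   if (producer_index == min_slot_index)
      return minimum_slot;                                   // the minimum slot already falls in our window
   // number of producer windows between the minimum slot's producer and us, wrapping around the schedule
   std::size_t distance = (producer_index + schedule_size - min_slot_index) % schedule_size;
   uint32_t aligned = minimum_slot - (minimum_slot % producer_repetitions); // start of the minimum slot's window
   return aligned + static_cast<uint32_t>(distance) * producer_repetitions; // start of our next window
}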
chain::controller& chain = chain_plug->chain(); - if( !chain_plug->accept_transactions() ) + if (!chain_plug->accept_transactions()) return start_block_result::waiting_for_block; const auto& hbs = chain.head_block_state(); - if( chain.get_terminate_at_block() > 0 && chain.get_terminate_at_block() <= chain.head_block_num() ) { + if (chain.get_terminate_at_block() > 0 && chain.get_terminate_at_block() <= chain.head_block_num()) { ilog("Reached configured maximum block ${num}; terminating", ("num", chain.get_terminate_at_block())); app().quit(); return start_block_result::failed; } - const fc::time_point now = fc::time_point::now(); - const block_timestamp_type block_time = calculate_pending_block_time(); - const uint32_t pending_block_num = hbs->block_num + 1; + const fc::time_point now = fc::time_point::now(); + const block_timestamp_type block_time = calculate_pending_block_time(); + const uint32_t pending_block_num = hbs->block_num + 1; _pending_block_mode = pending_block_mode::producing; @@ -1803,9 +1851,9 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { const auto current_watermark = get_watermark(scheduled_producer.producer_name); size_t num_relevant_signatures = 0; - scheduled_producer.for_each_key([&](const public_key_type& key){ + scheduled_producer.for_each_key([&](const public_key_type& key) { const auto& iter = _signature_providers.find(key); - if(iter != _signature_providers.end()) { + if (iter != _signature_providers.end()) { num_relevant_signatures++; } }); @@ -1813,18 +1861,20 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { auto irreversible_block_age = get_irreversible_block_age(); // If the next block production opportunity is in the present or future, we're synced. - if( !_production_enabled ) { + if (!_production_enabled) { _pending_block_mode = pending_block_mode::speculating; - } else if( _producers.find(scheduled_producer.producer_name) == _producers.end()) { + } else if (_producers.find(scheduled_producer.producer_name) == _producers.end()) { _pending_block_mode = pending_block_mode::speculating; } else if (num_relevant_signatures == 0) { - elog("Not producing block because I don't have any private keys relevant to authority: ${authority}", ("authority", scheduled_producer.authority)); + elog("Not producing block because I don't have any private keys relevant to authority: ${authority}", + ("authority", scheduled_producer.authority)); _pending_block_mode = pending_block_mode::speculating; - } else if ( _pause_production ) { + } else if (_pause_production) { elog("Not producing block because production is explicitly paused"); _pending_block_mode = pending_block_mode::speculating; - } else if ( _max_irreversible_block_age_us.count() >= 0 && irreversible_block_age >= _max_irreversible_block_age_us ) { - elog("Not producing block because the irreversible block is too old [age:${age}s, max:${max}s]", ("age", irreversible_block_age.count() / 1'000'000)( "max", _max_irreversible_block_age_us.count() / 1'000'000 )); + } else if (_max_irreversible_block_age_us.count() >= 0 && irreversible_block_age >= _max_irreversible_block_age_us) { + elog("Not producing block because the irreversible block is too old [age:${age}s, max:${max}s]", + ("age", irreversible_block_age.count() / 1'000'000)("max", _max_irreversible_block_age_us.count() / 1'000'000)); _pending_block_mode = pending_block_mode::speculating; } @@ -1833,16 +1883,15 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { if 
(current_watermark) { const block_timestamp_type block_timestamp{block_time}; if (current_watermark->first > hbs->block_num) { - elog("Not producing block because \"${producer}\" signed a block at a higher block number (${watermark}) than the current fork's head (${head_block_num})", - ("producer", scheduled_producer.producer_name) - ("watermark", current_watermark->first) - ("head_block_num", hbs->block_num)); + elog("Not producing block because \"${producer}\" signed a block at a higher block number (${watermark}) than the current " + "fork's head (${head_block_num})", + ("producer", scheduled_producer.producer_name)("watermark", current_watermark->first)("head_block_num", hbs->block_num)); _pending_block_mode = pending_block_mode::speculating; } else if (current_watermark->second >= block_timestamp) { - elog("Not producing block because \"${producer}\" signed a block at the next block time or later (${watermark}) than the pending block time (${block_timestamp})", - ("producer", scheduled_producer.producer_name) - ("watermark", current_watermark->second) - ("block_timestamp", block_timestamp)); + elog( + "Not producing block because \"${producer}\" signed a block at the next block time or later (${watermark}) than the pending " + "block time (${block_timestamp})", + ("producer", scheduled_producer.producer_name)("watermark", current_watermark->second)("block_timestamp", block_timestamp)); _pending_block_mode = pending_block_mode::speculating; } } @@ -1854,15 +1903,15 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { return start_block_result::waiting_for_block; } - _pending_block_deadline = block_timing_util::calculate_block_deadline(_cpu_effort_us, _pending_block_mode, block_time); - auto preprocess_deadline = _pending_block_deadline; + _pending_block_deadline = block_timing_util::calculate_block_deadline(_cpu_effort_us, _pending_block_mode, block_time); + auto preprocess_deadline = _pending_block_deadline; uint32_t production_round_index = block_timestamp_type(block_time).slot % chain::config::producer_repetitions; if (production_round_index == 0) { - // first block of our round, wait for block production window - const auto start_block_time = block_time.to_time_point() - fc::microseconds( config::block_interval_us ); + // first block of our round, wait for block production window + const auto start_block_time = block_time.to_time_point() - fc::microseconds(config::block_interval_us); if (now < start_block_time) { - fc_dlog( _log, "Not starting block until ${bt}", ("bt", start_block_time) ); - schedule_delayed_production_loop( weak_from_this(), start_block_time ); + fc_dlog(_log, "Not starting block until ${bt}", ("bt", start_block_time)); + schedule_delayed_production_loop(weak_from_this(), start_block_time); return start_block_result::waiting_for_production; } } @@ -1883,69 +1932,71 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { if (current_watermark) { auto watermark_bn = current_watermark->first; if (watermark_bn < hbs->block_num) { - blocks_to_confirm = (uint16_t)(std::min(std::numeric_limits::max(), (uint32_t)(hbs->block_num - watermark_bn))); + blocks_to_confirm = + (uint16_t)(std::min(std::numeric_limits::max(), (uint32_t)(hbs->block_num - watermark_bn))); } } // can not confirm irreversible blocks - blocks_to_confirm = (uint16_t)(std::min(blocks_to_confirm, (uint32_t)(hbs->block_num - hbs->dpos_irreversible_blocknum))); + blocks_to_confirm = + (uint16_t)(std::min(blocks_to_confirm, (uint32_t)(hbs->block_num - 
hbs->dpos_irreversible_blocknum))); } abort_block(); auto features_to_activate = chain.get_preactivated_protocol_features(); - if( _pending_block_mode == pending_block_mode::producing && _protocol_features_to_activate.size() > 0 ) { + if (_pending_block_mode == pending_block_mode::producing && _protocol_features_to_activate.size() > 0) { bool drop_features_to_activate = false; try { - chain.validate_protocol_features( _protocol_features_to_activate ); - } catch ( const std::bad_alloc& ) { - chain_apis::api_base::handle_bad_alloc(); - } catch ( const boost::interprocess::bad_alloc& ) { - chain_apis::api_base::handle_bad_alloc(); - } catch( const fc::exception& e ) { - wlog( "protocol features to activate are no longer all valid: ${details}", - ("details",e.to_detail_string()) ); + chain.validate_protocol_features(_protocol_features_to_activate); + } catch (const std::bad_alloc&) { + chain_apis::api_base::handle_bad_alloc(); + } catch (const boost::interprocess::bad_alloc&) { + chain_apis::api_base::handle_bad_alloc(); + } catch (const fc::exception& e) { + wlog("protocol features to activate are no longer all valid: ${details}", ("details", e.to_detail_string())); drop_features_to_activate = true; - } catch( const std::exception& e ) { - wlog( "protocol features to activate are no longer all valid: ${details}", - ("details",fc::std_exception_wrapper::from_current_exception(e).to_detail_string()) ); + } catch (const std::exception& e) { + wlog("protocol features to activate are no longer all valid: ${details}", + ("details", fc::std_exception_wrapper::from_current_exception(e).to_detail_string())); drop_features_to_activate = true; } - if( drop_features_to_activate ) { + if (drop_features_to_activate) { _protocol_features_to_activate.clear(); } else { auto protocol_features_to_activate = _protocol_features_to_activate; // do a copy as pending_block might be aborted - if( features_to_activate.size() > 0 ) { - protocol_features_to_activate.reserve( protocol_features_to_activate.size() - + features_to_activate.size() ); - std::set set_of_features_to_activate( protocol_features_to_activate.begin(), - protocol_features_to_activate.end() ); - for( const auto& f : features_to_activate ) { - auto res = set_of_features_to_activate.insert( f ); - if( res.second ) { - protocol_features_to_activate.push_back( f ); + if (features_to_activate.size() > 0) { + protocol_features_to_activate.reserve(protocol_features_to_activate.size() + features_to_activate.size()); + std::set set_of_features_to_activate(protocol_features_to_activate.begin(), + protocol_features_to_activate.end()); + for (const auto& f : features_to_activate) { + auto res = set_of_features_to_activate.insert(f); + if (res.second) { + protocol_features_to_activate.push_back(f); } } features_to_activate.clear(); } - std::swap( features_to_activate, protocol_features_to_activate ); + std::swap(features_to_activate, protocol_features_to_activate); _protocol_features_signaled = true; - ilog( "signaling activation of the following protocol features in block ${num}: ${features_to_activate}", - ("num", pending_block_num)("features_to_activate", features_to_activate) ); + ilog("signaling activation of the following protocol features in block ${num}: ${features_to_activate}", + ("num", pending_block_num)("features_to_activate", features_to_activate)); } } - controller::block_status bs = _pending_block_mode == pending_block_mode::producing ? 
- controller::block_status::incomplete : controller::block_status::ephemeral; - chain.start_block( block_time, blocks_to_confirm, features_to_activate, bs, preprocess_deadline ); - } LOG_AND_DROP(); + controller::block_status bs = + _pending_block_mode == pending_block_mode::producing ? controller::block_status::incomplete : controller::block_status::ephemeral; + chain.start_block(block_time, blocks_to_confirm, features_to_activate, bs, preprocess_deadline); + } + LOG_AND_DROP(); - if( chain.is_building_block() ) { + if (chain.is_building_block()) { const auto& pending_block_signing_authority = chain.pending_block_signing_authority(); if (_pending_block_mode == pending_block_mode::producing && pending_block_signing_authority != scheduled_producer.authority) { - elog("Unexpected block signing authority, reverting to speculative mode! [expected: \"${expected}\", actual: \"${actual\"", ("expected", scheduled_producer.authority)("actual", pending_block_signing_authority)); + elog("Unexpected block signing authority, reverting to speculative mode! [expected: \"${expected}\", actual: \"${actual\"", + ("expected", scheduled_producer.authority)("actual", pending_block_signing_authority)); _pending_block_mode = pending_block_mode::speculating; } @@ -1954,12 +2005,13 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { _account_fails.report_and_clear(hbs->block_num, subjective_bill); _time_tracker.clear(); - if( !remove_expired_trxs( preprocess_deadline ) ) + if (!remove_expired_trxs(preprocess_deadline)) return start_block_result::exhausted; - if( !remove_expired_blacklisted_trxs( preprocess_deadline ) ) + if (!remove_expired_blacklisted_trxs(preprocess_deadline)) return start_block_result::exhausted; - if( !subjective_bill.remove_expired( _log, chain.pending_block_time(), fc::time_point::now(), - [&](){ return should_interrupt_start_block( preprocess_deadline, pending_block_num ); } ) ) { + if (!subjective_bill.remove_expired(_log, chain.pending_block_time(), fc::time_point::now(), [&]() { + return should_interrupt_start_block(preprocess_deadline, pending_block_num); + })) { return start_block_result::exhausted; } @@ -1967,88 +2019,88 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { auto incoming_itr = _unapplied_transactions.incoming_begin(); if (_pending_block_mode == pending_block_mode::producing) { - if( !process_unapplied_trxs( preprocess_deadline ) ) + if (!process_unapplied_trxs(preprocess_deadline)) return start_block_result::exhausted; auto scheduled_trx_deadline = preprocess_deadline; if (_max_scheduled_transaction_time_per_block_ms >= 0) { scheduled_trx_deadline = std::min( - scheduled_trx_deadline, - fc::time_point::now() + fc::milliseconds(_max_scheduled_transaction_time_per_block_ms) - ); + scheduled_trx_deadline, fc::time_point::now() + fc::milliseconds(_max_scheduled_transaction_time_per_block_ms)); } // may exhaust scheduled_trx_deadline but not preprocess_deadline, exhausted preprocess_deadline checked below - process_scheduled_and_incoming_trxs( scheduled_trx_deadline, incoming_itr ); + process_scheduled_and_incoming_trxs(scheduled_trx_deadline, incoming_itr); } - repost_exhausted_transactions( preprocess_deadline ); + repost_exhausted_transactions(preprocess_deadline); - if( app().is_quiting() ) // db guard exception above in LOG_AND_DROP could have called app().quit() + if (app().is_quiting()) // db guard exception above in LOG_AND_DROP could have called app().quit() return start_block_result::failed; - if ( 
should_interrupt_start_block( preprocess_deadline, pending_block_num ) || block_is_exhausted() ) { + if (should_interrupt_start_block(preprocess_deadline, pending_block_num) || block_is_exhausted()) { return start_block_result::exhausted; } - if( !process_incoming_trxs( preprocess_deadline, incoming_itr ) ) + if (!process_incoming_trxs(preprocess_deadline, incoming_itr)) return start_block_result::exhausted; return start_block_result::succeeded; - } catch ( const guard_exception& e ) { + } catch (const guard_exception& e) { chain_plugin::handle_guard_exception(e); return start_block_result::failed; - } catch ( std::bad_alloc& ) { + } catch (std::bad_alloc&) { chain_apis::api_base::handle_bad_alloc(); - } catch ( boost::interprocess::bad_alloc& ) { + } catch (boost::interprocess::bad_alloc&) { chain_apis::api_base::handle_db_exhaustion(); } - } return start_block_result::failed; } -bool producer_plugin_impl::remove_expired_trxs( const fc::time_point& deadline ) -{ - chain::controller& chain = chain_plug->chain(); - auto pending_block_time = chain.pending_block_time(); - auto pending_block_num = chain.pending_block_num(); +bool producer_plugin_impl::remove_expired_trxs(const fc::time_point& deadline) { + chain::controller& chain = chain_plug->chain(); + auto pending_block_time = chain.pending_block_time(); + auto pending_block_num = chain.pending_block_num(); // remove all expired transactions size_t num_expired = 0; - size_t orig_count = _unapplied_transactions.size(); - bool exhausted = !_unapplied_transactions.clear_expired( pending_block_time, [&](){ return should_interrupt_start_block(deadline, pending_block_num); }, - [&num_expired]( const packed_transaction_ptr& packed_trx_ptr, trx_enum_type trx_type ) { - // expired exception is logged as part of next() call - ++num_expired; - }); + size_t orig_count = _unapplied_transactions.size(); + bool exhausted = !_unapplied_transactions.clear_expired( + pending_block_time, + [&]() { return should_interrupt_start_block(deadline, pending_block_num); }, + [&num_expired](const packed_transaction_ptr& packed_trx_ptr, trx_enum_type trx_type) { + // expired exception is logged as part of next() call + ++num_expired; + }); - if( exhausted && _pending_block_mode == pending_block_mode::producing ) { - fc_wlog( _log, "Unable to process all expired transactions of the ${n} transactions in the unapplied queue before deadline, " - "Expired ${expired}", ("n", orig_count)("expired", num_expired) ); + if (exhausted && _pending_block_mode == pending_block_mode::producing) { + fc_wlog(_log, + "Unable to process all expired transactions of the ${n} transactions in the unapplied queue before deadline, " + "Expired ${expired}", + ("n", orig_count)("expired", num_expired)); } else { - fc_dlog( _log, "Processed ${ex} expired transactions of the ${n} transactions in the unapplied queue.", - ("n", orig_count)("ex", num_expired) ); + fc_dlog(_log, + "Processed ${ex} expired transactions of the ${n} transactions in the unapplied queue.", + ("n", orig_count)("ex", num_expired)); } return !exhausted; } -bool producer_plugin_impl::remove_expired_blacklisted_trxs( const fc::time_point& deadline ) -{ - bool exhausted = false; +bool producer_plugin_impl::remove_expired_blacklisted_trxs(const fc::time_point& deadline) { + bool exhausted = false; auto& blacklist_by_expiry = _blacklisted_transactions.get(); - if(!blacklist_by_expiry.empty()) { - const chain::controller& chain = chain_plug->chain(); - const auto lib_time = chain.last_irreversible_block_time(); - const auto 
pending_block_num = chain.pending_block_num(); + if (!blacklist_by_expiry.empty()) { + const chain::controller& chain = chain_plug->chain(); + const auto lib_time = chain.last_irreversible_block_time(); + const auto pending_block_num = chain.pending_block_num(); int num_expired = 0; - int orig_count = _blacklisted_transactions.size(); + int orig_count = _blacklisted_transactions.size(); while (!blacklist_by_expiry.empty() && blacklist_by_expiry.begin()->expiry <= lib_time) { - if ( should_interrupt_start_block( deadline, pending_block_num ) ) { + if (should_interrupt_start_block(deadline, pending_block_num)) { exhausted = true; break; } @@ -2056,66 +2108,59 @@ bool producer_plugin_impl::remove_expired_blacklisted_trxs( const fc::time_point num_expired++; } - fc_dlog(_log, "Processed ${n} blacklisted transactions, Expired ${expired}", - ("n", orig_count)("expired", num_expired)); + fc_dlog(_log, "Processed ${n} blacklisted transactions, Expired ${expired}", ("n", orig_count)("expired", num_expired)); } return !exhausted; } // Returns contract name, action name, and exception text of an exception that occurred in a contract inline std::string get_detailed_contract_except_info(const packed_transaction_ptr& trx, - const transaction_trace_ptr& trace, - const fc::exception_ptr& except_ptr) -{ + const transaction_trace_ptr& trace, + const fc::exception_ptr& except_ptr) { std::string contract_name; std::string act_name; - if( trace && !trace->action_traces.empty() ) { + if (trace && !trace->action_traces.empty()) { auto last_action_ordinal = trace->action_traces.size() - 1; - contract_name = trace->action_traces[last_action_ordinal].receiver.to_string(); - act_name = trace->action_traces[last_action_ordinal].act.name.to_string(); - } else if ( trx ) { + contract_name = trace->action_traces[last_action_ordinal].receiver.to_string(); + act_name = trace->action_traces[last_action_ordinal].act.name.to_string(); + } else if (trx) { const auto& actions = trx->get_transaction().actions; - if( actions.empty() ) return {}; // should not be possible + if (actions.empty()) + return {}; // should not be possible contract_name = actions[0].account.to_string(); - act_name = actions[0].name.to_string(); + act_name = actions[0].name.to_string(); } - std::string details = except_ptr ? except_ptr->top_message() - : ((trace && trace->except) ? trace->except->top_message() - : std::string()); + std::string details = except_ptr ? except_ptr->top_message() : ((trace && trace->except) ? trace->except->top_message() : std::string()); fc::escape_str(details, fc::escape_control_chars::on, 1024); // this format is parsed by external tools return "action: " + contract_name + ":" + act_name + ", " + details; } -void producer_plugin_impl::log_trx_results( const transaction_metadata_ptr& trx, - const transaction_trace_ptr& trace, - const fc::time_point& start ) -{ +void producer_plugin_impl::log_trx_results(const transaction_metadata_ptr& trx, + const transaction_trace_ptr& trace, + const fc::time_point& start) { uint32_t billed_cpu_time_us = (trace && trace->receipt) ? 
trace->receipt->cpu_usage_us : 0; - log_trx_results( trx->packed_trx(), trace, nullptr, billed_cpu_time_us, start, trx->is_transient() ); + log_trx_results(trx->packed_trx(), trace, nullptr, billed_cpu_time_us, start, trx->is_transient()); } -void producer_plugin_impl::log_trx_results( const transaction_metadata_ptr& trx, - const fc::exception_ptr& except_ptr ) -{ +void producer_plugin_impl::log_trx_results(const transaction_metadata_ptr& trx, const fc::exception_ptr& except_ptr) { uint32_t billed_cpu_time_us = trx ? trx->billed_cpu_time_us : 0; - log_trx_results( trx->packed_trx(), nullptr, except_ptr, billed_cpu_time_us, fc::time_point::now(), trx->is_transient() ); + log_trx_results(trx->packed_trx(), nullptr, except_ptr, billed_cpu_time_us, fc::time_point::now(), trx->is_transient()); } -void producer_plugin_impl::log_trx_results( const packed_transaction_ptr& trx, - const transaction_trace_ptr& trace, - const fc::exception_ptr& except_ptr, - uint32_t billed_cpu_us, - const fc::time_point& start, - bool is_transient ) -{ +void producer_plugin_impl::log_trx_results(const packed_transaction_ptr& trx, + const transaction_trace_ptr& trace, + const fc::exception_ptr& except_ptr, + uint32_t billed_cpu_us, + const fc::time_point& start, + bool is_transient) { chain::controller& chain = chain_plug->chain(); auto get_trace = [&](const transaction_trace_ptr& trace, const fc::exception_ptr& except_ptr) -> fc::variant { - if( trace ) { - return chain_plug->get_log_trx_trace( trace ); + if (trace) { + return chain_plug->get_log_trx_trace(trace); } else { return fc::variant{except_ptr}; } @@ -2124,55 +2169,63 @@ void producer_plugin_impl::log_trx_results( const packed_transaction_ptr& trx, bool except = except_ptr || (trace && trace->except); if (except) { if (_pending_block_mode == pending_block_mode::producing) { - fc_dlog( is_transient ? _transient_trx_failed_trace_log : _trx_failed_trace_log, - "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING ${desc}tx: ${txid}, auth: ${a}, ${details}", - ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) - ("desc", is_transient ? "transient " : "")("txid", trx->id()) - ("a", trx->get_transaction().first_authorizer()) - ("details", get_detailed_contract_except_info(trx, trace, except_ptr))); - - if ( !is_transient ) { - fc_dlog(_trx_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING tx: ${trx}", - ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) - ("trx", chain_plug->get_log_trx(trx->get_transaction()))); - fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING tx: ${entire_trace}", - ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) - ("entire_trace", get_trace(trace, except_ptr))); + fc_dlog(is_transient ? _transient_trx_failed_trace_log : _trx_failed_trace_log, + "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING ${desc}tx: ${txid}, auth: ${a}, ${details}", + ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer())("desc", is_transient ? 
"transient " : "") + ("txid", trx->id())("a", trx->get_transaction().first_authorizer()) + ("details", get_detailed_contract_except_info(trx, trace, except_ptr))); + + if (!is_transient) { + fc_dlog(_trx_log, + "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING tx: ${trx}", + ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) + ("trx", chain_plug->get_log_trx(trx->get_transaction()))); + fc_dlog(_trx_trace_failure_log, + "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING tx: ${entire_trace}", + ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) + ("entire_trace", get_trace(trace, except_ptr))); } } else { - fc_dlog( is_transient ? _transient_trx_failed_trace_log : _trx_failed_trace_log, "[TRX_TRACE] Speculative execution is REJECTING ${desc}tx: ${txid}, auth: ${a} : ${details}", - ("desc", is_transient ? "transient " : "") - ("txid", trx->id())("a", trx->get_transaction().first_authorizer()) - ("details", get_detailed_contract_except_info(trx, trace, except_ptr))); - if ( !is_transient ) { - fc_dlog(_trx_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${trx} ", + fc_dlog(is_transient ? _transient_trx_failed_trace_log : _trx_failed_trace_log, + "[TRX_TRACE] Speculative execution is REJECTING ${desc}tx: ${txid}, auth: ${a} : ${details}", + ("desc", is_transient ? "transient " : "")("txid", trx->id()) + ("a", trx->get_transaction().first_authorizer())("details", get_detailed_contract_except_info(trx, trace, except_ptr))); + if (!is_transient) { + fc_dlog(_trx_log, + "[TRX_TRACE] Speculative execution is REJECTING tx: ${trx} ", ("trx", chain_plug->get_log_trx(trx->get_transaction()))); - fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${entire_trace} ", + fc_dlog(_trx_trace_failure_log, + "[TRX_TRACE] Speculative execution is REJECTING tx: ${entire_trace} ", ("entire_trace", get_trace(trace, except_ptr))); } } } else { if (_pending_block_mode == pending_block_mode::producing) { - fc_dlog( is_transient ? _transient_trx_successful_trace_log : _trx_successful_trace_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING ${desc}tx: ${txid}, auth: ${a}, cpu: ${cpu}", - ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer())("desc", is_transient ? "transient " : "")("txid", trx->id()) - ("a", trx->get_transaction().first_authorizer())("cpu", billed_cpu_us)); - if ( !is_transient ) { - fc_dlog(_trx_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING tx: ${trx}", + fc_dlog(is_transient ? _transient_trx_successful_trace_log : _trx_successful_trace_log, + "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING ${desc}tx: ${txid}, auth: ${a}, cpu: ${cpu}", + ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer())("desc", is_transient ? 
"transient " : "") + ("txid", trx->id())("a", trx->get_transaction().first_authorizer())("cpu", billed_cpu_us)); + if (!is_transient) { + fc_dlog(_trx_log, + "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING tx: ${trx}", ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) ("trx", chain_plug->get_log_trx(trx->get_transaction()))); - fc_dlog(_trx_trace_success_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING tx: ${entire_trace}", + fc_dlog(_trx_trace_success_log, + "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING tx: ${entire_trace}", ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) ("entire_trace", get_trace(trace, except_ptr))); } } else { - fc_dlog( is_transient ? _transient_trx_successful_trace_log : _trx_successful_trace_log, "[TRX_TRACE] Speculative execution is ACCEPTING ${desc}tx: ${txid}, auth: ${a}, cpu: ${cpu}", - ("desc", is_transient ? "transient " : "") - ("txid", trx->id())("a", trx->get_transaction().first_authorizer()) - ("cpu", billed_cpu_us)); - if ( !is_transient ) { - fc_dlog(_trx_log, "[TRX_TRACE] Speculative execution is ACCEPTING tx: ${trx}", + fc_dlog(is_transient ? _transient_trx_successful_trace_log : _trx_successful_trace_log, + "[TRX_TRACE] Speculative execution is ACCEPTING ${desc}tx: ${txid}, auth: ${a}, cpu: ${cpu}", + ("desc", is_transient ? "transient " : "")("txid", trx->id())("a", trx->get_transaction().first_authorizer()) + ("cpu", billed_cpu_us)); + if (!is_transient) { + fc_dlog(_trx_log, + "[TRX_TRACE] Speculative execution is ACCEPTING tx: ${trx}", ("trx", chain_plug->get_log_trx(trx->get_transaction()))); - fc_dlog(_trx_trace_success_log, "[TRX_TRACE] Speculative execution is ACCEPTING tx: ${entire_trace}", + fc_dlog(_trx_trace_success_log, + "[TRX_TRACE] Speculative execution is ACCEPTING tx: ${entire_trace}", ("entire_trace", get_trace(trace, except_ptr))); } } @@ -2180,212 +2233,218 @@ void producer_plugin_impl::log_trx_results( const packed_transaction_ptr& trx, } // Does not modify unapplied_transaction_queue -producer_plugin_impl::push_result -producer_plugin_impl::push_transaction( const fc::time_point& block_deadline, - const transaction_metadata_ptr& trx, - bool api_trx, - bool return_failure_trace, - const next_function& next ) -{ +producer_plugin_impl::push_result producer_plugin_impl::push_transaction(const fc::time_point& block_deadline, + const transaction_metadata_ptr& trx, + bool api_trx, + bool return_failure_trace, + const next_function& next) { auto start = fc::time_point::now(); EOS_ASSERT(!trx->is_read_only(), producer_exception, "Unexpected read-only trx"); - chain::controller& chain = chain_plug->chain(); + chain::controller& chain = chain_plug->chain(); chain::subjective_billing& subjective_bill = chain.get_mutable_subjective_billing(); auto first_auth = trx->packed_trx()->get_transaction().first_authorizer(); - bool disable_subjective_enforcement = (api_trx && _disable_subjective_api_billing) - || (!api_trx && _disable_subjective_p2p_billing) - || subjective_bill.is_account_disabled( first_auth ) - || trx->is_transient(); + bool disable_subjective_enforcement = (api_trx && _disable_subjective_api_billing) || (!api_trx && _disable_subjective_p2p_billing) || + subjective_bill.is_account_disabled(first_auth) || trx->is_transient(); - if( !disable_subjective_enforcement && _account_fails.failure_limit( first_auth ) ) { - if( next ) { - auto except_ptr = std::static_pointer_cast( std::make_shared( - FC_LOG_MESSAGE( 
error, "transaction ${id} exceeded failure limit for account ${a} until ${next_reset_time}", - ("id", trx->id())( "a", first_auth ) - ("next_reset_time", _account_fails.next_reset_timepoint(chain.head_block_num(),chain.head_block_time()))) ) ); - log_trx_results( trx, except_ptr ); - next( except_ptr ); + if (!disable_subjective_enforcement && _account_fails.failure_limit(first_auth)) { + if (next) { + auto except_ptr = std::static_pointer_cast(std::make_shared( + FC_LOG_MESSAGE(error, + "transaction ${id} exceeded failure limit for account ${a} until ${next_reset_time}", + ("id", trx->id())("a", first_auth)( + "next_reset_time", _account_fails.next_reset_timepoint(chain.head_block_num(), chain.head_block_time()))))); + log_trx_results(trx, except_ptr); + next(except_ptr); } _time_tracker.add_fail_time(fc::time_point::now() - start, trx->is_transient()); return push_result{.failed = true}; } - fc::microseconds max_trx_time = fc::milliseconds( _max_transaction_time_ms.load() ); - if( max_trx_time.count() < 0 ) max_trx_time = fc::microseconds::maximum(); + fc::microseconds max_trx_time = fc::milliseconds(_max_transaction_time_ms.load()); + if (max_trx_time.count() < 0) + max_trx_time = fc::microseconds::maximum(); int64_t sub_bill = 0; - if( !disable_subjective_enforcement ) - sub_bill = subjective_bill.get_subjective_bill( first_auth, fc::time_point::now() ); + if (!disable_subjective_enforcement) + sub_bill = subjective_bill.get_subjective_bill(first_auth, fc::time_point::now()); auto prev_billed_cpu_time_us = trx->billed_cpu_time_us; - if( _pending_block_mode == pending_block_mode::producing && prev_billed_cpu_time_us > 0 ) { + if (_pending_block_mode == pending_block_mode::producing && prev_billed_cpu_time_us > 0) { const auto& rl = chain.get_resource_limits_manager(); - if ( !subjective_bill.is_account_disabled( first_auth ) && !rl.is_unlimited_cpu( first_auth ) ) { - int64_t prev_billed_plus100_us = prev_billed_cpu_time_us + EOS_PERCENT( prev_billed_cpu_time_us, 100 * config::percent_1 ); - if( prev_billed_plus100_us < max_trx_time.count() ) max_trx_time = fc::microseconds( prev_billed_plus100_us ); + if (!subjective_bill.is_account_disabled(first_auth) && !rl.is_unlimited_cpu(first_auth)) { + int64_t prev_billed_plus100_us = prev_billed_cpu_time_us + EOS_PERCENT(prev_billed_cpu_time_us, 100 * config::percent_1); + if (prev_billed_plus100_us < max_trx_time.count()) + max_trx_time = fc::microseconds(prev_billed_plus100_us); } } - auto trace = chain.push_transaction( trx, block_deadline, max_trx_time, prev_billed_cpu_time_us, false, sub_bill ); + auto trace = chain.push_transaction(trx, block_deadline, max_trx_time, prev_billed_cpu_time_us, false, sub_bill); - return handle_push_result(trx, next, start, chain, trace, return_failure_trace, disable_subjective_enforcement, first_auth, sub_bill, prev_billed_cpu_time_us); + return handle_push_result( + trx, next, start, chain, trace, return_failure_trace, disable_subjective_enforcement, first_auth, sub_bill, prev_billed_cpu_time_us); } -producer_plugin_impl::push_result -producer_plugin_impl::handle_push_result( const transaction_metadata_ptr& trx, - const next_function& next, - const fc::time_point& start, - chain::controller& chain, - const transaction_trace_ptr& trace, - bool return_failure_trace, - bool disable_subjective_enforcement, - account_name first_auth, - int64_t sub_bill, - uint32_t prev_billed_cpu_time_us) { - auto end = fc::time_point::now(); +producer_plugin_impl::push_result producer_plugin_impl::handle_push_result(const 
transaction_metadata_ptr& trx, + const next_function& next, + const fc::time_point& start, + chain::controller& chain, + const transaction_trace_ptr& trace, + bool return_failure_trace, + bool disable_subjective_enforcement, + account_name first_auth, + int64_t sub_bill, + uint32_t prev_billed_cpu_time_us) { + auto end = fc::time_point::now(); chain::subjective_billing& subjective_bill = chain.get_mutable_subjective_billing(); push_result pr; - if( trace->except ) { + if (trace->except) { // Transient trxs are dry-run or read-only. // Dry-run trxs only run in write window. Read-only trxs can run in // both write and read windows; time spent in read window is counted // by read window summary. - if ( chain.is_write_window() ) { + if (chain.is_write_window()) { auto dur = end - start; _time_tracker.add_fail_time(dur, trx->is_transient()); } - if( exception_is_exhausted( *trace->except ) ) { - if( _pending_block_mode == pending_block_mode::producing ) { - fc_dlog(trx->is_transient() ? _transient_trx_failed_trace_log : _trx_failed_trace_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} COULD NOT FIT, tx: ${txid} RETRYING ", + if (exception_is_exhausted(*trace->except)) { + if (_pending_block_mode == pending_block_mode::producing) { + fc_dlog(trx->is_transient() ? _transient_trx_failed_trace_log : _trx_failed_trace_log, + "[TRX_TRACE] Block ${block_num} for producer ${prod} COULD NOT FIT, tx: ${txid} RETRYING ", ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer())("txid", trx->id())); } else { - fc_dlog(trx->is_transient() ? _transient_trx_failed_trace_log : _trx_failed_trace_log, "[TRX_TRACE] Speculative execution COULD NOT FIT tx: ${txid} RETRYING", ("txid", trx->id())); + fc_dlog(trx->is_transient() ? _transient_trx_failed_trace_log : _trx_failed_trace_log, + "[TRX_TRACE] Speculative execution COULD NOT FIT tx: ${txid} RETRYING", + ("txid", trx->id())); } - if ( !trx->is_read_only() ) + if (!trx->is_read_only()) pr.block_exhausted = block_is_exhausted(); // smaller trx might fit pr.trx_exhausted = true; } else { - pr.failed = true; + pr.failed = true; const fc::exception& e = *trace->except; - if( e.code() != tx_duplicate::code_value ) { - fc_tlog( _log, "Subjective bill for failed ${a}: ${b} elapsed ${t}us, time ${r}us", - ("a",first_auth)("b",sub_bill)("t",trace->elapsed)("r", end - start)); + if (e.code() != tx_duplicate::code_value) { + fc_tlog(_log, + "Subjective bill for failed ${a}: ${b} elapsed ${t}us, time ${r}us", + ("a", first_auth)("b", sub_bill)("t", trace->elapsed)("r", end - start)); if (!disable_subjective_enforcement) // subjectively bill failure when producing since not in objective cpu account billing - subjective_bill.subjective_bill_failure( first_auth, trace->elapsed, fc::time_point::now() ); + subjective_bill.subjective_bill_failure(first_auth, trace->elapsed, fc::time_point::now()); - log_trx_results( trx, trace, start ); + log_trx_results(trx, trace, start); // this failed our configured maximum transaction time, we don't want to replay it - fc_tlog( _log, "Failed ${c} trx, auth: ${a}, prev billed: ${p}us, ran: ${r}us, id: ${id}, except: ${e}", - ("c", e.code())("a", first_auth)("p", prev_billed_cpu_time_us) - ( "r", end - start)("id", trx->id())("e", e) ); - if( !disable_subjective_enforcement ) - _account_fails.add( first_auth, e ); + fc_tlog(_log, + "Failed ${c} trx, auth: ${a}, prev billed: ${p}us, ran: ${r}us, id: ${id}, except: ${e}", + ("c", e.code())("a", first_auth)("p", prev_billed_cpu_time_us)("r", end - start)("id", 
trx->id())("e", e)); + if (!disable_subjective_enforcement) + _account_fails.add(first_auth, e); } - if( next ) { - if( return_failure_trace ) { - next( trace ); + if (next) { + if (return_failure_trace) { + next(trace); } else { auto e_ptr = trace->except->dynamic_copy_exception(); - next( e_ptr ); + next(e_ptr); } } } } else { - fc_tlog( _log, "Subjective bill for success ${a}: ${b} elapsed ${t}us, time ${r}us", - ("a",first_auth)("b",sub_bill)("t",trace->elapsed)("r", end - start)); + fc_tlog(_log, + "Subjective bill for success ${a}: ${b} elapsed ${t}us, time ${r}us", + ("a", first_auth)("b", sub_bill)("t", trace->elapsed)("r", end - start)); // Transient trxs are dry-run or read-only. // Dry-run trxs only run in write window. Read-only trxs can run in // both write and read windows; time spent in read window is counted // by read window summary. - if ( chain.is_write_window() ) { + if (chain.is_write_window()) { auto dur = end - start; _time_tracker.add_success_time(dur, trx->is_transient()); } - log_trx_results( trx, trace, start ); + log_trx_results(trx, trace, start); // if producing then trx is in objective cpu account billing if (!disable_subjective_enforcement && _pending_block_mode != pending_block_mode::producing) { - subjective_bill.subjective_bill( trx->id(), trx->packed_trx()->expiration(), first_auth, trace->elapsed ); + subjective_bill.subjective_bill(trx->id(), trx->packed_trx()->expiration(), first_auth, trace->elapsed); } - if( next ) next( trace ); + if (next) + next(trace); } return pr; } -bool producer_plugin_impl::process_unapplied_trxs( const fc::time_point& deadline ) -{ +bool producer_plugin_impl::process_unapplied_trxs(const fc::time_point& deadline) { bool exhausted = false; - if( !_unapplied_transactions.empty() ) { - const chain::controller& chain = chain_plug->chain(); - const auto pending_block_num = chain.pending_block_num(); - int num_applied = 0, num_failed = 0, num_processed = 0; - auto unapplied_trxs_size = _unapplied_transactions.size(); - auto itr = _unapplied_transactions.unapplied_begin(); - auto end_itr = _unapplied_transactions.unapplied_end(); - while( itr != end_itr ) { - if( should_interrupt_start_block( deadline, pending_block_num ) ) { + if (!_unapplied_transactions.empty()) { + const chain::controller& chain = chain_plug->chain(); + const auto pending_block_num = chain.pending_block_num(); + int num_applied = 0, num_failed = 0, num_processed = 0; + auto unapplied_trxs_size = _unapplied_transactions.size(); + auto itr = _unapplied_transactions.unapplied_begin(); + auto end_itr = _unapplied_transactions.unapplied_end(); + while (itr != end_itr) { + if (should_interrupt_start_block(deadline, pending_block_num)) { exhausted = true; break; } ++num_processed; try { - push_result pr = push_transaction( deadline, itr->trx_meta, false, itr->return_failure_trace, itr->next ); + push_result pr = push_transaction(deadline, itr->trx_meta, false, itr->return_failure_trace, itr->next); exhausted = pr.block_exhausted; - if( exhausted ) { + if (exhausted) { break; } else { - if( pr.failed ) { + if (pr.failed) { ++num_failed; } else { ++num_applied; } } - if( !pr.trx_exhausted ) { - itr = _unapplied_transactions.erase( itr ); + if (!pr.trx_exhausted) { + itr = _unapplied_transactions.erase(itr); } else { ++itr; // keep exhausted } continue; - } LOG_AND_DROP(); + } + LOG_AND_DROP(); ++num_failed; ++itr; } - fc_dlog( _log, "Processed ${m} of ${n} previously applied transactions, Applied ${applied}, Failed/Dropped ${failed}", - ("m", num_processed)( "n", 
unapplied_trxs_size )("applied", num_applied)("failed", num_failed) ); + fc_dlog(_log, + "Processed ${m} of ${n} previously applied transactions, Applied ${applied}, Failed/Dropped ${failed}", + ("m", num_processed)("n", unapplied_trxs_size)("applied", num_applied)("failed", num_failed)); } return !exhausted; } -void producer_plugin_impl::process_scheduled_and_incoming_trxs( const fc::time_point& deadline, unapplied_transaction_queue::iterator& itr ) -{ +void producer_plugin_impl::process_scheduled_and_incoming_trxs(const fc::time_point& deadline, unapplied_transaction_queue::iterator& itr) { // scheduled transactions - int num_applied = 0; - int num_failed = 0; - int num_processed = 0; - bool exhausted = false; + int num_applied = 0; + int num_failed = 0; + int num_processed = 0; + bool exhausted = false; double incoming_trx_weight = 0.0; - auto& blacklist_by_id = _blacklisted_transactions.get(); - chain::controller& chain = chain_plug->chain(); - time_point pending_block_time = chain.pending_block_time(); - auto end = _unapplied_transactions.incoming_end(); - const auto& sch_idx = chain.db().get_index(); - const auto scheduled_trxs_size = sch_idx.size(); - auto sch_itr = sch_idx.begin(); - while( sch_itr != sch_idx.end() ) { - if( sch_itr->delay_until > pending_block_time) break; // not scheduled yet - if( exhausted || deadline <= fc::time_point::now() ) { + auto& blacklist_by_id = _blacklisted_transactions.get(); + chain::controller& chain = chain_plug->chain(); + time_point pending_block_time = chain.pending_block_time(); + auto end = _unapplied_transactions.incoming_end(); + const auto& sch_idx = chain.db().get_index(); + const auto scheduled_trxs_size = sch_idx.size(); + auto sch_itr = sch_idx.begin(); + while (sch_itr != sch_idx.end()) { + if (sch_itr->delay_until > pending_block_time) + break; // not scheduled yet + if (exhausted || deadline <= fc::time_point::now()) { exhausted = true; break; } - if( sch_itr->published >= pending_block_time ) { + if (sch_itr->published >= pending_block_time) { ++sch_itr; continue; // do not allow schedule and execute in same block } @@ -2395,17 +2454,17 @@ void producer_plugin_impl::process_scheduled_and_incoming_trxs( const fc::time_p continue; } - const transaction_id_type trx_id = sch_itr->trx_id; // make copy since reference could be invalidated - const auto sch_expiration = sch_itr->expiration; - auto sch_itr_next = sch_itr; // save off next since sch_itr may be invalidated by loop + const transaction_id_type trx_id = sch_itr->trx_id; // make copy since reference could be invalidated + const auto sch_expiration = sch_itr->expiration; + auto sch_itr_next = sch_itr; // save off next since sch_itr may be invalidated by loop ++sch_itr_next; const auto next_delay_until = sch_itr_next != sch_idx.end() ? sch_itr_next->delay_until : sch_itr->delay_until; - const auto next_id = sch_itr_next != sch_idx.end() ? sch_itr_next->id : sch_itr->id; + const auto next_id = sch_itr_next != sch_idx.end() ? 
sch_itr_next->id : sch_itr->id; num_processed++; // configurable ratio of incoming txns vs deferred txns - while (incoming_trx_weight >= 1.0 && itr != end ) { + while (incoming_trx_weight >= 1.0 && itr != end) { if (deadline <= fc::time_point::now()) { exhausted = true; break; @@ -2414,18 +2473,19 @@ void producer_plugin_impl::process_scheduled_and_incoming_trxs( const fc::time_p incoming_trx_weight -= 1.0; auto trx_meta = itr->trx_meta; - bool api_trx = itr->trx_type == trx_enum_type::incoming_api; + bool api_trx = itr->trx_type == trx_enum_type::incoming_api; - push_result pr = push_transaction( deadline, trx_meta, api_trx, itr->return_failure_trace, itr->next ); + push_result pr = push_transaction(deadline, trx_meta, api_trx, itr->return_failure_trace, itr->next); exhausted = pr.block_exhausted; - if( pr.trx_exhausted ) { + if (pr.trx_exhausted) { ++itr; // leave in incoming } else { - itr = _unapplied_transactions.erase( itr ); + itr = _unapplied_transactions.erase(itr); } - if( exhausted ) break; + if (exhausted) + break; } if (exhausted || deadline <= fc::time_point::now()) { @@ -2434,34 +2494,36 @@ void producer_plugin_impl::process_scheduled_and_incoming_trxs( const fc::time_p } auto get_first_authorizer = [&](const transaction_trace_ptr& trace) { - for( const auto& a : trace->action_traces ) { - for( const auto& u : a.act.authorization ) + for (const auto& a : trace->action_traces) { + for (const auto& u : a.act.authorization) return u.actor; } return account_name(); }; try { - auto start = fc::time_point::now(); - fc::microseconds max_trx_time = fc::milliseconds( _max_transaction_time_ms.load() ); - if( max_trx_time.count() < 0 ) max_trx_time = fc::microseconds::maximum(); + auto start = fc::time_point::now(); + fc::microseconds max_trx_time = fc::milliseconds(_max_transaction_time_ms.load()); + if (max_trx_time.count() < 0) + max_trx_time = fc::microseconds::maximum(); auto trace = chain.push_scheduled_transaction(trx_id, deadline, max_trx_time, 0, false); - auto end = fc::time_point::now(); + auto end = fc::time_point::now(); if (trace->except) { _time_tracker.add_fail_time(end - start, false); // delayed transaction cannot be transient if (exception_is_exhausted(*trace->except)) { - if( block_is_exhausted() ) { + if (block_is_exhausted()) { exhausted = true; break; } } else { fc_dlog(_trx_failed_trace_log, - "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING scheduled tx: ${txid}, time: ${r}, auth: ${a} : ${details}", - ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) - ("txid", trx_id)("r", end - start)("a", get_first_authorizer(trace)) - ("details", get_detailed_contract_except_info(nullptr, trace, nullptr))); - fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING scheduled tx: ${entire_trace}", + "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING scheduled tx: ${txid}, time: ${r}, auth: ${a} : " + "${details}", + ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer())("txid", trx_id)("r", end - start) + ("a", get_first_authorizer(trace))("details", get_detailed_contract_except_info(nullptr, trace, nullptr))); + fc_dlog(_trx_trace_failure_log, + "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING scheduled tx: ${entire_trace}", ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) ("entire_trace", chain_plug->get_log_trx_trace(trace))); // this failed our configured maximum transaction time, we don't want to 
replay it add it to a blacklist @@ -2471,73 +2533,78 @@ void producer_plugin_impl::process_scheduled_and_incoming_trxs( const fc::time_p } else { _time_tracker.add_success_time(end - start, false); // delayed transaction cannot be transient fc_dlog(_trx_successful_trace_log, - "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING scheduled tx: ${txid}, time: ${r}, auth: ${a}, cpu: ${cpu}", - ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) - ("txid", trx_id)("r", end - start)("a", get_first_authorizer(trace)) - ("cpu", trace->receipt ? trace->receipt->cpu_usage_us : 0)); - fc_dlog(_trx_trace_success_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING scheduled tx: ${entire_trace}", + "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING scheduled tx: ${txid}, time: ${r}, auth: ${a}, cpu: " + "${cpu}", + ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer())("txid", trx_id)("r", end - start) + ("a", get_first_authorizer(trace))("cpu", trace->receipt ? trace->receipt->cpu_usage_us : 0)); + fc_dlog(_trx_trace_success_log, + "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING scheduled tx: ${entire_trace}", ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) ("entire_trace", chain_plug->get_log_trx_trace(trace))); num_applied++; } - } LOG_AND_DROP(); + } + LOG_AND_DROP(); incoming_trx_weight += _incoming_defer_ratio; - if( sch_itr_next == sch_idx.end() ) break; - sch_itr = sch_idx.lower_bound( boost::make_tuple( next_delay_until, next_id ) ); + if (sch_itr_next == sch_idx.end()) + break; + sch_itr = sch_idx.lower_bound(boost::make_tuple(next_delay_until, next_id)); } - if( scheduled_trxs_size > 0 ) { - fc_dlog( _log, - "Processed ${m} of ${n} scheduled transactions, Applied ${applied}, Failed/Dropped ${failed}", - ( "m", num_processed )( "n", scheduled_trxs_size )( "applied", num_applied )( "failed", num_failed ) ); + if (scheduled_trxs_size > 0) { + fc_dlog(_log, + "Processed ${m} of ${n} scheduled transactions, Applied ${applied}, Failed/Dropped ${failed}", + ("m", num_processed)("n", scheduled_trxs_size)("applied", num_applied)("failed", num_failed)); } } -bool producer_plugin_impl::process_incoming_trxs( const fc::time_point& deadline, unapplied_transaction_queue::iterator& itr ) -{ +bool producer_plugin_impl::process_incoming_trxs(const fc::time_point& deadline, unapplied_transaction_queue::iterator& itr) { bool exhausted = false; - auto end = _unapplied_transactions.incoming_end(); - if( itr != end ) { + auto end = _unapplied_transactions.incoming_end(); + if (itr != end) { size_t processed = 0; - fc_dlog( _log, "Processing ${n} pending transactions", ("n", _unapplied_transactions.incoming_size()) ); - const chain::controller& chain = chain_plug->chain(); - const auto pending_block_num = chain.pending_block_num(); - while( itr != end ) { - if ( should_interrupt_start_block( deadline, pending_block_num ) ) { + fc_dlog(_log, "Processing ${n} pending transactions", ("n", _unapplied_transactions.incoming_size())); + const chain::controller& chain = chain_plug->chain(); + const auto pending_block_num = chain.pending_block_num(); + while (itr != end) { + if (should_interrupt_start_block(deadline, pending_block_num)) { exhausted = true; break; } auto trx_meta = itr->trx_meta; - bool api_trx = itr->trx_type == trx_enum_type::incoming_api; + bool api_trx = itr->trx_type == trx_enum_type::incoming_api; - push_result pr = push_transaction( deadline, trx_meta, 
api_trx, itr->return_failure_trace, itr->next ); + push_result pr = push_transaction(deadline, trx_meta, api_trx, itr->return_failure_trace, itr->next); exhausted = pr.block_exhausted; - if( pr.trx_exhausted ) { + if (pr.trx_exhausted) { ++itr; // leave in incoming } else { - itr = _unapplied_transactions.erase( itr ); + itr = _unapplied_transactions.erase(itr); } - if( exhausted ) break; + if (exhausted) + break; ++processed; } - fc_dlog( _log, "Processed ${n} pending transactions, ${p} left", ("n", processed)("p", _unapplied_transactions.incoming_size()) ); + fc_dlog(_log, "Processed ${n} pending transactions, ${p} left", ("n", processed)("p", _unapplied_transactions.incoming_size())); } return !exhausted; } bool producer_plugin_impl::block_is_exhausted() const { const chain::controller& chain = chain_plug->chain(); - const auto& rl = chain.get_resource_limits_manager(); + const auto& rl = chain.get_resource_limits_manager(); const uint64_t cpu_limit = rl.get_block_cpu_limit(); - if( cpu_limit < _max_block_cpu_usage_threshold_us ) return true; + if (cpu_limit < _max_block_cpu_usage_threshold_us) + return true; const uint64_t net_limit = rl.get_block_net_limit(); - if( net_limit < _max_block_net_usage_threshold_bytes ) return true; + if (net_limit < _max_block_net_usage_threshold_bytes) + return true; return false; } @@ -2556,17 +2623,19 @@ void producer_plugin_impl::schedule_production_loop() { if (result == start_block_result::failed) { elog("Failed to start a pending block, will try again later"); - _timer.expires_from_now( boost::posix_time::microseconds( config::block_interval_us / 10 )); + _timer.expires_from_now(boost::posix_time::microseconds(config::block_interval_us / 10)); // we failed to start a block, so try again later? - _timer.async_wait( app().executor().wrap( priority::high, exec_queue::read_write, - [weak_this = weak_from_this(), cid = ++_timer_corelation_id]( const boost::system::error_code& ec ) { - auto self = weak_this.lock(); - if( self && ec != boost::asio::error::operation_aborted && cid == self->_timer_corelation_id ) { - self->schedule_production_loop(); - } - } ) ); - } else if (result == start_block_result::waiting_for_block){ + _timer.async_wait( + app().executor().wrap(priority::high, + exec_queue::read_write, + [weak_this = weak_from_this(), cid = ++_timer_corelation_id](const boost::system::error_code& ec) { + auto self = weak_this.lock(); + if (self && ec != boost::asio::error::operation_aborted && cid == self->_timer_corelation_id) { + self->schedule_production_loop(); + } + })); + } else if (result == start_block_result::waiting_for_block) { if (!_producers.empty() && !production_disabled_by_policy()) { fc_dlog(_log, "Waiting till another block is received and scheduling Speculative/Production Change"); schedule_delayed_production_loop(weak_from_this(), calculate_producer_wake_up_time(calculate_pending_block_time())); @@ -2579,63 +2648,65 @@ void producer_plugin_impl::schedule_production_loop() { // scheduled in start_block() } else if (_pending_block_mode == pending_block_mode::producing) { - schedule_maybe_produce_block( result == start_block_result::exhausted ); + schedule_maybe_produce_block(result == start_block_result::exhausted); - } else if (_pending_block_mode == pending_block_mode::speculating && !_producers.empty() && !production_disabled_by_policy()){ + } else if (_pending_block_mode == pending_block_mode::speculating && !_producers.empty() && !production_disabled_by_policy()) { chain::controller& chain = chain_plug->chain(); fc_dlog(_log, 
"Speculative Block Created; Scheduling Speculative/Production Change"); - EOS_ASSERT( chain.is_building_block(), missing_pending_block_state, "speculating without pending_block_state" ); + EOS_ASSERT(chain.is_building_block(), missing_pending_block_state, "speculating without pending_block_state"); schedule_delayed_production_loop(weak_from_this(), calculate_producer_wake_up_time(chain.pending_block_timestamp())); } else { fc_dlog(_log, "Speculative Block Created"); } } -void producer_plugin_impl::schedule_maybe_produce_block( bool exhausted ) { +void producer_plugin_impl::schedule_maybe_produce_block(bool exhausted) { chain::controller& chain = chain_plug->chain(); // we succeeded but block may be exhausted - static const boost::posix_time::ptime epoch( boost::gregorian::date( 1970, 1, 1 ) ); - auto deadline = block_timing_util::calculate_block_deadline(_cpu_effort_us, _pending_block_mode, chain.pending_block_time() ); + static const boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1)); + auto deadline = block_timing_util::calculate_block_deadline(_cpu_effort_us, _pending_block_mode, chain.pending_block_time()); - if( !exhausted && deadline > fc::time_point::now() ) { + if (!exhausted && deadline > fc::time_point::now()) { // ship this block off no later than its deadline - EOS_ASSERT( chain.is_building_block(), missing_pending_block_state, - "producing without pending_block_state, start_block succeeded" ); - _timer.expires_at( epoch + boost::posix_time::microseconds( deadline.time_since_epoch().count() ) ); - fc_dlog( _log, "Scheduling Block Production on Normal Block #${num} for ${time}", - ("num", chain.head_block_num() + 1)( "time", deadline ) ); + EOS_ASSERT(chain.is_building_block(), missing_pending_block_state, "producing without pending_block_state, start_block succeeded"); + _timer.expires_at(epoch + boost::posix_time::microseconds(deadline.time_since_epoch().count())); + fc_dlog(_log, "Scheduling Block Production on Normal Block #${num} for ${time}", + ("num", chain.head_block_num() + 1)("time", deadline)); } else { - EOS_ASSERT( chain.is_building_block(), missing_pending_block_state, "producing without pending_block_state" ); - _timer.expires_from_now( boost::posix_time::microseconds( 0 ) ); - fc_dlog( _log, "Scheduling Block Production on ${desc} Block #${num} immediately", - ("num", chain.head_block_num() + 1)("desc", block_is_exhausted() ? "Exhausted" : "Deadline exceeded") ); + EOS_ASSERT(chain.is_building_block(), missing_pending_block_state, "producing without pending_block_state"); + _timer.expires_from_now(boost::posix_time::microseconds(0)); + fc_dlog(_log, + "Scheduling Block Production on ${desc} Block #${num} immediately", + ("num", chain.head_block_num() + 1)("desc", block_is_exhausted() ? "Exhausted" : "Deadline exceeded")); } - _timer.async_wait( app().executor().wrap( priority::high, exec_queue::read_write, - [&chain, weak_this = weak_from_this(), cid=++_timer_corelation_id](const boost::system::error_code& ec) { - auto self = weak_this.lock(); - if( self && ec != boost::asio::error::operation_aborted && cid == self->_timer_corelation_id ) { - // pending_block_state expected, but can't assert inside async_wait - auto block_num = chain.is_building_block() ? 
chain.head_block_num() + 1 : 0; - fc_dlog( _log, "Produce block timer for ${num} running at ${time}", ("num", block_num)("time", fc::time_point::now()) ); - auto res = self->maybe_produce_block(); - fc_dlog( _log, "Producing Block #${num} returned: ${res}", ("num", block_num)( "res", res ) ); - } - } ) ); + _timer.async_wait(app().executor().wrap( + priority::high, + exec_queue::read_write, + [&chain, weak_this = weak_from_this(), cid = ++_timer_corelation_id](const boost::system::error_code& ec) { + auto self = weak_this.lock(); + if (self && ec != boost::asio::error::operation_aborted && cid == self->_timer_corelation_id) { + // pending_block_state expected, but can't assert inside async_wait + auto block_num = chain.is_building_block() ? chain.head_block_num() + 1 : 0; + fc_dlog(_log, "Produce block timer for ${num} running at ${time}", ("num", block_num)("time", fc::time_point::now())); + auto res = self->maybe_produce_block(); + fc_dlog(_log, "Producing Block #${num} returned: ${res}", ("num", block_num)("res", res)); + } + })); } -std::optional producer_plugin_impl::calculate_producer_wake_up_time( const block_timestamp_type& ref_block_time ) const { +std::optional producer_plugin_impl::calculate_producer_wake_up_time(const block_timestamp_type& ref_block_time) const { auto ref_block_slot = ref_block_time.slot; // if we have any producers then we should at least set a timer for our next available slot uint32_t wake_up_slot = UINT32_MAX; for (const auto& p : _producers) { auto next_producer_block_slot = calculate_next_block_slot(p, ref_block_slot); - wake_up_slot = std::min(next_producer_block_slot, wake_up_slot); + wake_up_slot = std::min(next_producer_block_slot, wake_up_slot); } - if( wake_up_slot == UINT32_MAX ) { + if (wake_up_slot == UINT32_MAX) { fc_dlog(_log, "Not Scheduling Speculative/Production, no local producers had valid wake up times"); return {}; } @@ -2643,31 +2714,31 @@ std::optional producer_plugin_impl::calculate_producer_wake_up_t return block_timing_util::production_round_block_start_time(_cpu_effort_us, block_timestamp_type(wake_up_slot)); } -void producer_plugin_impl::schedule_delayed_production_loop(const std::weak_ptr& weak_this, std::optional wake_up_time) { +void producer_plugin_impl::schedule_delayed_production_loop(const std::weak_ptr& weak_this, + std::optional wake_up_time) { if (wake_up_time) { fc_dlog(_log, "Scheduling Speculative/Production Change at ${time}", ("time", wake_up_time)); static const boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1)); _timer.expires_at(epoch + boost::posix_time::microseconds(wake_up_time->time_since_epoch().count())); - _timer.async_wait( app().executor().wrap( priority::high, exec_queue::read_write, - [weak_this,cid=++_timer_corelation_id](const boost::system::error_code& ec) { + _timer.async_wait(app().executor().wrap( + priority::high, exec_queue::read_write, [weak_this, cid = ++_timer_corelation_id](const boost::system::error_code& ec) { auto self = weak_this.lock(); - if( self && ec != boost::asio::error::operation_aborted && cid == self->_timer_corelation_id ) { + if (self && ec != boost::asio::error::operation_aborted && cid == self->_timer_corelation_id) { self->schedule_production_loop(); } - } ) ); + })); } } bool producer_plugin_impl::maybe_produce_block() { - auto reschedule = fc::make_scoped_exit([this]{ - schedule_production_loop(); - }); + auto reschedule = fc::make_scoped_exit([this] { schedule_production_loop(); }); try { produce_block(); return true; - } LOG_AND_DROP(); + } + 
LOG_AND_DROP(); fc_dlog(_log, "Aborting block due to produce_block error"); abort_block(); @@ -2676,13 +2747,11 @@ bool producer_plugin_impl::maybe_produce_block() { static auto make_debug_time_logger() { auto start = fc::time_point::now(); - return fc::make_scoped_exit([=](){ - fc_dlog(_log, "Signing took ${ms}us", ("ms", fc::time_point::now() - start) ); - }); + return fc::make_scoped_exit([=]() { fc_dlog(_log, "Signing took ${ms}us", ("ms", fc::time_point::now() - start)); }); } static auto maybe_make_debug_time_logger() -> std::optional { - if (_log.is_enabled( fc::log_level::debug ) ){ + if (_log.is_enabled(fc::log_level::debug)) { return make_debug_time_logger(); } else { return {}; @@ -2691,33 +2760,38 @@ static auto maybe_make_debug_time_logger() -> std::optionalchain(); - EOS_ASSERT(chain.is_building_block(), missing_pending_block_state, "pending_block_state does not exist but it should, another plugin may have corrupted it"); + EOS_ASSERT(chain.is_building_block(), + missing_pending_block_state, + "pending_block_state does not exist but it should, another plugin may have corrupted it"); - const auto& auth = chain.pending_block_signing_authority(); + const auto& auth = chain.pending_block_signing_authority(); std::vector> relevant_providers; relevant_providers.reserve(_signature_providers.size()); - producer_authority::for_each_key(auth, [&](const public_key_type& key){ + producer_authority::for_each_key(auth, [&](const public_key_type& key) { const auto& iter = _signature_providers.find(key); if (iter != _signature_providers.end()) { relevant_providers.emplace_back(iter->second); } }); - EOS_ASSERT(relevant_providers.size() > 0, producer_priv_key_not_found, "Attempting to produce a block for which we don't have any relevant private keys"); + EOS_ASSERT(relevant_providers.size() > 0, + producer_priv_key_not_found, + "Attempting to produce a block for which we don't have any relevant private keys"); if (_protocol_features_signaled) { _protocol_features_to_activate.clear(); // clear _protocol_features_to_activate as it is already set in pending_block _protocol_features_signaled = false; } - //idump( (fc::time_point::now() - chain.pending_block_time()) ); + // idump( (fc::time_point::now() - chain.pending_block_time()) ); controller::block_report br; - chain.finalize_block( br, [&]( const digest_type& d ) { - auto debug_logger = maybe_make_debug_time_logger(); + chain.finalize_block(br, [&](const digest_type& d) { + auto debug_logger = maybe_make_debug_time_logger(); vector sigs; sigs.reserve(relevant_providers.size()); @@ -2726,7 +2800,7 @@ void producer_plugin_impl::produce_block() { sigs.emplace_back(p.get()(d)); } return sigs; - } ); + }); chain.commit_block(); @@ -2737,59 +2811,60 @@ void producer_plugin_impl::produce_block() { br.total_time += fc::time_point::now() - start; if (_update_produced_block_metrics) { - _update_produced_block_metrics( - {.unapplied_transactions_total = _unapplied_transactions.size(), - .blacklisted_transactions_total = _blacklisted_transactions.size(), - .subjective_bill_account_size_total = chain.get_subjective_billing().get_account_cache_size(), - .scheduled_trxs_total = chain.db().get_index().size(), - .trxs_produced_total = new_bs->block->transactions.size(), - .cpu_usage_us = br.total_cpu_usage_us, - .net_usage_us = br.total_net_usage, - .last_irreversible = chain.last_irreversible_block_num(), - .head_block_num = chain.head_block_num()}); + _update_produced_block_metrics({.unapplied_transactions_total = _unapplied_transactions.size(), + 
.blacklisted_transactions_total = _blacklisted_transactions.size(), + .subjective_bill_account_size_total = chain.get_subjective_billing().get_account_cache_size(), + .scheduled_trxs_total = chain.db().get_index().size(), + .trxs_produced_total = new_bs->block->transactions.size(), + .cpu_usage_us = br.total_cpu_usage_us, + .net_usage_us = br.total_net_usage, + .last_irreversible = chain.last_irreversible_block_num(), + .head_block_num = chain.head_block_num()}); } ilog("Produced block ${id}... #${n} @ ${t} signed by ${p} " "[trxs: ${count}, lib: ${lib}, confirmed: ${confs}, net: ${net}, cpu: ${cpu}, elapsed: ${et}, time: ${tt}]", - ("p",new_bs->header.producer)("id",new_bs->id.str().substr(8,16)) - ("n",new_bs->block_num)("t",new_bs->header.timestamp) - ("count",new_bs->block->transactions.size())("lib",chain.last_irreversible_block_num()) - ("net", br.total_net_usage)("cpu", br.total_cpu_usage_us)("et", br.total_elapsed_time)("tt", br.total_time) - ("confs", new_bs->header.confirmed)); + ("p", new_bs->header.producer)("id", new_bs->id.str().substr(8, 16))("n", new_bs->block_num)("t", new_bs->header.timestamp)( + "count", new_bs->block->transactions.size())("lib", chain.last_irreversible_block_num())("net", br.total_net_usage)( + "cpu", br.total_cpu_usage_us)("et", br.total_elapsed_time)("tt", br.total_time)("confs", new_bs->header.confirmed)); } void producer_plugin::received_block(uint32_t block_num) { my->_received_block = block_num; } -void producer_plugin::log_failed_transaction(const transaction_id_type& trx_id, const packed_transaction_ptr& packed_trx_ptr, const char* reason) const { - fc_dlog(_trx_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${trx}", +void producer_plugin::log_failed_transaction(const transaction_id_type& trx_id, + const packed_transaction_ptr& packed_trx_ptr, + const char* reason) const { + fc_dlog(_trx_log, + "[TRX_TRACE] Speculative execution is REJECTING tx: ${trx}", + ("entire_trx", packed_trx_ptr ? my->chain_plug->get_log_trx(packed_trx_ptr->get_transaction()) : fc::variant{trx_id})); + fc_dlog(_trx_failed_trace_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${txid} : ${why}", ("txid", trx_id)("why", reason)); + fc_dlog(_trx_trace_failure_log, + "[TRX_TRACE] Speculative execution is REJECTING tx: ${entire_trx}", ("entire_trx", packed_trx_ptr ? my->chain_plug->get_log_trx(packed_trx_ptr->get_transaction()) : fc::variant{trx_id})); - fc_dlog(_trx_failed_trace_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${txid} : ${why}", - ("txid", trx_id)("why", reason)); - fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${entire_trx}", - ("entire_trx", packed_trx_ptr ? my->chain_plug->get_log_trx(packed_trx_ptr->get_transaction()) : fc::variant{trx_id})); } // Called from only one read_only thread void producer_plugin_impl::switch_to_write_window() { - if ( _log.is_enabled( fc::log_level::debug ) ) { + if (_log.is_enabled(fc::log_level::debug)) { auto now = fc::time_point::now(); - fc_dlog( _log, "Read-only threads ${n}, read window ${r}us, total all threads ${t}us", - ("n", _ro_thread_pool_size) - ("r", now - _ro_read_window_start_time) - ("t", _ro_all_threads_exec_time_us.load())); + fc_dlog(_log, + "Read-only threads ${n}, read window ${r}us, total all threads ${t}us", + ("n", _ro_thread_pool_size)("r", now - _ro_read_window_start_time)("t", _ro_all_threads_exec_time_us.load())); } chain::controller& chain = chain_plug->chain(); // this method can be called from multiple places. 
it is possible // we are already in write window. - if ( chain.is_write_window() ) { + if (chain.is_write_window()) { return; } - EOS_ASSERT(_ro_num_active_exec_tasks.load() == 0 && _ro_exec_tasks_fut.empty(), producer_exception, "no read-only tasks should be running before switching to write window"); + EOS_ASSERT(_ro_num_active_exec_tasks.load() == 0 && _ro_exec_tasks_fut.empty(), + producer_exception, + "no read-only tasks should be running before switching to write window"); start_write_window(); } @@ -2806,13 +2881,13 @@ void producer_plugin_impl::start_write_window() { _ro_window_deadline += _ro_write_window_time_us; // not allowed on block producers, so no need to limit to block deadline auto expire_time = boost::posix_time::microseconds(_ro_write_window_time_us.count()); - _ro_timer.expires_from_now( expire_time ); - _ro_timer.async_wait( app().executor().wrap( // stay on app thread + _ro_timer.expires_from_now(expire_time); + _ro_timer.async_wait(app().executor().wrap( // stay on app thread priority::high, exec_queue::read_write, // placed in read_write so only called from main thread - [weak_this = weak_from_this()]( const boost::system::error_code& ec ) { + [weak_this = weak_from_this()](const boost::system::error_code& ec) { auto self = weak_this.lock(); - if( self && ec != boost::asio::error::operation_aborted ) { + if (self && ec != boost::asio::error::operation_aborted) { self->switch_to_read_window(); } })); @@ -2821,22 +2896,23 @@ void producer_plugin_impl::start_write_window() { // Called only from app thread void producer_plugin_impl::switch_to_read_window() { chain::controller& chain = chain_plug->chain(); - EOS_ASSERT(chain.is_write_window(), producer_exception, "expected to be in write window"); - EOS_ASSERT( _ro_num_active_exec_tasks.load() == 0 && _ro_exec_tasks_fut.empty(), producer_exception, "_ro_exec_tasks_fut expected to be empty" ); + EOS_ASSERT(chain.is_write_window(), producer_exception, "expected to be in write window"); + EOS_ASSERT( + _ro_num_active_exec_tasks.load() == 0 && _ro_exec_tasks_fut.empty(), producer_exception, "_ro_exec_tasks_fut expected to be empty"); - _time_tracker.add_idle_time( fc::time_point::now() - _idle_trx_time ); + _time_tracker.add_idle_time(fc::time_point::now() - _idle_trx_time); // we are in write window, so no read-only trx threads are processing transactions. - if ( app().executor().read_only_queue().empty() ) { // no read-only tasks to process. stay in write window - start_write_window(); // restart write window timer for next round + if (app().executor().read_only_queue().empty()) { // no read-only tasks to process. 
stay in write window + start_write_window(); // restart write window timer for next round return; } uint32_t pending_block_num = chain.head_block_num() + 1; _ro_read_window_start_time = fc::time_point::now(); - _ro_window_deadline = _ro_read_window_start_time + _ro_read_window_effective_time_us; - app().executor().set_to_read_window(_ro_thread_pool_size, - [received_block=&_received_block, pending_block_num, ro_window_deadline=_ro_window_deadline]() { + _ro_window_deadline = _ro_read_window_start_time + _ro_read_window_effective_time_us; + app().executor().set_to_read_window( + _ro_thread_pool_size, [received_block = &_received_block, pending_block_num, ro_window_deadline = _ro_window_deadline]() { return fc::time_point::now() >= ro_window_deadline || (received_block->load() >= pending_block_num); // should_exit() }); chain.set_to_read_window(); @@ -2846,32 +2922,29 @@ void producer_plugin_impl::switch_to_read_window() { // start a read-only execution task in each thread in the thread pool _ro_num_active_exec_tasks = _ro_thread_pool_size; _ro_exec_tasks_fut.resize(0); - for (uint32_t i = 0; i < _ro_thread_pool_size; ++i ) { - _ro_exec_tasks_fut.emplace_back( post_async_task( _ro_thread_pool.get_executor(), [self = this, pending_block_num] () { - return self->read_only_execution_task(pending_block_num); - }) ); + for (uint32_t i = 0; i < _ro_thread_pool_size; ++i) { + _ro_exec_tasks_fut.emplace_back(post_async_task( + _ro_thread_pool.get_executor(), [self = this, pending_block_num]() { return self->read_only_execution_task(pending_block_num); })); } auto expire_time = boost::posix_time::microseconds(_ro_read_window_time_us.count()); - _ro_timer.expires_from_now( expire_time ); + _ro_timer.expires_from_now(expire_time); // Needs to be on read_only because that is what is being processed until switch_to_write_window(). - _ro_timer.async_wait( app().executor().wrap( - priority::high, - exec_queue::read_only, - [weak_this = weak_from_this()]( const boost::system::error_code& ec ) { + _ro_timer.async_wait( + app().executor().wrap(priority::high, exec_queue::read_only, [weak_this = weak_from_this()](const boost::system::error_code& ec) { auto self = weak_this.lock(); - if( self && ec != boost::asio::error::operation_aborted ) { + if (self && ec != boost::asio::error::operation_aborted) { // use future to make sure all read-only tasks finished before switching to write window - for ( auto& task: self->_ro_exec_tasks_fut ) { + for (auto& task : self->_ro_exec_tasks_fut) { task.get(); } self->_ro_exec_tasks_fut.clear(); // will be executed from the main app thread because all read-only threads are idle now self->switch_to_write_window(); - } else if ( self ) { - self->_ro_exec_tasks_fut.clear(); - } - })); + } else if (self) { + self->_ro_exec_tasks_fut.clear(); + } + })); } // Called from a read only thread. Run in parallel with app and other read only threads @@ -2880,27 +2953,27 @@ bool producer_plugin_impl::read_only_execution_task(uint32_t pending_block_num) // 1. pass read window deadline // 2. net_plugin receives a block // 3. no read-only tasks to execute - while ( fc::time_point::now() < _ro_window_deadline && _received_block < pending_block_num ) { + while (fc::time_point::now() < _ro_window_deadline && _received_block < pending_block_num) { bool more = app().executor().execute_highest_read_only(); // blocks until all read only threads are idle - if ( !more ) { + if (!more) { break; } } // If all tasks are finished, do not wait until end of read window; switch to write window now. 
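
The hunk above is easier to follow with the underlying pattern in mind: every read-only thread drains the read-only queue until the window deadline passes, a block arrives, or the queue is empty, and the last task to finish is the one that switches execution back to the write window and requeues any exhausted transactions. A minimal illustrative sketch of that last-finisher handshake follows; it is not taken from the patch, the names are assumed, and it uses a bare std::atomic counter in place of the plugin's appbase executor:

    #include <atomic>
    #include <cstdint>
    #include <functional>

    // Hypothetical stand-in for the counter/switch handshake sketched here;
    // the real plugin posts switch_to_write_window() through app().executor().
    struct ro_window_tracker {
       std::atomic<uint32_t> active_tasks{0};

       void begin_read_window(uint32_t ro_threads) {
          active_tasks = ro_threads;               // one exec task per read-only thread
       }

       // Each read-only task calls this when it stops (deadline, new block, or empty queue).
       void task_finished(const std::function<void()>& switch_to_write) {
          if (--active_tasks == 0)                 // only the last finisher switches windows
             switch_to_write();
       }
    };
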
- if ( --_ro_num_active_exec_tasks == 0 ) { + if (--_ro_num_active_exec_tasks == 0) { // Needs to be on read_only because that is what is being processed until switch_to_write_window(). - app().executor().post( priority::high, exec_queue::read_only, [self=this]() { + app().executor().post(priority::high, exec_queue::read_only, [self = this]() { self->_ro_exec_tasks_fut.clear(); // will be executed from the main app thread because all read-only threads are idle now self->switch_to_write_window(); - } ); + }); // last thread post any exhausted back into read_only queue with slightly higher priority (low+1) so they are executed first ro_trx_t t; - while( _ro_exhausted_trx_queue.pop_front(t) ) { - app().executor().post(priority::low+1, exec_queue::read_only, [this, trx{std::move(t.trx)}, next{std::move(t.next)}]() mutable { - push_read_only_transaction( std::move(trx), std::move(next) ); - } ); + while (_ro_exhausted_trx_queue.pop_front(t)) { + app().executor().post(priority::low + 1, exec_queue::read_only, [this, trx{std::move(t.trx)}, next{std::move(t.next)}]() mutable { + push_read_only_transaction(std::move(trx), std::move(next)); + }); } } @@ -2910,15 +2983,15 @@ bool producer_plugin_impl::read_only_execution_task(uint32_t pending_block_num) // Called from app thread during start block. // Reschedule any exhausted read-only transactions from the last block void producer_plugin_impl::repost_exhausted_transactions(const fc::time_point& deadline) { - if ( !_ro_exhausted_trx_queue.empty() ) { - chain::controller& chain = chain_plug->chain(); - uint32_t pending_block_num = chain.pending_block_num(); + if (!_ro_exhausted_trx_queue.empty()) { + chain::controller& chain = chain_plug->chain(); + uint32_t pending_block_num = chain.pending_block_num(); // post any exhausted back into read_only queue with slightly higher priority (low+1) so they are executed first ro_trx_t t; - while( !should_interrupt_start_block( deadline, pending_block_num ) && _ro_exhausted_trx_queue.pop_front(t) ) { - app().executor().post(priority::low+1, exec_queue::read_only, [this, trx{std::move(t.trx)}, next{std::move(t.next)}]() mutable { - push_read_only_transaction( std::move(trx), std::move(next) ); - } ); + while (!should_interrupt_start_block(deadline, pending_block_num) && _ro_exhausted_trx_queue.pop_front(t)) { + app().executor().post(priority::low + 1, exec_queue::read_only, [this, trx{std::move(t.trx)}, next{std::move(t.next)}]() mutable { + push_read_only_transaction(std::move(trx), std::move(next)); + }); } } } @@ -2929,50 +3002,60 @@ bool producer_plugin_impl::push_read_only_transaction(transaction_metadata_ptr t auto retry = false; try { - auto start = fc::time_point::now(); + auto start = fc::time_point::now(); chain::controller& chain = chain_plug->chain(); - if ( !chain.is_building_block() ) { - _ro_exhausted_trx_queue.push_front( {std::move(trx), std::move(next)} ); + if (!chain.is_building_block()) { + _ro_exhausted_trx_queue.push_front({std::move(trx), std::move(next)}); return true; } // When executing a read-only trx on the main thread while in the write window, // need to switch db mode to read only. 
- auto db_read_only_mode_guard = fc::make_scoped_exit([&]{ - if( chain.is_write_window() ) + auto db_read_only_mode_guard = fc::make_scoped_exit([&] { + if (chain.is_write_window()) chain.unset_db_read_only_mode(); }); - if ( chain.is_write_window() ) { + if (chain.is_write_window()) { chain.set_db_read_only_mode(); auto idle_time = fc::time_point::now() - _idle_trx_time; - _time_tracker.add_idle_time( idle_time ); + _time_tracker.add_idle_time(idle_time); } // use read-window/write-window deadline if there are read/write windows, otherwise use block_deadline if only the app thead auto window_deadline = (_ro_thread_pool_size != 0) ? _ro_window_deadline : _pending_block_deadline; // Ensure the trx to finish by the end of read-window or write-window or block_deadline depending on - auto trace = chain.push_transaction( trx, window_deadline, _ro_max_trx_time_us, 0, false, 0 ); + auto trace = chain.push_transaction(trx, window_deadline, _ro_max_trx_time_us, 0, false, 0); _ro_all_threads_exec_time_us += (fc::time_point::now() - start).count(); - auto pr = handle_push_result(trx, next, start, chain, trace, true /*return_failure_trace*/, true /*disable_subjective_enforcement*/, {} /*first_auth*/, 0 /*sub_bill*/, 0 /*prev_billed_cpu_time_us*/); + auto pr = handle_push_result(trx, + next, + start, + chain, + trace, + true /*return_failure_trace*/, + true /*disable_subjective_enforcement*/, + {} /*first_auth*/, + 0 /*sub_bill*/, + 0 /*prev_billed_cpu_time_us*/); // If a transaction was exhausted, that indicates we are close to // the end of read window. Retry in next round. retry = pr.trx_exhausted; - if( retry ) { - _ro_exhausted_trx_queue.push_front( {std::move(trx), std::move(next)} ); + if (retry) { + _ro_exhausted_trx_queue.push_front({std::move(trx), std::move(next)}); } - if ( chain.is_write_window() ) { + if (chain.is_write_window()) { _idle_trx_time = fc::time_point::now(); } - } catch ( const guard_exception& e ) { + } catch (const guard_exception& e) { chain_plugin::handle_guard_exception(e); - } catch ( boost::interprocess::bad_alloc& ) { + } catch (boost::interprocess::bad_alloc&) { chain_apis::api_base::handle_db_exhaustion(); - } catch ( std::bad_alloc& ) { + } catch (std::bad_alloc&) { chain_apis::api_base::handle_bad_alloc(); - } CATCH_AND_CALL(next); + } + CATCH_AND_CALL(next); return retry; } @@ -2981,11 +3064,11 @@ const std::set& producer_plugin::producer_accounts() const { return my->_producers; } -void producer_plugin::register_update_produced_block_metrics(std::function&& fun){ +void producer_plugin::register_update_produced_block_metrics(std::function&& fun) { my->_update_produced_block_metrics = std::move(fun); } -void producer_plugin::register_update_incoming_block_metrics(std::function&& fun){ +void producer_plugin::register_update_incoming_block_metrics(std::function&& fun) { my->_update_incoming_block_metrics = std::move(fun); } From 6fcf9a9fc970be3fb3037c560e9e45e20cf45f9e Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 26 May 2023 09:48:31 -0400 Subject: [PATCH 09/16] More whitespace cleanup --- plugins/producer_plugin/producer_plugin.cpp | 34 ++++++++++++--------- 1 file changed, 19 insertions(+), 15 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 6a06d016b2..05ffea7c31 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -103,8 +103,10 @@ using namespace eosio::chain::plugin_interface; namespace { bool exception_is_exhausted(const 
fc::exception& e) { auto code = e.code(); - return (code == block_cpu_usage_exceeded::code_value) || (code == block_net_usage_exceeded::code_value) || - (code == deadline_exception::code_value) || (code == ro_trx_vm_oc_compile_temporary_failure::code_value); + return (code == block_cpu_usage_exceeded::code_value) || + (code == block_net_usage_exceeded::code_value) || + (code == deadline_exception::code_value) || + (code == ro_trx_vm_oc_compile_temporary_failure::code_value); } } // namespace @@ -324,6 +326,7 @@ class producer_plugin_impl : public std::enable_shared_from_this 0, chain::invalid_http_request, "At least one account is required"); @@ -671,18 +675,18 @@ class producer_plugin_impl : public std::enable_shared_from_thisproducer)("id", id.str().substr(8, 16))("n", blk_num)("t", block->timestamp)( - "count", block->transactions.size())("lib", chain.last_irreversible_block_num())("confs", block->confirmed)( - "net", br.total_net_usage)("cpu", br.total_cpu_usage_us)("elapsed", br.total_elapsed_time)("time", br.total_time)( - "latency", (now - block->timestamp).count() / 1000)); + ("p", block->producer)("id", id.str().substr(8, 16))("n", blk_num)("t", block->timestamp) + ("count", block->transactions.size())("lib", chain.last_irreversible_block_num()) + ("confs", block->confirmed)("net", br.total_net_usage)("cpu", br.total_cpu_usage_us) + ("elapsed", br.total_elapsed_time)("time", br.total_time)("latency", (now - block->timestamp).count() / 1000)); if (chain.get_read_mode() != db_read_mode::IRREVERSIBLE && hbs->id != id && hbs->block != nullptr) { // not applied to head ilog("Block not applied to head ${id}... #${n} @ ${t} signed by ${p} " "[trxs: ${count}, dpos: ${dpos}, confirmed: ${confs}, net: ${net}, cpu: ${cpu}, elapsed: ${elapsed}, time: ${time}, " "latency: ${latency} ms]", - ("p", hbs->block->producer)("id", hbs->id.str().substr(8, 16))("n", hbs->block_num)("t", hbs->block->timestamp)( - "count", hbs->block->transactions.size())("dpos", hbs->dpos_irreversible_blocknum)("confs", hbs->block->confirmed)( - "net", br.total_net_usage)("cpu", br.total_cpu_usage_us)("elapsed", br.total_elapsed_time)("time", br.total_time)( - "latency", (now - hbs->block->timestamp).count() / 1000)); + ("p", hbs->block->producer)("id", hbs->id.str().substr(8, 16))("n", hbs->block_num)("t", hbs->block->timestamp) + ("count", hbs->block->transactions.size())("dpos", hbs->dpos_irreversible_blocknum)("confs", hbs->block->confirmed) + ("net", br.total_net_usage)("cpu", br.total_cpu_usage_us)("elapsed", br.total_elapsed_time)("time", br.total_time) + ("latency", (now - hbs->block->timestamp).count() / 1000)); } } if (_update_incoming_block_metrics) { @@ -1211,8 +1215,8 @@ void producer_plugin_impl::plugin_initialize(const boost::program_options::varia auto actual_threads_allowed = std::min(_ro_max_threads_allowed, num_threads_supported); ilog("vm total in kb: ${total}, vm used in kb: ${used}, number of EOS VM OC threads supported " "((vm total - vm used)/4.2 TB - 2): ${supp}, max allowed: ${max}, actual allowed: ${actual}", - ("total", vm_total_kb)("used", vm_used_kb)("supp", num_threads_supported)("max", _ro_max_threads_allowed)( - "actual", actual_threads_allowed)); + ("total", vm_total_kb)("used", vm_used_kb)("supp", num_threads_supported)("max", _ro_max_threads_allowed) + ("actual", actual_threads_allowed)); EOS_ASSERT(_ro_thread_pool_size <= actual_threads_allowed, plugin_config_exception, "read-only-threads (${th}) greater than number of threads allowed for EOS VM OC (${allowed})", @@ -2824,9 +2828,9 
@@ void producer_plugin_impl::produce_block() { ilog("Produced block ${id}... #${n} @ ${t} signed by ${p} " "[trxs: ${count}, lib: ${lib}, confirmed: ${confs}, net: ${net}, cpu: ${cpu}, elapsed: ${et}, time: ${tt}]", - ("p", new_bs->header.producer)("id", new_bs->id.str().substr(8, 16))("n", new_bs->block_num)("t", new_bs->header.timestamp)( - "count", new_bs->block->transactions.size())("lib", chain.last_irreversible_block_num())("net", br.total_net_usage)( - "cpu", br.total_cpu_usage_us)("et", br.total_elapsed_time)("tt", br.total_time)("confs", new_bs->header.confirmed)); + ("p", new_bs->header.producer)("id", new_bs->id.str().substr(8, 16))("n", new_bs->block_num)("t", new_bs->header.timestamp) + ("count", new_bs->block->transactions.size())("lib", chain.last_irreversible_block_num())("net", br.total_net_usage) + ("cpu", br.total_cpu_usage_us)("et", br.total_elapsed_time)("tt", br.total_time)("confs", new_bs->header.confirmed)); } void producer_plugin::received_block(uint32_t block_num) { From 2487ac651e2bd61a34394dcec0b2f03458d15c70 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 26 May 2023 10:04:06 -0400 Subject: [PATCH 10/16] More whitespace cleanup according to PR comments --- plugins/producer_plugin/producer_plugin.cpp | 130 +++++++------------- 1 file changed, 45 insertions(+), 85 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 05ffea7c31..e6d912776e 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -187,8 +187,7 @@ class account_failures { reason += ", "; reason += "other"; } - fc_dlog(_log, - "Failed ${n} trxs, account: ${a}, sub bill: ${b}us, reason: ${r}", + fc_dlog(_log, "Failed ${n} trxs, account: ${a}, sub bill: ${b}us, reason: ${r}", ("n", e.second.num_failures)("b", sub_bill.get_subjective_bill(e.first, now))("a", e.first)("r", reason)); } } @@ -260,8 +259,7 @@ struct block_time_tracker { if (_log.is_enabled(fc::log_level::debug)) { auto now = fc::time_point::now(); add_idle_time(now - idle_trx_time); - fc_dlog(_log, - "Block #${n} trx idle: ${i}us out of ${t}us, success: ${sn}, ${s}us, fail: ${fn}, ${f}us, " + fc_dlog(_log, "Block #${n} trx idle: ${i}us out of ${t}us, success: ${sn}, ${s}us, fail: ${fn}, ${f}us, " "transient: ${trans_trx_num}, ${trans_trx_time}us, other: ${o}us", ("n", block_num)("i", block_idle_time)("t", now - clear_time)("sn", trx_success_num)("s", trx_success_time) ("fn", trx_fail_num)("f", trx_fail_time)("trans_trx_num", transient_trx_num)("trans_trx_time", transient_trx_time) @@ -528,8 +526,8 @@ class producer_plugin_impl : public std::enable_shared_from_this - _ro_all_threads_exec_time_us; // total time spent by all threads executing transactions. use atomic for simplicity and performance + std::atomic _ro_all_threads_exec_time_us; // total time spent by all threads executing transactions. 
+ // use atomic for simplicity and performance fc::time_point _ro_read_window_start_time; fc::time_point _ro_window_deadline; // only modified on app thread, read-window deadline or write-window deadline boost::asio::deadline_timer _ro_timer; // only accessible from the main thread @@ -570,9 +568,7 @@ class producer_plugin_impl : public std::enable_shared_from_this 0) { - fc_dlog(_log, - "Removed applied transactions before: ${before}, after: ${after}", - ("before", before)("after", _unapplied_transactions.size())); + fc_dlog(_log, "Removed applied transactions before: ${before}, after: ${after}", ("before", before)("after", _unapplied_transactions.size())); } } @@ -598,8 +594,7 @@ class producer_plugin_impl : public std::enable_shared_from_this& block_id, const block_state_ptr& bsp) { auto& chain = chain_plug->chain(); if (_pending_block_mode == pending_block_mode::producing) { - fc_wlog( - _log, "dropped incoming block #${num} id: ${id}", ("num", block->block_num())("id", block_id ? (*block_id).str() : "UNKNOWN")); + fc_wlog(_log, "dropped incoming block #${num} id: ${id}", ("num", block->block_num())("id", block_id ? (*block_id).str() : "UNKNOWN")); return false; } @@ -621,8 +616,8 @@ class producer_plugin_impl : public std::enable_shared_from_this bsf; @@ -749,24 +744,12 @@ class producer_plugin_impl : public std::enable_shared_from_this_idle_trx_time; @@ -874,7 +857,9 @@ class producer_plugin_impl : public std::enable_shared_from_this= 0 && get_irreversible_block_age() >= _max_irreversible_block_age_us); } - bool is_producer_key(const chain::public_key_type& key) const { return _signature_providers.find(key) != _signature_providers.end(); } + bool is_producer_key(const chain::public_key_type& key) const { + return _signature_providers.find(key) != _signature_providers.end(); + } chain::signature_type sign_compact(const chain::public_key_type& key, const fc::sha256& digest) const { if (key != chain::public_key_type()) { @@ -1267,11 +1252,9 @@ void producer_plugin_impl::plugin_initialize(const boost::program_options::varia }); _incoming_transaction_async_provider = - app().get_method().register_provider([this](const packed_transaction_ptr& trx, - bool api_trx, - transaction_metadata::trx_type trx_type, - bool return_failure_traces, - next_function next) -> void { + app().get_method().register_provider( + [this](const packed_transaction_ptr& trx, bool api_trx, transaction_metadata::trx_type trx_type, + bool return_failure_traces, next_function next) -> void { return on_incoming_transaction_async(trx, api_trx, trx_type, return_failure_traces, next); }); @@ -1321,15 +1304,13 @@ void producer_plugin_impl::plugin_startup() { chain::controller& chain = chain_plug->chain(); - EOS_ASSERT( - _producers.empty() || chain.get_read_mode() != chain::db_read_mode::IRREVERSIBLE, - plugin_config_exception, - "node cannot have any producer-name configured because block production is impossible when read_mode is \"irreversible\""); + EOS_ASSERT(_producers.empty() || chain.get_read_mode() != chain::db_read_mode::IRREVERSIBLE, + plugin_config_exception, + "node cannot have any producer-name configured because block production is impossible when read_mode is \"irreversible\""); - EOS_ASSERT( - _producers.empty() || chain.get_validation_mode() == chain::validation_mode::FULL, - plugin_config_exception, - "node cannot have any producer-name configured because block production is not safe when validation_mode is not \"full\""); + EOS_ASSERT(_producers.empty() || chain.get_validation_mode() == 
chain::validation_mode::FULL, + plugin_config_exception, + "node cannot have any producer-name configured because block production is not safe when validation_mode is not \"full\""); EOS_ASSERT(_producers.empty() || chain_plug->accept_transactions(), plugin_config_exception, @@ -1892,10 +1873,9 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { ("producer", scheduled_producer.producer_name)("watermark", current_watermark->first)("head_block_num", hbs->block_num)); _pending_block_mode = pending_block_mode::speculating; } else if (current_watermark->second >= block_timestamp) { - elog( - "Not producing block because \"${producer}\" signed a block at the next block time or later (${watermark}) than the pending " - "block time (${block_timestamp})", - ("producer", scheduled_producer.producer_name)("watermark", current_watermark->second)("block_timestamp", block_timestamp)); + elog("Not producing block because \"${producer}\" signed a block at the next block time or later (${watermark}) than the pending " + "block time (${block_timestamp})", + ("producer", scheduled_producer.producer_name)("watermark", current_watermark->second)("block_timestamp", block_timestamp)); _pending_block_mode = pending_block_mode::speculating; } } @@ -1920,8 +1900,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { } } - fc_dlog(_log, "Starting block #${n} at ${time} producer ${p}", - ("n", pending_block_num)("time", now)("p", scheduled_producer.producer_name)); + fc_dlog(_log, "Starting block #${n} at ${time} producer ${p}", ("n", pending_block_num)("time", now)("p", scheduled_producer.producer_name)); try { uint16_t blocks_to_confirm = 0; @@ -2079,14 +2058,10 @@ bool producer_plugin_impl::remove_expired_trxs(const fc::time_point& deadline) { }); if (exhausted && _pending_block_mode == pending_block_mode::producing) { - fc_wlog(_log, - "Unable to process all expired transactions of the ${n} transactions in the unapplied queue before deadline, " - "Expired ${expired}", - ("n", orig_count)("expired", num_expired)); + fc_wlog(_log, "Unable to process all expired transactions of the ${n} transactions in the unapplied queue before deadline, " + "Expired ${expired}", ("n", orig_count)("expired", num_expired)); } else { - fc_dlog(_log, - "Processed ${ex} expired transactions of the ${n} transactions in the unapplied queue.", - ("n", orig_count)("ex", num_expired)); + fc_dlog(_log, "Processed ${ex} expired transactions of the ${n} transactions in the unapplied queue.", ("n", orig_count)("ex", num_expired)); } return !exhausted; @@ -2180,8 +2155,7 @@ void producer_plugin_impl::log_trx_results(const packed_transaction_ptr& trx, ("details", get_detailed_contract_except_info(trx, trace, except_ptr))); if (!is_transient) { - fc_dlog(_trx_log, - "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING tx: ${trx}", + fc_dlog(_trx_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING tx: ${trx}", ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) ("trx", chain_plug->get_log_trx(trx->get_transaction()))); fc_dlog(_trx_trace_failure_log, @@ -2195,8 +2169,7 @@ void producer_plugin_impl::log_trx_results(const packed_transaction_ptr& trx, ("desc", is_transient ? 
"transient " : "")("txid", trx->id()) ("a", trx->get_transaction().first_authorizer())("details", get_detailed_contract_except_info(trx, trace, except_ptr))); if (!is_transient) { - fc_dlog(_trx_log, - "[TRX_TRACE] Speculative execution is REJECTING tx: ${trx} ", + fc_dlog(_trx_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${trx} ", ("trx", chain_plug->get_log_trx(trx->get_transaction()))); fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${entire_trace} ", @@ -2210,12 +2183,10 @@ void producer_plugin_impl::log_trx_results(const packed_transaction_ptr& trx, ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer())("desc", is_transient ? "transient " : "") ("txid", trx->id())("a", trx->get_transaction().first_authorizer())("cpu", billed_cpu_us)); if (!is_transient) { - fc_dlog(_trx_log, - "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING tx: ${trx}", + fc_dlog(_trx_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING tx: ${trx}", ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) ("trx", chain_plug->get_log_trx(trx->get_transaction()))); - fc_dlog(_trx_trace_success_log, - "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING tx: ${entire_trace}", + fc_dlog(_trx_trace_success_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING tx: ${entire_trace}", ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) ("entire_trace", get_trace(trace, except_ptr))); } @@ -2225,11 +2196,8 @@ void producer_plugin_impl::log_trx_results(const packed_transaction_ptr& trx, ("desc", is_transient ? "transient " : "")("txid", trx->id())("a", trx->get_transaction().first_authorizer()) ("cpu", billed_cpu_us)); if (!is_transient) { - fc_dlog(_trx_log, - "[TRX_TRACE] Speculative execution is ACCEPTING tx: ${trx}", - ("trx", chain_plug->get_log_trx(trx->get_transaction()))); - fc_dlog(_trx_trace_success_log, - "[TRX_TRACE] Speculative execution is ACCEPTING tx: ${entire_trace}", + fc_dlog(_trx_log, "[TRX_TRACE] Speculative execution is ACCEPTING tx: ${trx}", ("trx", chain_plug->get_log_trx(trx->get_transaction()))); + fc_dlog(_trx_trace_success_log, "[TRX_TRACE] Speculative execution is ACCEPTING tx: ${entire_trace}", ("entire_trace", get_trace(trace, except_ptr))); } } @@ -2321,8 +2289,7 @@ producer_plugin_impl::push_result producer_plugin_impl::handle_push_result(const ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer())("txid", trx->id())); } else { fc_dlog(trx->is_transient() ? 
_transient_trx_failed_trace_log : _trx_failed_trace_log, - "[TRX_TRACE] Speculative execution COULD NOT FIT tx: ${txid} RETRYING", - ("txid", trx->id())); + "[TRX_TRACE] Speculative execution COULD NOT FIT tx: ${txid} RETRYING", ("txid", trx->id())); } if (!trx->is_read_only()) pr.block_exhausted = block_is_exhausted(); // smaller trx might fit @@ -2331,16 +2298,14 @@ producer_plugin_impl::push_result producer_plugin_impl::handle_push_result(const pr.failed = true; const fc::exception& e = *trace->except; if (e.code() != tx_duplicate::code_value) { - fc_tlog(_log, - "Subjective bill for failed ${a}: ${b} elapsed ${t}us, time ${r}us", + fc_tlog(_log, "Subjective bill for failed ${a}: ${b} elapsed ${t}us, time ${r}us", ("a", first_auth)("b", sub_bill)("t", trace->elapsed)("r", end - start)); if (!disable_subjective_enforcement) // subjectively bill failure when producing since not in objective cpu account billing subjective_bill.subjective_bill_failure(first_auth, trace->elapsed, fc::time_point::now()); log_trx_results(trx, trace, start); // this failed our configured maximum transaction time, we don't want to replay it - fc_tlog(_log, - "Failed ${c} trx, auth: ${a}, prev billed: ${p}us, ran: ${r}us, id: ${id}, except: ${e}", + fc_tlog(_log, "Failed ${c} trx, auth: ${a}, prev billed: ${p}us, ran: ${r}us, id: ${id}, except: ${e}", ("c", e.code())("a", first_auth)("p", prev_billed_cpu_time_us)("r", end - start)("id", trx->id())("e", e)); if (!disable_subjective_enforcement) _account_fails.add(first_auth, e); @@ -2355,8 +2320,7 @@ producer_plugin_impl::push_result producer_plugin_impl::handle_push_result(const } } } else { - fc_tlog(_log, - "Subjective bill for success ${a}: ${b} elapsed ${t}us, time ${r}us", + fc_tlog(_log, "Subjective bill for success ${a}: ${b} elapsed ${t}us, time ${r}us", ("a", first_auth)("b", sub_bill)("t", trace->elapsed)("r", end - start)); // Transient trxs are dry-run or read-only. // Dry-run trxs only run in write window. Read-only trxs can run in @@ -2419,8 +2383,7 @@ bool producer_plugin_impl::process_unapplied_trxs(const fc::time_point& deadline ++itr; } - fc_dlog(_log, - "Processed ${m} of ${n} previously applied transactions, Applied ${applied}, Failed/Dropped ${failed}", + fc_dlog(_log, "Processed ${m} of ${n} previously applied transactions, Applied ${applied}, Failed/Dropped ${failed}", ("m", num_processed)("n", unapplied_trxs_size)("applied", num_applied)("failed", num_failed)); } return !exhausted; @@ -2558,8 +2521,7 @@ void producer_plugin_impl::process_scheduled_and_incoming_trxs(const fc::time_po } if (scheduled_trxs_size > 0) { - fc_dlog(_log, - "Processed ${m} of ${n} scheduled transactions, Applied ${applied}, Failed/Dropped ${failed}", + fc_dlog(_log, "Processed ${m} of ${n} scheduled transactions, Applied ${applied}, Failed/Dropped ${failed}", ("m", num_processed)("n", scheduled_trxs_size)("applied", num_applied)("failed", num_failed)); } } @@ -2680,8 +2642,7 @@ void producer_plugin_impl::schedule_maybe_produce_block(bool exhausted) { } else { EOS_ASSERT(chain.is_building_block(), missing_pending_block_state, "producing without pending_block_state"); _timer.expires_from_now(boost::posix_time::microseconds(0)); - fc_dlog(_log, - "Scheduling Block Production on ${desc} Block #${num} immediately", + fc_dlog(_log, "Scheduling Block Production on ${desc} Block #${num} immediately", ("num", chain.head_block_num() + 1)("desc", block_is_exhausted() ? 
"Exhausted" : "Deadline exceeded")); } @@ -2853,8 +2814,7 @@ void producer_plugin::log_failed_transaction(const transaction_id_type& trx_i void producer_plugin_impl::switch_to_write_window() { if (_log.is_enabled(fc::log_level::debug)) { auto now = fc::time_point::now(); - fc_dlog(_log, - "Read-only threads ${n}, read window ${r}us, total all threads ${t}us", + fc_dlog(_log, "Read-only threads ${n}, read window ${r}us, total all threads ${t}us", ("n", _ro_thread_pool_size)("r", now - _ro_read_window_start_time)("t", _ro_all_threads_exec_time_us.load())); } From dd8a05e36d91d1744a23f3e3fe641625fd645c1d Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 26 May 2023 10:07:00 -0400 Subject: [PATCH 11/16] More whitespace cleanup according to PR comments --- plugins/producer_plugin/producer_plugin.cpp | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index e6d912776e..ed9b9fe421 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -2224,10 +2224,9 @@ producer_plugin_impl::push_result producer_plugin_impl::push_transaction(const f if (!disable_subjective_enforcement && _account_fails.failure_limit(first_auth)) { if (next) { auto except_ptr = std::static_pointer_cast(std::make_shared( - FC_LOG_MESSAGE(error, - "transaction ${id} exceeded failure limit for account ${a} until ${next_reset_time}", - ("id", trx->id())("a", first_auth)( - "next_reset_time", _account_fails.next_reset_timepoint(chain.head_block_num(), chain.head_block_time()))))); + FC_LOG_MESSAGE(error, "transaction ${id} exceeded failure limit for account ${a} until ${next_reset_time}", + ("id", trx->id())("a", first_auth) + ("next_reset_time", _account_fails.next_reset_timepoint(chain.head_block_num(), chain.head_block_time()))))); log_trx_results(trx, except_ptr); next(except_ptr); } @@ -2485,8 +2484,7 @@ void producer_plugin_impl::process_scheduled_and_incoming_trxs(const fc::time_po } } else { fc_dlog(_trx_failed_trace_log, - "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING scheduled tx: ${txid}, time: ${r}, auth: ${a} : " - "${details}", + "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING scheduled tx: ${txid}, time: ${r}, auth: ${a} : ${details}", ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer())("txid", trx_id)("r", end - start) ("a", get_first_authorizer(trace))("details", get_detailed_contract_except_info(nullptr, trace, nullptr))); fc_dlog(_trx_trace_failure_log, @@ -2500,8 +2498,7 @@ void producer_plugin_impl::process_scheduled_and_incoming_trxs(const fc::time_po } else { _time_tracker.add_success_time(end - start, false); // delayed transaction cannot be transient fc_dlog(_trx_successful_trace_log, - "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING scheduled tx: ${txid}, time: ${r}, auth: ${a}, cpu: " - "${cpu}", + "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING scheduled tx: ${txid}, time: ${r}, auth: ${a}, cpu: ${cpu}", ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer())("txid", trx_id)("r", end - start) ("a", get_first_authorizer(trace))("cpu", trace->receipt ? 
trace->receipt->cpu_usage_us : 0)); fc_dlog(_trx_trace_success_log, From ce82a0b88282623ea419c3e7cc2a4400c16e628c Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 26 May 2023 10:09:54 -0400 Subject: [PATCH 12/16] More whitespace cleanup --- plugins/producer_plugin/producer_plugin.cpp | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index ed9b9fe421..858f56eec2 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -2798,12 +2798,10 @@ void producer_plugin::received_block(uint32_t block_num) { void producer_plugin::log_failed_transaction(const transaction_id_type& trx_id, const packed_transaction_ptr& packed_trx_ptr, const char* reason) const { - fc_dlog(_trx_log, - "[TRX_TRACE] Speculative execution is REJECTING tx: ${trx}", + fc_dlog(_trx_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${trx}", ("entire_trx", packed_trx_ptr ? my->chain_plug->get_log_trx(packed_trx_ptr->get_transaction()) : fc::variant{trx_id})); fc_dlog(_trx_failed_trace_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${txid} : ${why}", ("txid", trx_id)("why", reason)); - fc_dlog(_trx_trace_failure_log, - "[TRX_TRACE] Speculative execution is REJECTING tx: ${entire_trx}", + fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${entire_trx}", ("entire_trx", packed_trx_ptr ? my->chain_plug->get_log_trx(packed_trx_ptr->get_transaction()) : fc::variant{trx_id})); } @@ -2823,8 +2821,7 @@ void producer_plugin_impl::switch_to_write_window() { return; } - EOS_ASSERT(_ro_num_active_exec_tasks.load() == 0 && _ro_exec_tasks_fut.empty(), - producer_exception, + EOS_ASSERT(_ro_num_active_exec_tasks.load() == 0 && _ro_exec_tasks_fut.empty(), producer_exception, "no read-only tasks should be running before switching to write window"); start_write_window(); @@ -2858,8 +2855,7 @@ void producer_plugin_impl::start_write_window() { void producer_plugin_impl::switch_to_read_window() { chain::controller& chain = chain_plug->chain(); EOS_ASSERT(chain.is_write_window(), producer_exception, "expected to be in write window"); - EOS_ASSERT( - _ro_num_active_exec_tasks.load() == 0 && _ro_exec_tasks_fut.empty(), producer_exception, "_ro_exec_tasks_fut expected to be empty"); + EOS_ASSERT(_ro_num_active_exec_tasks.load() == 0 && _ro_exec_tasks_fut.empty(), producer_exception, "_ro_exec_tasks_fut expected to be empty"); _time_tracker.add_idle_time(fc::time_point::now() - _idle_trx_time); From ecedaca9679d4afef75761a9d82325e643041b06 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 26 May 2023 10:12:01 -0400 Subject: [PATCH 13/16] reduce backslash indentation --- plugins/producer_plugin/producer_plugin.cpp | 46 ++++++++++----------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 858f56eec2..a0768b4816 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -44,29 +44,29 @@ using std::string; using std::vector; #undef FC_LOG_AND_DROP -#define LOG_AND_DROP() \ - catch (const guard_exception& e) { \ - chain_plugin::handle_guard_exception(e); \ - } \ - catch (const std::bad_alloc&) { \ - chain_apis::api_base::handle_bad_alloc(); \ - } \ - catch (boost::interprocess::bad_alloc&) { \ - chain_apis::api_base::handle_db_exhaustion(); \ - } \ - catch (fc::exception & er) { \ - 
wlog("${details}", ("details", er.to_detail_string())); \ - } \ - catch (const std::exception& e) { \ - fc::exception fce(FC_LOG_MESSAGE(warn, "std::exception: ${what}: ", ("what", e.what())), \ - fc::std_exception_code, \ - BOOST_CORE_TYPEID(e).name(), \ - e.what()); \ - wlog("${details}", ("details", fce.to_detail_string())); \ - } \ - catch (...) { \ - fc::unhandled_exception e(FC_LOG_MESSAGE(warn, "unknown: ", ), std::current_exception()); \ - wlog("${details}", ("details", e.to_detail_string())); \ +#define LOG_AND_DROP() \ + catch (const guard_exception& e) { \ + chain_plugin::handle_guard_exception(e); \ + } \ + catch (const std::bad_alloc&) { \ + chain_apis::api_base::handle_bad_alloc(); \ + } \ + catch (boost::interprocess::bad_alloc&) { \ + chain_apis::api_base::handle_db_exhaustion(); \ + } \ + catch (fc::exception & er) { \ + wlog("${details}", ("details", er.to_detail_string())); \ + } \ + catch (const std::exception& e) { \ + fc::exception fce(FC_LOG_MESSAGE(warn, "std::exception: ${what}: ", ("what", e.what())), \ + fc::std_exception_code, \ + BOOST_CORE_TYPEID(e).name(), \ + e.what()); \ + wlog("${details}", ("details", fce.to_detail_string())); \ + } \ + catch (...) { \ + fc::unhandled_exception e(FC_LOG_MESSAGE(warn, "unknown: ", ), std::current_exception());\ + wlog("${details}", ("details", e.to_detail_string())); \ } const std::string logger_name("producer_plugin"); From cefd98b186eea70d928523392a0854e7a18778ce Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 26 May 2023 10:24:18 -0400 Subject: [PATCH 14/16] cleanup EOS_ASSERT whitespace --- plugins/producer_plugin/producer_plugin.cpp | 77 +++++++-------------- 1 file changed, 25 insertions(+), 52 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index a0768b4816..55b0dcdb24 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -608,10 +608,7 @@ class producer_plugin_impl : public std::enable_shared_from_thistimestamp < fc::minutes(5) || (blk_num % 1000 == 0)) // only log every 1000 during sync fc_dlog(_log, "received incoming block ${n} ${id}", ("n", blk_num)("id", id)); - EOS_ASSERT(block->timestamp < (now + fc::seconds(7)), - block_from_the_future, - "received a block from the future, ignoring it: ${id}", - ("id", id)); + EOS_ASSERT(block->timestamp < (now + fc::seconds(7)), block_from_the_future, "received a block from the future, ignoring it: ${id}", ("id", id)); /* de-dupe here... 
no point in aborting block if we already know the block */ auto existing = chain.fetch_block_by_id(id); @@ -864,10 +861,8 @@ class producer_plugin_impl : public std::enable_shared_from_thissecond(digest); } else { @@ -1050,19 +1045,15 @@ void producer_plugin_impl::plugin_initialize(const boost::program_options::varia } auto subjective_account_max_failures_window_size = options.at("subjective-account-max-failures-window-size").as(); - EOS_ASSERT(subjective_account_max_failures_window_size > 0, - plugin_config_exception, - "subjective-account-max-failures-window-size ${s} must be greater than 0", - ("s", subjective_account_max_failures_window_size)); + EOS_ASSERT(subjective_account_max_failures_window_size > 0, plugin_config_exception, + "subjective-account-max-failures-window-size ${s} must be greater than 0", ("s", subjective_account_max_failures_window_size)); _account_fails.set_max_failures_per_account(options.at("subjective-account-max-failures").as(), subjective_account_max_failures_window_size); uint32_t cpu_effort_pct = options.at("cpu-effort-percent").as(); - EOS_ASSERT(cpu_effort_pct >= 0 && cpu_effort_pct <= 100, - plugin_config_exception, - "cpu-effort-percent ${pct} must be 0 - 100", - ("pct", cpu_effort_pct)); + EOS_ASSERT(cpu_effort_pct >= 0 && cpu_effort_pct <= 100, plugin_config_exception, + "cpu-effort-percent ${pct} must be 0 - 100", ("pct", cpu_effort_pct)); cpu_effort_pct *= config::percent_1; _cpu_effort_us = EOS_PERCENT(config::block_interval_us, cpu_effort_pct); @@ -1094,10 +1085,8 @@ void producer_plugin_impl::plugin_initialize(const boost::program_options::varia auto max_incoming_transaction_queue_size = options.at("incoming-transaction-queue-size-mb").as() * 1024 * 1024; - EOS_ASSERT(max_incoming_transaction_queue_size > 0, - plugin_config_exception, - "incoming-transaction-queue-size-mb ${mb} must be greater than 0", - ("mb", max_incoming_transaction_queue_size)); + EOS_ASSERT(max_incoming_transaction_queue_size > 0, plugin_config_exception, + "incoming-transaction-queue-size-mb ${mb} must be greater than 0", ("mb", max_incoming_transaction_queue_size)); _unapplied_transactions.set_max_transaction_queue_size(max_incoming_transaction_queue_size); @@ -1156,8 +1145,7 @@ void producer_plugin_impl::plugin_initialize(const boost::program_options::varia } } } - EOS_ASSERT(producer_plugin::test_mode_ || _ro_thread_pool_size == 0 || _producers.empty(), - plugin_config_exception, + EOS_ASSERT(producer_plugin::test_mode_ || _ro_thread_pool_size == 0 || _producers.empty(), plugin_config_exception, "read-only-threads not allowed on producer node"); // only initialize other read-only options when read-only thread pool is enabled @@ -1181,18 +1169,15 @@ void producer_plugin_impl::plugin_initialize(const boost::program_options::varia meminfo_file.ignore(std::numeric_limits::max(), '\n'); } - EOS_ASSERT(vm_total_kb > 0, - plugin_config_exception, + EOS_ASSERT(vm_total_kb > 0, plugin_config_exception, "Unable to get system virtual memory size (not a Linux?), therefore cannot determine if the system has enough " "virtual memory for multi-threaded read-only transactions on EOS VM OC"); - EOS_ASSERT(vm_total_kb > vm_used_kb, - plugin_config_exception, + EOS_ASSERT(vm_total_kb > vm_used_kb, plugin_config_exception, "vm total (${t}) must be greater than vm used (${u})", ("t", vm_total_kb)("u", vm_used_kb)); uint32_t num_threads_supported = (vm_total_kb - vm_used_kb) / 4200000000; // reserve 1 for the app thread, 1 for anything else which might use VM - EOS_ASSERT(num_threads_supported > 
2, - plugin_config_exception, + EOS_ASSERT(num_threads_supported > 2, plugin_config_exception, "With the EOS VM OC configured, there is not enough system virtual memory to support the required minimum of " "3 threads (1 for main thread, 1 for read-only, and 1 for anything else), vm total: ${t}, vm used: ${u}", ("t", vm_total_kb)("u", vm_used_kb)); @@ -1202,8 +1187,7 @@ void producer_plugin_impl::plugin_initialize(const boost::program_options::varia "((vm total - vm used)/4.2 TB - 2): ${supp}, max allowed: ${max}, actual allowed: ${actual}", ("total", vm_total_kb)("used", vm_used_kb)("supp", num_threads_supported)("max", _ro_max_threads_allowed) ("actual", actual_threads_allowed)); - EOS_ASSERT(_ro_thread_pool_size <= actual_threads_allowed, - plugin_config_exception, + EOS_ASSERT(_ro_thread_pool_size <= actual_threads_allowed, plugin_config_exception, "read-only-threads (${th}) greater than number of threads allowed for EOS VM OC (${allowed})", ("th", _ro_thread_pool_size)("allowed", actual_threads_allowed)); } @@ -1304,18 +1288,14 @@ void producer_plugin_impl::plugin_startup() { chain::controller& chain = chain_plug->chain(); - EOS_ASSERT(_producers.empty() || chain.get_read_mode() != chain::db_read_mode::IRREVERSIBLE, - plugin_config_exception, + EOS_ASSERT(_producers.empty() || chain.get_read_mode() != chain::db_read_mode::IRREVERSIBLE, plugin_config_exception, "node cannot have any producer-name configured because block production is impossible when read_mode is \"irreversible\""); - EOS_ASSERT(_producers.empty() || chain.get_validation_mode() == chain::validation_mode::FULL, - plugin_config_exception, + EOS_ASSERT(_producers.empty() || chain.get_validation_mode() == chain::validation_mode::FULL, plugin_config_exception, "node cannot have any producer-name configured because block production is not safe when validation_mode is not \"full\""); - EOS_ASSERT(_producers.empty() || chain_plug->accept_transactions(), - plugin_config_exception, - "node cannot have any producer-name configured because no block production is possible with no " - "[api|p2p]-accepted-transactions"); + EOS_ASSERT(_producers.empty() || chain_plug->accept_transactions(), plugin_config_exception, + "node cannot have any producer-name configured because no block production is possible with no [api|p2p]-accepted-transactions"); _accepted_block_connection.emplace(chain.accepted_block.connect([this](const auto& bsp) { on_block(bsp); })); _accepted_block_header_connection.emplace(chain.accepted_block_header.connect([this](const auto& bsp) { on_block_header(bsp); })); @@ -1370,8 +1350,7 @@ void producer_plugin_impl::plugin_startup() { std::this_thread::sleep_for(1ms); ++time_slept_ms; } - EOS_ASSERT(num_threads_started.load() == _ro_thread_pool_size, - producer_exception, + EOS_ASSERT(num_threads_started.load() == _ro_thread_pool_size, producer_exception, "read-only threads failed to start. 
num_threads_started: ${n}, time_slept_ms: ${t}ms", ("n", num_threads_started.load())("t", time_slept_ms)); @@ -1554,17 +1533,14 @@ void producer_plugin_impl::schedule_protocol_feature_activations(const producer_ const chain::controller& chain = chain_plug->chain(); std::set set_of_features_to_activate(schedule.protocol_features_to_activate.begin(), schedule.protocol_features_to_activate.end()); - EOS_ASSERT(set_of_features_to_activate.size() == schedule.protocol_features_to_activate.size(), - invalid_protocol_features_to_activate, + EOS_ASSERT(set_of_features_to_activate.size() == schedule.protocol_features_to_activate.size(), invalid_protocol_features_to_activate, "duplicate digests"); chain.validate_protocol_features(schedule.protocol_features_to_activate); const auto& pfs = chain.get_protocol_feature_manager().get_protocol_feature_set(); for (auto& feature_digest : set_of_features_to_activate) { const auto& pf = pfs.get_protocol_feature(feature_digest); - EOS_ASSERT(!pf.preactivation_required, - protocol_feature_exception, - "protocol feature requires preactivation: ${digest}", - ("digest", feature_digest)); + EOS_ASSERT(!pf.preactivation_required, protocol_feature_exception, + "protocol feature requires preactivation: ${digest}", ("digest", feature_digest)); } _protocol_features_to_activate = schedule.protocol_features_to_activate; _protocol_features_signaled = false; @@ -2722,11 +2698,9 @@ static auto maybe_make_debug_time_logger() -> std::optionalchain(); - EOS_ASSERT(chain.is_building_block(), - missing_pending_block_state, + EOS_ASSERT(chain.is_building_block(), missing_pending_block_state, "pending_block_state does not exist but it should, another plugin may have corrupted it"); const auto& auth = chain.pending_block_signing_authority(); @@ -2741,8 +2715,7 @@ void producer_plugin_impl::produce_block() { } }); - EOS_ASSERT(relevant_providers.size() > 0, - producer_priv_key_not_found, + EOS_ASSERT(relevant_providers.size() > 0, producer_priv_key_not_found, "Attempting to produce a block for which we don't have any relevant private keys"); if (_protocol_features_signaled) { From fa07fd84f71be9e9906d4632433cb7c4f2973df7 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 26 May 2023 10:29:17 -0400 Subject: [PATCH 15/16] More whitespace cleanup --- plugins/producer_plugin/producer_plugin.cpp | 46 ++++++++++----------- 1 file changed, 22 insertions(+), 24 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 55b0dcdb24..1332ab7ae7 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -2234,16 +2234,17 @@ producer_plugin_impl::push_result producer_plugin_impl::push_transaction(const f trx, next, start, chain, trace, return_failure_trace, disable_subjective_enforcement, first_auth, sub_bill, prev_billed_cpu_time_us); } -producer_plugin_impl::push_result producer_plugin_impl::handle_push_result(const transaction_metadata_ptr& trx, - const next_function& next, - const fc::time_point& start, - chain::controller& chain, - const transaction_trace_ptr& trace, - bool return_failure_trace, - bool disable_subjective_enforcement, - account_name first_auth, - int64_t sub_bill, - uint32_t prev_billed_cpu_time_us) { +producer_plugin_impl::push_result +producer_plugin_impl::handle_push_result(const transaction_metadata_ptr& trx, + const next_function& next, + const fc::time_point& start, + chain::controller& chain, + const transaction_trace_ptr& trace, + bool return_failure_trace, 
+ bool disable_subjective_enforcement, + account_name first_auth, + int64_t sub_bill, + uint32_t prev_billed_cpu_time_us) { auto end = fc::time_point::now(); chain::subjective_billing& subjective_bill = chain.get_mutable_subjective_billing(); @@ -2746,15 +2747,16 @@ void producer_plugin_impl::produce_block() { br.total_time += fc::time_point::now() - start; if (_update_produced_block_metrics) { - _update_produced_block_metrics({.unapplied_transactions_total = _unapplied_transactions.size(), - .blacklisted_transactions_total = _blacklisted_transactions.size(), - .subjective_bill_account_size_total = chain.get_subjective_billing().get_account_cache_size(), - .scheduled_trxs_total = chain.db().get_index().size(), - .trxs_produced_total = new_bs->block->transactions.size(), - .cpu_usage_us = br.total_cpu_usage_us, - .net_usage_us = br.total_net_usage, - .last_irreversible = chain.last_irreversible_block_num(), - .head_block_num = chain.head_block_num()}); + _update_produced_block_metrics( + {.unapplied_transactions_total = _unapplied_transactions.size(), + .blacklisted_transactions_total = _blacklisted_transactions.size(), + .subjective_bill_account_size_total = chain.get_subjective_billing().get_account_cache_size(), + .scheduled_trxs_total = chain.db().get_index().size(), + .trxs_produced_total = new_bs->block->transactions.size(), + .cpu_usage_us = br.total_cpu_usage_us, + .net_usage_us = br.total_net_usage, + .last_irreversible = chain.last_irreversible_block_num(), + .head_block_num = chain.head_block_num()}); } ilog("Produced block ${id}... #${n} @ ${t} signed by ${p} " @@ -2958,11 +2960,7 @@ bool producer_plugin_impl::push_read_only_transaction(transaction_metadata_ptr t // Ensure the trx to finish by the end of read-window or write-window or block_deadline depending on auto trace = chain.push_transaction(trx, window_deadline, _ro_max_trx_time_us, 0, false, 0); _ro_all_threads_exec_time_us += (fc::time_point::now() - start).count(); - auto pr = handle_push_result(trx, - next, - start, - chain, - trace, + auto pr = handle_push_result(trx, next, start, chain, trace, true /*return_failure_trace*/, true /*disable_subjective_enforcement*/, {} /*first_auth*/, From 626b437b7ff8e79cfb9de22469dfa4c5071657ed Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 26 May 2023 13:22:15 -0400 Subject: [PATCH 16/16] More whitespace fix according to PR review. 
--- plugins/producer_plugin/producer_plugin.cpp | 25 ++++++++++----------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 1332ab7ae7..81267e8c2f 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -1216,8 +1216,7 @@ void producer_plugin_impl::plugin_initialize(const boost::program_options::varia "plus ${min} us, required: ${read} us > (${trx_time} us + ${min} us).", ("read", _ro_read_window_time_us)("trx_time", _max_transaction_time_ms.load() * 1000)("min", _ro_read_window_minimum_time_us)); } - ilog("read-only-write-window-time-us: ${ww} us, read-only-read-window-time-us: ${rw} us, effective read window time to be used: ${w} " - "us", + ilog("read-only-write-window-time-us: ${ww} us, read-only-read-window-time-us: ${rw} us, effective read window time to be used: ${w} us", ("ww", _ro_write_window_time_us)("rw", _ro_read_window_time_us)("w", _ro_read_window_effective_time_us)); } @@ -1891,14 +1890,12 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { if (current_watermark) { auto watermark_bn = current_watermark->first; if (watermark_bn < hbs->block_num) { - blocks_to_confirm = - (uint16_t)(std::min(std::numeric_limits::max(), (uint32_t)(hbs->block_num - watermark_bn))); + blocks_to_confirm = (uint16_t)(std::min(std::numeric_limits::max(), (uint32_t)(hbs->block_num - watermark_bn))); } } // can not confirm irreversible blocks - blocks_to_confirm = - (uint16_t)(std::min(blocks_to_confirm, (uint32_t)(hbs->block_num - hbs->dpos_irreversible_blocknum))); + blocks_to_confirm = (uint16_t)(std::min(blocks_to_confirm, (uint32_t)(hbs->block_num - hbs->dpos_irreversible_blocknum))); } abort_block(); @@ -2194,8 +2191,10 @@ producer_plugin_impl::push_result producer_plugin_impl::push_transaction(const f auto first_auth = trx->packed_trx()->get_transaction().first_authorizer(); - bool disable_subjective_enforcement = (api_trx && _disable_subjective_api_billing) || (!api_trx && _disable_subjective_p2p_billing) || - subjective_bill.is_account_disabled(first_auth) || trx->is_transient(); + bool disable_subjective_enforcement = (api_trx && _disable_subjective_api_billing) || + (!api_trx && _disable_subjective_p2p_billing) || + subjective_bill.is_account_disabled(first_auth) || + trx->is_transient(); if (!disable_subjective_enforcement && _account_fails.failure_limit(first_auth)) { if (next) { @@ -2961,11 +2960,11 @@ bool producer_plugin_impl::push_read_only_transaction(transaction_metadata_ptr t auto trace = chain.push_transaction(trx, window_deadline, _ro_max_trx_time_us, 0, false, 0); _ro_all_threads_exec_time_us += (fc::time_point::now() - start).count(); auto pr = handle_push_result(trx, next, start, chain, trace, - true /*return_failure_trace*/, - true /*disable_subjective_enforcement*/, - {} /*first_auth*/, - 0 /*sub_bill*/, - 0 /*prev_billed_cpu_time_us*/); + true, // return_failure_trace + true, // disable_subjective_enforcement + {}, // first_auth + 0, // sub_bill + 0); // prev_billed_cpu_time_us // If a transaction was exhausted, that indicates we are close to // the end of read window. Retry in next round. retry = pr.trx_exhausted;
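
Illustrative note (not part of the patch series above): the plugin_initialize() hunks reformatted in PATCH 14/16 log and assert the EOS VM OC read-only thread budget as "(vm total - vm used)/4.2 TB - 2", capped by the configured maximum. The standalone C++ sketch below only walks through that arithmetic with made-up inputs; vm_total_kb, vm_used_kb and ro_max_threads_allowed are hypothetical sample values, and the exact statement ordering inside the plugin may differ from this sketch.

    // Sketch of the read-only thread budgeting arithmetic, under the assumptions above.
    #include <algorithm>
    #include <cstdint>
    #include <iostream>

    int main() {
       const uint64_t vm_total_kb = 35'000'000'000ull; // hypothetical total virtual memory, in KB (~35 TB)
       const uint64_t vm_used_kb  = 400'000'000ull;    // hypothetical virtual memory already used, in KB (~0.4 TB)
       const uint32_t ro_max_threads_allowed = 8;      // hypothetical upper bound on read-only threads

       // Each EOS VM OC read-only thread is budgeted ~4.2 TB of virtual address space
       // (4'200'000'000 KB); two threads' worth is reserved for the main thread and
       // for anything else that might use virtual memory, per the logged formula.
       const uint32_t threads_supported =
          static_cast<uint32_t>((vm_total_kb - vm_used_kb) / 4'200'000'000ull) - 2;

       const uint32_t actual_threads_allowed = std::min(ro_max_threads_allowed, threads_supported);

       std::cout << "threads supported: " << threads_supported              // 6 with the inputs above
                 << ", actual allowed: " << actual_threads_allowed << '\n'; // min(8, 6) == 6
    }

With these sample numbers the configured read-only-threads value would have to be 6 or less to pass the corresponding EOS_ASSERT shown in the patches.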