Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[3.1 -> main] net_plugin delay connecting to peers #410

Merged
merged 6 commits into from
Jun 17, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
55 changes: 31 additions & 24 deletions plugins/net_plugin/net_plugin.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -328,6 +328,7 @@ namespace eosio {
constexpr static uint16_t to_protocol_version(uint16_t v);

connection_ptr find_connection(const string& host)const; // must call with held mutex
string connect( const string& host );
};

const fc::string logger_name("net_plugin_impl");
Expand Down Expand Up @@ -1469,6 +1470,7 @@ namespace eosio {
void connection::sync_timeout( boost::system::error_code ec ) {
if( !ec ) {
my_impl->sync_master->sync_reassign_fetch( shared_from_this(), benign_other );
close(true);
} else if( ec != boost::asio::error::operation_aborted ) { // don't log on operation_aborted, called on destroy
peer_elog( this, "setting timer for sync request got error ${ec}", ("ec", ec.message()) );
}
Expand Down Expand Up @@ -3697,19 +3699,6 @@ namespace eosio {
}
}

if( my->acceptor ) {
try {
my->acceptor->open(listen_endpoint.protocol());
my->acceptor->set_option(tcp::acceptor::reuse_address(true));
my->acceptor->bind(listen_endpoint);
my->acceptor->listen();
} catch (const std::exception& e) {
elog( "net_plugin::plugin_startup failed to bind to port ${port}", ("port", listen_endpoint.port()) );
throw e;
}
fc_ilog( logger, "starting listener, max clients is ${mc}",("mc",my->max_client_count) );
my->start_listen_loop();
}
{
chain::controller& cc = my->chain_plug->chain();
cc.accepted_block.connect( [my = my]( const block_state_ptr& s ) {
Expand All @@ -3727,18 +3716,32 @@ namespace eosio {
std::lock_guard<std::mutex> g( my->keepalive_timer_mtx );
my->keepalive_timer.reset( new boost::asio::steady_timer( my->thread_pool->get_executor() ) );
}
my->ticker();

my->incoming_transaction_ack_subscription = app().get_channel<compat::channels::transaction_ack>().subscribe(
std::bind(&net_plugin_impl::transaction_ack, my.get(), std::placeholders::_1));

my->start_monitors();

my->update_chain_info();
app().post(priority::highest, [my=my, listen_endpoint](){
if( my->acceptor ) {
try {
my->acceptor->open(listen_endpoint.protocol());
my->acceptor->set_option(tcp::acceptor::reuse_address(true));
my->acceptor->bind(listen_endpoint);
my->acceptor->listen();
} catch (const std::exception& e) {
elog( "net_plugin::plugin_startup failed to bind to port ${port}", ("port", listen_endpoint.port()) );
throw e;
}
fc_ilog( logger, "starting listener, max clients is ${mc}",("mc",my->max_client_count) );
my->start_listen_loop();
}

for( const auto& seed_node : my->supplied_peers ) {
connect( seed_node );
}
my->ticker();
my->start_monitors();
my->update_chain_info();
for( const auto& seed_node : my->supplied_peers ) {
my->connect( seed_node );
}
});

} catch( ... ) {
// always want plugin_shutdown even on exception
Expand Down Expand Up @@ -3799,16 +3802,20 @@ namespace eosio {
* Used to trigger a new connection from RPC API
*/
string net_plugin::connect( const string& host ) {
   // Thin public wrapper: all connection bookkeeping (mutex acquisition,
   // duplicate detection, insertion into the connection set) lives in
   // net_plugin_impl::connect so it can also be used internally (e.g. when
   // connecting to supplied seed peers after startup).
   return my->connect( host );
}

// Create and register an outbound connection to `host` ("host:port" string).
// Returns a short status string for the RPC caller.
// Thread-safe: takes connections_mtx exclusively for the duration.
string net_plugin_impl::connect( const string& host ) {
   std::lock_guard<std::shared_mutex> g( connections_mtx );
   if( find_connection( host ) )
      return "already connected";

   connection_ptr c = std::make_shared<connection>( host );
   fc_dlog( logger, "calling active connector: ${h}", ("h", host) );
   if( c->resolve_and_connect() ) {
      fc_dlog( logger, "adding new connection to the list: ${host} ${cid}", ("host", host)("cid", c->connection_id) );
      c->set_heartbeat_timeout( heartbeat_timeout );
      connections.insert( c );
   }
   // NOTE(review): "added connection" is returned even when
   // resolve_and_connect() fails — the caller cannot distinguish the two
   // outcomes; confirm this is the intended RPC contract.
   return "added connection";
}
Expand Down
10 changes: 6 additions & 4 deletions tests/Node.py
Original file line number Diff line number Diff line change
Expand Up @@ -1388,10 +1388,12 @@ def getSupportedProtocolFeatureDict(self, excludeDisabled=False, excludeUnactiva
break
return protocolFeatureDigestDict

def waitForHeadToAdvance(self, blocksToAdvance=1, timeout=None):
    """Wait until the node's head block number advances by at least
    blocksToAdvance beyond its value at the time of the call.

    blocksToAdvance -- how many blocks the head must gain (default 1,
        preserving the original one-block behavior for existing callers).
    timeout -- seconds to wait; when None, scale with the number of
        blocks requested (6s base plus half a second per block, matching
        the ~0.5s block interval).
    Returns the boolean result of Utils.waitForBool (True if the head
    advanced in time, falsy on timeout).
    """
    currentHead = self.getHeadBlockNum()
    if timeout is None:
        timeout = 6 + blocksToAdvance / 2
    def isHeadAdvancing():
        # >= so that advancing past the target also counts as success.
        return self.getHeadBlockNum() >= currentHead + blocksToAdvance
    return Utils.waitForBool(isHeadAdvancing, timeout)

def waitForLibToAdvance(self, timeout=30):
Expand Down Expand Up @@ -1422,7 +1424,7 @@ def activatePreactivateFeature(self):
self.scheduleProtocolFeatureActivations([preactivateFeatureDigest])

# Wait for the next block to be produced so the scheduled protocol feature is activated
assert self.waitForHeadToAdvance(), "ERROR: TIMEOUT WAITING FOR PREACTIVATE"
assert self.waitForHeadToAdvance(blocksToAdvance=2), print("ERROR: TIMEOUT WAITING FOR PREACTIVATE")

# Return an array of feature digests to be preactivated in a correct order respecting dependencies
# Require producer_api_plugin
Expand All @@ -1449,7 +1451,7 @@ def preactivateProtocolFeatures(self, featureDigests:list):
if trans is None or not trans[0]:
Utils.Print("ERROR: Failed to preactive digest {}".format(digest))
return None
self.waitForHeadToAdvance()
self.waitForHeadToAdvance(blocksToAdvance=2)

# Require PREACTIVATE_FEATURE to be activated and require eosio.bios with preactivate_feature
def preactivateAllBuiltinProtocolFeature(self):
Expand Down