From 12924afa0a9f945c847e8969004e12ce44bae48c Mon Sep 17 00:00:00 2001
From: Kevin Heifner
Date: Fri, 31 Mar 2023 14:10:05 -0500
Subject: [PATCH 1/7] Add test for forking in SHiP

---
 tests/CMakeLists.txt        |   6 +-
 tests/ship_client.cpp       |   2 +
 tests/ship_streamer.cpp     |  31 ++++++++
 tests/ship_streamer_test.py | 149 ++++++++++++++++++++++++++++++------
 tests/ship_test.py          |   4 +-
 5 files changed, 164 insertions(+), 28 deletions(-)

diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 5b17d23d1d..01a5d413d5 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -98,12 +98,12 @@ target_link_libraries(ship_client abieos Boost::program_options Boost::system Th
 add_executable(ship_streamer ship_streamer.cpp)
 target_link_libraries(ship_streamer abieos Boost::program_options Boost::system Threads::Threads)
 
-add_test(NAME ship_test COMMAND tests/ship_test.py -v --num-clients 1 --num-requests 5000 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
+add_test(NAME ship_test COMMAND tests/ship_test.py -v --num-clients 3 --num-requests 5000 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
 set_property(TEST ship_test PROPERTY LABELS nonparallelizable_tests)
-add_test(NAME ship_test_unix COMMAND tests/ship_test.py -v --num-clients 1 --num-requests 5000 --clean-run --dump-error-detail --unix-socket WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
+add_test(NAME ship_test_unix COMMAND tests/ship_test.py -v --num-clients 3 --num-requests 5000 --clean-run --dump-error-detail --unix-socket WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
 set_property(TEST ship_test_unix PROPERTY LABELS nonparallelizable_tests)
 
-add_test(NAME ship_streamer_test COMMAND tests/ship_streamer_test.py -v --num-clients 1 --num-blocks 50 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
+add_test(NAME ship_streamer_test COMMAND tests/ship_streamer_test.py -v --num-clients 3 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
 set_property(TEST ship_streamer_test PROPERTY LABELS nonparallelizable_tests)
 
 add_test(NAME p2p_dawn515_test COMMAND tests/p2p_tests/dawn_515/test.sh WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
diff --git a/tests/ship_client.cpp b/tests/ship_client.cpp
index b64abea596..3c383b4074 100644
--- a/tests/ship_client.cpp
+++ b/tests/ship_client.cpp
@@ -125,6 +125,8 @@ int main(int argc, char* argv[]) {
       eosio::check(result_doucment[1]["head"].IsObject(), "'head' is not an object");
       eosio::check(result_doucment[1]["head"].HasMember("block_num"), "'head' does not contain 'block_num'");
       eosio::check(result_doucment[1]["head"]["block_num"].IsUint(), "'head.block_num' isn't a number");
+      eosio::check(result_doucment[1]["head"].HasMember("block_id"), "'head' does not contain 'block_id'");
+      eosio::check(result_doucment[1]["head"]["block_id"].IsString(), "'head.block_id' isn't a string");
 
       uint32_t this_block_num = result_doucment[1]["head"]["block_num"].GetUint();
diff --git a/tests/ship_streamer.cpp b/tests/ship_streamer.cpp
index 9132f136ac..04e9683b6d 100644
--- a/tests/ship_streamer.cpp
+++ b/tests/ship_streamer.cpp
@@ -110,6 +110,8 @@ int main(int argc, char* argv[]) {
       stream.binary(true);
       stream.write(boost::asio::buffer(request_type.json_to_bin(request_sb.GetString(), [](){})));
 
+      // block_num, block_id
+      std::map<uint32_t, std::string> block_ids;
       bool is_first = true;
       for(;;) {
         boost::beast::flat_buffer buffer;
@@ -128,12 +130,41 @@ int main(int argc, char* argv[]) {
         eosio::check(result_doucment[1]["head"].IsObject(), "'head' is not an object");
        eosio::check(result_doucment[1]["head"].HasMember("block_num"), "'head' does not contain 'block_num'");
        eosio::check(result_doucment[1]["head"]["block_num"].IsUint(), "'head.block_num' isn't a number");
+       eosio::check(result_doucment[1]["head"].HasMember("block_id"), "'head' does not contain 'block_id'");
+       eosio::check(result_doucment[1]["head"]["block_id"].IsString(), "'head.block_num' isn't a string");
 
        uint32_t this_block_num = 0;
        if( result_doucment[1].HasMember("this_block") && result_doucment[1]["this_block"].IsObject() ) {
           if( result_doucment[1]["this_block"].HasMember("block_num") && result_doucment[1]["this_block"]["block_num"].IsUint() ) {
              this_block_num = result_doucment[1]["this_block"]["block_num"].GetUint();
           }
+          std::string this_block_id;
+          if( result_doucment[1]["this_block"].HasMember("block_id") && result_doucment[1]["this_block"]["block_id"].IsString() ) {
+             this_block_id = result_doucment[1]["this_block"]["block_id"].GetString();
+          }
+          std::string prev_block_id;
+          if( result_doucment[1]["prev_block"].HasMember("block_id") && result_doucment[1]["prev_block"]["block_id"].IsString() ) {
+             prev_block_id = result_doucment[1]["prev_block"]["block_id"].GetString();
+          }
+          if( !irreversible_only && !this_block_id.empty() && !prev_block_id.empty() ) {
+             // verify forks were sent
+             if (block_ids.count(this_block_num-1)) {
+                if (block_ids[this_block_num-1] != prev_block_id) {
+                   std::cerr << "Received block: << " << this_block_num << " that does not link to previous: " << block_ids[this_block_num-1] << std::endl;
+                   return 1;
+                }
+             }
+             block_ids[this_block_num] = this_block_id;
+
+             if( result_doucment[1]["last_irreversible"].HasMember("block_num") && result_doucment[1]["last_irreversible"]["block_num"].IsUint() ) {
+                uint32_t lib_num = result_doucment[1]["last_irreversible"]["block_num"].GetUint();
+                auto i = block_ids.lower_bound(lib_num);
+                if (i != block_ids.end()) {
+                   block_ids.erase(block_ids.begin(), i);
+                }
+             }
+          }
+
        }
 
        if(is_first) {
diff --git a/tests/ship_streamer_test.py b/tests/ship_streamer_test.py
index b9b33d9255..11fb2632ad 100755
--- a/tests/ship_streamer_test.py
+++ b/tests/ship_streamer_test.py
@@ -9,32 +9,28 @@
 
 from TestHarness import Cluster, TestHelper, Utils, WalletMgr
 from TestHarness.TestHelper import AppArgs
+from core_symbol import CORE_SYMBOL
 
 ###############################################################
 # ship_streamer_test
 #
-# This test sets up <-p> producing node(s) and <-n - -p>
-# non-producing node(s). One of the non-producing nodes
-# is configured with the state_history_plugin. An instance
-# of node will be started with ship_streamer to exercise
-# the SHiP API.
+# This test sets up 2 producing nodes and one "bridge" node using test_control_api_plugin.
+# One producing node has 3 of the elected producers and the other has 1 of the elected producers.
+# All the producers are named in alphabetical order, so that the 3 producers, in the one production node, are
+# scheduled first, followed by the 1 producer in the other producer node. Each producing node is only connected
+# to the other producing node via the "bridge" node.
+# The bridge node has the test_control_api_plugin, that the test uses to kill
+# the "bridge" node to generate a fork.
 #
 ###############################################################
 
 Print=Utils.Print
 
 appArgs = AppArgs()
-extraArgs = appArgs.add(flag="--num-blocks", type=int, help="How many blocsk to stream from ship_streamer", default=20)
 extraArgs = appArgs.add(flag="--num-clients", type=int, help="How many ship_streamers should be started", default=1)
-args = TestHelper.parse_args({"-p", "-n","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run"}, applicationSpecificArgs=appArgs)
+args = TestHelper.parse_args({"--dump-error-details","--keep-logs","-v","--leave-running","--clean-run"}, applicationSpecificArgs=appArgs)
 
 Utils.Debug=args.v
-totalProducerNodes=args.p
-totalNodes=args.n
-if totalNodes<=totalProducerNodes:
-    totalNodes=totalProducerNodes+1
-totalNonProducerNodes=totalNodes-totalProducerNodes
-totalProducers=totalProducerNodes
 cluster=Cluster(walletd=True)
 dumpErrorDetails=args.dump_error_details
 keepLogs=args.keep_logs
@@ -42,6 +38,12 @@
 killAll=args.clean_run
 walletPort=TestHelper.DEFAULT_WALLET_PORT
 
+totalProducerNodes=2
+totalNonProducerNodes=1
+totalNodes=totalProducerNodes+totalNonProducerNodes
+maxActiveProducers=21
+totalProducers=maxActiveProducers
+
 walletMgr=WalletMgr(True, port=walletPort)
 testSuccessful=False
 killEosInstances=not dontKill
@@ -57,12 +59,20 @@
     cluster.killall(allInstances=killAll)
     cluster.cleanup()
     Print("Stand up cluster")
+
+
+    # *** setup topogrophy ***
+
+    # "bridge" shape connects defprocera through defproducerc (3 in node0) to each other and defproduceru (1 in node01)
+    # and the only connection between those 2 groups is through the bridge node
+
+    shipNodeNum = 1
     specificExtraNodeosArgs={}
-    # non-producing nodes are at the end of the cluster's nodes, so reserving the last one for state_history_plugin
-    shipNodeNum = totalNodes - 1
-    specificExtraNodeosArgs[shipNodeNum]="--plugin eosio::state_history_plugin --disable-replay-opts --trace-history --sync-fetch-span 200 --plugin eosio::net_api_plugin "
+    specificExtraNodeosArgs[shipNodeNum]="--plugin eosio::state_history_plugin --disable-replay-opts --trace-history --chain-state-history --plugin eosio::net_api_plugin "
+    # producer nodes will be mapped to 0 through totalProducerNodes-1, so the number totalProducerNodes will be the non-producing node
+    specificExtraNodeosArgs[totalProducerNodes]="--plugin eosio::test_control_api_plugin "
 
-    if cluster.launch(pnodes=totalProducerNodes,
+    if cluster.launch(topo="bridge", pnodes=totalProducerNodes,
                       totalNodes=totalNodes, totalProducers=totalProducers,
                       useBiosBootFile=False, specificExtraNodeosArgs=specificExtraNodeosArgs) is False:
         Utils.cmdError("launcher")
@@ -70,15 +80,78 @@
 
     # *** identify each node (producers and non-producing node) ***
 
-    shipNode = cluster.getNode(shipNodeNum)
-    prodNode = cluster.getNode(0)
-
     #verify nodes are in sync and advancing
     cluster.waitOnClusterSync(blockAdvancing=5)
     Print("Cluster in Sync")
 
+    prodNode = cluster.getNode(0)
+    prodNode0 = prodNode
+    prodNode1 = cluster.getNode(1)
+    nonProdNode = cluster.getNode(2)
+    shipNode = cluster.getNode(shipNodeNum)
+
+
+    accounts=cluster.createAccountKeys(6)
+    if accounts is None:
+        Utils.errorExit("FAILURE - create keys")
+
+    accounts[0].name="testeraaaaaa"
+    accounts[1].name="tester111111" # needed for voting
+    accounts[2].name="tester222222" # needed for voting
+    accounts[3].name="tester333333" # needed for voting
+    accounts[4].name="tester444444" # needed for voting
+    accounts[5].name="tester555555" # needed for voting
+
+    testWalletName="test"
+
+    Print("Creating wallet \"%s\"." % (testWalletName))
+    testWallet=walletMgr.create(testWalletName, [cluster.eosioAccount,accounts[0],accounts[1],accounts[2],accounts[3],accounts[4],accounts[5]])
+
+    for _, account in cluster.defProducerAccounts.items():
+        walletMgr.importKey(account, testWallet, ignoreDupKeyWarning=True)
+
+    for i in range(0, totalNodes):
+        node=cluster.getNode(i)
+        node.producers=Cluster.parseProducers(i)
+        numProducers=len(node.producers)
+        for prod in node.producers:
+            prodName = cluster.defProducerAccounts[prod].name
+            if prodName == "defproducera" or prodName == "defproducerb" or prodName == "defproducerc" or prodName == "defproduceru":
+                Print("Register producer %s" % cluster.defProducerAccounts[prod].name)
+                trans=node.regproducer(cluster.defProducerAccounts[prod], "http::/mysite.com", 0, waitForTransBlock=False, exitOnError=True)
+
+    # create accounts via eosio as otherwise a bid is needed
+    for account in accounts:
+        Print("Create new account %s via %s with private key: %s" % (account.name, cluster.eosioAccount.name, account.activePrivateKey))
+        trans=nonProdNode.createInitializeAccount(account, cluster.eosioAccount, stakedDeposit=0, waitForTransBlock=True, stakeNet=10000, stakeCPU=10000, buyRAM=10000000, exitOnError=True)
+        transferAmount="100000000.0000 {0}".format(CORE_SYMBOL)
+        Print("Transfer funds %s from account %s to %s" % (transferAmount, cluster.eosioAccount.name, account.name))
+        nonProdNode.transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer", waitForTransBlock=True)
+        trans=nonProdNode.delegatebw(account, 20000000.0000, 20000000.0000, waitForTransBlock=False, exitOnError=True)
+
+    # *** vote using accounts ***
+
+    cluster.waitOnClusterSync(blockAdvancing=3)
     start_block_num = shipNode.getBlockNum()
-    end_block_num = start_block_num + args.num_blocks
+
+    # vote a,b,c u
+    voteProducers=[]
+    voteProducers.append("defproducera")
+    voteProducers.append("defproducerb")
+    voteProducers.append("defproducerc")
+    voteProducers.append("defproduceru")
+    for account in accounts:
+        Print("Account %s vote for producers=%s" % (account.name, voteProducers))
+        trans=prodNode.vote(account, voteProducers, exitOnError=True, waitForTransBlock=False)
+
+    #verify nodes are in sync and advancing
+    cluster.waitOnClusterSync(blockAdvancing=3)
+    Print("Shutdown unneeded bios node")
+    cluster.biosNode.kill(signal.SIGTERM)
+
+    prodNode0.waitForProducer("defproducerc")
+
+    block_range = 350
+    end_block_num = start_block_num + block_range
 
     shipClient = "tests/ship_streamer"
     cmd = "%s --start-block-num %d --end-block-num %d --fetch-block --fetch-traces --fetch-deltas" % (shipClient, start_block_num, end_block_num)
@@ -101,8 +174,38 @@
         files.append((outFile, errFile))
         Print("Client %d started, Ship node head is: %s" % (i, shipNode.getBlockNum()))
 
-    Print("Stopping all %d clients" % (args.num_clients))
+    # Generate a fork
+    forkAtProducer="defproducera"
+    prodNode1Prod="defproduceru"
+    preKillBlockNum=nonProdNode.getBlockNum()
+    preKillBlockProducer=nonProdNode.getBlockProducerByNum(preKillBlockNum)
+    nonProdNode.killNodeOnProducer(producer=forkAtProducer, whereInSequence=1)
+    Print("Current block producer %s fork will be at producer %s" % (preKillBlockProducer, forkAtProducer))
+    prodNode0.waitForProducer(forkAtProducer)
+    prodNode1.waitForProducer(prodNode1Prod)
+    if nonProdNode.verifyAlive(): # if on defproducera, need to wait again
+        prodNode0.waitForProducer(forkAtProducer)
+        prodNode1.waitForProducer(prodNode1Prod)
+    if nonProdNode.verifyAlive():
+        Utils.errorExit("Bridge did not shutdown");
+    Print("Fork started")
+
+    prodNode0.waitForProducer("defproducerb") # wait for fork to progress a bit
+
+    Print("Restore fork")
+    Print("Relaunching the non-producing bridge node to connect the producing nodes again")
+    if nonProdNode.verifyAlive():
+        Utils.errorExit("Bridge is already running");
+    if not nonProdNode.relaunch():
+        Utils.errorExit("Failure - (non-production) node %d should have restarted" % (nonProdNode.nodeNum))
+
+    nonProdNode.waitForProducer(forkAtProducer)
+    nonProdNode.waitForProducer(prodNode1Prod)
+    afterForkBlockNum = nonProdNode.getBlockNum()
+    if int(afterForkBlockNum) > int(end_block_num):
+        Utils.errorExit(f"Did not stream long enough {end_block_num} to cover the fork {afterForkBlockNum}, increase block_range {block_range}")
 
+    Print("Stopping all %d clients" % (args.num_clients))
     for index, (popen, _), (out, err), start in zip(range(len(clients)), clients, files, starts):
         popen.wait()
         Print("Stopped client %d. Ran for %.3f seconds." % (index, time.perf_counter() - start))
@@ -112,7 +215,7 @@
         data = json.load(outFile)
         block_num = start_block_num
         for i in data:
-            print(i)
+            #print(i)
             assert block_num == i['get_blocks_result_v0']['this_block']['block_num'], f"{block_num} != {i['get_blocks_result_v0']['this_block']['block_num']}"
             assert isinstance(i['get_blocks_result_v0']['block'], str) # verify block in result
             block_num += 1
diff --git a/tests/ship_test.py b/tests/ship_test.py
index 4171f3ce6d..c006772f73 100755
--- a/tests/ship_test.py
+++ b/tests/ship_test.py
@@ -132,7 +132,7 @@
         try:
             statuses = json.loads(" ".join(lines))
         except json.decoder.JSONDecodeError as er:
-            Utils.errorExit("javascript client output was malformed in %s. Exception: %s" % (shipClientErrorFile, er))
+            Utils.errorExit("ship_client output was malformed in %s. Exception: %s" % (shipClientErrorFile, er))
 
         for status in statuses:
             statusDesc = status["status"]
@@ -143,7 +143,7 @@
             maxFirstBN = max(maxFirstBN, firstBlockNum)
             minLastBN = min(minLastBN, lastBlockNum)
             if statusDesc == "error":
-                Utils.errorExit("javascript client reporting error see: %s." % (shipClientErrorFile))
+                Utils.errorExit("ship_client reporting error see: %s." % (shipClientErrorFile))
 
     assert done, Print("ERROR: Did not find a \"done\" status for client %d" % (i))

From fa8d025ca8870632730c185390605ba8b65e39c1 Mon Sep 17 00:00:00 2001
From: Kevin Heifner
Date: Fri, 31 Mar 2023 14:44:12 -0500
Subject: [PATCH 2/7] Make sure SHiP sends new blocks on forks

---
 plugins/state_history_plugin/state_history_plugin.cpp | 1 +
 tests/ship_streamer_test.py                           | 6 +++++-
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/plugins/state_history_plugin/state_history_plugin.cpp b/plugins/state_history_plugin/state_history_plugin.cpp
index a38ddafe52..964ff62b6e 100644
--- a/plugins/state_history_plugin/state_history_plugin.cpp
+++ b/plugins/state_history_plugin/state_history_plugin.cpp
@@ -351,6 +351,7 @@ struct state_history_plugin_impl : std::enable_shared_from_this<state_history_p
 
       get_blocks_result_v0 result;
      result.head = {block_state->block_num, block_state->id};
+      to_send_block_num = std::min(block_state->block_num, to_send_block_num);
       send_update(std::move(result), block_state);
    }
diff --git a/tests/ship_streamer_test.py b/tests/ship_streamer_test.py
index 11fb2632ad..7bc95253d7 100755
--- a/tests/ship_streamer_test.py
+++ b/tests/ship_streamer_test.py
@@ -216,7 +216,11 @@
         block_num = start_block_num
         for i in data:
             #print(i)
-            assert block_num == i['get_blocks_result_v0']['this_block']['block_num'], f"{block_num} != {i['get_blocks_result_v0']['this_block']['block_num']}"
+            # fork can cause block numbers to be repeated
+            this_block_num = i['get_blocks_result_v0']['this_block']['block_num']
+            if this_block_num < block_num:
+                block_num = this_block_num
+            assert block_num == this_block_num, f"{block_num} != {this_block_num}"
             assert isinstance(i['get_blocks_result_v0']['block'], str) # verify block in result
             block_num += 1
         assert block_num-1 == end_block_num, f"{block_num-1} != {end_block_num}"

From 6fb28e5f7dd22d8f4f052cac626422039e106537 Mon Sep 17 00:00:00 2001
From: Kevin Heifner
Date: Fri, 31 Mar 2023 14:59:35 -0500
Subject: [PATCH 3/7] ship_streamer_test now the longest running
 nonparallelizable_tests, so make it long_running_test

---
 tests/CMakeLists.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 01a5d413d5..4f9f35aa87 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -104,7 +104,7 @@ add_test(NAME ship_test_unix COMMAND tests/ship_test.py -v --num-clients 3 --num
 set_property(TEST ship_test_unix PROPERTY LABELS nonparallelizable_tests)
 
 add_test(NAME ship_streamer_test COMMAND tests/ship_streamer_test.py -v --num-clients 3 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
-set_property(TEST ship_streamer_test PROPERTY LABELS nonparallelizable_tests)
+set_property(TEST ship_streamer_test PROPERTY LABELS long_running_tests)
 
 add_test(NAME p2p_dawn515_test COMMAND tests/p2p_tests/dawn_515/test.sh WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
 set_property(TEST p2p_dawn515_test PROPERTY LABELS nonparallelizable_tests)

From c3814b9e598ba21720a1638c7ac5c47646b6fb46 Mon Sep 17 00:00:00 2001
From: Kevin Heifner
Date: Mon, 3 Apr 2023 06:58:58 -0500
Subject: [PATCH 4/7] Use 10 clients for test. Minor cleanups to
 ship_streamer_test.py

---
 tests/CMakeLists.txt        |  6 +++---
 tests/ship_streamer_test.py | 14 ++++++++------
 2 files changed, 11 insertions(+), 9 deletions(-)

diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 4f9f35aa87..13fdd30cb3 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -98,12 +98,12 @@ target_link_libraries(ship_client abieos Boost::program_options Boost::system Th
 add_executable(ship_streamer ship_streamer.cpp)
 target_link_libraries(ship_streamer abieos Boost::program_options Boost::system Threads::Threads)
 
-add_test(NAME ship_test COMMAND tests/ship_test.py -v --num-clients 3 --num-requests 5000 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
+add_test(NAME ship_test COMMAND tests/ship_test.py -v --num-clients 10 --num-requests 5000 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
 set_property(TEST ship_test PROPERTY LABELS nonparallelizable_tests)
-add_test(NAME ship_test_unix COMMAND tests/ship_test.py -v --num-clients 3 --num-requests 5000 --clean-run --dump-error-detail --unix-socket WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
+add_test(NAME ship_test_unix COMMAND tests/ship_test.py -v --num-clients 10 --num-requests 5000 --clean-run --dump-error-detail --unix-socket WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
 set_property(TEST ship_test_unix PROPERTY LABELS nonparallelizable_tests)
 
-add_test(NAME ship_streamer_test COMMAND tests/ship_streamer_test.py -v --num-clients 3 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
+add_test(NAME ship_streamer_test COMMAND tests/ship_streamer_test.py -v --num-clients 10 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
 set_property(TEST ship_streamer_test PROPERTY LABELS long_running_tests)
 
 add_test(NAME p2p_dawn515_test COMMAND tests/p2p_tests/dawn_515/test.sh WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
diff --git a/tests/ship_streamer_test.py b/tests/ship_streamer_test.py
index 7bc95253d7..51b34ba951 100755
--- a/tests/ship_streamer_test.py
+++ b/tests/ship_streamer_test.py
@@ -21,6 +21,10 @@
 # to the other producing node via the "bridge" node.
 # The bridge node has the test_control_api_plugin, that the test uses to kill
 # the "bridge" node to generate a fork.
+# ship_streamer is used to connect to the state_history_plugin and verify that blocks receive link to previous
+# blocks. If the blocks do not link then ship_streamer will exit with an error causing this test to generate an
+# error. The fork generated by nodeos should be sent to the ship_streamer so it is able to correctly observe the
+# fork.
 #
 ###############################################################
 
@@ -63,7 +67,7 @@
 
     # *** setup topogrophy ***
 
-    # "bridge" shape connects defprocera through defproducerc (3 in node0) to each other and defproduceru (1 in node01)
+    # "bridge" shape connects defprocera through defproducerc (3 in node0) to each other and defproduceru (1 in node1)
     # and the only connection between those 2 groups is through the bridge node
 
     shipNodeNum = 1
@@ -113,12 +117,11 @@
     for i in range(0, totalNodes):
         node=cluster.getNode(i)
         node.producers=Cluster.parseProducers(i)
-        numProducers=len(node.producers)
        for prod in node.producers:
            prodName = cluster.defProducerAccounts[prod].name
            if prodName == "defproducera" or prodName == "defproducerb" or prodName == "defproducerc" or prodName == "defproduceru":
-                Print("Register producer %s" % cluster.defProducerAccounts[prod].name)
-                trans=node.regproducer(cluster.defProducerAccounts[prod], "http::/mysite.com", 0, waitForTransBlock=False, exitOnError=True)
+                Print("Register producer %s" % prodName)
+                trans=node.regproducer(cluster.defProducerAccounts[prod], "http://mysite.com", 0, waitForTransBlock=False, exitOnError=True)
 
     # create accounts via eosio as otherwise a bid is needed
     for account in accounts:
@@ -134,7 +137,7 @@
     cluster.waitOnClusterSync(blockAdvancing=3)
     start_block_num = shipNode.getBlockNum()
 
-    # vote a,b,c u
+    # vote a,b,c (node0) u (node1)
     voteProducers=[]
     voteProducers.append("defproducera")
     voteProducers.append("defproducerb")
@@ -215,7 +218,6 @@
         data = json.load(outFile)
         block_num = start_block_num
         for i in data:
-            #print(i)
             # fork can cause block numbers to be repeated
             this_block_num = i['get_blocks_result_v0']['this_block']['block_num']
             if this_block_num < block_num:

From bb7fc8d1453f2a244bb62c7c431a85f337c67e2d Mon Sep 17 00:00:00 2001
From: Kevin Heifner
Date: Mon, 3 Apr 2023 09:31:33 -0500
Subject: [PATCH 5/7] Fix spelling

---
 tests/ship_streamer.cpp | 48 ++++++++++++++++++++---------------------
 1 file changed, 24 insertions(+), 24 deletions(-)

diff --git a/tests/ship_streamer.cpp b/tests/ship_streamer.cpp
index 04e9683b6d..7ef2d693c7 100644
--- a/tests/ship_streamer.cpp
+++ b/tests/ship_streamer.cpp
@@ -118,33 +118,33 @@ int main(int argc, char* argv[]) {
         stream.read(buffer);
         eosio::input_stream is((const char*)buffer.data().data(), buffer.data().size());
 
-        rapidjson::Document result_doucment;
-        result_doucment.Parse(result_type.bin_to_json(is).c_str());
-
-        eosio::check(!result_doucment.HasParseError(), "Failed to parse result JSON from abieos");
-        eosio::check(result_doucment.IsArray(), "result should have been an array (variant) but it's not");
-        eosio::check(result_doucment.Size() == 2, "result was an array but did not contain 2 items like a variant should");
-        eosio::check(std::string(result_doucment[0].GetString()) == "get_blocks_result_v0", "result type doesn't look like get_blocks_result_v0");
-        eosio::check(result_doucment[1].IsObject(), "second item in result array is not an object");
-        eosio::check(result_doucment[1].HasMember("head"), "cannot find 'head' in result");
-        eosio::check(result_doucment[1]["head"].IsObject(), "'head' is not an object");
-        eosio::check(result_doucment[1]["head"].HasMember("block_num"), "'head' does not contain 'block_num'");
-        eosio::check(result_doucment[1]["head"]["block_num"].IsUint(), "'head.block_num' isn't a number");
-        eosio::check(result_doucment[1]["head"].HasMember("block_id"), "'head' does not contain 'block_id'");
-        eosio::check(result_doucment[1]["head"]["block_id"].IsString(), "'head.block_num' isn't a string");
+        rapidjson::Document result_document;
+        result_document.Parse(result_type.bin_to_json(is).c_str());
+
+        eosio::check(!result_document.HasParseError(), "Failed to parse result JSON from abieos");
+        eosio::check(result_document.IsArray(), "result should have been an array (variant) but it's not");
+        eosio::check(result_document.Size() == 2, "result was an array but did not contain 2 items like a variant should");
+        eosio::check(std::string(result_document[0].GetString()) == "get_blocks_result_v0", "result type doesn't look like get_blocks_result_v0");
+        eosio::check(result_document[1].IsObject(), "second item in result array is not an object");
+        eosio::check(result_document[1].HasMember("head"), "cannot find 'head' in result");
+        eosio::check(result_document[1]["head"].IsObject(), "'head' is not an object");
+        eosio::check(result_document[1]["head"].HasMember("block_num"), "'head' does not contain 'block_num'");
+        eosio::check(result_document[1]["head"]["block_num"].IsUint(), "'head.block_num' isn't a number");
+        eosio::check(result_document[1]["head"].HasMember("block_id"), "'head' does not contain 'block_id'");
+        eosio::check(result_document[1]["head"]["block_id"].IsString(), "'head.block_id' isn't a string");
 
         uint32_t this_block_num = 0;
-        if( result_doucment[1].HasMember("this_block") && result_doucment[1]["this_block"].IsObject() ) {
-           if( result_doucment[1]["this_block"].HasMember("block_num") && result_doucment[1]["this_block"]["block_num"].IsUint() ) {
-              this_block_num = result_doucment[1]["this_block"]["block_num"].GetUint();
+        if( result_document[1].HasMember("this_block") && result_document[1]["this_block"].IsObject() ) {
+           if( result_document[1]["this_block"].HasMember("block_num") && result_document[1]["this_block"]["block_num"].IsUint() ) {
+              this_block_num = result_document[1]["this_block"]["block_num"].GetUint();
            }
            std::string this_block_id;
-           if( result_doucment[1]["this_block"].HasMember("block_id") && result_doucment[1]["this_block"]["block_id"].IsString() ) {
-              this_block_id = result_doucment[1]["this_block"]["block_id"].GetString();
+           if( result_document[1]["this_block"].HasMember("block_id") && result_document[1]["this_block"]["block_id"].IsString() ) {
+              this_block_id = result_document[1]["this_block"]["block_id"].GetString();
            }
            std::string prev_block_id;
-           if( result_doucment[1]["prev_block"].HasMember("block_id") && result_doucment[1]["prev_block"]["block_id"].IsString() ) {
-              prev_block_id = result_doucment[1]["prev_block"]["block_id"].GetString();
+           if( result_document[1]["prev_block"].HasMember("block_id") && result_document[1]["prev_block"]["block_id"].IsString() ) {
+              prev_block_id = result_document[1]["prev_block"]["block_id"].GetString();
            }
            if( !irreversible_only && !this_block_id.empty() && !prev_block_id.empty() ) {
               // verify forks were sent
@@ -156,8 +156,8 @@ int main(int argc, char* argv[]) {
              }
              block_ids[this_block_num] = this_block_id;
 
-             if( result_doucment[1]["last_irreversible"].HasMember("block_num") && result_doucment[1]["last_irreversible"]["block_num"].IsUint() ) {
-                uint32_t lib_num = result_doucment[1]["last_irreversible"]["block_num"].GetUint();
+             if( result_document[1]["last_irreversible"].HasMember("block_num") && result_document[1]["last_irreversible"]["block_num"].IsUint() ) {
+                uint32_t lib_num = result_document[1]["last_irreversible"]["block_num"].GetUint();
                 auto i = block_ids.lower_bound(lib_num);
                 if (i != block_ids.end()) {
                    block_ids.erase(block_ids.begin(), i);
@@ -177,7 +177,7 @@ int main(int argc, char* argv[]) {
 
       rapidjson::StringBuffer result_sb;
       rapidjson::PrettyWriter<rapidjson::StringBuffer> result_writer(result_sb);
-      result_doucment[1].Accept(result_writer);
+      result_document[1].Accept(result_writer);
       std::cout << result_sb.GetString() << std::endl << "}" << std::endl;
 
       if( this_block_num == end_block_num ) break;

From fa2b7fece21c4795097732ebca26ff2376666ec7 Mon Sep 17 00:00:00 2001
From: Kevin Heifner
Date: Mon, 3 Apr 2023 10:29:25 -0500
Subject: [PATCH 6/7] Use f-strings

---
 tests/ship_streamer_test.py | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)

diff --git a/tests/ship_streamer_test.py b/tests/ship_streamer_test.py
index 51b34ba951..c12d025528 100755
--- a/tests/ship_streamer_test.py
+++ b/tests/ship_streamer_test.py
@@ -108,7 +108,7 @@
     testWalletName="test"
 
-    Print("Creating wallet \"%s\"." % (testWalletName))
+    Print(f"Creating wallet {testWalletName}.")
     testWallet=walletMgr.create(testWalletName, [cluster.eosioAccount,accounts[0],accounts[1],accounts[2],accounts[3],accounts[4],accounts[5]])
 
     for _, account in cluster.defProducerAccounts.items():
         walletMgr.importKey(account, testWallet, ignoreDupKeyWarning=True)
@@ -120,15 +120,15 @@
         for prod in node.producers:
             prodName = cluster.defProducerAccounts[prod].name
             if prodName == "defproducera" or prodName == "defproducerb" or prodName == "defproducerc" or prodName == "defproduceru":
-                Print("Register producer %s" % prodName)
+                Print(f"Register producer {prodName}")
                 trans=node.regproducer(cluster.defProducerAccounts[prod], "http://mysite.com", 0, waitForTransBlock=False, exitOnError=True)
 
     # create accounts via eosio as otherwise a bid is needed
     for account in accounts:
-        Print("Create new account %s via %s with private key: %s" % (account.name, cluster.eosioAccount.name, account.activePrivateKey))
+        Print(f"Create new account {account.name} via {cluster.eosioAccount.name} with private key: {account.activePrivateKey}")
         trans=nonProdNode.createInitializeAccount(account, cluster.eosioAccount, stakedDeposit=0, waitForTransBlock=True, stakeNet=10000, stakeCPU=10000, buyRAM=10000000, exitOnError=True)
         transferAmount="100000000.0000 {0}".format(CORE_SYMBOL)
-        Print("Transfer funds %s from account %s to %s" % (transferAmount, cluster.eosioAccount.name, account.name))
+        Print(f"Transfer funds {transferAmount} from account {cluster.eosioAccount.name} to {account.name}")
         nonProdNode.transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer", waitForTransBlock=True)
         trans=nonProdNode.delegatebw(account, 20000000.0000, 20000000.0000, waitForTransBlock=False, exitOnError=True)
 
@@ -144,7 +144,7 @@
     voteProducers.append("defproducerc")
     voteProducers.append("defproduceru")
     for account in accounts:
-        Print("Account %s vote for producers=%s" % (account.name, voteProducers))
+        Print(f"Account {account.name} vote for producers={voteProducers}")
         trans=prodNode.vote(account, voteProducers, exitOnError=True, waitForTransBlock=False)
 
     #verify nodes are in sync and advancing
@@ -157,8 +157,8 @@
     end_block_num = start_block_num + block_range
 
     shipClient = "tests/ship_streamer"
-    cmd = "%s --start-block-num %d --end-block-num %d --fetch-block --fetch-traces --fetch-deltas" % (shipClient, start_block_num, end_block_num)
-    if Utils.Debug: Utils.Print("cmd: %s" % (cmd))
+    cmd = f"{shipClient} --start-block-num {start_block_num} --end-block-num {end_block_num} --fetch-block --fetch-traces --fetch-deltas"
+    if Utils.Debug: Utils.Print(f"cmd: {cmd}")
     clients = []
     files = []
     shipTempDir = os.path.join(Utils.DataDir, "ship")
@@ -168,14 +168,14 @@
     starts = []
     for i in range(0, args.num_clients):
        start = time.perf_counter()
-        outFile = open("%s%d.out" % (shipClientFilePrefix, i), "w")
-        errFile = open("%s%d.err" % (shipClientFilePrefix, i), "w")
-        Print("Start client %d" % (i))
+        outFile = open(f"{shipClientFilePrefix}{i}.out", "w")
+        errFile = open(f"{shipClientFilePrefix}{i}.err", "w")
+        Print(f"Start client {i}")
         popen=Utils.delayedCheckOutput(cmd, stdout=outFile, stderr=errFile)
         starts.append(time.perf_counter())
         clients.append((popen, cmd))
         files.append((outFile, errFile))
-        Print("Client %d started, Ship node head is: %s" % (i, shipNode.getBlockNum()))
+        Print(f"Client {i} started, Ship node head is: {shipNode.getBlockNum()}")
 
     # Generate a fork
     forkAtProducer="defproducera"
@@ -183,7 +183,7 @@
     preKillBlockNum=nonProdNode.getBlockNum()
     preKillBlockProducer=nonProdNode.getBlockProducerByNum(preKillBlockNum)
     nonProdNode.killNodeOnProducer(producer=forkAtProducer, whereInSequence=1)
-    Print("Current block producer %s fork will be at producer %s" % (preKillBlockProducer, forkAtProducer))
+    Print(f"Current block producer {preKillBlockProducer} fork will be at producer {forkAtProducer}")
     prodNode0.waitForProducer(forkAtProducer)
     prodNode1.waitForProducer(prodNode1Prod)
     if nonProdNode.verifyAlive(): # if on defproducera, need to wait again
@@ -200,7 +200,7 @@
     if nonProdNode.verifyAlive():
         Utils.errorExit("Bridge is already running");
     if not nonProdNode.relaunch():
-        Utils.errorExit("Failure - (non-production) node %d should have restarted" % (nonProdNode.nodeNum))
+        Utils.errorExit(f"Failure - (non-production) node {nonProdNode.nodeNum} should have restarted")
 
     nonProdNode.waitForProducer(forkAtProducer)
     nonProdNode.waitForProducer(prodNode1Prod)
@@ -208,13 +208,13 @@
     if int(afterForkBlockNum) > int(end_block_num):
         Utils.errorExit(f"Did not stream long enough {end_block_num} to cover the fork {afterForkBlockNum}, increase block_range {block_range}")
 
-    Print("Stopping all %d clients" % (args.num_clients))
+    Print(f"Stopping all {args.num_clients} clients")
     for index, (popen, _), (out, err), start in zip(range(len(clients)), clients, files, starts):
         popen.wait()
-        Print("Stopped client %d. Ran for %.3f seconds." % (index, time.perf_counter() - start))
+        Print(f"Stopped client {index}. Ran for {time.perf_counter() - start:.3f} seconds.")
         out.close()
         err.close()
-        outFile = open("%s%d.out" % (shipClientFilePrefix, index), "r")
+        outFile = open(f"{shipClientFilePrefix}{index}.out", "r")
         data = json.load(outFile)
         block_num = start_block_num
         for i in data:

From d40be19b02f366a0d9e8bb701c9bb4e4491f950d Mon Sep 17 00:00:00 2001
From: Kevin Heifner
Date: Mon, 3 Apr 2023 12:40:09 -0500
Subject: [PATCH 7/7] CORE_SYMBOL is now in TestHarness

---
 tests/ship_streamer_test.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/tests/ship_streamer_test.py b/tests/ship_streamer_test.py
index 3b1346fcd5..7d3816dfd3 100755
--- a/tests/ship_streamer_test.py
+++ b/tests/ship_streamer_test.py
@@ -7,9 +7,8 @@
 import signal
 import sys
 
-from TestHarness import Cluster, TestHelper, Utils, WalletMgr
+from TestHarness import Cluster, TestHelper, Utils, WalletMgr, CORE_SYMBOL
 from TestHarness.TestHelper import AppArgs
-from core_symbol import CORE_SYMBOL
 
 ###############################################################
 # ship_streamer_test
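
The linkage check these patches add to tests/ship_streamer.cpp can be summarized outside the diff context. The sketch below is an illustrative Python re-expression of that C++ logic, not part of the patch series itself; the function name verify_stream and the tuple-based input format are invented for the example. It keeps a map of block_num -> block_id, requires each streamed block's prev_block id to match the id recorded for height block_num - 1 (so a microfork must be re-streamed from its divergence point to pass), and prunes entries below the last irreversible block, mirroring the block_ids map, the prev_block comparison, and the lower_bound/erase in ship_streamer.cpp.

# Illustrative sketch (assumed names, invented input format) of the
# fork-linkage verification performed by tests/ship_streamer.cpp.
def verify_stream(results):
    block_ids = {}  # block_num -> block_id, pruned below LIB
    for block_num, block_id, prev_block_id, lib_num in results:
        # A fork shows up as a repeated block_num; the replacement block
        # must still link to the id previously recorded for its parent.
        parent_id = block_ids.get(block_num - 1)
        if parent_id is not None and parent_id != prev_block_id:
            raise ValueError(f"block {block_num} does not link to previous {parent_id}")
        block_ids[block_num] = block_id
        # Irreversible blocks can no longer be forked out, so drop entries
        # below lib_num, like the lower_bound/erase in the C++ client.
        for num in [n for n in block_ids if n < lib_num]:
            del block_ids[num]

# A microfork at height 11: the second branch re-streams block 11 with a
# new id, and block 12 must link to the surviving branch.
verify_stream([
    (10, "a10", "a09", 8),
    (11, "a11", "a10", 8),
    (11, "b11", "a10", 8),  # same height, different id
    (12, "b12", "b11", 9),
])

Under this scheme a repeated block number is accepted as long as its prev id still matches what was stored for the parent height, which is also why ship_streamer_test.py (after PATCH 2/7) rewinds its expected block_num when this_block_num < block_num instead of asserting strictly monotonic block numbers.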