From 432e08a81a42f7e8196a04a3a5ef5d8270449e01 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Sat, 3 Feb 2024 13:57:37 -0600 Subject: [PATCH 1/4] GH-2172 Simplify ship_streamer_test forking by using a specific setup --- tests/CMakeLists.txt | 1 + tests/TestHarness/Cluster.py | 4 +- tests/bridge_for_fork_test_shape.json | 126 ++++++++++++++ tests/ship_streamer_test.py | 226 +++++++++++--------------- 4 files changed, 220 insertions(+), 137 deletions(-) create mode 100644 tests/bridge_for_fork_test_shape.json diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 23fceeb4fc..77c99e0d0b 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -48,6 +48,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_producer_watermark_test.py ${C configure_file(${CMAKE_CURRENT_SOURCE_DIR}/cli_test.py ${CMAKE_CURRENT_BINARY_DIR}/cli_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/ship_test.py ${CMAKE_CURRENT_BINARY_DIR}/ship_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/ship_streamer_test.py ${CMAKE_CURRENT_BINARY_DIR}/ship_streamer_test.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/bridge_for_fork_test_shape.json ${CMAKE_CURRENT_BINARY_DIR}/bridge_for_fork_test_shape.json COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/large-lib-test.py ${CMAKE_CURRENT_BINARY_DIR}/large-lib-test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/http_plugin_test.py ${CMAKE_CURRENT_BINARY_DIR}/http_plugin_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/p2p_high_latency_test.py ${CMAKE_CURRENT_BINARY_DIR}/p2p_high_latency_test.py COPYONLY) diff --git a/tests/TestHarness/Cluster.py b/tests/TestHarness/Cluster.py index c678ca4364..e139ca0d19 100644 --- a/tests/TestHarness/Cluster.py +++ b/tests/TestHarness/Cluster.py @@ -999,7 +999,7 @@ def activateInstantFinality(self, launcher, biosFinalizer, pnodes): # call setfinalizer numFins = 0 for n in launcher.network.nodes.values(): - if n.keys[0].blspubkey is None: + if len(n.keys) == 0 or n.keys[0].blspubkey is None: continue if len(n.producers) == 0: continue @@ -1020,7 +1020,7 @@ def activateInstantFinality(self, launcher, biosFinalizer, pnodes): for n in launcher.network.nodes.values(): if n.index == Node.biosNodeId and not biosFinalizer: continue - if n.keys[0].blspubkey is None: + if len(n.keys) == 0 or n.keys[0].blspubkey is None: continue if len(n.producers) == 0: continue diff --git a/tests/bridge_for_fork_test_shape.json b/tests/bridge_for_fork_test_shape.json new file mode 100644 index 0000000000..a97b7235f3 --- /dev/null +++ b/tests/bridge_for_fork_test_shape.json @@ -0,0 +1,126 @@ +{ + "name": "testnet_", + "ssh_helper": { + "ssh_cmd": "/usr/bin/ssh", + "scp_cmd": "/usr/bin/scp", + "ssh_identity": "", + "ssh_args": "" + }, + "nodes": { + "bios":{ + "name": "bios", + "keys": [ + { + "privkey":"5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3", + "pubkey":"EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" + } + ], + "peers": [], + "producers": [ + "eosio" + ], + "dont_start": false + }, + "testnet_00":{ + "name": "testnet_00", + "keys": [ + { + "privkey":"5Jf4sTk7vwX1MYpLJ2eQFanVvKYXFqGBrCyANPukuP2BJ5WAAKZ", + "pubkey":"EOS58B33q9S7oNkgeFfcoW3VJYu4obfDiqn5RHGE2ige6jVjUhymR", + "blspubkey":"PUB_BLS_2QQ72DAhKOWKfnBF77AnYn3GqD0M+Yh/05tqKNhqEQ0K4ixhIZ0rKbO2UuonqGAV1KYPgLzIfRz6zMD4iWI3FhOGE+UZ4Le5cELQ3NjOBFagG51XqM8Q1lpUqNanhCoDyfFnLg==", + "blsprivkey":"PVT_BLS_XwmVWf21N/j+hYJfo5+VHN1BtMY2wmKdQ7unaX/rzk+EJ5PX", + 
"blspop":"SIG_BLS_jvAPOOkvw19wuEzIt1ot8tn6aLeP55XQtSIY2eP3DMcZvEcdmlWVqNI/M8VNKL8RiN2F7XrRZ6O5cPPh4f3B/XfHOyUd3UXG3p++9m0tu0jCojtWQd6wJmTIR1LQ6DUWAQwBOx8Rd70FoznDEqJS/RZBV03x9FpBDQH7VB6BYs9UztynlWrL8LZaRbi8WNwF9CDzUJJsmOmHMnZO5qcTuo/cmSgV1X03bITdQ4IGq06yExBPepIX9ZZu5XH4QCIBo/fIcg==" + } + ], + "peers": [ + "bios", + "testnet_01", + "testnet_02", + "testnet_04" + ], + "producers": [ + "defproducera" + ], + "dont_start": false + }, + "testnet_01":{ + "name": "testnet_01", + "keys": [ + { + "privkey":"5HviUPkTEtvF2B1nm8aZUnjma2TzgpKRjuXjwHyy3FME4xDbkZF", + "pubkey":"EOS5CbcTDgbks2ptTxvyCbT9HFbzX7PDHUY2wN4DDnVBhhQr2ZNDE", + "blspubkey":"PUB_BLS_g86vgFO5G0bcRuaEA95kNFxnsHyzVSOthKKN8MSJ2zLWj+WfCbIBIO73OxgzjVsZarSuMQrcbVu2MktqF6PGlPkPaSuJGnES3FQ0OAfebOMAsPeAd23Ge/3+cPl2OVgXSmHdhA==", + "blsprivkey":"PVT_BLS_AtgyGDKJdQWvCNyGJgyu9bWpMS7eQE07zB2nGTlhZ0nCX11C", + "blspop":"SIG_BLS_pzPEYt1zLPVbofA1YABSPb1gJdvUdUhREa+pQsj2eTSaEBEnb+w+AwO0cQLgYSYWNWRePIUUvj5MCWqlfIU5ulBL8tqlwdCqQ0o6W915axLq2l1qnbFK/XfN9dRxdJgWPdl57bCGmoii25gdyobgLUZaJzPfivE6iQ981IgGACAb5CRdVH5hPZq8Rab1O64OclwCT/8ho8TdcKoSQj0njbAfp9JZxv5EyuAkaNIQun9rn+vH++37n+nDeV6UgCUEzex3cQ==" + } + ], + "peers": [ + "bios", + "testnet_00", + "testnet_02", + "testnet_04" + ], + "producers": [ + "defproducerb" + ], + "dont_start": false + }, + "testnet_02":{ + "name": "testnet_02", + "keys": [ + { + "privkey":"5KkQbdxFHr8Pg1N3DEMDdU7emFgUTwQvh99FDJrodFhUbbsAtQT", + "pubkey":"EOS6Tkpf8kcDfa32WA9B4nTcEJ64ZdDMSNioDcaL6rzdMwnpzaWJB", + "blspubkey":"PUB_BLS_PerMKMuQdZ3N6NEOoQRdlB1BztNWAeHkmzqwcFwEQGEM8QMfv3mrrepX5yM4NKQHYDnfcPIQPpDt0gCi6afvpZgN0JHT4jUaNlbfsJKtbm5mOJcqggddamCKEz2lBC0OS2D5yw==", + "blsprivkey":"PVT_BLS_n4AshIQiCqCdIOC/lGkKauVOFE2KelMb3flVvodVsji15FHW", + "blspop":"SIG_BLS_oqOzQYpJRvQ88ExpJKmwgna29eXM5umPpLmjfHcdcUUKwS3NMWwvP1hLwLcj4XcU6CuM3RzfRo6PPE2sxrp2fUWpqP0bsuamcOOyF+V6TfJMYuDbepc1Jp9HUdli3X0QE6hL+umbO2PWE4KiCSn9tj9LRyXgc41IY7R/JeQCCQSNXMSWhebdB/KCapfxq8sYEzRhXcZik+bXUDC1AcLXaocvNV6o2nKHtJwQ7YyGXCvFXgMMcQ3PWFlQ8WErmxILOM3Z/w==" + } + ], + "peers": [ + "bios", + "testnet_01", + "testnet_00", + "testnet_04" + ], + "producers": [ + "defproducerc" + ], + "dont_start": false + }, + "testnet_03":{ + "name": "testnet_03", + "keys": [ + { + "privkey":"5JxTJJegQBpEL1p77TzkN1ompMB9gDwAfjM9chPzFCB4chxmwrE", + "pubkey":"EOS52ntDHqA2qj4xVo7KmxdezMRhvvBqpZBuKYJCsgihisxmywpAx", + "blspubkey":"PUB_BLS_6C3UlotUoDwruilh6gE+qlKsqY7VrmT6eT3aTr9fC0kZUkQRo13/xMh7MZerbBED2Rho72BLHIaWnT01LLsCFIZg9pSyHBFt3EcKa4p6OyvTkQAFxNb739EYcTVx2n8Gi0d+iw==", + "blsprivkey":"PVT_BLS_Tw2Lozr/Qw2/uf13xo6vduAWqzJnWu2o0/s9WalErmkq4RPV", + "blspop":"SIG_BLS_mrKA0CFFTP3udLsaWH67ilVf/5dcCHfzJ+P8i+dEuVg4y+td8uyghJqDxnPoitMEjjSqP12kmSZciDXqWD+uGU7nY1YeDK5Tvi7cvd1qSOuATjDuW+amc/5SKp73NLsYwqVFcIex4XF+Quu/NRfDCfLj9ZRPtmuNAUevi2iz0ExeOkQTjQhKksb9ihN+6w4Wk0vJEjt0KbbW2Ny46J+P7PbanH34X9iCV3dT+lqnyp9om0hxKJJIH2R6P5hC2d8Ry8FBAw==" + } + ], + "peers": [ + "bios", + "testnet_04" + ], + "producers": [ + "defproducerd" + ], + "dont_start": false + }, + "testnet_04":{ + "name": "testnet_04", + "keys": [ + ], + "peers": [ + "bios", + "testnet_00", + "testnet_01", + "testnet_02", + "testnet_03" + ], + "producers": [ + ], + "dont_start": false + } + } +} diff --git a/tests/ship_streamer_test.py b/tests/ship_streamer_test.py index 932b8f3015..5646364a74 100755 --- a/tests/ship_streamer_test.py +++ b/tests/ship_streamer_test.py @@ -7,16 +7,16 @@ import signal import sys -from TestHarness import Cluster, TestHelper, Utils, WalletMgr, CORE_SYMBOL, createAccountKeys +from TestHarness import Cluster, 
TestHelper, Utils, WalletMgr from TestHarness.TestHelper import AppArgs ############################################################### # ship_streamer_test # -# This test sets up 2 producing nodes and one "bridge" node using test_control_api_plugin. -# One producing node has 3 of the elected producers and the other has 1 of the elected producers. -# All the producers are named in alphabetical order, so that the 3 producers, in the one production node, are -# scheduled first, followed by the 1 producer in the other producer node. Each producing node is only connected +# This test sets up 4 producing nodes and one "bridge" node using test_control_api_plugin. +# One side of bridge has 3 of the elected producers and the other has 1 of the elected producers. +# All the producers are named in alphabetical order, so that the 3 producers, in the one production side, are +# scheduled first, followed by the 1 producer in the other producer node. Each producing side is only connected # to the other producing node via the "bridge" node. # The bridge node has the test_control_api_plugin, that the test uses to kill # the "bridge" node to generate a fork. @@ -39,7 +39,7 @@ dumpErrorDetails=args.dump_error_details walletPort=TestHelper.DEFAULT_WALLET_PORT -totalProducerNodes=2 +totalProducerNodes=4 totalNonProducerNodes=1 totalNodes=totalProducerNodes+totalNonProducerNodes maxActiveProducers=21 @@ -66,91 +66,35 @@ def getLatestSnapshot(nodeId): # *** setup topogrophy *** - # "bridge" shape connects defprocera through defproducerc (3 in node0) to each other and defproduceru (1 in node1) - # and the only connection between those 2 groups is through the bridge node + # "bridge" shape connects defproducera (node0) defproducerb (node1) defproducerc (node2) to each other and defproducerd (node3) + # and the only connection between those 2 groups is through the bridge (node4) - shipNodeNum = 1 + shipNodeNum = 3 specificExtraNodeosArgs={} specificExtraNodeosArgs[shipNodeNum]="--plugin eosio::state_history_plugin --trace-history --chain-state-history --state-history-stride 200 --plugin eosio::net_api_plugin --plugin eosio::producer_api_plugin " # producer nodes will be mapped to 0 through totalProducerNodes-1, so the number totalProducerNodes will be the non-producing node specificExtraNodeosArgs[totalProducerNodes]="--plugin eosio::test_control_api_plugin " - if cluster.launch(topo="bridge", pnodes=totalProducerNodes, - totalNodes=totalNodes, totalProducers=totalProducers, activateIF=activateIF, biosFinalizer=False, + if cluster.launch(topo="./tests/bridge_for_fork_test_shape.json", pnodes=totalProducerNodes, loadSystemContract=False, + totalNodes=totalNodes, totalProducers=totalProducerNodes, activateIF=activateIF, biosFinalizer=False, specificExtraNodeosArgs=specificExtraNodeosArgs) is False: Utils.cmdError("launcher") - Utils.errorExit("Failed to stand up eos cluster.") + Utils.errorExit("Failed to stand up cluster.") # *** identify each node (producers and non-producing node) *** - #verify nodes are in sync and advancing + # verify nodes are in sync and advancing cluster.waitOnClusterSync(blockAdvancing=5) Print("Cluster in Sync") - prodNode = cluster.getNode(0) - prodNode0 = prodNode - prodNode1 = cluster.getNode(1) - nonProdNode = cluster.getNode(2) + prodNode0 = cluster.getNode(0) + prodNode3 = cluster.getNode(3) + nonProdNode = cluster.getNode(4) shipNode = cluster.getNode(shipNodeNum) - - accounts=createAccountKeys(6) - if accounts is None: - Utils.errorExit("FAILURE - create keys") - - 
accounts[0].name="testeraaaaaa" - accounts[1].name="tester111111" # needed for voting - accounts[2].name="tester222222" # needed for voting - accounts[3].name="tester333333" # needed for voting - accounts[4].name="tester444444" # needed for voting - accounts[5].name="tester555555" # needed for voting - - testWalletName="test" - - Print(f"Creating wallet {testWalletName}.") - testWallet=walletMgr.create(testWalletName, [cluster.eosioAccount,accounts[0],accounts[1],accounts[2],accounts[3],accounts[4],accounts[5]]) - - for _, account in cluster.defProducerAccounts.items(): - walletMgr.importKey(account, testWallet, ignoreDupKeyWarning=True) - - for i in range(0, totalNodes): - node=cluster.getNode(i) - node.producers=Cluster.parseProducers(i) - for prod in node.producers: - prodName = cluster.defProducerAccounts[prod].name - if prodName == "defproducera" or prodName == "defproducerb" or prodName == "defproducerc" or prodName == "defproduceru": - Print(f"Register producer {prodName}") - trans=node.regproducer(cluster.defProducerAccounts[prod], "http://mysite.com", 0, waitForTransBlock=False, exitOnError=True) - - # create accounts via eosio as otherwise a bid is needed - transferAmount="100000000.0000 {0}".format(CORE_SYMBOL) - for account in accounts: - Print(f"Create new account {account.name} via {cluster.eosioAccount.name} with private key: {account.activePrivateKey}") - trans=nonProdNode.createInitializeAccount(account, cluster.eosioAccount, stakedDeposit=0, waitForTransBlock=False, stakeNet=10000, stakeCPU=10000, buyRAM=10000000, exitOnError=True) - nonProdNode.waitForTransBlockIfNeeded(trans, True, exitOnError=True) - for account in accounts: - Print(f"Transfer funds {transferAmount} from account {cluster.eosioAccount.name} to {account.name}") - trans=nonProdNode.transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer", waitForTransBlock=False) - nonProdNode.waitForTransBlockIfNeeded(trans, True, exitOnError=True) - for account in accounts: - trans=nonProdNode.delegatebw(account, 20000000.0000, 20000000.0000, waitForTransBlock=False, exitOnError=True) - nonProdNode.waitForTransBlockIfNeeded(trans, True, exitOnError=True) - - # *** vote using accounts *** - - cluster.waitOnClusterSync(blockAdvancing=3) + # cluster.waitOnClusterSync(blockAdvancing=3) start_block_num = shipNode.getBlockNum() - # vote a,b,c (node0) u (node1) - voteProducers=[] - voteProducers.append("defproducera") - voteProducers.append("defproducerb") - voteProducers.append("defproducerc") - voteProducers.append("defproduceru") - for account in accounts: - Print(f"Account {account.name} vote for producers={voteProducers}") - trans=prodNode.vote(account, voteProducers, exitOnError=True, waitForTransBlock=False) - #verify nodes are in sync and advancing cluster.waitOnClusterSync(blockAdvancing=3) Print("Shutdown unneeded bios node") @@ -160,18 +104,17 @@ def getLatestSnapshot(nodeId): targetTpsPerGenerator = 10 testTrxGenDurationSec=60*60 numTrxGenerators=2 - cluster.launchTrxGenerators(contractOwnerAcctName=cluster.eosioAccount.name, acctNamesList=[accounts[0].name, accounts[1].name], - acctPrivKeysList=[accounts[0].activePrivateKey,accounts[1].activePrivateKey], nodeId=prodNode1.nodeId, + cluster.launchTrxGenerators(contractOwnerAcctName=cluster.eosioAccount.name, acctNamesList=[cluster.defproduceraAccount.name, cluster.defproducerbAccount.name], + acctPrivKeysList=[cluster.defproduceraAccount.activePrivateKey,cluster.defproducerbAccount.activePrivateKey], nodeId=prodNode3.nodeId, 
tpsPerGenerator=targetTpsPerGenerator, numGenerators=numTrxGenerators, durationSec=testTrxGenDurationSec, waitToComplete=False) - status = cluster.waitForTrxGeneratorsSpinup(nodeId=prodNode1.nodeId, numGenerators=numTrxGenerators) + status = cluster.waitForTrxGeneratorsSpinup(nodeId=prodNode3.nodeId, numGenerators=numTrxGenerators) assert status is not None and status is not False, "ERROR: Failed to spinup Transaction Generators" prodNode0.waitForProducer("defproducerc") - prodNode0.waitForProducer("defproducera") - block_range = 450 + block_range = 250 end_block_num = start_block_num + block_range shipClient = "tests/ship_streamer" @@ -196,23 +139,31 @@ def getLatestSnapshot(nodeId): Print(f"Client {i} started, Ship node head is: {shipNode.getBlockNum()}") # Generate a fork - prodNode1Prod="defproduceru" + prodNode3Prod= "defproducerd" preKillBlockNum=nonProdNode.getBlockNum() preKillBlockProducer=nonProdNode.getBlockProducerByNum(preKillBlockNum) - forkAtProducer="defproducer" + chr(ord(preKillBlockProducer[-1])+2) + forkAtProducer="defproducerb" nonProdNode.killNodeOnProducer(producer=forkAtProducer, whereInSequence=1) Print(f"Current block producer {preKillBlockProducer} fork will be at producer {forkAtProducer}") - prodNode0.waitForProducer(forkAtProducer) - prodNode1.waitForProducer(prodNode1Prod) - if nonProdNode.verifyAlive(): # if on defproducera, need to wait again - prodNode0.waitForProducer(forkAtProducer) - prodNode1.waitForProducer(prodNode1Prod) + prodNode0.waitForProducer("defproducera") + prodNode3.waitForProducer(prodNode3Prod) + if nonProdNode.verifyAlive(): + prodNode0.waitForProducer("defproducera") + prodNode3.waitForProducer(prodNode3Prod) if nonProdNode.verifyAlive(): Utils.errorExit("Bridge did not shutdown") Print("Fork started") - forkProgress="defproducer" + chr(ord(forkAtProducer[-1])+3) - prodNode0.waitForProducer(forkProgress) # wait for fork to progress a bit + prodNode0.waitForProducer("defproducerc") # wait for fork to progress a bit + restore0BlockNum = prodNode0.getBlockNum() + restore1BlockNum = prodNode3.getBlockNum() + restoreBlockNum = max(int(restore0BlockNum), int(restore1BlockNum)) + restore0LIB = prodNode0.getIrreversibleBlockNum() + restore1LIB = prodNode3.getIrreversibleBlockNum() + restoreLIB = max(int(restore0LIB), int(restore1LIB)) + + if int(restoreBlockNum) > int(end_block_num): + Utils.errorExit(f"Did not stream long enough {end_block_num} to cover the fork {restoreBlockNum}, increase block_range {block_range}") Print("Restore fork") Print("Relaunching the non-producing bridge node to connect the producing nodes again") @@ -222,10 +173,11 @@ def getLatestSnapshot(nodeId): Utils.errorExit(f"Failure - (non-production) node {nonProdNode.nodeNum} should have restarted") nonProdNode.waitForProducer(forkAtProducer) - nonProdNode.waitForProducer(prodNode1Prod) + nonProdNode.waitForProducer(prodNode3Prod) + nonProdNode.waitForIrreversibleBlock(restoreLIB+1) afterForkBlockNum = nonProdNode.getBlockNum() - if int(afterForkBlockNum) < int(end_block_num): - Utils.errorExit(f"Did not stream long enough {end_block_num} to cover the fork {afterForkBlockNum}, increase block_range {block_range}") + + assert shipNode.findInLog(f"successfully switched fork to new head"), f"No fork found in log {shipNode}" Print(f"Stopping all {args.num_clients} clients") for index, (popen, _), (out, err), start in zip(range(len(clients)), clients, files, starts): @@ -255,51 +207,55 @@ def getLatestSnapshot(nodeId): Print("Shutdown bridge node") 
nonProdNode.kill(signal.SIGTERM) - Print("Test starting ship from snapshot") - Utils.rmNodeDataDir(shipNodeNum) - isRelaunchSuccess = shipNode.relaunch(chainArg=" --snapshot {}".format(getLatestSnapshot(shipNodeNum))) - assert isRelaunchSuccess, "relaunch from snapshot failed" - - afterSnapshotBlockNum = shipNode.getBlockNum() - - Print("Verify we can stream from ship after start from a snapshot with no incoming trxs") - start_block_num = afterSnapshotBlockNum - block_range = 0 - end_block_num = start_block_num + block_range - cmd = f"{shipClient} --start-block-num {start_block_num} --end-block-num {end_block_num} --fetch-block --fetch-traces --fetch-deltas" - if Utils.Debug: Utils.Print(f"cmd: {cmd}") - clients = [] - files = [] - starts = [] - for i in range(0, args.num_clients): - start = time.perf_counter() - outFile = open(f"{shipClientFilePrefix}{i}_snapshot.out", "w") - errFile = open(f"{shipClientFilePrefix}{i}_snapshot.err", "w") - Print(f"Start client {i}") - popen=Utils.delayedCheckOutput(cmd, stdout=outFile, stderr=errFile) - starts.append(time.perf_counter()) - clients.append((popen, cmd)) - files.append((outFile, errFile)) - Print(f"Client {i} started, Ship node head is: {shipNode.getBlockNum()}") - - Print(f"Stopping all {args.num_clients} clients") - for index, (popen, _), (out, err), start in zip(range(len(clients)), clients, files, starts): - popen.wait() - Print(f"Stopped client {index}. Ran for {time.perf_counter() - start:.3f} seconds.") - out.close() - err.close() - outFile = open(f"{shipClientFilePrefix}{index}_snapshot.out", "r") - data = json.load(outFile) - block_num = start_block_num - for i in data: - # fork can cause block numbers to be repeated - this_block_num = i['get_blocks_result_v0']['this_block']['block_num'] - if this_block_num < block_num: - block_num = this_block_num - assert block_num == this_block_num, f"{block_num} != {this_block_num}" - assert isinstance(i['get_blocks_result_v0']['deltas'], str) # verify deltas in result - block_num += 1 - assert block_num-1 == end_block_num, f"{block_num-1} != {end_block_num}" + ## + ## Following requires https://github.com/AntelopeIO/leap/issues/1558 + ## + if not activateIF: + Print("Test starting ship from snapshot") + Utils.rmNodeDataDir(shipNodeNum) + isRelaunchSuccess = shipNode.relaunch(chainArg=" --snapshot {}".format(getLatestSnapshot(shipNodeNum))) + assert isRelaunchSuccess, "relaunch from snapshot failed" + + afterSnapshotBlockNum = shipNode.getBlockNum() + + Print("Verify we can stream from ship after start from a snapshot with no incoming trxs") + start_block_num = afterSnapshotBlockNum + block_range = 0 + end_block_num = start_block_num + block_range + cmd = f"{shipClient} --start-block-num {start_block_num} --end-block-num {end_block_num} --fetch-block --fetch-traces --fetch-deltas" + if Utils.Debug: Utils.Print(f"cmd: {cmd}") + clients = [] + files = [] + starts = [] + for i in range(0, args.num_clients): + start = time.perf_counter() + outFile = open(f"{shipClientFilePrefix}{i}_snapshot.out", "w") + errFile = open(f"{shipClientFilePrefix}{i}_snapshot.err", "w") + Print(f"Start client {i}") + popen=Utils.delayedCheckOutput(cmd, stdout=outFile, stderr=errFile) + starts.append(time.perf_counter()) + clients.append((popen, cmd)) + files.append((outFile, errFile)) + Print(f"Client {i} started, Ship node head is: {shipNode.getBlockNum()}") + + Print(f"Stopping all {args.num_clients} clients") + for index, (popen, _), (out, err), start in zip(range(len(clients)), clients, files, starts): + popen.wait() 
+ Print(f"Stopped client {index}. Ran for {time.perf_counter() - start:.3f} seconds.") + out.close() + err.close() + outFile = open(f"{shipClientFilePrefix}{index}_snapshot.out", "r") + data = json.load(outFile) + block_num = start_block_num + for i in data: + # fork can cause block numbers to be repeated + this_block_num = i['get_blocks_result_v0']['this_block']['block_num'] + if this_block_num < block_num: + block_num = this_block_num + assert block_num == this_block_num, f"{block_num} != {this_block_num}" + assert isinstance(i['get_blocks_result_v0']['deltas'], str) # verify deltas in result + block_num += 1 + assert block_num-1 == end_block_num, f"{block_num-1} != {end_block_num}" testSuccessful = True finally: From 15236d06ad720142f1f1374d9dbb3b32dc79d8e3 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 5 Feb 2024 06:29:55 -0600 Subject: [PATCH 2/4] GH-2172 Use not instead of len --- tests/TestHarness/Cluster.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/TestHarness/Cluster.py b/tests/TestHarness/Cluster.py index e139ca0d19..22412a800e 100644 --- a/tests/TestHarness/Cluster.py +++ b/tests/TestHarness/Cluster.py @@ -999,9 +999,9 @@ def activateInstantFinality(self, launcher, biosFinalizer, pnodes): # call setfinalizer numFins = 0 for n in launcher.network.nodes.values(): - if len(n.keys) == 0 or n.keys[0].blspubkey is None: + if not n.keys or not n.keys[0].blspubkey: continue - if len(n.producers) == 0: + if not n.producers: continue if n.index == Node.biosNodeId and not biosFinalizer: continue @@ -1020,9 +1020,9 @@ def activateInstantFinality(self, launcher, biosFinalizer, pnodes): for n in launcher.network.nodes.values(): if n.index == Node.biosNodeId and not biosFinalizer: continue - if len(n.keys) == 0 or n.keys[0].blspubkey is None: + if not n.keys or not n.keys[0].blspubkey: continue - if len(n.producers) == 0: + if not n.producers: continue setFinStr += f' {{"description": "finalizer #{finNum}", ' setFinStr += f' "weight":1, ' From 0fd74837d4caebf9925e41e3b153dfa8fc98fb0b Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 5 Feb 2024 09:50:43 -0600 Subject: [PATCH 3/4] GH-2172 Fix merge issue --- tests/ship_streamer_test.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/ship_streamer_test.py b/tests/ship_streamer_test.py index 76c4f58a63..e7f38371db 100755 --- a/tests/ship_streamer_test.py +++ b/tests/ship_streamer_test.py @@ -106,11 +106,11 @@ def getLatestSnapshot(nodeId): wasmFile = "%s.wasm" % (contract) abiFile = "%s.abi" % (contract) - nonProdNode.publishContract(accounts[0], contractDir, wasmFile, abiFile) + nonProdNode.publishContract(cluster.defproducerbAccount, contractDir, wasmFile, abiFile) jumbotxn = { - "actions": [{"account": "testeraaaaaa","name": "jumbotime", - "authorization": [{"actor": "testeraaaaaa","permission": "active"}], + "actions": [{"account": "defproducerb","name": "jumbotime", + "authorization": [{"actor": "defproducerb","permission": "active"}], "data": "", "compression": "none"}] } From c18ed8c0e9e9b0290e84c0c49127fed01e9a76d4 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 5 Feb 2024 13:19:02 -0600 Subject: [PATCH 4/4] GH-2172 Add in test --- tests/CMakeLists.txt | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 068242ee6f..a0a9831c3d 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -139,9 +139,8 @@ set_property(TEST ship_if_test PROPERTY LABELS nonparallelizable_tests) 
add_test(NAME ship_streamer_test COMMAND tests/ship_streamer_test.py -v --num-clients 10 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST ship_streamer_test PROPERTY LABELS long_running_tests) -# TODO investigate failure: https://github.com/AntelopeIO/leap/issues/2172 -#add_test(NAME ship_streamer_if_test COMMAND tests/ship_streamer_test.py -v --num-clients 10 --activate-if ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -#set_property(TEST ship_streamer_if_test PROPERTY LABELS long_running_tests) +add_test(NAME ship_streamer_if_test COMMAND tests/ship_streamer_test.py -v --num-clients 10 --activate-if ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST ship_streamer_if_test PROPERTY LABELS long_running_tests) add_test(NAME p2p_dawn515_test COMMAND tests/p2p_tests/dawn_515/test.sh WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST p2p_dawn515_test PROPERTY LABELS nonparallelizable_tests)
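
For readers following the fork handling above: the per-client verification that ship_streamer_test.py applies to each captured JSON stream (visible in the snapshot-verification loop of patch 1) is a fork-tolerant sequence check — block numbers must advance by one, except that a fork switch may rewind the stream, after which counting resumes from the rewound number. The standalone sketch below restates that check outside the test harness; the file name, function name, and CLI wrapper are illustrative only, while the get_blocks_result_v0 JSON layout mirrors the test's own parsing.

    #!/usr/bin/env python3
    # Illustrative sketch (not part of the patch): validate a ship_streamer JSON
    # capture the same way ship_streamer_test.py does, tolerating block-number
    # rewinds caused by a fork switch. Names here are hypothetical.

    import json
    import sys

    def verify_stream(path, start_block_num, end_block_num):
        with open(path, "r") as f:
            results = json.load(f)   # list of get_blocks_result_v0 entries

        block_num = start_block_num
        for entry in results:
            this_block_num = entry['get_blocks_result_v0']['this_block']['block_num']
            # A fork switch can cause block numbers to be repeated; when the
            # stream rewinds, continue counting from the rewound block number.
            if this_block_num < block_num:
                block_num = this_block_num
            assert block_num == this_block_num, f"{block_num} != {this_block_num}"
            # Deltas arrive as a string payload in the client's JSON output.
            assert isinstance(entry['get_blocks_result_v0']['deltas'], str)
            block_num += 1

        assert block_num - 1 == end_block_num, f"{block_num - 1} != {end_block_num}"

    if __name__ == "__main__":
        verify_stream(sys.argv[1], int(sys.argv[2]), int(sys.argv[3]))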