From d35b65c863f05dd04fe7be15421f477528822929 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 18 Nov 2022 14:22:39 -0600 Subject: [PATCH 01/12] Rename to PerformanceTestBasic to better align with script names. Also renaming to use Ptb abbreviation in light of the name change. --- tests/performance_tests/performance_test.py | 22 +++++++++---------- .../performance_test_basic.py | 20 ++++++++--------- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index 09738bbcf5..1224c09f36 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -11,7 +11,7 @@ from TestHarness import TestHelper, Utils from TestHarness.TestHelper import AppArgs -from performance_test_basic import PerformanceBasicTest +from performance_test_basic import PerformanceTestBasic from platform import release, system from dataclasses import dataclass, asdict, field from datetime import datetime @@ -44,8 +44,8 @@ class PerfTestSearchResults: searchResults: list = field(default_factory=list) #PerfTestSearchIndivResult list maxTpsReport: dict = field(default_factory=dict) -def performPtbBinarySearch(tpsTestFloor: int, tpsTestCeiling: int, minStep: int, testHelperConfig: PerformanceBasicTest.TestHelperConfig, - testClusterConfig: PerformanceBasicTest.ClusterConfig, testDurationSec: int, tpsLimitPerGenerator: int, +def performPtbBinarySearch(tpsTestFloor: int, tpsTestCeiling: int, minStep: int, testHelperConfig: PerformanceTestBasic.TestHelperConfig, + testClusterConfig: PerformanceTestBasic.ClusterConfig, testDurationSec: int, tpsLimitPerGenerator: int, numAddlBlocksToPrune: int, testLogDir: str, delReport: bool, quiet: bool, delPerfLogs: bool) -> PerfTestSearchResults: floor = tpsTestFloor ceiling = tpsTestCeiling @@ -60,7 +60,7 @@ def performPtbBinarySearch(tpsTestFloor: int, tpsTestCeiling: int, minStep: int, ptbResult = PerfTestBasicResult() scenarioResult = PerfTestSearchIndivResult(success=False, searchTarget=binSearchTarget, searchFloor=floor, searchCeiling=ceiling, basicTestResult=ptbResult) - myTest = PerformanceBasicTest(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, targetTps=binSearchTarget, + myTest = PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, targetTps=binSearchTarget, testTrxGenDurationSec=testDurationSec, tpsLimitPerGenerator=tpsLimitPerGenerator, numAddlBlocksToPrune=numAddlBlocksToPrune, rootLogDir=testLogDir, delReport=delReport, quiet=quiet, delPerfLogs=delPerfLogs) testSuccessful = myTest.runTest() @@ -81,8 +81,8 @@ def performPtbBinarySearch(tpsTestFloor: int, tpsTestCeiling: int, minStep: int, return PerfTestSearchResults(maxTpsAchieved=maxTpsAchieved, searchResults=searchResults, maxTpsReport=maxTpsReport) -def performPtbReverseLinearSearch(tpsInitial: int, step: int, testHelperConfig: PerformanceBasicTest.TestHelperConfig, - testClusterConfig: PerformanceBasicTest.ClusterConfig, testDurationSec: int, tpsLimitPerGenerator: int, +def performPtbReverseLinearSearch(tpsInitial: int, step: int, testHelperConfig: PerformanceTestBasic.TestHelperConfig, + testClusterConfig: PerformanceTestBasic.ClusterConfig, testDurationSec: int, tpsLimitPerGenerator: int, numAddlBlocksToPrune: int, testLogDir: str, delReport: bool, quiet: bool, delPerfLogs: bool) -> PerfTestSearchResults: # Default - Decrementing Max TPS in range [0, tpsInitial] @@ -101,7 +101,7 @@ def 
performPtbReverseLinearSearch(tpsInitial: int, step: int, testHelperConfig: ptbResult = PerfTestBasicResult() scenarioResult = PerfTestSearchIndivResult(success=False, searchTarget=searchTarget, searchFloor=absFloor, searchCeiling=absCeiling, basicTestResult=ptbResult) - myTest = PerformanceBasicTest(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, targetTps=searchTarget, + myTest = PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, targetTps=searchTarget, testTrxGenDurationSec=testDurationSec, tpsLimitPerGenerator=tpsLimitPerGenerator, numAddlBlocksToPrune=numAddlBlocksToPrune, rootLogDir=testLogDir, delReport=delReport, quiet=quiet, delPerfLogs=delPerfLogs) testSuccessful = myTest.runTest() @@ -120,7 +120,7 @@ def performPtbReverseLinearSearch(tpsInitial: int, step: int, testHelperConfig: return PerfTestSearchResults(maxTpsAchieved=maxTpsAchieved, searchResults=searchResults, maxTpsReport=maxTpsReport) -def evaluateSuccess(test: PerformanceBasicTest, testSuccessful: bool, result: PerfTestBasicResult) -> bool: +def evaluateSuccess(test: PerformanceTestBasic, testSuccessful: bool, result: PerfTestBasicResult) -> bool: result.targetTPS = test.targetTps result.expectedTxns = test.expectedTransactionsSent reportDict = test.report @@ -278,11 +278,11 @@ def main(): testDirsSetup(rootLogDir=rootLogDir, testTimeStampDirPath=testTimeStampDirPath, ptbLogsDirPath=ptbLogsDirPath) - testHelperConfig = PerformanceBasicTest.TestHelperConfig(killAll=killAll, dontKill=dontKill, keepLogs=not delPerfLogs, + testHelperConfig = PerformanceTestBasic.TestHelperConfig(killAll=killAll, dontKill=dontKill, keepLogs=not delPerfLogs, dumpErrorDetails=dumpErrorDetails, delay=delay, nodesFile=nodesFile, verbose=verbose) - ENA = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs + ENA = PerformanceTestBasic.ClusterConfig.ExtraNodeosArgs chainPluginArgs = ENA.ChainPluginArgs(signatureCpuBillablePct=args.signature_cpu_billable_pct, chainStateDbSizeMb=args.chain_state_db_size_mb, chainThreads=args.chain_threads, databaseMapMode=args.database_map_mode) producerPluginArgs = ENA.ProducerPluginArgs(disableSubjectiveBilling=args.disable_subjective_billing, @@ -292,7 +292,7 @@ def main(): httpPluginArgs = ENA.HttpPluginArgs(httpMaxResponseTimeMs=args.http_max_response_time_ms) netPluginArgs = ENA.NetPluginArgs(netThreads=args.net_threads) extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs) - testClusterConfig = PerformanceBasicTest.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, + testClusterConfig = PerformanceTestBasic.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs) argsDict = prepArgsDict(testDurationSec=testDurationSec, finalDurationSec=finalDurationSec, logsDir=testTimeStampDirPath, diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 1b82c9b0b1..f64867cf05 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -19,9 +19,9 @@ from datetime import datetime from math import ceil -class PerformanceBasicTest: +class PerformanceTestBasic: @dataclass - class PbtTpsTestResult: + class PtbTpsTestResult: completedRun: bool = False numGeneratorsUsed: int = 0 targetTpsPerGenList: list = 
field(default_factory=list) @@ -267,7 +267,7 @@ def setupWalletAndAccounts(self): self.account1PrivKey = self.cluster.accounts[0].activePrivateKey self.account2PrivKey = self.cluster.accounts[1].activePrivateKey - def runTpsTest(self) -> PbtTpsTestResult: + def runTpsTest(self) -> PtbTpsTestResult: completedRun = False self.producerNode = self.cluster.getNode(self.producerNodeId) self.validationNode = self.cluster.getNode(self.validationNodeId) @@ -301,7 +301,7 @@ def runTpsTest(self) -> PbtTpsTestResult: trxSent = self.validationNode.waitForTransactionsInBlockRange(trxSent, self.data.startBlock, blocksToWait) self.data.ceaseBlock = self.validationNode.getHeadBlockNum() - return PerformanceBasicTest.PbtTpsTestResult(completedRun=completedRun, numGeneratorsUsed=tpsTrxGensConfig.numGenerators, + return PerformanceTestBasic.PtbTpsTestResult(completedRun=completedRun, numGeneratorsUsed=tpsTrxGensConfig.numGenerators, targetTpsPerGenList=tpsTrxGensConfig.targetTpsPerGenList, trxGenExitCodes=trxGenExitCodes) def prepArgs(self) -> dict: @@ -334,7 +334,7 @@ def captureLowLevelArtifacts(self): print(f"Failed to move '{etcEosioDir}/{path}' to '{self.etcEosioLogsDirPath}/{path}': {type(e)}: {e}") - def analyzeResultsAndReport(self, testResult: PbtTpsTestResult): + def analyzeResultsAndReport(self, testResult: PtbTpsTestResult): args = self.prepArgs() artifactsLocate = log_reader.ArtifactPaths(nodeosLogPath=self.nodeosLogPath, trxGenLogDirPath=self.trxGenLogDirPath, blockTrxDataPath=self.blockTrxDataPath, blockDataPath=self.blockDataPath) @@ -386,7 +386,7 @@ def runTest(self) -> bool: testSuccessful = self.ptbTestResult.completedRun - if not self.PbtTpsTestResult.completedRun: + if not self.PtbTpsTestResult.completedRun: for exitCode in self.ptbTestResult.trxGenExitCodes: if exitCode != 0: print(f"Error: Transaction Generator exited with error {exitCode}") @@ -458,10 +458,10 @@ def main(): args = parseArgs() Utils.Debug = args.v - testHelperConfig = PerformanceBasicTest.TestHelperConfig(killAll=args.clean_run, dontKill=args.leave_running, keepLogs=not args.del_perf_logs, + testHelperConfig = PerformanceTestBasic.TestHelperConfig(killAll=args.clean_run, dontKill=args.leave_running, keepLogs=not args.del_perf_logs, dumpErrorDetails=args.dump_error_details, delay=args.d, nodesFile=args.nodes_file, verbose=args.v) - ENA = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs + ENA = PerformanceTestBasic.ClusterConfig.ExtraNodeosArgs chainPluginArgs = ENA.ChainPluginArgs(signatureCpuBillablePct=args.signature_cpu_billable_pct, chainStateDbSizeMb=args.chain_state_db_size_mb, chainThreads=args.chain_threads, databaseMapMode=args.database_map_mode) producerPluginArgs = ENA.ProducerPluginArgs(disableSubjectiveBilling=args.disable_subjective_billing, @@ -471,10 +471,10 @@ def main(): httpPluginArgs = ENA.HttpPluginArgs(httpMaxResponseTimeMs=args.http_max_response_time_ms) netPluginArgs = ENA.NetPluginArgs(netThreads=args.net_threads) extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs) - testClusterConfig = PerformanceBasicTest.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, + testClusterConfig = PerformanceTestBasic.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs) - myTest = PerformanceBasicTest(testHelperConfig=testHelperConfig, 
clusterConfig=testClusterConfig, targetTps=args.target_tps,
+    myTest = PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, targetTps=args.target_tps,
                                   testTrxGenDurationSec=args.test_duration_sec, tpsLimitPerGenerator=args.tps_limit_per_generator,
                                   numAddlBlocksToPrune=args.num_blocks_to_prune, delReport=args.del_report, quiet=args.quiet,
                                   delPerfLogs=args.del_perf_logs)

From d4b31f341c7e91f803c65139aa8e147a243c33c4 Mon Sep 17 00:00:00 2001
From: Peter Oschwald
Date: Fri, 18 Nov 2022 16:58:47 -0600
Subject: [PATCH 02/12] Factor out test params into PtbConfig and LoggingConfig
 objects.

Using PtbConfig and LoggingConfig objects simplifies the test initializer
parameters and encapsulates related config items into objects that describe
their use. It also makes it easy to report the test configuration in the
test report, and any config arguments added later are reported for free.

Some renaming for efficiency and clarity.
---
 tests/performance_tests/performance_test.py   |  21 ++--
 .../performance_test_basic.py                 | 109 ++++++++++--------
 2 files changed, 72 insertions(+), 58 deletions(-)

diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py
index 1224c09f36..62b66e68c5 100755
--- a/tests/performance_tests/performance_test.py
+++ b/tests/performance_tests/performance_test.py
@@ -59,10 +59,10 @@ def performPtbBinarySearch(tpsTestFloor: int, tpsTestCeiling: int, minStep: int,
         print(f"Running scenario: floor {floor} binSearchTarget {binSearchTarget} ceiling {ceiling}")
         ptbResult = PerfTestBasicResult()
         scenarioResult = PerfTestSearchIndivResult(success=False, searchTarget=binSearchTarget, searchFloor=floor, searchCeiling=ceiling, basicTestResult=ptbResult)
+        ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=binSearchTarget, testTrxGenDurationSec=testDurationSec, tpsLimitPerGenerator=tpsLimitPerGenerator,
+                                                   numAddlBlocksToPrune=numAddlBlocksToPrune, logDirRoot=testLogDir, delReport=delReport, quiet=quiet, delPerfLogs=delPerfLogs)
 
-        myTest = PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, targetTps=binSearchTarget,
-                                      testTrxGenDurationSec=testDurationSec, tpsLimitPerGenerator=tpsLimitPerGenerator,
-                                      numAddlBlocksToPrune=numAddlBlocksToPrune, rootLogDir=testLogDir, delReport=delReport, quiet=quiet, delPerfLogs=delPerfLogs)
+        myTest = PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptbConfig=ptbConfig)
         testSuccessful = myTest.runTest()
         if evaluateSuccess(myTest, testSuccessful, ptbResult):
             maxTpsAchieved = binSearchTarget
@@ -100,10 +100,10 @@ def performPtbReverseLinearSearch(tpsInitial: int, step: int, testHelperConfig:
         print(f"Running scenario: floor {absFloor} searchTarget {searchTarget} ceiling {absCeiling}")
         ptbResult = PerfTestBasicResult()
         scenarioResult = PerfTestSearchIndivResult(success=False, searchTarget=searchTarget, searchFloor=absFloor, searchCeiling=absCeiling, basicTestResult=ptbResult)
+        ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=searchTarget, testTrxGenDurationSec=testDurationSec, tpsLimitPerGenerator=tpsLimitPerGenerator,
+                                                   numAddlBlocksToPrune=numAddlBlocksToPrune, logDirRoot=testLogDir, delReport=delReport, quiet=quiet, delPerfLogs=delPerfLogs)
 
-        myTest = PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, targetTps=searchTarget,
-                                      testTrxGenDurationSec=testDurationSec, tpsLimitPerGenerator=tpsLimitPerGenerator,
-                                      numAddlBlocksToPrune=numAddlBlocksToPrune, 
rootLogDir=testLogDir, delReport=delReport, quiet=quiet, delPerfLogs=delPerfLogs) + myTest = PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptbConfig=ptbConfig) testSuccessful = myTest.runTest() if evaluateSuccess(myTest, testSuccessful, ptbResult): maxTpsAchieved = searchTarget @@ -121,8 +121,8 @@ def performPtbReverseLinearSearch(tpsInitial: int, step: int, testHelperConfig: return PerfTestSearchResults(maxTpsAchieved=maxTpsAchieved, searchResults=searchResults, maxTpsReport=maxTpsReport) def evaluateSuccess(test: PerformanceTestBasic, testSuccessful: bool, result: PerfTestBasicResult) -> bool: - result.targetTPS = test.targetTps - result.expectedTxns = test.expectedTransactionsSent + result.targetTPS = test.ptbConfig.targetTps + result.expectedTxns = test.ptbConfig.expectedTransactionsSent reportDict = test.report result.testStart = reportDict["testStart"] result.testEnd = reportDict["testFinish"] @@ -134,7 +134,7 @@ def evaluateSuccess(test: PerformanceTestBasic, testSuccessful: bool, result: Pe result.trxExpectMet = result.expectedTxns == result.resultTxns result.basicTestSuccess = testSuccessful result.testAnalysisBlockCnt = reportDict["Analysis"]["BlocksGuide"]["testAnalysisBlockCnt"] - result.logsDir = test.testTimeStampDirPath + result.logsDir = test.loggingConfig.logDirPath print(f"basicTestSuccess: {result.basicTestSuccess} tpsExpectationMet: {result.tpsExpectMet} trxExpectationMet: {result.trxExpectMet}") @@ -201,7 +201,7 @@ def createArtifactsDir(path): def prepArgsDict(testDurationSec, finalDurationSec, logsDir, maxTpsToTest, testIterationMinStep, tpsLimitPerGenerator, delReport, delTestReport, numAddlBlocksToPrune, quiet, delPerfLogs, - testHelperConfig, testClusterConfig) -> dict: + testHelperConfig: PerformanceTestBasic.TestHelperConfig, testClusterConfig: PerformanceTestBasic.ClusterConfig) -> dict: argsDict = {} argsDict.update(asdict(testHelperConfig)) argsDict.update(asdict(testClusterConfig)) @@ -294,7 +294,6 @@ def main(): extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs) testClusterConfig = PerformanceTestBasic.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs) - argsDict = prepArgsDict(testDurationSec=testDurationSec, finalDurationSec=finalDurationSec, logsDir=testTimeStampDirPath, maxTpsToTest=maxTpsToTest, testIterationMinStep=testIterationMinStep, tpsLimitPerGenerator=tpsLimitPerGenerator, delReport=delReport, delTestReport=delTestReport, numAddlBlocksToPrune=numAddlBlocksToPrune, diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index f64867cf05..8ffe58ed3e 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -117,21 +117,37 @@ def __post_init__(self): if not self.prodsEnableTraceApi: self.specificExtraNodeosArgs.update({f"{node}" : "--plugin eosio::trace_api_plugin" for node in range(self.pnodes, self._totalNodes)}) - def __init__(self, testHelperConfig: TestHelperConfig=TestHelperConfig(), clusterConfig: ClusterConfig=ClusterConfig(), targetTps: int=8000, - testTrxGenDurationSec: int=30, tpsLimitPerGenerator: int=4000, numAddlBlocksToPrune: int=2, - rootLogDir: str=".", delReport: bool=False, quiet: bool=False, delPerfLogs: bool=False): + @dataclass + class PtbConfig: 
+        targetTps: int=8000
+        testTrxGenDurationSec: int=30
+        tpsLimitPerGenerator: int=4000
+        numAddlBlocksToPrune: int=2
+        logDirRoot: str="."
+        delReport: bool=False
+        quiet: bool=False
+        delPerfLogs: bool=False
+        expectedTransactionsSent: int = field(default_factory=int, init=False)
+
+        def __post_init__(self):
+            self.expectedTransactionsSent = self.testTrxGenDurationSec * self.targetTps
+
+    @dataclass
+    class LoggingConfig:
+        logDirBase: str = f"./{os.path.splitext(os.path.basename(__file__))[0]}"
+        logDirTimestamp: str = f"{datetime.utcnow().strftime('%Y-%m-%d_%H-%M-%S')}"
+        logDirTimestampedOptSuffix: str = ""
+        logDirPath: str = field(default_factory=str, init=False)
+
+        def __post_init__(self):
+            self.logDirPath = f"{self.logDirBase}/{self.logDirTimestamp}{self.logDirTimestampedOptSuffix}"
+
+    def __init__(self, testHelperConfig: TestHelperConfig=TestHelperConfig(), clusterConfig: ClusterConfig=ClusterConfig(), ptbConfig: PtbConfig=PtbConfig()):
         self.testHelperConfig = testHelperConfig
         self.clusterConfig = clusterConfig
-        self.targetTps = targetTps
-        self.testTrxGenDurationSec = testTrxGenDurationSec
-        self.tpsLimitPerGenerator = tpsLimitPerGenerator
-        self.expectedTransactionsSent = self.testTrxGenDurationSec * self.targetTps
-        self.numAddlBlocksToPrune = numAddlBlocksToPrune
-        self.delReport = delReport
-        self.quiet = quiet
-        self.delPerfLogs=delPerfLogs
+        self.ptbConfig = ptbConfig
 
-        self.testHelperConfig.keepLogs = not self.delPerfLogs
+        self.testHelperConfig.keepLogs = not self.ptbConfig.delPerfLogs
 
         Utils.Debug = self.testHelperConfig.verbose
         self.errorExit = Utils.errorExit
@@ -139,17 +155,18 @@ def __init__(self, testHelperConfig: TestHelperConfig=TestHelperConfig(), cluste
 
         self.testStart = datetime.utcnow()
 
-        self.rootLogDir = rootLogDir
-        self.ptbLogDir = f"{self.rootLogDir}/{os.path.splitext(os.path.basename(__file__))[0]}"
-        self.testTimeStampDirPath = f"{self.ptbLogDir}/{self.testStart.strftime('%Y-%m-%d_%H-%M-%S')}-{self.targetTps}"
-        self.trxGenLogDirPath = f"{self.testTimeStampDirPath}/trxGenLogs"
-        self.varLogsDirPath = f"{self.testTimeStampDirPath}/var"
-        self.etcLogsDirPath = f"{self.testTimeStampDirPath}/etc"
+        self.loggingConfig = PerformanceTestBasic.LoggingConfig(logDirBase=f"{self.ptbConfig.logDirRoot}/{os.path.splitext(os.path.basename(__file__))[0]}",
+                                                                logDirTimestamp=f"{self.testStart.strftime('%Y-%m-%d_%H-%M-%S')}",
+                                                                logDirTimestampedOptSuffix = f"-{self.ptbConfig.targetTps}")
+
+        self.trxGenLogDirPath = f"{self.loggingConfig.logDirPath}/trxGenLogs"
+        self.varLogsDirPath = f"{self.loggingConfig.logDirPath}/var"
+        self.etcLogsDirPath = f"{self.loggingConfig.logDirPath}/etc"
         self.etcEosioLogsDirPath = f"{self.etcLogsDirPath}/eosio"
-        self.blockDataLogDirPath = f"{self.testTimeStampDirPath}/blockDataLogs"
+        self.blockDataLogDirPath = f"{self.loggingConfig.logDirPath}/blockDataLogs"
         self.blockDataPath = f"{self.blockDataLogDirPath}/blockData.txt"
         self.blockTrxDataPath = f"{self.blockDataLogDirPath}/blockTrxData.txt"
-        self.reportPath = f"{self.testTimeStampDirPath}/data.json"
+        self.reportPath = f"{self.loggingConfig.logDirPath}/data.json"
 
         # Setup Expectations for Producer and Validation Node IDs
         # Producer Nodes are index [0, pnodes) and validation nodes/non-producer nodes [pnodes, _totalNodes)
@@ -186,7 +203,7 @@ def removeAllArtifactsExceptFinalReport():
             if not delReport:
                 removeAllArtifactsExceptFinalReport()
            else:
-                removeArtifacts(self.testTimeStampDirPath)
+                removeArtifacts(self.loggingConfig.logDirPath)
         except OSError as error:
             print(error)
 
@@ -198,9 +215,9 @@ def 
createArtifactsDir(path): print(f"Creating test artifacts dir: {path}") os.mkdir(f"{path}") - createArtifactsDir(self.rootLogDir) - createArtifactsDir(self.ptbLogDir) - createArtifactsDir(self.testTimeStampDirPath) + createArtifactsDir(self.ptbConfig.logDirRoot) + createArtifactsDir(self.loggingConfig.logDirBase) + createArtifactsDir(self.loggingConfig.logDirPath) createArtifactsDir(self.trxGenLogDirPath) createArtifactsDir(self.varLogsDirPath) createArtifactsDir(self.etcLogsDirPath) @@ -279,10 +296,10 @@ def runTpsTest(self) -> PtbTpsTestResult: self.cluster.biosNode.kill(signal.SIGTERM) self.data.startBlock = self.waitForEmptyBlocks(self.validationNode, self.emptyBlockGoal) - tpsTrxGensConfig = ltg.TpsTrxGensConfig(targetTps=self.targetTps, tpsLimitPerGenerator=self.tpsLimitPerGenerator) + tpsTrxGensConfig = ltg.TpsTrxGensConfig(targetTps=self.ptbConfig.targetTps, tpsLimitPerGenerator=self.ptbConfig.tpsLimitPerGenerator) trxGenLauncher = ltg.TransactionGeneratorsLauncher(chainId=chainId, lastIrreversibleBlockId=lib_id, handlerAcct=self.cluster.eosioAccount.name, accts=f"{self.account1Name},{self.account2Name}", - privateKeys=f"{self.account1PrivKey},{self.account2PrivKey}", trxGenDurationSec=self.testTrxGenDurationSec, + privateKeys=f"{self.account1PrivKey},{self.account2PrivKey}", trxGenDurationSec=self.ptbConfig.testTrxGenDurationSec, logDir=self.trxGenLogDirPath, tpsTrxGensConfig=tpsTrxGensConfig) trxGenExitCodes = trxGenLauncher.launch() @@ -296,8 +313,8 @@ def runTpsTest(self) -> PtbTpsTestResult: # Get stats after transaction generation stops trxSent = {} - log_reader.scrapeTrxGenTrxSentDataLogs(trxSent, self.trxGenLogDirPath, self.quiet) - blocksToWait = 2 * self.testTrxGenDurationSec + 10 + log_reader.scrapeTrxGenTrxSentDataLogs(trxSent, self.trxGenLogDirPath, self.ptbConfig.quiet) + blocksToWait = 2 * self.ptbConfig.testTrxGenDurationSec + 10 trxSent = self.validationNode.waitForTransactionsInBlockRange(trxSent, self.data.startBlock, blocksToWait) self.data.ceaseBlock = self.validationNode.getHeadBlockNum() @@ -308,8 +325,8 @@ def prepArgs(self) -> dict: args = {} args.update(asdict(self.testHelperConfig)) args.update(asdict(self.clusterConfig)) - args.update({key:val for key, val in inspect.getmembers(self) if key in set(['targetTps', 'testTrxGenDurationSec', 'tpsLimitPerGenerator', - 'expectedTransactionsSent', 'delReport', 'numAddlBlocksToPrune', 'quiet', 'delPerfLogs'])}) + args.update(asdict(self.ptbConfig)) + args.update(asdict(self.loggingConfig)) return args def captureLowLevelArtifacts(self): @@ -338,22 +355,22 @@ def analyzeResultsAndReport(self, testResult: PtbTpsTestResult): args = self.prepArgs() artifactsLocate = log_reader.ArtifactPaths(nodeosLogPath=self.nodeosLogPath, trxGenLogDirPath=self.trxGenLogDirPath, blockTrxDataPath=self.blockTrxDataPath, blockDataPath=self.blockDataPath) - tpsTestConfig = log_reader.TpsTestConfig(targetTps=self.targetTps, testDurationSec=self.testTrxGenDurationSec, tpsLimitPerGenerator=self.tpsLimitPerGenerator, - numBlocksToPrune=self.numAddlBlocksToPrune, numTrxGensUsed=testResult.numGeneratorsUsed, - targetTpsPerGenList=testResult.targetTpsPerGenList, quiet=self.quiet) + tpsTestConfig = log_reader.TpsTestConfig(targetTps=self.ptbConfig.targetTps, testDurationSec=self.ptbConfig.testTrxGenDurationSec, tpsLimitPerGenerator=self.ptbConfig.tpsLimitPerGenerator, + numBlocksToPrune=self.ptbConfig.numAddlBlocksToPrune, numTrxGensUsed=testResult.numGeneratorsUsed, + targetTpsPerGenList=testResult.targetTpsPerGenList, 
quiet=self.ptbConfig.quiet) self.report = log_reader.calcAndReport(data=self.data, tpsTestConfig=tpsTestConfig, artifacts=artifactsLocate, argsDict=args, testStart=self.testStart, completedRun=testResult.completedRun) jsonReport = None - if not self.quiet or not self.delReport: + if not self.ptbConfig.quiet or not self.ptbConfig.delReport: jsonReport = log_reader.reportAsJSON(self.report) - if not self.quiet: + if not self.ptbConfig.quiet: print(self.data) print(f"Report:\n{jsonReport}") - if not self.delReport: + if not self.ptbConfig.delReport: log_reader.exportReportAsJSON(jsonReport, self.reportPath) def preTestSpinup(self): @@ -391,9 +408,9 @@ def runTest(self) -> bool: if exitCode != 0: print(f"Error: Transaction Generator exited with error {exitCode}") - if testSuccessful and self.expectedTransactionsSent != self.data.totalTransactions: + if testSuccessful and self.ptbConfig.expectedTransactionsSent != self.data.totalTransactions: testSuccessful = False - print(f"Error: Transactions received: {self.data.totalTransactions} did not match expected total: {self.expectedTransactionsSent}") + print(f"Error: Transactions received: {self.data.totalTransactions} did not match expected total: {self.ptbConfig.expectedTransactionsSent}") finally: TestHelper.shutdown( @@ -407,16 +424,16 @@ def runTest(self) -> bool: self.testHelperConfig.dumpErrorDetails ) - if not self.delPerfLogs: + if not self.ptbConfig.delPerfLogs: self.captureLowLevelArtifacts() if not completedRun: os.system("pkill trx_generator") print("Test run cancelled early via SIGINT") - if self.delPerfLogs: - print(f"Cleaning up logs directory: {self.testTimeStampDirPath}") - self.testDirsCleanup(self.delReport) + if self.ptbConfig.delPerfLogs: + print(f"Cleaning up logs directory: {self.loggingConfig.logDirPath}") + self.testDirsCleanup(self.ptbConfig.delReport) return testSuccessful @@ -473,11 +490,9 @@ def main(): extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs) testClusterConfig = PerformanceTestBasic.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs) - - myTest = PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, targetTps=args.target_tps, - testTrxGenDurationSec=args.test_duration_sec, tpsLimitPerGenerator=args.tps_limit_per_generator, - numAddlBlocksToPrune=args.num_blocks_to_prune, delReport=args.del_report, quiet=args.quiet, - delPerfLogs=args.del_perf_logs) + ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=args.target_tps, testTrxGenDurationSec=args.test_duration_sec, tpsLimitPerGenerator=args.tps_limit_per_generator, + numAddlBlocksToPrune=args.num_blocks_to_prune, logDirRoot=".", delReport=args.del_report, quiet=args.quiet, delPerfLogs=args.del_perf_logs) + myTest = PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptbConfig=ptbConfig) testSuccessful = myTest.runTest() exitCode = 0 if testSuccessful else 1 From 22b0118aaeb07eca02b41e73b2e5205471b752cd Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 21 Nov 2022 16:36:14 -0600 Subject: [PATCH 03/12] Factor PerformanceTest out into its own class. Break out config and logging classes. Update documentation. 
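
As a usage sketch for reviewers (illustrative only, not part of the diff
below; report and return-code handling is elided), a full run after this
refactor is driven entirely through the three config objects:

    from performance_test import PerformanceTest
    from performance_test_basic import PerformanceTestBasic

    ptConfig = PerformanceTest.PtConfig(testDurationSec=10,
                                        finalDurationSec=30,
                                        maxTpsToTest=50000,
                                        testIterationMinStep=500,
                                        tpsLimitPerGenerator=4000)
    myTest = PerformanceTest(testHelperConfig=PerformanceTestBasic.TestHelperConfig(),
                             clusterConfig=PerformanceTestBasic.ClusterConfig(),
                             ptConfig=ptConfig)
    myTest.performTpsTest()

LoggingConfig is derived internally from PtConfig.logDirRoot and the test
start timestamp, so callers normally do not construct it directly.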
--- tests/performance_tests/README.md | 256 +++++---- tests/performance_tests/performance_test.py | 570 ++++++++++---------- 2 files changed, 419 insertions(+), 407 deletions(-) diff --git a/tests/performance_tests/README.md b/tests/performance_tests/README.md index e7609d8d89..c1935f191d 100644 --- a/tests/performance_tests/README.md +++ b/tests/performance_tests/README.md @@ -450,9 +450,9 @@ Finally, the full detail test report for each of the determined max TPS throughp ``` json { "InitialMaxTpsAchieved": 16000, - "LongRunningMaxTpsAchieved": 15000, - "testStart": "2022-11-04T19:31:40.539240", - "testFinish": "2022-11-04T19:48:53.096915", + "LongRunningMaxTpsAchieved": 16000, + "testStart": "2022-11-21T22:17:03.604928", + "testFinish": "2022-11-21T22:29:02.923633", "InitialSearchResults": { "0": { "success": false, @@ -461,16 +461,16 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchCeiling": 50000, "basicTestResult": { "targetTPS": 50000, - "resultAvgTps": 15312.09090909091, + "resultAvgTps": 15121.925, "expectedTxns": 500000, - "resultTxns": 362075, + "resultTxns": 326102, "tpsExpectMet": false, "trxExpectMet": false, - "basicTestSuccess": true, - "testAnalysisBlockCnt": 45, - "logsDir": "performance_test/2022-11-04_19-31-40/testRunLogs/performance_test_basic/2022-11-04_19-31-40", - "testStart": "2022-11-04T19:31:40.539927", - "testEnd": "2022-11-04T19:33:16.377065" + "basicTestSuccess": false, + "testAnalysisBlockCnt": 41, + "logsDir": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-17-03-50000", + "testStart": "2022-11-21T22:17:03.624828", + "testEnd": "2022-11-21T22:18:35.048631" } }, "1": { @@ -480,16 +480,16 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchCeiling": 49500, "basicTestResult": { "targetTPS": 25000, - "resultAvgTps": 15098.241379310344, + "resultAvgTps": 15307.275862068966, "expectedTxns": 250000, "resultTxns": 250000, "tpsExpectMet": false, "trxExpectMet": true, "basicTestSuccess": true, "testAnalysisBlockCnt": 30, - "logsDir": "performance_test/2022-11-04_19-31-40/testRunLogs/performance_test_basic/2022-11-04_19-33-16", - "testStart": "2022-11-04T19:33:16.471198", - "testEnd": "2022-11-04T19:34:45.441319" + "logsDir": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-18-35-25000", + "testStart": "2022-11-21T22:18:35.136441", + "testEnd": "2022-11-21T22:20:02.355919" } }, "2": { @@ -499,16 +499,16 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchCeiling": 24500, "basicTestResult": { "targetTPS": 12500, - "resultAvgTps": 12500.0625, + "resultAvgTps": 12494.4375, "expectedTxns": 125000, "resultTxns": 125000, "tpsExpectMet": true, "trxExpectMet": true, "basicTestSuccess": true, "testAnalysisBlockCnt": 17, - "logsDir": "performance_test/2022-11-04_19-31-40/testRunLogs/performance_test_basic/2022-11-04_19-34-45", - "testStart": "2022-11-04T19:34:45.507994", - "testEnd": "2022-11-04T19:36:01.234060" + "logsDir": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-20-02-12500", + "testStart": "2022-11-21T22:20:02.419664", + "testEnd": "2022-11-21T22:21:17.334219" } }, "3": { @@ -518,16 +518,16 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchCeiling": 24500, "basicTestResult": { "targetTPS": 19000, - "resultAvgTps": 15454.0, + "resultAvgTps": 15546.55, "expectedTxns": 190000, "resultTxns": 
190000, "tpsExpectMet": false, "trxExpectMet": true, "basicTestSuccess": true, - "testAnalysisBlockCnt": 22, - "logsDir": "performance_test/2022-11-04_19-31-40/testRunLogs/performance_test_basic/2022-11-04_19-36-01", - "testStart": "2022-11-04T19:36:01.277926", - "testEnd": "2022-11-04T19:37:23.028124" + "testAnalysisBlockCnt": 21, + "logsDir": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-21-17-19000", + "testStart": "2022-11-21T22:21:17.380653", + "testEnd": "2022-11-21T22:22:37.113095" } }, "4": { @@ -537,16 +537,16 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchCeiling": 18500, "basicTestResult": { "targetTPS": 16000, - "resultAvgTps": 15900.625, + "resultAvgTps": 15969.375, "expectedTxns": 160000, "resultTxns": 160000, "tpsExpectMet": true, "trxExpectMet": true, "basicTestSuccess": true, "testAnalysisBlockCnt": 17, - "logsDir": "performance_test/2022-11-04_19-31-40/testRunLogs/performance_test_basic/2022-11-04_19-37-23", - "testStart": "2022-11-04T19:37:23.085923", - "testEnd": "2022-11-04T19:38:41.744418" + "logsDir": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-22-37-16000", + "testStart": "2022-11-21T22:22:37.166645", + "testEnd": "2022-11-21T22:23:51.349987" } }, "5": { @@ -556,16 +556,16 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchCeiling": 18500, "basicTestResult": { "targetTPS": 17500, - "resultAvgTps": 15271.526315789473, + "resultAvgTps": 15048.263157894737, "expectedTxns": 175000, "resultTxns": 175000, "tpsExpectMet": false, "trxExpectMet": true, "basicTestSuccess": true, "testAnalysisBlockCnt": 20, - "logsDir": "performance_test/2022-11-04_19-31-40/testRunLogs/performance_test_basic/2022-11-04_19-38-41", - "testStart": "2022-11-04T19:38:41.796745", - "testEnd": "2022-11-04T19:40:02.097920" + "logsDir": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-23-51-17500", + "testStart": "2022-11-21T22:23:51.399539", + "testEnd": "2022-11-21T22:25:11.171614" } }, "6": { @@ -575,16 +575,16 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchCeiling": 17000, "basicTestResult": { "targetTPS": 17000, - "resultAvgTps": 15876.176470588236, + "resultAvgTps": 15659.058823529413, "expectedTxns": 170000, "resultTxns": 170000, "tpsExpectMet": false, "trxExpectMet": true, "basicTestSuccess": true, "testAnalysisBlockCnt": 18, - "logsDir": "performance_test/2022-11-04_19-31-40/testRunLogs/performance_test_basic/2022-11-04_19-40-02", - "testStart": "2022-11-04T19:40:02.150305", - "testEnd": "2022-11-04T19:41:21.802272" + "logsDir": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-25-11-17000", + "testStart": "2022-11-21T22:25:11.225775", + "testEnd": "2022-11-21T22:26:30.102913" } }, "7": { @@ -594,16 +594,16 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchCeiling": 16500, "basicTestResult": { "targetTPS": 16500, - "resultAvgTps": 16096.823529411764, + "resultAvgTps": 15714.823529411764, "expectedTxns": 165000, "resultTxns": 165000, "tpsExpectMet": false, "trxExpectMet": true, "basicTestSuccess": true, "testAnalysisBlockCnt": 18, - "logsDir": "performance_test/2022-11-04_19-31-40/testRunLogs/performance_test_basic/2022-11-04_19-41-21", - "testStart": "2022-11-04T19:41:21.851918", - "testEnd": "2022-11-04T19:42:40.991794" + "logsDir": 
"./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-26-30-16500", + "testStart": "2022-11-21T22:26:30.155632", + "testEnd": "2022-11-21T22:27:48.093871" } } }, @@ -622,60 +622,22 @@ Finally, the full detail test report for each of the determined max TPS throughp }, "LongRunningSearchResults": { "0": { - "success": false, + "success": true, "searchTarget": 16000, "searchFloor": 0, "searchCeiling": 16000, "basicTestResult": { "targetTPS": 16000, - "resultAvgTps": 14954.266666666666, - "expectedTxns": 480000, - "resultTxns": 480000, - "tpsExpectMet": false, - "trxExpectMet": true, - "basicTestSuccess": true, - "testAnalysisBlockCnt": 61, - "logsDir": "performance_test/2022-11-04_19-31-40/testRunLogs/performance_test_basic/2022-11-04_19-42-41", - "testStart": "2022-11-04T19:42:41.051468", - "testEnd": "2022-11-04T19:44:47.365905" - } - }, - "1": { - "success": false, - "searchTarget": 15500, - "searchFloor": 0, - "searchCeiling": 16000, - "basicTestResult": { - "targetTPS": 15500, - "resultAvgTps": 15001.827586206897, - "expectedTxns": 465000, - "resultTxns": 465000, - "tpsExpectMet": false, - "trxExpectMet": true, - "basicTestSuccess": true, - "testAnalysisBlockCnt": 59, - "logsDir": "performance_test/2022-11-04_19-31-40/testRunLogs/performance_test_basic/2022-11-04_19-44-47", - "testStart": "2022-11-04T19:44:47.472961", - "testEnd": "2022-11-04T19:46:52.818564" - } - }, - "2": { - "success": true, - "searchTarget": 15000, - "searchFloor": 0, - "searchCeiling": 16000, - "basicTestResult": { - "targetTPS": 15000, - "resultAvgTps": 15023.464285714286, - "expectedTxns": 450000, - "resultTxns": 450000, + "resultAvgTps": 15933.1875, + "expectedTxns": 160000, + "resultTxns": 160000, "tpsExpectMet": true, "trxExpectMet": true, "basicTestSuccess": true, - "testAnalysisBlockCnt": 57, - "logsDir": "performance_test/2022-11-04_19-31-40/testRunLogs/performance_test_basic/2022-11-04_19-46-52", - "testStart": "2022-11-04T19:46:52.960531", - "testEnd": "2022-11-04T19:48:52.989694" + "testAnalysisBlockCnt": 17, + "logsDir": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-27-48-16000", + "testStart": "2022-11-21T22:27:48.146027", + "testEnd": "2022-11-21T22:29:02.871273" } } }, @@ -707,17 +669,24 @@ Finally, the full detail test report for each of the determined max TPS throughp "topo": "mesh", "extraNodeosArgs": { "chainPluginArgs": { - "signatureCpuBillablePct": 0 + "signatureCpuBillablePct": 0, + "chainStateDbSizeMb": 10240, + "chainThreads": 2, + "databaseMapMode": "mapped" }, "producerPluginArgs": { "disableSubjectiveBilling": true, "lastBlockTimeOffsetUs": 0, "produceTimeOffsetUs": 0, "cpuEffortPercent": 100, - "lastBlockCpuEffortPercent": 100 + "lastBlockCpuEffortPercent": 100, + "producerThreads": 2 }, "httpPluginArgs": { "httpMaxResponseTimeMs": 990000 + }, + "netPluginArgs": { + "netThreads": 2 } }, "useBiosBootFile": false, @@ -734,7 +703,7 @@ Finally, the full detail test report for each of the determined max TPS throughp "_totalNodes": 2, "testDurationSec": 10, "finalDurationSec": 30, - "logsDir": "performance_test/2022-11-04_19-31-40", + "delPerfLogs": false, "maxTpsToTest": 50000, "testIterationMinStep": 500, "tpsLimitPerGenerator": 4000, @@ -742,12 +711,17 @@ Finally, the full detail test report for each of the determined max TPS throughp "delTestReport": false, "numAddlBlocksToPrune": 2, "quiet": false, - "delPerfLogs": false + "logDirRoot": ".", + "logDirBase": "./performance_test", + "logDirTimestamp": 
"2022-11-21_22-17-03", + "logDirPath": "./performance_test/2022-11-21_22-17-03", + "ptbLogsDirPath": "./performance_test/2022-11-21_22-17-03/testRunLogs" }, "env": { "system": "Linux", "os": "posix", - "release": "5.10.102.1-microsoft-standard-WSL2" + "release": "5.15.74.2-microsoft-standard-WSL2", + "logical_cpu_count": 16 }, "nodeosVersion": "v4.0.0-dev" } @@ -765,67 +739,67 @@ The Performance Test Basic generates, by default, a report that details results ``` json { "completedRun": true, - "testStart": "2022-11-04T19:46:52.960531", - "testFinish": "2022-11-04T19:48:52.989694", + "testStart": "2022-11-21T22:27:48.146027", + "testFinish": "2022-11-21T22:29:02.871273", "Analysis": { "BlockSize": { - "min": 1389312, - "max": 1575800, - "avg": 1474814.3157894737, - "sigma": 40921.65290309434, + "min": 1369536, + "max": 1624896, + "avg": 1530567.5294117648, + "sigma": 58850.381839050766, "emptyBlocks": 0, - "numBlocks": 57 + "numBlocks": 17 }, "BlocksGuide": { "firstBlockNum": 2, - "lastBlockNum": 232, - "totalBlocks": 231, + "lastBlockNum": 147, + "totalBlocks": 146, "testStartBlockNum": 105, - "testEndBlockNum": 199, + "testEndBlockNum": 136, "setupBlocksCnt": 103, - "tearDownBlocksCnt": 33, + "tearDownBlocksCnt": 11, "leadingEmptyBlocksCnt": 1, - "trailingEmptyBlocksCnt": 33, + "trailingEmptyBlocksCnt": 10, "configAddlDropCnt": 2, - "testAnalysisBlockCnt": 57 + "testAnalysisBlockCnt": 17 }, "TPS": { - "min": 14532, - "max": 15477, - "avg": 15023.464285714286, - "sigma": 178.66938384762454, + "min": 14996, + "max": 16486, + "avg": 15933.1875, + "sigma": 403.137727512261, "emptyBlocks": 0, - "numBlocks": 57, - "configTps": 15000, - "configTestDuration": 30, + "numBlocks": 17, + "configTps": 16000, + "configTestDuration": 10, "tpsPerGenerator": [ - 3750, - 3750, - 3750, - 3750 + 4000, + 4000, + 4000, + 4000 ], "generatorCount": 4 }, "TrxCPU": { "min": 7.0, - "max": 2647.0, - "avg": 23.146035555555557, - "sigma": 11.415769514864671, - "samples": 450000 + "max": 657.0, + "avg": 21.81190625, + "sigma": 9.853241319038672, + "samples": 160000 }, "TrxLatency": { "min": 0.0009999275207519531, - "max": 0.5539999008178711, - "avg": 0.2614889088874393, - "sigma": 0.1450651327531534, - "samples": 450000 + "max": 0.565000057220459, + "avg": 0.27573538126200436, + "sigma": 0.14606770516057177, + "samples": 160000 }, "TrxNet": { "min": 24.0, - "max": 25.0, - "avg": 24.555564444444446, - "sigma": 0.49690300111146485, - "samples": 450000 + "max": 24.0, + "avg": 24.0, + "sigma": 0.0, + "samples": 160000 } }, "args": { @@ -843,17 +817,24 @@ The Performance Test Basic generates, by default, a report that details results "topo": "mesh", "extraNodeosArgs": { "chainPluginArgs": { - "signatureCpuBillablePct": 0 + "signatureCpuBillablePct": 0, + "chainStateDbSizeMb": 10240, + "chainThreads": 2, + "databaseMapMode": "mapped" }, "producerPluginArgs": { "disableSubjectiveBilling": true, "lastBlockTimeOffsetUs": 0, "produceTimeOffsetUs": 0, "cpuEffortPercent": 100, - "lastBlockCpuEffortPercent": 100 + "lastBlockCpuEffortPercent": 100, + "producerThreads": 2 }, "httpPluginArgs": { "httpMaxResponseTimeMs": 990000 + }, + "netPluginArgs": { + "netThreads": 2 } }, "useBiosBootFile": false, @@ -868,19 +849,24 @@ The Performance Test Basic generates, by default, a report that details results "1": "--plugin eosio::trace_api_plugin" }, "_totalNodes": 2, - "delPerfLogs": false, - "delReport": false, - "expectedTransactionsSent": 450000, + "targetTps": 16000, + "testTrxGenDurationSec": 10, + "tpsLimitPerGenerator": 4000, 
"numAddlBlocksToPrune": 2, + "logDirRoot": "./performance_test/2022-11-21_22-17-03/testRunLogs", + "delReport": false, "quiet": false, - "targetTps": 15000, - "testTrxGenDurationSec": 30, - "tpsLimitPerGenerator": 4000 + "delPerfLogs": false, + "expectedTransactionsSent": 160000, + "logDirBase": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic", + "logDirTimestamp": "2022-11-21_22-27-48", + "logDirTimestampedOptSuffix": "-16000", + "logDirPath": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-27-48-16000" }, "env": { "system": "Linux", "os": "posix", - "release": "5.10.102.1-microsoft-standard-WSL2", + "release": "5.15.74.2-microsoft-standard-WSL2", "logical_cpu_count": 16 }, "nodeosVersion": "v4.0.0-dev" diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index 62b66e68c5..5c721e01ae 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -16,198 +16,286 @@ from dataclasses import dataclass, asdict, field from datetime import datetime -@dataclass -class PerfTestBasicResult: - targetTPS: int = 0 - resultAvgTps: float = 0 - expectedTxns: int = 0 - resultTxns: int = 0 - tpsExpectMet: bool = False - trxExpectMet: bool = False - basicTestSuccess: bool = False - testAnalysisBlockCnt: int = 0 - logsDir: str = "" - testStart: datetime = None - testEnd: datetime = None - -@dataclass -class PerfTestSearchIndivResult: - success: bool = False - searchTarget: int = 0 - searchFloor: int = 0 - searchCeiling: int = 0 - basicTestResult: PerfTestBasicResult = PerfTestBasicResult() - -@dataclass -class PerfTestSearchResults: - maxTpsAchieved: int = 0 - searchResults: list = field(default_factory=list) #PerfTestSearchIndivResult list - maxTpsReport: dict = field(default_factory=dict) - -def performPtbBinarySearch(tpsTestFloor: int, tpsTestCeiling: int, minStep: int, testHelperConfig: PerformanceTestBasic.TestHelperConfig, - testClusterConfig: PerformanceTestBasic.ClusterConfig, testDurationSec: int, tpsLimitPerGenerator: int, - numAddlBlocksToPrune: int, testLogDir: str, delReport: bool, quiet: bool, delPerfLogs: bool) -> PerfTestSearchResults: - floor = tpsTestFloor - ceiling = tpsTestCeiling - binSearchTarget = tpsTestCeiling - - maxTpsAchieved = 0 - maxTpsReport = {} - searchResults = [] - - while ceiling >= floor: - print(f"Running scenario: floor {floor} binSearchTarget {binSearchTarget} ceiling {ceiling}") - ptbResult = PerfTestBasicResult() - scenarioResult = PerfTestSearchIndivResult(success=False, searchTarget=binSearchTarget, searchFloor=floor, searchCeiling=ceiling, basicTestResult=ptbResult) - ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=binSearchTarget, testTrxGenDurationSec=testDurationSec, tpsLimitPerGenerator=tpsLimitPerGenerator, - numAddlBlocksToPrune=numAddlBlocksToPrune, logDirRoot=testLogDir, delReport=delReport, quiet=quiet, delPerfLogs=delPerfLogs) - - myTest = PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptbConfig=ptbConfig) - testSuccessful = myTest.runTest() - if evaluateSuccess(myTest, testSuccessful, ptbResult): - maxTpsAchieved = binSearchTarget - maxTpsReport = myTest.report - floor = binSearchTarget + minStep - scenarioResult.success = True - else: - ceiling = binSearchTarget - minStep - - scenarioResult.basicTestResult = ptbResult - searchResults.append(scenarioResult) - if not quiet: - print(f"searchResult: {binSearchTarget} : {searchResults[-1]}") - - 
binSearchTarget = floor + (math.ceil(((ceiling - floor) / minStep) / 2) * minStep) - - return PerfTestSearchResults(maxTpsAchieved=maxTpsAchieved, searchResults=searchResults, maxTpsReport=maxTpsReport) - -def performPtbReverseLinearSearch(tpsInitial: int, step: int, testHelperConfig: PerformanceTestBasic.TestHelperConfig, - testClusterConfig: PerformanceTestBasic.ClusterConfig, testDurationSec: int, tpsLimitPerGenerator: int, - numAddlBlocksToPrune: int, testLogDir: str, delReport: bool, quiet: bool, delPerfLogs: bool) -> PerfTestSearchResults: - - # Default - Decrementing Max TPS in range [0, tpsInitial] - absFloor = 0 - absCeiling = tpsInitial - - searchTarget = tpsInitial - - maxTpsAchieved = 0 - maxTpsReport = {} - searchResults = [] - maxFound = False - - while not maxFound: - print(f"Running scenario: floor {absFloor} searchTarget {searchTarget} ceiling {absCeiling}") - ptbResult = PerfTestBasicResult() - scenarioResult = PerfTestSearchIndivResult(success=False, searchTarget=searchTarget, searchFloor=absFloor, searchCeiling=absCeiling, basicTestResult=ptbResult) - ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=searchTarget, testTrxGenDurationSec=testDurationSec, tpsLimitPerGenerator=tpsLimitPerGenerator, - numAddlBlocksToPrune=numAddlBlocksToPrune, logDirRoot=testLogDir, delReport=delReport, quiet=quiet, delPerfLogs=delPerfLogs) - - myTest = PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptbConfig=ptbConfig) - testSuccessful = myTest.runTest() - if evaluateSuccess(myTest, testSuccessful, ptbResult): - maxTpsAchieved = searchTarget - maxTpsReport = myTest.report - scenarioResult.success = True - maxFound = True - else: - searchTarget = searchTarget - step - - scenarioResult.basicTestResult = ptbResult - searchResults.append(scenarioResult) - if not quiet: - print(f"searchResult: {searchTarget} : {searchResults[-1]}") - - return PerfTestSearchResults(maxTpsAchieved=maxTpsAchieved, searchResults=searchResults, maxTpsReport=maxTpsReport) - -def evaluateSuccess(test: PerformanceTestBasic, testSuccessful: bool, result: PerfTestBasicResult) -> bool: - result.targetTPS = test.ptbConfig.targetTps - result.expectedTxns = test.ptbConfig.expectedTransactionsSent - reportDict = test.report - result.testStart = reportDict["testStart"] - result.testEnd = reportDict["testFinish"] - result.resultAvgTps = reportDict["Analysis"]["TPS"]["avg"] - result.resultTxns = reportDict["Analysis"]["TrxLatency"]["samples"] - print(f"targetTPS: {result.targetTPS} expectedTxns: {result.expectedTxns} resultAvgTps: {result.resultAvgTps} resultTxns: {result.resultTxns}") - - result.tpsExpectMet = True if result.resultAvgTps >= result.targetTPS else abs(result.targetTPS - result.resultAvgTps) < 100 - result.trxExpectMet = result.expectedTxns == result.resultTxns - result.basicTestSuccess = testSuccessful - result.testAnalysisBlockCnt = reportDict["Analysis"]["BlocksGuide"]["testAnalysisBlockCnt"] - result.logsDir = test.loggingConfig.logDirPath - - print(f"basicTestSuccess: {result.basicTestSuccess} tpsExpectationMet: {result.tpsExpectMet} trxExpectationMet: {result.trxExpectMet}") - - return result.basicTestSuccess and result.tpsExpectMet and result.trxExpectMet - -def createReport(maxTpsAchieved, searchResults, maxTpsReport, longRunningMaxTpsAchieved, longRunningSearchResults, longRunningMaxTpsReport, testStart: datetime, testFinish: datetime, argsDict) -> dict: - report = {} - report['InitialMaxTpsAchieved'] = maxTpsAchieved - report['LongRunningMaxTpsAchieved'] = 
longRunningMaxTpsAchieved - report['testStart'] = testStart - report['testFinish'] = testFinish - report['InitialSearchResults'] = {x: asdict(searchResults[x]) for x in range(len(searchResults))} - report['InitialMaxTpsReport'] = maxTpsReport - report['LongRunningSearchResults'] = {x: asdict(longRunningSearchResults[x]) for x in range(len(longRunningSearchResults))} - report['LongRunningMaxTpsReport'] = longRunningMaxTpsReport - report['args'] = argsDict - report['env'] = {'system': system(), 'os': os.name, 'release': release(), 'logical_cpu_count': os.cpu_count()} - report['nodeosVersion'] = Utils.getNodeosVersion() - return report - -def createJSONReport(maxTpsAchieved, searchResults, maxTpsReport, longRunningMaxTpsAchieved, longRunningSearchResults, longRunningMaxTpsReport, testStart: datetime, testFinish: datetime, argsDict) -> json: - report = createReport(maxTpsAchieved=maxTpsAchieved, searchResults=searchResults, maxTpsReport=maxTpsReport, longRunningMaxTpsAchieved=longRunningMaxTpsAchieved, - longRunningSearchResults=longRunningSearchResults, longRunningMaxTpsReport=longRunningMaxTpsReport, testStart=testStart, testFinish=testFinish, argsDict=argsDict) - return reportAsJSON(report) - -def reportAsJSON(report: dict) -> json: - report['testStart'] = "Unknown" if report['testStart'] is None else report['testStart'].isoformat() - report['testFinish'] = "Unknown" if report['testFinish'] is None else report['testFinish'].isoformat() - return json.dumps(report, indent=2) - -def exportReportAsJSON(report: json, exportPath): - with open(exportPath, 'wt') as f: - f.write(report) - -def testDirsCleanup(delReport, testTimeStampDirPath, ptbLogsDirPath): - try: - def removeArtifacts(path): - print(f"Checking if test artifacts dir exists: {path}") - if os.path.isdir(f"{path}"): - print(f"Cleaning up test artifacts dir and all contents of: {path}") - shutil.rmtree(f"{path}") - - if not delReport: - removeArtifacts(ptbLogsDirPath) - else: - removeArtifacts(testTimeStampDirPath) - except OSError as error: - print(error) - -def testDirsSetup(rootLogDir, testTimeStampDirPath, ptbLogsDirPath): - try: - def createArtifactsDir(path): - print(f"Checking if test artifacts dir exists: {path}") - if not os.path.isdir(f"{path}"): - print(f"Creating test artifacts dir: {path}") - os.mkdir(f"{path}") - - createArtifactsDir(rootLogDir) - createArtifactsDir(testTimeStampDirPath) - createArtifactsDir(ptbLogsDirPath) - - except OSError as error: - print(error) - -def prepArgsDict(testDurationSec, finalDurationSec, logsDir, maxTpsToTest, testIterationMinStep, - tpsLimitPerGenerator, delReport, delTestReport, numAddlBlocksToPrune, quiet, delPerfLogs, - testHelperConfig: PerformanceTestBasic.TestHelperConfig, testClusterConfig: PerformanceTestBasic.ClusterConfig) -> dict: - argsDict = {} - argsDict.update(asdict(testHelperConfig)) - argsDict.update(asdict(testClusterConfig)) - argsDict.update({key:val for key, val in locals().items() if key in set(['testDurationSec', 'finalDurationSec', 'maxTpsToTest', 'testIterationMinStep', 'tpsLimitPerGenerator', - 'delReport', 'delTestReport', 'numAddlBlocksToPrune', 'logsDir', 'quiet', 'delPerfLogs'])}) - return argsDict +class PerformanceTest: + + @dataclass + class PerfTestSearchIndivResult: + @dataclass + class PerfTestBasicResult: + targetTPS: int = 0 + resultAvgTps: float = 0 + expectedTxns: int = 0 + resultTxns: int = 0 + tpsExpectMet: bool = False + trxExpectMet: bool = False + basicTestSuccess: bool = False + testAnalysisBlockCnt: int = 0 + logsDir: str = "" + testStart: 
datetime = None + testEnd: datetime = None + + success: bool = False + searchTarget: int = 0 + searchFloor: int = 0 + searchCeiling: int = 0 + basicTestResult: PerfTestBasicResult = PerfTestBasicResult() + + @dataclass + class PtConfig: + testDurationSec: int=150 + finalDurationSec: int=300 + delPerfLogs: bool=False + maxTpsToTest: int=50000 + testIterationMinStep: int=500 + tpsLimitPerGenerator: int=4000 + delReport: bool=False + delTestReport: bool=False + numAddlBlocksToPrune: int=2 + quiet: bool=False + logDirRoot: str="." + + @dataclass + class PerfTestSearchResults: + maxTpsAchieved: int = 0 + searchResults: list = field(default_factory=list) #PerfTestSearchIndivResult list + maxTpsReport: dict = field(default_factory=dict) + + @dataclass + class LoggingConfig: + logDirBase: str = f"./{os.path.splitext(os.path.basename(__file__))[0]}" + logDirTimestamp: str = f"{datetime.utcnow().strftime('%Y-%m-%d_%H-%M-%S')}" + logDirPath: str = field(default_factory=str, init=False) + ptbLogsDirPath: str = field(default_factory=str, init=False) + + def __post_init__(self): + self.logDirPath = f"{self.logDirBase}/{self.logDirTimestamp}" + self.ptbLogsDirPath = f"{self.logDirPath}/testRunLogs" + + def __init__(self, testHelperConfig: PerformanceTestBasic.TestHelperConfig=PerformanceTestBasic.TestHelperConfig(), + clusterConfig: PerformanceTestBasic.ClusterConfig=PerformanceTestBasic.ClusterConfig(), ptConfig=PtConfig()): + self.testHelperConfig = testHelperConfig + self.clusterConfig = clusterConfig + self.ptConfig = ptConfig + + self.testStart = datetime.utcnow() + + self.loggingConfig = PerformanceTest.LoggingConfig(logDirBase=f"{self.ptConfig.logDirRoot}/{os.path.splitext(os.path.basename(__file__))[0]}", + logDirTimestamp=f"{self.testStart.strftime('%Y-%m-%d_%H-%M-%S')}") + + def performPtbBinarySearch(self) -> PerfTestSearchResults: + floor = 0 + ceiling = self.ptConfig.maxTpsToTest + binSearchTarget = self.ptConfig.maxTpsToTest + minStep = self.ptConfig.testIterationMinStep + + maxTpsAchieved = 0 + maxTpsReport = {} + searchResults = [] + + while ceiling >= floor: + print(f"Running scenario: floor {floor} binSearchTarget {binSearchTarget} ceiling {ceiling}") + ptbResult = PerformanceTest.PerfTestSearchIndivResult.PerfTestBasicResult() + scenarioResult = PerformanceTest.PerfTestSearchIndivResult(success=False, searchTarget=binSearchTarget, searchFloor=floor, searchCeiling=ceiling, basicTestResult=ptbResult) + ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=binSearchTarget, testTrxGenDurationSec=self.ptConfig.testDurationSec, tpsLimitPerGenerator=self.ptConfig.tpsLimitPerGenerator, + numAddlBlocksToPrune=self.ptConfig.numAddlBlocksToPrune, logDirRoot=self.loggingConfig.ptbLogsDirPath, delReport=self.ptConfig.delReport, quiet=self.ptConfig.quiet, + delPerfLogs=self.ptConfig.delPerfLogs) + + myTest = PerformanceTestBasic(testHelperConfig=self.testHelperConfig, clusterConfig=self.clusterConfig, ptbConfig=ptbConfig) + testSuccessful = myTest.runTest() + if self.evaluateSuccess(myTest, testSuccessful, ptbResult): + maxTpsAchieved = binSearchTarget + maxTpsReport = myTest.report + floor = binSearchTarget + minStep + scenarioResult.success = True + else: + ceiling = binSearchTarget - minStep + + scenarioResult.basicTestResult = ptbResult + searchResults.append(scenarioResult) + if not self.ptConfig.quiet: + print(f"searchResult: {binSearchTarget} : {searchResults[-1]}") + + binSearchTarget = floor + (math.ceil(((ceiling - floor) / minStep) / 2) * minStep) + + return 
PerformanceTest.PerfTestSearchResults(maxTpsAchieved=maxTpsAchieved, searchResults=searchResults, maxTpsReport=maxTpsReport) + + def performPtbReverseLinearSearch(self, tpsInitial: int) -> PerfTestSearchResults: + + # Default - Decrementing Max TPS in range [0, tpsInitial] + absFloor = 0 + absCeiling = tpsInitial + + step = self.ptConfig.testIterationMinStep + + searchTarget = tpsInitial + + maxTpsAchieved = 0 + maxTpsReport = {} + searchResults = [] + maxFound = False + + while not maxFound: + print(f"Running scenario: floor {absFloor} searchTarget {searchTarget} ceiling {absCeiling}") + ptbResult = PerformanceTest.PerfTestSearchIndivResult.PerfTestBasicResult() + scenarioResult = PerformanceTest.PerfTestSearchIndivResult(success=False, searchTarget=searchTarget, searchFloor=absFloor, searchCeiling=absCeiling, basicTestResult=ptbResult) + ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=searchTarget, testTrxGenDurationSec=self.ptConfig.testDurationSec, tpsLimitPerGenerator=self.ptConfig.tpsLimitPerGenerator, + numAddlBlocksToPrune=self.ptConfig.numAddlBlocksToPrune, logDirRoot=self.loggingConfig.ptbLogsDirPath, delReport=self.ptConfig.delReport, quiet=self.ptConfig.quiet, delPerfLogs=self.ptConfig.delPerfLogs) + + myTest = PerformanceTestBasic(testHelperConfig=self.testHelperConfig, clusterConfig=self.clusterConfig, ptbConfig=ptbConfig) + testSuccessful = myTest.runTest() + if self.evaluateSuccess(myTest, testSuccessful, ptbResult): + maxTpsAchieved = searchTarget + maxTpsReport = myTest.report + scenarioResult.success = True + maxFound = True + else: + searchTarget = searchTarget - step + + scenarioResult.basicTestResult = ptbResult + searchResults.append(scenarioResult) + if not self.ptConfig.quiet: + print(f"searchResult: {searchTarget} : {searchResults[-1]}") + + return PerformanceTest.PerfTestSearchResults(maxTpsAchieved=maxTpsAchieved, searchResults=searchResults, maxTpsReport=maxTpsReport) + + def evaluateSuccess(self, test: PerformanceTestBasic, testSuccessful: bool, result: PerfTestSearchIndivResult.PerfTestBasicResult) -> bool: + result.targetTPS = test.ptbConfig.targetTps + result.expectedTxns = test.ptbConfig.expectedTransactionsSent + reportDict = test.report + result.testStart = reportDict["testStart"] + result.testEnd = reportDict["testFinish"] + result.resultAvgTps = reportDict["Analysis"]["TPS"]["avg"] + result.resultTxns = reportDict["Analysis"]["TrxLatency"]["samples"] + print(f"targetTPS: {result.targetTPS} expectedTxns: {result.expectedTxns} resultAvgTps: {result.resultAvgTps} resultTxns: {result.resultTxns}") + + result.tpsExpectMet = True if result.resultAvgTps >= result.targetTPS else abs(result.targetTPS - result.resultAvgTps) < 100 + result.trxExpectMet = result.expectedTxns == result.resultTxns + result.basicTestSuccess = testSuccessful + result.testAnalysisBlockCnt = reportDict["Analysis"]["BlocksGuide"]["testAnalysisBlockCnt"] + result.logsDir = test.loggingConfig.logDirPath + + print(f"basicTestSuccess: {result.basicTestSuccess} tpsExpectationMet: {result.tpsExpectMet} trxExpectationMet: {result.trxExpectMet}") + + return result.basicTestSuccess and result.tpsExpectMet and result.trxExpectMet + + def createReport(self, maxTpsAchieved, searchResults, maxTpsReport, longRunningMaxTpsAchieved, longRunningSearchResults, longRunningMaxTpsReport, testStart: datetime, testFinish: datetime, argsDict) -> dict: + report = {} + report['InitialMaxTpsAchieved'] = maxTpsAchieved + report['LongRunningMaxTpsAchieved'] = longRunningMaxTpsAchieved + report['testStart'] = 
testStart + report['testFinish'] = testFinish + report['InitialSearchResults'] = {x: asdict(searchResults[x]) for x in range(len(searchResults))} + report['InitialMaxTpsReport'] = maxTpsReport + report['LongRunningSearchResults'] = {x: asdict(longRunningSearchResults[x]) for x in range(len(longRunningSearchResults))} + report['LongRunningMaxTpsReport'] = longRunningMaxTpsReport + report['args'] = argsDict + report['env'] = {'system': system(), 'os': os.name, 'release': release(), 'logical_cpu_count': os.cpu_count()} + report['nodeosVersion'] = Utils.getNodeosVersion() + return report + + def createJSONReport(self, maxTpsAchieved, searchResults, maxTpsReport, longRunningMaxTpsAchieved, longRunningSearchResults, longRunningMaxTpsReport, testStart: datetime, testFinish: datetime, argsDict) -> json: + report = self.createReport(maxTpsAchieved=maxTpsAchieved, searchResults=searchResults, maxTpsReport=maxTpsReport, longRunningMaxTpsAchieved=longRunningMaxTpsAchieved, + longRunningSearchResults=longRunningSearchResults, longRunningMaxTpsReport=longRunningMaxTpsReport, testStart=testStart, testFinish=testFinish, argsDict=argsDict) + return self.reportAsJSON(report) + + def reportAsJSON(self, report: dict) -> json: + report['testStart'] = "Unknown" if report['testStart'] is None else report['testStart'].isoformat() + report['testFinish'] = "Unknown" if report['testFinish'] is None else report['testFinish'].isoformat() + return json.dumps(report, indent=2) + + def exportReportAsJSON(self, report: json, exportPath): + with open(exportPath, 'wt') as f: + f.write(report) + + def testDirsCleanup(self): + try: + def removeArtifacts(path): + print(f"Checking if test artifacts dir exists: {path}") + if os.path.isdir(f"{path}"): + print(f"Cleaning up test artifacts dir and all contents of: {path}") + shutil.rmtree(f"{path}") + + if not self.ptConfig.delReport: + removeArtifacts(self.loggingConfig.ptbLogsDirPath) + else: + removeArtifacts(self.loggingConfig.logDirPath) + except OSError as error: + print(error) + + def testDirsSetup(self): + try: + def createArtifactsDir(path): + print(f"Checking if test artifacts dir exists: {path}") + if not os.path.isdir(f"{path}"): + print(f"Creating test artifacts dir: {path}") + os.mkdir(f"{path}") + + createArtifactsDir(self.loggingConfig.logDirBase) + createArtifactsDir(self.loggingConfig.logDirPath) + createArtifactsDir(self.loggingConfig.ptbLogsDirPath) + + except OSError as error: + print(error) + + def prepArgsDict(self) -> dict: + argsDict = {} + argsDict.update(asdict(self.testHelperConfig)) + argsDict.update(asdict(self.clusterConfig)) + argsDict.update(asdict(self.ptConfig)) + argsDict.update(asdict(self.loggingConfig)) + return argsDict + + def performTpsTest(self): + perfRunSuccessful = False + + try: + binSearchResults = self.performPtbBinarySearch() + + print(f"Successful rate of: {binSearchResults.maxTpsAchieved}") + + if not self.ptConfig.quiet: + print("Search Results:") + for i in range(len(binSearchResults.searchResults)): + print(f"Search scenario: {i} result: {binSearchResults.searchResults[i]}") + + longRunningSearchResults = self.performPtbReverseLinearSearch(tpsInitial=binSearchResults.maxTpsAchieved) + + print(f"Long Running Test - Successful rate of: {longRunningSearchResults.maxTpsAchieved}") + perfRunSuccessful = True + + if not self.ptConfig.quiet: + print("Long Running Test - Search Results:") + for i in range(len(longRunningSearchResults.searchResults)): + print(f"Search scenario: {i} result: {longRunningSearchResults.searchResults[i]}") + 
+ testFinish = datetime.utcnow() + argsDict = self.prepArgsDict() + fullReport = self.createJSONReport(maxTpsAchieved=binSearchResults.maxTpsAchieved, searchResults=binSearchResults.searchResults, maxTpsReport=binSearchResults.maxTpsReport, + longRunningMaxTpsAchieved=longRunningSearchResults.maxTpsAchieved, longRunningSearchResults=longRunningSearchResults.searchResults, + longRunningMaxTpsReport=longRunningSearchResults.maxTpsReport, testStart=self.testStart, testFinish=testFinish, argsDict=argsDict) + + if not self.ptConfig.quiet: + print(f"Full Performance Test Report: {fullReport}") + + if not self.ptConfig.delReport: + self.exportReportAsJSON(fullReport, f"{self.loggingConfig.logDirPath}/report.json") + + finally: + + if self.ptConfig.delPerfLogs: + print(f"Cleaning up logs directory: {self.loggingConfig.logDirPath}") + self.testDirsCleanup() + + return perfRunSuccessful + + def runTest(self): + + TestHelper.printSystemInfo("BEGIN") + self.testDirsSetup() + + testSuccessful = self.performTpsTest() + + return testSuccessful def parseArgs(): appArgs=AppArgs() @@ -248,39 +336,10 @@ def main(): args = parseArgs() Utils.Debug = args.v - testDurationSec=args.test_iteration_duration_sec - finalDurationSec=args.final_iterations_duration_sec - killAll=args.clean_run - dontKill=args.leave_running - delPerfLogs=args.del_perf_logs - dumpErrorDetails=args.dump_error_details - delay=args.d - nodesFile=args.nodes_file - verbose=args.v - pnodes=args.p - totalNodes=args.n - topo=args.s - genesisPath=args.genesis - maxTpsToTest=args.max_tps_to_test - testIterationMinStep=args.test_iteration_min_step - tpsLimitPerGenerator=args.tps_limit_per_generator - delReport=args.del_report - delTestReport=args.del_test_report - numAddlBlocksToPrune=args.num_blocks_to_prune - quiet=args.quiet - prodsEnableTraceApi=args.prods_enable_trace_api - - testStart = datetime.utcnow() - - rootLogDir: str=os.path.splitext(os.path.basename(__file__))[0] - testTimeStampDirPath = f"{rootLogDir}/{testStart.strftime('%Y-%m-%d_%H-%M-%S')}" - ptbLogsDirPath = f"{testTimeStampDirPath}/testRunLogs" - - testDirsSetup(rootLogDir=rootLogDir, testTimeStampDirPath=testTimeStampDirPath, ptbLogsDirPath=ptbLogsDirPath) - - testHelperConfig = PerformanceTestBasic.TestHelperConfig(killAll=killAll, dontKill=dontKill, keepLogs=not delPerfLogs, - dumpErrorDetails=dumpErrorDetails, delay=delay, nodesFile=nodesFile, - verbose=verbose) + + testHelperConfig = PerformanceTestBasic.TestHelperConfig(killAll=args.clean_run, dontKill=args.leave_running, keepLogs=not args.del_perf_logs, + dumpErrorDetails=args.dump_error_details, delay=args.d, nodesFile=args.nodes_file, + verbose=args.v) ENA = PerformanceTestBasic.ClusterConfig.ExtraNodeosArgs chainPluginArgs = ENA.ChainPluginArgs(signatureCpuBillablePct=args.signature_cpu_billable_pct, chainStateDbSizeMb=args.chain_state_db_size_mb, @@ -294,54 +353,21 @@ def main(): extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs) testClusterConfig = PerformanceTestBasic.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs) - argsDict = prepArgsDict(testDurationSec=testDurationSec, finalDurationSec=finalDurationSec, logsDir=testTimeStampDirPath, - maxTpsToTest=maxTpsToTest, testIterationMinStep=testIterationMinStep, tpsLimitPerGenerator=tpsLimitPerGenerator, - delReport=delReport, 
delTestReport=delTestReport, numAddlBlocksToPrune=numAddlBlocksToPrune, - quiet=quiet, delPerfLogs=delPerfLogs, testHelperConfig=testHelperConfig, testClusterConfig=testClusterConfig) - - perfRunSuccessful = False - - try: - binSearchResults = performPtbBinarySearch(tpsTestFloor=0, tpsTestCeiling=maxTpsToTest, minStep=testIterationMinStep, testHelperConfig=testHelperConfig, - testClusterConfig=testClusterConfig, testDurationSec=testDurationSec, tpsLimitPerGenerator=tpsLimitPerGenerator, - numAddlBlocksToPrune=numAddlBlocksToPrune, testLogDir=ptbLogsDirPath, delReport=delTestReport, quiet=quiet, delPerfLogs=delPerfLogs) - - print(f"Successful rate of: {binSearchResults.maxTpsAchieved}") - - if not quiet: - print("Search Results:") - for i in range(len(binSearchResults.searchResults)): - print(f"Search scenario: {i} result: {binSearchResults.searchResults[i]}") - - longRunningSearchResults = performPtbReverseLinearSearch(tpsInitial=binSearchResults.maxTpsAchieved, step=testIterationMinStep, testHelperConfig=testHelperConfig, - testClusterConfig=testClusterConfig, testDurationSec=finalDurationSec, tpsLimitPerGenerator=tpsLimitPerGenerator, - numAddlBlocksToPrune=numAddlBlocksToPrune, testLogDir=ptbLogsDirPath, delReport=delTestReport, quiet=quiet, - delPerfLogs=delPerfLogs) - - print(f"Long Running Test - Successful rate of: {longRunningSearchResults.maxTpsAchieved}") - perfRunSuccessful = True - - if not quiet: - print("Long Running Test - Search Results:") - for i in range(len(longRunningSearchResults.searchResults)): - print(f"Search scenario: {i} result: {longRunningSearchResults.searchResults[i]}") - - testFinish = datetime.utcnow() - fullReport = createJSONReport(maxTpsAchieved=binSearchResults.maxTpsAchieved, searchResults=binSearchResults.searchResults, maxTpsReport=binSearchResults.maxTpsReport, - longRunningMaxTpsAchieved=longRunningSearchResults.maxTpsAchieved, longRunningSearchResults=longRunningSearchResults.searchResults, - longRunningMaxTpsReport=longRunningSearchResults.maxTpsReport, testStart=testStart, testFinish=testFinish, argsDict=argsDict) - - if not quiet: - print(f"Full Performance Test Report: {fullReport}") - - if not delReport: - exportReportAsJSON(fullReport, f"{testTimeStampDirPath}/report.json") - - finally: - if delPerfLogs: - print(f"Cleaning up logs directory: {testTimeStampDirPath}") - testDirsCleanup(delReport=delReport, testTimeStampDirPath=testTimeStampDirPath, ptbLogsDirPath=ptbLogsDirPath) + ptConfig = PerformanceTest.PtConfig(testDurationSec=args.test_iteration_duration_sec, + finalDurationSec=args.final_iterations_duration_sec, + delPerfLogs=args.del_perf_logs, + maxTpsToTest=args.max_tps_to_test, + testIterationMinStep=args.test_iteration_min_step, + tpsLimitPerGenerator=args.tps_limit_per_generator, + delReport=args.del_report, + delTestReport=args.del_test_report, + numAddlBlocksToPrune=args.num_blocks_to_prune, + quiet=args.quiet, + logDirRoot=".") + + myTest = PerformanceTest(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptConfig=ptConfig) + perfRunSuccessful = myTest.runTest() exitCode = 0 if perfRunSuccessful else 1 exit(exitCode) From 7ba252b71c37cb0e4d6cfb31b0ef2b180b3b9cd5 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 23 Nov 2022 12:30:01 -0600 Subject: [PATCH 04/12] Add options to calculate recommended producer, chain, and net worker thread pool size. Each plugin's option determines whether to calculate number of worker threads to use in the thread pool with options of: 'none', 'lmax', or 'full'. 
In 'none' mode, the default, no calculation will be attempted and the default configured thread count will be used.
In 'lmax' mode, thread count will incrementally be tested until the performance rate ceases to increase with the addition of subsequent threads.
In 'full' mode, thread count will incrementally be tested from 2..num logical processors, recording each performance result and choosing the local max (same value as would be discovered in 'lmax' mode).  Useful for graphing the full performance impact of each available thread.

Added a log directory to collect results of thread optimization calculations.

Added an option to opt out of running the TPS test.

Updated README for new options and to show the updates to the result reports.
---
 tests/performance_tests/README.md           | 406 ++++++++++++--------
 tests/performance_tests/performance_test.py | 320 ++++++++++++---
 2 files changed, 500 insertions(+), 226 deletions(-)

diff --git a/tests/performance_tests/README.md b/tests/performance_tests/README.md
index c1935f191d..b76b0b11e8 100644
--- a/tests/performance_tests/README.md
+++ b/tests/performance_tests/README.md
@@ -37,6 +37,11 @@ Please refer to [Leap: Build and Install from Source](https://github.com/Antelop
 performance_test/
 └── 2022-10-27_15-28-09
     ├── report.json
+    ├── pluginThreadOptRunLogs
+    │   ├── performance_test_basic
+    │   ├── chainThreadResults.txt
+    │   ├── netThreadResults.txt
+    │   └── producerThreadResults.txt
     └── testRunLogs
         └── performance_test_basic
             └── 2022-10-19_10-29-07
@@ -252,6 +257,22 @@ The Performance Harness main script `performance_test.py` can be configured usin
 * `--quiet` Whether to quiet printing intermediate results and reports to stdout (default: False)
 * `--prods-enable-trace-api` Determines whether producer nodes should have eosio::trace_api_plugin enabled (default: False)
+* `--skip-tps-test` Determines whether to skip the max TPS measurement tests (default: False)
+* `--calc-producer-threads {none,lmax,full}`
+    Determines whether to calculate number of worker threads to use in producer thread pool ("none", "lmax", or "full").
+    In "none" mode, the default, no calculation will be attempted and the default configured --producer-threads value will be used.
+    In "lmax" mode, producer threads will incrementally be tested until the performance rate ceases to increase with the addition of further threads.
+    In "full" mode, producer threads will incrementally be tested from 2..num logical processors, recording each performance result and choosing the local max (same value as would be discovered in "lmax" mode). Useful for graphing the full performance impact of each available thread. (default: none)
+* `--calc-chain-threads {none,lmax,full}`
+    Determines whether to calculate number of worker threads to use in chain thread pool ("none", "lmax", or "full").
+    In "none" mode, the default, no calculation will be attempted and the default configured --chain-threads value will be used.
+    In "lmax" mode, chain threads will incrementally be tested until the performance rate ceases to increase with the addition of further threads.
+    In "full" mode, chain threads will incrementally be tested from 2..num logical processors, recording each performance result and choosing the local max (same value as would be discovered in "lmax" mode). Useful for graphing the full performance impact of each available thread.
(default: none)
+* `--calc-net-threads {none,lmax,full}`
+    Determines whether to calculate number of worker threads to use in net thread pool ("none", "lmax", or "full").
+    In "none" mode, the default, no calculation will be attempted and the default configured --net-threads value will be used.
+    In "lmax" mode, net threads will incrementally be tested until the performance rate ceases to increase with the addition of further threads.
+    In "full" mode, net threads will incrementally be tested from 2..num logical processors, recording each performance result and choosing the local max (same value as would be discovered in "lmax" mode). Useful for graphing the full performance impact of each available thread. (default: none)
 
 ### Support Scripts
 
@@ -386,7 +407,7 @@ The Performance Harness generates a report to summarize results of test scenario
 Command used to run test and generate report:
 
 ``` bash
-.build/tests/performance_tests/performance_test.py --test-iteration-duration-sec 10 --final-iterations-duration-sec 30
+.build/tests/performance_tests/performance_test.py --test-iteration-duration-sec 10 --final-iterations-duration-sec 30 --calc-producer-threads lmax --calc-chain-threads lmax --calc-net-threads lmax
 ```
 
 #### Report Breakdown
@@ -400,23 +421,23 @@ Next, a summary of the search scenario conducted and respective results is inclu
 Expand Search Scenario Summary Example
 
 ``` json
-    "0": {
-      "success": false,
-      "searchTarget": 25000,
+    "1": {
+      "success": true,
+      "searchTarget": 26000,
       "searchFloor": 0,
-      "searchCeiling": 50000,
+      "searchCeiling": 26500,
       "basicTestResult": {
-        "targetTPS": 25000,
-        "resultAvgTps": 17160.4,
-        "expectedTxns": 250000,
-        "resultTxns": 250000,
-        "tpsExpectMet": false,
+        "targetTPS": 26000,
+        "resultAvgTps": 25986.9375,
+        "expectedTxns": 260000,
+        "resultTxns": 260000,
+        "tpsExpectMet": true,
         "trxExpectMet": true,
         "basicTestSuccess": true,
-        "testAnalysisBlockCnt": 26,
-        "logsDir": "performance_test/2022-10-26_15-01-51/testRunLogs/performance_test_basic/2022-10-26_15-01-51",
-        "testStart": "2022-10-26T15:03:37.764242",
-        "testEnd": "2022-10-26T15:01:51.128328"
+        "testAnalysisBlockCnt": 17,
+        "logsDir": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic/2022-11-23_15-18-52-26000",
+        "testStart": "2022-11-23T15:18:52.115767",
+        "testEnd": "2022-11-23T15:20:16.911367"
       }
    }
 ```
 
@@ -449,10 +470,12 @@ Finally, the full detail test report for each of the determined max TPS throughp
 
 ``` json
 {
-  "InitialMaxTpsAchieved": 16000,
-  "LongRunningMaxTpsAchieved": 16000,
-  "testStart": "2022-11-21T22:17:03.604928",
-  "testFinish": "2022-11-21T22:29:02.923633",
+  "perfTestsBegin": "2022-11-23T12:56:58.699686",
+  "perfTestsFinish": "2022-11-23T15:20:16.979815",
+  "InitialMaxTpsAchieved": 26500,
+  "LongRunningMaxTpsAchieved": 26000,
+  "tpsTestStart": "2022-11-23T15:05:42.005050",
+  "tpsTestFinish": "2022-11-23T15:20:16.979800",
   "InitialSearchResults": {
     "0": {
       "success": false,
@@ -461,149 +484,149 @@ Finally, the full detail test report for each of the determined max TPS throughp
       "searchCeiling": 50000,
       "basicTestResult": {
         "targetTPS": 50000,
-        "resultAvgTps": 15121.925,
+        "resultAvgTps": 23784.324324324323,
         "expectedTxns": 500000,
-        "resultTxns": 326102,
+        "resultTxns": 500000,
         "tpsExpectMet": false,
-        "trxExpectMet": false,
-        "basicTestSuccess": false,
-        "testAnalysisBlockCnt": 41,
-        "logsDir": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-17-03-50000",
-        "testStart": "2022-11-21T22:17:03.624828",
- "testEnd": "2022-11-21T22:18:35.048631" + "trxExpectMet": true, + "basicTestSuccess": true, + "testAnalysisBlockCnt": 38, + "logsDir": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic/2022-11-23_15-05-42-50000", + "testStart": "2022-11-23T15:05:42.005080", + "testEnd": "2022-11-23T15:07:24.111044" } }, "1": { - "success": false, + "success": true, "searchTarget": 25000, "searchFloor": 0, "searchCeiling": 49500, "basicTestResult": { "targetTPS": 25000, - "resultAvgTps": 15307.275862068966, + "resultAvgTps": 25013.3125, "expectedTxns": 250000, "resultTxns": 250000, - "tpsExpectMet": false, + "tpsExpectMet": true, "trxExpectMet": true, "basicTestSuccess": true, - "testAnalysisBlockCnt": 30, - "logsDir": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-18-35-25000", - "testStart": "2022-11-21T22:18:35.136441", - "testEnd": "2022-11-21T22:20:02.355919" + "testAnalysisBlockCnt": 17, + "logsDir": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic/2022-11-23_15-07-24-25000", + "testStart": "2022-11-23T15:07:24.225706", + "testEnd": "2022-11-23T15:08:47.510691" } }, "2": { - "success": true, - "searchTarget": 12500, - "searchFloor": 0, - "searchCeiling": 24500, + "success": false, + "searchTarget": 37500, + "searchFloor": 25500, + "searchCeiling": 49500, "basicTestResult": { - "targetTPS": 12500, - "resultAvgTps": 12494.4375, - "expectedTxns": 125000, - "resultTxns": 125000, - "tpsExpectMet": true, + "targetTPS": 37500, + "resultAvgTps": 24912.576923076922, + "expectedTxns": 375000, + "resultTxns": 375000, + "tpsExpectMet": false, "trxExpectMet": true, "basicTestSuccess": true, - "testAnalysisBlockCnt": 17, - "logsDir": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-20-02-12500", - "testStart": "2022-11-21T22:20:02.419664", - "testEnd": "2022-11-21T22:21:17.334219" + "testAnalysisBlockCnt": 27, + "logsDir": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic/2022-11-23_15-08-47-37500", + "testStart": "2022-11-23T15:08:47.579754", + "testEnd": "2022-11-23T15:10:23.342881" } }, "3": { "success": false, - "searchTarget": 19000, - "searchFloor": 13000, - "searchCeiling": 24500, + "searchTarget": 31500, + "searchFloor": 25500, + "searchCeiling": 37000, "basicTestResult": { - "targetTPS": 19000, - "resultAvgTps": 15546.55, - "expectedTxns": 190000, - "resultTxns": 190000, + "targetTPS": 31500, + "resultAvgTps": 24525.095238095237, + "expectedTxns": 315000, + "resultTxns": 315000, "tpsExpectMet": false, "trxExpectMet": true, "basicTestSuccess": true, - "testAnalysisBlockCnt": 21, - "logsDir": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-21-17-19000", - "testStart": "2022-11-21T22:21:17.380653", - "testEnd": "2022-11-21T22:22:37.113095" + "testAnalysisBlockCnt": 22, + "logsDir": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic/2022-11-23_15-10-23-31500", + "testStart": "2022-11-23T15:10:23.432821", + "testEnd": "2022-11-23T15:11:53.366694" } }, "4": { - "success": true, - "searchTarget": 16000, - "searchFloor": 13000, - "searchCeiling": 18500, + "success": false, + "searchTarget": 28500, + "searchFloor": 25500, + "searchCeiling": 31000, "basicTestResult": { - "targetTPS": 16000, - "resultAvgTps": 15969.375, - "expectedTxns": 160000, - "resultTxns": 160000, - "tpsExpectMet": true, + "targetTPS": 28500, + "resultAvgTps": 25896.666666666668, + "expectedTxns": 285000, + "resultTxns": 
285000, + "tpsExpectMet": false, "trxExpectMet": true, "basicTestSuccess": true, - "testAnalysisBlockCnt": 17, - "logsDir": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-22-37-16000", - "testStart": "2022-11-21T22:22:37.166645", - "testEnd": "2022-11-21T22:23:51.349987" + "testAnalysisBlockCnt": 19, + "logsDir": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic/2022-11-23_15-11-53-28500", + "testStart": "2022-11-23T15:11:53.448449", + "testEnd": "2022-11-23T15:13:17.714663" } }, "5": { "success": false, - "searchTarget": 17500, - "searchFloor": 16500, - "searchCeiling": 18500, + "searchTarget": 27000, + "searchFloor": 25500, + "searchCeiling": 28000, "basicTestResult": { - "targetTPS": 17500, - "resultAvgTps": 15048.263157894737, - "expectedTxns": 175000, - "resultTxns": 175000, + "targetTPS": 27000, + "resultAvgTps": 26884.625, + "expectedTxns": 270000, + "resultTxns": 270000, "tpsExpectMet": false, "trxExpectMet": true, "basicTestSuccess": true, - "testAnalysisBlockCnt": 20, - "logsDir": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-23-51-17500", - "testStart": "2022-11-21T22:23:51.399539", - "testEnd": "2022-11-21T22:25:11.171614" + "testAnalysisBlockCnt": 17, + "logsDir": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic/2022-11-23_15-13-17-27000", + "testStart": "2022-11-23T15:13:17.787205", + "testEnd": "2022-11-23T15:14:40.753850" } }, "6": { - "success": false, - "searchTarget": 17000, - "searchFloor": 16500, - "searchCeiling": 17000, + "success": true, + "searchTarget": 26000, + "searchFloor": 25500, + "searchCeiling": 26500, "basicTestResult": { - "targetTPS": 17000, - "resultAvgTps": 15659.058823529413, - "expectedTxns": 170000, - "resultTxns": 170000, - "tpsExpectMet": false, + "targetTPS": 26000, + "resultAvgTps": 25959.0, + "expectedTxns": 260000, + "resultTxns": 260000, + "tpsExpectMet": true, "trxExpectMet": true, "basicTestSuccess": true, - "testAnalysisBlockCnt": 18, - "logsDir": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-25-11-17000", - "testStart": "2022-11-21T22:25:11.225775", - "testEnd": "2022-11-21T22:26:30.102913" + "testAnalysisBlockCnt": 17, + "logsDir": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic/2022-11-23_15-14-40-26000", + "testStart": "2022-11-23T15:14:40.823681", + "testEnd": "2022-11-23T15:16:02.884525" } }, "7": { - "success": false, - "searchTarget": 16500, - "searchFloor": 16500, - "searchCeiling": 16500, + "success": true, + "searchTarget": 26500, + "searchFloor": 26500, + "searchCeiling": 26500, "basicTestResult": { - "targetTPS": 16500, - "resultAvgTps": 15714.823529411764, - "expectedTxns": 165000, - "resultTxns": 165000, - "tpsExpectMet": false, + "targetTPS": 26500, + "resultAvgTps": 26400.5625, + "expectedTxns": 265000, + "resultTxns": 265000, + "tpsExpectMet": true, "trxExpectMet": true, "basicTestSuccess": true, - "testAnalysisBlockCnt": 18, - "logsDir": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-26-30-16500", - "testStart": "2022-11-21T22:26:30.155632", - "testEnd": "2022-11-21T22:27:48.093871" + "testAnalysisBlockCnt": 17, + "logsDir": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic/2022-11-23_15-16-02-26500", + "testStart": "2022-11-23T15:16:02.953195", + "testEnd": "2022-11-23T15:17:28.412837" } } }, @@ -622,22 +645,41 @@ Finally, the full detail test report 
for each of the determined max TPS throughp }, "LongRunningSearchResults": { "0": { + "success": false, + "searchTarget": 26500, + "searchFloor": 0, + "searchCeiling": 26500, + "basicTestResult": { + "targetTPS": 26500, + "resultAvgTps": 22554.42105263158, + "expectedTxns": 265000, + "resultTxns": 265000, + "tpsExpectMet": false, + "trxExpectMet": true, + "basicTestSuccess": true, + "testAnalysisBlockCnt": 20, + "logsDir": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic/2022-11-23_15-17-28-26500", + "testStart": "2022-11-23T15:17:28.483195", + "testEnd": "2022-11-23T15:18:52.048868" + } + }, + "1": { "success": true, - "searchTarget": 16000, + "searchTarget": 26000, "searchFloor": 0, - "searchCeiling": 16000, + "searchCeiling": 26500, "basicTestResult": { - "targetTPS": 16000, - "resultAvgTps": 15933.1875, - "expectedTxns": 160000, - "resultTxns": 160000, + "targetTPS": 26000, + "resultAvgTps": 25986.9375, + "expectedTxns": 260000, + "resultTxns": 260000, "tpsExpectMet": true, "trxExpectMet": true, "basicTestSuccess": true, "testAnalysisBlockCnt": 17, - "logsDir": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-27-48-16000", - "testStart": "2022-11-21T22:27:48.146027", - "testEnd": "2022-11-21T22:29:02.871273" + "logsDir": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic/2022-11-23_15-18-52-26000", + "testStart": "2022-11-23T15:18:52.115767", + "testEnd": "2022-11-23T15:20:16.911367" } } }, @@ -654,6 +696,38 @@ Finally, the full detail test report for each of the determined max TPS throughp }, }, + "ProducerThreadAnalysis": { + "recommendedThreadCount": 6, + "threadToMaxTpsDict": { + "2": 16000, + "3": 21000, + "4": 24000, + "5": 25500, + "6": 27000, + "7": 26000 + }, + "analysisStart": "2022-11-23T12:56:58.730271", + "analysisFinish": "2022-11-23T14:05:45.727625" + }, + "ChainThreadAnalysis": { + "recommendedThreadCount": 3, + "threadToMaxTpsDict": { + "2": 25000, + "3": 26500, + "4": 26500 + }, + "analysisStart": "2022-11-23T14:05:45.728348", + "analysisFinish": "2022-11-23T14:41:43.721885" + }, + "NetThreadAnalysis": { + "recommendedThreadCount": 2, + "threadToMaxTpsDict": { + "2": 25500, + "3": 25000 + }, + "analysisStart": "2022-11-23T14:41:43.722862", + "analysisFinish": "2022-11-23T15:05:42.004421" + }, "args": { "killAll": false, "dontKill": false, @@ -671,7 +745,7 @@ Finally, the full detail test report for each of the determined max TPS throughp "chainPluginArgs": { "signatureCpuBillablePct": 0, "chainStateDbSizeMb": 10240, - "chainThreads": 2, + "chainThreads": 3, "databaseMapMode": "mapped" }, "producerPluginArgs": { @@ -680,7 +754,7 @@ Finally, the full detail test report for each of the determined max TPS throughp "produceTimeOffsetUs": 0, "cpuEffortPercent": 100, "lastBlockCpuEffortPercent": 100, - "producerThreads": 2 + "producerThreads": 6 }, "httpPluginArgs": { "httpMaxResponseTimeMs": 990000 @@ -712,10 +786,15 @@ Finally, the full detail test report for each of the determined max TPS throughp "numAddlBlocksToPrune": 2, "quiet": false, "logDirRoot": ".", + "skipTpsTests": false, + "calcProducerThreads": "lmax", + "calcChainThreads": "lmax", + "calcNetThreads": "lmax", "logDirBase": "./performance_test", - "logDirTimestamp": "2022-11-21_22-17-03", - "logDirPath": "./performance_test/2022-11-21_22-17-03", - "ptbLogsDirPath": "./performance_test/2022-11-21_22-17-03/testRunLogs" + "logDirTimestamp": "2022-11-23_12-56-58", + "logDirPath": "./performance_test/2022-11-23_12-56-58", + 
"ptbLogsDirPath": "./performance_test/2022-11-23_12-56-58/testRunLogs", + "pluginThreadOptLogsDirPath": "./performance_test/2022-11-23_12-56-58/pluginThreadOptRunLogs" }, "env": { "system": "Linux", @@ -739,67 +818,70 @@ The Performance Test Basic generates, by default, a report that details results ``` json { "completedRun": true, - "testStart": "2022-11-21T22:27:48.146027", - "testFinish": "2022-11-21T22:29:02.871273", + "testStart": "2022-11-23T15:18:52.115767", + "testFinish": "2022-11-23T15:20:16.911367", "Analysis": { "BlockSize": { - "min": 1369536, - "max": 1624896, - "avg": 1530567.5294117648, - "sigma": 58850.381839050766, + "min": 1937088, + "max": 2971200, + "avg": 2493345.882352941, + "sigma": 186567.07030350564, "emptyBlocks": 0, "numBlocks": 17 }, "BlocksGuide": { "firstBlockNum": 2, - "lastBlockNum": 147, - "totalBlocks": 146, - "testStartBlockNum": 105, - "testEndBlockNum": 136, - "setupBlocksCnt": 103, - "tearDownBlocksCnt": 11, + "lastBlockNum": 165, + "totalBlocks": 164, + "testStartBlockNum": 106, + "testEndBlockNum": 149, + "setupBlocksCnt": 104, + "tearDownBlocksCnt": 16, "leadingEmptyBlocksCnt": 1, - "trailingEmptyBlocksCnt": 10, + "trailingEmptyBlocksCnt": 22, "configAddlDropCnt": 2, "testAnalysisBlockCnt": 17 }, "TPS": { - "min": 14996, - "max": 16486, - "avg": 15933.1875, - "sigma": 403.137727512261, + "min": 23164, + "max": 28791, + "avg": 25986.9375, + "sigma": 1033.1693634606816, "emptyBlocks": 0, "numBlocks": 17, - "configTps": 16000, + "configTps": 26000, "configTestDuration": 10, "tpsPerGenerator": [ - 4000, - 4000, - 4000, - 4000 + 3714, + 3714, + 3714, + 3714, + 3714, + 3715, + 3715 ], - "generatorCount": 4 + "generatorCount": 7 }, "TrxCPU": { "min": 7.0, - "max": 657.0, - "avg": 21.81190625, - "sigma": 9.853241319038672, - "samples": 160000 + "max": 10893.0, + "avg": 17.314342307692307, + "sigma": 41.16144172726996, + "samples": 260000 }, "TrxLatency": { "min": 0.0009999275207519531, - "max": 0.565000057220459, - "avg": 0.27573538126200436, - "sigma": 0.14606770516057177, - "samples": 160000 + "max": 0.6380000114440918, + "avg": 0.26549454224201346, + "sigma": 0.14674558675649374, + "samples": 260000 }, "TrxNet": { "min": 24.0, "max": 24.0, "avg": 24.0, "sigma": 0.0, - "samples": 160000 + "samples": 260000 } }, "args": { @@ -819,7 +901,7 @@ The Performance Test Basic generates, by default, a report that details results "chainPluginArgs": { "signatureCpuBillablePct": 0, "chainStateDbSizeMb": 10240, - "chainThreads": 2, + "chainThreads": 3, "databaseMapMode": "mapped" }, "producerPluginArgs": { @@ -828,7 +910,7 @@ The Performance Test Basic generates, by default, a report that details results "produceTimeOffsetUs": 0, "cpuEffortPercent": 100, "lastBlockCpuEffortPercent": 100, - "producerThreads": 2 + "producerThreads": 6 }, "httpPluginArgs": { "httpMaxResponseTimeMs": 990000 @@ -849,19 +931,19 @@ The Performance Test Basic generates, by default, a report that details results "1": "--plugin eosio::trace_api_plugin" }, "_totalNodes": 2, - "targetTps": 16000, + "targetTps": 26000, "testTrxGenDurationSec": 10, "tpsLimitPerGenerator": 4000, "numAddlBlocksToPrune": 2, - "logDirRoot": "./performance_test/2022-11-21_22-17-03/testRunLogs", + "logDirRoot": "./performance_test/2022-11-23_12-56-58/testRunLogs", "delReport": false, "quiet": false, "delPerfLogs": false, - "expectedTransactionsSent": 160000, - "logDirBase": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic", - "logDirTimestamp": "2022-11-21_22-27-48", - 
"logDirTimestampedOptSuffix": "-16000", - "logDirPath": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-27-48-16000" + "expectedTransactionsSent": 260000, + "logDirBase": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic", + "logDirTimestamp": "2022-11-23_15-18-52", + "logDirTimestampedOptSuffix": "-26000", + "logDirPath": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic/2022-11-23_15-18-52-26000" }, "env": { "system": "Linux", diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index 5c721e01ae..6dc4a959b2 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -1,5 +1,6 @@ #!/usr/bin/env python3 +import copy import math import os import sys @@ -15,6 +16,7 @@ from platform import release, system from dataclasses import dataclass, asdict, field from datetime import datetime +from enum import Enum class PerformanceTest: @@ -53,12 +55,25 @@ class PtConfig: numAddlBlocksToPrune: int=2 quiet: bool=False logDirRoot: str="." + skipTpsTests: bool=False + calcProducerThreads: str="none" + calcChainThreads: str="none" + calcNetThreads: str="none" + @dataclass - class PerfTestSearchResults: - maxTpsAchieved: int = 0 - searchResults: list = field(default_factory=list) #PerfTestSearchIndivResult list - maxTpsReport: dict = field(default_factory=dict) + class TpsTestResult: + @dataclass + class PerfTestSearchResults: + maxTpsAchieved: int = 0 + searchResults: list = field(default_factory=list) #PerfTestSearchIndivResult list + maxTpsReport: dict = field(default_factory=dict) + + binSearchResults: PerfTestSearchResults=PerfTestSearchResults() + longRunningSearchResults: PerfTestSearchResults=PerfTestSearchResults() + tpsTestStart: datetime=datetime.utcnow() + tpsTestFinish: datetime=datetime.utcnow() + perfRunSuccessful: bool=False @dataclass class LoggingConfig: @@ -66,10 +81,12 @@ class LoggingConfig: logDirTimestamp: str = f"{datetime.utcnow().strftime('%Y-%m-%d_%H-%M-%S')}" logDirPath: str = field(default_factory=str, init=False) ptbLogsDirPath: str = field(default_factory=str, init=False) + pluginThreadOptLogsDirPath: str = field(default_factory=str, init=False) def __post_init__(self): self.logDirPath = f"{self.logDirBase}/{self.logDirTimestamp}" self.ptbLogsDirPath = f"{self.logDirPath}/testRunLogs" + self.pluginThreadOptLogsDirPath = f"{self.logDirPath}/pluginThreadOptRunLogs" def __init__(self, testHelperConfig: PerformanceTestBasic.TestHelperConfig=PerformanceTestBasic.TestHelperConfig(), clusterConfig: PerformanceTestBasic.ClusterConfig=PerformanceTestBasic.ClusterConfig(), ptConfig=PtConfig()): @@ -77,12 +94,12 @@ def __init__(self, testHelperConfig: PerformanceTestBasic.TestHelperConfig=Perfo self.clusterConfig = clusterConfig self.ptConfig = ptConfig - self.testStart = datetime.utcnow() + self.testsStart = datetime.utcnow() self.loggingConfig = PerformanceTest.LoggingConfig(logDirBase=f"{self.ptConfig.logDirRoot}/{os.path.splitext(os.path.basename(__file__))[0]}", - logDirTimestamp=f"{self.testStart.strftime('%Y-%m-%d_%H-%M-%S')}") + logDirTimestamp=f"{self.testsStart.strftime('%Y-%m-%d_%H-%M-%S')}") - def performPtbBinarySearch(self) -> PerfTestSearchResults: + def performPtbBinarySearch(self, clusterConfig: PerformanceTestBasic.ClusterConfig, logDirRoot: str, delReport: bool, quiet: bool, delPerfLogs: bool) -> TpsTestResult.PerfTestSearchResults: floor = 0 ceiling = self.ptConfig.maxTpsToTest 
binSearchTarget = self.ptConfig.maxTpsToTest @@ -97,10 +114,10 @@ def performPtbBinarySearch(self) -> PerfTestSearchResults: ptbResult = PerformanceTest.PerfTestSearchIndivResult.PerfTestBasicResult() scenarioResult = PerformanceTest.PerfTestSearchIndivResult(success=False, searchTarget=binSearchTarget, searchFloor=floor, searchCeiling=ceiling, basicTestResult=ptbResult) ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=binSearchTarget, testTrxGenDurationSec=self.ptConfig.testDurationSec, tpsLimitPerGenerator=self.ptConfig.tpsLimitPerGenerator, - numAddlBlocksToPrune=self.ptConfig.numAddlBlocksToPrune, logDirRoot=self.loggingConfig.ptbLogsDirPath, delReport=self.ptConfig.delReport, quiet=self.ptConfig.quiet, - delPerfLogs=self.ptConfig.delPerfLogs) + numAddlBlocksToPrune=self.ptConfig.numAddlBlocksToPrune, logDirRoot=logDirRoot, delReport=delReport, + quiet=quiet, delPerfLogs=delPerfLogs) - myTest = PerformanceTestBasic(testHelperConfig=self.testHelperConfig, clusterConfig=self.clusterConfig, ptbConfig=ptbConfig) + myTest = PerformanceTestBasic(testHelperConfig=self.testHelperConfig, clusterConfig=clusterConfig, ptbConfig=ptbConfig) testSuccessful = myTest.runTest() if self.evaluateSuccess(myTest, testSuccessful, ptbResult): maxTpsAchieved = binSearchTarget @@ -117,9 +134,9 @@ def performPtbBinarySearch(self) -> PerfTestSearchResults: binSearchTarget = floor + (math.ceil(((ceiling - floor) / minStep) / 2) * minStep) - return PerformanceTest.PerfTestSearchResults(maxTpsAchieved=maxTpsAchieved, searchResults=searchResults, maxTpsReport=maxTpsReport) + return PerformanceTest.TpsTestResult.PerfTestSearchResults(maxTpsAchieved=maxTpsAchieved, searchResults=searchResults, maxTpsReport=maxTpsReport) - def performPtbReverseLinearSearch(self, tpsInitial: int) -> PerfTestSearchResults: + def performPtbReverseLinearSearch(self, tpsInitial: int) -> TpsTestResult.PerfTestSearchResults: # Default - Decrementing Max TPS in range [0, tpsInitial] absFloor = 0 @@ -156,7 +173,7 @@ def performPtbReverseLinearSearch(self, tpsInitial: int) -> PerfTestSearchResult if not self.ptConfig.quiet: print(f"searchResult: {searchTarget} : {searchResults[-1]}") - return PerformanceTest.PerfTestSearchResults(maxTpsAchieved=maxTpsAchieved, searchResults=searchResults, maxTpsReport=maxTpsReport) + return PerformanceTest.TpsTestResult.PerfTestSearchResults(maxTpsAchieved=maxTpsAchieved, searchResults=searchResults, maxTpsReport=maxTpsReport) def evaluateSuccess(self, test: PerformanceTestBasic, testSuccessful: bool, result: PerfTestSearchIndivResult.PerfTestBasicResult) -> bool: result.targetTPS = test.ptbConfig.targetTps @@ -178,29 +195,142 @@ def evaluateSuccess(self, test: PerformanceTestBasic, testSuccessful: bool, resu return result.basicTestSuccess and result.tpsExpectMet and result.trxExpectMet - def createReport(self, maxTpsAchieved, searchResults, maxTpsReport, longRunningMaxTpsAchieved, longRunningSearchResults, longRunningMaxTpsReport, testStart: datetime, testFinish: datetime, argsDict) -> dict: + class PluginThreadOpt(Enum): + PRODUCER = 1 + CHAIN = 2 + NET = 3 + + class PluginThreadOptRunType(Enum): + FULL = 1 + LOCAL_MAX = 2 + + @dataclass + class PluginThreadOptResult: + recommendedThreadCount: int = 0 + threadToMaxTpsDict: dict = field(default_factory=dict) + analysisStart: datetime = datetime.utcnow() + analysisFinish: datetime = datetime.utcnow() + + def optimizePluginThreadCount(self, optPlugin: PluginThreadOpt, optType: PluginThreadOptRunType=PluginThreadOptRunType.LOCAL_MAX, + minThreadCount: int=2, 
maxThreadCount: int=os.cpu_count()) -> PluginThreadOptResult: + + if optPlugin == PerformanceTest.PluginThreadOpt.PRODUCER: + fileName = "producerThreadResults.txt" + elif optPlugin == PerformanceTest.PluginThreadOpt.CHAIN: + fileName = "chainThreadResults.txt" + else: + fileName = "netThreadResults.txt" + + resultsFile = f"{self.loggingConfig.pluginThreadOptLogsDirPath}/{fileName}" + + threadToMaxTpsDict: dict = {} + + clusterConfig = copy.deepcopy(self.clusterConfig) + analysisStart = datetime.utcnow() + + with open(resultsFile, 'w') as log: + log.write(f"{optPlugin.name.lower()}Threads, maxTpsAchieved\n") + log.close() + + lastMaxTpsAchieved = 0 + for threadCount in range(minThreadCount, maxThreadCount+1): + print(f"Running {optPlugin.name.lower()} thread count optimization check with {threadCount} {optPlugin.name.lower()} threads") + + if optPlugin == PerformanceTest.PluginThreadOpt.PRODUCER: + clusterConfig.extraNodeosArgs.producerPluginArgs.producerThreads = threadCount + elif optPlugin == PerformanceTest.PluginThreadOpt.CHAIN: + clusterConfig.extraNodeosArgs.chainPluginArgs.chainThreads = threadCount + else: + clusterConfig.extraNodeosArgs.netPluginArgs.netThreads = threadCount + + binSearchResults = self.performPtbBinarySearch(clusterConfig=clusterConfig, logDirRoot=self.loggingConfig.pluginThreadOptLogsDirPath, + delReport=True, quiet=False, delPerfLogs=True) + + threadToMaxTpsDict[threadCount] = binSearchResults.maxTpsAchieved + if not self.ptConfig.quiet: + print("Search Results:") + for i in range(len(binSearchResults.searchResults)): + print(f"Search scenario {optPlugin.name} thread count {threadCount}: {i} result: {binSearchResults.searchResults[i]}") + + with open(resultsFile, 'a') as log: + log.write(f"{threadCount},{binSearchResults.maxTpsAchieved}\n") + log.close() + + if optType == PerformanceTest.PluginThreadOptRunType.LOCAL_MAX: + if binSearchResults.maxTpsAchieved <= lastMaxTpsAchieved: + break + lastMaxTpsAchieved = binSearchResults.maxTpsAchieved + + analysisFinish = datetime.utcnow() + + def calcLocalMax(threadToMaxDict: dict): + localMax = 0 + recThreadCount = 0 + for threads, tps in threadToMaxDict.items(): + if tps > localMax: + localMax = tps + recThreadCount = threads + else: + break + return recThreadCount + + recommendedThreadCount = calcLocalMax(threadToMaxDict=threadToMaxTpsDict) + + return PerformanceTest.PluginThreadOptResult(recommendedThreadCount=recommendedThreadCount, threadToMaxTpsDict=threadToMaxTpsDict, + analysisStart=analysisStart, analysisFinish=analysisFinish) + + def createTpsTestReport(self, tpsTestResult: TpsTestResult) -> dict: report = {} - report['InitialMaxTpsAchieved'] = maxTpsAchieved - report['LongRunningMaxTpsAchieved'] = longRunningMaxTpsAchieved - report['testStart'] = testStart - report['testFinish'] = testFinish - report['InitialSearchResults'] = {x: asdict(searchResults[x]) for x in range(len(searchResults))} - report['InitialMaxTpsReport'] = maxTpsReport - report['LongRunningSearchResults'] = {x: asdict(longRunningSearchResults[x]) for x in range(len(longRunningSearchResults))} - report['LongRunningMaxTpsReport'] = longRunningMaxTpsReport - report['args'] = argsDict + report['InitialMaxTpsAchieved'] = tpsTestResult.binSearchResults.maxTpsAchieved + report['LongRunningMaxTpsAchieved'] = tpsTestResult.longRunningSearchResults.maxTpsAchieved + report['tpsTestStart'] = tpsTestResult.tpsTestStart + report['tpsTestFinish'] = tpsTestResult.tpsTestFinish + report['InitialSearchResults'] = {x: 
asdict(tpsTestResult.binSearchResults.searchResults[x]) for x in range(len(tpsTestResult.binSearchResults.searchResults))} + report['InitialMaxTpsReport'] = tpsTestResult.binSearchResults.maxTpsReport + report['LongRunningSearchResults'] = {x: asdict(tpsTestResult.longRunningSearchResults.searchResults[x]) for x in range(len(tpsTestResult.longRunningSearchResults.searchResults))} + report['LongRunningMaxTpsReport'] = tpsTestResult.longRunningSearchResults.maxTpsReport + return report + + def createReport(self,producerThreadResult: PluginThreadOptResult=None, chainThreadResult: PluginThreadOptResult=None, netThreadResult: PluginThreadOptResult=None, tpsTestResult: dict=None) -> dict: + report = {} + report['perfTestsBegin'] = self.testsStart + report['perfTestsFinish'] = self.testsFinish + if tpsTestResult is not None: + report.update(self.createTpsTestReport(tpsTestResult)) + + if producerThreadResult is not None: + report['ProducerThreadAnalysis'] = asdict(producerThreadResult) + + if chainThreadResult is not None: + report['ChainThreadAnalysis'] = asdict(chainThreadResult) + + if netThreadResult is not None: + report['NetThreadAnalysis'] = asdict(netThreadResult) + + report['args'] = self.prepArgsDict() report['env'] = {'system': system(), 'os': os.name, 'release': release(), 'logical_cpu_count': os.cpu_count()} report['nodeosVersion'] = Utils.getNodeosVersion() return report - def createJSONReport(self, maxTpsAchieved, searchResults, maxTpsReport, longRunningMaxTpsAchieved, longRunningSearchResults, longRunningMaxTpsReport, testStart: datetime, testFinish: datetime, argsDict) -> json: - report = self.createReport(maxTpsAchieved=maxTpsAchieved, searchResults=searchResults, maxTpsReport=maxTpsReport, longRunningMaxTpsAchieved=longRunningMaxTpsAchieved, - longRunningSearchResults=longRunningSearchResults, longRunningMaxTpsReport=longRunningMaxTpsReport, testStart=testStart, testFinish=testFinish, argsDict=argsDict) - return self.reportAsJSON(report) - def reportAsJSON(self, report: dict) -> json: - report['testStart'] = "Unknown" if report['testStart'] is None else report['testStart'].isoformat() - report['testFinish'] = "Unknown" if report['testFinish'] is None else report['testFinish'].isoformat() + if 'ProducerThreadAnalysis' in report: + report['ProducerThreadAnalysis']['analysisStart'] = report['ProducerThreadAnalysis']['analysisStart'].isoformat() + report['ProducerThreadAnalysis']['analysisFinish'] = report['ProducerThreadAnalysis']['analysisFinish'].isoformat() + if 'ChainThreadAnalysis' in report: + report['ChainThreadAnalysis']['analysisStart'] = report['ChainThreadAnalysis']['analysisStart'].isoformat() + report['ChainThreadAnalysis']['analysisFinish'] = report['ChainThreadAnalysis']['analysisFinish'].isoformat() + if 'NetThreadAnalysis' in report: + report['NetThreadAnalysis']['analysisStart'] = report['NetThreadAnalysis']['analysisStart'].isoformat() + report['NetThreadAnalysis']['analysisFinish'] = report['NetThreadAnalysis']['analysisFinish'].isoformat() + + if 'tpsTestStart' in report: + report['tpsTestStart'] = report['tpsTestStart'].isoformat() + if 'tpsTestFinish' in report: + report['tpsTestFinish'] = report['tpsTestFinish'].isoformat() + + report['perfTestsBegin'] = report['perfTestsBegin'].isoformat() + report['perfTestsFinish'] = report['perfTestsFinish'].isoformat() + return json.dumps(report, indent=2) def exportReportAsJSON(self, report: json, exportPath): @@ -217,6 +347,7 @@ def removeArtifacts(path): if not self.ptConfig.delReport: 
removeArtifacts(self.loggingConfig.ptbLogsDirPath) + removeArtifacts(self.loggingConfig.pluginThreadOptLogsDirPath) else: removeArtifacts(self.loggingConfig.logDirPath) except OSError as error: @@ -233,6 +364,7 @@ def createArtifactsDir(path): createArtifactsDir(self.loggingConfig.logDirBase) createArtifactsDir(self.loggingConfig.logDirPath) createArtifactsDir(self.loggingConfig.ptbLogsDirPath) + createArtifactsDir(self.loggingConfig.pluginThreadOptLogsDirPath) except OSError as error: print(error) @@ -245,55 +377,92 @@ def prepArgsDict(self) -> dict: argsDict.update(asdict(self.loggingConfig)) return argsDict - def performTpsTest(self): + def performTpsTest(self) -> TpsTestResult: + tpsTestStart = datetime.utcnow() perfRunSuccessful = False - try: - binSearchResults = self.performPtbBinarySearch() + binSearchResults = self.performPtbBinarySearch(clusterConfig=self.clusterConfig, logDirRoot=self.loggingConfig.ptbLogsDirPath, + delReport=self.ptConfig.delReport, quiet=self.ptConfig.quiet, delPerfLogs=self.ptConfig.delPerfLogs) - print(f"Successful rate of: {binSearchResults.maxTpsAchieved}") + print(f"Successful rate of: {binSearchResults.maxTpsAchieved}") - if not self.ptConfig.quiet: - print("Search Results:") - for i in range(len(binSearchResults.searchResults)): - print(f"Search scenario: {i} result: {binSearchResults.searchResults[i]}") + if not self.ptConfig.quiet: + print("Search Results:") + for i in range(len(binSearchResults.searchResults)): + print(f"Search scenario: {i} result: {binSearchResults.searchResults[i]}") - longRunningSearchResults = self.performPtbReverseLinearSearch(tpsInitial=binSearchResults.maxTpsAchieved) + longRunningSearchResults = self.performPtbReverseLinearSearch(tpsInitial=binSearchResults.maxTpsAchieved) - print(f"Long Running Test - Successful rate of: {longRunningSearchResults.maxTpsAchieved}") - perfRunSuccessful = True + print(f"Long Running Test - Successful rate of: {longRunningSearchResults.maxTpsAchieved}") + perfRunSuccessful = True - if not self.ptConfig.quiet: - print("Long Running Test - Search Results:") - for i in range(len(longRunningSearchResults.searchResults)): - print(f"Search scenario: {i} result: {longRunningSearchResults.searchResults[i]}") + if not self.ptConfig.quiet: + print("Long Running Test - Search Results:") + for i in range(len(longRunningSearchResults.searchResults)): + print(f"Search scenario: {i} result: {longRunningSearchResults.searchResults[i]}") - testFinish = datetime.utcnow() - argsDict = self.prepArgsDict() - fullReport = self.createJSONReport(maxTpsAchieved=binSearchResults.maxTpsAchieved, searchResults=binSearchResults.searchResults, maxTpsReport=binSearchResults.maxTpsReport, - longRunningMaxTpsAchieved=longRunningSearchResults.maxTpsAchieved, longRunningSearchResults=longRunningSearchResults.searchResults, - longRunningMaxTpsReport=longRunningSearchResults.maxTpsReport, testStart=self.testStart, testFinish=testFinish, argsDict=argsDict) + tpsTestFinish = datetime.utcnow() - if not self.ptConfig.quiet: - print(f"Full Performance Test Report: {fullReport}") + return PerformanceTest.TpsTestResult(binSearchResults=binSearchResults, longRunningSearchResults=longRunningSearchResults, + tpsTestStart=tpsTestStart, tpsTestFinish=tpsTestFinish, perfRunSuccessful=perfRunSuccessful) - if not self.ptConfig.delReport: - self.exportReportAsJSON(fullReport, f"{self.loggingConfig.logDirPath}/report.json") + def runTest(self): + testSuccessful = True - finally: + TestHelper.printSystemInfo("BEGIN") + self.testDirsCleanup() + 
self.testDirsSetup() - if self.ptConfig.delPerfLogs: - print(f"Cleaning up logs directory: {self.loggingConfig.logDirPath}") - self.testDirsCleanup() + if self.ptConfig.calcProducerThreads != "none": + print(f"Performing Producer Thread Optimization Tests") + if self.ptConfig.calcProducerThreads == "full": + optType = PerformanceTest.PluginThreadOptRunType.FULL + else: + optType = PerformanceTest.PluginThreadOptRunType.LOCAL_MAX + prodResults = self.optimizePluginThreadCount(optPlugin=PerformanceTest.PluginThreadOpt.PRODUCER, optType=optType) + print(f"Producer Thread Optimization results: {prodResults}") + self.clusterConfig.extraNodeosArgs.producerPluginArgs.producerThreads = prodResults.recommendedThreadCount + + if self.ptConfig.calcChainThreads: + print(f"Performing Chain Thread Optimization Tests") + if self.ptConfig.calcChainThreads == "full": + optType = PerformanceTest.PluginThreadOptRunType.FULL + else: + optType = PerformanceTest.PluginThreadOptRunType.LOCAL_MAX + chainResults = self.optimizePluginThreadCount(optPlugin=PerformanceTest.PluginThreadOpt.CHAIN, optType=optType) + print(f"Chain Thread Optimization results: {chainResults}") + self.clusterConfig.extraNodeosArgs.chainPluginArgs.chainThreads = chainResults.recommendedThreadCount + + if self.ptConfig.calcNetThreads: + print(f"Performing Net Thread Optimization Tests") + if self.ptConfig.calcNetThreads == "full": + optType = PerformanceTest.PluginThreadOptRunType.FULL + else: + optType = PerformanceTest.PluginThreadOptRunType.LOCAL_MAX + netResults = self.optimizePluginThreadCount(optPlugin=PerformanceTest.PluginThreadOpt.NET, optType=optType) + print(f"Net Thread Optimization results: {netResults}") + self.clusterConfig.extraNodeosArgs.netPluginArgs.netThreads = netResults.recommendedThreadCount - return perfRunSuccessful + if not self.ptConfig.skipTpsTests: + print(f"Performing TPS Performance Tests") + testSuccessful = False + tpsTestResult = self.performTpsTest() + testSuccessful = tpsTestResult.perfRunSuccessful - def runTest(self): + self.testsFinish = datetime.utcnow() - TestHelper.printSystemInfo("BEGIN") - self.testDirsSetup() + self.report = self.createReport(producerThreadResult=prodResults, chainThreadResult=chainResults, netThreadResult=netResults, tpsTestResult=tpsTestResult) + jsonReport = self.reportAsJSON(self.report) + + if not self.ptConfig.quiet: + print(f"Full Performance Test Report: {jsonReport}") + + if not self.ptConfig.delReport: + self.exportReportAsJSON(jsonReport, f"{self.loggingConfig.logDirPath}/report.json") - testSuccessful = self.performTpsTest() + if self.ptConfig.delPerfLogs: + print(f"Cleaning up logs directory: {self.loggingConfig.logDirPath}") + self.testDirsCleanup() return testSuccessful @@ -327,6 +496,25 @@ def parseArgs(): appArgs.add_bool(flag="--del-test-report", help="Whether to save json reports from each test scenario.") appArgs.add_bool(flag="--quiet", help="Whether to quiet printing intermediate results and reports to stdout") appArgs.add_bool(flag="--prods-enable-trace-api", help="Determines whether producer nodes should have eosio::trace_api_plugin enabled") + appArgs.add_bool(flag="--skip-tps-test", help="Determines whether to skip the max TPS measurement tests") + appArgs.add(flag="--calc-producer-threads", type=str, help="Determines whether to calculate number of worker threads to use in producer thread pool (\"none\", \"lmax\", or \"full\"). 
\
+                    In \"none\" mode, the default, no calculation will be attempted and the default configured --producer-threads value will be used. \
+                    In \"lmax\" mode, producer threads will incrementally be tested until the performance rate ceases to increase with the addition of further threads. \
+                    In \"full\" mode, producer threads will incrementally be tested from 2..num logical processors, recording each performance result and choosing the local max (same value as would be discovered in \"lmax\" mode). \
+                    Useful for graphing the full performance impact of each available thread.",
+                    choices=["none", "lmax", "full"], default="none")
+    appArgs.add(flag="--calc-chain-threads", type=str, help="Determines whether to calculate number of worker threads to use in chain thread pool (\"none\", \"lmax\", or \"full\"). \
+                    In \"none\" mode, the default, no calculation will be attempted and the default configured --chain-threads value will be used. \
+                    In \"lmax\" mode, chain threads will incrementally be tested until the performance rate ceases to increase with the addition of further threads. \
+                    In \"full\" mode, chain threads will incrementally be tested from 2..num logical processors, recording each performance result and choosing the local max (same value as would be discovered in \"lmax\" mode). \
+                    Useful for graphing the full performance impact of each available thread.",
+                    choices=["none", "lmax", "full"], default="none")
+    appArgs.add(flag="--calc-net-threads", type=str, help="Determines whether to calculate number of worker threads to use in net thread pool (\"none\", \"lmax\", or \"full\"). \
+                    In \"none\" mode, the default, no calculation will be attempted and the default configured --net-threads value will be used. \
+                    In \"lmax\" mode, net threads will incrementally be tested until the performance rate ceases to increase with the addition of further threads. \
+                    In \"full\" mode, net threads will incrementally be tested from 2..num logical processors, recording each performance result and choosing the local max (same value as would be discovered in \"lmax\" mode). \
+                    Useful for graphing the full performance impact of each available thread.",
+                    choices=["none", "lmax", "full"], default="none")
 
     args=TestHelper.parse_args({"-p","-n","-d","-s","--nodes-file"
                                 ,"--dump-error-details","-v","--leave-running"
                                 ,"--clean-run"}, applicationSpecificArgs=appArgs)
@@ -364,7 +552,11 @@ def main():
                            delTestReport=args.del_test_report,
                            numAddlBlocksToPrune=args.num_blocks_to_prune,
                            quiet=args.quiet,
-                           logDirRoot=".")
+                           logDirRoot=".",
+                           skipTpsTests=args.skip_tps_test,
+                           calcProducerThreads=args.calc_producer_threads,
+                           calcChainThreads=args.calc_chain_threads,
+                           calcNetThreads=args.calc_net_threads)
 
     myTest = PerformanceTest(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptConfig=ptConfig)
     perfRunSuccessful = myTest.runTest()

From 92fa92a1b21564e27f3f39f1caa0a15c180d01df Mon Sep 17 00:00:00 2001
From: Peter Oschwald
Date: Wed, 23 Nov 2022 12:44:38 -0600
Subject: [PATCH 05/12] Update defaults for producer threads to 6 and chain
 threads to 3 based on performance run calculations.
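The 6- and 3-thread defaults fall out of the "lmax" analysis added in the previous patch (see the ProducerThreadAnalysis and ChainThreadAnalysis sections of the sample report). For context, a minimal sketch of that local-max selection rule; here `measureMaxTps` is a hypothetical stand-in for the per-thread-count TPS binary search the harness actually runs:

``` python
# Sketch only: walk thread counts upward and stop once adding a thread no
# longer raises measured max TPS -- the "lmax" stopping rule.
def findLocalMaxThreads(measureMaxTps, minThreadCount=2, maxThreadCount=7):
    bestTps, bestThreads = 0, minThreadCount
    for threads in range(minThreadCount, maxThreadCount + 1):
        tps = measureMaxTps(threads)
        if tps <= bestTps:
            break                       # performance stopped increasing
        bestTps, bestThreads = tps, threads
    return bestThreads

# Measurements shaped like the ProducerThreadAnalysis results in the report:
measured = {2: 16000, 3: 21000, 4: 24000, 5: 25500, 6: 27000, 7: 26000}
assert findLocalMaxThreads(measured.get) == 6
```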
--- tests/performance_tests/README.md | 8 ++++---- tests/performance_tests/performance_test_basic.py | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/performance_tests/README.md b/tests/performance_tests/README.md index b76b0b11e8..ec14127cd6 100644 --- a/tests/performance_tests/README.md +++ b/tests/performance_tests/README.md @@ -229,7 +229,7 @@ The Performance Harness main script `performance_test.py` can be configured usin * `--chain-state-db-size-mb CHAIN_STATE_DB_SIZE_MB` Maximum size (in MiB) of the chain state database (default: 10240) * `--chain-threads CHAIN_THREADS` - Number of worker threads in controller thread pool (default: 2) + Number of worker threads in controller thread pool (default: 3) * `--database-map-mode {mapped,heap,locked}` Database map mode ("mapped", "heap", or "locked"). In "mapped" mode database is memory mapped as a file. @@ -248,7 +248,7 @@ The Performance Harness main script `performance_test.py` can be configured usin * `--last-block-cpu-effort-percent LAST_BLOCK_CPU_EFFORT_PERCENT` Percentage of cpu block production time used to produce last block. Whole number percentages, e.g. 80 for 80% (default: 100) * `--producer-threads PRODUCER_THREADS` - Number of worker threads in producer thread pool (default: 2) + Number of worker threads in producer thread pool (default: 6) * `--http-max-response-time-ms HTTP_MAX_RESPONSE_TIME_MS` Maximum time for processing a request, -1 for unlimited (default: 990000) * `--del-perf-logs` Whether to delete performance test specific logs. (default: False) @@ -311,7 +311,7 @@ The following scripts are typically used by the Performance Harness main script * `--chain-state-db-size-mb CHAIN_STATE_DB_SIZE_MB` Maximum size (in MiB) of the chain state database (default: 10240) * `--chain-threads CHAIN_THREADS` - Number of worker threads in controller thread pool (default: 2) + Number of worker threads in controller thread pool (default: 3) * `--database-map-mode {mapped,heap,locked}` Database map mode ("mapped", "heap", or "locked"). In "mapped" mode database is memory mapped as a file. @@ -330,7 +330,7 @@ The following scripts are typically used by the Performance Harness main script * `--last-block-cpu-effort-percent LAST_BLOCK_CPU_EFFORT_PERCENT` Percentage of cpu block production time used to produce last block. Whole number percentages, e.g. 80 for 80% (default: 100) * `--producer-threads PRODUCER_THREADS` - Number of worker threads in producer thread pool (default: 2) + Number of worker threads in producer thread pool (default: 6) * `--http-max-response-time-ms HTTP_MAX_RESPONSE_TIME_MS` Maximum time for processing a request, -1 for unlimited (default: 990000) * `--del-perf-logs` Whether to delete performance test specific logs. 
(default: False) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 8ffe58ed3e..cf7496a668 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -51,7 +51,7 @@ class ExtraNodeosArgs: class ChainPluginArgs: signatureCpuBillablePct: int = 0 chainStateDbSizeMb: int = 10 * 1024 - chainThreads: int = 2 + chainThreads: int = 3 databaseMapMode: str = "mapped" def __str__(self) -> str: @@ -74,7 +74,7 @@ class ProducerPluginArgs: produceTimeOffsetUs: int = 0 cpuEffortPercent: int = 100 lastBlockCpuEffortPercent: int = 100 - producerThreads: int = 2 + producerThreads: int = 6 def __str__(self) -> str: return f"--disable-subjective-billing {self.disableSubjectiveBilling} \ From 2ca2eb9d44b8fbe2de74c784bdade08a2bd8c0fd Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 23 Nov 2022 13:17:53 -0600 Subject: [PATCH 06/12] Fix typo. --- tests/performance_tests/performance_test_basic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index cf7496a668..1e4610f8a7 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -119,7 +119,7 @@ def __post_init__(self): @dataclass class PtbConfig: - targetTps: int=8000, + targetTps: int=8000 testTrxGenDurationSec: int=30 tpsLimitPerGenerator: int=4000 numAddlBlocksToPrune: int=2 From 1db0e6c7c347dfc3f314fa3a66c6f24dc54218cc Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 28 Nov 2022 11:14:06 -0600 Subject: [PATCH 07/12] Fix check for thread calc being 'none' --- tests/performance_tests/performance_test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index 6dc4a959b2..ae8fb3cbfe 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -423,7 +423,7 @@ def runTest(self): print(f"Producer Thread Optimization results: {prodResults}") self.clusterConfig.extraNodeosArgs.producerPluginArgs.producerThreads = prodResults.recommendedThreadCount - if self.ptConfig.calcChainThreads: + if self.ptConfig.calcChainThreads != "none": print(f"Performing Chain Thread Optimization Tests") if self.ptConfig.calcChainThreads == "full": optType = PerformanceTest.PluginThreadOptRunType.FULL @@ -433,7 +433,7 @@ def runTest(self): print(f"Chain Thread Optimization results: {chainResults}") self.clusterConfig.extraNodeosArgs.chainPluginArgs.chainThreads = chainResults.recommendedThreadCount - if self.ptConfig.calcNetThreads: + if self.ptConfig.calcNetThreads != "none": print(f"Performing Net Thread Optimization Tests") if self.ptConfig.calcNetThreads == "full": optType = PerformanceTest.PluginThreadOptRunType.FULL From 84b11790c469fdfc00eeb259494d9be8c7da51cf Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 28 Nov 2022 11:41:06 -0600 Subject: [PATCH 08/12] Fix reference before assignment error on results objects when not configured to run tests. 
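The failure mode fixed here is Python's reference-before-assignment: each results variable was only bound inside its `if` branch, so skipping that optimization pass (mode "none") left the name unbound when the later reporting code read it. A stripped-down illustration of the pattern and the fix, using placeholder names rather than the harness's exact code:

    def run_test(calc_producer: str = "none"):
        prodResults = None  # the fix: bind the name before the conditional
        if calc_producer != "none":
            prodResults = "optimization result"  # stand-in for the real run
        # the report step reads the name unconditionally; without the binding
        # above, calc_producer == "none" raises UnboundLocalError here
        return {"ProducerThreadAnalysis": prodResults}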
--- tests/performance_tests/performance_test.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index ae8fb3cbfe..6e11a2c833 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -413,6 +413,7 @@ def runTest(self): self.testDirsCleanup() self.testDirsSetup() + prodResults = None if self.ptConfig.calcProducerThreads != "none": print(f"Performing Producer Thread Optimization Tests") if self.ptConfig.calcProducerThreads == "full": @@ -423,6 +424,7 @@ def runTest(self): print(f"Producer Thread Optimization results: {prodResults}") self.clusterConfig.extraNodeosArgs.producerPluginArgs.producerThreads = prodResults.recommendedThreadCount + chainResults = None if self.ptConfig.calcChainThreads != "none": print(f"Performing Chain Thread Optimization Tests") if self.ptConfig.calcChainThreads == "full": @@ -433,6 +435,7 @@ def runTest(self): print(f"Chain Thread Optimization results: {chainResults}") self.clusterConfig.extraNodeosArgs.chainPluginArgs.chainThreads = chainResults.recommendedThreadCount + netResults = None if self.ptConfig.calcNetThreads != "none": print(f"Performing Net Thread Optimization Tests") if self.ptConfig.calcNetThreads == "full": @@ -443,6 +446,7 @@ def runTest(self): print(f"Net Thread Optimization results: {netResults}") self.clusterConfig.extraNodeosArgs.netPluginArgs.netThreads = netResults.recommendedThreadCount + tpsTestResult = None if not self.ptConfig.skipTpsTests: print(f"Performing TPS Performance Tests") testSuccessful = False From 0d5980d2da0e2af090e82710d4e592f5db820b8e Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 30 Nov 2022 10:29:33 -0600 Subject: [PATCH 09/12] Use Python introspection to simplify. 
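The introspection below hinges on a naming convention: each PluginThreadOpt member's string value ("producer", "chain", "net") matches both the `*PluginArgs` attribute name and the renamed shared `threads` field, so a single getattr lookup replaces the per-plugin if/elif chains. A condensed sketch of the pattern, with simplified classes rather than the harness's full definitions:

    from dataclasses import dataclass, field
    from enum import Enum

    @dataclass
    class ProducerPluginArgs:
        threads: int = 6

    @dataclass
    class ExtraNodeosArgs:
        producerPluginArgs: ProducerPluginArgs = field(default_factory=ProducerPluginArgs)

    class PluginThreadOpt(Enum):
        PRODUCER = "producer"  # value doubles as the attribute-name prefix

    extra = ExtraNodeosArgs()
    opt = PluginThreadOpt.PRODUCER
    # one lookup replaces the former if/elif over plugin types
    getattr(extra, opt.value + "PluginArgs").threads = 4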
--- tests/performance_tests/performance_test.py | 40 +++++++------------ .../performance_test_basic.py | 18 ++++----- 2 files changed, 23 insertions(+), 35 deletions(-) diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index 6e11a2c833..91e06169ea 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -196,9 +196,9 @@ def evaluateSuccess(self, test: PerformanceTestBasic, testSuccessful: bool, resu return result.basicTestSuccess and result.tpsExpectMet and result.trxExpectMet class PluginThreadOpt(Enum): - PRODUCER = 1 - CHAIN = 2 - NET = 3 + PRODUCER = "producer" + CHAIN = "chain" + NET = "net" class PluginThreadOptRunType(Enum): FULL = 1 @@ -214,14 +214,7 @@ class PluginThreadOptResult: def optimizePluginThreadCount(self, optPlugin: PluginThreadOpt, optType: PluginThreadOptRunType=PluginThreadOptRunType.LOCAL_MAX, minThreadCount: int=2, maxThreadCount: int=os.cpu_count()) -> PluginThreadOptResult: - if optPlugin == PerformanceTest.PluginThreadOpt.PRODUCER: - fileName = "producerThreadResults.txt" - elif optPlugin == PerformanceTest.PluginThreadOpt.CHAIN: - fileName = "chainThreadResults.txt" - else: - fileName = "netThreadResults.txt" - - resultsFile = f"{self.loggingConfig.pluginThreadOptLogsDirPath}/{fileName}" + resultsFile = f"{self.loggingConfig.pluginThreadOptLogsDirPath}/{optPlugin.value}ThreadResults.txt" threadToMaxTpsDict: dict = {} @@ -229,19 +222,14 @@ def optimizePluginThreadCount(self, optPlugin: PluginThreadOpt, optType: Plugin analysisStart = datetime.utcnow() with open(resultsFile, 'w') as log: - log.write(f"{optPlugin.name.lower()}Threads, maxTpsAchieved\n") + log.write(f"{optPlugin.value}Threads, maxTpsAchieved\n") log.close() lastMaxTpsAchieved = 0 for threadCount in range(minThreadCount, maxThreadCount+1): - print(f"Running {optPlugin.name.lower()} thread count optimization check with {threadCount} {optPlugin.name.lower()} threads") + print(f"Running {optPlugin.value} thread count optimization check with {threadCount} {optPlugin.value} threads") - if optPlugin == PerformanceTest.PluginThreadOpt.PRODUCER: - clusterConfig.extraNodeosArgs.producerPluginArgs.producerThreads = threadCount - elif optPlugin == PerformanceTest.PluginThreadOpt.CHAIN: - clusterConfig.extraNodeosArgs.chainPluginArgs.chainThreads = threadCount - else: - clusterConfig.extraNodeosArgs.netPluginArgs.netThreads = threadCount + getattr(clusterConfig.extraNodeosArgs, optPlugin.value + 'PluginArgs').threads = threadCount binSearchResults = self.performPtbBinarySearch(clusterConfig=clusterConfig, logDirRoot=self.loggingConfig.pluginThreadOptLogsDirPath, delReport=True, quiet=False, delPerfLogs=True) @@ -250,7 +238,7 @@ def optimizePluginThreadCount(self, optPlugin: PluginThreadOpt, optType: Plugin if not self.ptConfig.quiet: print("Search Results:") for i in range(len(binSearchResults.searchResults)): - print(f"Search scenario {optPlugin.name} thread count {threadCount}: {i} result: {binSearchResults.searchResults[i]}") + print(f"Search scenario {optPlugin.value} thread count {threadCount}: {i} result: {binSearchResults.searchResults[i]}") with open(resultsFile, 'a') as log: log.write(f"{threadCount},{binSearchResults.maxTpsAchieved}\n") @@ -422,7 +410,7 @@ def runTest(self): optType = PerformanceTest.PluginThreadOptRunType.LOCAL_MAX prodResults = self.optimizePluginThreadCount(optPlugin=PerformanceTest.PluginThreadOpt.PRODUCER, optType=optType) print(f"Producer Thread Optimization results: 
{prodResults}") - self.clusterConfig.extraNodeosArgs.producerPluginArgs.producerThreads = prodResults.recommendedThreadCount + self.clusterConfig.extraNodeosArgs.producerPluginArgs.threads = prodResults.recommendedThreadCount chainResults = None if self.ptConfig.calcChainThreads != "none": @@ -433,7 +421,7 @@ def runTest(self): optType = PerformanceTest.PluginThreadOptRunType.LOCAL_MAX chainResults = self.optimizePluginThreadCount(optPlugin=PerformanceTest.PluginThreadOpt.CHAIN, optType=optType) print(f"Chain Thread Optimization results: {chainResults}") - self.clusterConfig.extraNodeosArgs.chainPluginArgs.chainThreads = chainResults.recommendedThreadCount + self.clusterConfig.extraNodeosArgs.chainPluginArgs.threads = chainResults.recommendedThreadCount netResults = None if self.ptConfig.calcNetThreads != "none": @@ -444,7 +432,7 @@ def runTest(self): optType = PerformanceTest.PluginThreadOptRunType.LOCAL_MAX netResults = self.optimizePluginThreadCount(optPlugin=PerformanceTest.PluginThreadOpt.NET, optType=optType) print(f"Net Thread Optimization results: {netResults}") - self.clusterConfig.extraNodeosArgs.netPluginArgs.netThreads = netResults.recommendedThreadCount + self.clusterConfig.extraNodeosArgs.netPluginArgs.threads = netResults.recommendedThreadCount tpsTestResult = None if not self.ptConfig.skipTpsTests: @@ -535,13 +523,13 @@ def main(): ENA = PerformanceTestBasic.ClusterConfig.ExtraNodeosArgs chainPluginArgs = ENA.ChainPluginArgs(signatureCpuBillablePct=args.signature_cpu_billable_pct, chainStateDbSizeMb=args.chain_state_db_size_mb, - chainThreads=args.chain_threads, databaseMapMode=args.database_map_mode) + threads=args.chain_threads, databaseMapMode=args.database_map_mode) producerPluginArgs = ENA.ProducerPluginArgs(disableSubjectiveBilling=args.disable_subjective_billing, lastBlockTimeOffsetUs=args.last_block_time_offset_us, produceTimeOffsetUs=args.produce_time_offset_us, cpuEffortPercent=args.cpu_effort_percent, lastBlockCpuEffortPercent=args.last_block_cpu_effort_percent, - producerThreads=args.producer_threads) + threads=args.producer_threads) httpPluginArgs = ENA.HttpPluginArgs(httpMaxResponseTimeMs=args.http_max_response_time_ms) - netPluginArgs = ENA.NetPluginArgs(netThreads=args.net_threads) + netPluginArgs = ENA.NetPluginArgs(threads=args.net_threads) extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs) testClusterConfig = PerformanceTestBasic.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 1e4610f8a7..6f195f7a31 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -51,21 +51,21 @@ class ExtraNodeosArgs: class ChainPluginArgs: signatureCpuBillablePct: int = 0 chainStateDbSizeMb: int = 10 * 1024 - chainThreads: int = 3 + threads: int = 3 databaseMapMode: str = "mapped" def __str__(self) -> str: return f"--signature-cpu-billable-pct {self.signatureCpuBillablePct} \ --chain-state-db-size-mb {self.chainStateDbSizeMb} \ - --chain-threads {self.chainThreads} \ + --chain-threads {self.threads} \ --database-map-mode {self.databaseMapMode}" @dataclass class NetPluginArgs: - netThreads: int = 2 + threads: int = 2 def __str__(self) -> str: - return f"--net-threads 
{self.netThreads}" + return f"--net-threads {self.threads}" @dataclass class ProducerPluginArgs: @@ -74,7 +74,7 @@ class ProducerPluginArgs: produceTimeOffsetUs: int = 0 cpuEffortPercent: int = 100 lastBlockCpuEffortPercent: int = 100 - producerThreads: int = 6 + threads: int = 6 def __str__(self) -> str: return f"--disable-subjective-billing {self.disableSubjectiveBilling} \ @@ -82,7 +82,7 @@ def __str__(self) -> str: --produce-time-offset-us {self.produceTimeOffsetUs} \ --cpu-effort-percent {self.cpuEffortPercent} \ --last-block-cpu-effort-percent {self.lastBlockCpuEffortPercent} \ - --producer-threads {self.producerThreads}" + --producer-threads {self.threads}" @dataclass class HttpPluginArgs: @@ -480,13 +480,13 @@ def main(): ENA = PerformanceTestBasic.ClusterConfig.ExtraNodeosArgs chainPluginArgs = ENA.ChainPluginArgs(signatureCpuBillablePct=args.signature_cpu_billable_pct, chainStateDbSizeMb=args.chain_state_db_size_mb, - chainThreads=args.chain_threads, databaseMapMode=args.database_map_mode) + threads=args.chain_threads, databaseMapMode=args.database_map_mode) producerPluginArgs = ENA.ProducerPluginArgs(disableSubjectiveBilling=args.disable_subjective_billing, lastBlockTimeOffsetUs=args.last_block_time_offset_us, produceTimeOffsetUs=args.produce_time_offset_us, cpuEffortPercent=args.cpu_effort_percent, lastBlockCpuEffortPercent=args.last_block_cpu_effort_percent, - producerThreads=args.producer_threads) + threads=args.producer_threads) httpPluginArgs = ENA.HttpPluginArgs(httpMaxResponseTimeMs=args.http_max_response_time_ms) - netPluginArgs = ENA.NetPluginArgs(netThreads=args.net_threads) + netPluginArgs = ENA.NetPluginArgs(threads=args.net_threads) extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs) testClusterConfig = PerformanceTestBasic.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs) From d291e17022589485a321c1e6d4b7cefde3a6fd1b Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 30 Nov 2022 11:12:12 -0600 Subject: [PATCH 10/12] Use encoder to handle datetime and None type conversions for reporting. 
--- tests/performance_tests/log_reader.py | 12 ++++++--- tests/performance_tests/performance_test.py | 28 +++++++-------------- 2 files changed, 18 insertions(+), 22 deletions(-) diff --git a/tests/performance_tests/log_reader.py b/tests/performance_tests/log_reader.py index 2fb8341848..4cd550bf45 100644 --- a/tests/performance_tests/log_reader.py +++ b/tests/performance_tests/log_reader.py @@ -376,10 +376,16 @@ def createReport(guide: chainBlocksGuide, tpsTestConfig: TpsTestConfig, tpsStats report['nodeosVersion'] = Utils.getNodeosVersion() return report +class LogReaderEncoder(json.JSONEncoder): + def default(self, obj): + if isinstance(obj, datetime): + return obj.isoformat() + if obj is None: + return "Unknown" + return json.JSONEncoder.default(self, obj) + def reportAsJSON(report: dict) -> json: - report['testStart'] = "Unknown" if report['testStart'] is None else report['testStart'].isoformat() - report['testFinish'] = "Unknown" if report['testFinish'] is None else report['testFinish'].isoformat() - return json.dumps(report, sort_keys=True, indent=2) + return json.dumps(report, sort_keys=True, indent=2, cls=LogReaderEncoder) def calcAndReport(data: chainData, tpsTestConfig: TpsTestConfig, artifacts: ArtifactPaths, argsDict: dict, testStart: datetime=None, completedRun: bool=True) -> dict: scrapeLog(data, artifacts.nodeosLogPath) diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index 91e06169ea..a9f21cb7d0 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -300,26 +300,16 @@ def createReport(self,producerThreadResult: PluginThreadOptResult=None, chainThr report['nodeosVersion'] = Utils.getNodeosVersion() return report + class PtReportEncoder(json.JSONEncoder): + def default(self, obj): + if isinstance(obj, datetime): + return obj.isoformat() + if obj is None: + return "Unknown" + return json.JSONEncoder.default(self, obj) + def reportAsJSON(self, report: dict) -> json: - if 'ProducerThreadAnalysis' in report: - report['ProducerThreadAnalysis']['analysisStart'] = report['ProducerThreadAnalysis']['analysisStart'].isoformat() - report['ProducerThreadAnalysis']['analysisFinish'] = report['ProducerThreadAnalysis']['analysisFinish'].isoformat() - if 'ChainThreadAnalysis' in report: - report['ChainThreadAnalysis']['analysisStart'] = report['ChainThreadAnalysis']['analysisStart'].isoformat() - report['ChainThreadAnalysis']['analysisFinish'] = report['ChainThreadAnalysis']['analysisFinish'].isoformat() - if 'NetThreadAnalysis' in report: - report['NetThreadAnalysis']['analysisStart'] = report['NetThreadAnalysis']['analysisStart'].isoformat() - report['NetThreadAnalysis']['analysisFinish'] = report['NetThreadAnalysis']['analysisFinish'].isoformat() - - if 'tpsTestStart' in report: - report['tpsTestStart'] = report['tpsTestStart'].isoformat() - if 'tpsTestFinish' in report: - report['tpsTestFinish'] = report['tpsTestFinish'].isoformat() - - report['perfTestsBegin'] = report['perfTestsBegin'].isoformat() - report['perfTestsFinish'] = report['perfTestsFinish'].isoformat() - - return json.dumps(report, indent=2) + return json.dumps(report, indent=2, cls=PerformanceTest.PtReportEncoder) def exportReportAsJSON(self, report: json, exportPath): with open(exportPath, 'wt') as f: From 33b94ba2a4c25fde078bf6214689c9dbe66fb770 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 30 Nov 2022 14:43:02 -0600 Subject: [PATCH 11/12] Remove redundant calls to close when using with statement. 
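For reference, a with statement's context manager closes the file when the block exits, whether normally or via an exception, so the explicit close() calls inside the blocks below were redundant. A two-line demonstration:

    with open("results.txt", "w") as log:
        log.write("threads,maxTpsAchieved\n")
    assert log.closed  # __exit__ has already closed the handle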
--- tests/performance_tests/performance_test.py | 2 -- tests/performance_tests/performance_test_basic.py | 2 -- 2 files changed, 4 deletions(-) diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index a9f21cb7d0..7f977b44c2 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -223,7 +223,6 @@ def optimizePluginThreadCount(self, optPlugin: PluginThreadOpt, optType: Plugin with open(resultsFile, 'w') as log: log.write(f"{optPlugin.value}Threads, maxTpsAchieved\n") - log.close() lastMaxTpsAchieved = 0 for threadCount in range(minThreadCount, maxThreadCount+1): @@ -242,7 +241,6 @@ def optimizePluginThreadCount(self, optPlugin: PluginThreadOpt, optType: Plugin with open(resultsFile, 'a') as log: log.write(f"{threadCount},{binSearchResults.maxTpsAchieved}\n") - log.close() if optType == PerformanceTest.PluginThreadOptRunType.LOCAL_MAX: if binSearchResults.maxTpsAchieved <= lastMaxTpsAchieved: diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 6f195f7a31..214cb144ae 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -240,12 +240,10 @@ def queryBlockTrxData(self, node, blockDataPath, blockTrxDataPath, startBlockNum btdf_append_write = self.fileOpenMode(blockTrxDataPath) with open(blockTrxDataPath, btdf_append_write) as trxDataFile: [trxDataFile.write(f"{trx['id']},{trx['block_num']},{trx['cpu_usage_us']},{trx['net_usage_words']}\n") for trx in block['payload']['transactions'] if block['payload']['transactions']] - trxDataFile.close() bdf_append_write = self.fileOpenMode(blockDataPath) with open(blockDataPath, bdf_append_write) as blockDataFile: blockDataFile.write(f"{block['payload']['number']},{block['payload']['id']},{block['payload']['producer']},{block['payload']['status']},{block['payload']['timestamp']}\n") - blockDataFile.close() def waitForEmptyBlocks(self, node, numEmptyToWaitOn): emptyBlocks = 0 From c175d6be97db15670525c6378005e8ca44793d24 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 30 Nov 2022 14:43:26 -0600 Subject: [PATCH 12/12] Reuse encoder from log_reader. --- tests/performance_tests/performance_test.py | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index 7f977b44c2..2be12a1924 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -17,6 +17,7 @@ from dataclasses import dataclass, asdict, field from datetime import datetime from enum import Enum +from log_reader import LogReaderEncoder class PerformanceTest: @@ -298,16 +299,8 @@ def createReport(self,producerThreadResult: PluginThreadOptResult=None, chainThr report['nodeosVersion'] = Utils.getNodeosVersion() return report - class PtReportEncoder(json.JSONEncoder): - def default(self, obj): - if isinstance(obj, datetime): - return obj.isoformat() - if obj is None: - return "Unknown" - return json.JSONEncoder.default(self, obj) - def reportAsJSON(self, report: dict) -> json: - return json.dumps(report, indent=2, cls=PerformanceTest.PtReportEncoder) + return json.dumps(report, indent=2, cls=LogReaderEncoder) def exportReportAsJSON(self, report: json, exportPath): with open(exportPath, 'wt') as f: