From 9986399867ae1c1fea3ef07c32b2cc53484166b1 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 15 Nov 2022 15:16:00 -0600 Subject: [PATCH 001/178] Add support for configuring number of worker threads in controller thread pool (--chain-threads). --- tests/performance_tests/performance_test.py | 3 ++- tests/performance_tests/performance_test_basic.py | 6 ++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index 1ca5d42f1c..7a8766315b 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -219,6 +219,7 @@ def parseArgs(): appArgs.add(flag="--genesis", type=str, help="Path to genesis.json", default="tests/performance_tests/genesis.json") appArgs.add(flag="--num-blocks-to-prune", type=int, help="The number of potentially non-empty blocks, in addition to leading and trailing size 0 blocks, to prune from the beginning and end of the range of blocks of interest for evaluation.", default=2) appArgs.add(flag="--signature-cpu-billable-pct", type=int, help="Percentage of actual signature recovery cpu to bill. Whole number percentages, e.g. 50 for 50%%", default=0) + appArgs.add(flag="--chain-threads", type=int, help="Number of worker threads in controller thread pool", default=2) appArgs.add(flag="--disable-subjective-billing", type=bool, help="Disable subjective CPU billing for API/P2P transactions", default=True) appArgs.add(flag="--last-block-time-offset-us", type=int, help="Offset of last block producing time in microseconds. Valid range 0 .. -block_time_interval.", default=0) appArgs.add(flag="--produce-time-offset-us", type=int, help="Offset of non last block producing time in microseconds. Valid range 0 .. 
-block_time_interval.", default=0) @@ -273,7 +274,7 @@ def main(): dumpErrorDetails=dumpErrorDetails, delay=delay, nodesFile=nodesFile, verbose=verbose) - extraNodeosChainPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.ExtraNodeosChainPluginArgs(signatureCpuBillablePct=args.signature_cpu_billable_pct) + extraNodeosChainPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.ExtraNodeosChainPluginArgs(signatureCpuBillablePct=args.signature_cpu_billable_pct, chainThreads=args.chain_threads) extraNodeosProducerPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.ExtraNodeosProducerPluginArgs(disableSubjectiveBilling=args.disable_subjective_billing, lastBlockTimeOffsetUs=args.last_block_time_offset_us, produceTimeOffsetUs=args.produce_time_offset_us, cpuEffortPercent=args.cpu_effort_percent, lastBlockCpuEffortPercent=args.last_block_cpu_effort_percent) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index b588263466..ab1d6ee118 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -50,9 +50,10 @@ class ExtraNodeosArgs: @dataclass class ExtraNodeosChainPluginArgs: signatureCpuBillablePct: int = 0 + chainThreads: int = 2 def argsStr(self) -> str: - return f"--signature-cpu-billable-pct {self.signatureCpuBillablePct}" + return f"--signature-cpu-billable-pct {self.signatureCpuBillablePct} --chain-threads {self.chainThreads}" @dataclass class ExtraNodeosProducerPluginArgs: @@ -398,6 +399,7 @@ def parseArgs(): appArgs.add(flag="--num-blocks-to-prune", type=int, help=("The number of potentially non-empty blocks, in addition to leading and trailing size 0 blocks, " "to prune from the beginning and end of the range of blocks of interest for evaluation."), default=2) appArgs.add(flag="--signature-cpu-billable-pct", type=int, help="Percentage of actual signature recovery cpu to bill. 
Whole number percentages, e.g. 50 for 50%%", default=0) + appArgs.add(flag="--chain-threads", type=int, help="Number of worker threads in controller thread pool", default=2) appArgs.add(flag="--disable-subjective-billing", type=bool, help="Disable subjective CPU billing for API/P2P transactions", default=True) appArgs.add(flag="--last-block-time-offset-us", type=int, help="Offset of last block producing time in microseconds. Valid range 0 .. -block_time_interval.", default=0) appArgs.add(flag="--produce-time-offset-us", type=int, help="Offset of non last block producing time in microseconds. Valid range 0 .. -block_time_interval.", default=0) @@ -421,7 +423,7 @@ def main(): testHelperConfig = PerformanceBasicTest.TestHelperConfig(killAll=args.clean_run, dontKill=args.leave_running, keepLogs=not args.del_perf_logs, dumpErrorDetails=args.dump_error_details, delay=args.d, nodesFile=args.nodes_file, verbose=args.v) - extraNodeosChainPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.ExtraNodeosChainPluginArgs(signatureCpuBillablePct=args.signature_cpu_billable_pct) + extraNodeosChainPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.ExtraNodeosChainPluginArgs(signatureCpuBillablePct=args.signature_cpu_billable_pct, chainThreads=args.chain_threads) extraNodeosProducerPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.ExtraNodeosProducerPluginArgs(disableSubjectiveBilling=args.disable_subjective_billing, lastBlockTimeOffsetUs=args.last_block_time_offset_us, produceTimeOffsetUs=args.produce_time_offset_us, cpuEffortPercent=args.cpu_effort_percent, lastBlockCpuEffortPercent=args.last_block_cpu_effort_percent) From 138fb0e096fb618fd8103ef40e2459f49186729e Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 15 Nov 2022 15:43:23 -0600 Subject: [PATCH 002/178] Add support for configuring number of worker threads in net_plugin thread pool (--net-threads). 
--- tests/performance_tests/performance_test.py | 4 +++- tests/performance_tests/performance_test_basic.py | 14 ++++++++++++-- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index 7a8766315b..4ee28ffd1e 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -220,6 +220,7 @@ def parseArgs(): appArgs.add(flag="--num-blocks-to-prune", type=int, help="The number of potentially non-empty blocks, in addition to leading and trailing size 0 blocks, to prune from the beginning and end of the range of blocks of interest for evaluation.", default=2) appArgs.add(flag="--signature-cpu-billable-pct", type=int, help="Percentage of actual signature recovery cpu to bill. Whole number percentages, e.g. 50 for 50%%", default=0) appArgs.add(flag="--chain-threads", type=int, help="Number of worker threads in controller thread pool", default=2) + appArgs.add(flag="--net-threads", type=int, help="Number of worker threads in net_plugin thread pool", default=2) appArgs.add(flag="--disable-subjective-billing", type=bool, help="Disable subjective CPU billing for API/P2P transactions", default=True) appArgs.add(flag="--last-block-time-offset-us", type=int, help="Offset of last block producing time in microseconds. Valid range 0 .. -block_time_interval.", default=0) appArgs.add(flag="--produce-time-offset-us", type=int, help="Offset of non last block producing time in microseconds. Valid range 0 .. 
-block_time_interval.", default=0) @@ -279,7 +280,8 @@ def main(): lastBlockTimeOffsetUs=args.last_block_time_offset_us, produceTimeOffsetUs=args.produce_time_offset_us, cpuEffortPercent=args.cpu_effort_percent, lastBlockCpuEffortPercent=args.last_block_cpu_effort_percent) extraNodeosHttpPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.ExtraNodeosHttpPluginArgs(httpMaxResponseTimeMs=args.http_max_response_time_ms) - extraNodeosArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs(chainPluginArgs=extraNodeosChainPluginArgs, httpPluginArgs=extraNodeosHttpPluginArgs, producerPluginArgs=extraNodeosProducerPluginArgs) + extraNodeosNetPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.ExtraNodeosNetPluginArgs(netThreads=args.net_threads) + extraNodeosArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs(chainPluginArgs=extraNodeosChainPluginArgs, httpPluginArgs=extraNodeosHttpPluginArgs, producerPluginArgs=extraNodeosProducerPluginArgs, netPluginArgs=extraNodeosNetPluginArgs) testClusterConfig = PerformanceBasicTest.ClusterConfig(pnodes=pnodes, totalNodes=totalNodes, topo=topo, genesisPath=genesisPath, prodsEnableTraceApi=prodsEnableTraceApi, extraNodeosArgs=extraNodeosArgs) argsDict = prepArgsDict(testDurationSec=testDurationSec, finalDurationSec=finalDurationSec, logsDir=testTimeStampDirPath, diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index ab1d6ee118..3023fc0f3f 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -55,6 +55,13 @@ class ExtraNodeosChainPluginArgs: def argsStr(self) -> str: return f"--signature-cpu-billable-pct {self.signatureCpuBillablePct} --chain-threads {self.chainThreads}" + @dataclass + class ExtraNodeosNetPluginArgs: + netThreads: int = 2 + + def argsStr(self) -> str: + return f"--net-threads {self.netThreads}" + @dataclass class ExtraNodeosProducerPluginArgs: 
disableSubjectiveBilling: bool = True @@ -80,9 +87,10 @@ def argsStr(self) -> str: chainPluginArgs: ExtraNodeosChainPluginArgs = ExtraNodeosChainPluginArgs() producerPluginArgs: ExtraNodeosProducerPluginArgs = ExtraNodeosProducerPluginArgs() httpPluginArgs: ExtraNodeosHttpPluginArgs = ExtraNodeosHttpPluginArgs() + netPluginArgs: ExtraNodeosNetPluginArgs = ExtraNodeosNetPluginArgs() def argsStr(self) -> str: - return f" {self.httpPluginArgs.argsStr()} {self.producerPluginArgs.argsStr()} {self.chainPluginArgs.argsStr()}" + return f" {self.httpPluginArgs.argsStr()} {self.producerPluginArgs.argsStr()} {self.chainPluginArgs.argsStr()} {self.netPluginArgs.argsStr()}" pnodes: int = 1 totalNodes: int = 2 @@ -400,6 +408,7 @@ def parseArgs(): "to prune from the beginning and end of the range of blocks of interest for evaluation."), default=2) appArgs.add(flag="--signature-cpu-billable-pct", type=int, help="Percentage of actual signature recovery cpu to bill. Whole number percentages, e.g. 50 for 50%%", default=0) appArgs.add(flag="--chain-threads", type=int, help="Number of worker threads in controller thread pool", default=2) + appArgs.add(flag="--net-threads", type=int, help="Number of worker threads in net_plugin thread pool", default=2) appArgs.add(flag="--disable-subjective-billing", type=bool, help="Disable subjective CPU billing for API/P2P transactions", default=True) appArgs.add(flag="--last-block-time-offset-us", type=int, help="Offset of last block producing time in microseconds. Valid range 0 .. -block_time_interval.", default=0) appArgs.add(flag="--produce-time-offset-us", type=int, help="Offset of non last block producing time in microseconds. Valid range 0 .. 
-block_time_interval.", default=0) @@ -428,7 +437,8 @@ def main(): lastBlockTimeOffsetUs=args.last_block_time_offset_us, produceTimeOffsetUs=args.produce_time_offset_us, cpuEffortPercent=args.cpu_effort_percent, lastBlockCpuEffortPercent=args.last_block_cpu_effort_percent) extraNodeosHttpPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.ExtraNodeosHttpPluginArgs(httpMaxResponseTimeMs=args.http_max_response_time_ms) - extraNodeosArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs(chainPluginArgs=extraNodeosChainPluginArgs, httpPluginArgs=extraNodeosHttpPluginArgs, producerPluginArgs=extraNodeosProducerPluginArgs) + extraNodeosNetPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.ExtraNodeosNetPluginArgs(netThreads=args.net_threads) + extraNodeosArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs(chainPluginArgs=extraNodeosChainPluginArgs, httpPluginArgs=extraNodeosHttpPluginArgs, producerPluginArgs=extraNodeosProducerPluginArgs, netPluginArgs=extraNodeosNetPluginArgs) testClusterConfig = PerformanceBasicTest.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs) myTest = PerformanceBasicTest(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, targetTps=args.target_tps, From 2afac0031c209e97dc615bde3e9f68b57cf1a84d Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 15 Nov 2022 16:11:24 -0600 Subject: [PATCH 003/178] Add support for configuring number of worker threads in producer thread pool (--producer-threads). 
--- tests/performance_tests/performance_test.py | 3 ++- tests/performance_tests/performance_test_basic.py | 7 +++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index 4ee28ffd1e..e022fff07e 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -226,6 +226,7 @@ def parseArgs(): appArgs.add(flag="--produce-time-offset-us", type=int, help="Offset of non last block producing time in microseconds. Valid range 0 .. -block_time_interval.", default=0) appArgs.add(flag="--cpu-effort-percent", type=int, help="Percentage of cpu block production time used to produce block. Whole number percentages, e.g. 80 for 80%%", default=100) appArgs.add(flag="--last-block-cpu-effort-percent", type=int, help="Percentage of cpu block production time used to produce last block. Whole number percentages, e.g. 80 for 80%%", default=100) + appArgs.add(flag="--producer-threads", type=int, help="Number of worker threads in producer thread pool", default=2) appArgs.add(flag="--http-max-response-time-ms", type=int, help="Maximum time for processing a request, -1 for unlimited", default=990000) appArgs.add_bool(flag="--del-perf-logs", help="Whether to delete performance test specific logs.") appArgs.add_bool(flag="--del-report", help="Whether to delete overarching performance run report.") @@ -278,7 +279,7 @@ def main(): extraNodeosChainPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.ExtraNodeosChainPluginArgs(signatureCpuBillablePct=args.signature_cpu_billable_pct, chainThreads=args.chain_threads) extraNodeosProducerPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.ExtraNodeosProducerPluginArgs(disableSubjectiveBilling=args.disable_subjective_billing, lastBlockTimeOffsetUs=args.last_block_time_offset_us, produceTimeOffsetUs=args.produce_time_offset_us, cpuEffortPercent=args.cpu_effort_percent, - 
lastBlockCpuEffortPercent=args.last_block_cpu_effort_percent) + lastBlockCpuEffortPercent=args.last_block_cpu_effort_percent, producerThreads=args.producer_threads) extraNodeosHttpPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.ExtraNodeosHttpPluginArgs(httpMaxResponseTimeMs=args.http_max_response_time_ms) extraNodeosNetPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.ExtraNodeosNetPluginArgs(netThreads=args.net_threads) extraNodeosArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs(chainPluginArgs=extraNodeosChainPluginArgs, httpPluginArgs=extraNodeosHttpPluginArgs, producerPluginArgs=extraNodeosProducerPluginArgs, netPluginArgs=extraNodeosNetPluginArgs) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 3023fc0f3f..c1e947e5ab 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -69,13 +69,15 @@ class ExtraNodeosProducerPluginArgs: produceTimeOffsetUs: int = 0 cpuEffortPercent: int = 100 lastBlockCpuEffortPercent: int = 100 + producerThreads: int = 2 def argsStr(self) -> str: return f"--disable-subjective-billing {self.disableSubjectiveBilling} \ --last-block-time-offset-us {self.lastBlockTimeOffsetUs} \ --produce-time-offset-us {self.produceTimeOffsetUs} \ --cpu-effort-percent {self.cpuEffortPercent} \ - --last-block-cpu-effort-percent {self.lastBlockCpuEffortPercent}" + --last-block-cpu-effort-percent {self.lastBlockCpuEffortPercent} \ + --producer-threads {self.producerThreads}" @dataclass class ExtraNodeosHttpPluginArgs: @@ -414,6 +416,7 @@ def parseArgs(): appArgs.add(flag="--produce-time-offset-us", type=int, help="Offset of non last block producing time in microseconds. Valid range 0 .. -block_time_interval.", default=0) appArgs.add(flag="--cpu-effort-percent", type=int, help="Percentage of cpu block production time used to produce block. Whole number percentages, e.g. 
80 for 80%%", default=100) appArgs.add(flag="--last-block-cpu-effort-percent", type=int, help="Percentage of cpu block production time used to produce last block. Whole number percentages, e.g. 80 for 80%%", default=100) + appArgs.add(flag="--producer-threads", type=int, help="Number of worker threads in producer thread pool", default=2) appArgs.add(flag="--http-max-response-time-ms", type=int, help="Maximum time for processing a request, -1 for unlimited", default=990000) appArgs.add_bool(flag="--del-perf-logs", help="Whether to delete performance test specific logs.") appArgs.add_bool(flag="--del-report", help="Whether to delete overarching performance run report.") @@ -435,7 +438,7 @@ def main(): extraNodeosChainPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.ExtraNodeosChainPluginArgs(signatureCpuBillablePct=args.signature_cpu_billable_pct, chainThreads=args.chain_threads) extraNodeosProducerPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.ExtraNodeosProducerPluginArgs(disableSubjectiveBilling=args.disable_subjective_billing, lastBlockTimeOffsetUs=args.last_block_time_offset_us, produceTimeOffsetUs=args.produce_time_offset_us, cpuEffortPercent=args.cpu_effort_percent, - lastBlockCpuEffortPercent=args.last_block_cpu_effort_percent) + lastBlockCpuEffortPercent=args.last_block_cpu_effort_percent, producerThreads=args.producer_threads) extraNodeosHttpPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.ExtraNodeosHttpPluginArgs(httpMaxResponseTimeMs=args.http_max_response_time_ms) extraNodeosNetPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.ExtraNodeosNetPluginArgs(netThreads=args.net_threads) extraNodeosArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs(chainPluginArgs=extraNodeosChainPluginArgs, httpPluginArgs=extraNodeosHttpPluginArgs, producerPluginArgs=extraNodeosProducerPluginArgs, netPluginArgs=extraNodeosNetPluginArgs) From 8fd065c0cff545cdb3ad9473cd540c3d41eb344f Mon Sep 17 00:00:00 2001 
From: Peter Oschwald Date: Wed, 16 Nov 2022 08:34:12 -0600 Subject: [PATCH 004/178] Update docs for new arguments. --- tests/performance_tests/README.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tests/performance_tests/README.md b/tests/performance_tests/README.md index dd26fc6ce5..cacfbb4314 100644 --- a/tests/performance_tests/README.md +++ b/tests/performance_tests/README.md @@ -221,6 +221,10 @@ The Performance Harness main script `performance_test.py` can be configured usin (default: 2) * `--signature-cpu-billable-pct SIGNATURE_CPU_BILLABLE_PCT` Percentage of actual signature recovery cpu to bill. Whole number percentages, e.g. 50 for 50% (default: 0) +* `--chain-threads CHAIN_THREADS` + Number of worker threads in controller thread pool (default: 2) +* `--net-threads NET_THREADS` + Number of worker threads in net_plugin thread pool (default: 2) * `--disable-subjective-billing DISABLE_SUBJECTIVE_BILLING` Disable subjective CPU billing for API/P2P transactions (default: True) * `--last-block-time-offset-us LAST_BLOCK_TIME_OFFSET_US` @@ -231,6 +235,8 @@ The Performance Harness main script `performance_test.py` can be configured usin Percentage of cpu block production time used to produce block. Whole number percentages, e.g. 80 for 80% (default: 100) * `--last-block-cpu-effort-percent LAST_BLOCK_CPU_EFFORT_PERCENT` Percentage of cpu block production time used to produce last block. Whole number percentages, e.g. 80 for 80% (default: 100) +* `--producer-threads PRODUCER_THREADS` + Number of worker threads in producer thread pool (default: 2) * `--http-max-response-time-ms HTTP_MAX_RESPONSE_TIME_MS` Maximum time for processing a request, -1 for unlimited (default: 990000) * `--del-perf-logs` Whether to delete performance test specific logs. (default: False) @@ -274,6 +280,10 @@ The following scripts are typically used by the Performance Harness main script of the range of blocks of interest for evaluation. 
(default: 2) * `--signature-cpu-billable-pct SIGNATURE_CPU_BILLABLE_PCT` Percentage of actual signature recovery cpu to bill. Whole number percentages, e.g. 50 for 50% (default: 0) +* `--chain-threads CHAIN_THREADS` + Number of worker threads in controller thread pool (default: 2) +* `--net-threads NET_THREADS` + Number of worker threads in net_plugin thread pool (default: 2) * `--disable-subjective-billing DISABLE_SUBJECTIVE_BILLING` Disable subjective CPU billing for API/P2P transactions (default: True) * `--last-block-time-offset-us LAST_BLOCK_TIME_OFFSET_US` @@ -284,6 +294,8 @@ The following scripts are typically used by the Performance Harness main script Percentage of cpu block production time used to produce block. Whole number percentages, e.g. 80 for 80% (default: 100) * `--last-block-cpu-effort-percent LAST_BLOCK_CPU_EFFORT_PERCENT` Percentage of cpu block production time used to produce last block. Whole number percentages, e.g. 80 for 80% (default: 100) +* `--producer-threads PRODUCER_THREADS` + Number of worker threads in producer thread pool (default: 2) * `--http-max-response-time-ms HTTP_MAX_RESPONSE_TIME_MS` Maximum time for processing a request, -1 for unlimited (default: 990000) * `--del-perf-logs` Whether to delete performance test specific logs. (default: False) From 4dd551f00f2012e46acf120b2eedc6cbeae657cc Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 16 Nov 2022 15:30:41 -0600 Subject: [PATCH 005/178] Rename to simplify and shorten plugin argument classes. 
--- tests/performance_tests/performance_test.py | 8 +++---- .../performance_test_basic.py | 24 +++++++++---------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index e022fff07e..c8fb592eb7 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -276,12 +276,12 @@ def main(): dumpErrorDetails=dumpErrorDetails, delay=delay, nodesFile=nodesFile, verbose=verbose) - extraNodeosChainPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.ExtraNodeosChainPluginArgs(signatureCpuBillablePct=args.signature_cpu_billable_pct, chainThreads=args.chain_threads) - extraNodeosProducerPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.ExtraNodeosProducerPluginArgs(disableSubjectiveBilling=args.disable_subjective_billing, + extraNodeosChainPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.ChainPluginArgs(signatureCpuBillablePct=args.signature_cpu_billable_pct, chainThreads=args.chain_threads) + extraNodeosProducerPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.ProducerPluginArgs(disableSubjectiveBilling=args.disable_subjective_billing, lastBlockTimeOffsetUs=args.last_block_time_offset_us, produceTimeOffsetUs=args.produce_time_offset_us, cpuEffortPercent=args.cpu_effort_percent, lastBlockCpuEffortPercent=args.last_block_cpu_effort_percent, producerThreads=args.producer_threads) - extraNodeosHttpPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.ExtraNodeosHttpPluginArgs(httpMaxResponseTimeMs=args.http_max_response_time_ms) - extraNodeosNetPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.ExtraNodeosNetPluginArgs(netThreads=args.net_threads) + extraNodeosHttpPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.HttpPluginArgs(httpMaxResponseTimeMs=args.http_max_response_time_ms) + extraNodeosNetPluginArgs = 
PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.NetPluginArgs(netThreads=args.net_threads) extraNodeosArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs(chainPluginArgs=extraNodeosChainPluginArgs, httpPluginArgs=extraNodeosHttpPluginArgs, producerPluginArgs=extraNodeosProducerPluginArgs, netPluginArgs=extraNodeosNetPluginArgs) testClusterConfig = PerformanceBasicTest.ClusterConfig(pnodes=pnodes, totalNodes=totalNodes, topo=topo, genesisPath=genesisPath, prodsEnableTraceApi=prodsEnableTraceApi, extraNodeosArgs=extraNodeosArgs) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index c1e947e5ab..358fdc08b6 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -48,7 +48,7 @@ class ClusterConfig: @dataclass class ExtraNodeosArgs: @dataclass - class ExtraNodeosChainPluginArgs: + class ChainPluginArgs: signatureCpuBillablePct: int = 0 chainThreads: int = 2 @@ -56,14 +56,14 @@ def argsStr(self) -> str: return f"--signature-cpu-billable-pct {self.signatureCpuBillablePct} --chain-threads {self.chainThreads}" @dataclass - class ExtraNodeosNetPluginArgs: + class NetPluginArgs: netThreads: int = 2 def argsStr(self) -> str: return f"--net-threads {self.netThreads}" @dataclass - class ExtraNodeosProducerPluginArgs: + class ProducerPluginArgs: disableSubjectiveBilling: bool = True lastBlockTimeOffsetUs: int = 0 produceTimeOffsetUs: int = 0 @@ -80,16 +80,16 @@ def argsStr(self) -> str: --producer-threads {self.producerThreads}" @dataclass - class ExtraNodeosHttpPluginArgs: + class HttpPluginArgs: httpMaxResponseTimeMs: int = 990000 def argsStr(self) -> str: return f"--http-max-response-time-ms {self.httpMaxResponseTimeMs}" - chainPluginArgs: ExtraNodeosChainPluginArgs = ExtraNodeosChainPluginArgs() - producerPluginArgs: ExtraNodeosProducerPluginArgs = ExtraNodeosProducerPluginArgs() - httpPluginArgs: ExtraNodeosHttpPluginArgs = 
ExtraNodeosHttpPluginArgs() - netPluginArgs: ExtraNodeosNetPluginArgs = ExtraNodeosNetPluginArgs() + chainPluginArgs: ChainPluginArgs = ChainPluginArgs() + producerPluginArgs: ProducerPluginArgs = ProducerPluginArgs() + httpPluginArgs: HttpPluginArgs = HttpPluginArgs() + netPluginArgs: NetPluginArgs = NetPluginArgs() def argsStr(self) -> str: return f" {self.httpPluginArgs.argsStr()} {self.producerPluginArgs.argsStr()} {self.chainPluginArgs.argsStr()} {self.netPluginArgs.argsStr()}" @@ -435,12 +435,12 @@ def main(): testHelperConfig = PerformanceBasicTest.TestHelperConfig(killAll=args.clean_run, dontKill=args.leave_running, keepLogs=not args.del_perf_logs, dumpErrorDetails=args.dump_error_details, delay=args.d, nodesFile=args.nodes_file, verbose=args.v) - extraNodeosChainPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.ExtraNodeosChainPluginArgs(signatureCpuBillablePct=args.signature_cpu_billable_pct, chainThreads=args.chain_threads) - extraNodeosProducerPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.ExtraNodeosProducerPluginArgs(disableSubjectiveBilling=args.disable_subjective_billing, + extraNodeosChainPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.ChainPluginArgs(signatureCpuBillablePct=args.signature_cpu_billable_pct, chainThreads=args.chain_threads) + extraNodeosProducerPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.ProducerPluginArgs(disableSubjectiveBilling=args.disable_subjective_billing, lastBlockTimeOffsetUs=args.last_block_time_offset_us, produceTimeOffsetUs=args.produce_time_offset_us, cpuEffortPercent=args.cpu_effort_percent, lastBlockCpuEffortPercent=args.last_block_cpu_effort_percent, producerThreads=args.producer_threads) - extraNodeosHttpPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.ExtraNodeosHttpPluginArgs(httpMaxResponseTimeMs=args.http_max_response_time_ms) - extraNodeosNetPluginArgs = 
PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.ExtraNodeosNetPluginArgs(netThreads=args.net_threads) + extraNodeosHttpPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.HttpPluginArgs(httpMaxResponseTimeMs=args.http_max_response_time_ms) + extraNodeosNetPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.NetPluginArgs(netThreads=args.net_threads) extraNodeosArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs(chainPluginArgs=extraNodeosChainPluginArgs, httpPluginArgs=extraNodeosHttpPluginArgs, producerPluginArgs=extraNodeosProducerPluginArgs, netPluginArgs=extraNodeosNetPluginArgs) testClusterConfig = PerformanceBasicTest.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs) From e3e688ad512bd6d772a65f78fc83ad6ac8273ec2 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 16 Nov 2022 15:44:42 -0600 Subject: [PATCH 006/178] Refactor to use __str__ function override. 
--- tests/performance_tests/performance_test_basic.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 358fdc08b6..7634ac3f6f 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -52,14 +52,14 @@ class ChainPluginArgs: signatureCpuBillablePct: int = 0 chainThreads: int = 2 - def argsStr(self) -> str: + def __str__(self) -> str: return f"--signature-cpu-billable-pct {self.signatureCpuBillablePct} --chain-threads {self.chainThreads}" @dataclass class NetPluginArgs: netThreads: int = 2 - def argsStr(self) -> str: + def __str__(self) -> str: return f"--net-threads {self.netThreads}" @dataclass @@ -71,7 +71,7 @@ class ProducerPluginArgs: lastBlockCpuEffortPercent: int = 100 producerThreads: int = 2 - def argsStr(self) -> str: + def __str__(self) -> str: return f"--disable-subjective-billing {self.disableSubjectiveBilling} \ --last-block-time-offset-us {self.lastBlockTimeOffsetUs} \ --produce-time-offset-us {self.produceTimeOffsetUs} \ @@ -83,7 +83,7 @@ def argsStr(self) -> str: class HttpPluginArgs: httpMaxResponseTimeMs: int = 990000 - def argsStr(self) -> str: + def __str__(self) -> str: return f"--http-max-response-time-ms {self.httpMaxResponseTimeMs}" chainPluginArgs: ChainPluginArgs = ChainPluginArgs() @@ -91,8 +91,8 @@ def argsStr(self) -> str: httpPluginArgs: HttpPluginArgs = HttpPluginArgs() netPluginArgs: NetPluginArgs = NetPluginArgs() - def argsStr(self) -> str: - return f" {self.httpPluginArgs.argsStr()} {self.producerPluginArgs.argsStr()} {self.chainPluginArgs.argsStr()} {self.netPluginArgs.argsStr()}" + def __str__(self) -> str: + return f" {self.httpPluginArgs} {self.producerPluginArgs} {self.chainPluginArgs} {self.netPluginArgs}" pnodes: int = 1 totalNodes: int = 2 @@ -247,7 +247,7 @@ def launchCluster(self): 
genesisPath=self.clusterConfig.genesisPath, maximumP2pPerHost=self.clusterConfig.maximumP2pPerHost, maximumClients=self.clusterConfig.maximumClients, - extraNodeosArgs=self.clusterConfig.extraNodeosArgs.argsStr(), + extraNodeosArgs=str(self.clusterConfig.extraNodeosArgs), prodsEnableTraceApi=self.clusterConfig.prodsEnableTraceApi, specificExtraNodeosArgs=self.clusterConfig.specificExtraNodeosArgs ) From f852de1a1399e95f0c64a21902333b5d2b0a8d73 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 16 Nov 2022 17:06:24 -0600 Subject: [PATCH 007/178] Add argument to configure --database-map-mode. Use namespace alias to simplify and make easier to read. Rename some variables for brevity. --- tests/performance_tests/performance_test.py | 25 +++++++++++----- .../performance_test_basic.py | 30 +++++++++++++------ 2 files changed, 38 insertions(+), 17 deletions(-) diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index c8fb592eb7..93e292efab 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -220,6 +220,11 @@ def parseArgs(): appArgs.add(flag="--num-blocks-to-prune", type=int, help="The number of potentially non-empty blocks, in addition to leading and trailing size 0 blocks, to prune from the beginning and end of the range of blocks of interest for evaluation.", default=2) appArgs.add(flag="--signature-cpu-billable-pct", type=int, help="Percentage of actual signature recovery cpu to bill. Whole number percentages, e.g. 50 for 50%%", default=0) appArgs.add(flag="--chain-threads", type=int, help="Number of worker threads in controller thread pool", default=2) + appArgs.add(flag="--database-map-mode", type=str, help="Database map mode (\"mapped\", \"heap\", or \"locked\"). \ + In \"mapped\" mode database is memory mapped as a file. \ + In \"heap\" mode database is preloaded in to swappable memory and will use huge pages if available. 
\ + In \"locked\" mode database is preloaded, locked in to memory, and will use huge pages if available.", + choices=["mapped", "heap", "locked"], default="mapped") appArgs.add(flag="--net-threads", type=int, help="Number of worker threads in net_plugin thread pool", default=2) appArgs.add(flag="--disable-subjective-billing", type=bool, help="Disable subjective CPU billing for API/P2P transactions", default=True) appArgs.add(flag="--last-block-time-offset-us", type=int, help="Offset of last block producing time in microseconds. Valid range 0 .. -block_time_interval.", default=0) @@ -276,14 +281,18 @@ def main(): dumpErrorDetails=dumpErrorDetails, delay=delay, nodesFile=nodesFile, verbose=verbose) - extraNodeosChainPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.ChainPluginArgs(signatureCpuBillablePct=args.signature_cpu_billable_pct, chainThreads=args.chain_threads) - extraNodeosProducerPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.ProducerPluginArgs(disableSubjectiveBilling=args.disable_subjective_billing, - lastBlockTimeOffsetUs=args.last_block_time_offset_us, produceTimeOffsetUs=args.produce_time_offset_us, cpuEffortPercent=args.cpu_effort_percent, - lastBlockCpuEffortPercent=args.last_block_cpu_effort_percent, producerThreads=args.producer_threads) - extraNodeosHttpPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.HttpPluginArgs(httpMaxResponseTimeMs=args.http_max_response_time_ms) - extraNodeosNetPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.NetPluginArgs(netThreads=args.net_threads) - extraNodeosArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs(chainPluginArgs=extraNodeosChainPluginArgs, httpPluginArgs=extraNodeosHttpPluginArgs, producerPluginArgs=extraNodeosProducerPluginArgs, netPluginArgs=extraNodeosNetPluginArgs) - testClusterConfig = PerformanceBasicTest.ClusterConfig(pnodes=pnodes, totalNodes=totalNodes, topo=topo, genesisPath=genesisPath, prodsEnableTraceApi=prodsEnableTraceApi, 
extraNodeosArgs=extraNodeosArgs) + ENA = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs + chainPluginArgs = ENA.ChainPluginArgs(signatureCpuBillablePct=args.signature_cpu_billable_pct, + chainThreads=args.chain_threads, databaseMapMode=args.database_map_mode) + producerPluginArgs = ENA.ProducerPluginArgs(disableSubjectiveBilling=args.disable_subjective_billing, + lastBlockTimeOffsetUs=args.last_block_time_offset_us, produceTimeOffsetUs=args.produce_time_offset_us, + cpuEffortPercent=args.cpu_effort_percent, lastBlockCpuEffortPercent=args.last_block_cpu_effort_percent, + producerThreads=args.producer_threads) + httpPluginArgs = ENA.HttpPluginArgs(httpMaxResponseTimeMs=args.http_max_response_time_ms) + netPluginArgs = ENA.NetPluginArgs(netThreads=args.net_threads) + extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs) + testClusterConfig = PerformanceBasicTest.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, + prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs) argsDict = prepArgsDict(testDurationSec=testDurationSec, finalDurationSec=finalDurationSec, logsDir=testTimeStampDirPath, maxTpsToTest=maxTpsToTest, testIterationMinStep=testIterationMinStep, tpsLimitPerGenerator=tpsLimitPerGenerator, diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 7634ac3f6f..ff7b34d58b 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -51,9 +51,12 @@ class ExtraNodeosArgs: class ChainPluginArgs: signatureCpuBillablePct: int = 0 chainThreads: int = 2 + databaseMapMode: str = "mapped" def __str__(self) -> str: - return f"--signature-cpu-billable-pct {self.signatureCpuBillablePct} --chain-threads {self.chainThreads}" + return f"--signature-cpu-billable-pct 
{self.signatureCpuBillablePct} \ + --chain-threads {self.chainThreads} \ + --database-map-mode {self.databaseMapMode}" @dataclass class NetPluginArgs: @@ -410,6 +413,11 @@ def parseArgs(): "to prune from the beginning and end of the range of blocks of interest for evaluation."), default=2) appArgs.add(flag="--signature-cpu-billable-pct", type=int, help="Percentage of actual signature recovery cpu to bill. Whole number percentages, e.g. 50 for 50%%", default=0) appArgs.add(flag="--chain-threads", type=int, help="Number of worker threads in controller thread pool", default=2) + appArgs.add(flag="--database-map-mode", type=str, help="Database map mode (\"mapped\", \"heap\", or \"locked\"). \ + In \"mapped\" mode database is memory mapped as a file. \ + In \"heap\" mode database is preloaded in to swappable memory and will use huge pages if available. \ + In \"locked\" mode database is preloaded, locked in to memory, and will use huge pages if available.", + choices=["mapped", "heap", "locked"], default="mapped") appArgs.add(flag="--net-threads", type=int, help="Number of worker threads in net_plugin thread pool", default=2) appArgs.add(flag="--disable-subjective-billing", type=bool, help="Disable subjective CPU billing for API/P2P transactions", default=True) appArgs.add(flag="--last-block-time-offset-us", type=int, help="Offset of last block producing time in microseconds. Valid range 0 .. 
-block_time_interval.", default=0) @@ -435,14 +443,18 @@ def main(): testHelperConfig = PerformanceBasicTest.TestHelperConfig(killAll=args.clean_run, dontKill=args.leave_running, keepLogs=not args.del_perf_logs, dumpErrorDetails=args.dump_error_details, delay=args.d, nodesFile=args.nodes_file, verbose=args.v) - extraNodeosChainPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.ChainPluginArgs(signatureCpuBillablePct=args.signature_cpu_billable_pct, chainThreads=args.chain_threads) - extraNodeosProducerPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.ProducerPluginArgs(disableSubjectiveBilling=args.disable_subjective_billing, - lastBlockTimeOffsetUs=args.last_block_time_offset_us, produceTimeOffsetUs=args.produce_time_offset_us, cpuEffortPercent=args.cpu_effort_percent, - lastBlockCpuEffortPercent=args.last_block_cpu_effort_percent, producerThreads=args.producer_threads) - extraNodeosHttpPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.HttpPluginArgs(httpMaxResponseTimeMs=args.http_max_response_time_ms) - extraNodeosNetPluginArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs.NetPluginArgs(netThreads=args.net_threads) - extraNodeosArgs = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs(chainPluginArgs=extraNodeosChainPluginArgs, httpPluginArgs=extraNodeosHttpPluginArgs, producerPluginArgs=extraNodeosProducerPluginArgs, netPluginArgs=extraNodeosNetPluginArgs) - testClusterConfig = PerformanceBasicTest.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs) + ENA = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs + chainPluginArgs = ENA.ChainPluginArgs(signatureCpuBillablePct=args.signature_cpu_billable_pct, + chainThreads=args.chain_threads, databaseMapMode=args.database_map_mode) + producerPluginArgs = ENA.ProducerPluginArgs(disableSubjectiveBilling=args.disable_subjective_billing, + 
lastBlockTimeOffsetUs=args.last_block_time_offset_us, produceTimeOffsetUs=args.produce_time_offset_us, + cpuEffortPercent=args.cpu_effort_percent, lastBlockCpuEffortPercent=args.last_block_cpu_effort_percent, + producerThreads=args.producer_threads) + httpPluginArgs = ENA.HttpPluginArgs(httpMaxResponseTimeMs=args.http_max_response_time_ms) + netPluginArgs = ENA.NetPluginArgs(netThreads=args.net_threads) + extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs) + testClusterConfig = PerformanceBasicTest.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, + prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs) myTest = PerformanceBasicTest(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, targetTps=args.target_tps, testTrxGenDurationSec=args.test_duration_sec, tpsLimitPerGenerator=args.tps_limit_per_generator, From 20f7ccf399894790da6e7cbcc7948bdd21526295 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 16 Nov 2022 17:08:17 -0600 Subject: [PATCH 008/178] Update docs for new argument --- tests/performance_tests/README.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tests/performance_tests/README.md b/tests/performance_tests/README.md index cacfbb4314..c0dba4a93f 100644 --- a/tests/performance_tests/README.md +++ b/tests/performance_tests/README.md @@ -223,6 +223,11 @@ The Performance Harness main script `performance_test.py` can be configured usin Percentage of actual signature recovery cpu to bill. Whole number percentages, e.g. 50 for 50% (default: 0) * `--chain-threads CHAIN_THREADS` Number of worker threads in controller thread pool (default: 2) +* `--database-map-mode {mapped,heap,locked}` + Database map mode ("mapped", "heap", or "locked"). + In "mapped" mode database is memory mapped as a file. 
+ In "heap" mode database is preloaded in to swappable memory and will use huge pages if available. + In "locked" mode database is preloaded, locked in to memory, and will use huge pages if available. (default: mapped) * `--net-threads NET_THREADS` Number of worker threads in net_plugin thread pool (default: 2) * `--disable-subjective-billing DISABLE_SUBJECTIVE_BILLING` @@ -282,6 +287,11 @@ The following scripts are typically used by the Performance Harness main script Percentage of actual signature recovery cpu to bill. Whole number percentages, e.g. 50 for 50% (default: 0) * `--chain-threads CHAIN_THREADS` Number of worker threads in controller thread pool (default: 2) +* `--database-map-mode {mapped,heap,locked}` + Database map mode ("mapped", "heap", or "locked"). + In "mapped" mode database is memory mapped as a file. + In "heap" mode database is preloaded in to swappable memory and will use huge pages if available. + In "locked" mode database is preloaded, locked in to memory, and will use huge pages if available. (default: mapped) * `--net-threads NET_THREADS` Number of worker threads in net_plugin thread pool (default: 2) * `--disable-subjective-billing DISABLE_SUBJECTIVE_BILLING` From 90a1cbd5e8e8fecfc2858e7303163a83b9a283e0 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 17 Nov 2022 09:27:30 -0600 Subject: [PATCH 009/178] Add cpu count to top level report. 
--- tests/performance_tests/performance_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index 93e292efab..0d61a513d7 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -151,7 +151,7 @@ def createReport(maxTpsAchieved, searchResults, maxTpsReport, longRunningMaxTpsA report['LongRunningSearchResults'] = {x: asdict(longRunningSearchResults[x]) for x in range(len(longRunningSearchResults))} report['LongRunningMaxTpsReport'] = longRunningMaxTpsReport report['args'] = argsDict - report['env'] = {'system': system(), 'os': os.name, 'release': release()} + report['env'] = {'system': system(), 'os': os.name, 'release': release(), 'logical_cpu_count': os.cpu_count()} report['nodeosVersion'] = Utils.getNodeosVersion() return report From bda146d96e76e93118aa65ec11c3c364131e7083 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 17 Nov 2022 11:45:07 -0600 Subject: [PATCH 010/178] Add target tps number to test log dir name for ease of locating specific run. 
--- tests/performance_tests/performance_test_basic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index fd0f7dc1cd..6c48fad856 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -141,7 +141,7 @@ def __init__(self, testHelperConfig: TestHelperConfig=TestHelperConfig(), cluste self.rootLogDir = rootLogDir self.ptbLogDir = f"{self.rootLogDir}/{os.path.splitext(os.path.basename(__file__))[0]}" - self.testTimeStampDirPath = f"{self.ptbLogDir}/{self.testStart.strftime('%Y-%m-%d_%H-%M-%S')}" + self.testTimeStampDirPath = f"{self.ptbLogDir}/{self.testStart.strftime('%Y-%m-%d_%H-%M-%S')}-{self.targetTps}" self.trxGenLogDirPath = f"{self.testTimeStampDirPath}/trxGenLogs" self.varLogsDirPath = f"{self.testTimeStampDirPath}/var" self.etcLogsDirPath = f"{self.testTimeStampDirPath}/etc" From d35b65c863f05dd04fe7be15421f477528822929 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 18 Nov 2022 14:22:39 -0600 Subject: [PATCH 011/178] Rename to PerformanceTestBasic to better align with script names. Also renaming to use Ptb abbreviation in light of the name change. 
--- tests/performance_tests/performance_test.py | 22 +++++++++---------- .../performance_test_basic.py | 20 ++++++++--------- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index 09738bbcf5..1224c09f36 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -11,7 +11,7 @@ from TestHarness import TestHelper, Utils from TestHarness.TestHelper import AppArgs -from performance_test_basic import PerformanceBasicTest +from performance_test_basic import PerformanceTestBasic from platform import release, system from dataclasses import dataclass, asdict, field from datetime import datetime @@ -44,8 +44,8 @@ class PerfTestSearchResults: searchResults: list = field(default_factory=list) #PerfTestSearchIndivResult list maxTpsReport: dict = field(default_factory=dict) -def performPtbBinarySearch(tpsTestFloor: int, tpsTestCeiling: int, minStep: int, testHelperConfig: PerformanceBasicTest.TestHelperConfig, - testClusterConfig: PerformanceBasicTest.ClusterConfig, testDurationSec: int, tpsLimitPerGenerator: int, +def performPtbBinarySearch(tpsTestFloor: int, tpsTestCeiling: int, minStep: int, testHelperConfig: PerformanceTestBasic.TestHelperConfig, + testClusterConfig: PerformanceTestBasic.ClusterConfig, testDurationSec: int, tpsLimitPerGenerator: int, numAddlBlocksToPrune: int, testLogDir: str, delReport: bool, quiet: bool, delPerfLogs: bool) -> PerfTestSearchResults: floor = tpsTestFloor ceiling = tpsTestCeiling @@ -60,7 +60,7 @@ def performPtbBinarySearch(tpsTestFloor: int, tpsTestCeiling: int, minStep: int, ptbResult = PerfTestBasicResult() scenarioResult = PerfTestSearchIndivResult(success=False, searchTarget=binSearchTarget, searchFloor=floor, searchCeiling=ceiling, basicTestResult=ptbResult) - myTest = PerformanceBasicTest(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, targetTps=binSearchTarget, 
+ myTest = PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, targetTps=binSearchTarget, testTrxGenDurationSec=testDurationSec, tpsLimitPerGenerator=tpsLimitPerGenerator, numAddlBlocksToPrune=numAddlBlocksToPrune, rootLogDir=testLogDir, delReport=delReport, quiet=quiet, delPerfLogs=delPerfLogs) testSuccessful = myTest.runTest() @@ -81,8 +81,8 @@ def performPtbBinarySearch(tpsTestFloor: int, tpsTestCeiling: int, minStep: int, return PerfTestSearchResults(maxTpsAchieved=maxTpsAchieved, searchResults=searchResults, maxTpsReport=maxTpsReport) -def performPtbReverseLinearSearch(tpsInitial: int, step: int, testHelperConfig: PerformanceBasicTest.TestHelperConfig, - testClusterConfig: PerformanceBasicTest.ClusterConfig, testDurationSec: int, tpsLimitPerGenerator: int, +def performPtbReverseLinearSearch(tpsInitial: int, step: int, testHelperConfig: PerformanceTestBasic.TestHelperConfig, + testClusterConfig: PerformanceTestBasic.ClusterConfig, testDurationSec: int, tpsLimitPerGenerator: int, numAddlBlocksToPrune: int, testLogDir: str, delReport: bool, quiet: bool, delPerfLogs: bool) -> PerfTestSearchResults: # Default - Decrementing Max TPS in range [0, tpsInitial] @@ -101,7 +101,7 @@ def performPtbReverseLinearSearch(tpsInitial: int, step: int, testHelperConfig: ptbResult = PerfTestBasicResult() scenarioResult = PerfTestSearchIndivResult(success=False, searchTarget=searchTarget, searchFloor=absFloor, searchCeiling=absCeiling, basicTestResult=ptbResult) - myTest = PerformanceBasicTest(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, targetTps=searchTarget, + myTest = PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, targetTps=searchTarget, testTrxGenDurationSec=testDurationSec, tpsLimitPerGenerator=tpsLimitPerGenerator, numAddlBlocksToPrune=numAddlBlocksToPrune, rootLogDir=testLogDir, delReport=delReport, quiet=quiet, delPerfLogs=delPerfLogs) testSuccessful = myTest.runTest() @@ 
-120,7 +120,7 @@ def performPtbReverseLinearSearch(tpsInitial: int, step: int, testHelperConfig: return PerfTestSearchResults(maxTpsAchieved=maxTpsAchieved, searchResults=searchResults, maxTpsReport=maxTpsReport) -def evaluateSuccess(test: PerformanceBasicTest, testSuccessful: bool, result: PerfTestBasicResult) -> bool: +def evaluateSuccess(test: PerformanceTestBasic, testSuccessful: bool, result: PerfTestBasicResult) -> bool: result.targetTPS = test.targetTps result.expectedTxns = test.expectedTransactionsSent reportDict = test.report @@ -278,11 +278,11 @@ def main(): testDirsSetup(rootLogDir=rootLogDir, testTimeStampDirPath=testTimeStampDirPath, ptbLogsDirPath=ptbLogsDirPath) - testHelperConfig = PerformanceBasicTest.TestHelperConfig(killAll=killAll, dontKill=dontKill, keepLogs=not delPerfLogs, + testHelperConfig = PerformanceTestBasic.TestHelperConfig(killAll=killAll, dontKill=dontKill, keepLogs=not delPerfLogs, dumpErrorDetails=dumpErrorDetails, delay=delay, nodesFile=nodesFile, verbose=verbose) - ENA = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs + ENA = PerformanceTestBasic.ClusterConfig.ExtraNodeosArgs chainPluginArgs = ENA.ChainPluginArgs(signatureCpuBillablePct=args.signature_cpu_billable_pct, chainStateDbSizeMb=args.chain_state_db_size_mb, chainThreads=args.chain_threads, databaseMapMode=args.database_map_mode) producerPluginArgs = ENA.ProducerPluginArgs(disableSubjectiveBilling=args.disable_subjective_billing, @@ -292,7 +292,7 @@ def main(): httpPluginArgs = ENA.HttpPluginArgs(httpMaxResponseTimeMs=args.http_max_response_time_ms) netPluginArgs = ENA.NetPluginArgs(netThreads=args.net_threads) extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs) - testClusterConfig = PerformanceBasicTest.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, + testClusterConfig = PerformanceTestBasic.ClusterConfig(pnodes=args.p, 
totalNodes=args.n, topo=args.s, genesisPath=args.genesis, prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs) argsDict = prepArgsDict(testDurationSec=testDurationSec, finalDurationSec=finalDurationSec, logsDir=testTimeStampDirPath, diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 1b82c9b0b1..f64867cf05 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -19,9 +19,9 @@ from datetime import datetime from math import ceil -class PerformanceBasicTest: +class PerformanceTestBasic: @dataclass - class PbtTpsTestResult: + class PtbTpsTestResult: completedRun: bool = False numGeneratorsUsed: int = 0 targetTpsPerGenList: list = field(default_factory=list) @@ -267,7 +267,7 @@ def setupWalletAndAccounts(self): self.account1PrivKey = self.cluster.accounts[0].activePrivateKey self.account2PrivKey = self.cluster.accounts[1].activePrivateKey - def runTpsTest(self) -> PbtTpsTestResult: + def runTpsTest(self) -> PtbTpsTestResult: completedRun = False self.producerNode = self.cluster.getNode(self.producerNodeId) self.validationNode = self.cluster.getNode(self.validationNodeId) @@ -301,7 +301,7 @@ def runTpsTest(self) -> PbtTpsTestResult: trxSent = self.validationNode.waitForTransactionsInBlockRange(trxSent, self.data.startBlock, blocksToWait) self.data.ceaseBlock = self.validationNode.getHeadBlockNum() - return PerformanceBasicTest.PbtTpsTestResult(completedRun=completedRun, numGeneratorsUsed=tpsTrxGensConfig.numGenerators, + return PerformanceTestBasic.PtbTpsTestResult(completedRun=completedRun, numGeneratorsUsed=tpsTrxGensConfig.numGenerators, targetTpsPerGenList=tpsTrxGensConfig.targetTpsPerGenList, trxGenExitCodes=trxGenExitCodes) def prepArgs(self) -> dict: @@ -334,7 +334,7 @@ def captureLowLevelArtifacts(self): print(f"Failed to move '{etcEosioDir}/{path}' to '{self.etcEosioLogsDirPath}/{path}': {type(e)}: 
{e}") - def analyzeResultsAndReport(self, testResult: PbtTpsTestResult): + def analyzeResultsAndReport(self, testResult: PtbTpsTestResult): args = self.prepArgs() artifactsLocate = log_reader.ArtifactPaths(nodeosLogPath=self.nodeosLogPath, trxGenLogDirPath=self.trxGenLogDirPath, blockTrxDataPath=self.blockTrxDataPath, blockDataPath=self.blockDataPath) @@ -386,7 +386,7 @@ def runTest(self) -> bool: testSuccessful = self.ptbTestResult.completedRun - if not self.PbtTpsTestResult.completedRun: + if not self.ptbTestResult.completedRun: for exitCode in self.ptbTestResult.trxGenExitCodes: if exitCode != 0: print(f"Error: Transaction Generator exited with error {exitCode}") @@ -458,10 +458,10 @@ def main(): args = parseArgs() Utils.Debug = args.v - testHelperConfig = PerformanceBasicTest.TestHelperConfig(killAll=args.clean_run, dontKill=args.leave_running, keepLogs=not args.del_perf_logs, + testHelperConfig = PerformanceTestBasic.TestHelperConfig(killAll=args.clean_run, dontKill=args.leave_running, keepLogs=not args.del_perf_logs, dumpErrorDetails=args.dump_error_details, delay=args.d, nodesFile=args.nodes_file, verbose=args.v) - ENA = PerformanceBasicTest.ClusterConfig.ExtraNodeosArgs + ENA = PerformanceTestBasic.ClusterConfig.ExtraNodeosArgs chainPluginArgs = ENA.ChainPluginArgs(signatureCpuBillablePct=args.signature_cpu_billable_pct, chainStateDbSizeMb=args.chain_state_db_size_mb, chainThreads=args.chain_threads, databaseMapMode=args.database_map_mode) producerPluginArgs = ENA.ProducerPluginArgs(disableSubjectiveBilling=args.disable_subjective_billing, @@ -471,10 +471,10 @@ def main(): httpPluginArgs = ENA.HttpPluginArgs(httpMaxResponseTimeMs=args.http_max_response_time_ms) netPluginArgs = ENA.NetPluginArgs(netThreads=args.net_threads) extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs) - testClusterConfig = PerformanceBasicTest.ClusterConfig(pnodes=args.p, 
totalNodes=args.n, topo=args.s, genesisPath=args.genesis, + testClusterConfig = PerformanceTestBasic.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs) - myTest = PerformanceBasicTest(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, targetTps=args.target_tps, + myTest = PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, targetTps=args.target_tps, testTrxGenDurationSec=args.test_duration_sec, tpsLimitPerGenerator=args.tps_limit_per_generator, numAddlBlocksToPrune=args.num_blocks_to_prune, delReport=args.del_report, quiet=args.quiet, delPerfLogs=args.del_perf_logs) From d4b31f341c7e91f803c65139aa8e147a243c33c4 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 18 Nov 2022 16:58:47 -0600 Subject: [PATCH 012/178] Factor out test params into PtbConfig and LoggingConfig objects. Using PtbConfig and LoggingConfig objects simplifies test initializer params and encapsulates related config items into an object describing their use. It also allows easy reporting of test configuration in test report and will allow reporting to come for free if additional config arguments are added. Some renaming for efficiency and clarity. 
--- tests/performance_tests/performance_test.py | 21 ++-- .../performance_test_basic.py | 109 ++++++++++-------- 2 files changed, 72 insertions(+), 58 deletions(-) diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index 1224c09f36..62b66e68c5 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -59,10 +59,10 @@ def performPtbBinarySearch(tpsTestFloor: int, tpsTestCeiling: int, minStep: int, print(f"Running scenario: floor {floor} binSearchTarget {binSearchTarget} ceiling {ceiling}") ptbResult = PerfTestBasicResult() scenarioResult = PerfTestSearchIndivResult(success=False, searchTarget=binSearchTarget, searchFloor=floor, searchCeiling=ceiling, basicTestResult=ptbResult) + ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=binSearchTarget, testTrxGenDurationSec=testDurationSec, tpsLimitPerGenerator=tpsLimitPerGenerator, + numAddlBlocksToPrune=numAddlBlocksToPrune, logDirRoot=testLogDir, delReport=delReport, quiet=quiet, delPerfLogs=delPerfLogs) - myTest = PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, targetTps=binSearchTarget, - testTrxGenDurationSec=testDurationSec, tpsLimitPerGenerator=tpsLimitPerGenerator, - numAddlBlocksToPrune=numAddlBlocksToPrune, rootLogDir=testLogDir, delReport=delReport, quiet=quiet, delPerfLogs=delPerfLogs) + myTest = PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptbConfig=ptbConfig) testSuccessful = myTest.runTest() if evaluateSuccess(myTest, testSuccessful, ptbResult): maxTpsAchieved = binSearchTarget @@ -100,10 +100,10 @@ def performPtbReverseLinearSearch(tpsInitial: int, step: int, testHelperConfig: print(f"Running scenario: floor {absFloor} searchTarget {searchTarget} ceiling {absCeiling}") ptbResult = PerfTestBasicResult() scenarioResult = PerfTestSearchIndivResult(success=False, searchTarget=searchTarget, searchFloor=absFloor, 
searchCeiling=absCeiling, basicTestResult=ptbResult) + ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=searchTarget, testTrxGenDurationSec=testDurationSec, tpsLimitPerGenerator=tpsLimitPerGenerator, + numAddlBlocksToPrune=numAddlBlocksToPrune, logDirRoot=testLogDir, delReport=delReport, quiet=quiet, delPerfLogs=delPerfLogs) - myTest = PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, targetTps=searchTarget, - testTrxGenDurationSec=testDurationSec, tpsLimitPerGenerator=tpsLimitPerGenerator, - numAddlBlocksToPrune=numAddlBlocksToPrune, rootLogDir=testLogDir, delReport=delReport, quiet=quiet, delPerfLogs=delPerfLogs) + myTest = PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptbConfig=ptbConfig) testSuccessful = myTest.runTest() if evaluateSuccess(myTest, testSuccessful, ptbResult): maxTpsAchieved = searchTarget @@ -121,8 +121,8 @@ def performPtbReverseLinearSearch(tpsInitial: int, step: int, testHelperConfig: return PerfTestSearchResults(maxTpsAchieved=maxTpsAchieved, searchResults=searchResults, maxTpsReport=maxTpsReport) def evaluateSuccess(test: PerformanceTestBasic, testSuccessful: bool, result: PerfTestBasicResult) -> bool: - result.targetTPS = test.targetTps - result.expectedTxns = test.expectedTransactionsSent + result.targetTPS = test.ptbConfig.targetTps + result.expectedTxns = test.ptbConfig.expectedTransactionsSent reportDict = test.report result.testStart = reportDict["testStart"] result.testEnd = reportDict["testFinish"] @@ -134,7 +134,7 @@ def evaluateSuccess(test: PerformanceTestBasic, testSuccessful: bool, result: Pe result.trxExpectMet = result.expectedTxns == result.resultTxns result.basicTestSuccess = testSuccessful result.testAnalysisBlockCnt = reportDict["Analysis"]["BlocksGuide"]["testAnalysisBlockCnt"] - result.logsDir = test.testTimeStampDirPath + result.logsDir = test.loggingConfig.logDirPath print(f"basicTestSuccess: {result.basicTestSuccess} 
tpsExpectationMet: {result.tpsExpectMet} trxExpectationMet: {result.trxExpectMet}") @@ -201,7 +201,7 @@ def createArtifactsDir(path): def prepArgsDict(testDurationSec, finalDurationSec, logsDir, maxTpsToTest, testIterationMinStep, tpsLimitPerGenerator, delReport, delTestReport, numAddlBlocksToPrune, quiet, delPerfLogs, - testHelperConfig, testClusterConfig) -> dict: + testHelperConfig: PerformanceTestBasic.TestHelperConfig, testClusterConfig: PerformanceTestBasic.ClusterConfig) -> dict: argsDict = {} argsDict.update(asdict(testHelperConfig)) argsDict.update(asdict(testClusterConfig)) @@ -294,7 +294,6 @@ def main(): extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs) testClusterConfig = PerformanceTestBasic.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs) - argsDict = prepArgsDict(testDurationSec=testDurationSec, finalDurationSec=finalDurationSec, logsDir=testTimeStampDirPath, maxTpsToTest=maxTpsToTest, testIterationMinStep=testIterationMinStep, tpsLimitPerGenerator=tpsLimitPerGenerator, delReport=delReport, delTestReport=delTestReport, numAddlBlocksToPrune=numAddlBlocksToPrune, diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index f64867cf05..8ffe58ed3e 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -117,21 +117,37 @@ def __post_init__(self): if not self.prodsEnableTraceApi: self.specificExtraNodeosArgs.update({f"{node}" : "--plugin eosio::trace_api_plugin" for node in range(self.pnodes, self._totalNodes)}) - def __init__(self, testHelperConfig: TestHelperConfig=TestHelperConfig(), clusterConfig: ClusterConfig=ClusterConfig(), targetTps: int=8000, - testTrxGenDurationSec: int=30, tpsLimitPerGenerator: 
int=4000, numAddlBlocksToPrune: int=2, - rootLogDir: str=".", delReport: bool=False, quiet: bool=False, delPerfLogs: bool=False): + @dataclass + class PtbConfig: + targetTps: int=8000 + testTrxGenDurationSec: int=30 + tpsLimitPerGenerator: int=4000 + numAddlBlocksToPrune: int=2 + logDirRoot: str="." + delReport: bool=False + quiet: bool=False + delPerfLogs: bool=False + expectedTransactionsSent: int = field(default_factory=int, init=False) + + def __post_init__(self): + self.expectedTransactionsSent = self.testTrxGenDurationSec * self.targetTps + + @dataclass + class LoggingConfig: + logDirBase: str = f"./{os.path.splitext(os.path.basename(__file__))[0]}" + logDirTimestamp: str = f"{datetime.utcnow().strftime('%Y-%m-%d_%H-%M-%S')}" + logDirTimestampedOptSuffix: str = "" + logDirPath: str = field(default_factory=str, init=False) + + def __post_init__(self): + self.logDirPath = f"{self.logDirBase}/{self.logDirTimestamp}{self.logDirTimestampedOptSuffix}" + + def __init__(self, testHelperConfig: TestHelperConfig=TestHelperConfig(), clusterConfig: ClusterConfig=ClusterConfig(), ptbConfig=PtbConfig()): self.testHelperConfig = testHelperConfig self.clusterConfig = clusterConfig - self.targetTps = targetTps - self.testTrxGenDurationSec = testTrxGenDurationSec - self.tpsLimitPerGenerator = tpsLimitPerGenerator - self.expectedTransactionsSent = self.testTrxGenDurationSec * self.targetTps - self.numAddlBlocksToPrune = numAddlBlocksToPrune - self.delReport = delReport - self.quiet = quiet - self.delPerfLogs=delPerfLogs + self.ptbConfig = ptbConfig - self.testHelperConfig.keepLogs = not self.delPerfLogs + self.testHelperConfig.keepLogs = not self.ptbConfig.delPerfLogs Utils.Debug = self.testHelperConfig.verbose self.errorExit = Utils.errorExit @@ -139,17 +155,18 @@ def __init__(self, testHelperConfig: TestHelperConfig=TestHelperConfig(), cluste self.testStart = datetime.utcnow() - self.rootLogDir = rootLogDir - self.ptbLogDir = 
f"{self.rootLogDir}/{os.path.splitext(os.path.basename(__file__))[0]}" - self.testTimeStampDirPath = f"{self.ptbLogDir}/{self.testStart.strftime('%Y-%m-%d_%H-%M-%S')}-{self.targetTps}" - self.trxGenLogDirPath = f"{self.testTimeStampDirPath}/trxGenLogs" - self.varLogsDirPath = f"{self.testTimeStampDirPath}/var" - self.etcLogsDirPath = f"{self.testTimeStampDirPath}/etc" + self.loggingConfig = PerformanceTestBasic.LoggingConfig(logDirBase=f"{self.ptbConfig.logDirRoot}/{os.path.splitext(os.path.basename(__file__))[0]}", + logDirTimestamp=f"{self.testStart.strftime('%Y-%m-%d_%H-%M-%S')}", + logDirTimestampedOptSuffix = f"-{self.ptbConfig.targetTps}") + + self.trxGenLogDirPath = f"{self.loggingConfig.logDirPath}/trxGenLogs" + self.varLogsDirPath = f"{self.loggingConfig.logDirPath}/var" + self.etcLogsDirPath = f"{self.loggingConfig.logDirPath}/etc" self.etcEosioLogsDirPath = f"{self.etcLogsDirPath}/eosio" - self.blockDataLogDirPath = f"{self.testTimeStampDirPath}/blockDataLogs" + self.blockDataLogDirPath = f"{self.loggingConfig.logDirPath}/blockDataLogs" self.blockDataPath = f"{self.blockDataLogDirPath}/blockData.txt" self.blockTrxDataPath = f"{self.blockDataLogDirPath}/blockTrxData.txt" - self.reportPath = f"{self.testTimeStampDirPath}/data.json" + self.reportPath = f"{self.loggingConfig.logDirPath}/data.json" # Setup Expectations for Producer and Validation Node IDs # Producer Nodes are index [0, pnodes) and validation nodes/non-producer nodes [pnodes, _totalNodes) @@ -186,7 +203,7 @@ def removeAllArtifactsExceptFinalReport(): if not delReport: removeAllArtifactsExceptFinalReport() else: - removeArtifacts(self.testTimeStampDirPath) + removeArtifacts(self.loggingConfig.logDirPath) except OSError as error: print(error) @@ -198,9 +215,9 @@ def createArtifactsDir(path): print(f"Creating test artifacts dir: {path}") os.mkdir(f"{path}") - createArtifactsDir(self.rootLogDir) - createArtifactsDir(self.ptbLogDir) - createArtifactsDir(self.testTimeStampDirPath) + 
createArtifactsDir(self.ptbConfig.logDirRoot) + createArtifactsDir(self.loggingConfig.logDirBase) + createArtifactsDir(self.loggingConfig.logDirPath) createArtifactsDir(self.trxGenLogDirPath) createArtifactsDir(self.varLogsDirPath) createArtifactsDir(self.etcLogsDirPath) @@ -279,10 +296,10 @@ def runTpsTest(self) -> PtbTpsTestResult: self.cluster.biosNode.kill(signal.SIGTERM) self.data.startBlock = self.waitForEmptyBlocks(self.validationNode, self.emptyBlockGoal) - tpsTrxGensConfig = ltg.TpsTrxGensConfig(targetTps=self.targetTps, tpsLimitPerGenerator=self.tpsLimitPerGenerator) + tpsTrxGensConfig = ltg.TpsTrxGensConfig(targetTps=self.ptbConfig.targetTps, tpsLimitPerGenerator=self.ptbConfig.tpsLimitPerGenerator) trxGenLauncher = ltg.TransactionGeneratorsLauncher(chainId=chainId, lastIrreversibleBlockId=lib_id, handlerAcct=self.cluster.eosioAccount.name, accts=f"{self.account1Name},{self.account2Name}", - privateKeys=f"{self.account1PrivKey},{self.account2PrivKey}", trxGenDurationSec=self.testTrxGenDurationSec, + privateKeys=f"{self.account1PrivKey},{self.account2PrivKey}", trxGenDurationSec=self.ptbConfig.testTrxGenDurationSec, logDir=self.trxGenLogDirPath, tpsTrxGensConfig=tpsTrxGensConfig) trxGenExitCodes = trxGenLauncher.launch() @@ -296,8 +313,8 @@ def runTpsTest(self) -> PtbTpsTestResult: # Get stats after transaction generation stops trxSent = {} - log_reader.scrapeTrxGenTrxSentDataLogs(trxSent, self.trxGenLogDirPath, self.quiet) - blocksToWait = 2 * self.testTrxGenDurationSec + 10 + log_reader.scrapeTrxGenTrxSentDataLogs(trxSent, self.trxGenLogDirPath, self.ptbConfig.quiet) + blocksToWait = 2 * self.ptbConfig.testTrxGenDurationSec + 10 trxSent = self.validationNode.waitForTransactionsInBlockRange(trxSent, self.data.startBlock, blocksToWait) self.data.ceaseBlock = self.validationNode.getHeadBlockNum() @@ -308,8 +325,8 @@ def prepArgs(self) -> dict: args = {} args.update(asdict(self.testHelperConfig)) args.update(asdict(self.clusterConfig)) - 
args.update({key:val for key, val in inspect.getmembers(self) if key in set(['targetTps', 'testTrxGenDurationSec', 'tpsLimitPerGenerator', - 'expectedTransactionsSent', 'delReport', 'numAddlBlocksToPrune', 'quiet', 'delPerfLogs'])}) + args.update(asdict(self.ptbConfig)) + args.update(asdict(self.loggingConfig)) return args def captureLowLevelArtifacts(self): @@ -338,22 +355,22 @@ def analyzeResultsAndReport(self, testResult: PtbTpsTestResult): args = self.prepArgs() artifactsLocate = log_reader.ArtifactPaths(nodeosLogPath=self.nodeosLogPath, trxGenLogDirPath=self.trxGenLogDirPath, blockTrxDataPath=self.blockTrxDataPath, blockDataPath=self.blockDataPath) - tpsTestConfig = log_reader.TpsTestConfig(targetTps=self.targetTps, testDurationSec=self.testTrxGenDurationSec, tpsLimitPerGenerator=self.tpsLimitPerGenerator, - numBlocksToPrune=self.numAddlBlocksToPrune, numTrxGensUsed=testResult.numGeneratorsUsed, - targetTpsPerGenList=testResult.targetTpsPerGenList, quiet=self.quiet) + tpsTestConfig = log_reader.TpsTestConfig(targetTps=self.ptbConfig.targetTps, testDurationSec=self.ptbConfig.testTrxGenDurationSec, tpsLimitPerGenerator=self.ptbConfig.tpsLimitPerGenerator, + numBlocksToPrune=self.ptbConfig.numAddlBlocksToPrune, numTrxGensUsed=testResult.numGeneratorsUsed, + targetTpsPerGenList=testResult.targetTpsPerGenList, quiet=self.ptbConfig.quiet) self.report = log_reader.calcAndReport(data=self.data, tpsTestConfig=tpsTestConfig, artifacts=artifactsLocate, argsDict=args, testStart=self.testStart, completedRun=testResult.completedRun) jsonReport = None - if not self.quiet or not self.delReport: + if not self.ptbConfig.quiet or not self.ptbConfig.delReport: jsonReport = log_reader.reportAsJSON(self.report) - if not self.quiet: + if not self.ptbConfig.quiet: print(self.data) print(f"Report:\n{jsonReport}") - if not self.delReport: + if not self.ptbConfig.delReport: log_reader.exportReportAsJSON(jsonReport, self.reportPath) def preTestSpinup(self): @@ -391,9 +408,9 @@ def 
runTest(self) -> bool: if exitCode != 0: print(f"Error: Transaction Generator exited with error {exitCode}") - if testSuccessful and self.expectedTransactionsSent != self.data.totalTransactions: + if testSuccessful and self.ptbConfig.expectedTransactionsSent != self.data.totalTransactions: testSuccessful = False - print(f"Error: Transactions received: {self.data.totalTransactions} did not match expected total: {self.expectedTransactionsSent}") + print(f"Error: Transactions received: {self.data.totalTransactions} did not match expected total: {self.ptbConfig.expectedTransactionsSent}") finally: TestHelper.shutdown( @@ -407,16 +424,16 @@ def runTest(self) -> bool: self.testHelperConfig.dumpErrorDetails ) - if not self.delPerfLogs: + if not self.ptbConfig.delPerfLogs: self.captureLowLevelArtifacts() if not completedRun: os.system("pkill trx_generator") print("Test run cancelled early via SIGINT") - if self.delPerfLogs: - print(f"Cleaning up logs directory: {self.testTimeStampDirPath}") - self.testDirsCleanup(self.delReport) + if self.ptbConfig.delPerfLogs: + print(f"Cleaning up logs directory: {self.loggingConfig.logDirPath}") + self.testDirsCleanup(self.ptbConfig.delReport) return testSuccessful @@ -473,11 +490,9 @@ def main(): extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs) testClusterConfig = PerformanceTestBasic.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs) - - myTest = PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, targetTps=args.target_tps, - testTrxGenDurationSec=args.test_duration_sec, tpsLimitPerGenerator=args.tps_limit_per_generator, - numAddlBlocksToPrune=args.num_blocks_to_prune, delReport=args.del_report, quiet=args.quiet, - delPerfLogs=args.del_perf_logs) + ptbConfig = 
PerformanceTestBasic.PtbConfig(targetTps=args.target_tps, testTrxGenDurationSec=args.test_duration_sec, tpsLimitPerGenerator=args.tps_limit_per_generator, + numAddlBlocksToPrune=args.num_blocks_to_prune, logDirRoot=".", delReport=args.del_report, quiet=args.quiet, delPerfLogs=args.del_perf_logs) + myTest = PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptbConfig=ptbConfig) testSuccessful = myTest.runTest() exitCode = 0 if testSuccessful else 1 From 22b0118aaeb07eca02b41e73b2e5205471b752cd Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 21 Nov 2022 16:36:14 -0600 Subject: [PATCH 013/178] Factor PerformanceTest out into its own class. Break out config and logging classes. Update documentation. --- tests/performance_tests/README.md | 256 +++++---- tests/performance_tests/performance_test.py | 570 ++++++++++---------- 2 files changed, 419 insertions(+), 407 deletions(-) diff --git a/tests/performance_tests/README.md b/tests/performance_tests/README.md index e7609d8d89..c1935f191d 100644 --- a/tests/performance_tests/README.md +++ b/tests/performance_tests/README.md @@ -450,9 +450,9 @@ Finally, the full detail test report for each of the determined max TPS throughp ``` json { "InitialMaxTpsAchieved": 16000, - "LongRunningMaxTpsAchieved": 15000, - "testStart": "2022-11-04T19:31:40.539240", - "testFinish": "2022-11-04T19:48:53.096915", + "LongRunningMaxTpsAchieved": 16000, + "testStart": "2022-11-21T22:17:03.604928", + "testFinish": "2022-11-21T22:29:02.923633", "InitialSearchResults": { "0": { "success": false, @@ -461,16 +461,16 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchCeiling": 50000, "basicTestResult": { "targetTPS": 50000, - "resultAvgTps": 15312.09090909091, + "resultAvgTps": 15121.925, "expectedTxns": 500000, - "resultTxns": 362075, + "resultTxns": 326102, "tpsExpectMet": false, "trxExpectMet": false, - "basicTestSuccess": true, - "testAnalysisBlockCnt": 45, - 
"logsDir": "performance_test/2022-11-04_19-31-40/testRunLogs/performance_test_basic/2022-11-04_19-31-40", - "testStart": "2022-11-04T19:31:40.539927", - "testEnd": "2022-11-04T19:33:16.377065" + "basicTestSuccess": false, + "testAnalysisBlockCnt": 41, + "logsDir": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-17-03-50000", + "testStart": "2022-11-21T22:17:03.624828", + "testEnd": "2022-11-21T22:18:35.048631" } }, "1": { @@ -480,16 +480,16 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchCeiling": 49500, "basicTestResult": { "targetTPS": 25000, - "resultAvgTps": 15098.241379310344, + "resultAvgTps": 15307.275862068966, "expectedTxns": 250000, "resultTxns": 250000, "tpsExpectMet": false, "trxExpectMet": true, "basicTestSuccess": true, "testAnalysisBlockCnt": 30, - "logsDir": "performance_test/2022-11-04_19-31-40/testRunLogs/performance_test_basic/2022-11-04_19-33-16", - "testStart": "2022-11-04T19:33:16.471198", - "testEnd": "2022-11-04T19:34:45.441319" + "logsDir": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-18-35-25000", + "testStart": "2022-11-21T22:18:35.136441", + "testEnd": "2022-11-21T22:20:02.355919" } }, "2": { @@ -499,16 +499,16 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchCeiling": 24500, "basicTestResult": { "targetTPS": 12500, - "resultAvgTps": 12500.0625, + "resultAvgTps": 12494.4375, "expectedTxns": 125000, "resultTxns": 125000, "tpsExpectMet": true, "trxExpectMet": true, "basicTestSuccess": true, "testAnalysisBlockCnt": 17, - "logsDir": "performance_test/2022-11-04_19-31-40/testRunLogs/performance_test_basic/2022-11-04_19-34-45", - "testStart": "2022-11-04T19:34:45.507994", - "testEnd": "2022-11-04T19:36:01.234060" + "logsDir": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-20-02-12500", + "testStart": "2022-11-21T22:20:02.419664", + 
"testEnd": "2022-11-21T22:21:17.334219" } }, "3": { @@ -518,16 +518,16 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchCeiling": 24500, "basicTestResult": { "targetTPS": 19000, - "resultAvgTps": 15454.0, + "resultAvgTps": 15546.55, "expectedTxns": 190000, "resultTxns": 190000, "tpsExpectMet": false, "trxExpectMet": true, "basicTestSuccess": true, - "testAnalysisBlockCnt": 22, - "logsDir": "performance_test/2022-11-04_19-31-40/testRunLogs/performance_test_basic/2022-11-04_19-36-01", - "testStart": "2022-11-04T19:36:01.277926", - "testEnd": "2022-11-04T19:37:23.028124" + "testAnalysisBlockCnt": 21, + "logsDir": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-21-17-19000", + "testStart": "2022-11-21T22:21:17.380653", + "testEnd": "2022-11-21T22:22:37.113095" } }, "4": { @@ -537,16 +537,16 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchCeiling": 18500, "basicTestResult": { "targetTPS": 16000, - "resultAvgTps": 15900.625, + "resultAvgTps": 15969.375, "expectedTxns": 160000, "resultTxns": 160000, "tpsExpectMet": true, "trxExpectMet": true, "basicTestSuccess": true, "testAnalysisBlockCnt": 17, - "logsDir": "performance_test/2022-11-04_19-31-40/testRunLogs/performance_test_basic/2022-11-04_19-37-23", - "testStart": "2022-11-04T19:37:23.085923", - "testEnd": "2022-11-04T19:38:41.744418" + "logsDir": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-22-37-16000", + "testStart": "2022-11-21T22:22:37.166645", + "testEnd": "2022-11-21T22:23:51.349987" } }, "5": { @@ -556,16 +556,16 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchCeiling": 18500, "basicTestResult": { "targetTPS": 17500, - "resultAvgTps": 15271.526315789473, + "resultAvgTps": 15048.263157894737, "expectedTxns": 175000, "resultTxns": 175000, "tpsExpectMet": false, "trxExpectMet": true, "basicTestSuccess": 
true, "testAnalysisBlockCnt": 20, - "logsDir": "performance_test/2022-11-04_19-31-40/testRunLogs/performance_test_basic/2022-11-04_19-38-41", - "testStart": "2022-11-04T19:38:41.796745", - "testEnd": "2022-11-04T19:40:02.097920" + "logsDir": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-23-51-17500", + "testStart": "2022-11-21T22:23:51.399539", + "testEnd": "2022-11-21T22:25:11.171614" } }, "6": { @@ -575,16 +575,16 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchCeiling": 17000, "basicTestResult": { "targetTPS": 17000, - "resultAvgTps": 15876.176470588236, + "resultAvgTps": 15659.058823529413, "expectedTxns": 170000, "resultTxns": 170000, "tpsExpectMet": false, "trxExpectMet": true, "basicTestSuccess": true, "testAnalysisBlockCnt": 18, - "logsDir": "performance_test/2022-11-04_19-31-40/testRunLogs/performance_test_basic/2022-11-04_19-40-02", - "testStart": "2022-11-04T19:40:02.150305", - "testEnd": "2022-11-04T19:41:21.802272" + "logsDir": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-25-11-17000", + "testStart": "2022-11-21T22:25:11.225775", + "testEnd": "2022-11-21T22:26:30.102913" } }, "7": { @@ -594,16 +594,16 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchCeiling": 16500, "basicTestResult": { "targetTPS": 16500, - "resultAvgTps": 16096.823529411764, + "resultAvgTps": 15714.823529411764, "expectedTxns": 165000, "resultTxns": 165000, "tpsExpectMet": false, "trxExpectMet": true, "basicTestSuccess": true, "testAnalysisBlockCnt": 18, - "logsDir": "performance_test/2022-11-04_19-31-40/testRunLogs/performance_test_basic/2022-11-04_19-41-21", - "testStart": "2022-11-04T19:41:21.851918", - "testEnd": "2022-11-04T19:42:40.991794" + "logsDir": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-26-30-16500", + "testStart": "2022-11-21T22:26:30.155632", + 
"testEnd": "2022-11-21T22:27:48.093871" } } }, @@ -622,60 +622,22 @@ Finally, the full detail test report for each of the determined max TPS throughp }, "LongRunningSearchResults": { "0": { - "success": false, + "success": true, "searchTarget": 16000, "searchFloor": 0, "searchCeiling": 16000, "basicTestResult": { "targetTPS": 16000, - "resultAvgTps": 14954.266666666666, - "expectedTxns": 480000, - "resultTxns": 480000, - "tpsExpectMet": false, - "trxExpectMet": true, - "basicTestSuccess": true, - "testAnalysisBlockCnt": 61, - "logsDir": "performance_test/2022-11-04_19-31-40/testRunLogs/performance_test_basic/2022-11-04_19-42-41", - "testStart": "2022-11-04T19:42:41.051468", - "testEnd": "2022-11-04T19:44:47.365905" - } - }, - "1": { - "success": false, - "searchTarget": 15500, - "searchFloor": 0, - "searchCeiling": 16000, - "basicTestResult": { - "targetTPS": 15500, - "resultAvgTps": 15001.827586206897, - "expectedTxns": 465000, - "resultTxns": 465000, - "tpsExpectMet": false, - "trxExpectMet": true, - "basicTestSuccess": true, - "testAnalysisBlockCnt": 59, - "logsDir": "performance_test/2022-11-04_19-31-40/testRunLogs/performance_test_basic/2022-11-04_19-44-47", - "testStart": "2022-11-04T19:44:47.472961", - "testEnd": "2022-11-04T19:46:52.818564" - } - }, - "2": { - "success": true, - "searchTarget": 15000, - "searchFloor": 0, - "searchCeiling": 16000, - "basicTestResult": { - "targetTPS": 15000, - "resultAvgTps": 15023.464285714286, - "expectedTxns": 450000, - "resultTxns": 450000, + "resultAvgTps": 15933.1875, + "expectedTxns": 160000, + "resultTxns": 160000, "tpsExpectMet": true, "trxExpectMet": true, "basicTestSuccess": true, - "testAnalysisBlockCnt": 57, - "logsDir": "performance_test/2022-11-04_19-31-40/testRunLogs/performance_test_basic/2022-11-04_19-46-52", - "testStart": "2022-11-04T19:46:52.960531", - "testEnd": "2022-11-04T19:48:52.989694" + "testAnalysisBlockCnt": 17, + "logsDir": 
"./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-27-48-16000", + "testStart": "2022-11-21T22:27:48.146027", + "testEnd": "2022-11-21T22:29:02.871273" } } }, @@ -707,17 +669,24 @@ Finally, the full detail test report for each of the determined max TPS throughp "topo": "mesh", "extraNodeosArgs": { "chainPluginArgs": { - "signatureCpuBillablePct": 0 + "signatureCpuBillablePct": 0, + "chainStateDbSizeMb": 10240, + "chainThreads": 2, + "databaseMapMode": "mapped" }, "producerPluginArgs": { "disableSubjectiveBilling": true, "lastBlockTimeOffsetUs": 0, "produceTimeOffsetUs": 0, "cpuEffortPercent": 100, - "lastBlockCpuEffortPercent": 100 + "lastBlockCpuEffortPercent": 100, + "producerThreads": 2 }, "httpPluginArgs": { "httpMaxResponseTimeMs": 990000 + }, + "netPluginArgs": { + "netThreads": 2 } }, "useBiosBootFile": false, @@ -734,7 +703,7 @@ Finally, the full detail test report for each of the determined max TPS throughp "_totalNodes": 2, "testDurationSec": 10, "finalDurationSec": 30, - "logsDir": "performance_test/2022-11-04_19-31-40", + "delPerfLogs": false, "maxTpsToTest": 50000, "testIterationMinStep": 500, "tpsLimitPerGenerator": 4000, @@ -742,12 +711,17 @@ Finally, the full detail test report for each of the determined max TPS throughp "delTestReport": false, "numAddlBlocksToPrune": 2, "quiet": false, - "delPerfLogs": false + "logDirRoot": ".", + "logDirBase": "./performance_test", + "logDirTimestamp": "2022-11-21_22-17-03", + "logDirPath": "./performance_test/2022-11-21_22-17-03", + "ptbLogsDirPath": "./performance_test/2022-11-21_22-17-03/testRunLogs" }, "env": { "system": "Linux", "os": "posix", - "release": "5.10.102.1-microsoft-standard-WSL2" + "release": "5.15.74.2-microsoft-standard-WSL2", + "logical_cpu_count": 16 }, "nodeosVersion": "v4.0.0-dev" } @@ -765,67 +739,67 @@ The Performance Test Basic generates, by default, a report that details results ``` json { "completedRun": true, - "testStart": 
"2022-11-04T19:46:52.960531", - "testFinish": "2022-11-04T19:48:52.989694", + "testStart": "2022-11-21T22:27:48.146027", + "testFinish": "2022-11-21T22:29:02.871273", "Analysis": { "BlockSize": { - "min": 1389312, - "max": 1575800, - "avg": 1474814.3157894737, - "sigma": 40921.65290309434, + "min": 1369536, + "max": 1624896, + "avg": 1530567.5294117648, + "sigma": 58850.381839050766, "emptyBlocks": 0, - "numBlocks": 57 + "numBlocks": 17 }, "BlocksGuide": { "firstBlockNum": 2, - "lastBlockNum": 232, - "totalBlocks": 231, + "lastBlockNum": 147, + "totalBlocks": 146, "testStartBlockNum": 105, - "testEndBlockNum": 199, + "testEndBlockNum": 136, "setupBlocksCnt": 103, - "tearDownBlocksCnt": 33, + "tearDownBlocksCnt": 11, "leadingEmptyBlocksCnt": 1, - "trailingEmptyBlocksCnt": 33, + "trailingEmptyBlocksCnt": 10, "configAddlDropCnt": 2, - "testAnalysisBlockCnt": 57 + "testAnalysisBlockCnt": 17 }, "TPS": { - "min": 14532, - "max": 15477, - "avg": 15023.464285714286, - "sigma": 178.66938384762454, + "min": 14996, + "max": 16486, + "avg": 15933.1875, + "sigma": 403.137727512261, "emptyBlocks": 0, - "numBlocks": 57, - "configTps": 15000, - "configTestDuration": 30, + "numBlocks": 17, + "configTps": 16000, + "configTestDuration": 10, "tpsPerGenerator": [ - 3750, - 3750, - 3750, - 3750 + 4000, + 4000, + 4000, + 4000 ], "generatorCount": 4 }, "TrxCPU": { "min": 7.0, - "max": 2647.0, - "avg": 23.146035555555557, - "sigma": 11.415769514864671, - "samples": 450000 + "max": 657.0, + "avg": 21.81190625, + "sigma": 9.853241319038672, + "samples": 160000 }, "TrxLatency": { "min": 0.0009999275207519531, - "max": 0.5539999008178711, - "avg": 0.2614889088874393, - "sigma": 0.1450651327531534, - "samples": 450000 + "max": 0.565000057220459, + "avg": 0.27573538126200436, + "sigma": 0.14606770516057177, + "samples": 160000 }, "TrxNet": { "min": 24.0, - "max": 25.0, - "avg": 24.555564444444446, - "sigma": 0.49690300111146485, - "samples": 450000 + "max": 24.0, + "avg": 24.0, + "sigma": 0.0, + 
"samples": 160000 } }, "args": { @@ -843,17 +817,24 @@ The Performance Test Basic generates, by default, a report that details results "topo": "mesh", "extraNodeosArgs": { "chainPluginArgs": { - "signatureCpuBillablePct": 0 + "signatureCpuBillablePct": 0, + "chainStateDbSizeMb": 10240, + "chainThreads": 2, + "databaseMapMode": "mapped" }, "producerPluginArgs": { "disableSubjectiveBilling": true, "lastBlockTimeOffsetUs": 0, "produceTimeOffsetUs": 0, "cpuEffortPercent": 100, - "lastBlockCpuEffortPercent": 100 + "lastBlockCpuEffortPercent": 100, + "producerThreads": 2 }, "httpPluginArgs": { "httpMaxResponseTimeMs": 990000 + }, + "netPluginArgs": { + "netThreads": 2 } }, "useBiosBootFile": false, @@ -868,19 +849,24 @@ The Performance Test Basic generates, by default, a report that details results "1": "--plugin eosio::trace_api_plugin" }, "_totalNodes": 2, - "delPerfLogs": false, - "delReport": false, - "expectedTransactionsSent": 450000, + "targetTps": 16000, + "testTrxGenDurationSec": 10, + "tpsLimitPerGenerator": 4000, "numAddlBlocksToPrune": 2, + "logDirRoot": "./performance_test/2022-11-21_22-17-03/testRunLogs", + "delReport": false, "quiet": false, - "targetTps": 15000, - "testTrxGenDurationSec": 30, - "tpsLimitPerGenerator": 4000 + "delPerfLogs": false, + "expectedTransactionsSent": 160000, + "logDirBase": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic", + "logDirTimestamp": "2022-11-21_22-27-48", + "logDirTimestampedOptSuffix": "-16000", + "logDirPath": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-27-48-16000" }, "env": { "system": "Linux", "os": "posix", - "release": "5.10.102.1-microsoft-standard-WSL2", + "release": "5.15.74.2-microsoft-standard-WSL2", "logical_cpu_count": 16 }, "nodeosVersion": "v4.0.0-dev" diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index 62b66e68c5..5c721e01ae 100755 --- 
a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -16,198 +16,286 @@ from dataclasses import dataclass, asdict, field from datetime import datetime -@dataclass -class PerfTestBasicResult: - targetTPS: int = 0 - resultAvgTps: float = 0 - expectedTxns: int = 0 - resultTxns: int = 0 - tpsExpectMet: bool = False - trxExpectMet: bool = False - basicTestSuccess: bool = False - testAnalysisBlockCnt: int = 0 - logsDir: str = "" - testStart: datetime = None - testEnd: datetime = None - -@dataclass -class PerfTestSearchIndivResult: - success: bool = False - searchTarget: int = 0 - searchFloor: int = 0 - searchCeiling: int = 0 - basicTestResult: PerfTestBasicResult = PerfTestBasicResult() - -@dataclass -class PerfTestSearchResults: - maxTpsAchieved: int = 0 - searchResults: list = field(default_factory=list) #PerfTestSearchIndivResult list - maxTpsReport: dict = field(default_factory=dict) - -def performPtbBinarySearch(tpsTestFloor: int, tpsTestCeiling: int, minStep: int, testHelperConfig: PerformanceTestBasic.TestHelperConfig, - testClusterConfig: PerformanceTestBasic.ClusterConfig, testDurationSec: int, tpsLimitPerGenerator: int, - numAddlBlocksToPrune: int, testLogDir: str, delReport: bool, quiet: bool, delPerfLogs: bool) -> PerfTestSearchResults: - floor = tpsTestFloor - ceiling = tpsTestCeiling - binSearchTarget = tpsTestCeiling - - maxTpsAchieved = 0 - maxTpsReport = {} - searchResults = [] - - while ceiling >= floor: - print(f"Running scenario: floor {floor} binSearchTarget {binSearchTarget} ceiling {ceiling}") - ptbResult = PerfTestBasicResult() - scenarioResult = PerfTestSearchIndivResult(success=False, searchTarget=binSearchTarget, searchFloor=floor, searchCeiling=ceiling, basicTestResult=ptbResult) - ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=binSearchTarget, testTrxGenDurationSec=testDurationSec, tpsLimitPerGenerator=tpsLimitPerGenerator, - numAddlBlocksToPrune=numAddlBlocksToPrune, logDirRoot=testLogDir, 
delReport=delReport, quiet=quiet, delPerfLogs=delPerfLogs) - - myTest = PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptbConfig=ptbConfig) - testSuccessful = myTest.runTest() - if evaluateSuccess(myTest, testSuccessful, ptbResult): - maxTpsAchieved = binSearchTarget - maxTpsReport = myTest.report - floor = binSearchTarget + minStep - scenarioResult.success = True - else: - ceiling = binSearchTarget - minStep - - scenarioResult.basicTestResult = ptbResult - searchResults.append(scenarioResult) - if not quiet: - print(f"searchResult: {binSearchTarget} : {searchResults[-1]}") - - binSearchTarget = floor + (math.ceil(((ceiling - floor) / minStep) / 2) * minStep) - - return PerfTestSearchResults(maxTpsAchieved=maxTpsAchieved, searchResults=searchResults, maxTpsReport=maxTpsReport) - -def performPtbReverseLinearSearch(tpsInitial: int, step: int, testHelperConfig: PerformanceTestBasic.TestHelperConfig, - testClusterConfig: PerformanceTestBasic.ClusterConfig, testDurationSec: int, tpsLimitPerGenerator: int, - numAddlBlocksToPrune: int, testLogDir: str, delReport: bool, quiet: bool, delPerfLogs: bool) -> PerfTestSearchResults: - - # Default - Decrementing Max TPS in range [0, tpsInitial] - absFloor = 0 - absCeiling = tpsInitial - - searchTarget = tpsInitial - - maxTpsAchieved = 0 - maxTpsReport = {} - searchResults = [] - maxFound = False - - while not maxFound: - print(f"Running scenario: floor {absFloor} searchTarget {searchTarget} ceiling {absCeiling}") - ptbResult = PerfTestBasicResult() - scenarioResult = PerfTestSearchIndivResult(success=False, searchTarget=searchTarget, searchFloor=absFloor, searchCeiling=absCeiling, basicTestResult=ptbResult) - ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=searchTarget, testTrxGenDurationSec=testDurationSec, tpsLimitPerGenerator=tpsLimitPerGenerator, - numAddlBlocksToPrune=numAddlBlocksToPrune, logDirRoot=testLogDir, delReport=delReport, quiet=quiet, delPerfLogs=delPerfLogs) - - 
myTest = PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptbConfig=ptbConfig) - testSuccessful = myTest.runTest() - if evaluateSuccess(myTest, testSuccessful, ptbResult): - maxTpsAchieved = searchTarget - maxTpsReport = myTest.report - scenarioResult.success = True - maxFound = True - else: - searchTarget = searchTarget - step - - scenarioResult.basicTestResult = ptbResult - searchResults.append(scenarioResult) - if not quiet: - print(f"searchResult: {searchTarget} : {searchResults[-1]}") - - return PerfTestSearchResults(maxTpsAchieved=maxTpsAchieved, searchResults=searchResults, maxTpsReport=maxTpsReport) - -def evaluateSuccess(test: PerformanceTestBasic, testSuccessful: bool, result: PerfTestBasicResult) -> bool: - result.targetTPS = test.ptbConfig.targetTps - result.expectedTxns = test.ptbConfig.expectedTransactionsSent - reportDict = test.report - result.testStart = reportDict["testStart"] - result.testEnd = reportDict["testFinish"] - result.resultAvgTps = reportDict["Analysis"]["TPS"]["avg"] - result.resultTxns = reportDict["Analysis"]["TrxLatency"]["samples"] - print(f"targetTPS: {result.targetTPS} expectedTxns: {result.expectedTxns} resultAvgTps: {result.resultAvgTps} resultTxns: {result.resultTxns}") - - result.tpsExpectMet = True if result.resultAvgTps >= result.targetTPS else abs(result.targetTPS - result.resultAvgTps) < 100 - result.trxExpectMet = result.expectedTxns == result.resultTxns - result.basicTestSuccess = testSuccessful - result.testAnalysisBlockCnt = reportDict["Analysis"]["BlocksGuide"]["testAnalysisBlockCnt"] - result.logsDir = test.loggingConfig.logDirPath - - print(f"basicTestSuccess: {result.basicTestSuccess} tpsExpectationMet: {result.tpsExpectMet} trxExpectationMet: {result.trxExpectMet}") - - return result.basicTestSuccess and result.tpsExpectMet and result.trxExpectMet - -def createReport(maxTpsAchieved, searchResults, maxTpsReport, longRunningMaxTpsAchieved, longRunningSearchResults, 
longRunningMaxTpsReport, testStart: datetime, testFinish: datetime, argsDict) -> dict: - report = {} - report['InitialMaxTpsAchieved'] = maxTpsAchieved - report['LongRunningMaxTpsAchieved'] = longRunningMaxTpsAchieved - report['testStart'] = testStart - report['testFinish'] = testFinish - report['InitialSearchResults'] = {x: asdict(searchResults[x]) for x in range(len(searchResults))} - report['InitialMaxTpsReport'] = maxTpsReport - report['LongRunningSearchResults'] = {x: asdict(longRunningSearchResults[x]) for x in range(len(longRunningSearchResults))} - report['LongRunningMaxTpsReport'] = longRunningMaxTpsReport - report['args'] = argsDict - report['env'] = {'system': system(), 'os': os.name, 'release': release(), 'logical_cpu_count': os.cpu_count()} - report['nodeosVersion'] = Utils.getNodeosVersion() - return report - -def createJSONReport(maxTpsAchieved, searchResults, maxTpsReport, longRunningMaxTpsAchieved, longRunningSearchResults, longRunningMaxTpsReport, testStart: datetime, testFinish: datetime, argsDict) -> json: - report = createReport(maxTpsAchieved=maxTpsAchieved, searchResults=searchResults, maxTpsReport=maxTpsReport, longRunningMaxTpsAchieved=longRunningMaxTpsAchieved, - longRunningSearchResults=longRunningSearchResults, longRunningMaxTpsReport=longRunningMaxTpsReport, testStart=testStart, testFinish=testFinish, argsDict=argsDict) - return reportAsJSON(report) - -def reportAsJSON(report: dict) -> json: - report['testStart'] = "Unknown" if report['testStart'] is None else report['testStart'].isoformat() - report['testFinish'] = "Unknown" if report['testFinish'] is None else report['testFinish'].isoformat() - return json.dumps(report, indent=2) - -def exportReportAsJSON(report: json, exportPath): - with open(exportPath, 'wt') as f: - f.write(report) - -def testDirsCleanup(delReport, testTimeStampDirPath, ptbLogsDirPath): - try: - def removeArtifacts(path): - print(f"Checking if test artifacts dir exists: {path}") - if os.path.isdir(f"{path}"): - 
print(f"Cleaning up test artifacts dir and all contents of: {path}") - shutil.rmtree(f"{path}") - - if not delReport: - removeArtifacts(ptbLogsDirPath) - else: - removeArtifacts(testTimeStampDirPath) - except OSError as error: - print(error) - -def testDirsSetup(rootLogDir, testTimeStampDirPath, ptbLogsDirPath): - try: - def createArtifactsDir(path): - print(f"Checking if test artifacts dir exists: {path}") - if not os.path.isdir(f"{path}"): - print(f"Creating test artifacts dir: {path}") - os.mkdir(f"{path}") - - createArtifactsDir(rootLogDir) - createArtifactsDir(testTimeStampDirPath) - createArtifactsDir(ptbLogsDirPath) - - except OSError as error: - print(error) - -def prepArgsDict(testDurationSec, finalDurationSec, logsDir, maxTpsToTest, testIterationMinStep, - tpsLimitPerGenerator, delReport, delTestReport, numAddlBlocksToPrune, quiet, delPerfLogs, - testHelperConfig: PerformanceTestBasic.TestHelperConfig, testClusterConfig: PerformanceTestBasic.ClusterConfig) -> dict: - argsDict = {} - argsDict.update(asdict(testHelperConfig)) - argsDict.update(asdict(testClusterConfig)) - argsDict.update({key:val for key, val in locals().items() if key in set(['testDurationSec', 'finalDurationSec', 'maxTpsToTest', 'testIterationMinStep', 'tpsLimitPerGenerator', - 'delReport', 'delTestReport', 'numAddlBlocksToPrune', 'logsDir', 'quiet', 'delPerfLogs'])}) - return argsDict +class PerformanceTest: + + @dataclass + class PerfTestSearchIndivResult: + @dataclass + class PerfTestBasicResult: + targetTPS: int = 0 + resultAvgTps: float = 0 + expectedTxns: int = 0 + resultTxns: int = 0 + tpsExpectMet: bool = False + trxExpectMet: bool = False + basicTestSuccess: bool = False + testAnalysisBlockCnt: int = 0 + logsDir: str = "" + testStart: datetime = None + testEnd: datetime = None + + success: bool = False + searchTarget: int = 0 + searchFloor: int = 0 + searchCeiling: int = 0 + basicTestResult: PerfTestBasicResult = PerfTestBasicResult() + + @dataclass + class PtConfig: + 
testDurationSec: int=150 + finalDurationSec: int=300 + delPerfLogs: bool=False + maxTpsToTest: int=50000 + testIterationMinStep: int=500 + tpsLimitPerGenerator: int=4000 + delReport: bool=False + delTestReport: bool=False + numAddlBlocksToPrune: int=2 + quiet: bool=False + logDirRoot: str="." + + @dataclass + class PerfTestSearchResults: + maxTpsAchieved: int = 0 + searchResults: list = field(default_factory=list) #PerfTestSearchIndivResult list + maxTpsReport: dict = field(default_factory=dict) + + @dataclass + class LoggingConfig: + logDirBase: str = f"./{os.path.splitext(os.path.basename(__file__))[0]}" + logDirTimestamp: str = f"{datetime.utcnow().strftime('%Y-%m-%d_%H-%M-%S')}" + logDirPath: str = field(default_factory=str, init=False) + ptbLogsDirPath: str = field(default_factory=str, init=False) + + def __post_init__(self): + self.logDirPath = f"{self.logDirBase}/{self.logDirTimestamp}" + self.ptbLogsDirPath = f"{self.logDirPath}/testRunLogs" + + def __init__(self, testHelperConfig: PerformanceTestBasic.TestHelperConfig=PerformanceTestBasic.TestHelperConfig(), + clusterConfig: PerformanceTestBasic.ClusterConfig=PerformanceTestBasic.ClusterConfig(), ptConfig=PtConfig()): + self.testHelperConfig = testHelperConfig + self.clusterConfig = clusterConfig + self.ptConfig = ptConfig + + self.testStart = datetime.utcnow() + + self.loggingConfig = PerformanceTest.LoggingConfig(logDirBase=f"{self.ptConfig.logDirRoot}/{os.path.splitext(os.path.basename(__file__))[0]}", + logDirTimestamp=f"{self.testStart.strftime('%Y-%m-%d_%H-%M-%S')}") + + def performPtbBinarySearch(self) -> PerfTestSearchResults: + floor = 0 + ceiling = self.ptConfig.maxTpsToTest + binSearchTarget = self.ptConfig.maxTpsToTest + minStep = self.ptConfig.testIterationMinStep + + maxTpsAchieved = 0 + maxTpsReport = {} + searchResults = [] + + while ceiling >= floor: + print(f"Running scenario: floor {floor} binSearchTarget {binSearchTarget} ceiling {ceiling}") + ptbResult = 
PerformanceTest.PerfTestSearchIndivResult.PerfTestBasicResult() + scenarioResult = PerformanceTest.PerfTestSearchIndivResult(success=False, searchTarget=binSearchTarget, searchFloor=floor, searchCeiling=ceiling, basicTestResult=ptbResult) + ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=binSearchTarget, testTrxGenDurationSec=self.ptConfig.testDurationSec, tpsLimitPerGenerator=self.ptConfig.tpsLimitPerGenerator, + numAddlBlocksToPrune=self.ptConfig.numAddlBlocksToPrune, logDirRoot=self.loggingConfig.ptbLogsDirPath, delReport=self.ptConfig.delReport, quiet=self.ptConfig.quiet, + delPerfLogs=self.ptConfig.delPerfLogs) + + myTest = PerformanceTestBasic(testHelperConfig=self.testHelperConfig, clusterConfig=self.clusterConfig, ptbConfig=ptbConfig) + testSuccessful = myTest.runTest() + if self.evaluateSuccess(myTest, testSuccessful, ptbResult): + maxTpsAchieved = binSearchTarget + maxTpsReport = myTest.report + floor = binSearchTarget + minStep + scenarioResult.success = True + else: + ceiling = binSearchTarget - minStep + + scenarioResult.basicTestResult = ptbResult + searchResults.append(scenarioResult) + if not self.ptConfig.quiet: + print(f"searchResult: {binSearchTarget} : {searchResults[-1]}") + + binSearchTarget = floor + (math.ceil(((ceiling - floor) / minStep) / 2) * minStep) + + return PerformanceTest.PerfTestSearchResults(maxTpsAchieved=maxTpsAchieved, searchResults=searchResults, maxTpsReport=maxTpsReport) + + def performPtbReverseLinearSearch(self, tpsInitial: int) -> PerfTestSearchResults: + + # Default - Decrementing Max TPS in range [0, tpsInitial] + absFloor = 0 + absCeiling = tpsInitial + + step = self.ptConfig.testIterationMinStep + + searchTarget = tpsInitial + + maxTpsAchieved = 0 + maxTpsReport = {} + searchResults = [] + maxFound = False + + while not maxFound: + print(f"Running scenario: floor {absFloor} searchTarget {searchTarget} ceiling {absCeiling}") + ptbResult = PerformanceTest.PerfTestSearchIndivResult.PerfTestBasicResult() + 
scenarioResult = PerformanceTest.PerfTestSearchIndivResult(success=False, searchTarget=searchTarget, searchFloor=absFloor, searchCeiling=absCeiling, basicTestResult=ptbResult) + ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=searchTarget, testTrxGenDurationSec=self.ptConfig.testDurationSec, tpsLimitPerGenerator=self.ptConfig.tpsLimitPerGenerator, + numAddlBlocksToPrune=self.ptConfig.numAddlBlocksToPrune, logDirRoot=self.loggingConfig.ptbLogsDirPath, delReport=self.ptConfig.delReport, quiet=self.ptConfig.quiet, delPerfLogs=self.ptConfig.delPerfLogs) + + myTest = PerformanceTestBasic(testHelperConfig=self.testHelperConfig, clusterConfig=self.clusterConfig, ptbConfig=ptbConfig) + testSuccessful = myTest.runTest() + if self.evaluateSuccess(myTest, testSuccessful, ptbResult): + maxTpsAchieved = searchTarget + maxTpsReport = myTest.report + scenarioResult.success = True + maxFound = True + else: + searchTarget = searchTarget - step + + scenarioResult.basicTestResult = ptbResult + searchResults.append(scenarioResult) + if not self.ptConfig.quiet: + print(f"searchResult: {searchTarget} : {searchResults[-1]}") + + return PerformanceTest.PerfTestSearchResults(maxTpsAchieved=maxTpsAchieved, searchResults=searchResults, maxTpsReport=maxTpsReport) + + def evaluateSuccess(self, test: PerformanceTestBasic, testSuccessful: bool, result: PerfTestSearchIndivResult.PerfTestBasicResult) -> bool: + result.targetTPS = test.ptbConfig.targetTps + result.expectedTxns = test.ptbConfig.expectedTransactionsSent + reportDict = test.report + result.testStart = reportDict["testStart"] + result.testEnd = reportDict["testFinish"] + result.resultAvgTps = reportDict["Analysis"]["TPS"]["avg"] + result.resultTxns = reportDict["Analysis"]["TrxLatency"]["samples"] + print(f"targetTPS: {result.targetTPS} expectedTxns: {result.expectedTxns} resultAvgTps: {result.resultAvgTps} resultTxns: {result.resultTxns}") + + result.tpsExpectMet = True if result.resultAvgTps >= result.targetTPS else 
abs(result.targetTPS - result.resultAvgTps) < 100 + result.trxExpectMet = result.expectedTxns == result.resultTxns + result.basicTestSuccess = testSuccessful + result.testAnalysisBlockCnt = reportDict["Analysis"]["BlocksGuide"]["testAnalysisBlockCnt"] + result.logsDir = test.loggingConfig.logDirPath + + print(f"basicTestSuccess: {result.basicTestSuccess} tpsExpectationMet: {result.tpsExpectMet} trxExpectationMet: {result.trxExpectMet}") + + return result.basicTestSuccess and result.tpsExpectMet and result.trxExpectMet + + def createReport(self, maxTpsAchieved, searchResults, maxTpsReport, longRunningMaxTpsAchieved, longRunningSearchResults, longRunningMaxTpsReport, testStart: datetime, testFinish: datetime, argsDict) -> dict: + report = {} + report['InitialMaxTpsAchieved'] = maxTpsAchieved + report['LongRunningMaxTpsAchieved'] = longRunningMaxTpsAchieved + report['testStart'] = testStart + report['testFinish'] = testFinish + report['InitialSearchResults'] = {x: asdict(searchResults[x]) for x in range(len(searchResults))} + report['InitialMaxTpsReport'] = maxTpsReport + report['LongRunningSearchResults'] = {x: asdict(longRunningSearchResults[x]) for x in range(len(longRunningSearchResults))} + report['LongRunningMaxTpsReport'] = longRunningMaxTpsReport + report['args'] = argsDict + report['env'] = {'system': system(), 'os': os.name, 'release': release(), 'logical_cpu_count': os.cpu_count()} + report['nodeosVersion'] = Utils.getNodeosVersion() + return report + + def createJSONReport(self, maxTpsAchieved, searchResults, maxTpsReport, longRunningMaxTpsAchieved, longRunningSearchResults, longRunningMaxTpsReport, testStart: datetime, testFinish: datetime, argsDict) -> json: + report = self.createReport(maxTpsAchieved=maxTpsAchieved, searchResults=searchResults, maxTpsReport=maxTpsReport, longRunningMaxTpsAchieved=longRunningMaxTpsAchieved, + longRunningSearchResults=longRunningSearchResults, longRunningMaxTpsReport=longRunningMaxTpsReport, testStart=testStart, 
testFinish=testFinish, argsDict=argsDict) + return self.reportAsJSON(report) + + def reportAsJSON(self, report: dict) -> json: + report['testStart'] = "Unknown" if report['testStart'] is None else report['testStart'].isoformat() + report['testFinish'] = "Unknown" if report['testFinish'] is None else report['testFinish'].isoformat() + return json.dumps(report, indent=2) + + def exportReportAsJSON(self, report: json, exportPath): + with open(exportPath, 'wt') as f: + f.write(report) + + def testDirsCleanup(self): + try: + def removeArtifacts(path): + print(f"Checking if test artifacts dir exists: {path}") + if os.path.isdir(f"{path}"): + print(f"Cleaning up test artifacts dir and all contents of: {path}") + shutil.rmtree(f"{path}") + + if not self.ptConfig.delReport: + removeArtifacts(self.loggingConfig.ptbLogsDirPath) + else: + removeArtifacts(self.loggingConfig.logDirPath) + except OSError as error: + print(error) + + def testDirsSetup(self): + try: + def createArtifactsDir(path): + print(f"Checking if test artifacts dir exists: {path}") + if not os.path.isdir(f"{path}"): + print(f"Creating test artifacts dir: {path}") + os.mkdir(f"{path}") + + createArtifactsDir(self.loggingConfig.logDirBase) + createArtifactsDir(self.loggingConfig.logDirPath) + createArtifactsDir(self.loggingConfig.ptbLogsDirPath) + + except OSError as error: + print(error) + + def prepArgsDict(self) -> dict: + argsDict = {} + argsDict.update(asdict(self.testHelperConfig)) + argsDict.update(asdict(self.clusterConfig)) + argsDict.update(asdict(self.ptConfig)) + argsDict.update(asdict(self.loggingConfig)) + return argsDict + + def performTpsTest(self): + perfRunSuccessful = False + + try: + binSearchResults = self.performPtbBinarySearch() + + print(f"Successful rate of: {binSearchResults.maxTpsAchieved}") + + if not self.ptConfig.quiet: + print("Search Results:") + for i in range(len(binSearchResults.searchResults)): + print(f"Search scenario: {i} result: {binSearchResults.searchResults[i]}") + + 
longRunningSearchResults = self.performPtbReverseLinearSearch(tpsInitial=binSearchResults.maxTpsAchieved) + + print(f"Long Running Test - Successful rate of: {longRunningSearchResults.maxTpsAchieved}") + perfRunSuccessful = True + + if not self.ptConfig.quiet: + print("Long Running Test - Search Results:") + for i in range(len(longRunningSearchResults.searchResults)): + print(f"Search scenario: {i} result: {longRunningSearchResults.searchResults[i]}") + + testFinish = datetime.utcnow() + argsDict = self.prepArgsDict() + fullReport = self.createJSONReport(maxTpsAchieved=binSearchResults.maxTpsAchieved, searchResults=binSearchResults.searchResults, maxTpsReport=binSearchResults.maxTpsReport, + longRunningMaxTpsAchieved=longRunningSearchResults.maxTpsAchieved, longRunningSearchResults=longRunningSearchResults.searchResults, + longRunningMaxTpsReport=longRunningSearchResults.maxTpsReport, testStart=self.testStart, testFinish=testFinish, argsDict=argsDict) + + if not self.ptConfig.quiet: + print(f"Full Performance Test Report: {fullReport}") + + if not self.ptConfig.delReport: + self.exportReportAsJSON(fullReport, f"{self.loggingConfig.logDirPath}/report.json") + + finally: + + if self.ptConfig.delPerfLogs: + print(f"Cleaning up logs directory: {self.loggingConfig.logDirPath}") + self.testDirsCleanup() + + return perfRunSuccessful + + def runTest(self): + + TestHelper.printSystemInfo("BEGIN") + self.testDirsSetup() + + testSuccessful = self.performTpsTest() + + return testSuccessful def parseArgs(): appArgs=AppArgs() @@ -248,39 +336,10 @@ def main(): args = parseArgs() Utils.Debug = args.v - testDurationSec=args.test_iteration_duration_sec - finalDurationSec=args.final_iterations_duration_sec - killAll=args.clean_run - dontKill=args.leave_running - delPerfLogs=args.del_perf_logs - dumpErrorDetails=args.dump_error_details - delay=args.d - nodesFile=args.nodes_file - verbose=args.v - pnodes=args.p - totalNodes=args.n - topo=args.s - genesisPath=args.genesis - 
maxTpsToTest=args.max_tps_to_test - testIterationMinStep=args.test_iteration_min_step - tpsLimitPerGenerator=args.tps_limit_per_generator - delReport=args.del_report - delTestReport=args.del_test_report - numAddlBlocksToPrune=args.num_blocks_to_prune - quiet=args.quiet - prodsEnableTraceApi=args.prods_enable_trace_api - - testStart = datetime.utcnow() - - rootLogDir: str=os.path.splitext(os.path.basename(__file__))[0] - testTimeStampDirPath = f"{rootLogDir}/{testStart.strftime('%Y-%m-%d_%H-%M-%S')}" - ptbLogsDirPath = f"{testTimeStampDirPath}/testRunLogs" - - testDirsSetup(rootLogDir=rootLogDir, testTimeStampDirPath=testTimeStampDirPath, ptbLogsDirPath=ptbLogsDirPath) - - testHelperConfig = PerformanceTestBasic.TestHelperConfig(killAll=killAll, dontKill=dontKill, keepLogs=not delPerfLogs, - dumpErrorDetails=dumpErrorDetails, delay=delay, nodesFile=nodesFile, - verbose=verbose) + + testHelperConfig = PerformanceTestBasic.TestHelperConfig(killAll=args.clean_run, dontKill=args.leave_running, keepLogs=not args.del_perf_logs, + dumpErrorDetails=args.dump_error_details, delay=args.d, nodesFile=args.nodes_file, + verbose=args.v) ENA = PerformanceTestBasic.ClusterConfig.ExtraNodeosArgs chainPluginArgs = ENA.ChainPluginArgs(signatureCpuBillablePct=args.signature_cpu_billable_pct, chainStateDbSizeMb=args.chain_state_db_size_mb, @@ -294,54 +353,21 @@ def main(): extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs) testClusterConfig = PerformanceTestBasic.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs) - argsDict = prepArgsDict(testDurationSec=testDurationSec, finalDurationSec=finalDurationSec, logsDir=testTimeStampDirPath, - maxTpsToTest=maxTpsToTest, testIterationMinStep=testIterationMinStep, tpsLimitPerGenerator=tpsLimitPerGenerator, - delReport=delReport, 
delTestReport=delTestReport, numAddlBlocksToPrune=numAddlBlocksToPrune, - quiet=quiet, delPerfLogs=delPerfLogs, testHelperConfig=testHelperConfig, testClusterConfig=testClusterConfig) - - perfRunSuccessful = False - - try: - binSearchResults = performPtbBinarySearch(tpsTestFloor=0, tpsTestCeiling=maxTpsToTest, minStep=testIterationMinStep, testHelperConfig=testHelperConfig, - testClusterConfig=testClusterConfig, testDurationSec=testDurationSec, tpsLimitPerGenerator=tpsLimitPerGenerator, - numAddlBlocksToPrune=numAddlBlocksToPrune, testLogDir=ptbLogsDirPath, delReport=delTestReport, quiet=quiet, delPerfLogs=delPerfLogs) - - print(f"Successful rate of: {binSearchResults.maxTpsAchieved}") - - if not quiet: - print("Search Results:") - for i in range(len(binSearchResults.searchResults)): - print(f"Search scenario: {i} result: {binSearchResults.searchResults[i]}") - - longRunningSearchResults = performPtbReverseLinearSearch(tpsInitial=binSearchResults.maxTpsAchieved, step=testIterationMinStep, testHelperConfig=testHelperConfig, - testClusterConfig=testClusterConfig, testDurationSec=finalDurationSec, tpsLimitPerGenerator=tpsLimitPerGenerator, - numAddlBlocksToPrune=numAddlBlocksToPrune, testLogDir=ptbLogsDirPath, delReport=delTestReport, quiet=quiet, - delPerfLogs=delPerfLogs) - - print(f"Long Running Test - Successful rate of: {longRunningSearchResults.maxTpsAchieved}") - perfRunSuccessful = True - - if not quiet: - print("Long Running Test - Search Results:") - for i in range(len(longRunningSearchResults.searchResults)): - print(f"Search scenario: {i} result: {longRunningSearchResults.searchResults[i]}") - - testFinish = datetime.utcnow() - fullReport = createJSONReport(maxTpsAchieved=binSearchResults.maxTpsAchieved, searchResults=binSearchResults.searchResults, maxTpsReport=binSearchResults.maxTpsReport, - longRunningMaxTpsAchieved=longRunningSearchResults.maxTpsAchieved, longRunningSearchResults=longRunningSearchResults.searchResults, - 
longRunningMaxTpsReport=longRunningSearchResults.maxTpsReport, testStart=testStart, testFinish=testFinish, argsDict=argsDict) - - if not quiet: - print(f"Full Performance Test Report: {fullReport}") - - if not delReport: - exportReportAsJSON(fullReport, f"{testTimeStampDirPath}/report.json") - - finally: - if delPerfLogs: - print(f"Cleaning up logs directory: {testTimeStampDirPath}") - testDirsCleanup(delReport=delReport, testTimeStampDirPath=testTimeStampDirPath, ptbLogsDirPath=ptbLogsDirPath) + ptConfig = PerformanceTest.PtConfig(testDurationSec=args.test_iteration_duration_sec, + finalDurationSec=args.final_iterations_duration_sec, + delPerfLogs=args.del_perf_logs, + maxTpsToTest=args.max_tps_to_test, + testIterationMinStep=args.test_iteration_min_step, + tpsLimitPerGenerator=args.tps_limit_per_generator, + delReport=args.del_report, + delTestReport=args.del_test_report, + numAddlBlocksToPrune=args.num_blocks_to_prune, + quiet=args.quiet, + logDirRoot=".") + + myTest = PerformanceTest(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptConfig=ptConfig) + perfRunSuccessful = myTest.runTest() exitCode = 0 if perfRunSuccessful else 1 exit(exitCode) From 7ba252b71c37cb0e4d6cfb31b0ef2b180b3b9cd5 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 23 Nov 2022 12:30:01 -0600 Subject: [PATCH 014/178] Add options to calculate recommended producer, chain, and net worker thread pool size. Each plugin's option determines whether to calculate number of worker threads to use in the thread pool with options of: 'none', 'lmax', or 'full'. In 'none' mode, the default, no calculation will be attempted and default configured thread count will be used. In 'lmax' mode, thread count will incrementally be tested until the performance rate ceases to increase with the addition of subsequent threads. 
In 'full' mode thread count will incrementally be tested from 2..num logical processors, recording each performance and choosing the local max performance (same value as would be discovered in 'lmax' mode). Useful for graphing the full performance impact of each available thread. Added log directory to collect results of thread optimization calculations. Added option to opt out of running the tps test. Updated README for new options and to show the updates to the result reports. --- tests/performance_tests/README.md | 406 ++++++++++++-------- tests/performance_tests/performance_test.py | 320 ++++++++++++--- 2 files changed, 500 insertions(+), 226 deletions(-) diff --git a/tests/performance_tests/README.md b/tests/performance_tests/README.md index c1935f191d..b76b0b11e8 100644 --- a/tests/performance_tests/README.md +++ b/tests/performance_tests/README.md @@ -37,6 +37,11 @@ Please refer to [Leap: Build and Install from Source](https://github.com/Antelop performance_test/ └── 2022-10-27_15-28-09 ├── report.json + ├── pluginThreadOptRunLogs + │ ├── performance_test_basic + │ ├── chainThreadResults.txt + │ ├── netThreadResults.txt + │ └── producerThreadResults.txt └── testRunLogs └── performance_test_basic └── 2022-10-19_10-29-07 @@ -252,6 +257,22 @@ The Performance Harness main script `performance_test.py` can be configured usin * `--quiet` Whether to quiet printing intermediate results and reports to stdout (default: False) * `--prods-enable-trace-api` Determines whether producer nodes should have eosio::trace_api_plugin enabled (default: False) +* `--skip-tps-test` Determines whether to skip the max TPS measurement tests (default: False) +* `--calc-producer-threads {none,lmax,full}` + Determines whether to calculate number of worker threads to use in producer thread pool ("none", "lmax", or "full"). + In "none" mode, the default, no calculation will be attempted and default configured --producer-threads value will be used. 
+ In "lmax" mode, producer threads will incrementally be tested until the performance rate ceases to increase with the addition of additional threads. + In "full" mode producer threads will incrementally be tested from 2..num logical processors, recording each performance and choosing the local max performance (same value as would be discovered in "lmax" mode). Useful for graphing the full performance impact of each available thread. (default: none) +* `--calc-chain-threads {none,lmax,full}` + Determines whether to calculate number of worker threads to use in chain thread pool ("none", "lmax", or "full"). + In "none" mode, the default, no calculation will be attempted and default configured --chain-threads value will be used. + In "lmax" mode, producer threads will incrementally be tested until the performance rate ceases to increase with the addition of additional threads. + In "full" mode producer threads will incrementally be tested from 2..num logical processors, recording each performance and choosing the local max performance (same value as would be discovered in "lmax" mode). Useful for graphing the full performance impact of each available thread. (default: none) +* `--calc-net-threads {none,lmax,full}` + Determines whether to calculate number of worker threads to use in net thread pool ("none", "lmax", or "full"). + In "none" mode, the default, no calculation will be attempted and default configured --net-threads value will be used. + In "lmax" mode, producer threads will incrementally be tested until the performance rate ceases to increase with the addition of additional threads. + In "full" mode producer threads will incrementally be tested from 2..num logical processors, recording each performance and choosing the local max performance (same value as would be discovered in "lmax" mode). Useful for graphing the full performance impact of each available thread. 
(default: none) ### Support Scripts @@ -386,7 +407,7 @@ The Performance Harness generates a report to summarize results of test scenario Command used to run test and generate report: ``` bash -.build/tests/performance_tests/performance_test.py --test-iteration-duration-sec 10 --final-iterations-duration-sec 30 +.build/tests/performance_tests/performance_test.py --test-iteration-duration-sec 10 --final-iterations-duration-sec 30 --calc-producer-threads lmax --calc-chain-threads lmax --calc-net-threads lmax ``` #### Report Breakdown @@ -400,23 +421,23 @@ Next, a summary of the search scenario conducted and respective results is inclu Expand Search Scenario Summary Example ``` json - "0": { - "success": false, - "searchTarget": 25000, + "1": { + "success": true, + "searchTarget": 26000, "searchFloor": 0, - "searchCeiling": 50000, + "searchCeiling": 26500, "basicTestResult": { - "targetTPS": 25000, - "resultAvgTps": 17160.4, - "expectedTxns": 250000, - "resultTxns": 250000, - "tpsExpectMet": false, + "targetTPS": 26000, + "resultAvgTps": 25986.9375, + "expectedTxns": 260000, + "resultTxns": 260000, + "tpsExpectMet": true, "trxExpectMet": true, "basicTestSuccess": true, - "testAnalysisBlockCnt": 26, - "logsDir": "performance_test/2022-10-26_15-01-51/testRunLogs/performance_test_basic/2022-10-26_15-01-51", - "testStart": "2022-10-26T15:03:37.764242", - "testEnd": "2022-10-26T15:01:51.128328" + "testAnalysisBlockCnt": 17, + "logsDir": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic/2022-11-23_15-18-52-26000", + "testStart": "2022-11-23T15:18:52.115767", + "testEnd": "2022-11-23T15:20:16.911367" } } ``` @@ -449,10 +470,12 @@ Finally, the full detail test report for each of the determined max TPS throughp ``` json { - "InitialMaxTpsAchieved": 16000, - "LongRunningMaxTpsAchieved": 16000, - "testStart": "2022-11-21T22:17:03.604928", - "testFinish": "2022-11-21T22:29:02.923633", + "perfTestsBegin": "2022-11-23T12:56:58.699686", + "perfTestsFinish": 
"2022-11-23T15:20:16.979815", + "InitialMaxTpsAchieved": 26500, + "LongRunningMaxTpsAchieved": 26000, + "tpsTestStart": "2022-11-23T15:05:42.005050", + "tpsTestFinish": "2022-11-23T15:20:16.979800", "InitialSearchResults": { "0": { "success": false, @@ -461,149 +484,149 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchCeiling": 50000, "basicTestResult": { "targetTPS": 50000, - "resultAvgTps": 15121.925, + "resultAvgTps": 23784.324324324323, "expectedTxns": 500000, - "resultTxns": 326102, + "resultTxns": 500000, "tpsExpectMet": false, - "trxExpectMet": false, - "basicTestSuccess": false, - "testAnalysisBlockCnt": 41, - "logsDir": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-17-03-50000", - "testStart": "2022-11-21T22:17:03.624828", - "testEnd": "2022-11-21T22:18:35.048631" + "trxExpectMet": true, + "basicTestSuccess": true, + "testAnalysisBlockCnt": 38, + "logsDir": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic/2022-11-23_15-05-42-50000", + "testStart": "2022-11-23T15:05:42.005080", + "testEnd": "2022-11-23T15:07:24.111044" } }, "1": { - "success": false, + "success": true, "searchTarget": 25000, "searchFloor": 0, "searchCeiling": 49500, "basicTestResult": { "targetTPS": 25000, - "resultAvgTps": 15307.275862068966, + "resultAvgTps": 25013.3125, "expectedTxns": 250000, "resultTxns": 250000, - "tpsExpectMet": false, + "tpsExpectMet": true, "trxExpectMet": true, "basicTestSuccess": true, - "testAnalysisBlockCnt": 30, - "logsDir": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-18-35-25000", - "testStart": "2022-11-21T22:18:35.136441", - "testEnd": "2022-11-21T22:20:02.355919" + "testAnalysisBlockCnt": 17, + "logsDir": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic/2022-11-23_15-07-24-25000", + "testStart": "2022-11-23T15:07:24.225706", + "testEnd": "2022-11-23T15:08:47.510691" } }, "2": { 
- "success": true, - "searchTarget": 12500, - "searchFloor": 0, - "searchCeiling": 24500, + "success": false, + "searchTarget": 37500, + "searchFloor": 25500, + "searchCeiling": 49500, "basicTestResult": { - "targetTPS": 12500, - "resultAvgTps": 12494.4375, - "expectedTxns": 125000, - "resultTxns": 125000, - "tpsExpectMet": true, + "targetTPS": 37500, + "resultAvgTps": 24912.576923076922, + "expectedTxns": 375000, + "resultTxns": 375000, + "tpsExpectMet": false, "trxExpectMet": true, "basicTestSuccess": true, - "testAnalysisBlockCnt": 17, - "logsDir": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-20-02-12500", - "testStart": "2022-11-21T22:20:02.419664", - "testEnd": "2022-11-21T22:21:17.334219" + "testAnalysisBlockCnt": 27, + "logsDir": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic/2022-11-23_15-08-47-37500", + "testStart": "2022-11-23T15:08:47.579754", + "testEnd": "2022-11-23T15:10:23.342881" } }, "3": { "success": false, - "searchTarget": 19000, - "searchFloor": 13000, - "searchCeiling": 24500, + "searchTarget": 31500, + "searchFloor": 25500, + "searchCeiling": 37000, "basicTestResult": { - "targetTPS": 19000, - "resultAvgTps": 15546.55, - "expectedTxns": 190000, - "resultTxns": 190000, + "targetTPS": 31500, + "resultAvgTps": 24525.095238095237, + "expectedTxns": 315000, + "resultTxns": 315000, "tpsExpectMet": false, "trxExpectMet": true, "basicTestSuccess": true, - "testAnalysisBlockCnt": 21, - "logsDir": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-21-17-19000", - "testStart": "2022-11-21T22:21:17.380653", - "testEnd": "2022-11-21T22:22:37.113095" + "testAnalysisBlockCnt": 22, + "logsDir": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic/2022-11-23_15-10-23-31500", + "testStart": "2022-11-23T15:10:23.432821", + "testEnd": "2022-11-23T15:11:53.366694" } }, "4": { - "success": true, - "searchTarget": 16000, - 
"searchFloor": 13000, - "searchCeiling": 18500, + "success": false, + "searchTarget": 28500, + "searchFloor": 25500, + "searchCeiling": 31000, "basicTestResult": { - "targetTPS": 16000, - "resultAvgTps": 15969.375, - "expectedTxns": 160000, - "resultTxns": 160000, - "tpsExpectMet": true, + "targetTPS": 28500, + "resultAvgTps": 25896.666666666668, + "expectedTxns": 285000, + "resultTxns": 285000, + "tpsExpectMet": false, "trxExpectMet": true, "basicTestSuccess": true, - "testAnalysisBlockCnt": 17, - "logsDir": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-22-37-16000", - "testStart": "2022-11-21T22:22:37.166645", - "testEnd": "2022-11-21T22:23:51.349987" + "testAnalysisBlockCnt": 19, + "logsDir": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic/2022-11-23_15-11-53-28500", + "testStart": "2022-11-23T15:11:53.448449", + "testEnd": "2022-11-23T15:13:17.714663" } }, "5": { "success": false, - "searchTarget": 17500, - "searchFloor": 16500, - "searchCeiling": 18500, + "searchTarget": 27000, + "searchFloor": 25500, + "searchCeiling": 28000, "basicTestResult": { - "targetTPS": 17500, - "resultAvgTps": 15048.263157894737, - "expectedTxns": 175000, - "resultTxns": 175000, + "targetTPS": 27000, + "resultAvgTps": 26884.625, + "expectedTxns": 270000, + "resultTxns": 270000, "tpsExpectMet": false, "trxExpectMet": true, "basicTestSuccess": true, - "testAnalysisBlockCnt": 20, - "logsDir": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-23-51-17500", - "testStart": "2022-11-21T22:23:51.399539", - "testEnd": "2022-11-21T22:25:11.171614" + "testAnalysisBlockCnt": 17, + "logsDir": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic/2022-11-23_15-13-17-27000", + "testStart": "2022-11-23T15:13:17.787205", + "testEnd": "2022-11-23T15:14:40.753850" } }, "6": { - "success": false, - "searchTarget": 17000, - "searchFloor": 16500, - "searchCeiling": 17000, + 
"success": true, + "searchTarget": 26000, + "searchFloor": 25500, + "searchCeiling": 26500, "basicTestResult": { - "targetTPS": 17000, - "resultAvgTps": 15659.058823529413, - "expectedTxns": 170000, - "resultTxns": 170000, - "tpsExpectMet": false, + "targetTPS": 26000, + "resultAvgTps": 25959.0, + "expectedTxns": 260000, + "resultTxns": 260000, + "tpsExpectMet": true, "trxExpectMet": true, "basicTestSuccess": true, - "testAnalysisBlockCnt": 18, - "logsDir": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-25-11-17000", - "testStart": "2022-11-21T22:25:11.225775", - "testEnd": "2022-11-21T22:26:30.102913" + "testAnalysisBlockCnt": 17, + "logsDir": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic/2022-11-23_15-14-40-26000", + "testStart": "2022-11-23T15:14:40.823681", + "testEnd": "2022-11-23T15:16:02.884525" } }, "7": { - "success": false, - "searchTarget": 16500, - "searchFloor": 16500, - "searchCeiling": 16500, + "success": true, + "searchTarget": 26500, + "searchFloor": 26500, + "searchCeiling": 26500, "basicTestResult": { - "targetTPS": 16500, - "resultAvgTps": 15714.823529411764, - "expectedTxns": 165000, - "resultTxns": 165000, - "tpsExpectMet": false, + "targetTPS": 26500, + "resultAvgTps": 26400.5625, + "expectedTxns": 265000, + "resultTxns": 265000, + "tpsExpectMet": true, "trxExpectMet": true, "basicTestSuccess": true, - "testAnalysisBlockCnt": 18, - "logsDir": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-26-30-16500", - "testStart": "2022-11-21T22:26:30.155632", - "testEnd": "2022-11-21T22:27:48.093871" + "testAnalysisBlockCnt": 17, + "logsDir": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic/2022-11-23_15-16-02-26500", + "testStart": "2022-11-23T15:16:02.953195", + "testEnd": "2022-11-23T15:17:28.412837" } } }, @@ -622,22 +645,41 @@ Finally, the full detail test report for each of the determined max TPS throughp }, 
"LongRunningSearchResults": { "0": { + "success": false, + "searchTarget": 26500, + "searchFloor": 0, + "searchCeiling": 26500, + "basicTestResult": { + "targetTPS": 26500, + "resultAvgTps": 22554.42105263158, + "expectedTxns": 265000, + "resultTxns": 265000, + "tpsExpectMet": false, + "trxExpectMet": true, + "basicTestSuccess": true, + "testAnalysisBlockCnt": 20, + "logsDir": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic/2022-11-23_15-17-28-26500", + "testStart": "2022-11-23T15:17:28.483195", + "testEnd": "2022-11-23T15:18:52.048868" + } + }, + "1": { "success": true, - "searchTarget": 16000, + "searchTarget": 26000, "searchFloor": 0, - "searchCeiling": 16000, + "searchCeiling": 26500, "basicTestResult": { - "targetTPS": 16000, - "resultAvgTps": 15933.1875, - "expectedTxns": 160000, - "resultTxns": 160000, + "targetTPS": 26000, + "resultAvgTps": 25986.9375, + "expectedTxns": 260000, + "resultTxns": 260000, "tpsExpectMet": true, "trxExpectMet": true, "basicTestSuccess": true, "testAnalysisBlockCnt": 17, - "logsDir": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-27-48-16000", - "testStart": "2022-11-21T22:27:48.146027", - "testEnd": "2022-11-21T22:29:02.871273" + "logsDir": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic/2022-11-23_15-18-52-26000", + "testStart": "2022-11-23T15:18:52.115767", + "testEnd": "2022-11-23T15:20:16.911367" } } }, @@ -654,6 +696,38 @@ Finally, the full detail test report for each of the determined max TPS throughp }, }, + "ProducerThreadAnalysis": { + "recommendedThreadCount": 6, + "threadToMaxTpsDict": { + "2": 16000, + "3": 21000, + "4": 24000, + "5": 25500, + "6": 27000, + "7": 26000 + }, + "analysisStart": "2022-11-23T12:56:58.730271", + "analysisFinish": "2022-11-23T14:05:45.727625" + }, + "ChainThreadAnalysis": { + "recommendedThreadCount": 3, + "threadToMaxTpsDict": { + "2": 25000, + "3": 26500, + "4": 26500 + }, + "analysisStart": 
"2022-11-23T14:05:45.728348", + "analysisFinish": "2022-11-23T14:41:43.721885" + }, + "NetThreadAnalysis": { + "recommendedThreadCount": 2, + "threadToMaxTpsDict": { + "2": 25500, + "3": 25000 + }, + "analysisStart": "2022-11-23T14:41:43.722862", + "analysisFinish": "2022-11-23T15:05:42.004421" + }, "args": { "killAll": false, "dontKill": false, @@ -671,7 +745,7 @@ Finally, the full detail test report for each of the determined max TPS throughp "chainPluginArgs": { "signatureCpuBillablePct": 0, "chainStateDbSizeMb": 10240, - "chainThreads": 2, + "chainThreads": 3, "databaseMapMode": "mapped" }, "producerPluginArgs": { @@ -680,7 +754,7 @@ Finally, the full detail test report for each of the determined max TPS throughp "produceTimeOffsetUs": 0, "cpuEffortPercent": 100, "lastBlockCpuEffortPercent": 100, - "producerThreads": 2 + "producerThreads": 6 }, "httpPluginArgs": { "httpMaxResponseTimeMs": 990000 @@ -712,10 +786,15 @@ Finally, the full detail test report for each of the determined max TPS throughp "numAddlBlocksToPrune": 2, "quiet": false, "logDirRoot": ".", + "skipTpsTests": false, + "calcProducerThreads": "lmax", + "calcChainThreads": "lmax", + "calcNetThreads": "lmax", "logDirBase": "./performance_test", - "logDirTimestamp": "2022-11-21_22-17-03", - "logDirPath": "./performance_test/2022-11-21_22-17-03", - "ptbLogsDirPath": "./performance_test/2022-11-21_22-17-03/testRunLogs" + "logDirTimestamp": "2022-11-23_12-56-58", + "logDirPath": "./performance_test/2022-11-23_12-56-58", + "ptbLogsDirPath": "./performance_test/2022-11-23_12-56-58/testRunLogs", + "pluginThreadOptLogsDirPath": "./performance_test/2022-11-23_12-56-58/pluginThreadOptRunLogs" }, "env": { "system": "Linux", @@ -739,67 +818,70 @@ The Performance Test Basic generates, by default, a report that details results ``` json { "completedRun": true, - "testStart": "2022-11-21T22:27:48.146027", - "testFinish": "2022-11-21T22:29:02.871273", + "testStart": "2022-11-23T15:18:52.115767", + "testFinish": 
"2022-11-23T15:20:16.911367", "Analysis": { "BlockSize": { - "min": 1369536, - "max": 1624896, - "avg": 1530567.5294117648, - "sigma": 58850.381839050766, + "min": 1937088, + "max": 2971200, + "avg": 2493345.882352941, + "sigma": 186567.07030350564, "emptyBlocks": 0, "numBlocks": 17 }, "BlocksGuide": { "firstBlockNum": 2, - "lastBlockNum": 147, - "totalBlocks": 146, - "testStartBlockNum": 105, - "testEndBlockNum": 136, - "setupBlocksCnt": 103, - "tearDownBlocksCnt": 11, + "lastBlockNum": 165, + "totalBlocks": 164, + "testStartBlockNum": 106, + "testEndBlockNum": 149, + "setupBlocksCnt": 104, + "tearDownBlocksCnt": 16, "leadingEmptyBlocksCnt": 1, - "trailingEmptyBlocksCnt": 10, + "trailingEmptyBlocksCnt": 22, "configAddlDropCnt": 2, "testAnalysisBlockCnt": 17 }, "TPS": { - "min": 14996, - "max": 16486, - "avg": 15933.1875, - "sigma": 403.137727512261, + "min": 23164, + "max": 28791, + "avg": 25986.9375, + "sigma": 1033.1693634606816, "emptyBlocks": 0, "numBlocks": 17, - "configTps": 16000, + "configTps": 26000, "configTestDuration": 10, "tpsPerGenerator": [ - 4000, - 4000, - 4000, - 4000 + 3714, + 3714, + 3714, + 3714, + 3714, + 3715, + 3715 ], - "generatorCount": 4 + "generatorCount": 7 }, "TrxCPU": { "min": 7.0, - "max": 657.0, - "avg": 21.81190625, - "sigma": 9.853241319038672, - "samples": 160000 + "max": 10893.0, + "avg": 17.314342307692307, + "sigma": 41.16144172726996, + "samples": 260000 }, "TrxLatency": { "min": 0.0009999275207519531, - "max": 0.565000057220459, - "avg": 0.27573538126200436, - "sigma": 0.14606770516057177, - "samples": 160000 + "max": 0.6380000114440918, + "avg": 0.26549454224201346, + "sigma": 0.14674558675649374, + "samples": 260000 }, "TrxNet": { "min": 24.0, "max": 24.0, "avg": 24.0, "sigma": 0.0, - "samples": 160000 + "samples": 260000 } }, "args": { @@ -819,7 +901,7 @@ The Performance Test Basic generates, by default, a report that details results "chainPluginArgs": { "signatureCpuBillablePct": 0, "chainStateDbSizeMb": 10240, - 
"chainThreads": 2, + "chainThreads": 3, "databaseMapMode": "mapped" }, "producerPluginArgs": { @@ -828,7 +910,7 @@ The Performance Test Basic generates, by default, a report that details results "produceTimeOffsetUs": 0, "cpuEffortPercent": 100, "lastBlockCpuEffortPercent": 100, - "producerThreads": 2 + "producerThreads": 6 }, "httpPluginArgs": { "httpMaxResponseTimeMs": 990000 @@ -849,19 +931,19 @@ The Performance Test Basic generates, by default, a report that details results "1": "--plugin eosio::trace_api_plugin" }, "_totalNodes": 2, - "targetTps": 16000, + "targetTps": 26000, "testTrxGenDurationSec": 10, "tpsLimitPerGenerator": 4000, "numAddlBlocksToPrune": 2, - "logDirRoot": "./performance_test/2022-11-21_22-17-03/testRunLogs", + "logDirRoot": "./performance_test/2022-11-23_12-56-58/testRunLogs", "delReport": false, "quiet": false, "delPerfLogs": false, - "expectedTransactionsSent": 160000, - "logDirBase": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic", - "logDirTimestamp": "2022-11-21_22-27-48", - "logDirTimestampedOptSuffix": "-16000", - "logDirPath": "./performance_test/2022-11-21_22-17-03/testRunLogs/performance_test_basic/2022-11-21_22-27-48-16000" + "expectedTransactionsSent": 260000, + "logDirBase": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic", + "logDirTimestamp": "2022-11-23_15-18-52", + "logDirTimestampedOptSuffix": "-26000", + "logDirPath": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic/2022-11-23_15-18-52-26000" }, "env": { "system": "Linux", diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index 5c721e01ae..6dc4a959b2 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -1,5 +1,6 @@ #!/usr/bin/env python3 +import copy import math import os import sys @@ -15,6 +16,7 @@ from platform import release, system from dataclasses import dataclass, asdict, 
field from datetime import datetime +from enum import Enum class PerformanceTest: @@ -53,12 +55,25 @@ class PtConfig: numAddlBlocksToPrune: int=2 quiet: bool=False logDirRoot: str="." + skipTpsTests: bool=False + calcProducerThreads: str="none" + calcChainThreads: str="none" + calcNetThreads: str="none" + @dataclass - class PerfTestSearchResults: - maxTpsAchieved: int = 0 - searchResults: list = field(default_factory=list) #PerfTestSearchIndivResult list - maxTpsReport: dict = field(default_factory=dict) + class TpsTestResult: + @dataclass + class PerfTestSearchResults: + maxTpsAchieved: int = 0 + searchResults: list = field(default_factory=list) #PerfTestSearchIndivResult list + maxTpsReport: dict = field(default_factory=dict) + + binSearchResults: PerfTestSearchResults=PerfTestSearchResults() + longRunningSearchResults: PerfTestSearchResults=PerfTestSearchResults() + tpsTestStart: datetime=datetime.utcnow() + tpsTestFinish: datetime=datetime.utcnow() + perfRunSuccessful: bool=False @dataclass class LoggingConfig: @@ -66,10 +81,12 @@ class LoggingConfig: logDirTimestamp: str = f"{datetime.utcnow().strftime('%Y-%m-%d_%H-%M-%S')}" logDirPath: str = field(default_factory=str, init=False) ptbLogsDirPath: str = field(default_factory=str, init=False) + pluginThreadOptLogsDirPath: str = field(default_factory=str, init=False) def __post_init__(self): self.logDirPath = f"{self.logDirBase}/{self.logDirTimestamp}" self.ptbLogsDirPath = f"{self.logDirPath}/testRunLogs" + self.pluginThreadOptLogsDirPath = f"{self.logDirPath}/pluginThreadOptRunLogs" def __init__(self, testHelperConfig: PerformanceTestBasic.TestHelperConfig=PerformanceTestBasic.TestHelperConfig(), clusterConfig: PerformanceTestBasic.ClusterConfig=PerformanceTestBasic.ClusterConfig(), ptConfig=PtConfig()): @@ -77,12 +94,12 @@ def __init__(self, testHelperConfig: PerformanceTestBasic.TestHelperConfig=Perfo self.clusterConfig = clusterConfig self.ptConfig = ptConfig - self.testStart = datetime.utcnow() + 
self.testsStart = datetime.utcnow() self.loggingConfig = PerformanceTest.LoggingConfig(logDirBase=f"{self.ptConfig.logDirRoot}/{os.path.splitext(os.path.basename(__file__))[0]}", - logDirTimestamp=f"{self.testStart.strftime('%Y-%m-%d_%H-%M-%S')}") + logDirTimestamp=f"{self.testsStart.strftime('%Y-%m-%d_%H-%M-%S')}") - def performPtbBinarySearch(self) -> PerfTestSearchResults: + def performPtbBinarySearch(self, clusterConfig: PerformanceTestBasic.ClusterConfig, logDirRoot: str, delReport: bool, quiet: bool, delPerfLogs: bool) -> TpsTestResult.PerfTestSearchResults: floor = 0 ceiling = self.ptConfig.maxTpsToTest binSearchTarget = self.ptConfig.maxTpsToTest @@ -97,10 +114,10 @@ def performPtbBinarySearch(self) -> PerfTestSearchResults: ptbResult = PerformanceTest.PerfTestSearchIndivResult.PerfTestBasicResult() scenarioResult = PerformanceTest.PerfTestSearchIndivResult(success=False, searchTarget=binSearchTarget, searchFloor=floor, searchCeiling=ceiling, basicTestResult=ptbResult) ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=binSearchTarget, testTrxGenDurationSec=self.ptConfig.testDurationSec, tpsLimitPerGenerator=self.ptConfig.tpsLimitPerGenerator, - numAddlBlocksToPrune=self.ptConfig.numAddlBlocksToPrune, logDirRoot=self.loggingConfig.ptbLogsDirPath, delReport=self.ptConfig.delReport, quiet=self.ptConfig.quiet, - delPerfLogs=self.ptConfig.delPerfLogs) + numAddlBlocksToPrune=self.ptConfig.numAddlBlocksToPrune, logDirRoot=logDirRoot, delReport=delReport, + quiet=quiet, delPerfLogs=delPerfLogs) - myTest = PerformanceTestBasic(testHelperConfig=self.testHelperConfig, clusterConfig=self.clusterConfig, ptbConfig=ptbConfig) + myTest = PerformanceTestBasic(testHelperConfig=self.testHelperConfig, clusterConfig=clusterConfig, ptbConfig=ptbConfig) testSuccessful = myTest.runTest() if self.evaluateSuccess(myTest, testSuccessful, ptbResult): maxTpsAchieved = binSearchTarget @@ -117,9 +134,9 @@ def performPtbBinarySearch(self) -> PerfTestSearchResults: binSearchTarget = 
floor + (math.ceil(((ceiling - floor) / minStep) / 2) * minStep) - return PerformanceTest.PerfTestSearchResults(maxTpsAchieved=maxTpsAchieved, searchResults=searchResults, maxTpsReport=maxTpsReport) + return PerformanceTest.TpsTestResult.PerfTestSearchResults(maxTpsAchieved=maxTpsAchieved, searchResults=searchResults, maxTpsReport=maxTpsReport) - def performPtbReverseLinearSearch(self, tpsInitial: int) -> PerfTestSearchResults: + def performPtbReverseLinearSearch(self, tpsInitial: int) -> TpsTestResult.PerfTestSearchResults: # Default - Decrementing Max TPS in range [0, tpsInitial] absFloor = 0 @@ -156,7 +173,7 @@ def performPtbReverseLinearSearch(self, tpsInitial: int) -> PerfTestSearchResult if not self.ptConfig.quiet: print(f"searchResult: {searchTarget} : {searchResults[-1]}") - return PerformanceTest.PerfTestSearchResults(maxTpsAchieved=maxTpsAchieved, searchResults=searchResults, maxTpsReport=maxTpsReport) + return PerformanceTest.TpsTestResult.PerfTestSearchResults(maxTpsAchieved=maxTpsAchieved, searchResults=searchResults, maxTpsReport=maxTpsReport) def evaluateSuccess(self, test: PerformanceTestBasic, testSuccessful: bool, result: PerfTestSearchIndivResult.PerfTestBasicResult) -> bool: result.targetTPS = test.ptbConfig.targetTps @@ -178,29 +195,142 @@ def evaluateSuccess(self, test: PerformanceTestBasic, testSuccessful: bool, resu return result.basicTestSuccess and result.tpsExpectMet and result.trxExpectMet - def createReport(self, maxTpsAchieved, searchResults, maxTpsReport, longRunningMaxTpsAchieved, longRunningSearchResults, longRunningMaxTpsReport, testStart: datetime, testFinish: datetime, argsDict) -> dict: + class PluginThreadOpt(Enum): + PRODUCER = 1 + CHAIN = 2 + NET = 3 + + class PluginThreadOptRunType(Enum): + FULL = 1 + LOCAL_MAX = 2 + + @dataclass + class PluginThreadOptResult: + recommendedThreadCount: int = 0 + threadToMaxTpsDict: dict = field(default_factory=dict) + analysisStart: datetime = datetime.utcnow() + analysisFinish: datetime = 
datetime.utcnow() + + def optimizePluginThreadCount(self, optPlugin: PluginThreadOpt, optType: PluginThreadOptRunType=PluginThreadOptRunType.LOCAL_MAX, + minThreadCount: int=2, maxThreadCount: int=os.cpu_count()) -> PluginThreadOptResult: + + if optPlugin == PerformanceTest.PluginThreadOpt.PRODUCER: + fileName = "producerThreadResults.txt" + elif optPlugin == PerformanceTest.PluginThreadOpt.CHAIN: + fileName = "chainThreadResults.txt" + else: + fileName = "netThreadResults.txt" + + resultsFile = f"{self.loggingConfig.pluginThreadOptLogsDirPath}/{fileName}" + + threadToMaxTpsDict: dict = {} + + clusterConfig = copy.deepcopy(self.clusterConfig) + analysisStart = datetime.utcnow() + + with open(resultsFile, 'w') as log: + log.write(f"{optPlugin.name.lower()}Threads, maxTpsAchieved\n") + log.close() + + lastMaxTpsAchieved = 0 + for threadCount in range(minThreadCount, maxThreadCount+1): + print(f"Running {optPlugin.name.lower()} thread count optimization check with {threadCount} {optPlugin.name.lower()} threads") + + if optPlugin == PerformanceTest.PluginThreadOpt.PRODUCER: + clusterConfig.extraNodeosArgs.producerPluginArgs.producerThreads = threadCount + elif optPlugin == PerformanceTest.PluginThreadOpt.CHAIN: + clusterConfig.extraNodeosArgs.chainPluginArgs.chainThreads = threadCount + else: + clusterConfig.extraNodeosArgs.netPluginArgs.netThreads = threadCount + + binSearchResults = self.performPtbBinarySearch(clusterConfig=clusterConfig, logDirRoot=self.loggingConfig.pluginThreadOptLogsDirPath, + delReport=True, quiet=False, delPerfLogs=True) + + threadToMaxTpsDict[threadCount] = binSearchResults.maxTpsAchieved + if not self.ptConfig.quiet: + print("Search Results:") + for i in range(len(binSearchResults.searchResults)): + print(f"Search scenario {optPlugin.name} thread count {threadCount}: {i} result: {binSearchResults.searchResults[i]}") + + with open(resultsFile, 'a') as log: + log.write(f"{threadCount},{binSearchResults.maxTpsAchieved}\n") + log.close() + + if 
optType == PerformanceTest.PluginThreadOptRunType.LOCAL_MAX: + if binSearchResults.maxTpsAchieved <= lastMaxTpsAchieved: + break + lastMaxTpsAchieved = binSearchResults.maxTpsAchieved + + analysisFinish = datetime.utcnow() + + def calcLocalMax(threadToMaxDict: dict): + localMax = 0 + recThreadCount = 0 + for threads, tps in threadToMaxDict.items(): + if tps > localMax: + localMax = tps + recThreadCount = threads + else: + break + return recThreadCount + + recommendedThreadCount = calcLocalMax(threadToMaxDict=threadToMaxTpsDict) + + return PerformanceTest.PluginThreadOptResult(recommendedThreadCount=recommendedThreadCount, threadToMaxTpsDict=threadToMaxTpsDict, + analysisStart=analysisStart, analysisFinish=analysisFinish) + + def createTpsTestReport(self, tpsTestResult: TpsTestResult) -> dict: report = {} - report['InitialMaxTpsAchieved'] = maxTpsAchieved - report['LongRunningMaxTpsAchieved'] = longRunningMaxTpsAchieved - report['testStart'] = testStart - report['testFinish'] = testFinish - report['InitialSearchResults'] = {x: asdict(searchResults[x]) for x in range(len(searchResults))} - report['InitialMaxTpsReport'] = maxTpsReport - report['LongRunningSearchResults'] = {x: asdict(longRunningSearchResults[x]) for x in range(len(longRunningSearchResults))} - report['LongRunningMaxTpsReport'] = longRunningMaxTpsReport - report['args'] = argsDict + report['InitialMaxTpsAchieved'] = tpsTestResult.binSearchResults.maxTpsAchieved + report['LongRunningMaxTpsAchieved'] = tpsTestResult.longRunningSearchResults.maxTpsAchieved + report['tpsTestStart'] = tpsTestResult.tpsTestStart + report['tpsTestFinish'] = tpsTestResult.tpsTestFinish + report['InitialSearchResults'] = {x: asdict(tpsTestResult.binSearchResults.searchResults[x]) for x in range(len(tpsTestResult.binSearchResults.searchResults))} + report['InitialMaxTpsReport'] = tpsTestResult.binSearchResults.maxTpsReport + report['LongRunningSearchResults'] = {x: asdict(tpsTestResult.longRunningSearchResults.searchResults[x]) 
for x in range(len(tpsTestResult.longRunningSearchResults.searchResults))} + report['LongRunningMaxTpsReport'] = tpsTestResult.longRunningSearchResults.maxTpsReport + return report + + def createReport(self,producerThreadResult: PluginThreadOptResult=None, chainThreadResult: PluginThreadOptResult=None, netThreadResult: PluginThreadOptResult=None, tpsTestResult: dict=None) -> dict: + report = {} + report['perfTestsBegin'] = self.testsStart + report['perfTestsFinish'] = self.testsFinish + if tpsTestResult is not None: + report.update(self.createTpsTestReport(tpsTestResult)) + + if producerThreadResult is not None: + report['ProducerThreadAnalysis'] = asdict(producerThreadResult) + + if chainThreadResult is not None: + report['ChainThreadAnalysis'] = asdict(chainThreadResult) + + if netThreadResult is not None: + report['NetThreadAnalysis'] = asdict(netThreadResult) + + report['args'] = self.prepArgsDict() report['env'] = {'system': system(), 'os': os.name, 'release': release(), 'logical_cpu_count': os.cpu_count()} report['nodeosVersion'] = Utils.getNodeosVersion() return report - def createJSONReport(self, maxTpsAchieved, searchResults, maxTpsReport, longRunningMaxTpsAchieved, longRunningSearchResults, longRunningMaxTpsReport, testStart: datetime, testFinish: datetime, argsDict) -> json: - report = self.createReport(maxTpsAchieved=maxTpsAchieved, searchResults=searchResults, maxTpsReport=maxTpsReport, longRunningMaxTpsAchieved=longRunningMaxTpsAchieved, - longRunningSearchResults=longRunningSearchResults, longRunningMaxTpsReport=longRunningMaxTpsReport, testStart=testStart, testFinish=testFinish, argsDict=argsDict) - return self.reportAsJSON(report) - def reportAsJSON(self, report: dict) -> json: - report['testStart'] = "Unknown" if report['testStart'] is None else report['testStart'].isoformat() - report['testFinish'] = "Unknown" if report['testFinish'] is None else report['testFinish'].isoformat() + if 'ProducerThreadAnalysis' in report: + 
report['ProducerThreadAnalysis']['analysisStart'] = report['ProducerThreadAnalysis']['analysisStart'].isoformat() + report['ProducerThreadAnalysis']['analysisFinish'] = report['ProducerThreadAnalysis']['analysisFinish'].isoformat() + if 'ChainThreadAnalysis' in report: + report['ChainThreadAnalysis']['analysisStart'] = report['ChainThreadAnalysis']['analysisStart'].isoformat() + report['ChainThreadAnalysis']['analysisFinish'] = report['ChainThreadAnalysis']['analysisFinish'].isoformat() + if 'NetThreadAnalysis' in report: + report['NetThreadAnalysis']['analysisStart'] = report['NetThreadAnalysis']['analysisStart'].isoformat() + report['NetThreadAnalysis']['analysisFinish'] = report['NetThreadAnalysis']['analysisFinish'].isoformat() + + if 'tpsTestStart' in report: + report['tpsTestStart'] = report['tpsTestStart'].isoformat() + if 'tpsTestFinish' in report: + report['tpsTestFinish'] = report['tpsTestFinish'].isoformat() + + report['perfTestsBegin'] = report['perfTestsBegin'].isoformat() + report['perfTestsFinish'] = report['perfTestsFinish'].isoformat() + return json.dumps(report, indent=2) def exportReportAsJSON(self, report: json, exportPath): @@ -217,6 +347,7 @@ def removeArtifacts(path): if not self.ptConfig.delReport: removeArtifacts(self.loggingConfig.ptbLogsDirPath) + removeArtifacts(self.loggingConfig.pluginThreadOptLogsDirPath) else: removeArtifacts(self.loggingConfig.logDirPath) except OSError as error: @@ -233,6 +364,7 @@ def createArtifactsDir(path): createArtifactsDir(self.loggingConfig.logDirBase) createArtifactsDir(self.loggingConfig.logDirPath) createArtifactsDir(self.loggingConfig.ptbLogsDirPath) + createArtifactsDir(self.loggingConfig.pluginThreadOptLogsDirPath) except OSError as error: print(error) @@ -245,55 +377,92 @@ def prepArgsDict(self) -> dict: argsDict.update(asdict(self.loggingConfig)) return argsDict - def performTpsTest(self): + def performTpsTest(self) -> TpsTestResult: + tpsTestStart = datetime.utcnow() perfRunSuccessful = False - 
try: - binSearchResults = self.performPtbBinarySearch() + binSearchResults = self.performPtbBinarySearch(clusterConfig=self.clusterConfig, logDirRoot=self.loggingConfig.ptbLogsDirPath, + delReport=self.ptConfig.delReport, quiet=self.ptConfig.quiet, delPerfLogs=self.ptConfig.delPerfLogs) - print(f"Successful rate of: {binSearchResults.maxTpsAchieved}") + print(f"Successful rate of: {binSearchResults.maxTpsAchieved}") - if not self.ptConfig.quiet: - print("Search Results:") - for i in range(len(binSearchResults.searchResults)): - print(f"Search scenario: {i} result: {binSearchResults.searchResults[i]}") + if not self.ptConfig.quiet: + print("Search Results:") + for i in range(len(binSearchResults.searchResults)): + print(f"Search scenario: {i} result: {binSearchResults.searchResults[i]}") - longRunningSearchResults = self.performPtbReverseLinearSearch(tpsInitial=binSearchResults.maxTpsAchieved) + longRunningSearchResults = self.performPtbReverseLinearSearch(tpsInitial=binSearchResults.maxTpsAchieved) - print(f"Long Running Test - Successful rate of: {longRunningSearchResults.maxTpsAchieved}") - perfRunSuccessful = True + print(f"Long Running Test - Successful rate of: {longRunningSearchResults.maxTpsAchieved}") + perfRunSuccessful = True - if not self.ptConfig.quiet: - print("Long Running Test - Search Results:") - for i in range(len(longRunningSearchResults.searchResults)): - print(f"Search scenario: {i} result: {longRunningSearchResults.searchResults[i]}") + if not self.ptConfig.quiet: + print("Long Running Test - Search Results:") + for i in range(len(longRunningSearchResults.searchResults)): + print(f"Search scenario: {i} result: {longRunningSearchResults.searchResults[i]}") - testFinish = datetime.utcnow() - argsDict = self.prepArgsDict() - fullReport = self.createJSONReport(maxTpsAchieved=binSearchResults.maxTpsAchieved, searchResults=binSearchResults.searchResults, maxTpsReport=binSearchResults.maxTpsReport, - 
longRunningMaxTpsAchieved=longRunningSearchResults.maxTpsAchieved, longRunningSearchResults=longRunningSearchResults.searchResults, - longRunningMaxTpsReport=longRunningSearchResults.maxTpsReport, testStart=self.testStart, testFinish=testFinish, argsDict=argsDict) + tpsTestFinish = datetime.utcnow() - if not self.ptConfig.quiet: - print(f"Full Performance Test Report: {fullReport}") + return PerformanceTest.TpsTestResult(binSearchResults=binSearchResults, longRunningSearchResults=longRunningSearchResults, + tpsTestStart=tpsTestStart, tpsTestFinish=tpsTestFinish, perfRunSuccessful=perfRunSuccessful) - if not self.ptConfig.delReport: - self.exportReportAsJSON(fullReport, f"{self.loggingConfig.logDirPath}/report.json") + def runTest(self): + testSuccessful = True - finally: + TestHelper.printSystemInfo("BEGIN") + self.testDirsCleanup() + self.testDirsSetup() - if self.ptConfig.delPerfLogs: - print(f"Cleaning up logs directory: {self.loggingConfig.logDirPath}") - self.testDirsCleanup() + if self.ptConfig.calcProducerThreads != "none": + print(f"Performing Producer Thread Optimization Tests") + if self.ptConfig.calcProducerThreads == "full": + optType = PerformanceTest.PluginThreadOptRunType.FULL + else: + optType = PerformanceTest.PluginThreadOptRunType.LOCAL_MAX + prodResults = self.optimizePluginThreadCount(optPlugin=PerformanceTest.PluginThreadOpt.PRODUCER, optType=optType) + print(f"Producer Thread Optimization results: {prodResults}") + self.clusterConfig.extraNodeosArgs.producerPluginArgs.producerThreads = prodResults.recommendedThreadCount + + if self.ptConfig.calcChainThreads: + print(f"Performing Chain Thread Optimization Tests") + if self.ptConfig.calcChainThreads == "full": + optType = PerformanceTest.PluginThreadOptRunType.FULL + else: + optType = PerformanceTest.PluginThreadOptRunType.LOCAL_MAX + chainResults = self.optimizePluginThreadCount(optPlugin=PerformanceTest.PluginThreadOpt.CHAIN, optType=optType) + print(f"Chain Thread Optimization results: 
{chainResults}") + self.clusterConfig.extraNodeosArgs.chainPluginArgs.chainThreads = chainResults.recommendedThreadCount + + if self.ptConfig.calcNetThreads: + print(f"Performing Net Thread Optimization Tests") + if self.ptConfig.calcNetThreads == "full": + optType = PerformanceTest.PluginThreadOptRunType.FULL + else: + optType = PerformanceTest.PluginThreadOptRunType.LOCAL_MAX + netResults = self.optimizePluginThreadCount(optPlugin=PerformanceTest.PluginThreadOpt.NET, optType=optType) + print(f"Net Thread Optimization results: {netResults}") + self.clusterConfig.extraNodeosArgs.netPluginArgs.netThreads = netResults.recommendedThreadCount - return perfRunSuccessful + if not self.ptConfig.skipTpsTests: + print(f"Performing TPS Performance Tests") + testSuccessful = False + tpsTestResult = self.performTpsTest() + testSuccessful = tpsTestResult.perfRunSuccessful - def runTest(self): + self.testsFinish = datetime.utcnow() - TestHelper.printSystemInfo("BEGIN") - self.testDirsSetup() + self.report = self.createReport(producerThreadResult=prodResults, chainThreadResult=chainResults, netThreadResult=netResults, tpsTestResult=tpsTestResult) + jsonReport = self.reportAsJSON(self.report) + + if not self.ptConfig.quiet: + print(f"Full Performance Test Report: {jsonReport}") + + if not self.ptConfig.delReport: + self.exportReportAsJSON(jsonReport, f"{self.loggingConfig.logDirPath}/report.json") - testSuccessful = self.performTpsTest() + if self.ptConfig.delPerfLogs: + print(f"Cleaning up logs directory: {self.loggingConfig.logDirPath}") + self.testDirsCleanup() return testSuccessful @@ -327,6 +496,25 @@ def parseArgs(): appArgs.add_bool(flag="--del-test-report", help="Whether to save json reports from each test scenario.") appArgs.add_bool(flag="--quiet", help="Whether to quiet printing intermediate results and reports to stdout") appArgs.add_bool(flag="--prods-enable-trace-api", help="Determines whether producer nodes should have eosio::trace_api_plugin enabled") + 
appArgs.add_bool(flag="--skip-tps-test", help="Determines whether to skip the max TPS measurement tests") + appArgs.add(flag="--calc-producer-threads", type=str, help="Determines whether to calculate number of worker threads to use in producer thread pool (\"none\", \"lmax\", or \"full\"). \ + In \"none\" mode, the default, no calculation will be attempted and default configured --producer-threads value will be used. \ + In \"lmax\" mode, producer threads will incrementally be tested until the performance rate ceases to increase with the addition of additional threads. \ + In \"full\" mode producer threads will incrementally be tested from 2..num logical processors, recording each performance and choosing the local max performance (same value as would be discovered in \"lmax\" mode). \ + Useful for graphing the full performance impact of each available thread.", + choices=["none", "lmax", "full"], default="none") + appArgs.add(flag="--calc-chain-threads", type=str, help="Determines whether to calculate number of worker threads to use in chain thread pool (\"none\", \"lmax\", or \"full\"). \ + In \"none\" mode, the default, no calculation will be attempted and default configured --chain-threads value will be used. \ + In \"lmax\" mode, producer threads will incrementally be tested until the performance rate ceases to increase with the addition of additional threads. \ + In \"full\" mode producer threads will incrementally be tested from 2..num logical processors, recording each performance and choosing the local max performance (same value as would be discovered in \"lmax\" mode). \ + Useful for graphing the full performance impact of each available thread.", + choices=["none", "lmax", "full"], default="none") + appArgs.add(flag="--calc-net-threads", type=str, help="Determines whether to calculate number of worker threads to use in net thread pool (\"none\", \"lmax\", or \"full\"). 
\ + In \"none\" mode, the default, no calculation will be attempted and default configured --net-threads value will be used. \ + In \"lmax\" mode, producer threads will incrementally be tested until the performance rate ceases to increase with the addition of additional threads. \ + In \"full\" mode producer threads will incrementally be tested from 2..num logical processors, recording each performance and choosing the local max performance (same value as would be discovered in \"lmax\" mode). \ + Useful for graphing the full performance impact of each available thread.", + choices=["none", "lmax", "full"], default="none") args=TestHelper.parse_args({"-p","-n","-d","-s","--nodes-file" ,"--dump-error-details","-v","--leave-running" ,"--clean-run"}, applicationSpecificArgs=appArgs) @@ -364,7 +552,11 @@ def main(): delTestReport=args.del_test_report, numAddlBlocksToPrune=args.num_blocks_to_prune, quiet=args.quiet, - logDirRoot=".") + logDirRoot=".", + skipTpsTests=args.skip_tps_test, + calcProducerThreads=args.calc_producer_threads, + calcChainThreads=args.calc_chain_threads, + calcNetThreads=args.calc_net_threads) myTest = PerformanceTest(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptConfig=ptConfig) perfRunSuccessful = myTest.runTest() From 92fa92a1b21564e27f3f39f1caa0a15c180d01df Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 23 Nov 2022 12:44:38 -0600 Subject: [PATCH 015/178] Update defaults for producer threads to 6 and chain threads to 3 based on performance run calculations. 
--- tests/performance_tests/README.md | 8 ++++---- tests/performance_tests/performance_test_basic.py | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/performance_tests/README.md b/tests/performance_tests/README.md index b76b0b11e8..ec14127cd6 100644 --- a/tests/performance_tests/README.md +++ b/tests/performance_tests/README.md @@ -229,7 +229,7 @@ The Performance Harness main script `performance_test.py` can be configured usin * `--chain-state-db-size-mb CHAIN_STATE_DB_SIZE_MB` Maximum size (in MiB) of the chain state database (default: 10240) * `--chain-threads CHAIN_THREADS` - Number of worker threads in controller thread pool (default: 2) + Number of worker threads in controller thread pool (default: 3) * `--database-map-mode {mapped,heap,locked}` Database map mode ("mapped", "heap", or "locked"). In "mapped" mode database is memory mapped as a file. @@ -248,7 +248,7 @@ The Performance Harness main script `performance_test.py` can be configured usin * `--last-block-cpu-effort-percent LAST_BLOCK_CPU_EFFORT_PERCENT` Percentage of cpu block production time used to produce last block. Whole number percentages, e.g. 80 for 80% (default: 100) * `--producer-threads PRODUCER_THREADS` - Number of worker threads in producer thread pool (default: 2) + Number of worker threads in producer thread pool (default: 6) * `--http-max-response-time-ms HTTP_MAX_RESPONSE_TIME_MS` Maximum time for processing a request, -1 for unlimited (default: 990000) * `--del-perf-logs` Whether to delete performance test specific logs. 
(default: False) @@ -311,7 +311,7 @@ The following scripts are typically used by the Performance Harness main script * `--chain-state-db-size-mb CHAIN_STATE_DB_SIZE_MB` Maximum size (in MiB) of the chain state database (default: 10240) * `--chain-threads CHAIN_THREADS` - Number of worker threads in controller thread pool (default: 2) + Number of worker threads in controller thread pool (default: 3) * `--database-map-mode {mapped,heap,locked}` Database map mode ("mapped", "heap", or "locked"). In "mapped" mode database is memory mapped as a file. @@ -330,7 +330,7 @@ The following scripts are typically used by the Performance Harness main script * `--last-block-cpu-effort-percent LAST_BLOCK_CPU_EFFORT_PERCENT` Percentage of cpu block production time used to produce last block. Whole number percentages, e.g. 80 for 80% (default: 100) * `--producer-threads PRODUCER_THREADS` - Number of worker threads in producer thread pool (default: 2) + Number of worker threads in producer thread pool (default: 6) * `--http-max-response-time-ms HTTP_MAX_RESPONSE_TIME_MS` Maximum time for processing a request, -1 for unlimited (default: 990000) * `--del-perf-logs` Whether to delete performance test specific logs. 
(default: False) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 8ffe58ed3e..cf7496a668 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -51,7 +51,7 @@ class ExtraNodeosArgs: class ChainPluginArgs: signatureCpuBillablePct: int = 0 chainStateDbSizeMb: int = 10 * 1024 - chainThreads: int = 2 + chainThreads: int = 3 databaseMapMode: str = "mapped" def __str__(self) -> str: @@ -74,7 +74,7 @@ class ProducerPluginArgs: produceTimeOffsetUs: int = 0 cpuEffortPercent: int = 100 lastBlockCpuEffortPercent: int = 100 - producerThreads: int = 2 + producerThreads: int = 6 def __str__(self) -> str: return f"--disable-subjective-billing {self.disableSubjectiveBilling} \ From 2ca2eb9d44b8fbe2de74c784bdade08a2bd8c0fd Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 23 Nov 2022 13:17:53 -0600 Subject: [PATCH 016/178] Fix typo. --- tests/performance_tests/performance_test_basic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index cf7496a668..1e4610f8a7 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -119,7 +119,7 @@ def __post_init__(self): @dataclass class PtbConfig: - targetTps: int=8000, + targetTps: int=8000 testTrxGenDurationSec: int=30 tpsLimitPerGenerator: int=4000 numAddlBlocksToPrune: int=2 From 1db0e6c7c347dfc3f314fa3a66c6f24dc54218cc Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 28 Nov 2022 11:14:06 -0600 Subject: [PATCH 017/178] Fix check for thread calc being 'none' --- tests/performance_tests/performance_test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index 6dc4a959b2..ae8fb3cbfe 100755 --- 
a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -423,7 +423,7 @@ def runTest(self): print(f"Producer Thread Optimization results: {prodResults}") self.clusterConfig.extraNodeosArgs.producerPluginArgs.producerThreads = prodResults.recommendedThreadCount - if self.ptConfig.calcChainThreads: + if self.ptConfig.calcChainThreads != "none": print(f"Performing Chain Thread Optimization Tests") if self.ptConfig.calcChainThreads == "full": optType = PerformanceTest.PluginThreadOptRunType.FULL @@ -433,7 +433,7 @@ def runTest(self): print(f"Chain Thread Optimization results: {chainResults}") self.clusterConfig.extraNodeosArgs.chainPluginArgs.chainThreads = chainResults.recommendedThreadCount - if self.ptConfig.calcNetThreads: + if self.ptConfig.calcNetThreads != "none": print(f"Performing Net Thread Optimization Tests") if self.ptConfig.calcNetThreads == "full": optType = PerformanceTest.PluginThreadOptRunType.FULL From 84b11790c469fdfc00eeb259494d9be8c7da51cf Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 28 Nov 2022 11:41:06 -0600 Subject: [PATCH 018/178] Fix reference before assignment error on results objects when not configured to run tests. 
--- tests/performance_tests/performance_test.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index ae8fb3cbfe..6e11a2c833 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -413,6 +413,7 @@ def runTest(self): self.testDirsCleanup() self.testDirsSetup() + prodResults = None if self.ptConfig.calcProducerThreads != "none": print(f"Performing Producer Thread Optimization Tests") if self.ptConfig.calcProducerThreads == "full": @@ -423,6 +424,7 @@ def runTest(self): print(f"Producer Thread Optimization results: {prodResults}") self.clusterConfig.extraNodeosArgs.producerPluginArgs.producerThreads = prodResults.recommendedThreadCount + chainResults = None if self.ptConfig.calcChainThreads != "none": print(f"Performing Chain Thread Optimization Tests") if self.ptConfig.calcChainThreads == "full": @@ -433,6 +435,7 @@ def runTest(self): print(f"Chain Thread Optimization results: {chainResults}") self.clusterConfig.extraNodeosArgs.chainPluginArgs.chainThreads = chainResults.recommendedThreadCount + netResults = None if self.ptConfig.calcNetThreads != "none": print(f"Performing Net Thread Optimization Tests") if self.ptConfig.calcNetThreads == "full": @@ -443,6 +446,7 @@ def runTest(self): print(f"Net Thread Optimization results: {netResults}") self.clusterConfig.extraNodeosArgs.netPluginArgs.netThreads = netResults.recommendedThreadCount + tpsTestResult = None if not self.ptConfig.skipTpsTests: print(f"Performing TPS Performance Tests") testSuccessful = False From ed975c6425e4acca0057698f97c2c9dc696719dc Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 29 Nov 2022 09:04:29 -0600 Subject: [PATCH 019/178] Make use of ArgumentParser parents and groups. This will greatly reduce work to bubble up arguments between scripts/modules. Also brings clarity and grouping of arguments by use with group titles and descriptions. 
Test Helper arguments can now be bubbled up to user scripts via direct access to the ArgumentParser. Performance Test Basic makes use of Test Helper arguments. Performance Test makes use of both Performance Test Basic and Test Helper arguments as well. --- tests/TestHarness/TestHelper.py | 75 +++++++------ tests/performance_tests/performance_test.py | 104 +++++++++--------- .../performance_test_basic.py | 93 ++++++++++------ 3 files changed, 153 insertions(+), 119 deletions(-) diff --git a/tests/TestHarness/TestHelper.py b/tests/TestHarness/TestHelper.py index 724f512a22..e9e177cd42 100644 --- a/tests/TestHarness/TestHelper.py +++ b/tests/TestHarness/TestHelper.py @@ -37,87 +37,100 @@ class TestHelper(object): @staticmethod # pylint: disable=too-many-branches # pylint: disable=too-many-statements - def parse_args(includeArgs, applicationSpecificArgs=AppArgs()): + def createArgumentParser(includeArgs, applicationSpecificArgs=AppArgs()) -> argparse.ArgumentParser: """Accepts set of arguments, builds argument parser and returns parse_args() output.""" assert(includeArgs) assert(isinstance(includeArgs, set)) assert(isinstance(applicationSpecificArgs, AppArgs)) - parser = argparse.ArgumentParser(add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument('-?', action='help', default=argparse.SUPPRESS, + thParser = argparse.ArgumentParser(add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter) + thGrpTitle = "Test Helper Arguments" + thGrpDescription="Test Helper configuration items used to configure and spin up the regression test framework and blockchain environment."
+ thGrp = thParser.add_argument_group(title=thGrpTitle, description=thGrpDescription) + thGrp.add_argument('-?', action='help', default=argparse.SUPPRESS, help=argparse._('show this help message and exit')) if "-p" in includeArgs: - parser.add_argument("-p", type=int, help="producing nodes count", default=1) + thGrp.add_argument("-p", type=int, help="producing nodes count", default=1) if "-n" in includeArgs: - parser.add_argument("-n", type=int, help="total nodes", default=0) + thGrp.add_argument("-n", type=int, help="total nodes", default=0) if "-d" in includeArgs: - parser.add_argument("-d", type=int, help="delay between nodes startup", default=1) + thGrp.add_argument("-d", type=int, help="delay between nodes startup", default=1) if "--nodes-file" in includeArgs: - parser.add_argument("--nodes-file", type=str, help="File containing nodes info in JSON format.") + thGrp.add_argument("--nodes-file", type=str, help="File containing nodes info in JSON format.") if "-s" in includeArgs: - parser.add_argument("-s", type=str, help="topology", choices=["mesh"], default="mesh") + thGrp.add_argument("-s", type=str, help="topology", choices=["mesh"], default="mesh") if "-c" in includeArgs: - parser.add_argument("-c", type=str, help="chain strategy", + thGrp.add_argument("-c", type=str, help="chain strategy", choices=[Utils.SyncResyncTag, Utils.SyncReplayTag, Utils.SyncNoneTag, Utils.SyncHardReplayTag], default=Utils.SyncResyncTag) if "--kill-sig" in includeArgs: - parser.add_argument("--kill-sig", type=str, choices=[Utils.SigKillTag, Utils.SigTermTag], help="kill signal.", + thGrp.add_argument("--kill-sig", type=str, choices=[Utils.SigKillTag, Utils.SigTermTag], help="kill signal.", default=Utils.SigKillTag) if "--kill-count" in includeArgs: - parser.add_argument("--kill-count", type=int, help="nodeos instances to kill", default=-1) + thGrp.add_argument("--kill-count", type=int, help="nodeos instances to kill", default=-1) if "--terminate-at-block" in includeArgs: - 
parser.add_argument("--terminate-at-block", type=int, help="block to terminate on when replaying", default=0) + thGrp.add_argument("--terminate-at-block", type=int, help="block to terminate on when replaying", default=0) if "--seed" in includeArgs: - parser.add_argument("--seed", type=int, help="random seed", default=1) + thGrp.add_argument("--seed", type=int, help="random seed", default=1) if "--host" in includeArgs: - parser.add_argument("-h", "--host", type=str, help="%s host name" % (Utils.EosServerName), + thGrp.add_argument("-h", "--host", type=str, help="%s host name" % (Utils.EosServerName), default=TestHelper.LOCAL_HOST) if "--port" in includeArgs: - parser.add_argument("--port", type=int, help="%s host port" % Utils.EosServerName, + thGrp.add_argument("--port", type=int, help="%s host port" % Utils.EosServerName, default=TestHelper.DEFAULT_PORT) if "--wallet-host" in includeArgs: - parser.add_argument("--wallet-host", type=str, help="%s host" % Utils.EosWalletName, + thGrp.add_argument("--wallet-host", type=str, help="%s host" % Utils.EosWalletName, default=TestHelper.LOCAL_HOST) if "--wallet-port" in includeArgs: - parser.add_argument("--wallet-port", type=int, help="%s port" % Utils.EosWalletName, + thGrp.add_argument("--wallet-port", type=int, help="%s port" % Utils.EosWalletName, default=TestHelper.DEFAULT_WALLET_PORT) if "--prod-count" in includeArgs: - parser.add_argument("-c", "--prod-count", type=int, help="Per node producer count", default=1) + thGrp.add_argument("-c", "--prod-count", type=int, help="Per node producer count", default=1) if "--defproducera_prvt_key" in includeArgs: - parser.add_argument("--defproducera_prvt_key", type=str, help="defproducera private key.") + thGrp.add_argument("--defproducera_prvt_key", type=str, help="defproducera private key.") if "--defproducerb_prvt_key" in includeArgs: - parser.add_argument("--defproducerb_prvt_key", type=str, help="defproducerb private key.") + thGrp.add_argument("--defproducerb_prvt_key", 
type=str, help="defproducerb private key.") if "--dump-error-details" in includeArgs: - parser.add_argument("--dump-error-details", + thGrp.add_argument("--dump-error-details", help="Upon error print etc/eosio/node_*/config.ini and var/lib/node_*/stderr.log to stdout", action='store_true') if "--dont-launch" in includeArgs: - parser.add_argument("--dont-launch", help="Don't launch own node. Assume node is already running.", + thGrp.add_argument("--dont-launch", help="Don't launch own node. Assume node is already running.", action='store_true') if "--keep-logs" in includeArgs: - parser.add_argument("--keep-logs", help="Don't delete var/lib/node_* folders, or other test specific log directories, upon test completion", + thGrp.add_argument("--keep-logs", help="Don't delete var/lib/node_* folders, or other test specific log directories, upon test completion", action='store_true') if "-v" in includeArgs: - parser.add_argument("-v", help="verbose logging", action='store_true') + thGrp.add_argument("-v", help="verbose logging", action='store_true') if "--leave-running" in includeArgs: - parser.add_argument("--leave-running", help="Leave cluster running after test finishes", action='store_true') + thGrp.add_argument("--leave-running", help="Leave cluster running after test finishes", action='store_true') if "--only-bios" in includeArgs: - parser.add_argument("--only-bios", help="Limit testing to bios node.", action='store_true') + thGrp.add_argument("--only-bios", help="Limit testing to bios node.", action='store_true') if "--clean-run" in includeArgs: - parser.add_argument("--clean-run", help="Kill all nodeos and keosd instances", action='store_true') + thGrp.add_argument("--clean-run", help="Kill all nodeos and keosd instances", action='store_true') if "--sanity-test" in includeArgs: - parser.add_argument("--sanity-test", help="Validates nodeos and keosd are in path and can be started up.", action='store_true') + thGrp.add_argument("--sanity-test", help="Validates nodeos 
and keosd are in path and can be started up.", action='store_true') if "--alternate-version-labels-file" in includeArgs: - parser.add_argument("--alternate-version-labels-file", type=str, help="Provide a file to define the labels that can be used in the test and the path to the version installation associated with that.") + thGrp.add_argument("--alternate-version-labels-file", type=str, help="Provide a file to define the labels that can be used in the test and the path to the version installation associated with that.") + appArgsGrpTitle="Application Specific Arguments" + appArgsGrpdescription="Test Helper configuration items used to configure and spin up the regression test framework and blockchain environment." + appArgsGrp = thParser.add_argument_group(title=appArgsGrpTitle, description=appArgsGrpdescription) for arg in applicationSpecificArgs.args: if arg.type is not None: - parser.add_argument(arg.flag, type=arg.type, help=arg.help, choices=arg.choices, default=arg.default) + appArgsGrp.add_argument(arg.flag, type=arg.type, help=arg.help, choices=arg.choices, default=arg.default) else: - parser.add_argument(arg.flag, help=arg.help, action=arg.action) + appArgsGrp.add_argument(arg.flag, help=arg.help, action=arg.action) + + return thParser + @staticmethod + # pylint: disable=too-many-branches + # pylint: disable=too-many-statements + def parse_args(includeArgs, applicationSpecificArgs=AppArgs()): + parser = TestHelper.createArgumentParser(includeArgs=includeArgs, applicationSpecificArgs=applicationSpecificArgs) args = parser.parse_args() return args diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index 6e11a2c833..ee2b77dd84 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -1,5 +1,6 @@ #!/usr/bin/env python3 +import argparse import copy import math import os @@ -12,7 +13,7 @@ from TestHarness import TestHelper, Utils from TestHarness.TestHelper 
import AppArgs -from performance_test_basic import PerformanceTestBasic +from performance_test_basic import PerformanceTestBasic, PtbArgumentsHandler from platform import release, system from dataclasses import dataclass, asdict, field from datetime import datetime @@ -470,63 +471,56 @@ def runTest(self): return testSuccessful -def parseArgs(): - appArgs=AppArgs() - appArgs.add(flag="--max-tps-to-test", type=int, help="The max target transfers realistic as ceiling of test range", default=50000) - appArgs.add(flag="--test-iteration-duration-sec", type=int, help="The duration of transfer trx generation for each iteration of the test during the initial search (seconds)", default=150) - appArgs.add(flag="--test-iteration-min-step", type=int, help="The step size determining granularity of tps result during initial search", default=500) - appArgs.add(flag="--final-iterations-duration-sec", type=int, help="The duration of transfer trx generation for each final longer run iteration of the test during the final search (seconds)", default=300) - appArgs.add(flag="--tps-limit-per-generator", type=int, help="Maximum amount of transactions per second a single generator can have.", default=4000) - appArgs.add(flag="--genesis", type=str, help="Path to genesis.json", default="tests/performance_tests/genesis.json") - appArgs.add(flag="--num-blocks-to-prune", type=int, help="The number of potentially non-empty blocks, in addition to leading and trailing size 0 blocks, to prune from the beginning and end of the range of blocks of interest for evaluation.", default=2) - appArgs.add(flag="--signature-cpu-billable-pct", type=int, help="Percentage of actual signature recovery cpu to bill. Whole number percentages, e.g. 
50 for 50%%", default=0) - appArgs.add(flag="--chain-state-db-size-mb", type=int, help="Maximum size (in MiB) of the chain state database", default=10*1024) - appArgs.add(flag="--chain-threads", type=int, help="Number of worker threads in controller thread pool", default=2) - appArgs.add(flag="--database-map-mode", type=str, help="Database map mode (\"mapped\", \"heap\", or \"locked\"). \ - In \"mapped\" mode database is memory mapped as a file. \ - In \"heap\" mode database is preloaded in to swappable memory and will use huge pages if available. \ - In \"locked\" mode database is preloaded, locked in to memory, and will use huge pages if available.", - choices=["mapped", "heap", "locked"], default="mapped") - appArgs.add(flag="--net-threads", type=int, help="Number of worker threads in net_plugin thread pool", default=2) - appArgs.add(flag="--disable-subjective-billing", type=bool, help="Disable subjective CPU billing for API/P2P transactions", default=True) - appArgs.add(flag="--last-block-time-offset-us", type=int, help="Offset of last block producing time in microseconds. Valid range 0 .. -block_time_interval.", default=0) - appArgs.add(flag="--produce-time-offset-us", type=int, help="Offset of non last block producing time in microseconds. Valid range 0 .. -block_time_interval.", default=0) - appArgs.add(flag="--cpu-effort-percent", type=int, help="Percentage of cpu block production time used to produce block. Whole number percentages, e.g. 80 for 80%%", default=100) - appArgs.add(flag="--last-block-cpu-effort-percent", type=int, help="Percentage of cpu block production time used to produce last block. Whole number percentages, e.g. 
80 for 80%%", default=100) - appArgs.add(flag="--producer-threads", type=int, help="Number of worker threads in producer thread pool", default=2) - appArgs.add(flag="--http-max-response-time-ms", type=int, help="Maximum time for processing a request, -1 for unlimited", default=990000) - appArgs.add_bool(flag="--del-perf-logs", help="Whether to delete performance test specific logs.") - appArgs.add_bool(flag="--del-report", help="Whether to delete overarching performance run report.") - appArgs.add_bool(flag="--del-test-report", help="Whether to save json reports from each test scenario.") - appArgs.add_bool(flag="--quiet", help="Whether to quiet printing intermediate results and reports to stdout") - appArgs.add_bool(flag="--prods-enable-trace-api", help="Determines whether producer nodes should have eosio::trace_api_plugin enabled") - appArgs.add_bool(flag="--skip-tps-test", help="Determines whether to skip the max TPS measurement tests") - appArgs.add(flag="--calc-producer-threads", type=str, help="Determines whether to calculate number of worker threads to use in producer thread pool (\"none\", \"lmax\", or \"full\"). \ - In \"none\" mode, the default, no calculation will be attempted and default configured --producer-threads value will be used. \ - In \"lmax\" mode, producer threads will incrementally be tested until the performance rate ceases to increase with the addition of additional threads. \ - In \"full\" mode producer threads will incrementally be tested from 2..num logical processors, recording each performance and choosing the local max performance (same value as would be discovered in \"lmax\" mode). \ - Useful for graphing the full performance impact of each available thread.", - choices=["none", "lmax", "full"], default="none") - appArgs.add(flag="--calc-chain-threads", type=str, help="Determines whether to calculate number of worker threads to use in chain thread pool (\"none\", \"lmax\", or \"full\"). 
\ - In \"none\" mode, the default, no calculation will be attempted and default configured --chain-threads value will be used. \ - In \"lmax\" mode, producer threads will incrementally be tested until the performance rate ceases to increase with the addition of additional threads. \ - In \"full\" mode producer threads will incrementally be tested from 2..num logical processors, recording each performance and choosing the local max performance (same value as would be discovered in \"lmax\" mode). \ - Useful for graphing the full performance impact of each available thread.", - choices=["none", "lmax", "full"], default="none") - appArgs.add(flag="--calc-net-threads", type=str, help="Determines whether to calculate number of worker threads to use in net thread pool (\"none\", \"lmax\", or \"full\"). \ - In \"none\" mode, the default, no calculation will be attempted and default configured --net-threads value will be used. \ - In \"lmax\" mode, producer threads will incrementally be tested until the performance rate ceases to increase with the addition of additional threads. \ - In \"full\" mode producer threads will incrementally be tested from 2..num logical processors, recording each performance and choosing the local max performance (same value as would be discovered in \"lmax\" mode). 
\ - Useful for graphing the full performance impact of each available thread.", - choices=["none", "lmax", "full"], default="none") - args=TestHelper.parse_args({"-p","-n","-d","-s","--nodes-file" - ,"--dump-error-details","-v","--leave-running" - ,"--clean-run"}, applicationSpecificArgs=appArgs) - return args +class PerfTestArgumentsHandler(object): + @staticmethod + def createArgumentParser(): + ptbArgParser = PtbArgumentsHandler.createBaseArgumentParser() + ptParser = argparse.ArgumentParser(parents=[ptbArgParser], add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter) + + ptGrpTitle="Performance Harness" + ptGrpDescription="Performance Harness testing configuration items." + ptParserGroup = ptParser.add_argument_group(title=ptGrpTitle, description=ptGrpDescription) + ptParserGroup.add_argument("--skip-tps-test", help="Determines whether to skip the max TPS measurement tests", action='store_true') + ptParserGroup.add_argument("--calc-producer-threads", type=str, help="Determines whether to calculate number of worker threads to use in producer thread pool (\"none\", \"lmax\", or \"full\"). \ + In \"none\" mode, the default, no calculation will be attempted and default configured --producer-threads value will be used. \ + In \"lmax\" mode, producer threads will incrementally be tested until the performance rate ceases to increase with the addition of additional threads. \ + In \"full\" mode producer threads will incrementally be tested from 2..num logical processors, recording each performance and choosing the local max performance (same value as would be discovered in \"lmax\" mode). \ + Useful for graphing the full performance impact of each available thread.", + choices=["none", "lmax", "full"], default="none") + ptParserGroup.add_argument("--calc-chain-threads", type=str, help="Determines whether to calculate number of worker threads to use in chain thread pool (\"none\", \"lmax\", or \"full\"). 
\ + In \"none\" mode, the default, no calculation will be attempted and default configured --chain-threads value will be used. \ + In \"lmax\" mode, producer threads will incrementally be tested until the performance rate ceases to increase with the addition of additional threads. \ + In \"full\" mode producer threads will incrementally be tested from 2..num logical processors, recording each performance and choosing the local max performance (same value as would be discovered in \"lmax\" mode). \ + Useful for graphing the full performance impact of each available thread.", + choices=["none", "lmax", "full"], default="none") + ptParserGroup.add_argument("--calc-net-threads", type=str, help="Determines whether to calculate number of worker threads to use in net thread pool (\"none\", \"lmax\", or \"full\"). \ + In \"none\" mode, the default, no calculation will be attempted and default configured --net-threads value will be used. \ + In \"lmax\" mode, producer threads will incrementally be tested until the performance rate ceases to increase with the addition of additional threads. \ + In \"full\" mode producer threads will incrementally be tested from 2..num logical processors, recording each performance and choosing the local max performance (same value as would be discovered in \"lmax\" mode). \ + Useful for graphing the full performance impact of each available thread.", + choices=["none", "lmax", "full"], default="none") + ptParserGroup.add_argument("--del-test-report", help="Whether to save json reports from each test scenario.", action='store_true') + + ptTpsGrpTitle="TPS Test Config" + ptTpsGrpDescription="TPS Performance Test configuration items." 
+ ptTpsParserGroup = ptParser.add_argument_group(title=ptTpsGrpTitle, description=ptTpsGrpDescription) + + ptTpsParserGroup.add_argument("--max-tps-to-test", type=int, help="The max target transfers realistic as ceiling of test range", default=50000) + ptTpsParserGroup.add_argument("--test-iteration-duration-sec", type=int, help="The duration of transfer trx generation for each iteration of the test during the initial search (seconds)", default=150) + ptTpsParserGroup.add_argument("--test-iteration-min-step", type=int, help="The step size determining granularity of tps result during initial search", default=500) + ptTpsParserGroup.add_argument("--final-iterations-duration-sec", type=int, help="The duration of transfer trx generation for each final longer run iteration of the test during the final search (seconds)", default=300) + + return ptParser + + @staticmethod + def parseArgs(): + ptParser=PerfTestArgumentsHandler.createArgumentParser() + args=ptParser.parse_args() + return args def main(): - args = parseArgs() + args = PerfTestArgumentsHandler.parseArgs() Utils.Debug = args.v testHelperConfig = PerformanceTestBasic.TestHelperConfig(killAll=args.clean_run, dontKill=args.leave_running, keepLogs=not args.del_perf_logs, diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 1e4610f8a7..b021cd4641 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -1,5 +1,6 @@ #!/usr/bin/env python3 +import argparse import os import sys import subprocess @@ -437,42 +438,68 @@ def runTest(self) -> bool: return testSuccessful -def parseArgs(): - appArgs=AppArgs() - appArgs.add(flag="--target-tps", type=int, help="The target transfers per second to send during test", default=8000) - appArgs.add(flag="--tps-limit-per-generator", type=int, help="Maximum amount of transactions per second a single generator can have.", default=4000) - 
appArgs.add(flag="--test-duration-sec", type=int, help="The duration of transfer trx generation for the test in seconds", default=90) - appArgs.add(flag="--genesis", type=str, help="Path to genesis.json", default="tests/performance_tests/genesis.json") - appArgs.add(flag="--num-blocks-to-prune", type=int, help=("The number of potentially non-empty blocks, in addition to leading and trailing size 0 blocks, " - "to prune from the beginning and end of the range of blocks of interest for evaluation."), default=2) - appArgs.add(flag="--signature-cpu-billable-pct", type=int, help="Percentage of actual signature recovery cpu to bill. Whole number percentages, e.g. 50 for 50%%", default=0) - appArgs.add(flag="--chain-state-db-size-mb", type=int, help="Maximum size (in MiB) of the chain state database", default=10*1024) - appArgs.add(flag="--chain-threads", type=int, help="Number of worker threads in controller thread pool", default=2) - appArgs.add(flag="--database-map-mode", type=str, help="Database map mode (\"mapped\", \"heap\", or \"locked\"). \ - In \"mapped\" mode database is memory mapped as a file. \ - In \"heap\" mode database is preloaded in to swappable memory and will use huge pages if available. \ - In \"locked\" mode database is preloaded, locked in to memory, and will use huge pages if available.", - choices=["mapped", "heap", "locked"], default="mapped") - appArgs.add(flag="--net-threads", type=int, help="Number of worker threads in net_plugin thread pool", default=2) - appArgs.add(flag="--disable-subjective-billing", type=bool, help="Disable subjective CPU billing for API/P2P transactions", default=True) - appArgs.add(flag="--last-block-time-offset-us", type=int, help="Offset of last block producing time in microseconds. Valid range 0 .. -block_time_interval.", default=0) - appArgs.add(flag="--produce-time-offset-us", type=int, help="Offset of non last block producing time in microseconds. Valid range 0 .. 
-block_time_interval.", default=0) - appArgs.add(flag="--cpu-effort-percent", type=int, help="Percentage of cpu block production time used to produce block. Whole number percentages, e.g. 80 for 80%%", default=100) - appArgs.add(flag="--last-block-cpu-effort-percent", type=int, help="Percentage of cpu block production time used to produce last block. Whole number percentages, e.g. 80 for 80%%", default=100) - appArgs.add(flag="--producer-threads", type=int, help="Number of worker threads in producer thread pool", default=2) - appArgs.add(flag="--http-max-response-time-ms", type=int, help="Maximum time for processing a request, -1 for unlimited", default=990000) - appArgs.add_bool(flag="--del-perf-logs", help="Whether to delete performance test specific logs.") - appArgs.add_bool(flag="--del-report", help="Whether to delete overarching performance run report.") - appArgs.add_bool(flag="--quiet", help="Whether to quiet printing intermediate results and reports to stdout") - appArgs.add_bool(flag="--prods-enable-trace-api", help="Determines whether producer nodes should have eosio::trace_api_plugin enabled") - args=TestHelper.parse_args({"-p","-n","-d","-s","--nodes-file" - ,"--dump-error-details","-v","--leave-running" - ,"--clean-run"}, applicationSpecificArgs=appArgs) - return args +class PtbArgumentsHandler(object): + @staticmethod + def createBaseArgumentParser(): + testHelperArgParser=TestHelper.createArgumentParser(includeArgs={"-p","-n","-d","-s","--nodes-file" + ,"--dump-error-details","-v","--leave-running" + ,"--clean-run"}) + ptbBaseParser = argparse.ArgumentParser(parents=[testHelperArgParser], add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter) + + ptbBaseGrpTitle="Performance Test Basic Base" + ptbBaseGrpDescription="Performance Test Basic base configuration items." 
+ ptbBaseParserGroup = ptbBaseParser.add_argument_group(title=ptbBaseGrpTitle, description=ptbBaseGrpDescription) + + ptbBaseParserGroup.add_argument("--tps-limit-per-generator", type=int, help="Maximum amount of transactions per second a single generator can have.", default=4000) + ptbBaseParserGroup.add_argument("--genesis", type=str, help="Path to genesis.json", default="tests/performance_tests/genesis.json") + ptbBaseParserGroup.add_argument("--num-blocks-to-prune", type=int, help=("The number of potentially non-empty blocks, in addition to leading and trailing size 0 blocks, " + "to prune from the beginning and end of the range of blocks of interest for evaluation."), default=2) + ptbBaseParserGroup.add_argument("--signature-cpu-billable-pct", type=int, help="Percentage of actual signature recovery cpu to bill. Whole number percentages, e.g. 50 for 50%%", default=0) + ptbBaseParserGroup.add_argument("--chain-state-db-size-mb", type=int, help="Maximum size (in MiB) of the chain state database", default=10*1024) + ptbBaseParserGroup.add_argument("--chain-threads", type=int, help="Number of worker threads in controller thread pool", default=2) + ptbBaseParserGroup.add_argument("--database-map-mode", type=str, help="Database map mode (\"mapped\", \"heap\", or \"locked\"). \ + In \"mapped\" mode database is memory mapped as a file. \ + In \"heap\" mode database is preloaded in to swappable memory and will use huge pages if available. 
\ + In \"locked\" mode database is preloaded, locked in to memory, and will use huge pages if available.", + choices=["mapped", "heap", "locked"], default="mapped") + ptbBaseParserGroup.add_argument("--net-threads", type=int, help="Number of worker threads in net_plugin thread pool", default=2) + ptbBaseParserGroup.add_argument("--disable-subjective-billing", type=bool, help="Disable subjective CPU billing for API/P2P transactions", default=True) + ptbBaseParserGroup.add_argument("--last-block-time-offset-us", type=int, help="Offset of last block producing time in microseconds. Valid range 0 .. -block_time_interval.", default=0) + ptbBaseParserGroup.add_argument("--produce-time-offset-us", type=int, help="Offset of non last block producing time in microseconds. Valid range 0 .. -block_time_interval.", default=0) + ptbBaseParserGroup.add_argument("--cpu-effort-percent", type=int, help="Percentage of cpu block production time used to produce block. Whole number percentages, e.g. 80 for 80%%", default=100) + ptbBaseParserGroup.add_argument("--last-block-cpu-effort-percent", type=int, help="Percentage of cpu block production time used to produce last block. Whole number percentages, e.g. 
80 for 80%%", default=100) + ptbBaseParserGroup.add_argument("--producer-threads", type=int, help="Number of worker threads in producer thread pool", default=2) + ptbBaseParserGroup.add_argument("--http-max-response-time-ms", type=int, help="Maximum time for processing a request, -1 for unlimited", default=990000) + ptbBaseParserGroup.add_argument("--del-perf-logs", help="Whether to delete performance test specific logs.", action='store_true') + ptbBaseParserGroup.add_argument("--del-report", help="Whether to delete overarching performance run report.", action='store_true') + ptbBaseParserGroup.add_argument("--quiet", help="Whether to quiet printing intermediate results and reports to stdout", action='store_true') + ptbBaseParserGroup.add_argument("--prods-enable-trace-api", help="Determines whether producer nodes should have eosio::trace_api_plugin enabled", action='store_true') + return ptbBaseParser + + @staticmethod + def createArgumentParser(): + ptbBaseParser = PtbArgumentsHandler.createBaseArgumentParser() + + ptbParser = argparse.ArgumentParser(parents=[ptbBaseParser], add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter) + + ptbGrpTitle="Performance Test Basic Single Test" + ptbGrpDescription="Performance Test Basic single test configuration items. Useful for running a single test directly. \ + These items may not be directly configurable from higher level scripts as the scripts themselves may configure these internally." 
+ ptbParserGroup = ptbBaseParser.add_argument_group(title=ptbGrpTitle, description=ptbGrpDescription) + + ptbParserGroup.add_argument("--target-tps", type=int, help="The target transfers per second to send during test", default=8000) + ptbParserGroup.add_argument("--test-duration-sec", type=int, help="The duration of transfer trx generation for the test in seconds", default=90) + return ptbParser + + @staticmethod + def parseArgs(): + ptbParser=PtbArgumentsHandler.createArgumentParser() + args=ptbParser.parse_args() + return args def main(): - args = parseArgs() + args = PtbArgumentsHandler.parseArgs() Utils.Debug = args.v testHelperConfig = PerformanceTestBasic.TestHelperConfig(killAll=args.clean_run, dontKill=args.leave_running, keepLogs=not args.del_perf_logs, From ee1795570de7e6d936c3f42273ebc79ef76a687d Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 29 Nov 2022 09:09:07 -0600 Subject: [PATCH 020/178] Don't print Application Specific Arguments group if there are none. --- tests/TestHarness/TestHelper.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/tests/TestHarness/TestHelper.py b/tests/TestHarness/TestHelper.py index e9e177cd42..01874c6be0 100644 --- a/tests/TestHarness/TestHelper.py +++ b/tests/TestHarness/TestHelper.py @@ -115,14 +115,15 @@ def createArgumentParser(includeArgs, applicationSpecificArgs=AppArgs()) -> argp if "--alternate-version-labels-file" in includeArgs: thGrp.add_argument("--alternate-version-labels-file", type=str, help="Provide a file to define the labels that can be used in the test and the path to the version installation associated with that.") - appArgsGrpTitle="Application Specific Arguments" - appArgsGrpdescription="Test Helper configuration items used to configure and spin up the regression test framework and blockchain environment." 
- appArgsGrp = thParser.add_argument_group(title=appArgsGrpTitle, description=appArgsGrpdescription) - for arg in applicationSpecificArgs.args: - if arg.type is not None: - appArgsGrp.add_argument(arg.flag, type=arg.type, help=arg.help, choices=arg.choices, default=arg.default) - else: - appArgsGrp.add_argument(arg.flag, help=arg.help, action=arg.action) + if len(applicationSpecificArgs.args) > 0: + appArgsGrpTitle="Application Specific Arguments" + appArgsGrpdescription="Test Helper configuration items used to configure and spin up the regression test framework and blockchain environment." + appArgsGrp = thParser.add_argument_group(title=appArgsGrpTitle, description=appArgsGrpdescription) + for arg in applicationSpecificArgs.args: + if arg.type is not None: + appArgsGrp.add_argument(arg.flag, type=arg.type, help=arg.help, choices=arg.choices, default=arg.default) + else: + appArgsGrp.add_argument(arg.flag, help=arg.help, action=arg.action) return thParser From b6b6bcf767e74cc252d57f556ec58ab0d91abbb7 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 29 Nov 2022 09:11:01 -0600 Subject: [PATCH 021/178] Clarify TPS Test Config is part of Performance Harness. --- tests/performance_tests/performance_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index ee2b77dd84..ecdb068411 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -501,7 +501,7 @@ def createArgumentParser(): choices=["none", "lmax", "full"], default="none") ptParserGroup.add_argument("--del-test-report", help="Whether to save json reports from each test scenario.", action='store_true') - ptTpsGrpTitle="TPS Test Config" + ptTpsGrpTitle="Performance Harness - TPS Test Config" ptTpsGrpDescription="TPS Performance Test configuration items." 
ptTpsParserGroup = ptParser.add_argument_group(title=ptTpsGrpTitle, description=ptTpsGrpDescription) From bd4a525f2f1438530c8063a3a31940352d237f7a Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 29 Nov 2022 09:38:24 -0600 Subject: [PATCH 022/178] Fix performance_test_basic.py parser. --- tests/performance_tests/performance_test_basic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index b021cd4641..264045660d 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -485,7 +485,7 @@ def createArgumentParser(): ptbGrpTitle="Performance Test Basic Single Test" ptbGrpDescription="Performance Test Basic single test configuration items. Useful for running a single test directly. \ These items may not be directly configurable from higher level scripts as the scripts themselves may configure these internally." - ptbParserGroup = ptbBaseParser.add_argument_group(title=ptbGrpTitle, description=ptbGrpDescription) + ptbParserGroup = ptbParser.add_argument_group(title=ptbGrpTitle, description=ptbGrpDescription) ptbParserGroup.add_argument("--target-tps", type=int, help="The target transfers per second to send during test", default=8000) ptbParserGroup.add_argument("--test-duration-sec", type=int, help="The duration of transfer trx generation for the test in seconds", default=90) From 0d5980d2da0e2af090e82710d4e592f5db820b8e Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 30 Nov 2022 10:29:33 -0600 Subject: [PATCH 023/178] Use Python introspection to simplify. 
--- tests/performance_tests/performance_test.py | 40 +++++++------------ .../performance_test_basic.py | 18 ++++----- 2 files changed, 23 insertions(+), 35 deletions(-) diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index 6e11a2c833..91e06169ea 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -196,9 +196,9 @@ def evaluateSuccess(self, test: PerformanceTestBasic, testSuccessful: bool, resu return result.basicTestSuccess and result.tpsExpectMet and result.trxExpectMet class PluginThreadOpt(Enum): - PRODUCER = 1 - CHAIN = 2 - NET = 3 + PRODUCER = "producer" + CHAIN = "chain" + NET = "net" class PluginThreadOptRunType(Enum): FULL = 1 @@ -214,14 +214,7 @@ class PluginThreadOptResult: def optimizePluginThreadCount(self, optPlugin: PluginThreadOpt, optType: PluginThreadOptRunType=PluginThreadOptRunType.LOCAL_MAX, minThreadCount: int=2, maxThreadCount: int=os.cpu_count()) -> PluginThreadOptResult: - if optPlugin == PerformanceTest.PluginThreadOpt.PRODUCER: - fileName = "producerThreadResults.txt" - elif optPlugin == PerformanceTest.PluginThreadOpt.CHAIN: - fileName = "chainThreadResults.txt" - else: - fileName = "netThreadResults.txt" - - resultsFile = f"{self.loggingConfig.pluginThreadOptLogsDirPath}/{fileName}" + resultsFile = f"{self.loggingConfig.pluginThreadOptLogsDirPath}/{optPlugin.value}ThreadResults.txt" threadToMaxTpsDict: dict = {} @@ -229,19 +222,14 @@ def optimizePluginThreadCount(self, optPlugin: PluginThreadOpt, optType: Plugin analysisStart = datetime.utcnow() with open(resultsFile, 'w') as log: - log.write(f"{optPlugin.name.lower()}Threads, maxTpsAchieved\n") + log.write(f"{optPlugin.value}Threads, maxTpsAchieved\n") log.close() lastMaxTpsAchieved = 0 for threadCount in range(minThreadCount, maxThreadCount+1): - print(f"Running {optPlugin.name.lower()} thread count optimization check with {threadCount} {optPlugin.name.lower()} threads") + 
print(f"Running {optPlugin.value} thread count optimization check with {threadCount} {optPlugin.value} threads") - if optPlugin == PerformanceTest.PluginThreadOpt.PRODUCER: - clusterConfig.extraNodeosArgs.producerPluginArgs.producerThreads = threadCount - elif optPlugin == PerformanceTest.PluginThreadOpt.CHAIN: - clusterConfig.extraNodeosArgs.chainPluginArgs.chainThreads = threadCount - else: - clusterConfig.extraNodeosArgs.netPluginArgs.netThreads = threadCount + getattr(clusterConfig.extraNodeosArgs, optPlugin.value + 'PluginArgs').threads = threadCount binSearchResults = self.performPtbBinarySearch(clusterConfig=clusterConfig, logDirRoot=self.loggingConfig.pluginThreadOptLogsDirPath, delReport=True, quiet=False, delPerfLogs=True) @@ -250,7 +238,7 @@ def optimizePluginThreadCount(self, optPlugin: PluginThreadOpt, optType: Plugin if not self.ptConfig.quiet: print("Search Results:") for i in range(len(binSearchResults.searchResults)): - print(f"Search scenario {optPlugin.name} thread count {threadCount}: {i} result: {binSearchResults.searchResults[i]}") + print(f"Search scenario {optPlugin.value} thread count {threadCount}: {i} result: {binSearchResults.searchResults[i]}") with open(resultsFile, 'a') as log: log.write(f"{threadCount},{binSearchResults.maxTpsAchieved}\n") @@ -422,7 +410,7 @@ def runTest(self): optType = PerformanceTest.PluginThreadOptRunType.LOCAL_MAX prodResults = self.optimizePluginThreadCount(optPlugin=PerformanceTest.PluginThreadOpt.PRODUCER, optType=optType) print(f"Producer Thread Optimization results: {prodResults}") - self.clusterConfig.extraNodeosArgs.producerPluginArgs.producerThreads = prodResults.recommendedThreadCount + self.clusterConfig.extraNodeosArgs.producerPluginArgs.threads = prodResults.recommendedThreadCount chainResults = None if self.ptConfig.calcChainThreads != "none": @@ -433,7 +421,7 @@ def runTest(self): optType = PerformanceTest.PluginThreadOptRunType.LOCAL_MAX chainResults = 
self.optimizePluginThreadCount(optPlugin=PerformanceTest.PluginThreadOpt.CHAIN, optType=optType) print(f"Chain Thread Optimization results: {chainResults}") - self.clusterConfig.extraNodeosArgs.chainPluginArgs.chainThreads = chainResults.recommendedThreadCount + self.clusterConfig.extraNodeosArgs.chainPluginArgs.threads = chainResults.recommendedThreadCount netResults = None if self.ptConfig.calcNetThreads != "none": @@ -444,7 +432,7 @@ def runTest(self): optType = PerformanceTest.PluginThreadOptRunType.LOCAL_MAX netResults = self.optimizePluginThreadCount(optPlugin=PerformanceTest.PluginThreadOpt.NET, optType=optType) print(f"Net Thread Optimization results: {netResults}") - self.clusterConfig.extraNodeosArgs.netPluginArgs.netThreads = netResults.recommendedThreadCount + self.clusterConfig.extraNodeosArgs.netPluginArgs.threads = netResults.recommendedThreadCount tpsTestResult = None if not self.ptConfig.skipTpsTests: @@ -535,13 +523,13 @@ def main(): ENA = PerformanceTestBasic.ClusterConfig.ExtraNodeosArgs chainPluginArgs = ENA.ChainPluginArgs(signatureCpuBillablePct=args.signature_cpu_billable_pct, chainStateDbSizeMb=args.chain_state_db_size_mb, - chainThreads=args.chain_threads, databaseMapMode=args.database_map_mode) + threads=args.chain_threads, databaseMapMode=args.database_map_mode) producerPluginArgs = ENA.ProducerPluginArgs(disableSubjectiveBilling=args.disable_subjective_billing, lastBlockTimeOffsetUs=args.last_block_time_offset_us, produceTimeOffsetUs=args.produce_time_offset_us, cpuEffortPercent=args.cpu_effort_percent, lastBlockCpuEffortPercent=args.last_block_cpu_effort_percent, - producerThreads=args.producer_threads) + threads=args.producer_threads) httpPluginArgs = ENA.HttpPluginArgs(httpMaxResponseTimeMs=args.http_max_response_time_ms) - netPluginArgs = ENA.NetPluginArgs(netThreads=args.net_threads) + netPluginArgs = ENA.NetPluginArgs(threads=args.net_threads) extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, 
producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs) testClusterConfig = PerformanceTestBasic.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 1e4610f8a7..6f195f7a31 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -51,21 +51,21 @@ class ExtraNodeosArgs: class ChainPluginArgs: signatureCpuBillablePct: int = 0 chainStateDbSizeMb: int = 10 * 1024 - chainThreads: int = 3 + threads: int = 3 databaseMapMode: str = "mapped" def __str__(self) -> str: return f"--signature-cpu-billable-pct {self.signatureCpuBillablePct} \ --chain-state-db-size-mb {self.chainStateDbSizeMb} \ - --chain-threads {self.chainThreads} \ + --chain-threads {self.threads} \ --database-map-mode {self.databaseMapMode}" @dataclass class NetPluginArgs: - netThreads: int = 2 + threads: int = 2 def __str__(self) -> str: - return f"--net-threads {self.netThreads}" + return f"--net-threads {self.threads}" @dataclass class ProducerPluginArgs: @@ -74,7 +74,7 @@ class ProducerPluginArgs: produceTimeOffsetUs: int = 0 cpuEffortPercent: int = 100 lastBlockCpuEffortPercent: int = 100 - producerThreads: int = 6 + threads: int = 6 def __str__(self) -> str: return f"--disable-subjective-billing {self.disableSubjectiveBilling} \ @@ -82,7 +82,7 @@ def __str__(self) -> str: --produce-time-offset-us {self.produceTimeOffsetUs} \ --cpu-effort-percent {self.cpuEffortPercent} \ --last-block-cpu-effort-percent {self.lastBlockCpuEffortPercent} \ - --producer-threads {self.producerThreads}" + --producer-threads {self.threads}" @dataclass class HttpPluginArgs: @@ -480,13 +480,13 @@ def main(): ENA = PerformanceTestBasic.ClusterConfig.ExtraNodeosArgs chainPluginArgs = 
ENA.ChainPluginArgs(signatureCpuBillablePct=args.signature_cpu_billable_pct, chainStateDbSizeMb=args.chain_state_db_size_mb, - chainThreads=args.chain_threads, databaseMapMode=args.database_map_mode) + threads=args.chain_threads, databaseMapMode=args.database_map_mode) producerPluginArgs = ENA.ProducerPluginArgs(disableSubjectiveBilling=args.disable_subjective_billing, lastBlockTimeOffsetUs=args.last_block_time_offset_us, produceTimeOffsetUs=args.produce_time_offset_us, cpuEffortPercent=args.cpu_effort_percent, lastBlockCpuEffortPercent=args.last_block_cpu_effort_percent, - producerThreads=args.producer_threads) + threads=args.producer_threads) httpPluginArgs = ENA.HttpPluginArgs(httpMaxResponseTimeMs=args.http_max_response_time_ms) - netPluginArgs = ENA.NetPluginArgs(netThreads=args.net_threads) + netPluginArgs = ENA.NetPluginArgs(threads=args.net_threads) extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs) testClusterConfig = PerformanceTestBasic.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs) From d291e17022589485a321c1e6d4b7cefde3a6fd1b Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 30 Nov 2022 11:12:12 -0600 Subject: [PATCH 024/178] Use encoder to handle datetime and None type conversions for reporting. 
--- tests/performance_tests/log_reader.py | 12 ++++++--- tests/performance_tests/performance_test.py | 28 +++++++-------------- 2 files changed, 18 insertions(+), 22 deletions(-) diff --git a/tests/performance_tests/log_reader.py b/tests/performance_tests/log_reader.py index 2fb8341848..4cd550bf45 100644 --- a/tests/performance_tests/log_reader.py +++ b/tests/performance_tests/log_reader.py @@ -376,10 +376,16 @@ def createReport(guide: chainBlocksGuide, tpsTestConfig: TpsTestConfig, tpsStats report['nodeosVersion'] = Utils.getNodeosVersion() return report +class LogReaderEncoder(json.JSONEncoder): + def default(self, obj): + if isinstance(obj, datetime): + return obj.isoformat() + if obj is None: + return "Unknown" + return json.JSONEncoder.default(self, obj) + def reportAsJSON(report: dict) -> json: - report['testStart'] = "Unknown" if report['testStart'] is None else report['testStart'].isoformat() - report['testFinish'] = "Unknown" if report['testFinish'] is None else report['testFinish'].isoformat() - return json.dumps(report, sort_keys=True, indent=2) + return json.dumps(report, sort_keys=True, indent=2, cls=LogReaderEncoder) def calcAndReport(data: chainData, tpsTestConfig: TpsTestConfig, artifacts: ArtifactPaths, argsDict: dict, testStart: datetime=None, completedRun: bool=True) -> dict: scrapeLog(data, artifacts.nodeosLogPath) diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index 91e06169ea..a9f21cb7d0 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -300,26 +300,16 @@ def createReport(self,producerThreadResult: PluginThreadOptResult=None, chainThr report['nodeosVersion'] = Utils.getNodeosVersion() return report + class PtReportEncoder(json.JSONEncoder): + def default(self, obj): + if isinstance(obj, datetime): + return obj.isoformat() + if obj is None: + return "Unknown" + return json.JSONEncoder.default(self, obj) + def reportAsJSON(self, 
report: dict) -> json: - if 'ProducerThreadAnalysis' in report: - report['ProducerThreadAnalysis']['analysisStart'] = report['ProducerThreadAnalysis']['analysisStart'].isoformat() - report['ProducerThreadAnalysis']['analysisFinish'] = report['ProducerThreadAnalysis']['analysisFinish'].isoformat() - if 'ChainThreadAnalysis' in report: - report['ChainThreadAnalysis']['analysisStart'] = report['ChainThreadAnalysis']['analysisStart'].isoformat() - report['ChainThreadAnalysis']['analysisFinish'] = report['ChainThreadAnalysis']['analysisFinish'].isoformat() - if 'NetThreadAnalysis' in report: - report['NetThreadAnalysis']['analysisStart'] = report['NetThreadAnalysis']['analysisStart'].isoformat() - report['NetThreadAnalysis']['analysisFinish'] = report['NetThreadAnalysis']['analysisFinish'].isoformat() - - if 'tpsTestStart' in report: - report['tpsTestStart'] = report['tpsTestStart'].isoformat() - if 'tpsTestFinish' in report: - report['tpsTestFinish'] = report['tpsTestFinish'].isoformat() - - report['perfTestsBegin'] = report['perfTestsBegin'].isoformat() - report['perfTestsFinish'] = report['perfTestsFinish'].isoformat() - - return json.dumps(report, indent=2) + return json.dumps(report, indent=2, cls=PerformanceTest.PtReportEncoder) def exportReportAsJSON(self, report: json, exportPath): with open(exportPath, 'wt') as f: From 33b94ba2a4c25fde078bf6214689c9dbe66fb770 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 30 Nov 2022 14:43:02 -0600 Subject: [PATCH 025/178] Remove redundant calls to close when using with statement. 
--- tests/performance_tests/performance_test.py | 2 -- tests/performance_tests/performance_test_basic.py | 2 -- 2 files changed, 4 deletions(-) diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index a9f21cb7d0..7f977b44c2 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -223,7 +223,6 @@ def optimizePluginThreadCount(self, optPlugin: PluginThreadOpt, optType: Plugin with open(resultsFile, 'w') as log: log.write(f"{optPlugin.value}Threads, maxTpsAchieved\n") - log.close() lastMaxTpsAchieved = 0 for threadCount in range(minThreadCount, maxThreadCount+1): @@ -242,7 +241,6 @@ def optimizePluginThreadCount(self, optPlugin: PluginThreadOpt, optType: Plugin with open(resultsFile, 'a') as log: log.write(f"{threadCount},{binSearchResults.maxTpsAchieved}\n") - log.close() if optType == PerformanceTest.PluginThreadOptRunType.LOCAL_MAX: if binSearchResults.maxTpsAchieved <= lastMaxTpsAchieved: diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 6f195f7a31..214cb144ae 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -240,12 +240,10 @@ def queryBlockTrxData(self, node, blockDataPath, blockTrxDataPath, startBlockNum btdf_append_write = self.fileOpenMode(blockTrxDataPath) with open(blockTrxDataPath, btdf_append_write) as trxDataFile: [trxDataFile.write(f"{trx['id']},{trx['block_num']},{trx['cpu_usage_us']},{trx['net_usage_words']}\n") for trx in block['payload']['transactions'] if block['payload']['transactions']] - trxDataFile.close() bdf_append_write = self.fileOpenMode(blockDataPath) with open(blockDataPath, bdf_append_write) as blockDataFile: blockDataFile.write(f"{block['payload']['number']},{block['payload']['id']},{block['payload']['producer']},{block['payload']['status']},{block['payload']['timestamp']}\n") - 
blockDataFile.close() def waitForEmptyBlocks(self, node, numEmptyToWaitOn): emptyBlocks = 0 From c175d6be97db15670525c6378005e8ca44793d24 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 30 Nov 2022 14:43:26 -0600 Subject: [PATCH 026/178] Resuse encoder from log_reader. --- tests/performance_tests/performance_test.py | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index 7f977b44c2..2be12a1924 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -17,6 +17,7 @@ from dataclasses import dataclass, asdict, field from datetime import datetime from enum import Enum +from log_reader import LogReaderEncoder class PerformanceTest: @@ -298,16 +299,8 @@ def createReport(self,producerThreadResult: PluginThreadOptResult=None, chainThr report['nodeosVersion'] = Utils.getNodeosVersion() return report - class PtReportEncoder(json.JSONEncoder): - def default(self, obj): - if isinstance(obj, datetime): - return obj.isoformat() - if obj is None: - return "Unknown" - return json.JSONEncoder.default(self, obj) - def reportAsJSON(self, report: dict) -> json: - return json.dumps(report, indent=2, cls=PerformanceTest.PtReportEncoder) + return json.dumps(report, indent=2, cls=LogReaderEncoder) def exportReportAsJSON(self, report: json, exportPath): with open(exportPath, 'wt') as f: From cf4097217f9ecc408a34a29ed1e557c898656860 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 6 Dec 2022 16:19:10 -0600 Subject: [PATCH 027/178] Provide *PluginArgs dataclasses to capture and validate nodeos config options. Each *PluginArgs dataclass captures the command line argument and default for each option as specificed in nodeos. New test validate_nodeos_plugin_args uses nodeos's --help command to parse out each plugin's available options and defaults to validate current status vs. the *PluginArgs classes. 
Any addition, removal, update will cause a test failure with note of what has changed. This will allow easy maintenance going forward. --- tests/performance_tests/CMakeLists.txt | 12 + tests/performance_tests/ChainPluginArgs.py | 207 ++++++++++++++++++ .../performance_tests/HttpClientPluginArgs.py | 48 ++++ tests/performance_tests/HttpPluginArgs.py | 99 +++++++++ tests/performance_tests/NetPluginArgs.py | 99 +++++++++ tests/performance_tests/ProducerPluginArgs.py | 126 +++++++++++ .../ResourceMonitorPluginArgs.py | 54 +++++ .../SignatureProviderPluginArgs.py | 45 ++++ .../StateHistoryPluginArgs.py | 66 ++++++ tests/performance_tests/TraceApiPluginArgs.py | 60 +++++ tests/performance_tests/performance_test.py | 23 +- .../performance_test_basic.py | 89 +++----- .../validate_nodeos_plugin_args.py | 119 ++++++++++ 13 files changed, 979 insertions(+), 68 deletions(-) create mode 100755 tests/performance_tests/ChainPluginArgs.py create mode 100755 tests/performance_tests/HttpClientPluginArgs.py create mode 100755 tests/performance_tests/HttpPluginArgs.py create mode 100755 tests/performance_tests/NetPluginArgs.py create mode 100755 tests/performance_tests/ProducerPluginArgs.py create mode 100755 tests/performance_tests/ResourceMonitorPluginArgs.py create mode 100755 tests/performance_tests/SignatureProviderPluginArgs.py create mode 100755 tests/performance_tests/StateHistoryPluginArgs.py create mode 100755 tests/performance_tests/TraceApiPluginArgs.py create mode 100755 tests/performance_tests/validate_nodeos_plugin_args.py diff --git a/tests/performance_tests/CMakeLists.txt b/tests/performance_tests/CMakeLists.txt index ae63924623..cff889c4ce 100644 --- a/tests/performance_tests/CMakeLists.txt +++ b/tests/performance_tests/CMakeLists.txt @@ -7,7 +7,19 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/launch_transaction_generators.py ${CM configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_log_2_0_14.txt.gz ${CMAKE_CURRENT_BINARY_DIR}/nodeos_log_2_0_14.txt.gz COPYONLY) 
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_log_3_2.txt.gz ${CMAKE_CURRENT_BINARY_DIR}/nodeos_log_3_2.txt.gz COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/genesis.json ${CMAKE_CURRENT_BINARY_DIR}/genesis.json COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/validate_nodeos_plugin_args.py ${CMAKE_CURRENT_BINARY_DIR}/validate_nodeos_plugin_args.py COPYONLY) + +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/ChainPluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/ChainPluginArgs.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/HttpClientPluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/HttpClientPluginArgs.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/HttpPluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/HttpPluginArgs.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/NetPluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/NetPluginArgs.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/ProducerPluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/ProducerPluginArgs.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/ResourceMonitorPluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/ResourceMonitorPluginArgs.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/SignatureProviderPluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/SignatureProviderPluginArgs.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/StateHistoryPluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/StateHistoryPluginArgs.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/TraceApiPluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/TraceApiPluginArgs.py COPYONLY) add_test(NAME performance_test_basic COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 6000 --tps-limit-per-generator 1500 --clean-run WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME log_reader_tests COMMAND tests/performance_tests/log_reader_tests.py WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME validate_nodeos_plugin_args COMMAND tests/performance_tests/validate_nodeos_plugin_args.py WORKING_DIRECTORY 
${CMAKE_BINARY_DIR}) set_property(TEST performance_test_basic PROPERTY LABELS nonparallelizable_tests) diff --git a/tests/performance_tests/ChainPluginArgs.py b/tests/performance_tests/ChainPluginArgs.py new file mode 100755 index 0000000000..9bdcb3e07a --- /dev/null +++ b/tests/performance_tests/ChainPluginArgs.py @@ -0,0 +1,207 @@ +#!/usr/bin/env python3 + +import dataclasses +import re + +from dataclasses import dataclass + +@dataclass +class ChainPluginArgs: + _pluginNamespace: str = "eosio" + _pluginName: str = "chain_plugin" + blocksDir: str=None + _blocksDirNodeosDefault: str='"blocks"' + _blocksDirNodeosArg: str="--blocks-dir" + protocolFeaturesDir: str=None + _protocolFeaturesDirNodeosDefault: str='"protocol_features"' + _protocolFeaturesDirNodeosArg: str="--protocol-features-dir" + checkpoint: str=None + _checkpointNodeosDefault: str=None + _checkpointNodeosArg: str="--checkpoint" + wasmRuntime: str=None + _wasmRuntimeNodeosDefault: str='eos-vm-jit' + _wasmRuntimeNodeosArg: str="--wasm-runtime" + profileAccount: str=None + _profileAccountNodeosDefault: str=None + _profileAccountNodeosArg: str="--profile-account" + abiSerializerMaxTimeMs: int=None + _abiSerializerMaxTimeMsNodeosDefault: int=15 + _abiSerializerMaxTimeMsNodeosArg: str="--abi-serializer-max-time-ms" + chainStateDbSizeMb: int=None + _chainStateDbSizeMbNodeosDefault: int=1024 + _chainStateDbSizeMbNodeosArg: str="--chain-state-db-size-mb" + chainStateDbGuardSizeMb: int=None + _chainStateDbGuardSizeMbNodeosDefault: int=128 + _chainStateDbGuardSizeMbNodeosArg: str="--chain-state-db-guard-size-mb" + signatureCpuBillablePct: int=None + _signatureCpuBillablePctNodeosDefault: int=50 + _signatureCpuBillablePctNodeosArg: str="--signature-cpu-billable-pct" + chainThreads: int=None + _chainThreadsNodeosDefault: int=2 + _chainThreadsNodeosArg: str="--chain-threads" + contractsConsole: bool=None + _contractsConsoleNodeosDefault: bool=False + _contractsConsoleNodeosArg: str="--contracts-console" + deepMind: 
bool=None + _deepMindNodeosDefault: bool=False + _deepMindNodeosArg: str="--deep-mind" + actorWhitelist: str=None + _actorWhitelistNodeosDefault: str=None + _actorWhitelistNodeosArg: str="--actor-whitelist" + actorBlacklist: str=None + _actorBlacklistNodeosDefault: str=None + _actorBlacklistNodeosArg: str="--actor-blacklist" + contractWhitelist: str=None + _contractWhitelistNodeosDefault: str=None + _contractWhitelistNodeosArg: str="--contract-whitelist" + contractBlacklist: str=None + _contractBlacklistNodeosDefault: str=None + _contractBlacklistNodeosArg: str="--contract-blacklist" + actionBlacklist: str=None + _actionBlacklistNodeosDefault: str=None + _actionBlacklistNodeosArg: str="--action-blacklist" + keyBlacklist: str=None + _keyBlacklistNodeosDefault: str=None + _keyBlacklistNodeosArg: str="--key-blacklist" + senderBypassWhiteblacklist: str=None + _senderBypassWhiteblacklistNodeosDefault: str=None + _senderBypassWhiteblacklistNodeosArg: str="--sender-bypass-whiteblacklist" + readMode: str=None + _readModeNodeosDefault: str='head' + _readModeNodeosArg: str="--read-mode" + apiAcceptTransactions: int=None + _apiAcceptTransactionsNodeosDefault: int=1 + _apiAcceptTransactionsNodeosArg: str="--api-accept-transactions" + validationMode: str=None + _validationModeNodeosDefault: str='full' + _validationModeNodeosArg: str="--validation-mode" + disableRamBillingNotifyChecks: bool=None + _disableRamBillingNotifyChecksNodeosDefault: bool=False + _disableRamBillingNotifyChecksNodeosArg: str="--disable-ram-billing-notify-checks" + maximumVariableSignatureLength: int=None + _maximumVariableSignatureLengthNodeosDefault: int=16384 + _maximumVariableSignatureLengthNodeosArg: str="--maximum-variable-signature-length" + trustedProducer: str=None + _trustedProducerNodeosDefault: str=None + _trustedProducerNodeosArg: str="--trusted-producer" + databaseMapMode: str=None + _databaseMapModeNodeosDefault: str='mapped' + _databaseMapModeNodeosArg: str="--database-map-mode" + 
eosVmOcCacheSizeMb: int=None + _eosVmOcCacheSizeMbNodeosDefault: int=1024 + _eosVmOcCacheSizeMbNodeosArg: str="--eos-vm-oc-cache-size-mb" + eosVmOcCompileThreads: int=None + _eosVmOcCompileThreadsNodeosDefault: int=1 + _eosVmOcCompileThreadsNodeosArg: str="--eos-vm-oc-compile-threads" + eosVmOcEnable: bool=None + _eosVmOcEnableNodeosDefault: bool=False + _eosVmOcEnableNodeosArg: str="--eos-vm-oc-enable" + enableAccountQueries: int=None + _enableAccountQueriesNodeosDefault: int=0 + _enableAccountQueriesNodeosArg: str="--enable-account-queries" + maxNonprivilegedInlineActionSize: int=None + _maxNonprivilegedInlineActionSizeNodeosDefault: int=4096 + _maxNonprivilegedInlineActionSizeNodeosArg: str="--max-nonprivileged-inline-action-size" + transactionRetryMaxStorageSizeGb: int=None + _transactionRetryMaxStorageSizeGbNodeosDefault: int=None + _transactionRetryMaxStorageSizeGbNodeosArg: str="--transaction-retry-max-storage-size-gb" + transactionRetryIntervalSec: int=None + _transactionRetryIntervalSecNodeosDefault: int=20 + _transactionRetryIntervalSecNodeosArg: str="--transaction-retry-interval-sec" + transactionRetryMaxExpirationSec: int=None + _transactionRetryMaxExpirationSecNodeosDefault: int=120 + _transactionRetryMaxExpirationSecNodeosArg: str="--transaction-retry-max-expiration-sec" + transactionFinalityStatusMaxStorageSizeGb: int=None + _transactionFinalityStatusMaxStorageSizeGbNodeosDefault: int=None + _transactionFinalityStatusMaxStorageSizeGbNodeosArg: str="--transaction-finality-status-max-storage-size-gb" + transactionFinalityStatusSuccessDurationSec: int=None + _transactionFinalityStatusSuccessDurationSecNodeosDefault: int=180 + _transactionFinalityStatusSuccessDurationSecNodeosArg: str="--transaction-finality-status-success-duration-sec" + transactionFinalityStatusFailureDurationSec: int=None + _transactionFinalityStatusFailureDurationSecNodeosDefault: int=180 + _transactionFinalityStatusFailureDurationSecNodeosArg: 
str="--transaction-finality-status-failure-duration-sec" + integrityHashOnStart: bool=None + _integrityHashOnStartNodeosDefault: bool=False + _integrityHashOnStartNodeosArg: str="--integrity-hash-on-start" + integrityHashOnStop: bool=None + _integrityHashOnStopNodeosDefault: bool=False + _integrityHashOnStopNodeosArg: str="--integrity-hash-on-stop" + blockLogRetainBlocks: int=None + _blockLogRetainBlocksNodeosDefault: int=False + _blockLogRetainBlocksNodeosArg: str="--block-log-retain-blocks" + genesisJson: str=None + _genesisJsonNodeosDefault: str=None + _genesisJsonNodeosArg: str="--genesis-json" + genesisTimestamp: str=None + _genesisTimestampNodeosDefault: str=None + _genesisTimestampNodeosArg: str="--genesis-timestamp" + printGenesisJson: bool=None + _printGenesisJsonNodeosDefault: bool=False + _printGenesisJsonNodeosArg: str="--print-genesis-json" + extractGenesisJson: bool=None + _extractGenesisJsonNodeosDefault: bool=False + _extractGenesisJsonNodeosArg: str="--extract-genesis-json" + printBuildInfo: bool=None + _printBuildInfoNodeosDefault: bool=False + _printBuildInfoNodeosArg: str="--print-build-info" + extractBuildInfo=None + _extractBuildInfoNodeosDefault=None + _extractBuildInfoNodeosArg: str="--extract-build-info" + forceAllChecks: bool=None + _forceAllChecksNodeosDefault: bool=False + _forceAllChecksNodeosArg: str="--force-all-checks" + disableReplayOpts: bool=None + _disableReplayOptsNodeosDefault: bool=False + _disableReplayOptsNodeosArg: str="--disable-replay-opts" + replayBlockchain: bool=None + _replayBlockchainNodeosDefault: bool=False + _replayBlockchainNodeosArg: str="--replay-blockchain" + hardReplayBlockchain: bool=None + _hardReplayBlockchainNodeosDefault: bool=False + _hardReplayBlockchainNodeosArg: str="--hard-replay-blockchain" + deleteAllBlocks: bool=None + _deleteAllBlocksNodeosDefault: bool=False + _deleteAllBlocksNodeosArg: str="--delete-all-blocks" + truncateAtBlock: int=None + _truncateAtBlockNodeosDefault: int=0 + 
_truncateAtBlockNodeosArg: str="--truncate-at-block" + terminateAtBlock: int=None + _terminateAtBlockNodeosDefault: int=0 + _terminateAtBlockNodeosArg: str="--terminate-at-block" + snapshot: str=None + _snapshotNodeosDefault: str=None + _snapshotNodeosArg: str="--snapshot" + + def threads(self, threads: int): + self.chainThreads=threads + + def supportedNodeosArgs(self) -> list: + args = [] + for field in dataclasses.fields(self): + match = re.search("\w*NodeosArg", field.name) + if match is not None: + args.append(getattr(self, field.name)) + return args + + def __str__(self) -> str: + args = [] + for field in dataclasses.fields(self): + match = re.search("[^_]", field.name[0]) + if match is not None: + default = getattr(self, f"_{field.name}NodeosDefault") + current = getattr(self, field.name) + if current is not None and current != default: + if type(current) is bool: + args.append(f"{getattr(self, f'_{field.name}NodeosArg')}") + else: + args.append(f"{getattr(self, f'_{field.name}NodeosArg')} {getattr(self, field.name)}") + + return "--plugin " + self._pluginNamespace + "::" + self._pluginName + " " + " ".join(args) if len(args) > 0 else "" + +def main(): + pluginArgs = ChainPluginArgs() + print(pluginArgs.supportedNodeosArgs()) + exit(0) + +if __name__ == '__main__': + main() diff --git a/tests/performance_tests/HttpClientPluginArgs.py b/tests/performance_tests/HttpClientPluginArgs.py new file mode 100755 index 0000000000..c033d31e7e --- /dev/null +++ b/tests/performance_tests/HttpClientPluginArgs.py @@ -0,0 +1,48 @@ +#!/usr/bin/env python3 + +import dataclasses +import re + +from dataclasses import dataclass + +@dataclass +class HttpClientPluginArgs: + _pluginNamespace: str = "eosio" + _pluginName: str = "http_client_plugin" + httpsClientRootCert: str=None + _httpsClientRootCertNodeosDefault: str=None + _httpsClientRootCertNodeosArg: str="--https-client-root-cert" + httpsClientValidatePeers: int=None + _httpsClientValidatePeersNodeosDefault: int=1 + 
_httpsClientValidatePeersNodeosArg: str="--https-client-validate-peers" + + def supportedNodeosArgs(self) -> list: + args = [] + for field in dataclasses.fields(self): + match = re.search("\w*NodeosArg", field.name) + if match is not None: + args.append(getattr(self, field.name)) + return args + + def __str__(self) -> str: + args = [] + for field in dataclasses.fields(self): + match = re.search("[^_]", field.name[0]) + if match is not None: + default = getattr(self, f"_{field.name}NodeosDefault") + current = getattr(self, field.name) + if current is not None and current != default: + if type(current) is bool: + args.append(f"{getattr(self, f'_{field.name}NodeosArg')}") + else: + args.append(f"{getattr(self, f'_{field.name}NodeosArg')} {getattr(self, field.name)}") + + return "--plugin " + self._pluginNamespace + "::" + self._pluginName + " " + " ".join(args) if len(args) > 0 else "" + +def main(): + pluginArgs = HttpClientPluginArgs() + print(pluginArgs.supportedNodeosArgs()) + exit(0) + +if __name__ == '__main__': + main() diff --git a/tests/performance_tests/HttpPluginArgs.py b/tests/performance_tests/HttpPluginArgs.py new file mode 100755 index 0000000000..41aa6eee1c --- /dev/null +++ b/tests/performance_tests/HttpPluginArgs.py @@ -0,0 +1,99 @@ +#!/usr/bin/env python3 + +import dataclasses +import re + +from dataclasses import dataclass + +@dataclass +class HttpPluginArgs: + _pluginNamespace: str="eosio" + _pluginName: str="http_plugin" + unixSocketPath: str=None + _unixSocketPathNodeosDefault: str=None + _unixSocketPathNodeosArg: str="--unix-socket-path" + httpServerAddress: str=None + _httpServerAddressNodeosDefault: str="127.0.0.1:8888" + _httpServerAddressNodeosArg: str="--http-server-address" + httpsServerAddress: str=None + _httpsServerAddressNodeosDefault: str=None + _httpsServerAddressNodeosArg: str="--https-server-address" + httpsCertificateChainFile: str=None + _httpsCertificateChainFileNodeosDefault: str=None + _httpsCertificateChainFileNodeosArg: 
str="--https-certificate-chain-file" + httpsPrivateKeyFile: str=None + _httpsPrivateKeyFileNodeosDefault: str=None + _httpsPrivateKeyFileNodeosArg: str="--https-private-key-file" + httpsEcdhCurve: str=None + _httpsEcdhCurveNodeosDefault: str='secp384r1' + _httpsEcdhCurveNodeosArg: str="--https-ecdh-curve" + accessControlAllowOrigin: str=None + _accessControlAllowOriginNodeosDefault: str=None + _accessControlAllowOriginNodeosArg: str="--access-control-allow-origin" + accessControlAllowHeaders: str=None + _accessControlAllowHeadersNodeosDefault: str=None + _accessControlAllowHeadersNodeosArg: str="--access-control-allow-headers" + accessControlMaxAge: int=None + _accessControlMaxAgeNodeosDefault: int=None + _accessControlMaxAgeNodeosArg: str="--access-control-max-age" + accessControlAllowCredentials: bool=None + _accessControlAllowCredentialsNodeosDefault: bool=False + _accessControlAllowCredentialsNodeosArg: str="--access-control-allow-credentials" + maxBodySize: int=None + _maxBodySizeNodeosDefault: int=2097152 + _maxBodySizeNodeosArg: str="--max-body-size" + httpMaxBytesInFlightMb: int=None + _httpMaxBytesInFlightMbNodeosDefault: int=500 + _httpMaxBytesInFlightMbNodeosArg: str="--http-max-bytes-in-flight-mb" + httpMaxInFlightRequests: int=None + _httpMaxInFlightRequestsNodeosDefault: int=-1 + _httpMaxInFlightRequestsNodeosArg: str="--http-max-in-flight-requests" + httpMaxResponseTimeMs: int=None + _httpMaxResponseTimeMsNodeosDefault: int=30 + _httpMaxResponseTimeMsNodeosArg: str="--http-max-response-time-ms" + verboseHttpErrors: bool=None + _verboseHttpErrorsNodeosDefault: bool=False + _verboseHttpErrorsNodeosArg: str="--verbose-http-errors" + httpValidateHost: int=None + _httpValidateHostNodeosDefault: int=1 + _httpValidateHostNodeosArg: str="--http-validate-host" + httpAlias: str=None + _httpAliasNodeosDefault: str=None + _httpAliasNodeosArg: str="--http-alias" + httpThreads: int=None + _httpThreadsNodeosDefault: int=2 + _httpThreadsNodeosArg: 
str="--http-threads" + httpKeepAlive: int=None + _httpKeepAliveNodeosDefault: int=1 + _httpKeepAliveNodeosArg: str="--http-keep-alive" + + def supportedNodeosArgs(self) -> list: + args = [] + for field in dataclasses.fields(self): + match = re.search("\w*NodeosArg", field.name) + if match is not None: + args.append(getattr(self, field.name)) + return args + + def __str__(self) -> str: + args = [] + for field in dataclasses.fields(self): + match = re.search("[^_]", field.name[0]) + if match is not None: + default = getattr(self, f"_{field.name}NodeosDefault") + current = getattr(self, field.name) + if current is not None and current != default: + if type(current) is bool: + args.append(f"{getattr(self, f'_{field.name}NodeosArg')}") + else: + args.append(f"{getattr(self, f'_{field.name}NodeosArg')} {getattr(self, field.name)}") + + return "--plugin " + self._pluginNamespace + "::" + self._pluginName + " " + " ".join(args) if len(args) > 0 else "" + +def main(): + pluginArgs = HttpPluginArgs() + print(pluginArgs.supportedNodeosArgs()) + exit(0) + +if __name__ == '__main__': + main() diff --git a/tests/performance_tests/NetPluginArgs.py b/tests/performance_tests/NetPluginArgs.py new file mode 100755 index 0000000000..75ed490ba9 --- /dev/null +++ b/tests/performance_tests/NetPluginArgs.py @@ -0,0 +1,99 @@ +#!/usr/bin/env python3 + +import dataclasses +import re + +from dataclasses import dataclass + +@dataclass +class NetPluginArgs: + _pluginNamespace: str = "eosio" + _pluginName: str = "net_plugin" + p2pListenEndpoint: str=None + _p2pListenEndpointNodeosDefault: str="0.0.0.0:9876" + _p2pListenEndpointNodeosArg: str="--p2p-listen-endpoint" + p2pServerAddress: str=None + _p2pServerAddressNodeosDefault: str=None + _p2pServerAddressNodeosArg: str="--p2p-server-address" + p2pPeerAddress: str=None + _p2pPeerAddressNodeosDefault: str=None + _p2pPeerAddressNodeosArg: str="--p2p-peer-address" + p2pMaxNodesPerHost: int=None + _p2pMaxNodesPerHostNodeosDefault: int=1 + 
_p2pMaxNodesPerHostNodeosArg: str="--p2p-max-nodes-per-host" + p2pAcceptTransactions: int=None + _p2pAcceptTransactionsNodeosDefault: int=1 + _p2pAcceptTransactionsNodeosArg: str="--p2p-accept-transactions" + agentName: str=None + _agentNameNodeosDefault: str='EOS Test Agent' + _agentNameNodeosArg: str="--agent-name" + allowedConnection: str=None + _allowedConnectionNodeosDefault: str='any' + _allowedConnectionNodeosArg: str="--allowed-connection" + peerKey: str=None + _peerKeyNodeosDefault: str=None + _peerKeyNodeosArg: str="--peer-key" + peerPrivateKey: str=None + _peerPrivateKeyNodeosDefault: str=None + _peerPrivateKeyNodeosArg: str="--peer-private-key" + maxClients: int=None + _maxClientsNodeosDefault: int=25 + _maxClientsNodeosArg: str="--max-clients" + connectionCleanupPeriod: int=None + _connectionCleanupPeriodNodeosDefault: int=30 + _connectionCleanupPeriodNodeosArg: str="--connection-cleanup-period" + maxCleanupTimeMsec: int=None + _maxCleanupTimeMsecNodeosDefault: int=10 + _maxCleanupTimeMsecNodeosArg: str="--max-cleanup-time-msec" + p2pDedupCacheExpireTimeSec: int=None + _p2pDedupCacheExpireTimeSecNodeosDefault: int=10 + _p2pDedupCacheExpireTimeSecNodeosArg: str="--p2p-dedup-cache-expire-time-sec" + netThreads: int=None + _netThreadsNodeosDefault: int=2 + _netThreadsNodeosArg: str="--net-threads" + syncFetchSpan: int=None + _syncFetchSpanNodeosDefault: int=100 + _syncFetchSpanNodeosArg: str="--sync-fetch-span" + useSocketReadWatermark: int=None + _useSocketReadWatermarkNodeosDefault: int=0 + _useSocketReadWatermarkNodeosArg: str="--use-socket-read-watermark" + peerLogFormat: str=None + _peerLogFormatNodeosDefault: str="[\"${_name}\" - ${_cid} ${_ip}:${_port}] " + _peerLogFormatNodeosArg: str="--peer-log-format" + p2pKeepaliveIntervalMs: int=None + _p2pKeepaliveIntervalMsNodeosDefault: int=10000 + _p2pKeepaliveIntervalMsNodeosArg: str="--p2p-keepalive-interval-ms" + + def threads(self, threads: int): + self.netThreads=threads + + def 
supportedNodeosArgs(self) -> list: + args = [] + for field in dataclasses.fields(self): + match = re.search("\w*NodeosArg", field.name) + if match is not None: + args.append(getattr(self, field.name)) + return args + + def __str__(self) -> str: + args = [] + for field in dataclasses.fields(self): + match = re.search("[^_]", field.name[0]) + if match is not None: + default = getattr(self, f"_{field.name}NodeosDefault") + current = getattr(self, field.name) + if current is not None and current != default: + if type(current) is bool: + args.append(f"{getattr(self, f'_{field.name}NodeosArg')}") + else: + args.append(f"{getattr(self, f'_{field.name}NodeosArg')} {getattr(self, field.name)}") + + return "--plugin " + self._pluginNamespace + "::" + self._pluginName + " " + " ".join(args) if len(args) > 0 else "" + +def main(): + pluginArgs = NetPluginArgs() + print(pluginArgs.supportedNodeosArgs()) + exit(0) + +if __name__ == '__main__': + main() diff --git a/tests/performance_tests/ProducerPluginArgs.py b/tests/performance_tests/ProducerPluginArgs.py new file mode 100755 index 0000000000..0fd7795591 --- /dev/null +++ b/tests/performance_tests/ProducerPluginArgs.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python3 + +import dataclasses +import re + +from dataclasses import dataclass + +@dataclass +class ProducerPluginArgs: + _pluginNamespace: str = "eosio" + _pluginName: str = "producer_plugin" + enableStaleProduction: bool=None + _enableStaleProductionNodeosDefault: bool=False + _enableStaleProductionNodeosArg: str="--enable-stale-production" + pauseOnStartup: bool=None + _pauseOnStartupNodeosDefault: bool=False + _pauseOnStartupNodeosArg: str="--pause-on-startup" + maxTransactionTime: int=None + _maxTransactionTimeNodeosDefault: int=30 + _maxTransactionTimeNodeosArg: str="--max-transaction-time" + maxIrreversibleBlockAge: int=None + _maxIrreversibleBlockAgeNodeosDefault: int=-1 + _maxIrreversibleBlockAgeNodeosArg: str="--max-irreversible-block-age" + producerName: str=None + 
_producerNameNodeosDefault: str=None + _producerNameNodeosArg: str="--producer-name" + privateKey: str=None + _privateKeyNodeosDefault: str=None + _privateKeyNodeosArg: str="--private-key" + signatureProvider: str=None + _signatureProviderNodeosDefault: str="EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV=KEY:5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3" + _signatureProviderNodeosArg: str="--signature-provider" + greylistAccount: str=None + _greylistAccountNodeosDefault: str=None + _greylistAccountNodeosArg: str="--greylist-account" + greylistLimit: int=None + _greylistLimitNodeosDefault: int=1000 + _greylistLimitNodeosArg: str="--greylist-limit" + produceTimeOffsetUs: int=None + _produceTimeOffsetUsNodeosDefault: int=0 + _produceTimeOffsetUsNodeosArg: str="--produce-time-offset-us" + lastBlockTimeOffsetUs: int=None + _lastBlockTimeOffsetUsNodeosDefault: int=-200000 + _lastBlockTimeOffsetUsNodeosArg: str="--last-block-time-offset-us" + cpuEffortPercent: int=None + _cpuEffortPercentNodeosDefault: int=80 + _cpuEffortPercentNodeosArg: str="--cpu-effort-percent" + lastBlockCpuEffortPercent: int=None + _lastBlockCpuEffortPercentNodeosDefault: int=80 + _lastBlockCpuEffortPercentNodeosArg: str="--last-block-cpu-effort-percent" + maxBlockCpuUsageThresholdUs: int=None + _maxBlockCpuUsageThresholdUsNodeosDefault: int=5000 + _maxBlockCpuUsageThresholdUsNodeosArg: str="--max-block-cpu-usage-threshold-us" + maxBlockNetUsageThresholdBytes: int=None + _maxBlockNetUsageThresholdBytesNodeosDefault: int=1024 + _maxBlockNetUsageThresholdBytesNodeosArg: str="--max-block-net-usage-threshold-bytes" + maxScheduledTransactionTimePerBlockMs: int=None + _maxScheduledTransactionTimePerBlockMsNodeosDefault: int=100 + _maxScheduledTransactionTimePerBlockMsNodeosArg: str="--max-scheduled-transaction-time-per-block-ms" + subjectiveCpuLeewayUs: int=None + _subjectiveCpuLeewayUsNodeosDefault: int=31000 + _subjectiveCpuLeewayUsNodeosArg: str="--subjective-cpu-leeway-us" + 
subjectiveAccountMaxFailures: int=None + _subjectiveAccountMaxFailuresNodeosDefault: int=3 + _subjectiveAccountMaxFailuresNodeosArg: str="--subjective-account-max-failures" + subjectiveAccountDecayTimeMinutes: int=None + _subjectiveAccountDecayTimeMinutesNodeosDefault: int=1440 + _subjectiveAccountDecayTimeMinutesNodeosArg: str="--subjective-account-decay-time-minutes" + incomingDeferRatio: int=None + _incomingDeferRatioNodeosDefault: int=1 + _incomingDeferRatioNodeosArg: str="--incoming-defer-ratio" + incomingTransactionQueueSizeMb: int=None + _incomingTransactionQueueSizeMbNodeosDefault: int=1024 + _incomingTransactionQueueSizeMbNodeosArg: str="--incoming-transaction-queue-size-mb" + disableSubjectiveBilling: int=None + _disableSubjectiveBillingNodeosDefault: int=1 + _disableSubjectiveBillingNodeosArg: str="--disable-subjective-billing" + disableSubjectiveAccountBilling: bool=None + _disableSubjectiveAccountBillingNodeosDefault: bool=False + _disableSubjectiveAccountBillingNodeosArg: str="--disable-subjective-account-billing" + disableSubjectiveP2pBilling: int=None + _disableSubjectiveP2pBillingNodeosDefault: int=1 + _disableSubjectiveP2pBillingNodeosArg: str="--disable-subjective-p2p-billing" + disableSubjectiveApiBilling: int=None + _disableSubjectiveApiBillingNodeosDefault: int=1 + _disableSubjectiveApiBillingNodeosArg: str="--disable-subjective-api-billing" + producerThreads: int=None + _producerThreadsNodeosDefault: int=2 + _producerThreadsNodeosArg: str="--producer-threads" + snapshotsDir: str=None + _snapshotsDirNodeosDefault: str='"snapshots"' + _snapshotsDirNodeosArg: str="--snapshots-dir" + + def threads(self, threads: int): + self.producerThreads=threads + + def supportedNodeosArgs(self) -> list: + args = [] + for field in dataclasses.fields(self): + match = re.search("\w*NodeosArg", field.name) + if match is not None: + args.append(getattr(self, field.name)) + return args + + def __str__(self) -> str: + args = [] + for field in 
dataclasses.fields(self): + match = re.search("[^_]", field.name[0]) + if match is not None: + default = getattr(self, f"_{field.name}NodeosDefault") + current = getattr(self, field.name) + if current is not None and current != default: + if type(current) is bool: + args.append(f"{getattr(self, f'_{field.name}NodeosArg')}") + else: + args.append(f"{getattr(self, f'_{field.name}NodeosArg')} {getattr(self, field.name)}") + + return "--plugin " + self._pluginNamespace + "::" + self._pluginName + " " + " ".join(args) if len(args) > 0 else "" + +def main(): + pluginArgs = ProducerPluginArgs() + print(pluginArgs.supportedNodeosArgs()) + exit(0) + +if __name__ == '__main__': + main() diff --git a/tests/performance_tests/ResourceMonitorPluginArgs.py b/tests/performance_tests/ResourceMonitorPluginArgs.py new file mode 100755 index 0000000000..2103c8ee28 --- /dev/null +++ b/tests/performance_tests/ResourceMonitorPluginArgs.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python3 + +import dataclasses +import re + +from dataclasses import dataclass + +@dataclass +class ResourceMonitorPluginArgs: + _pluginNamespace: str = "eosio" + _pluginName: str = "resource_monitor_plugin" + resourceMonitorIntervalSeconds: int=None + _resourceMonitorIntervalSecondsNodeosDefault: int=2 + _resourceMonitorIntervalSecondsNodeosArg: str="--resource-monitor-interval-seconds" + resourceMonitorSpaceThreshold: int=None + _resourceMonitorSpaceThresholdNodeosDefault: int=90 + _resourceMonitorSpaceThresholdNodeosArg: str="--resource-monitor-space-threshold" + resourceMonitorNotShutdownOnThresholdExceeded: bool=None + _resourceMonitorNotShutdownOnThresholdExceededNodeosDefault: bool=False + _resourceMonitorNotShutdownOnThresholdExceededNodeosArg: str="--resource-monitor-not-shutdown-on-threshold-exceeded" + resourceMonitorWarningInterval: int=None + _resourceMonitorWarningIntervalNodeosDefault: int=30 + _resourceMonitorWarningIntervalNodeosArg: str="--resource-monitor-warning-interval" + + def 
supportedNodeosArgs(self) -> list: + args = [] + for field in dataclasses.fields(self): + match = re.search("\w*NodeosArg", field.name) + if match is not None: + args.append(getattr(self, field.name)) + return args + + def __str__(self) -> str: + args = [] + for field in dataclasses.fields(self): + match = re.search("[^_]", field.name[0]) + if match is not None: + default = getattr(self, f"_{field.name}NodeosDefault") + current = getattr(self, field.name) + if current is not None and current != default: + if type(current) is bool: + args.append(f"{getattr(self, f'_{field.name}NodeosArg')}") + else: + args.append(f"{getattr(self, f'_{field.name}NodeosArg')} {getattr(self, field.name)}") + + return "--plugin " + self._pluginNamespace + "::" + self._pluginName + " " + " ".join(args) if len(args) > 0 else "" + +def main(): + pluginArgs = ResourceMonitorPluginArgs() + print(pluginArgs.supportedNodeosArgs()) + exit(0) + +if __name__ == '__main__': + main() diff --git a/tests/performance_tests/SignatureProviderPluginArgs.py b/tests/performance_tests/SignatureProviderPluginArgs.py new file mode 100755 index 0000000000..a6d7a0bbc2 --- /dev/null +++ b/tests/performance_tests/SignatureProviderPluginArgs.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python3 + +import dataclasses +import re + +from dataclasses import dataclass + +@dataclass +class SignatureProviderPluginArgs: + _pluginNamespace: str = "eosio" + _pluginName: str = "signature_provider_plugin" + keosdProviderTimeout: int=None + _keosdProviderTimeoutNodeosDefault: int=5 + _keosdProviderTimeoutNodeosArg: str="--keosd-provider-timeout" + + def supportedNodeosArgs(self) -> list: + args = [] + for field in dataclasses.fields(self): + match = re.search("\w*NodeosArg", field.name) + if match is not None: + args.append(getattr(self, field.name)) + return args + + def __str__(self) -> str: + args = [] + for field in dataclasses.fields(self): + match = re.search("[^_]", field.name[0]) + if match is not None: + default = getattr(self, 
f"_{field.name}NodeosDefault") + current = getattr(self, field.name) + if current is not None and current != default: + if type(current) is bool: + args.append(f"{getattr(self, f'_{field.name}NodeosArg')}") + else: + args.append(f"{getattr(self, f'_{field.name}NodeosArg')} {getattr(self, field.name)}") + + return "--plugin " + self._pluginNamespace + "::" + self._pluginName + " " + " ".join(args) if len(args) > 0 else "" + +def main(): + pluginArgs = SignatureProviderPluginArgs() + print(pluginArgs.supportedNodeosArgs()) + exit(0) + +if __name__ == '__main__': + main() diff --git a/tests/performance_tests/StateHistoryPluginArgs.py b/tests/performance_tests/StateHistoryPluginArgs.py new file mode 100755 index 0000000000..4e4d133edb --- /dev/null +++ b/tests/performance_tests/StateHistoryPluginArgs.py @@ -0,0 +1,66 @@ +#!/usr/bin/env python3 + +import dataclasses +import re + +from dataclasses import dataclass + +@dataclass +class StateHistoryPluginArgs: + _pluginNamespace: str = "eosio" + _pluginName: str = "state_history_plugin" + stateHistoryDir: str=None + _stateHistoryDirNodeosDefault: str='"state-history"' + _stateHistoryDirNodeosArg: str="--state-history-dir" + traceHistory: bool=None + _traceHistoryNodeosDefault: bool=False + _traceHistoryNodeosArg: str="--trace-history" + chainStateHistory: bool=None + _chainStateHistoryNodeosDefault: bool=False + _chainStateHistoryNodeosArg: str="--chain-state-history" + stateHistoryEndpoint: str=None + _stateHistoryEndpointNodeosDefault: str="127.0.0.1:8080" + _stateHistoryEndpointNodeosArg: str="--state-history-endpoint" + stateHistoryUnixSocketPath: str=None + _stateHistoryUnixSocketPathNodeosDefault: str=None + _stateHistoryUnixSocketPathNodeosArg: str="--state-history-unix-socket-path" + traceHistoryDebugMode: bool=None + _traceHistoryDebugModeNodeosDefault: bool=False + _traceHistoryDebugModeNodeosArg: str="--trace-history-debug-mode" + stateHistoryLogRetainBlocks: bool=None + 
_stateHistoryLogRetainBlocksNodeosDefault: bool=False + _stateHistoryLogRetainBlocksNodeosArg: str="--state-history-log-retain-blocks" + deleteStateHistory: bool=None + _deleteStateHistoryNodeosDefault: bool=False + _deleteStateHistoryNodeosArg: str="--delete-state-history" + + def supportedNodeosArgs(self) -> list: + args = [] + for field in dataclasses.fields(self): + match = re.search("\w*NodeosArg", field.name) + if match is not None: + args.append(getattr(self, field.name)) + return args + + def __str__(self) -> str: + args = [] + for field in dataclasses.fields(self): + match = re.search("[^_]", field.name[0]) + if match is not None: + default = getattr(self, f"_{field.name}NodeosDefault") + current = getattr(self, field.name) + if current is not None and current != default: + if type(current) is bool: + args.append(f"{getattr(self, f'_{field.name}NodeosArg')}") + else: + args.append(f"{getattr(self, f'_{field.name}NodeosArg')} {getattr(self, field.name)}") + + return "--plugin " + self._pluginNamespace + "::" + self._pluginName + " " + " ".join(args) if len(args) > 0 else "" + +def main(): + pluginArgs = StateHistoryPluginArgs() + print(pluginArgs.supportedNodeosArgs()) + exit(0) + +if __name__ == '__main__': + main() diff --git a/tests/performance_tests/TraceApiPluginArgs.py b/tests/performance_tests/TraceApiPluginArgs.py new file mode 100755 index 0000000000..a415caccc7 --- /dev/null +++ b/tests/performance_tests/TraceApiPluginArgs.py @@ -0,0 +1,60 @@ +#!/usr/bin/env python3 + +import dataclasses +import re + +from dataclasses import dataclass + +@dataclass +class TraceApiPluginArgs: + _pluginNamespace: str="eosio" + _pluginName: str="trace_api_plugin" + traceDir: str=None + _traceDirNodeosDefault: str='"traces"' + _traceDirNodeosArg: str="--trace-dir" + traceSliceStride: int=None + _traceSliceStrideNodeosDefault: int=10000 + _traceSliceStrideNodeosArg: str="--trace-slice-stride" + traceMinimumIrreversibleHistoryBlocks: int=None + 
_traceMinimumIrreversibleHistoryBlocksNodeosDefault: int=-1 + _traceMinimumIrreversibleHistoryBlocksNodeosArg: str="--trace-minimum-irreversible-history-blocks" + traceMinimumUncompressedIrreversibleHistoryBlocks: int=None + _traceMinimumUncompressedIrreversibleHistoryBlocksNodeosDefault: int=-1 + _traceMinimumUncompressedIrreversibleHistoryBlocksNodeosArg: str="--trace-minimum-uncompressed-irreversible-history-blocks" + traceRpcAbi: str=None + _traceRpcAbiNodeosDefault: str=None + _traceRpcAbiNodeosArg: str="--trace-rpc-abi" + traceNoAbis: bool=None + _traceNoAbisNodeosDefault: bool=False + _traceNoAbisNodeosArg: str="--trace-no-abis" + + def supportedNodeosArgs(self) -> list: + args = [] + for field in dataclasses.fields(self): + match = re.search("\w*NodeosArg", field.name) + if match is not None: + args.append(getattr(self, field.name)) + return args + + def __str__(self) -> str: + args = [] + for field in dataclasses.fields(self): + match = re.search("[^_]", field.name[0]) + if match is not None: + default = getattr(self, f"_{field.name}NodeosDefault") + current = getattr(self, field.name) + if current is not None and current != default: + if type(current) is bool: + args.append(f"{getattr(self, f'_{field.name}NodeosArg')}") + else: + args.append(f"{getattr(self, f'_{field.name}NodeosArg')} {getattr(self, field.name)}") + + return "--plugin " + self._pluginNamespace + "::" + self._pluginName + " " + " ".join(args) if len(args) > 0 else "" + +def main(): + pluginArgs = TraceApiPluginArgs() + print(pluginArgs.supportedNodeosArgs()) + exit(0) + +if __name__ == '__main__': + main() diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index be8b925249..b0edc31437 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -11,8 +11,11 @@ harnessPath = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(harnessPath) +from ChainPluginArgs import 
ChainPluginArgs +from HttpPluginArgs import HttpPluginArgs +from NetPluginArgs import NetPluginArgs +from ProducerPluginArgs import ProducerPluginArgs from TestHarness import TestHelper, Utils -from TestHarness.TestHelper import AppArgs from performance_test_basic import PerformanceTestBasic, PtbArgumentsHandler from platform import release, system from dataclasses import dataclass, asdict, field @@ -230,7 +233,7 @@ def optimizePluginThreadCount(self, optPlugin: PluginThreadOpt, optType: Plugin for threadCount in range(minThreadCount, maxThreadCount+1): print(f"Running {optPlugin.value} thread count optimization check with {threadCount} {optPlugin.value} threads") - getattr(clusterConfig.extraNodeosArgs, optPlugin.value + 'PluginArgs').threads = threadCount + getattr(clusterConfig.extraNodeosArgs, optPlugin.value + 'PluginArgs').threads(threadCount) binSearchResults = self.performPtbBinarySearch(clusterConfig=clusterConfig, logDirRoot=self.loggingConfig.pluginThreadOptLogsDirPath, delReport=True, quiet=False, delPerfLogs=True) @@ -497,14 +500,14 @@ def main(): verbose=args.v) ENA = PerformanceTestBasic.ClusterConfig.ExtraNodeosArgs - chainPluginArgs = ENA.ChainPluginArgs(signatureCpuBillablePct=args.signature_cpu_billable_pct, chainStateDbSizeMb=args.chain_state_db_size_mb, - threads=args.chain_threads, databaseMapMode=args.database_map_mode) - producerPluginArgs = ENA.ProducerPluginArgs(disableSubjectiveBilling=args.disable_subjective_billing, - lastBlockTimeOffsetUs=args.last_block_time_offset_us, produceTimeOffsetUs=args.produce_time_offset_us, - cpuEffortPercent=args.cpu_effort_percent, lastBlockCpuEffortPercent=args.last_block_cpu_effort_percent, - threads=args.producer_threads) - httpPluginArgs = ENA.HttpPluginArgs(httpMaxResponseTimeMs=args.http_max_response_time_ms) - netPluginArgs = ENA.NetPluginArgs(threads=args.net_threads) + chainPluginArgs = ChainPluginArgs(signatureCpuBillablePct=args.signature_cpu_billable_pct, 
chainStateDbSizeMb=args.chain_state_db_size_mb, + chainThreads=args.chain_threads, databaseMapMode=args.database_map_mode) + producerPluginArgs = ProducerPluginArgs(disableSubjectiveBilling=args.disable_subjective_billing, + lastBlockTimeOffsetUs=args.last_block_time_offset_us, produceTimeOffsetUs=args.produce_time_offset_us, + cpuEffortPercent=args.cpu_effort_percent, lastBlockCpuEffortPercent=args.last_block_cpu_effort_percent, + producerThreads=args.producer_threads) + httpPluginArgs = HttpPluginArgs(httpMaxResponseTimeMs=args.http_max_response_time_ms) + netPluginArgs = NetPluginArgs(netThreads=args.net_threads) extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs) testClusterConfig = PerformanceTestBasic.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 9f9ba61137..a7b10cebc1 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -1,24 +1,30 @@ #!/usr/bin/env python3 import argparse +import dataclasses import os +import re import sys -import subprocess import shutil import signal -from unittest import TestResult import log_reader -import inspect import launch_transaction_generators as ltg harnessPath = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(harnessPath) +from ChainPluginArgs import ChainPluginArgs +from HttpClientPluginArgs import HttpClientPluginArgs +from HttpPluginArgs import HttpPluginArgs +from NetPluginArgs import NetPluginArgs +from ProducerPluginArgs import ProducerPluginArgs +from ResourceMonitorPluginArgs import ResourceMonitorPluginArgs +from SignatureProviderPluginArgs import SignatureProviderPluginArgs +from 
StateHistoryPluginArgs import StateHistoryPluginArgs +from TraceApiPluginArgs import TraceApiPluginArgs from TestHarness import Cluster, TestHelper, Utils, WalletMgr -from TestHarness.TestHelper import AppArgs from dataclasses import dataclass, asdict, field from datetime import datetime -from math import ceil class PerformanceTestBasic: @dataclass @@ -48,57 +54,24 @@ def __post_init__(self): class ClusterConfig: @dataclass class ExtraNodeosArgs: - @dataclass - class ChainPluginArgs: - signatureCpuBillablePct: int = 0 - chainStateDbSizeMb: int = 10 * 1024 - threads: int = 3 - databaseMapMode: str = "mapped" - - def __str__(self) -> str: - return f"--signature-cpu-billable-pct {self.signatureCpuBillablePct} \ - --chain-state-db-size-mb {self.chainStateDbSizeMb} \ - --chain-threads {self.threads} \ - --database-map-mode {self.databaseMapMode}" - - @dataclass - class NetPluginArgs: - threads: int = 2 - - def __str__(self) -> str: - return f"--net-threads {self.threads}" - - @dataclass - class ProducerPluginArgs: - disableSubjectiveBilling: bool = True - lastBlockTimeOffsetUs: int = 0 - produceTimeOffsetUs: int = 0 - cpuEffortPercent: int = 100 - lastBlockCpuEffortPercent: int = 100 - threads: int = 6 - - def __str__(self) -> str: - return f"--disable-subjective-billing {self.disableSubjectiveBilling} \ - --last-block-time-offset-us {self.lastBlockTimeOffsetUs} \ - --produce-time-offset-us {self.produceTimeOffsetUs} \ - --cpu-effort-percent {self.cpuEffortPercent} \ - --last-block-cpu-effort-percent {self.lastBlockCpuEffortPercent} \ - --producer-threads {self.threads}" - - @dataclass - class HttpPluginArgs: - httpMaxResponseTimeMs: int = 990000 - - def __str__(self) -> str: - return f"--http-max-response-time-ms {self.httpMaxResponseTimeMs}" chainPluginArgs: ChainPluginArgs = ChainPluginArgs() - producerPluginArgs: ProducerPluginArgs = ProducerPluginArgs() + httpClientPluginArgs: HttpClientPluginArgs = HttpClientPluginArgs() httpPluginArgs: HttpPluginArgs = 
HttpPluginArgs() netPluginArgs: NetPluginArgs = NetPluginArgs() + producerPluginArgs: ProducerPluginArgs = ProducerPluginArgs() + resourceMonitorPluginArgs: ResourceMonitorPluginArgs = ResourceMonitorPluginArgs() + signatureProviderPluginArgs: SignatureProviderPluginArgs = SignatureProviderPluginArgs() + stateHistoryPluginArgs: StateHistoryPluginArgs = StateHistoryPluginArgs() + traceApiPluginArgs: TraceApiPluginArgs = TraceApiPluginArgs() def __str__(self) -> str: - return f" {self.httpPluginArgs} {self.producerPluginArgs} {self.chainPluginArgs} {self.netPluginArgs}" + args = [] + for field in dataclasses.fields(self): + match = re.search("\w*PluginArgs", field.name) + if match is not None: + args.append(f"{getattr(self, field.name)}") + return " ".join(args) pnodes: int = 1 totalNodes: int = 2 @@ -503,15 +476,15 @@ def main(): testHelperConfig = PerformanceTestBasic.TestHelperConfig(killAll=args.clean_run, dontKill=args.leave_running, keepLogs=not args.del_perf_logs, dumpErrorDetails=args.dump_error_details, delay=args.d, nodesFile=args.nodes_file, verbose=args.v) + chainPluginArgs = ChainPluginArgs(signatureCpuBillablePct=args.signature_cpu_billable_pct, chainStateDbSizeMb=args.chain_state_db_size_mb, + chainThreads=args.chain_threads, databaseMapMode=args.database_map_mode) + producerPluginArgs = ProducerPluginArgs(disableSubjectiveBilling=args.disable_subjective_billing, + lastBlockTimeOffsetUs=args.last_block_time_offset_us, produceTimeOffsetUs=args.produce_time_offset_us, + cpuEffortPercent=args.cpu_effort_percent, lastBlockCpuEffortPercent=args.last_block_cpu_effort_percent, + producerThreads=args.producer_threads) + httpPluginArgs = HttpPluginArgs(httpMaxResponseTimeMs=args.http_max_response_time_ms) + netPluginArgs = NetPluginArgs(netThreads=args.net_threads) ENA = PerformanceTestBasic.ClusterConfig.ExtraNodeosArgs - chainPluginArgs = ENA.ChainPluginArgs(signatureCpuBillablePct=args.signature_cpu_billable_pct, 
chainStateDbSizeMb=args.chain_state_db_size_mb, - threads=args.chain_threads, databaseMapMode=args.database_map_mode) - producerPluginArgs = ENA.ProducerPluginArgs(disableSubjectiveBilling=args.disable_subjective_billing, - lastBlockTimeOffsetUs=args.last_block_time_offset_us, produceTimeOffsetUs=args.produce_time_offset_us, - cpuEffortPercent=args.cpu_effort_percent, lastBlockCpuEffortPercent=args.last_block_cpu_effort_percent, - threads=args.producer_threads) - httpPluginArgs = ENA.HttpPluginArgs(httpMaxResponseTimeMs=args.http_max_response_time_ms) - netPluginArgs = ENA.NetPluginArgs(threads=args.net_threads) extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs) testClusterConfig = PerformanceTestBasic.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs) diff --git a/tests/performance_tests/validate_nodeos_plugin_args.py b/tests/performance_tests/validate_nodeos_plugin_args.py new file mode 100755 index 0000000000..a13e7021f5 --- /dev/null +++ b/tests/performance_tests/validate_nodeos_plugin_args.py @@ -0,0 +1,119 @@ +#!/usr/bin/env python3 + +import re +import subprocess + +from ChainPluginArgs import ChainPluginArgs +from HttpClientPluginArgs import HttpClientPluginArgs +from HttpPluginArgs import HttpPluginArgs +from NetPluginArgs import NetPluginArgs +from ProducerPluginArgs import ProducerPluginArgs +from ResourceMonitorPluginArgs import ResourceMonitorPluginArgs +from SignatureProviderPluginArgs import SignatureProviderPluginArgs +from StateHistoryPluginArgs import StateHistoryPluginArgs +from TraceApiPluginArgs import TraceApiPluginArgs + +testSuccessful = False + +def parseNodeosConfigOptions() -> dict: + result = subprocess.run(["programs/nodeos/nodeos", "--help"], capture_output=True, text=True) + + myStr = result.stdout + myStr = 
myStr.rstrip("\n") + myStr = re.sub(":\n\s+-",':@@@\n -', string=myStr) + myStr = re.sub("\n\n",'\n@@@', string=myStr) + myStr = re.sub("Application Options:\n",'', string=myStr) + pluginSections = re.split("(@@@.*?@@@\n)", string=myStr) + + sec=0 + for section in pluginSections: + sec=sec+1 + + def pairwise(iterable): + "s -> (s0, s1), (s2, s3), (s4, s5), ..." + a = iter(iterable) + return zip(a, a) + + pluginOptsDict = {} + for section, options in pairwise(pluginSections[1:]): + myOpts = re.sub("\s+", " ", options) + myOpts = re.sub("\n", " ", myOpts) + myOpts = re.sub(" --", "\n--",string = myOpts) + splitOpts=re.split("\n", myOpts) + + argDefaultsDict = {} + for opt in splitOpts[1:]: + secondSplit = re.split("(--[\w\-]+)", opt)[1:] + argument=secondSplit[0] + argDefaultDesc=secondSplit[1].lstrip("\s") + default = None + match = re.search("\(=.*?\)", argDefaultDesc) + if match is not None: + value = match.group(0)[2:-1] + try: + default = int(value) + except ValueError: + default = str(value) + argDefaultsDict[argument] = default + + section=re.sub("@@@", "", section) + section=re.sub("\n", "", section) + sectionSplit=re.split("::", section) + configSection = section + if len(sectionSplit) > 1: + configSection=sectionSplit[1] + + if configSection[-1] == ":": + configSection = configSection[:-1] + + if pluginOptsDict.get(configSection) is not None: + pluginOptsDict[configSection].update(argDefaultsDict) + else: + pluginOptsDict[configSection] = argDefaultsDict + return pluginOptsDict + +nodeosPluginOptsDict = parseNodeosConfigOptions() + +curListOfSupportedPlugins = [ChainPluginArgs(), HttpClientPluginArgs(), HttpPluginArgs(), NetPluginArgs(), ProducerPluginArgs(), + ResourceMonitorPluginArgs(), SignatureProviderPluginArgs(), StateHistoryPluginArgs(), TraceApiPluginArgs()] + +curListOfUnsupportedOptionGroups = ["txn_test_gen_plugin", "Application Config Options", "Application Command Line Options"] + +#Check whether nodeos has added any plugin configuration 
sections +for confSection in nodeosPluginOptsDict.keys(): + assert confSection in [paClass._pluginName for paClass in curListOfSupportedPlugins] or confSection in curListOfUnsupportedOptionGroups, f"ERROR: New config section \"{confSection}\" added to nodeos which may require updates." + +def argStrToAttrName(argStr: str) -> str: + attrName="".join([x.capitalize() for x in argStr.split('-')]).replace('--','') + attrName="".join([attrName[0].lower(), attrName[1:]]) + return attrName + +for supportedPlugin in curListOfSupportedPlugins: + #Check whether nodeos has removed any plugin configuration sections + assert supportedPlugin._pluginName in nodeosPluginOptsDict, f"ERROR: Supported config section \"{supportedPlugin}\" no longer supported by nodeos." + + for opt in supportedPlugin.supportedNodeosArgs(): + #Check whether nodeos has removed any arguments in a plugin + assert opt in nodeosPluginOptsDict[supportedPlugin._pluginName].keys(), f"ERROR: nodeos no longer supports \"{opt}\" in \"{supportedPlugin._pluginName}\"." + + + ourDefault = getattr(supportedPlugin, f"_{argStrToAttrName(opt)}NodeosDefault") + nodeosCurDefault = nodeosPluginOptsDict[supportedPlugin._pluginName][opt] + if type(ourDefault) == bool and nodeosCurDefault is None: + nodeosCurDefault=False + #Check whether our defaults no longer match nodeos's + assert ourDefault == nodeosCurDefault, f"ERROR: {type(supportedPlugin)}'s default for \"{opt}\" is {ourDefault} and no longer matches nodeos's default {nodeosCurDefault} in \"{supportedPlugin._pluginName}\"." + + #Check whether nodeos has added/updated any argument defaults + for nodeosOpt, defaultValue in nodeosPluginOptsDict[supportedPlugin._pluginName].items(): + assert nodeosOpt in supportedPlugin.supportedNodeosArgs(), f"ERROR: New nodeos option \"{nodeosOpt}\". Support for this option needs to be added to {type(supportedPlugin)}." 
+ + ourDefault = getattr(supportedPlugin, f"_{argStrToAttrName(nodeosOpt)}NodeosDefault") + if type(ourDefault) == bool and defaultValue is None: + defaultValue=False + assert defaultValue == ourDefault, f"ERROR: nodeos's default for \"{nodeosOpt}\" is {nodeosCurDefault} and no longer matches {type(supportedPlugin)}'s default: {ourDefault} in \"{supportedPlugin._pluginName}\"." + +testSuccessful = True + +exitCode = 0 if testSuccessful else 1 +exit(exitCode) From 23aea602b19913e250e59e38e8c7055fbe9b73b9 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 6 Dec 2022 17:50:58 -0600 Subject: [PATCH 028/178] Fixup some of the config options. --- tests/performance_tests/ChainPluginArgs.py | 22 +++++++++---------- .../performance_tests/HttpClientPluginArgs.py | 4 ++-- tests/performance_tests/HttpPluginArgs.py | 2 +- tests/performance_tests/NetPluginArgs.py | 10 ++++----- tests/performance_tests/ProducerPluginArgs.py | 4 ++-- .../ResourceMonitorPluginArgs.py | 4 ++-- .../SignatureProviderPluginArgs.py | 4 ++-- .../StateHistoryPluginArgs.py | 8 +++---- 8 files changed, 29 insertions(+), 29 deletions(-) diff --git a/tests/performance_tests/ChainPluginArgs.py b/tests/performance_tests/ChainPluginArgs.py index 9bdcb3e07a..528585bddb 100755 --- a/tests/performance_tests/ChainPluginArgs.py +++ b/tests/performance_tests/ChainPluginArgs.py @@ -7,8 +7,8 @@ @dataclass class ChainPluginArgs: - _pluginNamespace: str = "eosio" - _pluginName: str = "chain_plugin" + _pluginNamespace: str="eosio" + _pluginName: str="chain_plugin" blocksDir: str=None _blocksDirNodeosDefault: str='"blocks"' _blocksDirNodeosArg: str="--blocks-dir" @@ -19,7 +19,7 @@ class ChainPluginArgs: _checkpointNodeosDefault: str=None _checkpointNodeosArg: str="--checkpoint" wasmRuntime: str=None - _wasmRuntimeNodeosDefault: str='eos-vm-jit' + _wasmRuntimeNodeosDefault: str="eos-vm-jit" _wasmRuntimeNodeosArg: str="--wasm-runtime" profileAccount: str=None _profileAccountNodeosDefault: str=None @@ -67,13 +67,13 @@ 
class ChainPluginArgs: _senderBypassWhiteblacklistNodeosDefault: str=None _senderBypassWhiteblacklistNodeosArg: str="--sender-bypass-whiteblacklist" readMode: str=None - _readModeNodeosDefault: str='head' + _readModeNodeosDefault: str="head" _readModeNodeosArg: str="--read-mode" apiAcceptTransactions: int=None _apiAcceptTransactionsNodeosDefault: int=1 _apiAcceptTransactionsNodeosArg: str="--api-accept-transactions" validationMode: str=None - _validationModeNodeosDefault: str='full' + _validationModeNodeosDefault: str="full" _validationModeNodeosArg: str="--validation-mode" disableRamBillingNotifyChecks: bool=None _disableRamBillingNotifyChecksNodeosDefault: bool=False @@ -85,7 +85,7 @@ class ChainPluginArgs: _trustedProducerNodeosDefault: str=None _trustedProducerNodeosArg: str="--trusted-producer" databaseMapMode: str=None - _databaseMapModeNodeosDefault: str='mapped' + _databaseMapModeNodeosDefault: str="mapped" _databaseMapModeNodeosArg: str="--database-map-mode" eosVmOcCacheSizeMb: int=None _eosVmOcCacheSizeMbNodeosDefault: int=1024 @@ -127,7 +127,7 @@ class ChainPluginArgs: _integrityHashOnStopNodeosDefault: bool=False _integrityHashOnStopNodeosArg: str="--integrity-hash-on-stop" blockLogRetainBlocks: int=None - _blockLogRetainBlocksNodeosDefault: int=False + _blockLogRetainBlocksNodeosDefault: int=None _blockLogRetainBlocksNodeosArg: str="--block-log-retain-blocks" genesisJson: str=None _genesisJsonNodeosDefault: str=None @@ -138,14 +138,14 @@ class ChainPluginArgs: printGenesisJson: bool=None _printGenesisJsonNodeosDefault: bool=False _printGenesisJsonNodeosArg: str="--print-genesis-json" - extractGenesisJson: bool=None - _extractGenesisJsonNodeosDefault: bool=False + extractGenesisJson: str=None + _extractGenesisJsonNodeosDefault: str=None _extractGenesisJsonNodeosArg: str="--extract-genesis-json" printBuildInfo: bool=None _printBuildInfoNodeosDefault: bool=False _printBuildInfoNodeosArg: str="--print-build-info" - extractBuildInfo=None - 
_extractBuildInfoNodeosDefault=None + extractBuildInfo: str=None + _extractBuildInfoNodeosDefault: str=None _extractBuildInfoNodeosArg: str="--extract-build-info" forceAllChecks: bool=None _forceAllChecksNodeosDefault: bool=False diff --git a/tests/performance_tests/HttpClientPluginArgs.py b/tests/performance_tests/HttpClientPluginArgs.py index c033d31e7e..297b3b2494 100755 --- a/tests/performance_tests/HttpClientPluginArgs.py +++ b/tests/performance_tests/HttpClientPluginArgs.py @@ -7,8 +7,8 @@ @dataclass class HttpClientPluginArgs: - _pluginNamespace: str = "eosio" - _pluginName: str = "http_client_plugin" + _pluginNamespace: str="eosio" + _pluginName: str="http_client_plugin" httpsClientRootCert: str=None _httpsClientRootCertNodeosDefault: str=None _httpsClientRootCertNodeosArg: str="--https-client-root-cert" diff --git a/tests/performance_tests/HttpPluginArgs.py b/tests/performance_tests/HttpPluginArgs.py index 41aa6eee1c..254277889c 100755 --- a/tests/performance_tests/HttpPluginArgs.py +++ b/tests/performance_tests/HttpPluginArgs.py @@ -25,7 +25,7 @@ class HttpPluginArgs: _httpsPrivateKeyFileNodeosDefault: str=None _httpsPrivateKeyFileNodeosArg: str="--https-private-key-file" httpsEcdhCurve: str=None - _httpsEcdhCurveNodeosDefault: str='secp384r1' + _httpsEcdhCurveNodeosDefault: str="secp384r1" _httpsEcdhCurveNodeosArg: str="--https-ecdh-curve" accessControlAllowOrigin: str=None _accessControlAllowOriginNodeosDefault: str=None diff --git a/tests/performance_tests/NetPluginArgs.py b/tests/performance_tests/NetPluginArgs.py index 75ed490ba9..5d36d90d1b 100755 --- a/tests/performance_tests/NetPluginArgs.py +++ b/tests/performance_tests/NetPluginArgs.py @@ -7,8 +7,8 @@ @dataclass class NetPluginArgs: - _pluginNamespace: str = "eosio" - _pluginName: str = "net_plugin" + _pluginNamespace: str="eosio" + _pluginName: str="net_plugin" p2pListenEndpoint: str=None _p2pListenEndpointNodeosDefault: str="0.0.0.0:9876" _p2pListenEndpointNodeosArg: 
str="--p2p-listen-endpoint" @@ -25,10 +25,10 @@ class NetPluginArgs: _p2pAcceptTransactionsNodeosDefault: int=1 _p2pAcceptTransactionsNodeosArg: str="--p2p-accept-transactions" agentName: str=None - _agentNameNodeosDefault: str='EOS Test Agent' + _agentNameNodeosDefault: str="EOS Test Agent" _agentNameNodeosArg: str="--agent-name" allowedConnection: str=None - _allowedConnectionNodeosDefault: str='any' + _allowedConnectionNodeosDefault: str="any" _allowedConnectionNodeosArg: str="--allowed-connection" peerKey: str=None _peerKeyNodeosDefault: str=None @@ -58,7 +58,7 @@ class NetPluginArgs: _useSocketReadWatermarkNodeosDefault: int=0 _useSocketReadWatermarkNodeosArg: str="--use-socket-read-watermark" peerLogFormat: str=None - _peerLogFormatNodeosDefault: str="[\"${_name}\" - ${_cid} ${_ip}:${_port}] " + _peerLogFormatNodeosDefault: str="["${_name}" - ${_cid} ${_ip}:${_port}] " _peerLogFormatNodeosArg: str="--peer-log-format" p2pKeepaliveIntervalMs: int=None _p2pKeepaliveIntervalMsNodeosDefault: int=10000 diff --git a/tests/performance_tests/ProducerPluginArgs.py b/tests/performance_tests/ProducerPluginArgs.py index 0fd7795591..f487cb63e2 100755 --- a/tests/performance_tests/ProducerPluginArgs.py +++ b/tests/performance_tests/ProducerPluginArgs.py @@ -7,8 +7,8 @@ @dataclass class ProducerPluginArgs: - _pluginNamespace: str = "eosio" - _pluginName: str = "producer_plugin" + _pluginNamespace: str="eosio" + _pluginName: str="producer_plugin" enableStaleProduction: bool=None _enableStaleProductionNodeosDefault: bool=False _enableStaleProductionNodeosArg: str="--enable-stale-production" diff --git a/tests/performance_tests/ResourceMonitorPluginArgs.py b/tests/performance_tests/ResourceMonitorPluginArgs.py index 2103c8ee28..913ed37e44 100755 --- a/tests/performance_tests/ResourceMonitorPluginArgs.py +++ b/tests/performance_tests/ResourceMonitorPluginArgs.py @@ -7,8 +7,8 @@ @dataclass class ResourceMonitorPluginArgs: - _pluginNamespace: str = "eosio" - _pluginName: str = 
"resource_monitor_plugin" + _pluginNamespace: str="eosio" + _pluginName: str="resource_monitor_plugin" resourceMonitorIntervalSeconds: int=None _resourceMonitorIntervalSecondsNodeosDefault: int=2 _resourceMonitorIntervalSecondsNodeosArg: str="--resource-monitor-interval-seconds" diff --git a/tests/performance_tests/SignatureProviderPluginArgs.py b/tests/performance_tests/SignatureProviderPluginArgs.py index a6d7a0bbc2..95efb3ae99 100755 --- a/tests/performance_tests/SignatureProviderPluginArgs.py +++ b/tests/performance_tests/SignatureProviderPluginArgs.py @@ -7,8 +7,8 @@ @dataclass class SignatureProviderPluginArgs: - _pluginNamespace: str = "eosio" - _pluginName: str = "signature_provider_plugin" + _pluginNamespace: str="eosio" + _pluginName: str="signature_provider_plugin" keosdProviderTimeout: int=None _keosdProviderTimeoutNodeosDefault: int=5 _keosdProviderTimeoutNodeosArg: str="--keosd-provider-timeout" diff --git a/tests/performance_tests/StateHistoryPluginArgs.py b/tests/performance_tests/StateHistoryPluginArgs.py index 4e4d133edb..a0559607a9 100755 --- a/tests/performance_tests/StateHistoryPluginArgs.py +++ b/tests/performance_tests/StateHistoryPluginArgs.py @@ -7,8 +7,8 @@ @dataclass class StateHistoryPluginArgs: - _pluginNamespace: str = "eosio" - _pluginName: str = "state_history_plugin" + _pluginNamespace: str="eosio" + _pluginName: str="state_history_plugin" stateHistoryDir: str=None _stateHistoryDirNodeosDefault: str='"state-history"' _stateHistoryDirNodeosArg: str="--state-history-dir" @@ -27,8 +27,8 @@ class StateHistoryPluginArgs: traceHistoryDebugMode: bool=None _traceHistoryDebugModeNodeosDefault: bool=False _traceHistoryDebugModeNodeosArg: str="--trace-history-debug-mode" - stateHistoryLogRetainBlocks: bool=None - _stateHistoryLogRetainBlocksNodeosDefault: bool=False + stateHistoryLogRetainBlocks: int=None + _stateHistoryLogRetainBlocksNodeosDefault: int=None _stateHistoryLogRetainBlocksNodeosArg: str="--state-history-log-retain-blocks" 
deleteStateHistory: bool=None _deleteStateHistoryNodeosDefault: bool=False From 8655e4dca1abbe9da71accf6ec7a46e9e458aaf1 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 6 Dec 2022 17:52:28 -0600 Subject: [PATCH 029/178] Script that generates *PluginArgs.py dataclass files from nodeos --help output. --- ...generate_nodeos_plugin_args_class_files.py | 214 ++++++++++++++++++ 1 file changed, 214 insertions(+) create mode 100755 tests/performance_tests/generate_nodeos_plugin_args_class_files.py diff --git a/tests/performance_tests/generate_nodeos_plugin_args_class_files.py b/tests/performance_tests/generate_nodeos_plugin_args_class_files.py new file mode 100755 index 0000000000..cb56ba3f89 --- /dev/null +++ b/tests/performance_tests/generate_nodeos_plugin_args_class_files.py @@ -0,0 +1,214 @@ +#!/usr/bin/env python3 + +import re +import subprocess + +def main(): + cmd="programs/nodeos/nodeos --help" + result = subprocess.run(["programs/nodeos/nodeos", "--help"], capture_output=True, text=True) + + myStr = result.stdout + myStr = myStr.rstrip("\n") + myStr = re.sub(":\n\s+-",':@@@\n -', string=myStr) + myStr = re.sub("\n\n",'\n@@@', string=myStr) + myStr = re.sub("Application Options:\n",'', string=myStr) + pluginSections = re.split("(@@@.*?@@@\n)", string=myStr) + + + sec=0 + for section in pluginSections: + sec=sec+1 + + def pairwise(iterable): + "s -> (s0, s1), (s2, s3), (s4, s5), ..." 
+ a = iter(iterable) + return zip(a, a) + + pluginOptsDict = {} + for section, options in pairwise(pluginSections[1:]): + myOpts = re.sub("\s+", " ", options) + myOpts = re.sub("\n", " ", myOpts) + myOpts = re.sub(" --", "\n--",string = myOpts) + splitOpts=re.split("\n", myOpts) + + argDescDict = {} + for opt in splitOpts[1:]: + secondSplit = re.split("(--[\w\-]+)", opt)[1:] + argument=secondSplit[0] + argDefaultDesc=secondSplit[1].lstrip("\s") + argDescDict[argument] = argDefaultDesc + # print(f"argDefaultDesc: {argDefaultDesc}") + section=re.sub("@@@", "", section) + section=re.sub("\n", "", section) + sectionSplit=re.split("::", section) + configSection = section + if len(sectionSplit) > 1: + configSection=sectionSplit[1] + + if pluginOptsDict.get(configSection) is not None: + pluginOptsDict[configSection].update(argDescDict) + else: + pluginOptsDict[configSection] = argDescDict + + newDict = {} + for key, value in pluginOptsDict.items(): + newPlugin="".join([x.capitalize() for x in key.split('_')]).replace(":","") + + newArgs = {} + for key, value in value.items(): + newKey="".join([x.capitalize() for x in key.split('-')]).replace('--','') + newKey="".join([newKey[0].lower(), newKey[1:]]) + newArgs[newKey]=value + newDict[newPlugin]=newArgs + + def writeDataclass(plugin:str, dataFieldDict:dict, pluginOptsDict:dict): + newPlugin="".join([x.capitalize() for x in plugin.split('_')]).replace(":","") + pluginArgsFile=f"../tests/performance_tests/{newPlugin}Args.py" + with open(pluginArgsFile, 'w') as dataclassFile: + chainPluginArgs = dataFieldDict[newPlugin] + + dataclassFile.write(f"#!/usr/bin/env python3\n\n") + dataclassFile.write(f"import dataclasses\n") + dataclassFile.write(f"import re\n\n") + dataclassFile.write(f"from dataclasses import dataclass\n\n") + dataclassFile.write(f"@dataclass\nclass {newPlugin}Args:\n") + dataclassFile.write(f" _pluginNamespace: str=\"eosio\"\n") + dataclassFile.write(f" _pluginName: str=\"{plugin[:-1]}\"\n") + + for key, value 
in pluginOptsDict[plugin].items(): + newKey="".join([x.capitalize() for x in key.split('-')]).replace('--','') + newKey="".join([newKey[0].lower(), newKey[1:]]) + value = chainPluginArgs[newKey] + match = re.search("\(=.*?\)", value) + if match is not None: + value = match.group(0)[2:-1] + try: + numVal = int(value) + dataclassFile.write(f" {newKey}: int=None\n") + dataclassFile.write(f" _{newKey}NodeosDefault: int={numVal}\n") + dataclassFile.write(f" _{newKey}NodeosArg: str=\"{key}\"\n") + except ValueError: + strValue = str(value) + quote = "\'" if strValue[0] == "\"" else "\"" + dataclassFile.write(f" {newKey}: str=None\n") + dataclassFile.write(f" _{newKey}NodeosDefault: str={quote}{strValue}{quote}\n") + dataclassFile.write(f" _{newKey}NodeosArg: str=\"{key}\"\n") + else: + if re.search("deepmind", newKey, re.IGNORECASE) or \ + re.search("tracehistory", newKey, re.IGNORECASE) or \ + re.search("tracenoabis", newKey, re.IGNORECASE) or \ + re.search("chainstatehistory", newKey, re.IGNORECASE) or \ + re.search("console", newKey, re.IGNORECASE) or \ + re.search("print", newKey, re.IGNORECASE) or \ + re.search("verbose", newKey, re.IGNORECASE) or \ + re.search("debug", newKey, re.IGNORECASE) or \ + re.search("force", newKey, re.IGNORECASE) or \ + re.search("onthreshold", newKey, re.IGNORECASE) or \ + re.search("allowcredentials", newKey, re.IGNORECASE) or \ + re.search("delete", newKey, re.IGNORECASE) or \ + re.search("replay", newKey, re.IGNORECASE) or \ + re.search("onstart", newKey, re.IGNORECASE) or \ + re.search("onstop", newKey, re.IGNORECASE) or \ + re.search("enable", newKey, re.IGNORECASE) or \ + re.search("disable", newKey, re.IGNORECASE): + dataclassFile.write(f" {newKey}: bool=None\n") + dataclassFile.write(f" _{newKey}NodeosDefault: bool=False\n") + dataclassFile.write(f" _{newKey}NodeosArg: str=\"{key}\"\n") + elif re.search("sizegb", newKey, re.IGNORECASE) or \ + re.search("maxage", newKey, re.IGNORECASE) or \ + re.search("retainblocks", newKey, 
re.IGNORECASE): + dataclassFile.write(f" {newKey}: int=None\n") + dataclassFile.write(f" _{newKey}NodeosDefault: int=None\n") + dataclassFile.write(f" _{newKey}NodeosArg: str=\"{key}\"\n") + else: + dataclassFile.write(f" {newKey}: str=None\n") + dataclassFile.write(f" _{newKey}NodeosDefault: str=None\n") + dataclassFile.write(f" _{newKey}NodeosArg: str=\"{key}\"\n") + + def writeThreadSetter(pluginName: str) -> str: + if (re.search("chain", pluginName, re.IGNORECASE) or re.search("net", pluginName, re.IGNORECASE) or re.search("producer", pluginName, re.IGNORECASE)): + attrName = re.sub("PluginArgs", "", pluginName).lower() + return f"""\ + def threads(self, threads: int): + self.{attrName}Threads=threads\n\n""" + else: + return "" + + def writeSupportedNodeosArgs() -> str: + return f"""\ + def supportedNodeosArgs(self) -> list:\n\ + args = []\n\ + for field in dataclasses.fields(self):\n\ + match = re.search("\w*NodeosArg", field.name)\n\ + if match is not None:\n\ + args.append(getattr(self, field.name))\n\ + return args\n\n""" + + def writeStrFxn() -> str: + return f"""\ + def __str__(self) -> str:\n\ + args = [] \n\ + for field in dataclasses.fields(self):\n\ + match = re.search("[^_]", field.name[0])\n\ + if match is not None:\n\ + default = getattr(self, f"_{{field.name}}NodeosDefault")\n\ + current = getattr(self, field.name)\n\ + if current is not None and current != default:\n\ + if type(current) is bool: + args.append(f"{{getattr(self, f'_{{field.name}}NodeosArg')}}") + else: + args.append(f"{{getattr(self, f'_{{field.name}}NodeosArg')}} {{getattr(self, field.name)}}") + + return "--plugin " + self._pluginNamespace + "::" + self._pluginName + " " + " ".join(args) if len(args) > 0 else ""\n\n""" + + def writeMainFxn(pluginName: str) -> str: + return f"""\ +def main():\n\ + pluginArgs = {pluginName}()\n\ + print(pluginArgs.supportedNodeosArgs())\n\ + exit(0)\n\n\ +if __name__ == '__main__':\n\ + main()\n""" + + def writeHelpers(pluginName: str) -> str: + 
return "\n" + writeThreadSetter(pluginName) + writeSupportedNodeosArgs() + writeStrFxn() + writeMainFxn(pluginName) + # return f"""\n def supportedNodeosArgs(self) -> list:\n\ + # args = []\n\ + # for field in dataclasses.fields(self):\n\ + # match = re.search("\w*NodeosArg", field.name)\n\ + # if match is not None:\n\ + # args.append(getattr(self, field.name))\n\ + # return args\n\n\ + # def __str__(self) -> str:\n\ + # args = [] \n\ + # for field in dataclasses.fields(self):\n\ + # match = re.search("[^_]", field.name[0])\n\ + # if match is not None:\n\ + # default = getattr(self, f"_{{field.name}}NodeosDefault")\n\ + # current = getattr(self, field.name)\n\ + # if current is not None and current != default:\n\ + # args.append(f"{{getattr(self, f'_{{field.name}}NodeosArg')}} {{getattr(self, field.name)}}")\n\ + # return " ".join(args)\n\n\ + # def main():\n\ + # pluginArgs = {pluginName}()\n\ + # print(pluginArgs.supportedNodeosArgs())\n\ + # exit(0)\n\n\ + # if __name__ == '__main__':\n\ + # main()\n""" + + dataclassFile.write(writeHelpers(f"{newPlugin}Args")) + + writeDataclass(plugin="chain_plugin:", dataFieldDict=newDict, pluginOptsDict=pluginOptsDict) + writeDataclass(plugin="http_client_plugin:", dataFieldDict=newDict, pluginOptsDict=pluginOptsDict) + writeDataclass(plugin="http_plugin:", dataFieldDict=newDict, pluginOptsDict=pluginOptsDict) + writeDataclass(plugin="net_plugin:", dataFieldDict=newDict, pluginOptsDict=pluginOptsDict) + writeDataclass(plugin="producer_plugin:", dataFieldDict=newDict, pluginOptsDict=pluginOptsDict) + writeDataclass(plugin="resource_monitor_plugin:", dataFieldDict=newDict, pluginOptsDict=pluginOptsDict) + writeDataclass(plugin="signature_provider_plugin:", dataFieldDict=newDict, pluginOptsDict=pluginOptsDict) + writeDataclass(plugin="state_history_plugin:", dataFieldDict=newDict, pluginOptsDict=pluginOptsDict) + writeDataclass(plugin="trace_api_plugin:", dataFieldDict=newDict, pluginOptsDict=pluginOptsDict) + + exit(0) + +if 
__name__ == '__main__': + main() From 6936b4144f4ca6b9d039ec3154e95716bc584a6c Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 6 Dec 2022 19:45:12 -0600 Subject: [PATCH 030/178] Fixup invalid syntax error. --- tests/performance_tests/NetPluginArgs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/performance_tests/NetPluginArgs.py b/tests/performance_tests/NetPluginArgs.py index 5d36d90d1b..8156475c66 100755 --- a/tests/performance_tests/NetPluginArgs.py +++ b/tests/performance_tests/NetPluginArgs.py @@ -58,7 +58,7 @@ class NetPluginArgs: _useSocketReadWatermarkNodeosDefault: int=0 _useSocketReadWatermarkNodeosArg: str="--use-socket-read-watermark" peerLogFormat: str=None - _peerLogFormatNodeosDefault: str="["${_name}" - ${_cid} ${_ip}:${_port}] " + _peerLogFormatNodeosDefault: str='["${_name}" - ${_cid} ${_ip}:${_port}] ' _peerLogFormatNodeosArg: str="--peer-log-format" p2pKeepaliveIntervalMs: int=None _p2pKeepaliveIntervalMsNodeosDefault: int=10000 From 7dc7133cd041448ea8ae5c4db7b7f34ce6ec49ff Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 6 Dec 2022 19:48:07 -0600 Subject: [PATCH 031/178] Fixup generator for proper quote detection and selection. 
--- ...generate_nodeos_plugin_args_class_files.py | 25 +------------------ 1 file changed, 1 insertion(+), 24 deletions(-) diff --git a/tests/performance_tests/generate_nodeos_plugin_args_class_files.py b/tests/performance_tests/generate_nodeos_plugin_args_class_files.py index cb56ba3f89..fdc7ef8246 100755 --- a/tests/performance_tests/generate_nodeos_plugin_args_class_files.py +++ b/tests/performance_tests/generate_nodeos_plugin_args_class_files.py @@ -89,7 +89,7 @@ def writeDataclass(plugin:str, dataFieldDict:dict, pluginOptsDict:dict): dataclassFile.write(f" _{newKey}NodeosArg: str=\"{key}\"\n") except ValueError: strValue = str(value) - quote = "\'" if strValue[0] == "\"" else "\"" + quote = "\'" if re.search("\"", strValue) else "\"" dataclassFile.write(f" {newKey}: str=None\n") dataclassFile.write(f" _{newKey}NodeosDefault: str={quote}{strValue}{quote}\n") dataclassFile.write(f" _{newKey}NodeosArg: str=\"{key}\"\n") @@ -172,29 +172,6 @@ def main():\n\ def writeHelpers(pluginName: str) -> str: return "\n" + writeThreadSetter(pluginName) + writeSupportedNodeosArgs() + writeStrFxn() + writeMainFxn(pluginName) - # return f"""\n def supportedNodeosArgs(self) -> list:\n\ - # args = []\n\ - # for field in dataclasses.fields(self):\n\ - # match = re.search("\w*NodeosArg", field.name)\n\ - # if match is not None:\n\ - # args.append(getattr(self, field.name))\n\ - # return args\n\n\ - # def __str__(self) -> str:\n\ - # args = [] \n\ - # for field in dataclasses.fields(self):\n\ - # match = re.search("[^_]", field.name[0])\n\ - # if match is not None:\n\ - # default = getattr(self, f"_{{field.name}}NodeosDefault")\n\ - # current = getattr(self, field.name)\n\ - # if current is not None and current != default:\n\ - # args.append(f"{{getattr(self, f'_{{field.name}}NodeosArg')}} {{getattr(self, field.name)}}")\n\ - # return " ".join(args)\n\n\ - # def main():\n\ - # pluginArgs = {pluginName}()\n\ - # print(pluginArgs.supportedNodeosArgs())\n\ - # exit(0)\n\n\ - # if 
__name__ == '__main__':\n\ - # main()\n""" dataclassFile.write(writeHelpers(f"{newPlugin}Args")) From 0cd997ff6e547526afdf7379c2b911cdfd7d0113 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 7 Dec 2022 08:21:05 -0600 Subject: [PATCH 032/178] Provide BasePluginArgs base class. BasePluginArgs provides generic implementations of __str__ and supportedNodeosArgs functions for use in each *PluginArgs classes. Update generation script to no longer generate base class functions, but to inherit from base class. --- tests/performance_tests/BasePluginArgs.py | 32 +++++++++++++++ tests/performance_tests/CMakeLists.txt | 1 + tests/performance_tests/ChainPluginArgs.py | 29 +------------- .../performance_tests/HttpClientPluginArgs.py | 29 +------------- tests/performance_tests/HttpPluginArgs.py | 29 +------------- tests/performance_tests/NetPluginArgs.py | 29 +------------- tests/performance_tests/ProducerPluginArgs.py | 29 +------------- .../ResourceMonitorPluginArgs.py | 29 +------------- .../SignatureProviderPluginArgs.py | 29 +------------- .../StateHistoryPluginArgs.py | 29 +------------- tests/performance_tests/TraceApiPluginArgs.py | 29 +------------- ...generate_nodeos_plugin_args_class_files.py | 39 +++---------------- 12 files changed, 56 insertions(+), 277 deletions(-) create mode 100755 tests/performance_tests/BasePluginArgs.py diff --git a/tests/performance_tests/BasePluginArgs.py b/tests/performance_tests/BasePluginArgs.py new file mode 100755 index 0000000000..e6fdbca810 --- /dev/null +++ b/tests/performance_tests/BasePluginArgs.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python3 + +import dataclasses +import re + +from dataclasses import dataclass + +@dataclass +class BasePluginArgs: + + def supportedNodeosArgs(self) -> list: + args = [] + for field in dataclasses.fields(self): + match = re.search("\w*NodeosArg", field.name) + if match is not None: + args.append(getattr(self, field.name)) + return args + + def __str__(self) -> str: + args = [] + for field in 
dataclasses.fields(self): + match = re.search("[^_]", field.name[0]) + if match is not None: + default = getattr(self, f"_{field.name}NodeosDefault") + current = getattr(self, field.name) + if current is not None and current != default: + if type(current) is bool: + args.append(f"{getattr(self, f'_{field.name}NodeosArg')}") + else: + args.append(f"{getattr(self, f'_{field.name}NodeosArg')} {getattr(self, field.name)}") + + return "--plugin " + self._pluginNamespace + "::" + self._pluginName + " " + " ".join(args) if len(args) > 0 else "" diff --git a/tests/performance_tests/CMakeLists.txt b/tests/performance_tests/CMakeLists.txt index cff889c4ce..1796fc95d1 100644 --- a/tests/performance_tests/CMakeLists.txt +++ b/tests/performance_tests/CMakeLists.txt @@ -9,6 +9,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_log_3_2.txt.gz ${CMAKE_CURRENT configure_file(${CMAKE_CURRENT_SOURCE_DIR}/genesis.json ${CMAKE_CURRENT_BINARY_DIR}/genesis.json COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/validate_nodeos_plugin_args.py ${CMAKE_CURRENT_BINARY_DIR}/validate_nodeos_plugin_args.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/BasePluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/BasePluginArgs.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/ChainPluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/ChainPluginArgs.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/HttpClientPluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/HttpClientPluginArgs.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/HttpPluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/HttpPluginArgs.py COPYONLY) diff --git a/tests/performance_tests/ChainPluginArgs.py b/tests/performance_tests/ChainPluginArgs.py index 528585bddb..da2d59d440 100755 --- a/tests/performance_tests/ChainPluginArgs.py +++ b/tests/performance_tests/ChainPluginArgs.py @@ -1,12 +1,10 @@ #!/usr/bin/env python3 -import dataclasses -import re - from dataclasses import dataclass +from BasePluginArgs import BasePluginArgs @dataclass -class 
ChainPluginArgs: +class ChainPluginArgs(BasePluginArgs): _pluginNamespace: str="eosio" _pluginName: str="chain_plugin" blocksDir: str=None @@ -175,29 +173,6 @@ class ChainPluginArgs: def threads(self, threads: int): self.chainThreads=threads - def supportedNodeosArgs(self) -> list: - args = [] - for field in dataclasses.fields(self): - match = re.search("\w*NodeosArg", field.name) - if match is not None: - args.append(getattr(self, field.name)) - return args - - def __str__(self) -> str: - args = [] - for field in dataclasses.fields(self): - match = re.search("[^_]", field.name[0]) - if match is not None: - default = getattr(self, f"_{field.name}NodeosDefault") - current = getattr(self, field.name) - if current is not None and current != default: - if type(current) is bool: - args.append(f"{getattr(self, f'_{field.name}NodeosArg')}") - else: - args.append(f"{getattr(self, f'_{field.name}NodeosArg')} {getattr(self, field.name)}") - - return "--plugin " + self._pluginNamespace + "::" + self._pluginName + " " + " ".join(args) if len(args) > 0 else "" - def main(): pluginArgs = ChainPluginArgs() print(pluginArgs.supportedNodeosArgs()) diff --git a/tests/performance_tests/HttpClientPluginArgs.py b/tests/performance_tests/HttpClientPluginArgs.py index 297b3b2494..b5a7f517b7 100755 --- a/tests/performance_tests/HttpClientPluginArgs.py +++ b/tests/performance_tests/HttpClientPluginArgs.py @@ -1,12 +1,10 @@ #!/usr/bin/env python3 -import dataclasses -import re - from dataclasses import dataclass +from BasePluginArgs import BasePluginArgs @dataclass -class HttpClientPluginArgs: +class HttpClientPluginArgs(BasePluginArgs): _pluginNamespace: str="eosio" _pluginName: str="http_client_plugin" httpsClientRootCert: str=None @@ -16,29 +14,6 @@ class HttpClientPluginArgs: _httpsClientValidatePeersNodeosDefault: int=1 _httpsClientValidatePeersNodeosArg: str="--https-client-validate-peers" - def supportedNodeosArgs(self) -> list: - args = [] - for field in dataclasses.fields(self): - 
match = re.search("\w*NodeosArg", field.name) - if match is not None: - args.append(getattr(self, field.name)) - return args - - def __str__(self) -> str: - args = [] - for field in dataclasses.fields(self): - match = re.search("[^_]", field.name[0]) - if match is not None: - default = getattr(self, f"_{field.name}NodeosDefault") - current = getattr(self, field.name) - if current is not None and current != default: - if type(current) is bool: - args.append(f"{getattr(self, f'_{field.name}NodeosArg')}") - else: - args.append(f"{getattr(self, f'_{field.name}NodeosArg')} {getattr(self, field.name)}") - - return "--plugin " + self._pluginNamespace + "::" + self._pluginName + " " + " ".join(args) if len(args) > 0 else "" - def main(): pluginArgs = HttpClientPluginArgs() print(pluginArgs.supportedNodeosArgs()) diff --git a/tests/performance_tests/HttpPluginArgs.py b/tests/performance_tests/HttpPluginArgs.py index 254277889c..cb3d86335b 100755 --- a/tests/performance_tests/HttpPluginArgs.py +++ b/tests/performance_tests/HttpPluginArgs.py @@ -1,12 +1,10 @@ #!/usr/bin/env python3 -import dataclasses -import re - from dataclasses import dataclass +from BasePluginArgs import BasePluginArgs @dataclass -class HttpPluginArgs: +class HttpPluginArgs(BasePluginArgs): _pluginNamespace: str="eosio" _pluginName: str="http_plugin" unixSocketPath: str=None @@ -67,29 +65,6 @@ class HttpPluginArgs: _httpKeepAliveNodeosDefault: int=1 _httpKeepAliveNodeosArg: str="--http-keep-alive" - def supportedNodeosArgs(self) -> list: - args = [] - for field in dataclasses.fields(self): - match = re.search("\w*NodeosArg", field.name) - if match is not None: - args.append(getattr(self, field.name)) - return args - - def __str__(self) -> str: - args = [] - for field in dataclasses.fields(self): - match = re.search("[^_]", field.name[0]) - if match is not None: - default = getattr(self, f"_{field.name}NodeosDefault") - current = getattr(self, field.name) - if current is not None and current != default: - 
if type(current) is bool: - args.append(f"{getattr(self, f'_{field.name}NodeosArg')}") - else: - args.append(f"{getattr(self, f'_{field.name}NodeosArg')} {getattr(self, field.name)}") - - return "--plugin " + self._pluginNamespace + "::" + self._pluginName + " " + " ".join(args) if len(args) > 0 else "" - def main(): pluginArgs = HttpPluginArgs() print(pluginArgs.supportedNodeosArgs()) diff --git a/tests/performance_tests/NetPluginArgs.py b/tests/performance_tests/NetPluginArgs.py index 8156475c66..fcfe148968 100755 --- a/tests/performance_tests/NetPluginArgs.py +++ b/tests/performance_tests/NetPluginArgs.py @@ -1,12 +1,10 @@ #!/usr/bin/env python3 -import dataclasses -import re - from dataclasses import dataclass +from BasePluginArgs import BasePluginArgs @dataclass -class NetPluginArgs: +class NetPluginArgs(BasePluginArgs): _pluginNamespace: str="eosio" _pluginName: str="net_plugin" p2pListenEndpoint: str=None @@ -67,29 +65,6 @@ class NetPluginArgs: def threads(self, threads: int): self.netThreads=threads - def supportedNodeosArgs(self) -> list: - args = [] - for field in dataclasses.fields(self): - match = re.search("\w*NodeosArg", field.name) - if match is not None: - args.append(getattr(self, field.name)) - return args - - def __str__(self) -> str: - args = [] - for field in dataclasses.fields(self): - match = re.search("[^_]", field.name[0]) - if match is not None: - default = getattr(self, f"_{field.name}NodeosDefault") - current = getattr(self, field.name) - if current is not None and current != default: - if type(current) is bool: - args.append(f"{getattr(self, f'_{field.name}NodeosArg')}") - else: - args.append(f"{getattr(self, f'_{field.name}NodeosArg')} {getattr(self, field.name)}") - - return "--plugin " + self._pluginNamespace + "::" + self._pluginName + " " + " ".join(args) if len(args) > 0 else "" - def main(): pluginArgs = NetPluginArgs() print(pluginArgs.supportedNodeosArgs()) diff --git a/tests/performance_tests/ProducerPluginArgs.py 
b/tests/performance_tests/ProducerPluginArgs.py index f487cb63e2..0f9aa9646f 100755 --- a/tests/performance_tests/ProducerPluginArgs.py +++ b/tests/performance_tests/ProducerPluginArgs.py @@ -1,12 +1,10 @@ #!/usr/bin/env python3 -import dataclasses -import re - from dataclasses import dataclass +from BasePluginArgs import BasePluginArgs @dataclass -class ProducerPluginArgs: +class ProducerPluginArgs(BasePluginArgs): _pluginNamespace: str="eosio" _pluginName: str="producer_plugin" enableStaleProduction: bool=None @@ -94,29 +92,6 @@ class ProducerPluginArgs: def threads(self, threads: int): self.producerThreads=threads - def supportedNodeosArgs(self) -> list: - args = [] - for field in dataclasses.fields(self): - match = re.search("\w*NodeosArg", field.name) - if match is not None: - args.append(getattr(self, field.name)) - return args - - def __str__(self) -> str: - args = [] - for field in dataclasses.fields(self): - match = re.search("[^_]", field.name[0]) - if match is not None: - default = getattr(self, f"_{field.name}NodeosDefault") - current = getattr(self, field.name) - if current is not None and current != default: - if type(current) is bool: - args.append(f"{getattr(self, f'_{field.name}NodeosArg')}") - else: - args.append(f"{getattr(self, f'_{field.name}NodeosArg')} {getattr(self, field.name)}") - - return "--plugin " + self._pluginNamespace + "::" + self._pluginName + " " + " ".join(args) if len(args) > 0 else "" - def main(): pluginArgs = ProducerPluginArgs() print(pluginArgs.supportedNodeosArgs()) diff --git a/tests/performance_tests/ResourceMonitorPluginArgs.py b/tests/performance_tests/ResourceMonitorPluginArgs.py index 913ed37e44..3cfe8f3fed 100755 --- a/tests/performance_tests/ResourceMonitorPluginArgs.py +++ b/tests/performance_tests/ResourceMonitorPluginArgs.py @@ -1,12 +1,10 @@ #!/usr/bin/env python3 -import dataclasses -import re - from dataclasses import dataclass +from BasePluginArgs import BasePluginArgs @dataclass -class 
ResourceMonitorPluginArgs: +class ResourceMonitorPluginArgs(BasePluginArgs): _pluginNamespace: str="eosio" _pluginName: str="resource_monitor_plugin" resourceMonitorIntervalSeconds: int=None @@ -22,29 +20,6 @@ class ResourceMonitorPluginArgs: _resourceMonitorWarningIntervalNodeosDefault: int=30 _resourceMonitorWarningIntervalNodeosArg: str="--resource-monitor-warning-interval" - def supportedNodeosArgs(self) -> list: - args = [] - for field in dataclasses.fields(self): - match = re.search("\w*NodeosArg", field.name) - if match is not None: - args.append(getattr(self, field.name)) - return args - - def __str__(self) -> str: - args = [] - for field in dataclasses.fields(self): - match = re.search("[^_]", field.name[0]) - if match is not None: - default = getattr(self, f"_{field.name}NodeosDefault") - current = getattr(self, field.name) - if current is not None and current != default: - if type(current) is bool: - args.append(f"{getattr(self, f'_{field.name}NodeosArg')}") - else: - args.append(f"{getattr(self, f'_{field.name}NodeosArg')} {getattr(self, field.name)}") - - return "--plugin " + self._pluginNamespace + "::" + self._pluginName + " " + " ".join(args) if len(args) > 0 else "" - def main(): pluginArgs = ResourceMonitorPluginArgs() print(pluginArgs.supportedNodeosArgs()) diff --git a/tests/performance_tests/SignatureProviderPluginArgs.py b/tests/performance_tests/SignatureProviderPluginArgs.py index 95efb3ae99..c03fb0d02d 100755 --- a/tests/performance_tests/SignatureProviderPluginArgs.py +++ b/tests/performance_tests/SignatureProviderPluginArgs.py @@ -1,41 +1,16 @@ #!/usr/bin/env python3 -import dataclasses -import re - from dataclasses import dataclass +from BasePluginArgs import BasePluginArgs @dataclass -class SignatureProviderPluginArgs: +class SignatureProviderPluginArgs(BasePluginArgs): _pluginNamespace: str="eosio" _pluginName: str="signature_provider_plugin" keosdProviderTimeout: int=None _keosdProviderTimeoutNodeosDefault: int=5 
_keosdProviderTimeoutNodeosArg: str="--keosd-provider-timeout" - def supportedNodeosArgs(self) -> list: - args = [] - for field in dataclasses.fields(self): - match = re.search("\w*NodeosArg", field.name) - if match is not None: - args.append(getattr(self, field.name)) - return args - - def __str__(self) -> str: - args = [] - for field in dataclasses.fields(self): - match = re.search("[^_]", field.name[0]) - if match is not None: - default = getattr(self, f"_{field.name}NodeosDefault") - current = getattr(self, field.name) - if current is not None and current != default: - if type(current) is bool: - args.append(f"{getattr(self, f'_{field.name}NodeosArg')}") - else: - args.append(f"{getattr(self, f'_{field.name}NodeosArg')} {getattr(self, field.name)}") - - return "--plugin " + self._pluginNamespace + "::" + self._pluginName + " " + " ".join(args) if len(args) > 0 else "" - def main(): pluginArgs = SignatureProviderPluginArgs() print(pluginArgs.supportedNodeosArgs()) diff --git a/tests/performance_tests/StateHistoryPluginArgs.py b/tests/performance_tests/StateHistoryPluginArgs.py index a0559607a9..306c44895c 100755 --- a/tests/performance_tests/StateHistoryPluginArgs.py +++ b/tests/performance_tests/StateHistoryPluginArgs.py @@ -1,12 +1,10 @@ #!/usr/bin/env python3 -import dataclasses -import re - from dataclasses import dataclass +from BasePluginArgs import BasePluginArgs @dataclass -class StateHistoryPluginArgs: +class StateHistoryPluginArgs(BasePluginArgs): _pluginNamespace: str="eosio" _pluginName: str="state_history_plugin" stateHistoryDir: str=None @@ -34,29 +32,6 @@ class StateHistoryPluginArgs: _deleteStateHistoryNodeosDefault: bool=False _deleteStateHistoryNodeosArg: str="--delete-state-history" - def supportedNodeosArgs(self) -> list: - args = [] - for field in dataclasses.fields(self): - match = re.search("\w*NodeosArg", field.name) - if match is not None: - args.append(getattr(self, field.name)) - return args - - def __str__(self) -> str: - args = [] - 
for field in dataclasses.fields(self): - match = re.search("[^_]", field.name[0]) - if match is not None: - default = getattr(self, f"_{field.name}NodeosDefault") - current = getattr(self, field.name) - if current is not None and current != default: - if type(current) is bool: - args.append(f"{getattr(self, f'_{field.name}NodeosArg')}") - else: - args.append(f"{getattr(self, f'_{field.name}NodeosArg')} {getattr(self, field.name)}") - - return "--plugin " + self._pluginNamespace + "::" + self._pluginName + " " + " ".join(args) if len(args) > 0 else "" - def main(): pluginArgs = StateHistoryPluginArgs() print(pluginArgs.supportedNodeosArgs()) diff --git a/tests/performance_tests/TraceApiPluginArgs.py b/tests/performance_tests/TraceApiPluginArgs.py index a415caccc7..3734c2fcb7 100755 --- a/tests/performance_tests/TraceApiPluginArgs.py +++ b/tests/performance_tests/TraceApiPluginArgs.py @@ -1,12 +1,10 @@ #!/usr/bin/env python3 -import dataclasses -import re - from dataclasses import dataclass +from BasePluginArgs import BasePluginArgs @dataclass -class TraceApiPluginArgs: +class TraceApiPluginArgs(BasePluginArgs): _pluginNamespace: str="eosio" _pluginName: str="trace_api_plugin" traceDir: str=None @@ -28,29 +26,6 @@ class TraceApiPluginArgs: _traceNoAbisNodeosDefault: bool=False _traceNoAbisNodeosArg: str="--trace-no-abis" - def supportedNodeosArgs(self) -> list: - args = [] - for field in dataclasses.fields(self): - match = re.search("\w*NodeosArg", field.name) - if match is not None: - args.append(getattr(self, field.name)) - return args - - def __str__(self) -> str: - args = [] - for field in dataclasses.fields(self): - match = re.search("[^_]", field.name[0]) - if match is not None: - default = getattr(self, f"_{field.name}NodeosDefault") - current = getattr(self, field.name) - if current is not None and current != default: - if type(current) is bool: - args.append(f"{getattr(self, f'_{field.name}NodeosArg')}") - else: - args.append(f"{getattr(self, 
f'_{field.name}NodeosArg')} {getattr(self, field.name)}") - - return "--plugin " + self._pluginNamespace + "::" + self._pluginName + " " + " ".join(args) if len(args) > 0 else "" - def main(): pluginArgs = TraceApiPluginArgs() print(pluginArgs.supportedNodeosArgs()) diff --git a/tests/performance_tests/generate_nodeos_plugin_args_class_files.py b/tests/performance_tests/generate_nodeos_plugin_args_class_files.py index fdc7ef8246..98b04fe213 100755 --- a/tests/performance_tests/generate_nodeos_plugin_args_class_files.py +++ b/tests/performance_tests/generate_nodeos_plugin_args_class_files.py @@ -37,7 +37,6 @@ def pairwise(iterable): argument=secondSplit[0] argDefaultDesc=secondSplit[1].lstrip("\s") argDescDict[argument] = argDefaultDesc - # print(f"argDefaultDesc: {argDefaultDesc}") section=re.sub("@@@", "", section) section=re.sub("\n", "", section) sectionSplit=re.split("::", section) @@ -67,11 +66,10 @@ def writeDataclass(plugin:str, dataFieldDict:dict, pluginOptsDict:dict): with open(pluginArgsFile, 'w') as dataclassFile: chainPluginArgs = dataFieldDict[newPlugin] - dataclassFile.write(f"#!/usr/bin/env python3\n\n") - dataclassFile.write(f"import dataclasses\n") - dataclassFile.write(f"import re\n\n") - dataclassFile.write(f"from dataclasses import dataclass\n\n") - dataclassFile.write(f"@dataclass\nclass {newPlugin}Args:\n") + dataclassFile.write(f"#!/usr/bin/env python3\n\n") + dataclassFile.write(f"from dataclasses import dataclass\n") + dataclassFile.write(f"from BasePluginArgs import BasePluginArgs\n\n") + dataclassFile.write(f"@dataclass\nclass {newPlugin}Args(BasePluginArgs):\n") dataclassFile.write(f" _pluginNamespace: str=\"eosio\"\n") dataclassFile.write(f" _pluginName: str=\"{plugin[:-1]}\"\n") @@ -134,33 +132,6 @@ def threads(self, threads: int): else: return "" - def writeSupportedNodeosArgs() -> str: - return f"""\ - def supportedNodeosArgs(self) -> list:\n\ - args = []\n\ - for field in dataclasses.fields(self):\n\ - match = 
re.search("\w*NodeosArg", field.name)\n\ - if match is not None:\n\ - args.append(getattr(self, field.name))\n\ - return args\n\n""" - - def writeStrFxn() -> str: - return f"""\ - def __str__(self) -> str:\n\ - args = [] \n\ - for field in dataclasses.fields(self):\n\ - match = re.search("[^_]", field.name[0])\n\ - if match is not None:\n\ - default = getattr(self, f"_{{field.name}}NodeosDefault")\n\ - current = getattr(self, field.name)\n\ - if current is not None and current != default:\n\ - if type(current) is bool: - args.append(f"{{getattr(self, f'_{{field.name}}NodeosArg')}}") - else: - args.append(f"{{getattr(self, f'_{{field.name}}NodeosArg')}} {{getattr(self, field.name)}}") - - return "--plugin " + self._pluginNamespace + "::" + self._pluginName + " " + " ".join(args) if len(args) > 0 else ""\n\n""" - def writeMainFxn(pluginName: str) -> str: return f"""\ def main():\n\ @@ -171,7 +142,7 @@ def main():\n\ main()\n""" def writeHelpers(pluginName: str) -> str: - return "\n" + writeThreadSetter(pluginName) + writeSupportedNodeosArgs() + writeStrFxn() + writeMainFxn(pluginName) + return "\n" + writeThreadSetter(pluginName) + writeMainFxn(pluginName) dataclassFile.write(writeHelpers(f"{newPlugin}Args")) From d1d717d8b05c69fad7190029d43e126c5197d1d3 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 7 Dec 2022 08:46:05 -0600 Subject: [PATCH 033/178] Remove threads setter function and use setattr instead. 
--- tests/performance_tests/ChainPluginArgs.py | 3 --- tests/performance_tests/NetPluginArgs.py | 3 --- tests/performance_tests/ProducerPluginArgs.py | 3 --- .../generate_nodeos_plugin_args_class_files.py | 11 +---------- tests/performance_tests/performance_test.py | 2 +- 5 files changed, 2 insertions(+), 20 deletions(-) diff --git a/tests/performance_tests/ChainPluginArgs.py b/tests/performance_tests/ChainPluginArgs.py index da2d59d440..aa075e3f11 100755 --- a/tests/performance_tests/ChainPluginArgs.py +++ b/tests/performance_tests/ChainPluginArgs.py @@ -170,9 +170,6 @@ class ChainPluginArgs(BasePluginArgs): _snapshotNodeosDefault: str=None _snapshotNodeosArg: str="--snapshot" - def threads(self, threads: int): - self.chainThreads=threads - def main(): pluginArgs = ChainPluginArgs() print(pluginArgs.supportedNodeosArgs()) diff --git a/tests/performance_tests/NetPluginArgs.py b/tests/performance_tests/NetPluginArgs.py index fcfe148968..6774a58267 100755 --- a/tests/performance_tests/NetPluginArgs.py +++ b/tests/performance_tests/NetPluginArgs.py @@ -62,9 +62,6 @@ class NetPluginArgs(BasePluginArgs): _p2pKeepaliveIntervalMsNodeosDefault: int=10000 _p2pKeepaliveIntervalMsNodeosArg: str="--p2p-keepalive-interval-ms" - def threads(self, threads: int): - self.netThreads=threads - def main(): pluginArgs = NetPluginArgs() print(pluginArgs.supportedNodeosArgs()) diff --git a/tests/performance_tests/ProducerPluginArgs.py b/tests/performance_tests/ProducerPluginArgs.py index 0f9aa9646f..4cb8ee8781 100755 --- a/tests/performance_tests/ProducerPluginArgs.py +++ b/tests/performance_tests/ProducerPluginArgs.py @@ -89,9 +89,6 @@ class ProducerPluginArgs(BasePluginArgs): _snapshotsDirNodeosDefault: str='"snapshots"' _snapshotsDirNodeosArg: str="--snapshots-dir" - def threads(self, threads: int): - self.producerThreads=threads - def main(): pluginArgs = ProducerPluginArgs() print(pluginArgs.supportedNodeosArgs()) diff --git 
a/tests/performance_tests/generate_nodeos_plugin_args_class_files.py b/tests/performance_tests/generate_nodeos_plugin_args_class_files.py index 98b04fe213..433a6d63c3 100755 --- a/tests/performance_tests/generate_nodeos_plugin_args_class_files.py +++ b/tests/performance_tests/generate_nodeos_plugin_args_class_files.py @@ -123,15 +123,6 @@ def writeDataclass(plugin:str, dataFieldDict:dict, pluginOptsDict:dict): dataclassFile.write(f" _{newKey}NodeosDefault: str=None\n") dataclassFile.write(f" _{newKey}NodeosArg: str=\"{key}\"\n") - def writeThreadSetter(pluginName: str) -> str: - if (re.search("chain", pluginName, re.IGNORECASE) or re.search("net", pluginName, re.IGNORECASE) or re.search("producer", pluginName, re.IGNORECASE)): - attrName = re.sub("PluginArgs", "", pluginName).lower() - return f"""\ - def threads(self, threads: int): - self.{attrName}Threads=threads\n\n""" - else: - return "" - def writeMainFxn(pluginName: str) -> str: return f"""\ def main():\n\ @@ -142,7 +133,7 @@ def main():\n\ main()\n""" def writeHelpers(pluginName: str) -> str: - return "\n" + writeThreadSetter(pluginName) + writeMainFxn(pluginName) + return "\n" + writeMainFxn(pluginName) dataclassFile.write(writeHelpers(f"{newPlugin}Args")) diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index b0edc31437..10794c7bb6 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -233,7 +233,7 @@ def optimizePluginThreadCount(self, optPlugin: PluginThreadOpt, optType: Plugin for threadCount in range(minThreadCount, maxThreadCount+1): print(f"Running {optPlugin.value} thread count optimization check with {threadCount} {optPlugin.value} threads") - getattr(clusterConfig.extraNodeosArgs, optPlugin.value + 'PluginArgs').threads(threadCount) + setattr(getattr(clusterConfig.extraNodeosArgs, optPlugin.value + 'PluginArgs'), f"{optPlugin.value}Threads", threadCount) binSearchResults = 
self.performPtbBinarySearch(clusterConfig=clusterConfig, logDirRoot=self.loggingConfig.pluginThreadOptLogsDirPath, delReport=True, quiet=False, delPerfLogs=True) From 7703c6ef087e1d8a0cc0fe5aa67f7e23cb626473 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 7 Dec 2022 08:49:27 -0600 Subject: [PATCH 034/178] Remove cruft. --- tests/performance_tests/validate_nodeos_plugin_args.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tests/performance_tests/validate_nodeos_plugin_args.py b/tests/performance_tests/validate_nodeos_plugin_args.py index a13e7021f5..f2cd2f0b1d 100755 --- a/tests/performance_tests/validate_nodeos_plugin_args.py +++ b/tests/performance_tests/validate_nodeos_plugin_args.py @@ -25,10 +25,6 @@ def parseNodeosConfigOptions() -> dict: myStr = re.sub("Application Options:\n",'', string=myStr) pluginSections = re.split("(@@@.*?@@@\n)", string=myStr) - sec=0 - for section in pluginSections: - sec=sec+1 - def pairwise(iterable): "s -> (s0, s1), (s2, s3), (s4, s5), ..." 
a = iter(iterable) From cdb1342fc849019d0353e550a054b33aed0ce398 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 7 Dec 2022 10:58:58 -0600 Subject: [PATCH 035/178] Create NodeosPluginArgs python package --- tests/performance_tests/CMakeLists.txt | 13 ++----------- .../{ => NodeosPluginArgs}/BasePluginArgs.py | 0 .../NodeosPluginArgs/CMakeLists.txt | 11 +++++++++++ .../{ => NodeosPluginArgs}/ChainPluginArgs.py | 0 .../{ => NodeosPluginArgs}/HttpClientPluginArgs.py | 0 .../{ => NodeosPluginArgs}/HttpPluginArgs.py | 0 .../{ => NodeosPluginArgs}/NetPluginArgs.py | 0 .../{ => NodeosPluginArgs}/ProducerPluginArgs.py | 0 .../ResourceMonitorPluginArgs.py | 0 .../SignatureProviderPluginArgs.py | 0 .../StateHistoryPluginArgs.py | 0 .../{ => NodeosPluginArgs}/TraceApiPluginArgs.py | 0 .../performance_tests/NodeosPluginArgs/__init__.py | 12 ++++++++++++ .../generate_nodeos_plugin_args_class_files.py | 3 +-- tests/performance_tests/performance_test.py | 5 +---- tests/performance_tests/performance_test_basic.py | 10 +--------- 16 files changed, 28 insertions(+), 26 deletions(-) rename tests/performance_tests/{ => NodeosPluginArgs}/BasePluginArgs.py (100%) create mode 100644 tests/performance_tests/NodeosPluginArgs/CMakeLists.txt rename tests/performance_tests/{ => NodeosPluginArgs}/ChainPluginArgs.py (100%) rename tests/performance_tests/{ => NodeosPluginArgs}/HttpClientPluginArgs.py (100%) rename tests/performance_tests/{ => NodeosPluginArgs}/HttpPluginArgs.py (100%) rename tests/performance_tests/{ => NodeosPluginArgs}/NetPluginArgs.py (100%) rename tests/performance_tests/{ => NodeosPluginArgs}/ProducerPluginArgs.py (100%) rename tests/performance_tests/{ => NodeosPluginArgs}/ResourceMonitorPluginArgs.py (100%) rename tests/performance_tests/{ => NodeosPluginArgs}/SignatureProviderPluginArgs.py (100%) rename tests/performance_tests/{ => NodeosPluginArgs}/StateHistoryPluginArgs.py (100%) rename tests/performance_tests/{ => NodeosPluginArgs}/TraceApiPluginArgs.py 
(100%) create mode 100644 tests/performance_tests/NodeosPluginArgs/__init__.py rename tests/performance_tests/{ => NodeosPluginArgs}/generate_nodeos_plugin_args_class_files.py (98%) diff --git a/tests/performance_tests/CMakeLists.txt b/tests/performance_tests/CMakeLists.txt index 1796fc95d1..61704ef84d 100644 --- a/tests/performance_tests/CMakeLists.txt +++ b/tests/performance_tests/CMakeLists.txt @@ -9,18 +9,9 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_log_3_2.txt.gz ${CMAKE_CURRENT configure_file(${CMAKE_CURRENT_SOURCE_DIR}/genesis.json ${CMAKE_CURRENT_BINARY_DIR}/genesis.json COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/validate_nodeos_plugin_args.py ${CMAKE_CURRENT_BINARY_DIR}/validate_nodeos_plugin_args.py COPYONLY) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/BasePluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/BasePluginArgs.py COPYONLY) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/ChainPluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/ChainPluginArgs.py COPYONLY) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/HttpClientPluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/HttpClientPluginArgs.py COPYONLY) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/HttpPluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/HttpPluginArgs.py COPYONLY) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/NetPluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/NetPluginArgs.py COPYONLY) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/ProducerPluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/ProducerPluginArgs.py COPYONLY) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/ResourceMonitorPluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/ResourceMonitorPluginArgs.py COPYONLY) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/SignatureProviderPluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/SignatureProviderPluginArgs.py COPYONLY) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/StateHistoryPluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/StateHistoryPluginArgs.py COPYONLY) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/TraceApiPluginArgs.py 
${CMAKE_CURRENT_BINARY_DIR}/TraceApiPluginArgs.py COPYONLY) - add_test(NAME performance_test_basic COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 6000 --tps-limit-per-generator 1500 --clean-run WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME log_reader_tests COMMAND tests/performance_tests/log_reader_tests.py WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME validate_nodeos_plugin_args COMMAND tests/performance_tests/validate_nodeos_plugin_args.py WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST performance_test_basic PROPERTY LABELS nonparallelizable_tests) + +add_subdirectory( NodeosPluginArgs ) diff --git a/tests/performance_tests/BasePluginArgs.py b/tests/performance_tests/NodeosPluginArgs/BasePluginArgs.py similarity index 100% rename from tests/performance_tests/BasePluginArgs.py rename to tests/performance_tests/NodeosPluginArgs/BasePluginArgs.py diff --git a/tests/performance_tests/NodeosPluginArgs/CMakeLists.txt b/tests/performance_tests/NodeosPluginArgs/CMakeLists.txt new file mode 100644 index 0000000000..6ac9f90d02 --- /dev/null +++ b/tests/performance_tests/NodeosPluginArgs/CMakeLists.txt @@ -0,0 +1,11 @@ +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/__init__.py ${CMAKE_CURRENT_BINARY_DIR}/__init__.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/BasePluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/BasePluginArgs.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/ChainPluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/ChainPluginArgs.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/HttpClientPluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/HttpClientPluginArgs.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/HttpPluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/HttpPluginArgs.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/NetPluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/NetPluginArgs.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/ProducerPluginArgs.py 
${CMAKE_CURRENT_BINARY_DIR}/ProducerPluginArgs.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/ResourceMonitorPluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/ResourceMonitorPluginArgs.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/SignatureProviderPluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/SignatureProviderPluginArgs.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/StateHistoryPluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/StateHistoryPluginArgs.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/TraceApiPluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/TraceApiPluginArgs.py COPYONLY) diff --git a/tests/performance_tests/ChainPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/ChainPluginArgs.py similarity index 100% rename from tests/performance_tests/ChainPluginArgs.py rename to tests/performance_tests/NodeosPluginArgs/ChainPluginArgs.py diff --git a/tests/performance_tests/HttpClientPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/HttpClientPluginArgs.py similarity index 100% rename from tests/performance_tests/HttpClientPluginArgs.py rename to tests/performance_tests/NodeosPluginArgs/HttpClientPluginArgs.py diff --git a/tests/performance_tests/HttpPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/HttpPluginArgs.py similarity index 100% rename from tests/performance_tests/HttpPluginArgs.py rename to tests/performance_tests/NodeosPluginArgs/HttpPluginArgs.py diff --git a/tests/performance_tests/NetPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/NetPluginArgs.py similarity index 100% rename from tests/performance_tests/NetPluginArgs.py rename to tests/performance_tests/NodeosPluginArgs/NetPluginArgs.py diff --git a/tests/performance_tests/ProducerPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/ProducerPluginArgs.py similarity index 100% rename from tests/performance_tests/ProducerPluginArgs.py rename to tests/performance_tests/NodeosPluginArgs/ProducerPluginArgs.py diff --git 
a/tests/performance_tests/ResourceMonitorPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/ResourceMonitorPluginArgs.py similarity index 100% rename from tests/performance_tests/ResourceMonitorPluginArgs.py rename to tests/performance_tests/NodeosPluginArgs/ResourceMonitorPluginArgs.py diff --git a/tests/performance_tests/SignatureProviderPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/SignatureProviderPluginArgs.py similarity index 100% rename from tests/performance_tests/SignatureProviderPluginArgs.py rename to tests/performance_tests/NodeosPluginArgs/SignatureProviderPluginArgs.py diff --git a/tests/performance_tests/StateHistoryPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/StateHistoryPluginArgs.py similarity index 100% rename from tests/performance_tests/StateHistoryPluginArgs.py rename to tests/performance_tests/NodeosPluginArgs/StateHistoryPluginArgs.py diff --git a/tests/performance_tests/TraceApiPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/TraceApiPluginArgs.py similarity index 100% rename from tests/performance_tests/TraceApiPluginArgs.py rename to tests/performance_tests/NodeosPluginArgs/TraceApiPluginArgs.py diff --git a/tests/performance_tests/NodeosPluginArgs/__init__.py b/tests/performance_tests/NodeosPluginArgs/__init__.py new file mode 100644 index 0000000000..af58fe3fdd --- /dev/null +++ b/tests/performance_tests/NodeosPluginArgs/__init__.py @@ -0,0 +1,12 @@ +__all__ = ['BasePluginArgs', 'ChainPluginArgs', 'HttpClientPluginArgs', 'HttpPluginArgs', 'NetPluginArgs', 'ProducerPluginArgs', 'ResourceMonitorPluginArgs', 'SignatureProviderPluginArgs', 'StateHistoryPluginArgs', 'TraceApiPluginArgs'] + +from .BasePluginArgs import BasePluginArgs +from .ChainPluginArgs import ChainPluginArgs +from .HttpClientPluginArgs import HttpClientPluginArgs +from .HttpPluginArgs import HttpPluginArgs +from .NetPluginArgs import NetPluginArgs +from .ProducerPluginArgs import ProducerPluginArgs +from .ResourceMonitorPluginArgs 
import ResourceMonitorPluginArgs +from .SignatureProviderPluginArgs import SignatureProviderPluginArgs +from .StateHistoryPluginArgs import StateHistoryPluginArgs +from .TraceApiPluginArgs import TraceApiPluginArgs diff --git a/tests/performance_tests/generate_nodeos_plugin_args_class_files.py b/tests/performance_tests/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py similarity index 98% rename from tests/performance_tests/generate_nodeos_plugin_args_class_files.py rename to tests/performance_tests/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py index 433a6d63c3..bc5644caa6 100755 --- a/tests/performance_tests/generate_nodeos_plugin_args_class_files.py +++ b/tests/performance_tests/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py @@ -4,7 +4,6 @@ import subprocess def main(): - cmd="programs/nodeos/nodeos --help" result = subprocess.run(["programs/nodeos/nodeos", "--help"], capture_output=True, text=True) myStr = result.stdout @@ -62,7 +61,7 @@ def pairwise(iterable): def writeDataclass(plugin:str, dataFieldDict:dict, pluginOptsDict:dict): newPlugin="".join([x.capitalize() for x in plugin.split('_')]).replace(":","") - pluginArgsFile=f"../tests/performance_tests/{newPlugin}Args.py" + pluginArgsFile=f"../tests/performance_tests/NodeosPluginArgs/{newPlugin}Args.py" with open(pluginArgsFile, 'w') as dataclassFile: chainPluginArgs = dataFieldDict[newPlugin] diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index 10794c7bb6..ff42e4a55f 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -11,10 +11,7 @@ harnessPath = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(harnessPath) -from ChainPluginArgs import ChainPluginArgs -from HttpPluginArgs import HttpPluginArgs -from NetPluginArgs import NetPluginArgs -from ProducerPluginArgs import ProducerPluginArgs +from NodeosPluginArgs import ChainPluginArgs, 
HttpPluginArgs, NetPluginArgs, ProducerPluginArgs from TestHarness import TestHelper, Utils from performance_test_basic import PerformanceTestBasic, PtbArgumentsHandler from platform import release, system diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index a7b10cebc1..ef89aee9f2 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -13,15 +13,7 @@ harnessPath = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(harnessPath) -from ChainPluginArgs import ChainPluginArgs -from HttpClientPluginArgs import HttpClientPluginArgs -from HttpPluginArgs import HttpPluginArgs -from NetPluginArgs import NetPluginArgs -from ProducerPluginArgs import ProducerPluginArgs -from ResourceMonitorPluginArgs import ResourceMonitorPluginArgs -from SignatureProviderPluginArgs import SignatureProviderPluginArgs -from StateHistoryPluginArgs import StateHistoryPluginArgs -from TraceApiPluginArgs import TraceApiPluginArgs +from NodeosPluginArgs import ChainPluginArgs, HttpClientPluginArgs, HttpPluginArgs, NetPluginArgs, ProducerPluginArgs, ResourceMonitorPluginArgs, SignatureProviderPluginArgs, StateHistoryPluginArgs, TraceApiPluginArgs from TestHarness import Cluster, TestHelper, Utils, WalletMgr from dataclasses import dataclass, asdict, field from datetime import datetime From bea0ec01956aa70c13355c0d38079517a186369e Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 7 Dec 2022 11:53:23 -0600 Subject: [PATCH 036/178] Fix imports. 
--- tests/performance_tests/validate_nodeos_plugin_args.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/tests/performance_tests/validate_nodeos_plugin_args.py b/tests/performance_tests/validate_nodeos_plugin_args.py index f2cd2f0b1d..948eed9b6a 100755 --- a/tests/performance_tests/validate_nodeos_plugin_args.py +++ b/tests/performance_tests/validate_nodeos_plugin_args.py @@ -3,15 +3,7 @@ import re import subprocess -from ChainPluginArgs import ChainPluginArgs -from HttpClientPluginArgs import HttpClientPluginArgs -from HttpPluginArgs import HttpPluginArgs -from NetPluginArgs import NetPluginArgs -from ProducerPluginArgs import ProducerPluginArgs -from ResourceMonitorPluginArgs import ResourceMonitorPluginArgs -from SignatureProviderPluginArgs import SignatureProviderPluginArgs -from StateHistoryPluginArgs import StateHistoryPluginArgs -from TraceApiPluginArgs import TraceApiPluginArgs +from NodeosPluginArgs import ChainPluginArgs, HttpClientPluginArgs, HttpPluginArgs, NetPluginArgs, ProducerPluginArgs, ResourceMonitorPluginArgs, SignatureProviderPluginArgs, StateHistoryPluginArgs, TraceApiPluginArgs testSuccessful = False From cc6879c46737d304dd502e091edce04b2c75d675 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 7 Dec 2022 12:11:16 -0600 Subject: [PATCH 037/178] Fix module name. 
--- tests/performance_tests/NodeosPluginArgs/ChainPluginArgs.py | 2 +- .../performance_tests/NodeosPluginArgs/HttpClientPluginArgs.py | 2 +- tests/performance_tests/NodeosPluginArgs/HttpPluginArgs.py | 2 +- tests/performance_tests/NodeosPluginArgs/NetPluginArgs.py | 2 +- tests/performance_tests/NodeosPluginArgs/ProducerPluginArgs.py | 2 +- .../NodeosPluginArgs/ResourceMonitorPluginArgs.py | 2 +- .../NodeosPluginArgs/SignatureProviderPluginArgs.py | 2 +- .../NodeosPluginArgs/StateHistoryPluginArgs.py | 2 +- tests/performance_tests/NodeosPluginArgs/TraceApiPluginArgs.py | 2 +- .../NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py | 2 +- 10 files changed, 10 insertions(+), 10 deletions(-) diff --git a/tests/performance_tests/NodeosPluginArgs/ChainPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/ChainPluginArgs.py index aa075e3f11..4706868269 100755 --- a/tests/performance_tests/NodeosPluginArgs/ChainPluginArgs.py +++ b/tests/performance_tests/NodeosPluginArgs/ChainPluginArgs.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 from dataclasses import dataclass -from BasePluginArgs import BasePluginArgs +from .BasePluginArgs import BasePluginArgs @dataclass class ChainPluginArgs(BasePluginArgs): diff --git a/tests/performance_tests/NodeosPluginArgs/HttpClientPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/HttpClientPluginArgs.py index b5a7f517b7..6c60aec9f9 100755 --- a/tests/performance_tests/NodeosPluginArgs/HttpClientPluginArgs.py +++ b/tests/performance_tests/NodeosPluginArgs/HttpClientPluginArgs.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 from dataclasses import dataclass -from BasePluginArgs import BasePluginArgs +from .BasePluginArgs import BasePluginArgs @dataclass class HttpClientPluginArgs(BasePluginArgs): diff --git a/tests/performance_tests/NodeosPluginArgs/HttpPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/HttpPluginArgs.py index cb3d86335b..7881907081 100755 --- a/tests/performance_tests/NodeosPluginArgs/HttpPluginArgs.py +++ 
b/tests/performance_tests/NodeosPluginArgs/HttpPluginArgs.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 from dataclasses import dataclass -from BasePluginArgs import BasePluginArgs +from .BasePluginArgs import BasePluginArgs @dataclass class HttpPluginArgs(BasePluginArgs): diff --git a/tests/performance_tests/NodeosPluginArgs/NetPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/NetPluginArgs.py index 6774a58267..cb467102d3 100755 --- a/tests/performance_tests/NodeosPluginArgs/NetPluginArgs.py +++ b/tests/performance_tests/NodeosPluginArgs/NetPluginArgs.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 from dataclasses import dataclass -from BasePluginArgs import BasePluginArgs +from .BasePluginArgs import BasePluginArgs @dataclass class NetPluginArgs(BasePluginArgs): diff --git a/tests/performance_tests/NodeosPluginArgs/ProducerPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/ProducerPluginArgs.py index 4cb8ee8781..015e6720f4 100755 --- a/tests/performance_tests/NodeosPluginArgs/ProducerPluginArgs.py +++ b/tests/performance_tests/NodeosPluginArgs/ProducerPluginArgs.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 from dataclasses import dataclass -from BasePluginArgs import BasePluginArgs +from .BasePluginArgs import BasePluginArgs @dataclass class ProducerPluginArgs(BasePluginArgs): diff --git a/tests/performance_tests/NodeosPluginArgs/ResourceMonitorPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/ResourceMonitorPluginArgs.py index 3cfe8f3fed..ce9f453522 100755 --- a/tests/performance_tests/NodeosPluginArgs/ResourceMonitorPluginArgs.py +++ b/tests/performance_tests/NodeosPluginArgs/ResourceMonitorPluginArgs.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 from dataclasses import dataclass -from BasePluginArgs import BasePluginArgs +from .BasePluginArgs import BasePluginArgs @dataclass class ResourceMonitorPluginArgs(BasePluginArgs): diff --git a/tests/performance_tests/NodeosPluginArgs/SignatureProviderPluginArgs.py 
b/tests/performance_tests/NodeosPluginArgs/SignatureProviderPluginArgs.py index c03fb0d02d..cfd9847d29 100755 --- a/tests/performance_tests/NodeosPluginArgs/SignatureProviderPluginArgs.py +++ b/tests/performance_tests/NodeosPluginArgs/SignatureProviderPluginArgs.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 from dataclasses import dataclass -from BasePluginArgs import BasePluginArgs +from .BasePluginArgs import BasePluginArgs @dataclass class SignatureProviderPluginArgs(BasePluginArgs): diff --git a/tests/performance_tests/NodeosPluginArgs/StateHistoryPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/StateHistoryPluginArgs.py index 306c44895c..005f1a13b6 100755 --- a/tests/performance_tests/NodeosPluginArgs/StateHistoryPluginArgs.py +++ b/tests/performance_tests/NodeosPluginArgs/StateHistoryPluginArgs.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 from dataclasses import dataclass -from BasePluginArgs import BasePluginArgs +from .BasePluginArgs import BasePluginArgs @dataclass class StateHistoryPluginArgs(BasePluginArgs): diff --git a/tests/performance_tests/NodeosPluginArgs/TraceApiPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/TraceApiPluginArgs.py index 3734c2fcb7..19d345479d 100755 --- a/tests/performance_tests/NodeosPluginArgs/TraceApiPluginArgs.py +++ b/tests/performance_tests/NodeosPluginArgs/TraceApiPluginArgs.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 from dataclasses import dataclass -from BasePluginArgs import BasePluginArgs +from .BasePluginArgs import BasePluginArgs @dataclass class TraceApiPluginArgs(BasePluginArgs): diff --git a/tests/performance_tests/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py b/tests/performance_tests/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py index bc5644caa6..e104168d37 100755 --- a/tests/performance_tests/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py +++ b/tests/performance_tests/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py @@ -67,7 +67,7 @@ def 
writeDataclass(plugin:str, dataFieldDict:dict, pluginOptsDict:dict): dataclassFile.write(f"#!/usr/bin/env python3\n\n") dataclassFile.write(f"from dataclasses import dataclass\n") - dataclassFile.write(f"from BasePluginArgs import BasePluginArgs\n\n") + dataclassFile.write(f"from .BasePluginArgs import BasePluginArgs\n\n") dataclassFile.write(f"@dataclass\nclass {newPlugin}Args(BasePluginArgs):\n") dataclassFile.write(f" _pluginNamespace: str=\"eosio\"\n") dataclassFile.write(f" _pluginName: str=\"{plugin[:-1]}\"\n") From 52b7903d92b667fd836e97aa649c3021ba4c3f72 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 7 Dec 2022 14:17:54 -0600 Subject: [PATCH 038/178] Remove redundant paths in CMake files. --- tests/performance_tests/CMakeLists.txt | 20 ++++++++--------- .../NodeosPluginArgs/CMakeLists.txt | 22 +++++++++---------- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/tests/performance_tests/CMakeLists.txt b/tests/performance_tests/CMakeLists.txt index 61704ef84d..a06043b380 100644 --- a/tests/performance_tests/CMakeLists.txt +++ b/tests/performance_tests/CMakeLists.txt @@ -1,13 +1,13 @@ -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/performance_test_basic.py ${CMAKE_CURRENT_BINARY_DIR}/performance_test_basic.py COPYONLY) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/performance_test.py ${CMAKE_CURRENT_BINARY_DIR}/performance_test.py COPYONLY) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/log_reader.py ${CMAKE_CURRENT_BINARY_DIR}/log_reader.py COPYONLY) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/read_log_data.py ${CMAKE_CURRENT_BINARY_DIR}/read_log_data.py COPYONLY) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/log_reader_tests.py ${CMAKE_CURRENT_BINARY_DIR}/log_reader_tests.py COPYONLY) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/launch_transaction_generators.py ${CMAKE_CURRENT_BINARY_DIR}/launch_transaction_generators.py COPYONLY) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_log_2_0_14.txt.gz 
${CMAKE_CURRENT_BINARY_DIR}/nodeos_log_2_0_14.txt.gz COPYONLY) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_log_3_2.txt.gz ${CMAKE_CURRENT_BINARY_DIR}/nodeos_log_3_2.txt.gz COPYONLY) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/genesis.json ${CMAKE_CURRENT_BINARY_DIR}/genesis.json COPYONLY) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/validate_nodeos_plugin_args.py ${CMAKE_CURRENT_BINARY_DIR}/validate_nodeos_plugin_args.py COPYONLY) +configure_file(performance_test_basic.py performance_test_basic.py COPYONLY) +configure_file(performance_test.py performance_test.py COPYONLY) +configure_file(log_reader.py log_reader.py COPYONLY) +configure_file(read_log_data.py read_log_data.py COPYONLY) +configure_file(log_reader_tests.py log_reader_tests.py COPYONLY) +configure_file(launch_transaction_generators.py launch_transaction_generators.py COPYONLY) +configure_file(nodeos_log_2_0_14.txt.gz nodeos_log_2_0_14.txt.gz COPYONLY) +configure_file(nodeos_log_3_2.txt.gz nodeos_log_3_2.txt.gz COPYONLY) +configure_file(genesis.json genesis.json COPYONLY) +configure_file(validate_nodeos_plugin_args.py validate_nodeos_plugin_args.py COPYONLY) add_test(NAME performance_test_basic COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 6000 --tps-limit-per-generator 1500 --clean-run WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME log_reader_tests COMMAND tests/performance_tests/log_reader_tests.py WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) diff --git a/tests/performance_tests/NodeosPluginArgs/CMakeLists.txt b/tests/performance_tests/NodeosPluginArgs/CMakeLists.txt index 6ac9f90d02..57833a3942 100644 --- a/tests/performance_tests/NodeosPluginArgs/CMakeLists.txt +++ b/tests/performance_tests/NodeosPluginArgs/CMakeLists.txt @@ -1,11 +1,11 @@ -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/__init__.py ${CMAKE_CURRENT_BINARY_DIR}/__init__.py COPYONLY) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/BasePluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/BasePluginArgs.py 
COPYONLY) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/ChainPluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/ChainPluginArgs.py COPYONLY) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/HttpClientPluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/HttpClientPluginArgs.py COPYONLY) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/HttpPluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/HttpPluginArgs.py COPYONLY) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/NetPluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/NetPluginArgs.py COPYONLY) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/ProducerPluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/ProducerPluginArgs.py COPYONLY) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/ResourceMonitorPluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/ResourceMonitorPluginArgs.py COPYONLY) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/SignatureProviderPluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/SignatureProviderPluginArgs.py COPYONLY) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/StateHistoryPluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/StateHistoryPluginArgs.py COPYONLY) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/TraceApiPluginArgs.py ${CMAKE_CURRENT_BINARY_DIR}/TraceApiPluginArgs.py COPYONLY) +configure_file(__init__.py __init__.py COPYONLY) +configure_file(BasePluginArgs.py BasePluginArgs.py COPYONLY) +configure_file(ChainPluginArgs.py ChainPluginArgs.py COPYONLY) +configure_file(HttpClientPluginArgs.py HttpClientPluginArgs.py COPYONLY) +configure_file(HttpPluginArgs.py HttpPluginArgs.py COPYONLY) +configure_file(NetPluginArgs.py NetPluginArgs.py COPYONLY) +configure_file(ProducerPluginArgs.py ProducerPluginArgs.py COPYONLY) +configure_file(ResourceMonitorPluginArgs.py ResourceMonitorPluginArgs.py COPYONLY) +configure_file(SignatureProviderPluginArgs.py SignatureProviderPluginArgs.py COPYONLY) +configure_file(StateHistoryPluginArgs.py StateHistoryPluginArgs.py COPYONLY) +configure_file(TraceApiPluginArgs.py TraceApiPluginArgs.py COPYONLY) From 792a0f8559656403d63b8c87ee952ff6b4cee140 Mon Sep 17 00:00:00 2001 
From: Peter Oschwald Date: Wed, 7 Dec 2022 14:50:31 -0600 Subject: [PATCH 039/178] Add purpose and limitations statement to generation script. Also add note to each generated class calling out that it was generated. --- .../NodeosPluginArgs/ChainPluginArgs.py | 4 ++ .../NodeosPluginArgs/HttpClientPluginArgs.py | 4 ++ .../NodeosPluginArgs/HttpPluginArgs.py | 4 ++ .../NodeosPluginArgs/NetPluginArgs.py | 4 ++ .../NodeosPluginArgs/ProducerPluginArgs.py | 4 ++ .../ResourceMonitorPluginArgs.py | 4 ++ .../SignatureProviderPluginArgs.py | 4 ++ .../StateHistoryPluginArgs.py | 4 ++ .../NodeosPluginArgs/TraceApiPluginArgs.py | 4 ++ ...generate_nodeos_plugin_args_class_files.py | 51 +++++++++++++++++++ 10 files changed, 87 insertions(+) diff --git a/tests/performance_tests/NodeosPluginArgs/ChainPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/ChainPluginArgs.py index 4706868269..41268994a8 100755 --- a/tests/performance_tests/NodeosPluginArgs/ChainPluginArgs.py +++ b/tests/performance_tests/NodeosPluginArgs/ChainPluginArgs.py @@ -3,6 +3,10 @@ from dataclasses import dataclass from .BasePluginArgs import BasePluginArgs +""" +This file/class was generated by generate_nodeos_plugin_args_class_files.py +""" + @dataclass class ChainPluginArgs(BasePluginArgs): _pluginNamespace: str="eosio" diff --git a/tests/performance_tests/NodeosPluginArgs/HttpClientPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/HttpClientPluginArgs.py index 6c60aec9f9..1a3bd81a1b 100755 --- a/tests/performance_tests/NodeosPluginArgs/HttpClientPluginArgs.py +++ b/tests/performance_tests/NodeosPluginArgs/HttpClientPluginArgs.py @@ -3,6 +3,10 @@ from dataclasses import dataclass from .BasePluginArgs import BasePluginArgs +""" +This file/class was generated by generate_nodeos_plugin_args_class_files.py +""" + @dataclass class HttpClientPluginArgs(BasePluginArgs): _pluginNamespace: str="eosio" diff --git a/tests/performance_tests/NodeosPluginArgs/HttpPluginArgs.py 
b/tests/performance_tests/NodeosPluginArgs/HttpPluginArgs.py index 7881907081..25b2336dc4 100755 --- a/tests/performance_tests/NodeosPluginArgs/HttpPluginArgs.py +++ b/tests/performance_tests/NodeosPluginArgs/HttpPluginArgs.py @@ -3,6 +3,10 @@ from dataclasses import dataclass from .BasePluginArgs import BasePluginArgs +""" +This file/class was generated by generate_nodeos_plugin_args_class_files.py +""" + @dataclass class HttpPluginArgs(BasePluginArgs): _pluginNamespace: str="eosio" diff --git a/tests/performance_tests/NodeosPluginArgs/NetPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/NetPluginArgs.py index cb467102d3..a8e8bbc962 100755 --- a/tests/performance_tests/NodeosPluginArgs/NetPluginArgs.py +++ b/tests/performance_tests/NodeosPluginArgs/NetPluginArgs.py @@ -3,6 +3,10 @@ from dataclasses import dataclass from .BasePluginArgs import BasePluginArgs +""" +This file/class was generated by generate_nodeos_plugin_args_class_files.py +""" + @dataclass class NetPluginArgs(BasePluginArgs): _pluginNamespace: str="eosio" diff --git a/tests/performance_tests/NodeosPluginArgs/ProducerPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/ProducerPluginArgs.py index 015e6720f4..98a234e788 100755 --- a/tests/performance_tests/NodeosPluginArgs/ProducerPluginArgs.py +++ b/tests/performance_tests/NodeosPluginArgs/ProducerPluginArgs.py @@ -3,6 +3,10 @@ from dataclasses import dataclass from .BasePluginArgs import BasePluginArgs +""" +This file/class was generated by generate_nodeos_plugin_args_class_files.py +""" + @dataclass class ProducerPluginArgs(BasePluginArgs): _pluginNamespace: str="eosio" diff --git a/tests/performance_tests/NodeosPluginArgs/ResourceMonitorPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/ResourceMonitorPluginArgs.py index ce9f453522..cc731d9133 100755 --- a/tests/performance_tests/NodeosPluginArgs/ResourceMonitorPluginArgs.py +++ b/tests/performance_tests/NodeosPluginArgs/ResourceMonitorPluginArgs.py @@ -3,6 +3,10 @@ from 
dataclasses import dataclass from .BasePluginArgs import BasePluginArgs +""" +This file/class was generated by generate_nodeos_plugin_args_class_files.py +""" + @dataclass class ResourceMonitorPluginArgs(BasePluginArgs): _pluginNamespace: str="eosio" diff --git a/tests/performance_tests/NodeosPluginArgs/SignatureProviderPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/SignatureProviderPluginArgs.py index cfd9847d29..39d9ddf0dd 100755 --- a/tests/performance_tests/NodeosPluginArgs/SignatureProviderPluginArgs.py +++ b/tests/performance_tests/NodeosPluginArgs/SignatureProviderPluginArgs.py @@ -3,6 +3,10 @@ from dataclasses import dataclass from .BasePluginArgs import BasePluginArgs +""" +This file/class was generated by generate_nodeos_plugin_args_class_files.py +""" + @dataclass class SignatureProviderPluginArgs(BasePluginArgs): _pluginNamespace: str="eosio" diff --git a/tests/performance_tests/NodeosPluginArgs/StateHistoryPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/StateHistoryPluginArgs.py index 005f1a13b6..732909a2b8 100755 --- a/tests/performance_tests/NodeosPluginArgs/StateHistoryPluginArgs.py +++ b/tests/performance_tests/NodeosPluginArgs/StateHistoryPluginArgs.py @@ -3,6 +3,10 @@ from dataclasses import dataclass from .BasePluginArgs import BasePluginArgs +""" +This file/class was generated by generate_nodeos_plugin_args_class_files.py +""" + @dataclass class StateHistoryPluginArgs(BasePluginArgs): _pluginNamespace: str="eosio" diff --git a/tests/performance_tests/NodeosPluginArgs/TraceApiPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/TraceApiPluginArgs.py index 19d345479d..9ef9f5f222 100755 --- a/tests/performance_tests/NodeosPluginArgs/TraceApiPluginArgs.py +++ b/tests/performance_tests/NodeosPluginArgs/TraceApiPluginArgs.py @@ -3,6 +3,10 @@ from dataclasses import dataclass from .BasePluginArgs import BasePluginArgs +""" +This file/class was generated by generate_nodeos_plugin_args_class_files.py +""" + @dataclass class 
TraceApiPluginArgs(BasePluginArgs): _pluginNamespace: str="eosio" diff --git a/tests/performance_tests/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py b/tests/performance_tests/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py index e104168d37..c9f063d8cb 100755 --- a/tests/performance_tests/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py +++ b/tests/performance_tests/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py @@ -3,6 +3,54 @@ import re import subprocess +""" +The purpose of this script is to attempt to generate *PluginArgs.py files, containing respective dataclass objects, +to encapsulate the configurations options available for each plugin as currently documented in nodeos's --help command. + +It currently makes use of the compiled nodeos program and runs the --help command, capturing the output. +It then parses the output, breaking down the presented configuration options by plugin section (ignoring applicaiton and test plugin config options). +This provides a rudimentary list of plugins supported, config options for each plugin, and attempts to acertain default values and types. +The script then uses the parsed output to generate *PluginArgs.py scripts, placing them in the NodeosPluginArgs directory. + +Currently it generates the following scripts: +- ChainPluginArgs.py +- HttpClientPluginArgs.py +- HttpPluginArgs.py +- NetPluginArgs.py +- ProducerPluginArgs.py +- ResourceMonitorPluginArgs.py +- SignatureProviderPluginArgs.py +- StateHistoryPluginArgs.py +- TraceApiPluginArgs.py + +Each *PluginArgs.py file contains one dataclass that captures the available configuration options for that plugin via nodeos command line. 
+ +Each config options is represented by 3 member variables, for example: +1) blocksDir: str=None + --This is the field that will be populated when the dataclass is used by other scripts to configure nodeos +2) _blocksDirNodeosDefault: str='"blocks"' + --This field captures the default value in the nodeos output. This will be compared against the first field to see if the configuration option will be required on the command line to override the default value when running nodeos. +3) _blocksDirNodeosArg: str="--blocks-dir" + --This field captures the command line config option for use when creating the command line string + +The BasePluginArgs class provides implemenations for 2 useful functions for each of these classes: +1) supportedNodeosArgs + -- Provides a list of all the command line config options currently supported by the dataclass +2) __str__ + -- Provides the command line argument string for the current configuration to pass to nodeos + (this only provides command line options where configured values differ from defaults) + +Some current limitations: +- There are some hardoded edge cases when trying to determine the types associated with certain default argument parameters. + These may need to be updated to account for new/different options as they are added/removed/modified by nodeos + +Note: +- To help with maintainability the validate_nodeos_plugin_args.py test script is provided which validates the current + *PluginArgs dataclass configuration objects against the current nodeos --help output to notify developers when + configuration options have changed and updates are required. 
+""" + + def main(): result = subprocess.run(["programs/nodeos/nodeos", "--help"], capture_output=True, text=True) @@ -68,6 +116,9 @@ def writeDataclass(plugin:str, dataFieldDict:dict, pluginOptsDict:dict): dataclassFile.write(f"#!/usr/bin/env python3\n\n") dataclassFile.write(f"from dataclasses import dataclass\n") dataclassFile.write(f"from .BasePluginArgs import BasePluginArgs\n\n") + dataclassFile.write(f"\"\"\"\n") + dataclassFile.write(f"This file/class was generated by generate_nodeos_plugin_args_class_files.py\n") + dataclassFile.write(f"\"\"\"\n\n") dataclassFile.write(f"@dataclass\nclass {newPlugin}Args(BasePluginArgs):\n") dataclassFile.write(f" _pluginNamespace: str=\"eosio\"\n") dataclassFile.write(f" _pluginName: str=\"{plugin[:-1]}\"\n") From 2e4495e651a9df196a0dd5710c854b64aa585e68 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 7 Dec 2022 14:58:44 -0600 Subject: [PATCH 040/178] Add suggestion to regen classes if test fails due to updated nodeos config options. --- .../validate_nodeos_plugin_args.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/tests/performance_tests/validate_nodeos_plugin_args.py b/tests/performance_tests/validate_nodeos_plugin_args.py index 948eed9b6a..47be3f08ef 100755 --- a/tests/performance_tests/validate_nodeos_plugin_args.py +++ b/tests/performance_tests/validate_nodeos_plugin_args.py @@ -7,6 +7,9 @@ testSuccessful = False +regenSuggestion = "Try updating *PluginArgs classes to nodeos's current config options by running the script: generate_nodeos_plugin_args_class_files.py. \ + Updates to generation script may be required if a plugin was added/removed or in some default parameter cases." 
+ def parseNodeosConfigOptions() -> dict: result = subprocess.run(["programs/nodeos/nodeos", "--help"], capture_output=True, text=True) @@ -69,7 +72,7 @@ def pairwise(iterable): #Check whether nodeos has added any plugin configuration sections for confSection in nodeosPluginOptsDict.keys(): - assert confSection in [paClass._pluginName for paClass in curListOfSupportedPlugins] or confSection in curListOfUnsupportedOptionGroups, f"ERROR: New config section \"{confSection}\" added to nodeos which may require updates." + assert confSection in [paClass._pluginName for paClass in curListOfSupportedPlugins] or confSection in curListOfUnsupportedOptionGroups, f"ERROR: New config section \"{confSection}\" added to nodeos which may require updates. {regenSuggestion}" def argStrToAttrName(argStr: str) -> str: attrName="".join([x.capitalize() for x in argStr.split('-')]).replace('--','') @@ -78,11 +81,11 @@ def argStrToAttrName(argStr: str) -> str: for supportedPlugin in curListOfSupportedPlugins: #Check whether nodeos has removed any plugin configuration sections - assert supportedPlugin._pluginName in nodeosPluginOptsDict, f"ERROR: Supported config section \"{supportedPlugin}\" no longer supported by nodeos." + assert supportedPlugin._pluginName in nodeosPluginOptsDict, f"ERROR: Supported config section \"{supportedPlugin}\" no longer supported by nodeos. {regenSuggestion}" for opt in supportedPlugin.supportedNodeosArgs(): #Check whether nodeos has removed any arguments in a plugin - assert opt in nodeosPluginOptsDict[supportedPlugin._pluginName].keys(), f"ERROR: nodeos no longer supports \"{opt}\" in \"{supportedPlugin._pluginName}\"." + assert opt in nodeosPluginOptsDict[supportedPlugin._pluginName].keys(), f"ERROR: nodeos no longer supports \"{opt}\" in \"{supportedPlugin._pluginName}\". 
{regenSuggestion}" ourDefault = getattr(supportedPlugin, f"_{argStrToAttrName(opt)}NodeosDefault") @@ -90,16 +93,16 @@ def argStrToAttrName(argStr: str) -> str: if type(ourDefault) == bool and nodeosCurDefault is None: nodeosCurDefault=False #Check whether our defaults no longer match nodeos's - assert ourDefault == nodeosCurDefault, f"ERROR: {type(supportedPlugin)}'s default for \"{opt}\" is {ourDefault} and no longer matches nodeos's default {nodeosCurDefault} in \"{supportedPlugin._pluginName}\"." + assert ourDefault == nodeosCurDefault, f"ERROR: {type(supportedPlugin)}'s default for \"{opt}\" is {ourDefault} and no longer matches nodeos's default {nodeosCurDefault} in \"{supportedPlugin._pluginName}\". {regenSuggestion}" #Check whether nodeos has added/updated any argument defaults for nodeosOpt, defaultValue in nodeosPluginOptsDict[supportedPlugin._pluginName].items(): - assert nodeosOpt in supportedPlugin.supportedNodeosArgs(), f"ERROR: New nodeos option \"{nodeosOpt}\". Support for this option needs to be added to {type(supportedPlugin)}." + assert nodeosOpt in supportedPlugin.supportedNodeosArgs(), f"ERROR: New nodeos option \"{nodeosOpt}\". Support for this option needs to be added to {type(supportedPlugin)}. {regenSuggestion}" ourDefault = getattr(supportedPlugin, f"_{argStrToAttrName(nodeosOpt)}NodeosDefault") if type(ourDefault) == bool and defaultValue is None: defaultValue=False - assert defaultValue == ourDefault, f"ERROR: nodeos's default for \"{nodeosOpt}\" is {nodeosCurDefault} and no longer matches {type(supportedPlugin)}'s default: {ourDefault} in \"{supportedPlugin._pluginName}\"." + assert defaultValue == ourDefault, f"ERROR: nodeos's default for \"{nodeosOpt}\" is {nodeosCurDefault} and no longer matches {type(supportedPlugin)}'s default: {ourDefault} in \"{supportedPlugin._pluginName}\". 
{regenSuggestion}" testSuccessful = True From 45c6376270747c7ce115b360959551ebd09d1599 Mon Sep 17 00:00:00 2001 From: jgiszczak Date: Wed, 7 Dec 2022 15:11:11 -0600 Subject: [PATCH 041/178] Fix typos and formatting. --- .../generate_nodeos_plugin_args_class_files.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tests/performance_tests/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py b/tests/performance_tests/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py index c9f063d8cb..1c200d2481 100755 --- a/tests/performance_tests/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py +++ b/tests/performance_tests/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py @@ -7,8 +7,8 @@ The purpose of this script is to attempt to generate *PluginArgs.py files, containing respective dataclass objects, to encapsulate the configurations options available for each plugin as currently documented in nodeos's --help command. -It currently makes use of the compiled nodeos program and runs the --help command, capturing the output. -It then parses the output, breaking down the presented configuration options by plugin section (ignoring applicaiton and test plugin config options). +It makes use of the compiled nodeos program and runs the --help command, capturing the output. +It then parses the output, breaking down the presented configuration options by plugin section (ignoring application and test plugin config options). This provides a rudimentary list of plugins supported, config options for each plugin, and attempts to acertain default values and types. The script then uses the parsed output to generate *PluginArgs.py scripts, placing them in the NodeosPluginArgs directory. 
@@ -29,11 +29,12 @@ 1) blocksDir: str=None --This is the field that will be populated when the dataclass is used by other scripts to configure nodeos 2) _blocksDirNodeosDefault: str='"blocks"' - --This field captures the default value in the nodeos output. This will be compared against the first field to see if the configuration option will be required on the command line to override the default value when running nodeos. + --This field captures the default value in the nodeos output. This will be compared against the first field to see if the configuration + option will be required on the command line to override the default value when running nodeos. 3) _blocksDirNodeosArg: str="--blocks-dir" --This field captures the command line config option for use when creating the command line string -The BasePluginArgs class provides implemenations for 2 useful functions for each of these classes: +The BasePluginArgs class provides implementations for 2 useful functions for each of these classes: 1) supportedNodeosArgs -- Provides a list of all the command line config options currently supported by the dataclass 2) __str__ @@ -41,7 +42,7 @@ (this only provides command line options where configured values differ from defaults) Some current limitations: -- There are some hardoded edge cases when trying to determine the types associated with certain default argument parameters. +- There are some hardcoded edge cases when trying to determine the types associated with certain default argument parameters. 
These may need to be updated to account for new/different options as they are added/removed/modified by nodeos Note: From e095f2b154dc613639eb7dfabae62473f8abecd4 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Tue, 13 Dec 2022 17:34:07 -0600 Subject: [PATCH 042/178] replace use of os.path with pathlib's Path and PurePath --- tests/performance_tests/log_reader.py | 4 ++-- tests/performance_tests/performance_test.py | 12 ++++++------ tests/performance_tests/performance_test_basic.py | 14 +++++++------- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/tests/performance_tests/log_reader.py b/tests/performance_tests/log_reader.py index 4cd550bf45..39e081fe2e 100644 --- a/tests/performance_tests/log_reader.py +++ b/tests/performance_tests/log_reader.py @@ -9,8 +9,8 @@ import gzip import math -harnessPath = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) -sys.path.append(harnessPath) +from pathlib import Path, PurePath +sys.path.append(str(PurePath(PurePath(Path(__file__).absolute()).parent).parent)) from TestHarness import Utils from dataclasses import dataclass, asdict, field diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index ff42e4a55f..84633ec3af 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -8,8 +8,8 @@ import json import shutil -harnessPath = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) -sys.path.append(harnessPath) +from pathlib import Path, PurePath +sys.path.append(str(PurePath(PurePath(Path(__file__).absolute()).parent).parent)) from NodeosPluginArgs import ChainPluginArgs, HttpPluginArgs, NetPluginArgs, ProducerPluginArgs from TestHarness import TestHelper, Utils @@ -79,7 +79,7 @@ class PerfTestSearchResults: @dataclass class LoggingConfig: - logDirBase: str = f"./{os.path.splitext(os.path.basename(__file__))[0]}" + logDirBase: str = f"./{PurePath(PurePath(__file__).name).stem[0]}" 
logDirTimestamp: str = f"{datetime.utcnow().strftime('%Y-%m-%d_%H-%M-%S')}" logDirPath: str = field(default_factory=str, init=False) ptbLogsDirPath: str = field(default_factory=str, init=False) @@ -98,7 +98,7 @@ def __init__(self, testHelperConfig: PerformanceTestBasic.TestHelperConfig=Perfo self.testsStart = datetime.utcnow() - self.loggingConfig = PerformanceTest.LoggingConfig(logDirBase=f"{self.ptConfig.logDirRoot}/{os.path.splitext(os.path.basename(__file__))[0]}", + self.loggingConfig = PerformanceTest.LoggingConfig(logDirBase=f"{self.ptConfig.logDirRoot}/{PurePath(PurePath(__file__).name).stem[0]}", logDirTimestamp=f"{self.testsStart.strftime('%Y-%m-%d_%H-%M-%S')}") def performPtbBinarySearch(self, clusterConfig: PerformanceTestBasic.ClusterConfig, logDirRoot: str, delReport: bool, quiet: bool, delPerfLogs: bool) -> TpsTestResult.PerfTestSearchResults: @@ -311,7 +311,7 @@ def testDirsCleanup(self): try: def removeArtifacts(path): print(f"Checking if test artifacts dir exists: {path}") - if os.path.isdir(f"{path}"): + if Path(f"{path}").is_dir(): print(f"Cleaning up test artifacts dir and all contents of: {path}") shutil.rmtree(f"{path}") @@ -327,7 +327,7 @@ def testDirsSetup(self): try: def createArtifactsDir(path): print(f"Checking if test artifacts dir exists: {path}") - if not os.path.isdir(f"{path}"): + if not Path(f"{path}").is_dir(): print(f"Creating test artifacts dir: {path}") os.mkdir(f"{path}") diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index ef89aee9f2..1ac1c2c4cd 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -10,8 +10,8 @@ import log_reader import launch_transaction_generators as ltg -harnessPath = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) -sys.path.append(harnessPath) +from pathlib import Path, PurePath 
+sys.path.append(str(PurePath(PurePath(Path(__file__).absolute()).parent).parent)) from NodeosPluginArgs import ChainPluginArgs, HttpClientPluginArgs, HttpPluginArgs, NetPluginArgs, ProducerPluginArgs, ResourceMonitorPluginArgs, SignatureProviderPluginArgs, StateHistoryPluginArgs, TraceApiPluginArgs from TestHarness import Cluster, TestHelper, Utils, WalletMgr @@ -100,7 +100,7 @@ def __post_init__(self): @dataclass class LoggingConfig: - logDirBase: str = f"./{os.path.splitext(os.path.basename(__file__))[0]}" + logDirBase: str = f"./{PurePath(PurePath(__file__).name).stem[0]}" logDirTimestamp: str = f"{datetime.utcnow().strftime('%Y-%m-%d_%H-%M-%S')}" logDirTimestampedOptSuffix: str = "" logDirPath: str = field(default_factory=str, init=False) @@ -121,7 +121,7 @@ def __init__(self, testHelperConfig: TestHelperConfig=TestHelperConfig(), cluste self.testStart = datetime.utcnow() - self.loggingConfig = PerformanceTestBasic.LoggingConfig(logDirBase=f"{self.ptbConfig.logDirRoot}/{os.path.splitext(os.path.basename(__file__))[0]}", + self.loggingConfig = PerformanceTestBasic.LoggingConfig(logDirBase=f"{self.ptbConfig.logDirRoot}/{PurePath(PurePath(__file__).name).stem[0]}", logDirTimestamp=f"{self.testStart.strftime('%Y-%m-%d_%H-%M-%S')}", logDirTimestampedOptSuffix = f"-{self.ptbConfig.targetTps}") @@ -155,7 +155,7 @@ def testDirsCleanup(self, delReport: bool=False): try: def removeArtifacts(path): print(f"Checking if test artifacts dir exists: {path}") - if os.path.isdir(f"{path}"): + if Path(f"{path}").is_dir(): print(f"Cleaning up test artifacts dir and all contents of: {path}") shutil.rmtree(f"{path}") @@ -177,7 +177,7 @@ def testDirsSetup(self): try: def createArtifactsDir(path): print(f"Checking if test artifacts dir exists: {path}") - if not os.path.isdir(f"{path}"): + if not Path(f"{path}").is_dir(): print(f"Creating test artifacts dir: {path}") os.mkdir(f"{path}") @@ -194,7 +194,7 @@ def createArtifactsDir(path): print(error) def fileOpenMode(self, filePath) -> 
str: - if os.path.exists(filePath): + if Path(filePath).exists(): append_write = 'a' else: append_write = 'w' From acfdfc53a64b8d4a61235dc747fc3b0a42a2e828 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Tue, 20 Dec 2022 10:56:49 -0600 Subject: [PATCH 043/178] convert additional string paths to pathLike objects --- tests/performance_tests/log_reader.py | 24 +++++----- tests/performance_tests/performance_test.py | 30 ++++++------- .../performance_test_basic.py | 44 +++++++++---------- tests/performance_tests/read_log_data.py | 10 +++-- 4 files changed, 57 insertions(+), 51 deletions(-) diff --git a/tests/performance_tests/log_reader.py b/tests/performance_tests/log_reader.py index 39e081fe2e..965f12e6b8 100644 --- a/tests/performance_tests/log_reader.py +++ b/tests/performance_tests/log_reader.py @@ -5,7 +5,6 @@ import re import numpy as np import json -import glob import gzip import math @@ -24,10 +23,10 @@ @dataclass class ArtifactPaths: - nodeosLogPath: str = "" - trxGenLogDirPath: str = "" - blockTrxDataPath: str = "" - blockDataPath: str = "" + nodeosLogPath: Path = Path("") + trxGenLogDirPath: Path = Path("") + blockTrxDataPath: Path = Path("") + blockDataPath: Path = Path("") @dataclass class TpsTestConfig: @@ -178,8 +177,11 @@ def printBlockData(self): def assertEquality(self, other): assert self == other, f"Error: Actual log:\n{self}\ndid not match expected log:\n{other}" +def selectedOpen(path): + return gzip.open if path.suffix == '.gz' else open + def scrapeLog(data, path): - selectedopen = gzip.open if path.endswith('.gz') else open + selectedopen = selectedOpen(path) with selectedopen(path, 'rt') as f: blockResult = re.findall(r'Received block ([0-9a-fA-F]*).* #(\d+) .*trxs: (\d+)(.*)', f.read()) if data.startBlock is None: @@ -202,23 +204,23 @@ def scrapeLog(data, path): print("Error: Unknown log format") def scrapeTrxGenLog(trxSent, path): - selectedopen = gzip.open if path.endswith('.gz') else open + selectedopen = selectedOpen(path) with 
selectedopen(path, 'rt') as f: trxSent.update(dict([(x[0], x[1]) for x in (line.rstrip('\n').split(',') for line in f)])) def scrapeBlockTrxDataLog(trxDict, path): - selectedopen = gzip.open if path.endswith('.gz') else open + selectedopen = selectedOpen(path) with selectedopen(path, 'rt') as f: trxDict.update(dict([(x[0], trxData(x[1], x[2], x[3])) for x in (line.rstrip('\n').split(',') for line in f)])) def scrapeBlockDataLog(blockDict, path): - selectedopen = gzip.open if path.endswith('.gz') else open + selectedopen = selectedOpen(path) with selectedopen(path, 'rt') as f: blockDict.update(dict([(x[0], blkData(x[1], x[2], x[3], x[4])) for x in (line.rstrip('\n').split(',') for line in f)])) def scrapeTrxGenTrxSentDataLogs(trxSent, trxGenLogDirPath, quiet): filesScraped = [] - for fileName in glob.glob(f"{trxGenLogDirPath}/trx_data_output_*.txt"): + for fileName in trxGenLogDirPath.glob("trx_data_output_*.txt"): filesScraped.append(fileName) scrapeTrxGenLog(trxSent, fileName) @@ -380,6 +382,8 @@ class LogReaderEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, datetime): return obj.isoformat() + if isinstance(obj, PurePath): + return str(obj) if obj is None: return "Unknown" return json.JSONEncoder.default(self, obj) diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index 84633ec3af..410421261e 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -34,7 +34,7 @@ class PerfTestBasicResult: trxExpectMet: bool = False basicTestSuccess: bool = False testAnalysisBlockCnt: int = 0 - logsDir: str = "" + logsDir: Path = Path("") testStart: datetime = None testEnd: datetime = None @@ -56,7 +56,7 @@ class PtConfig: delTestReport: bool=False numAddlBlocksToPrune: int=2 quiet: bool=False - logDirRoot: str="." 
+ logDirRoot: Path=Path(".") skipTpsTests: bool=False calcProducerThreads: str="none" calcChainThreads: str="none" @@ -79,16 +79,16 @@ class PerfTestSearchResults: @dataclass class LoggingConfig: - logDirBase: str = f"./{PurePath(PurePath(__file__).name).stem[0]}" + logDirBase: Path = Path(".")/PurePath(PurePath(__file__).name).stem[0] logDirTimestamp: str = f"{datetime.utcnow().strftime('%Y-%m-%d_%H-%M-%S')}" - logDirPath: str = field(default_factory=str, init=False) - ptbLogsDirPath: str = field(default_factory=str, init=False) - pluginThreadOptLogsDirPath: str = field(default_factory=str, init=False) + logDirPath: Path = field(default_factory=Path, init=False) + ptbLogsDirPath: Path = field(default_factory=Path, init=False) + pluginThreadOptLogsDirPath: Path = field(default_factory=Path, init=False) def __post_init__(self): - self.logDirPath = f"{self.logDirBase}/{self.logDirTimestamp}" - self.ptbLogsDirPath = f"{self.logDirPath}/testRunLogs" - self.pluginThreadOptLogsDirPath = f"{self.logDirPath}/pluginThreadOptRunLogs" + self.logDirPath = Path(self.logDirBase)/Path(self.logDirTimestamp) + self.ptbLogsDirPath = Path(self.logDirPath)/Path("testRunLogs") + self.pluginThreadOptLogsDirPath = Path(self.logDirPath)/Path("pluginThreadOptRunLogs") def __init__(self, testHelperConfig: PerformanceTestBasic.TestHelperConfig=PerformanceTestBasic.TestHelperConfig(), clusterConfig: PerformanceTestBasic.ClusterConfig=PerformanceTestBasic.ClusterConfig(), ptConfig=PtConfig()): @@ -98,11 +98,11 @@ def __init__(self, testHelperConfig: PerformanceTestBasic.TestHelperConfig=Perfo self.testsStart = datetime.utcnow() - self.loggingConfig = PerformanceTest.LoggingConfig(logDirBase=f"{self.ptConfig.logDirRoot}/{PurePath(PurePath(__file__).name).stem[0]}", + self.loggingConfig = PerformanceTest.LoggingConfig(logDirBase=Path(self.ptConfig.logDirRoot)/PurePath(PurePath(__file__).name).stem[0], logDirTimestamp=f"{self.testsStart.strftime('%Y-%m-%d_%H-%M-%S')}") - def 
performPtbBinarySearch(self, clusterConfig: PerformanceTestBasic.ClusterConfig, logDirRoot: str, delReport: bool, quiet: bool, delPerfLogs: bool) -> TpsTestResult.PerfTestSearchResults: - floor = 0 + def performPtbBinarySearch(self, clusterConfig: PerformanceTestBasic.ClusterConfig, logDirRoot: Path, delReport: bool, quiet: bool, delPerfLogs: bool) -> TpsTestResult.PerfTestSearchResults: + floor = 5000 ceiling = self.ptConfig.maxTpsToTest binSearchTarget = self.ptConfig.maxTpsToTest minStep = self.ptConfig.testIterationMinStep @@ -216,7 +216,7 @@ class PluginThreadOptResult: def optimizePluginThreadCount(self, optPlugin: PluginThreadOpt, optType: PluginThreadOptRunType=PluginThreadOptRunType.LOCAL_MAX, minThreadCount: int=2, maxThreadCount: int=os.cpu_count()) -> PluginThreadOptResult: - resultsFile = f"{self.loggingConfig.pluginThreadOptLogsDirPath}/{optPlugin.value}ThreadResults.txt" + resultsFile = Path(self.loggingConfig.pluginThreadOptLogsDirPath)/Path(f"{optPlugin.value}ThreadResults.txt") threadToMaxTpsDict: dict = {} @@ -432,7 +432,7 @@ def runTest(self): print(f"Full Performance Test Report: {jsonReport}") if not self.ptConfig.delReport: - self.exportReportAsJSON(jsonReport, f"{self.loggingConfig.logDirPath}/report.json") + self.exportReportAsJSON(jsonReport, Path(self.loggingConfig.logDirPath)/Path("report.json")) if self.ptConfig.delPerfLogs: print(f"Cleaning up logs directory: {self.loggingConfig.logDirPath}") @@ -519,7 +519,7 @@ def main(): delTestReport=args.del_test_report, numAddlBlocksToPrune=args.num_blocks_to_prune, quiet=args.quiet, - logDirRoot=".", + logDirRoot=Path("."), skipTpsTests=args.skip_tps_test, calcProducerThreads=args.calc_producer_threads, calcChainThreads=args.calc_chain_threads, diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 1ac1c2c4cd..b49011c7da 100755 --- a/tests/performance_tests/performance_test_basic.py +++ 
b/tests/performance_tests/performance_test_basic.py @@ -70,7 +70,7 @@ def __str__(self) -> str: topo: str = "mesh" extraNodeosArgs: ExtraNodeosArgs = ExtraNodeosArgs() useBiosBootFile: bool = False - genesisPath: str = "tests/performance_tests/genesis.json" + genesisPath: Path = Path("tests")/Path("performance_tests")/Path("genesis.json") maximumP2pPerHost: int = 5000 maximumClients: int = 0 loggingDict: dict = field(default_factory=lambda: { "bios": "off" }) @@ -89,7 +89,7 @@ class PtbConfig: testTrxGenDurationSec: int=30 tpsLimitPerGenerator: int=4000 numAddlBlocksToPrune: int=2 - logDirRoot: str="." + logDirRoot: Path=Path(".") delReport: bool=False quiet: bool=False delPerfLogs: bool=False @@ -100,13 +100,13 @@ def __post_init__(self): @dataclass class LoggingConfig: - logDirBase: str = f"./{PurePath(PurePath(__file__).name).stem[0]}" + logDirBase: Path = Path(".")/PurePath(PurePath(__file__).name).stem[0] logDirTimestamp: str = f"{datetime.utcnow().strftime('%Y-%m-%d_%H-%M-%S')}" logDirTimestampedOptSuffix: str = "" - logDirPath: str = field(default_factory=str, init=False) + logDirPath: Path = field(default_factory=Path, init=False) def __post_init__(self): - self.logDirPath = f"{self.logDirBase}/{self.logDirTimestamp}{self.logDirTimestampedOptSuffix}" + self.logDirPath = Path(self.logDirBase)/Path(f"{self.logDirTimestamp}{self.logDirTimestampedOptSuffix}") def __init__(self, testHelperConfig: TestHelperConfig=TestHelperConfig(), clusterConfig: ClusterConfig=ClusterConfig(), ptbConfig=PtbConfig()): self.testHelperConfig = testHelperConfig @@ -121,26 +121,27 @@ def __init__(self, testHelperConfig: TestHelperConfig=TestHelperConfig(), cluste self.testStart = datetime.utcnow() - self.loggingConfig = PerformanceTestBasic.LoggingConfig(logDirBase=f"{self.ptbConfig.logDirRoot}/{PurePath(PurePath(__file__).name).stem[0]}", + self.loggingConfig = PerformanceTestBasic.LoggingConfig(logDirBase=Path(self.ptbConfig.logDirRoot)/PurePath(PurePath(__file__).name).stem[0], 
logDirTimestamp=f"{self.testStart.strftime('%Y-%m-%d_%H-%M-%S')}", logDirTimestampedOptSuffix = f"-{self.ptbConfig.targetTps}") - self.trxGenLogDirPath = f"{self.loggingConfig.logDirPath}/trxGenLogs" - self.varLogsDirPath = f"{self.loggingConfig.logDirPath}/var" - self.etcLogsDirPath = f"{self.loggingConfig.logDirPath}/etc" - self.etcEosioLogsDirPath = f"{self.etcLogsDirPath}/eosio" - self.blockDataLogDirPath = f"{self.loggingConfig.logDirPath}/blockDataLogs" - self.blockDataPath = f"{self.blockDataLogDirPath}/blockData.txt" - self.blockTrxDataPath = f"{self.blockDataLogDirPath}/blockTrxData.txt" - self.reportPath = f"{self.loggingConfig.logDirPath}/data.json" + self.trxGenLogDirPath = self.loggingConfig.logDirPath/Path("trxGenLogs") + self.varLogsDirPath = self.loggingConfig.logDirPath/Path("var") + self.etcLogsDirPath = self.loggingConfig.logDirPath/Path("etc") + self.etcEosioLogsDirPath = self.etcLogsDirPath/Path("eosio") + self.blockDataLogDirPath = self.loggingConfig.logDirPath/Path("blockDataLogs") + # self.blockDataPath = f"{self.blockDataLogDirPath}/blockData.txt" + self.blockDataPath = self.blockDataLogDirPath/Path("blockData.txt") + # self.blockTrxDataPath = f"{self.blockDataLogDirPath}/blockTrxData.txt" + self.blockTrxDataPath = self.blockDataLogDirPath/Path("blockTrxData.txt") + self.reportPath = self.loggingConfig.logDirPath/Path("data.json") # Setup Expectations for Producer and Validation Node IDs # Producer Nodes are index [0, pnodes) and validation nodes/non-producer nodes [pnodes, _totalNodes) # Use first producer node and first non-producer node self.producerNodeId = 0 self.validationNodeId = self.clusterConfig.pnodes - - self.nodeosLogPath = f'var/lib/node_{str(self.validationNodeId).zfill(2)}/stderr.txt' + self.nodeosLogPath = Path("var")/Path("lib")/Path(f"node_{str(self.validationNodeId).zfill(2)}")/Path("stderr.txt") # Setup cluster and its wallet manager self.walletMgr=WalletMgr(True) @@ -155,7 +156,7 @@ def testDirsCleanup(self, delReport: 
bool=False): try: def removeArtifacts(path): print(f"Checking if test artifacts dir exists: {path}") - if Path(f"{path}").is_dir(): + if Path(path).is_dir(): print(f"Cleaning up test artifacts dir and all contents of: {path}") shutil.rmtree(f"{path}") @@ -177,7 +178,7 @@ def testDirsSetup(self): try: def createArtifactsDir(path): print(f"Checking if test artifacts dir exists: {path}") - if not Path(f"{path}").is_dir(): + if not Path(path).is_dir(): print(f"Creating test artifacts dir: {path}") os.mkdir(f"{path}") @@ -299,18 +300,18 @@ def captureLowLevelArtifacts(self): except Exception as e: print(f"Failed to move 'var' to '{self.varLogsDirPath}': {type(e)}: {e}") - etcEosioDir = "etc/eosio" + etcEosioDir = Path("etc")/Path("eosio") for path in os.listdir(etcEosioDir): if path == "launcher": try: # Need to copy here since testnet.template is only generated at compile time then reused, therefore # it needs to remain in etc/eosio/launcher for subsequent tests. - shutil.copytree(f"{etcEosioDir}/{path}", f"{self.etcEosioLogsDirPath}/{path}") + shutil.copytree(etcEosioDir/Path(path), self.etcEosioLogsDirPath/Path(path)) except Exception as e: print(f"Failed to copy '{etcEosioDir}/{path}' to '{self.etcEosioLogsDirPath}/{path}': {type(e)}: {e}") else: try: - shutil.move(f"{etcEosioDir}/{path}", f"{self.etcEosioLogsDirPath}/{path}") + shutil.move(etcEosioDir/Path(path), self.etcEosioLogsDirPath/Path(path)) except Exception as e: print(f"Failed to move '{etcEosioDir}/{path}' to '{self.etcEosioLogsDirPath}/{path}': {type(e)}: {e}") @@ -360,7 +361,6 @@ def runTest(self) -> bool: self.preTestSpinup() self.ptbTestResult = self.runTpsTest() - self.postTpsTestSteps() self.analyzeResultsAndReport(self.ptbTestResult) diff --git a/tests/performance_tests/read_log_data.py b/tests/performance_tests/read_log_data.py index 05d40a9167..18c2814e06 100755 --- a/tests/performance_tests/read_log_data.py +++ b/tests/performance_tests/read_log_data.py @@ -4,6 +4,8 @@ import log_reader import 
launch_transaction_generators as ltg +from pathlib import Path, PurePath + parser = argparse.ArgumentParser(add_help=False) parser.add_argument("--target-tps", type=int, help="The target transfers per second to send during test", default=8000) parser.add_argument("--test-duration-sec", type=int, help="The duration of transfer trx generation for the test in seconds", default=30) @@ -18,14 +20,14 @@ parser.add_argument("--json-path", type=str, help="Path to save json output", default="data.json") parser.add_argument("--quiet", type=bool, help="Whether to quiet printing intermediate results and reports to stdout", default=False) args = parser.parse_args() -nodeosLogPath=args.log_path +nodeosLogPath=Path(args.log_path) blockDataLogDirPath = args.block_data_logs_dir -trxGenLogDirPath = args.trx_data_logs_dir +trxGenLogDirPath = Path(args.trx_data_logs_dir) data = log_reader.chainData() data.startBlock = args.start_block data.ceaseBlock = args.cease_block -blockDataPath = f"{blockDataLogDirPath}/blockData.txt" -blockTrxDataPath = f"{blockDataLogDirPath}/blockTrxData.txt" +blockDataPath = Path(blockDataLogDirPath)/Path("blockData.txt") +blockTrxDataPath = Path(blockDataLogDirPath)/Path("blockTrxData.txt") tpsLimitPerGenerator=args.tps_limit_per_generator targetTps=args.target_tps tpsTrxGensConfig = ltg.TpsTrxGensConfig(targetTps=targetTps, tpsLimitPerGenerator=tpsLimitPerGenerator) From bf1126db27df6121d414d738b9367603c554ac7f Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Tue, 20 Dec 2022 11:17:16 -0600 Subject: [PATCH 044/178] fix some problems with pathLike changes --- tests/performance_tests/performance_test.py | 16 ++++++++-------- .../performance_tests/performance_test_basic.py | 7 +++---- 2 files changed, 11 insertions(+), 12 deletions(-) diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index 410421261e..4aa18c5fb4 100755 --- a/tests/performance_tests/performance_test.py +++ 
b/tests/performance_tests/performance_test.py @@ -86,9 +86,9 @@ class LoggingConfig: pluginThreadOptLogsDirPath: Path = field(default_factory=Path, init=False) def __post_init__(self): - self.logDirPath = Path(self.logDirBase)/Path(self.logDirTimestamp) - self.ptbLogsDirPath = Path(self.logDirPath)/Path("testRunLogs") - self.pluginThreadOptLogsDirPath = Path(self.logDirPath)/Path("pluginThreadOptRunLogs") + self.logDirPath = self.logDirBase/Path(self.logDirTimestamp) + self.ptbLogsDirPath = self.logDirPath/Path("testRunLogs") + self.pluginThreadOptLogsDirPath = self.logDirPath/Path("pluginThreadOptRunLogs") def __init__(self, testHelperConfig: PerformanceTestBasic.TestHelperConfig=PerformanceTestBasic.TestHelperConfig(), clusterConfig: PerformanceTestBasic.ClusterConfig=PerformanceTestBasic.ClusterConfig(), ptConfig=PtConfig()): @@ -102,7 +102,7 @@ def __init__(self, testHelperConfig: PerformanceTestBasic.TestHelperConfig=Perfo logDirTimestamp=f"{self.testsStart.strftime('%Y-%m-%d_%H-%M-%S')}") def performPtbBinarySearch(self, clusterConfig: PerformanceTestBasic.ClusterConfig, logDirRoot: Path, delReport: bool, quiet: bool, delPerfLogs: bool) -> TpsTestResult.PerfTestSearchResults: - floor = 5000 + floor = 0 ceiling = self.ptConfig.maxTpsToTest binSearchTarget = self.ptConfig.maxTpsToTest minStep = self.ptConfig.testIterationMinStep @@ -216,7 +216,7 @@ class PluginThreadOptResult: def optimizePluginThreadCount(self, optPlugin: PluginThreadOpt, optType: PluginThreadOptRunType=PluginThreadOptRunType.LOCAL_MAX, minThreadCount: int=2, maxThreadCount: int=os.cpu_count()) -> PluginThreadOptResult: - resultsFile = Path(self.loggingConfig.pluginThreadOptLogsDirPath)/Path(f"{optPlugin.value}ThreadResults.txt") + resultsFile = self.loggingConfig.pluginThreadOptLogsDirPath/Path(f"{optPlugin.value}ThreadResults.txt") threadToMaxTpsDict: dict = {} @@ -311,7 +311,7 @@ def testDirsCleanup(self): try: def removeArtifacts(path): print(f"Checking if test artifacts dir exists: 
{path}") - if Path(f"{path}").is_dir(): + if Path(path).is_dir(): print(f"Cleaning up test artifacts dir and all contents of: {path}") shutil.rmtree(f"{path}") @@ -327,7 +327,7 @@ def testDirsSetup(self): try: def createArtifactsDir(path): print(f"Checking if test artifacts dir exists: {path}") - if not Path(f"{path}").is_dir(): + if not Path(path).is_dir(): print(f"Creating test artifacts dir: {path}") os.mkdir(f"{path}") @@ -432,7 +432,7 @@ def runTest(self): print(f"Full Performance Test Report: {jsonReport}") if not self.ptConfig.delReport: - self.exportReportAsJSON(jsonReport, Path(self.loggingConfig.logDirPath)/Path("report.json")) + self.exportReportAsJSON(jsonReport, self.loggingConfig.logDirPath/Path("report.json")) if self.ptConfig.delPerfLogs: print(f"Cleaning up logs directory: {self.loggingConfig.logDirPath}") diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index b49011c7da..0c4b3507fe 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -106,7 +106,7 @@ class LoggingConfig: logDirPath: Path = field(default_factory=Path, init=False) def __post_init__(self): - self.logDirPath = Path(self.logDirBase)/Path(f"{self.logDirTimestamp}{self.logDirTimestampedOptSuffix}") + self.logDirPath = self.logDirBase/Path(f"{self.logDirTimestamp}{self.logDirTimestampedOptSuffix}") def __init__(self, testHelperConfig: TestHelperConfig=TestHelperConfig(), clusterConfig: ClusterConfig=ClusterConfig(), ptbConfig=PtbConfig()): self.testHelperConfig = testHelperConfig @@ -130,9 +130,7 @@ def __init__(self, testHelperConfig: TestHelperConfig=TestHelperConfig(), cluste self.etcLogsDirPath = self.loggingConfig.logDirPath/Path("etc") self.etcEosioLogsDirPath = self.etcLogsDirPath/Path("eosio") self.blockDataLogDirPath = self.loggingConfig.logDirPath/Path("blockDataLogs") - # self.blockDataPath = f"{self.blockDataLogDirPath}/blockData.txt" 
self.blockDataPath = self.blockDataLogDirPath/Path("blockData.txt") - # self.blockTrxDataPath = f"{self.blockDataLogDirPath}/blockTrxData.txt" self.blockTrxDataPath = self.blockDataLogDirPath/Path("blockTrxData.txt") self.reportPath = self.loggingConfig.logDirPath/Path("data.json") @@ -195,7 +193,7 @@ def createArtifactsDir(path): print(error) def fileOpenMode(self, filePath) -> str: - if Path(filePath).exists(): + if filePath.exists(): append_write = 'a' else: append_write = 'w' @@ -361,6 +359,7 @@ def runTest(self) -> bool: self.preTestSpinup() self.ptbTestResult = self.runTpsTest() + self.postTpsTestSteps() self.analyzeResultsAndReport(self.ptbTestResult) From 339d1c8624b87024ff06633636417a02ac8334da Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Wed, 21 Dec 2022 11:06:23 -0600 Subject: [PATCH 045/178] convert log reader tests to use pathlike objects) --- tests/performance_tests/log_reader_tests.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/performance_tests/log_reader_tests.py b/tests/performance_tests/log_reader_tests.py index a7db74ce0b..cc061ba253 100755 --- a/tests/performance_tests/log_reader_tests.py +++ b/tests/performance_tests/log_reader_tests.py @@ -3,13 +3,15 @@ # Also ensures that all versions of nodeos logs can be handled import log_reader +from pathlib import Path, PurePath + testSuccessful = False # Test log scraping for 3.2 log format dataCurrent = log_reader.chainData() dataCurrent.startBlock = None dataCurrent.ceaseBlock = None -log_reader.scrapeLog(dataCurrent, "tests/performance_tests/nodeos_log_3_2.txt.gz") +log_reader.scrapeLog(dataCurrent, Path("tests")/Path("performance_tests")/Path("nodeos_log_3_2.txt.gz")) expectedCurrent = log_reader.chainData() expectedCurrent.startBlock = 2 @@ -101,7 +103,7 @@ dataOld = log_reader.chainData() dataOld.startBlock = None dataOld.ceaseBlock = None -log_reader.scrapeLog(dataOld, "tests/performance_tests/nodeos_log_2_0_14.txt.gz") +log_reader.scrapeLog(dataOld, 
Path("tests")/Path("performance_tests")/Path("nodeos_log_2_0_14.txt.gz")) expectedOld = log_reader.chainData() expectedOld.startBlock = 2 expectedOld.ceaseBlock = 93 From 54af028748e3d9976e150a618734f14e68d8ec35 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Wed, 21 Dec 2022 11:07:20 -0600 Subject: [PATCH 046/178] remove unused import --- tests/performance_tests/log_reader_tests.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/performance_tests/log_reader_tests.py b/tests/performance_tests/log_reader_tests.py index cc061ba253..5a86c1ef63 100755 --- a/tests/performance_tests/log_reader_tests.py +++ b/tests/performance_tests/log_reader_tests.py @@ -3,7 +3,7 @@ # Also ensures that all versions of nodeos logs can be handled import log_reader -from pathlib import Path, PurePath +from pathlib import Path testSuccessful = False From 17ada31c3399c1ba8739aa47892dc68a4f2183ce Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Wed, 21 Dec 2022 11:09:47 -0600 Subject: [PATCH 047/178] remove more unused imports --- tests/performance_tests/log_reader.py | 1 - tests/performance_tests/read_log_data.py | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/performance_tests/log_reader.py b/tests/performance_tests/log_reader.py index 965f12e6b8..f04b9aaeeb 100644 --- a/tests/performance_tests/log_reader.py +++ b/tests/performance_tests/log_reader.py @@ -6,7 +6,6 @@ import numpy as np import json import gzip -import math from pathlib import Path, PurePath sys.path.append(str(PurePath(PurePath(Path(__file__).absolute()).parent).parent)) diff --git a/tests/performance_tests/read_log_data.py b/tests/performance_tests/read_log_data.py index 18c2814e06..00d8d166c0 100755 --- a/tests/performance_tests/read_log_data.py +++ b/tests/performance_tests/read_log_data.py @@ -4,7 +4,7 @@ import log_reader import launch_transaction_generators as ltg -from pathlib import Path, PurePath +from pathlib import Path parser = 
argparse.ArgumentParser(add_help=False) parser.add_argument("--target-tps", type=int, help="The target transfers per second to send during test", default=8000) From 4a53c71ed0d1718f73f982cb7aacd1009222a1d4 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Fri, 23 Dec 2022 17:09:18 -0600 Subject: [PATCH 048/178] replace repeated path calls with concatenated strings using / operator from path --- tests/performance_tests/performance_test_basic.py | 6 +++--- tests/performance_tests/read_log_data.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 0c4b3507fe..80ae33ebff 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -70,7 +70,7 @@ def __str__(self) -> str: topo: str = "mesh" extraNodeosArgs: ExtraNodeosArgs = ExtraNodeosArgs() useBiosBootFile: bool = False - genesisPath: Path = Path("tests")/Path("performance_tests")/Path("genesis.json") + genesisPath: Path = Path("tests")/"performance_tests"/"genesis.json" maximumP2pPerHost: int = 5000 maximumClients: int = 0 loggingDict: dict = field(default_factory=lambda: { "bios": "off" }) @@ -139,7 +139,7 @@ def __init__(self, testHelperConfig: TestHelperConfig=TestHelperConfig(), cluste # Use first producer node and first non-producer node self.producerNodeId = 0 self.validationNodeId = self.clusterConfig.pnodes - self.nodeosLogPath = Path("var")/Path("lib")/Path(f"node_{str(self.validationNodeId).zfill(2)}")/Path("stderr.txt") + self.nodeosLogPath = Path("var")/"lib"/f"node_{str(self.validationNodeId).zfill(2)}"/"stderr.txt" # Setup cluster and its wallet manager self.walletMgr=WalletMgr(True) @@ -298,7 +298,7 @@ def captureLowLevelArtifacts(self): except Exception as e: print(f"Failed to move 'var' to '{self.varLogsDirPath}': {type(e)}: {e}") - etcEosioDir = Path("etc")/Path("eosio") + etcEosioDir = Path("etc")/"eosio" 
for path in os.listdir(etcEosioDir): if path == "launcher": try: diff --git a/tests/performance_tests/read_log_data.py b/tests/performance_tests/read_log_data.py index 00d8d166c0..975b0a50c1 100755 --- a/tests/performance_tests/read_log_data.py +++ b/tests/performance_tests/read_log_data.py @@ -26,8 +26,8 @@ data = log_reader.chainData() data.startBlock = args.start_block data.ceaseBlock = args.cease_block -blockDataPath = Path(blockDataLogDirPath)/Path("blockData.txt") -blockTrxDataPath = Path(blockDataLogDirPath)/Path("blockTrxData.txt") +blockDataPath = Path(blockDataLogDirPath)/"blockData.txt" +blockTrxDataPath = Path(blockDataLogDirPath)/"blockTrxData.txt" tpsLimitPerGenerator=args.tps_limit_per_generator targetTps=args.target_tps tpsTrxGensConfig = ltg.TpsTrxGensConfig(targetTps=targetTps, tpsLimitPerGenerator=tpsLimitPerGenerator) From 99dd39dd8f04daf8d685c36be696dadff199a4ff Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Fri, 23 Dec 2022 17:11:07 -0600 Subject: [PATCH 049/178] more path to string changes --- tests/performance_tests/log_reader_tests.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/performance_tests/log_reader_tests.py b/tests/performance_tests/log_reader_tests.py index 5a86c1ef63..fc9cdf8765 100755 --- a/tests/performance_tests/log_reader_tests.py +++ b/tests/performance_tests/log_reader_tests.py @@ -11,7 +11,7 @@ dataCurrent = log_reader.chainData() dataCurrent.startBlock = None dataCurrent.ceaseBlock = None -log_reader.scrapeLog(dataCurrent, Path("tests")/Path("performance_tests")/Path("nodeos_log_3_2.txt.gz")) +log_reader.scrapeLog(dataCurrent, Path("tests")/"performance_tests"/"nodeos_log_3_2.txt.gz") expectedCurrent = log_reader.chainData() expectedCurrent.startBlock = 2 @@ -103,7 +103,7 @@ dataOld = log_reader.chainData() dataOld.startBlock = None dataOld.ceaseBlock = None -log_reader.scrapeLog(dataOld, Path("tests")/Path("performance_tests")/Path("nodeos_log_2_0_14.txt.gz")) 
+log_reader.scrapeLog(dataOld, Path("tests")/"performance_tests"/"nodeos_log_2_0_14.txt.gz") expectedOld = log_reader.chainData() expectedOld.startBlock = 2 expectedOld.ceaseBlock = 93 From 1b1a191816854a374c6798037215207503163642 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Fri, 23 Dec 2022 18:13:50 -0600 Subject: [PATCH 050/178] more changes to using specified contracts in performance tests. --- tests/performance_tests/README.md | 12 +++++ tests/performance_tests/performance_test.py | 14 +++++- .../performance_test_basic.py | 44 +++++++++++++++++-- 3 files changed, 66 insertions(+), 4 deletions(-) diff --git a/tests/performance_tests/README.md b/tests/performance_tests/README.md index ec14127cd6..79e38ae741 100644 --- a/tests/performance_tests/README.md +++ b/tests/performance_tests/README.md @@ -273,6 +273,12 @@ The Performance Harness main script `performance_test.py` can be configured usin In "none" mode, the default, no calculation will be attempted and default configured --net-threads value will be used. In "lmax" mode, producer threads will incrementally be tested until the performance rate ceases to increase with the addition of additional threads. In "full" mode producer threads will incrementally be tested from 2..num logical processors, recording each performance and choosing the local max performance (same value as would be discovered in "lmax" mode). Useful for graphing the full performance impact of each available thread. 
(default: none) +* `--account-name` Name of the account to create and assign a contract to +* `--owner-public-key` Owner public key to use with specified account name +* `--active-public-key` Active public key to use with specified account name +* `--contract-dir` Path to contract dir +* `--wasm-file` WASM file name for contract +* `--abi-file` ABI file name for contract ### Support Scripts @@ -338,6 +344,12 @@ The following scripts are typically used by the Performance Harness main script * `--quiet` Whether to quiet printing intermediate results and reports to stdout (default: False) * `--prods-enable-trace-api` Determines whether producer nodes should have eosio::trace_api_plugin enabled (default: False) +* `--account-name` Name of the account to create and assign a contract to +* `--owner-public-key` Owner public key to use with specified account name +* `--active-public-key` Active public key to use with specified account name +* `--contract-dir` Path to contract dir +* `--wasm-file` WASM file name for contract +* `--abi-file` ABI file name for contract #### Launch Transaction Generators diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index ff42e4a55f..f0f13308f7 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -91,10 +91,12 @@ def __post_init__(self): self.pluginThreadOptLogsDirPath = f"{self.logDirPath}/pluginThreadOptRunLogs" def __init__(self, testHelperConfig: PerformanceTestBasic.TestHelperConfig=PerformanceTestBasic.TestHelperConfig(), - clusterConfig: PerformanceTestBasic.ClusterConfig=PerformanceTestBasic.ClusterConfig(), ptConfig=PtConfig()): + clusterConfig: PerformanceTestBasic.ClusterConfig=PerformanceTestBasic.ClusterConfig(), ptConfig=PtConfig(), + specifiedContract: PerformanceTestBasic.SpecifiedContract=PerformanceTestBasic.SpecifiedContract()): self.testHelperConfig = testHelperConfig self.clusterConfig = clusterConfig 
self.ptConfig = ptConfig + self.specifiedContract = specifiedContract self.testsStart = datetime.utcnow() @@ -120,6 +122,9 @@ def performPtbBinarySearch(self, clusterConfig: PerformanceTestBasic.ClusterConf quiet=quiet, delPerfLogs=delPerfLogs) myTest = PerformanceTestBasic(testHelperConfig=self.testHelperConfig, clusterConfig=clusterConfig, ptbConfig=ptbConfig) + if self.specifiedContract.accountName != "": + print("using specified contract") + myTest.specifiedContract = self.specifiedContract testSuccessful = myTest.runTest() if self.evaluateSuccess(myTest, testSuccessful, ptbResult): maxTpsAchieved = binSearchTarget @@ -161,6 +166,9 @@ def performPtbReverseLinearSearch(self, tpsInitial: int) -> TpsTestResult.PerfTe numAddlBlocksToPrune=self.ptConfig.numAddlBlocksToPrune, logDirRoot=self.loggingConfig.ptbLogsDirPath, delReport=self.ptConfig.delReport, quiet=self.ptConfig.quiet, delPerfLogs=self.ptConfig.delPerfLogs) myTest = PerformanceTestBasic(testHelperConfig=self.testHelperConfig, clusterConfig=self.clusterConfig, ptbConfig=ptbConfig) + if self.specifiedContract.accountName != "": + print("using specified contract") + myTest.specifiedContract = self.specifiedContract testSuccessful = myTest.runTest() if self.evaluateSuccess(myTest, testSuccessful, ptbResult): maxTpsAchieved = searchTarget @@ -526,6 +534,10 @@ def main(): calcNetThreads=args.calc_net_threads) myTest = PerformanceTest(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptConfig=ptConfig) + if args.account_name and args.owner_public_key and args.active_public_key and args.contract_dir and args.wasm_file and args.abi_file: + print("using specified contract") + specifiedContract = PerformanceTestBasic.SpecifiedContract(args.account_name, args.owner_public_key, args.active_public_key, args.contract_dir, args.wasm_file, args.abi_file) + myTest.specifiedContract = specifiedContract perfRunSuccessful = myTest.runTest() exitCode = 0 if perfRunSuccessful else 1 diff --git 
a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index ef89aee9f2..a313668c3c 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -14,7 +14,7 @@ sys.path.append(harnessPath) from NodeosPluginArgs import ChainPluginArgs, HttpClientPluginArgs, HttpPluginArgs, NetPluginArgs, ProducerPluginArgs, ResourceMonitorPluginArgs, SignatureProviderPluginArgs, StateHistoryPluginArgs, TraceApiPluginArgs -from TestHarness import Cluster, TestHelper, Utils, WalletMgr +from TestHarness import Account, Cluster, TestHelper, Utils, WalletMgr from dataclasses import dataclass, asdict, field from datetime import datetime @@ -26,6 +26,15 @@ class PtbTpsTestResult: targetTpsPerGenList: list = field(default_factory=list) trxGenExitCodes: list = field(default_factory=list) + @dataclass + class SpecifiedContract: + accountName: str = "" + ownerPublicKey: str = "" + activePublicKey: str = "" + contractDir: str = "" + wasmFile: str = "" + abiFile: str = "" + @dataclass class TestHelperConfig: killAll: bool = True # clean_run @@ -108,10 +117,11 @@ class LoggingConfig: def __post_init__(self): self.logDirPath = f"{self.logDirBase}/{self.logDirTimestamp}{self.logDirTimestampedOptSuffix}" - def __init__(self, testHelperConfig: TestHelperConfig=TestHelperConfig(), clusterConfig: ClusterConfig=ClusterConfig(), ptbConfig=PtbConfig()): + def __init__(self, testHelperConfig: TestHelperConfig=TestHelperConfig(), clusterConfig: ClusterConfig=ClusterConfig(), ptbConfig=PtbConfig(), specifiedContract=SpecifiedContract()): self.testHelperConfig = testHelperConfig self.clusterConfig = clusterConfig self.ptbConfig = ptbConfig + self.specifiedContract = specifiedContract self.testHelperConfig.keepLogs = not self.ptbConfig.delPerfLogs @@ -248,10 +258,27 @@ def setupWalletAndAccounts(self): self.account1PrivKey = self.cluster.accounts[0].activePrivateKey self.account2PrivKey = 
self.cluster.accounts[1].activePrivateKey + def setupContract(self): + print(self.specifiedContract) + specifiedAccount = Account(self.specifiedContract.accountName) + specifiedAccount.ownerPublicKey = self.specifiedContract.ownerPublicKey + specifiedAccount.activePublicKey = self.specifiedContract.activePublicKey + self.cluster.createAccountAndVerify(specifiedAccount, self.cluster.eosioAccount, validationNodeIndex=self.validationNodeId) + print("Publishing contract") + transaction=self.cluster.biosNode.publishContract(specifiedAccount, self.specifiedContract.contractDir, self.specifiedContract.wasmFile, self.specifiedContract.abiFile, waitForTransBlock=True) + if transaction is None: + print("ERROR: Failed to publish contract.") + return None + def runTpsTest(self) -> PtbTpsTestResult: completedRun = False self.producerNode = self.cluster.getNode(self.producerNodeId) self.validationNode = self.cluster.getNode(self.validationNodeId) + if self.specifiedContract.accountName: + self.setupContract() + handlerAcct=self.specifiedContract.accountName + else: + handlerAcct=self.cluster.eosioAccount.name info = self.producerNode.getInfo() chainId = info['chain_id'] lib_id = info['last_irreversible_block_id'] @@ -262,7 +289,7 @@ def runTpsTest(self) -> PtbTpsTestResult: self.data.startBlock = self.waitForEmptyBlocks(self.validationNode, self.emptyBlockGoal) tpsTrxGensConfig = ltg.TpsTrxGensConfig(targetTps=self.ptbConfig.targetTps, tpsLimitPerGenerator=self.ptbConfig.tpsLimitPerGenerator) trxGenLauncher = ltg.TransactionGeneratorsLauncher(chainId=chainId, lastIrreversibleBlockId=lib_id, - handlerAcct=self.cluster.eosioAccount.name, accts=f"{self.account1Name},{self.account2Name}", + handlerAcct=handlerAcct, accts=f"{self.account1Name},{self.account2Name}", privateKeys=f"{self.account1PrivKey},{self.account2PrivKey}", trxGenDurationSec=self.ptbConfig.testTrxGenDurationSec, logDir=self.trxGenLogDirPath, tpsTrxGensConfig=tpsTrxGensConfig) @@ -437,6 +464,12 @@ def 
createBaseArgumentParser(): ptbBaseParserGroup.add_argument("--del-report", help="Whether to delete overarching performance run report.", action='store_true') ptbBaseParserGroup.add_argument("--quiet", help="Whether to quiet printing intermediate results and reports to stdout", action='store_true') ptbBaseParserGroup.add_argument("--prods-enable-trace-api", help="Determines whether producer nodes should have eosio::trace_api_plugin enabled", action='store_true') + ptbBaseParserGroup.add_argument("--account-name", type=str, help="Name of the account to create and assign a contract to") + ptbBaseParserGroup.add_argument("--owner-public-key", type=str, help="Owner public key to use with specified account name") + ptbBaseParserGroup.add_argument("--active-public-key", type=str, help="Active public key to use with specified account name") + ptbBaseParserGroup.add_argument("--contract-dir", type=str, help="Path to contract dir") + ptbBaseParserGroup.add_argument("--wasm-file", type=str, help="WASM file name for contract") + ptbBaseParserGroup.add_argument("--abi-file", type=str, help="ABI file name for contract") return ptbBaseParser @staticmethod @@ -483,6 +516,11 @@ def main(): ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=args.target_tps, testTrxGenDurationSec=args.test_duration_sec, tpsLimitPerGenerator=args.tps_limit_per_generator, numAddlBlocksToPrune=args.num_blocks_to_prune, logDirRoot=".", delReport=args.del_report, quiet=args.quiet, delPerfLogs=args.del_perf_logs) myTest = PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptbConfig=ptbConfig) + if args.account_name and args.owner_public_key and args.active_public_key and args.contract_dir and args.wasm_file and args.abi_file: + print("using specified contract") + specifiedContract = PerformanceTestBasic.SpecifiedContract(args.account_name, args.owner_public_key, args.active_public_key, args.contract_dir, args.wasm_file, args.abi_file) + myTest.specifiedContract = 
specifiedContract + testSuccessful = myTest.runTest() exitCode = 0 if testSuccessful else 1 From dc1575ba6bf5e855ab0427774329240beaab7c55 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Mon, 2 Jan 2023 15:27:07 -0600 Subject: [PATCH 051/178] resolve failures for 2.0 following merge. --- tests/performance_tests/performance_test.py | 3 ++- tests/performance_tests/performance_test_basic.py | 10 ++++++---- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index ff42e4a55f..7d1099d9c2 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -507,7 +507,8 @@ def main(): netPluginArgs = NetPluginArgs(netThreads=args.net_threads) extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs) testClusterConfig = PerformanceTestBasic.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, - prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs) + prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs, + nodeosVers=Utils.getNodeosVersion().split('.')[0]) ptConfig = PerformanceTest.PtConfig(testDurationSec=args.test_iteration_duration_sec, finalDurationSec=args.final_iterations_duration_sec, diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index c2d1befc0c..0b48bb0c01 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -157,7 +157,8 @@ def __init__(self, testHelperConfig: TestHelperConfig=TestHelperConfig(), cluste # Setup cluster and its wallet manager self.walletMgr=WalletMgr(True) - self.cluster=Cluster(walletd=True, loggingLevel="info", loggingLevelDict=self.clusterConfig.loggingDict, 
nodeosVers=self.clusterConfig.nodeosVers) + self.cluster=Cluster(walletd=True, loggingLevel="info", loggingLevelDict=self.clusterConfig.loggingDict, + nodeosVers=self.clusterConfig.nodeosVers) self.cluster.setWalletMgr(self.walletMgr) def cleanupOldClusters(self): @@ -218,11 +219,11 @@ def queryBlockTrxData(self, node, blockDataPath, blockTrxDataPath, startBlockNum block = self.clusterConfig.fetchBlock(node, blockNum) btdf_append_write = self.fileOpenMode(blockTrxDataPath) with open(blockTrxDataPath, btdf_append_write) as trxDataFile: - [trxDataFile.write(f"{trx['id']},{trx['block_num']},{trx['cpu_usage_us']},{trx['net_usage_words']}\n") for trx in block['payload']['transactions'] if block['payload']['transactions']] + self.clusterConfig.writeTrx(trxDataFile, block, blockNum) bdf_append_write = self.fileOpenMode(blockDataPath) with open(blockDataPath, bdf_append_write) as blockDataFile: - blockDataFile.write(f"{block['payload']['number']},{block['payload']['id']},{block['payload']['producer']},{block['payload']['status']},{block['payload']['timestamp']}\n") + self.clusterConfig.writeBlock(blockDataFile, block) def waitForEmptyBlocks(self, node, numEmptyToWaitOn): emptyBlocks = 0 @@ -492,7 +493,8 @@ def main(): ENA = PerformanceTestBasic.ClusterConfig.ExtraNodeosArgs extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs) testClusterConfig = PerformanceTestBasic.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, - prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs) + prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs, + nodeosVers=Utils.getNodeosVersion().split('.')[0]) ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=args.target_tps, testTrxGenDurationSec=args.test_duration_sec, tpsLimitPerGenerator=args.tps_limit_per_generator, 
numAddlBlocksToPrune=args.num_blocks_to_prune, logDirRoot=".", delReport=args.del_report, quiet=args.quiet, delPerfLogs=args.del_perf_logs) myTest = PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptbConfig=ptbConfig) From bbfe01eb34a097a72e900703aebf21846d938b3c Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Tue, 3 Jan 2023 11:20:14 -0600 Subject: [PATCH 052/178] update NodeosPluginArgs to match addition of --state-dir --- tests/performance_tests/NodeosPluginArgs/ChainPluginArgs.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/performance_tests/NodeosPluginArgs/ChainPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/ChainPluginArgs.py index 41268994a8..e3cd77c64c 100755 --- a/tests/performance_tests/NodeosPluginArgs/ChainPluginArgs.py +++ b/tests/performance_tests/NodeosPluginArgs/ChainPluginArgs.py @@ -14,6 +14,9 @@ class ChainPluginArgs(BasePluginArgs): blocksDir: str=None _blocksDirNodeosDefault: str='"blocks"' _blocksDirNodeosArg: str="--blocks-dir" + stateDir: str=None + _stateDirNodeosDefault: str='"state"' + _stateDirNodeosArg: str="--state-dir" protocolFeaturesDir: str=None _protocolFeaturesDirNodeosDefault: str='"protocol_features"' _protocolFeaturesDirNodeosArg: str="--protocol-features-dir" From 5cc242518e67d12d2f15b370887a4184f8c3950b Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Wed, 4 Jan 2023 12:32:06 -0600 Subject: [PATCH 053/178] rework custom contract activation in performance harness --- tests/performance_tests/performance_test.py | 18 ++---- .../performance_test_basic.py | 61 +++++++++---------- 2 files changed, 32 insertions(+), 47 deletions(-) diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index f0f13308f7..7729944a7f 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -91,12 +91,10 @@ def __post_init__(self): self.pluginThreadOptLogsDirPath = 
f"{self.logDirPath}/pluginThreadOptRunLogs" def __init__(self, testHelperConfig: PerformanceTestBasic.TestHelperConfig=PerformanceTestBasic.TestHelperConfig(), - clusterConfig: PerformanceTestBasic.ClusterConfig=PerformanceTestBasic.ClusterConfig(), ptConfig=PtConfig(), - specifiedContract: PerformanceTestBasic.SpecifiedContract=PerformanceTestBasic.SpecifiedContract()): + clusterConfig: PerformanceTestBasic.ClusterConfig=PerformanceTestBasic.ClusterConfig(), ptConfig=PtConfig()): self.testHelperConfig = testHelperConfig self.clusterConfig = clusterConfig self.ptConfig = ptConfig - self.specifiedContract = specifiedContract self.testsStart = datetime.utcnow() @@ -122,9 +120,6 @@ def performPtbBinarySearch(self, clusterConfig: PerformanceTestBasic.ClusterConf quiet=quiet, delPerfLogs=delPerfLogs) myTest = PerformanceTestBasic(testHelperConfig=self.testHelperConfig, clusterConfig=clusterConfig, ptbConfig=ptbConfig) - if self.specifiedContract.accountName != "": - print("using specified contract") - myTest.specifiedContract = self.specifiedContract testSuccessful = myTest.runTest() if self.evaluateSuccess(myTest, testSuccessful, ptbResult): maxTpsAchieved = binSearchTarget @@ -166,9 +161,6 @@ def performPtbReverseLinearSearch(self, tpsInitial: int) -> TpsTestResult.PerfTe numAddlBlocksToPrune=self.ptConfig.numAddlBlocksToPrune, logDirRoot=self.loggingConfig.ptbLogsDirPath, delReport=self.ptConfig.delReport, quiet=self.ptConfig.quiet, delPerfLogs=self.ptConfig.delPerfLogs) myTest = PerformanceTestBasic(testHelperConfig=self.testHelperConfig, clusterConfig=self.clusterConfig, ptbConfig=ptbConfig) - if self.specifiedContract.accountName != "": - print("using specified contract") - myTest.specifiedContract = self.specifiedContract testSuccessful = myTest.runTest() if self.evaluateSuccess(myTest, testSuccessful, ptbResult): maxTpsAchieved = searchTarget @@ -514,8 +506,10 @@ def main(): httpPluginArgs = HttpPluginArgs(httpMaxResponseTimeMs=args.http_max_response_time_ms) 
netPluginArgs = NetPluginArgs(netThreads=args.net_threads) extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs) + testClusterConfig = PerformanceTestBasic.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, - prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs) + prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs, + specifiedContract=PerformanceTestBasic.ClusterConfig.SpecifiedContract(args.account_name, args.owner_public_key, args.active_public_key, args.contract_dir, args.wasm_file, args.abi_file)) ptConfig = PerformanceTest.PtConfig(testDurationSec=args.test_iteration_duration_sec, finalDurationSec=args.final_iterations_duration_sec, @@ -534,10 +528,6 @@ def main(): calcNetThreads=args.calc_net_threads) myTest = PerformanceTest(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptConfig=ptConfig) - if args.account_name and args.owner_public_key and args.active_public_key and args.contract_dir and args.wasm_file and args.abi_file: - print("using specified contract") - specifiedContract = PerformanceTestBasic.SpecifiedContract(args.account_name, args.owner_public_key, args.active_public_key, args.contract_dir, args.wasm_file, args.abi_file) - myTest.specifiedContract = specifiedContract perfRunSuccessful = myTest.runTest() exitCode = 0 if perfRunSuccessful else 1 diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index a313668c3c..ace5b57b8b 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -26,15 +26,6 @@ class PtbTpsTestResult: targetTpsPerGenList: list = field(default_factory=list) trxGenExitCodes: list = field(default_factory=list) - @dataclass - class SpecifiedContract: - accountName: str = "" - ownerPublicKey: str = 
"" - activePublicKey: str = "" - contractDir: str = "" - wasmFile: str = "" - abiFile: str = "" - @dataclass class TestHelperConfig: killAll: bool = True # clean_run @@ -74,10 +65,20 @@ def __str__(self) -> str: args.append(f"{getattr(self, field.name)}") return " ".join(args) + @dataclass + class SpecifiedContract: + accountName: str = "c" + ownerPublicKey: str = "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" + activePublicKey: str = "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" + contractDir: str = "unittests/contracts/eosio.system" + wasmFile: str = "eosio.system.wasm" + abiFile: str = "eosio.system.abi" + pnodes: int = 1 totalNodes: int = 2 topo: str = "mesh" extraNodeosArgs: ExtraNodeosArgs = ExtraNodeosArgs() + specifiedContract: SpecifiedContract = SpecifiedContract() useBiosBootFile: bool = False genesisPath: str = "tests/performance_tests/genesis.json" maximumP2pPerHost: int = 5000 @@ -117,11 +118,10 @@ class LoggingConfig: def __post_init__(self): self.logDirPath = f"{self.logDirBase}/{self.logDirTimestamp}{self.logDirTimestampedOptSuffix}" - def __init__(self, testHelperConfig: TestHelperConfig=TestHelperConfig(), clusterConfig: ClusterConfig=ClusterConfig(), ptbConfig=PtbConfig(), specifiedContract=SpecifiedContract()): + def __init__(self, testHelperConfig: TestHelperConfig=TestHelperConfig(), clusterConfig: ClusterConfig=ClusterConfig(), ptbConfig=PtbConfig()): self.testHelperConfig = testHelperConfig self.clusterConfig = clusterConfig self.ptbConfig = ptbConfig - self.specifiedContract = specifiedContract self.testHelperConfig.keepLogs = not self.ptbConfig.delPerfLogs @@ -259,13 +259,14 @@ def setupWalletAndAccounts(self): self.account2PrivKey = self.cluster.accounts[1].activePrivateKey def setupContract(self): - print(self.specifiedContract) - specifiedAccount = Account(self.specifiedContract.accountName) - specifiedAccount.ownerPublicKey = self.specifiedContract.ownerPublicKey - specifiedAccount.activePublicKey = 
self.specifiedContract.activePublicKey + specifiedAccount = Account(self.clusterConfig.specifiedContract.accountName) + specifiedAccount.ownerPublicKey = self.clusterConfig.specifiedContract.ownerPublicKey + specifiedAccount.activePublicKey = self.clusterConfig.specifiedContract.activePublicKey self.cluster.createAccountAndVerify(specifiedAccount, self.cluster.eosioAccount, validationNodeIndex=self.validationNodeId) print("Publishing contract") - transaction=self.cluster.biosNode.publishContract(specifiedAccount, self.specifiedContract.contractDir, self.specifiedContract.wasmFile, self.specifiedContract.abiFile, waitForTransBlock=True) + transaction=self.cluster.biosNode.publishContract(specifiedAccount, self.clusterConfig.specifiedContract.contractDir, + self.clusterConfig.specifiedContract.wasmFile, + self.clusterConfig.specifiedContract.abiFile, waitForTransBlock=True) if transaction is None: print("ERROR: Failed to publish contract.") return None @@ -274,11 +275,7 @@ def runTpsTest(self) -> PtbTpsTestResult: completedRun = False self.producerNode = self.cluster.getNode(self.producerNodeId) self.validationNode = self.cluster.getNode(self.validationNodeId) - if self.specifiedContract.accountName: - self.setupContract() - handlerAcct=self.specifiedContract.accountName - else: - handlerAcct=self.cluster.eosioAccount.name + self.setupContract() info = self.producerNode.getInfo() chainId = info['chain_id'] lib_id = info['last_irreversible_block_id'] @@ -289,7 +286,7 @@ def runTpsTest(self) -> PtbTpsTestResult: self.data.startBlock = self.waitForEmptyBlocks(self.validationNode, self.emptyBlockGoal) tpsTrxGensConfig = ltg.TpsTrxGensConfig(targetTps=self.ptbConfig.targetTps, tpsLimitPerGenerator=self.ptbConfig.tpsLimitPerGenerator) trxGenLauncher = ltg.TransactionGeneratorsLauncher(chainId=chainId, lastIrreversibleBlockId=lib_id, - handlerAcct=handlerAcct, accts=f"{self.account1Name},{self.account2Name}", + handlerAcct=self.clusterConfig.specifiedContract.accountName, 
accts=f"{self.account1Name},{self.account2Name}", privateKeys=f"{self.account1PrivKey},{self.account2PrivKey}", trxGenDurationSec=self.ptbConfig.testTrxGenDurationSec, logDir=self.trxGenLogDirPath, tpsTrxGensConfig=tpsTrxGensConfig) @@ -464,12 +461,12 @@ def createBaseArgumentParser(): ptbBaseParserGroup.add_argument("--del-report", help="Whether to delete overarching performance run report.", action='store_true') ptbBaseParserGroup.add_argument("--quiet", help="Whether to quiet printing intermediate results and reports to stdout", action='store_true') ptbBaseParserGroup.add_argument("--prods-enable-trace-api", help="Determines whether producer nodes should have eosio::trace_api_plugin enabled", action='store_true') - ptbBaseParserGroup.add_argument("--account-name", type=str, help="Name of the account to create and assign a contract to") - ptbBaseParserGroup.add_argument("--owner-public-key", type=str, help="Owner public key to use with specified account name") - ptbBaseParserGroup.add_argument("--active-public-key", type=str, help="Active public key to use with specified account name") - ptbBaseParserGroup.add_argument("--contract-dir", type=str, help="Path to contract dir") - ptbBaseParserGroup.add_argument("--wasm-file", type=str, help="WASM file name for contract") - ptbBaseParserGroup.add_argument("--abi-file", type=str, help="ABI file name for contract") + ptbBaseParserGroup.add_argument("--account-name", type=str, help="Name of the account to create and assign a contract to", default="c") + ptbBaseParserGroup.add_argument("--owner-public-key", type=str, help="Owner public key to use with specified account name", default="EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV") + ptbBaseParserGroup.add_argument("--active-public-key", type=str, help="Active public key to use with specified account name", default="EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV") + ptbBaseParserGroup.add_argument("--contract-dir", type=str, help="Path to contract dir", 
default="unittests/contracts/eosio.system") + ptbBaseParserGroup.add_argument("--wasm-file", type=str, help="WASM file name for contract", default="eosio.system.wasm") + ptbBaseParserGroup.add_argument("--abi-file", type=str, help="ABI file name for contract", default="eosio.system.abi") return ptbBaseParser @staticmethod @@ -512,14 +509,12 @@ def main(): ENA = PerformanceTestBasic.ClusterConfig.ExtraNodeosArgs extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs) testClusterConfig = PerformanceTestBasic.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, - prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs) + prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs, + specifiedContract=PerformanceTestBasic.ClusterConfig.SpecifiedContract(args.account_name, args.owner_public_key, args.active_public_key, args.contract_dir, args.wasm_file, args.abi_file)) ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=args.target_tps, testTrxGenDurationSec=args.test_duration_sec, tpsLimitPerGenerator=args.tps_limit_per_generator, numAddlBlocksToPrune=args.num_blocks_to_prune, logDirRoot=".", delReport=args.del_report, quiet=args.quiet, delPerfLogs=args.del_perf_logs) myTest = PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptbConfig=ptbConfig) - if args.account_name and args.owner_public_key and args.active_public_key and args.contract_dir and args.wasm_file and args.abi_file: - print("using specified contract") - specifiedContract = PerformanceTestBasic.SpecifiedContract(args.account_name, args.owner_public_key, args.active_public_key, args.contract_dir, args.wasm_file, args.abi_file) - myTest.specifiedContract = specifiedContract + testSuccessful = myTest.runTest() From a8ea37a27cdb9117fd3256c6a6a29d2b499a64bc Mon Sep 17 00:00:00 2001 From: Clayton 
Calabrese Date: Wed, 4 Jan 2023 12:45:19 -0600 Subject: [PATCH 054/178] remove unnecessary new lines --- tests/performance_tests/performance_test.py | 1 - tests/performance_tests/performance_test_basic.py | 1 - 2 files changed, 2 deletions(-) diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index 7729944a7f..03e50dea0a 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -506,7 +506,6 @@ def main(): httpPluginArgs = HttpPluginArgs(httpMaxResponseTimeMs=args.http_max_response_time_ms) netPluginArgs = NetPluginArgs(netThreads=args.net_threads) extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs) - testClusterConfig = PerformanceTestBasic.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs, specifiedContract=PerformanceTestBasic.ClusterConfig.SpecifiedContract(args.account_name, args.owner_public_key, args.active_public_key, args.contract_dir, args.wasm_file, args.abi_file)) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index ace5b57b8b..e7e0bcc787 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -515,7 +515,6 @@ def main(): numAddlBlocksToPrune=args.num_blocks_to_prune, logDirRoot=".", delReport=args.del_report, quiet=args.quiet, delPerfLogs=args.del_perf_logs) myTest = PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptbConfig=ptbConfig) - testSuccessful = myTest.runTest() exitCode = 0 if testSuccessful else 1 From fba333be4d8a53ffcda37573ac8c98ae162f9676 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Thu, 5 Jan 2023 11:58:00 -0600 Subject: [PATCH 055/178] 
report forks and forked blocks in performance harness --- tests/nodeos_forked_chain_test.py | 2 +- tests/performance_tests/log_reader.py | 15 +++++++++++---- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/tests/nodeos_forked_chain_test.py b/tests/nodeos_forked_chain_test.py index 6b3ebc8f6b..e4e5193548 100755 --- a/tests/nodeos_forked_chain_test.py +++ b/tests/nodeos_forked_chain_test.py @@ -562,7 +562,7 @@ def getBlock(self, blockNum): blockProducers0=[] blockProducers1=[] - testSuccessful=True + testSuccessful=False finally: TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails) diff --git a/tests/performance_tests/log_reader.py b/tests/performance_tests/log_reader.py index 4cd550bf45..4a9d3a9e17 100644 --- a/tests/performance_tests/log_reader.py +++ b/tests/performance_tests/log_reader.py @@ -152,6 +152,7 @@ def __init__(self): self.totalElapsed = 0 self.totalTime = 0 self.totalLatency = 0 + self.forkedBlocks = [] def __eq__(self, other): return self.startBlock == other.startBlock and\ self.ceaseBlock == other.ceaseBlock and\ @@ -178,10 +179,11 @@ def printBlockData(self): def assertEquality(self, other): assert self == other, f"Error: Actual log:\n{self}\ndid not match expected log:\n{other}" -def scrapeLog(data, path): +def scrapeLog(data: chainData, path): selectedopen = gzip.open if path.endswith('.gz') else open with selectedopen(path, 'rt') as f: - blockResult = re.findall(r'Received block ([0-9a-fA-F]*).* #(\d+) .*trxs: (\d+)(.*)', f.read()) + line = f.read() + blockResult = re.findall(r'Received block ([0-9a-fA-F]*).* #(\d+) .*trxs: (\d+)(.*)', line) if data.startBlock is None: data.startBlock = 2 if data.ceaseBlock is None: @@ -200,6 +202,9 @@ def scrapeLog(data, path): data.updateTotal(int(value[2]), 0, 0, 0, 0, int(v2Logging[0])) else: print("Error: Unknown log format") + forks = 
re.findall(r'switching forks from ([0-9a-fA-F]+) \(block number (\d+)\) to ([0-9a-fA-F]+) \(block number (\d+)\)', line) + for fork in forks: + data.forkedBlocks.append(int(fork[1]) - int(fork[3])) def scrapeTrxGenLog(trxSent, path): selectedopen = gzip.open if path.endswith('.gz') else open @@ -355,7 +360,7 @@ def calcTrxLatencyCpuNetStats(trxDict : dict, blockDict: dict): basicStats(float(np.min(npLatencyCpuNetList[:,2])), float(np.max(npLatencyCpuNetList[:,2])), float(np.average(npLatencyCpuNetList[:,2])), float(np.std(npLatencyCpuNetList[:,2])), len(npLatencyCpuNetList)) def createReport(guide: chainBlocksGuide, tpsTestConfig: TpsTestConfig, tpsStats: stats, blockSizeStats: stats, trxLatencyStats: basicStats, trxCpuStats: basicStats, - trxNetStats: basicStats, testStart: datetime, testFinish: datetime, argsDict: dict, completedRun: bool) -> dict: + trxNetStats: basicStats, forkedBlocks, testStart: datetime, testFinish: datetime, argsDict: dict, completedRun: bool) -> dict: report = {} report['completedRun'] = completedRun report['testStart'] = testStart @@ -371,6 +376,8 @@ def createReport(guide: chainBlocksGuide, tpsTestConfig: TpsTestConfig, tpsStats report['Analysis']['TrxCPU'] = asdict(trxCpuStats) report['Analysis']['TrxLatency'] = asdict(trxLatencyStats) report['Analysis']['TrxNet'] = asdict(trxNetStats) + report['Analysis']['ForkedBlocks'] = forkedBlocks + report['Analysis']['NumForks'] = len(forkedBlocks) report['args'] = argsDict report['env'] = {'system': system(), 'os': os.name, 'release': release(), 'logical_cpu_count': os.cpu_count()} report['nodeosVersion'] = Utils.getNodeosVersion() @@ -420,7 +427,7 @@ def calcAndReport(data: chainData, tpsTestConfig: TpsTestConfig, artifacts: Arti finish = datetime.utcnow() report = createReport(guide=guide, tpsTestConfig=tpsTestConfig, tpsStats=tpsStats, blockSizeStats=blkSizeStats, trxLatencyStats=trxLatencyStats, - trxCpuStats=trxCpuStats, trxNetStats=trxNetStats, testStart=start, testFinish=finish, 
argsDict=argsDict, completedRun=completedRun) + trxCpuStats=trxCpuStats, trxNetStats=trxNetStats, forkedBlocks=data.forkedBlocks, testStart=start, testFinish=finish, argsDict=argsDict, completedRun=completedRun) return report def exportReportAsJSON(report: json, exportPath): From 186a0b038ed7edc36f988264e25c96d0a7c19557 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Thu, 5 Jan 2023 12:00:38 -0600 Subject: [PATCH 056/178] revert unintended change made to get logs for testing. --- tests/nodeos_forked_chain_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/nodeos_forked_chain_test.py b/tests/nodeos_forked_chain_test.py index e4e5193548..6b3ebc8f6b 100755 --- a/tests/nodeos_forked_chain_test.py +++ b/tests/nodeos_forked_chain_test.py @@ -562,7 +562,7 @@ def getBlock(self, blockNum): blockProducers0=[] blockProducers1=[] - testSuccessful=False + testSuccessful=True finally: TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails) From 22b60a3ae3cf3ce474ef9e4a1458ca608ad08444 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Thu, 5 Jan 2023 12:07:43 -0600 Subject: [PATCH 057/178] add a +1 to how many forked blocks are logged --- tests/performance_tests/log_reader.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/performance_tests/log_reader.py b/tests/performance_tests/log_reader.py index baa680ea62..25c8b054cb 100644 --- a/tests/performance_tests/log_reader.py +++ b/tests/performance_tests/log_reader.py @@ -205,7 +205,7 @@ def scrapeLog(data, path): print("Error: Unknown log format") forks = re.findall(r'switching forks from ([0-9a-fA-F]+) \(block number (\d+)\) to ([0-9a-fA-F]+) \(block number (\d+)\)', line) for fork in forks: - data.forkedBlocks.append(int(fork[1]) - int(fork[3])) + data.forkedBlocks.append(int(fork[1]) - int(fork[3]) + 1) def 
scrapeTrxGenLog(trxSent, path): selectedopen = selectedOpen(path) From 58fed8c51255c1cf095ee0f74f8b2f236c24857f Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 9 Jan 2023 13:02:48 -0600 Subject: [PATCH 058/178] Trx Generator config support for JSON transaction description Add functionality to transaction generator to take input of abi from file, transaction action type, and action data from a string or file json description and generate transactions matching that description. Rename handler_account to contract_owner_account for more accurate description. Add integration test exercising the new functionality that builds transactions from abi, action type and action data. Makes use of the performance_test_basic with new argument '--exercise-trx-specification', but generates the transfer transactions from the abi, action name and action description in the test. Added unit test to exercise the new trx_generator constructor. --- tests/performance_tests/CMakeLists.txt | 1 + tests/performance_tests/README.md | 4 +- .../launch_transaction_generators.py | 96 +++++++++---- .../performance_test_basic.py | 15 +- tests/trx_generator/main.cpp | 70 ++++++++-- tests/trx_generator/trx_generator.cpp | 130 +++++++++++++++++- tests/trx_generator/trx_generator.hpp | 41 +++++- tests/trx_generator/trx_generator_tests.cpp | 19 ++- 8 files changed, 317 insertions(+), 59 deletions(-) diff --git a/tests/performance_tests/CMakeLists.txt b/tests/performance_tests/CMakeLists.txt index f8f6c5215d..45e771f5f8 100644 --- a/tests/performance_tests/CMakeLists.txt +++ b/tests/performance_tests/CMakeLists.txt @@ -10,6 +10,7 @@ configure_file(genesis.json genesis.json COPYONLY) configure_file(validate_nodeos_plugin_args.py validate_nodeos_plugin_args.py COPYONLY) add_test(NAME performance_test_basic COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --clean-run WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME 
performance_test_basic_ex_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --clean-run --exercise-trx-specification WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME log_reader_tests COMMAND tests/performance_tests/log_reader_tests.py WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME validate_nodeos_plugin_args COMMAND tests/performance_tests/validate_nodeos_plugin_args.py WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST performance_test_basic PROPERTY LABELS nonparallelizable_tests) diff --git a/tests/performance_tests/README.md b/tests/performance_tests/README.md index f510c47d9e..b4785d1971 100644 --- a/tests/performance_tests/README.md +++ b/tests/performance_tests/README.md @@ -349,7 +349,7 @@ The following scripts are typically used by the Performance Harness main script * `chain_id` set the chain id * `last_irreversible_block_id` Current last-irreversible-block-id (LIB ID) to use for transactions. -* `handler_account` Account name of the handler account for the transfer actions +* `contract_owner_account` Account name of the contract owner account for the transfer actions * `accounts` Comma separated list of account names * `priv_keys` Comma separated list of private keys. * `trx_gen_duration` Transaction generation duration (seconds). Defaults to 60 seconds. @@ -365,7 +365,7 @@ The following scripts are typically used by the Performance Harness main script Expand Argument List * `--chain-id arg` set the chain id -* `--handler-account arg` Account name of the handler account for +* `--contract-owner-account arg` Account name of the contract owner account for the transfer actions * `--accounts arg` comma-separated list of accounts that will be used for transfers. 
Minimum diff --git a/tests/performance_tests/launch_transaction_generators.py b/tests/performance_tests/launch_transaction_generators.py index 2a2f44091b..a87602732f 100755 --- a/tests/performance_tests/launch_transaction_generators.py +++ b/tests/performance_tests/launch_transaction_generators.py @@ -11,6 +11,8 @@ sys.path.append(harnessPath) from TestHarness import Utils +from pathlib import Path + Print = Utils.Print class TpsTrxGensConfig: @@ -34,45 +36,81 @@ def __init__(self, targetTps: int, tpsLimitPerGenerator: int): class TransactionGeneratorsLauncher: - def __init__(self, chainId: int, lastIrreversibleBlockId: int, handlerAcct: str, accts: str, privateKeys: str, - trxGenDurationSec: int, logDir: str, tpsTrxGensConfig: TpsTrxGensConfig): + def __init__(self, chainId: int, lastIrreversibleBlockId: int, contractOwnerAccount: str, accts: str, privateKeys: str, + trxGenDurationSec: int, logDir: str, abiFile: Path, actionName: str, actionData, tpsTrxGensConfig: TpsTrxGensConfig): self.chainId = chainId self.lastIrreversibleBlockId = lastIrreversibleBlockId - self.handlerAcct = handlerAcct + self.contractOwnerAccount = contractOwnerAccount self.accts = accts self.privateKeys = privateKeys self.trxGenDurationSec = trxGenDurationSec self.tpsTrxGensConfig = tpsTrxGensConfig self.logDir = logDir + self.abiFile = abiFile + self.actionName = actionName + self.actionData = actionData def launch(self): subprocess_ret_codes = [] for targetTps in self.tpsTrxGensConfig.targetTpsPerGenList: - if Utils.Debug: - Print( - f'Running trx_generator: ./tests/trx_generator/trx_generator ' - f'--chain-id {self.chainId} ' - f'--last-irreversible-block-id {self.lastIrreversibleBlockId} ' - f'--handler-account {self.handlerAcct} ' - f'--accounts {self.accts} ' - f'--priv-keys {self.privateKeys} ' - f'--trx-gen-duration {self.trxGenDurationSec} ' - f'--target-tps {targetTps} ' - f'--log-dir {self.logDir}' + if self.abiFile is not None and self.actionName is not None and self.actionData 
is not None: + if Utils.Debug: + Print( + f'Running trx_generator: ./tests/trx_generator/trx_generator ' + f'--chain-id {self.chainId} ' + f'--last-irreversible-block-id {self.lastIrreversibleBlockId} ' + f'--contract-owner-account {self.contractOwnerAccount} ' + f'--accounts {self.accts} ' + f'--priv-keys {self.privateKeys} ' + f'--trx-gen-duration {self.trxGenDurationSec} ' + f'--target-tps {targetTps} ' + f'--log-dir {self.logDir} ' + f'--action-name {self.actionName} ' + f'--action-data {self.actionData} ' + f'--abi-file {self.abiFile}' + ) + subprocess_ret_codes.append( + subprocess.Popen([ + './tests/trx_generator/trx_generator', + '--chain-id', f'{self.chainId}', + '--last-irreversible-block-id', f'{self.lastIrreversibleBlockId}', + '--contract-owner-account', f'{self.contractOwnerAccount}', + '--accounts', f'{self.accts}', + '--priv-keys', f'{self.privateKeys}', + '--trx-gen-duration', f'{self.trxGenDurationSec}', + '--target-tps', f'{targetTps}', + '--log-dir', f'{self.logDir}', + '--action-name', f'{self.actionName}', + '--action-data', f'{self.actionData}', + '--abi-file', f'{self.abiFile}' + ]) + ) + else: + if Utils.Debug: + Print( + f'Running trx_generator: ./tests/trx_generator/trx_generator ' + f'--chain-id {self.chainId} ' + f'--last-irreversible-block-id {self.lastIrreversibleBlockId} ' + f'--contract-owner-account {self.contractOwnerAccount} ' + f'--accounts {self.accts} ' + f'--priv-keys {self.privateKeys} ' + f'--trx-gen-duration {self.trxGenDurationSec} ' + f'--target-tps {targetTps} ' + f'--log-dir {self.logDir}' + ) + subprocess_ret_codes.append( + subprocess.Popen([ + './tests/trx_generator/trx_generator', + '--chain-id', f'{self.chainId}', + '--last-irreversible-block-id', f'{self.lastIrreversibleBlockId}', + '--contract-owner-account', f'{self.contractOwnerAccount}', + '--accounts', f'{self.accts}', + '--priv-keys', f'{self.privateKeys}', + '--trx-gen-duration', f'{self.trxGenDurationSec}', + '--target-tps', f'{targetTps}', + '--log-dir', 
f'{self.logDir}' + ]) ) - subprocess_ret_codes.append( - subprocess.Popen([ - './tests/trx_generator/trx_generator', - '--chain-id', f'{self.chainId}', - '--last-irreversible-block-id', f'{self.lastIrreversibleBlockId}', - '--handler-account', f'{self.handlerAcct}', - '--accounts', f'{self.accts}', - '--priv-keys', f'{self.privateKeys}', - '--trx-gen-duration', f'{self.trxGenDurationSec}', - '--target-tps', f'{targetTps}', - '--log-dir', f'{self.logDir}' - ]) - ) exitCodes = [ret_code.wait() for ret_code in subprocess_ret_codes] return exitCodes @@ -81,7 +119,7 @@ def parseArgs(): parser.add_argument('-?', action='help', default=argparse.SUPPRESS, help=argparse._('show this help message and exit')) parser.add_argument("chain_id", type=str, help="Chain ID") parser.add_argument("last_irreversible_block_id", type=str, help="Last irreversible block ID") - parser.add_argument("handler_account", type=str, help="Cluster handler account name") + parser.add_argument("contract_owner_account", type=str, help="Cluster contract owner account name") parser.add_argument("accounts", type=str, help="Comma separated list of account names") parser.add_argument("priv_keys", type=str, help="Comma separated list of private keys") parser.add_argument("trx_gen_duration", type=str, help="How long to run transaction generators") @@ -95,7 +133,7 @@ def main(): args = parseArgs() trxGenLauncher = TransactionGeneratorsLauncher(chainId=args.chain_id, lastIrreversibleBlockId=args.last_irreversible_block_id, - handlerAcct=args.handler_account, accts=args.accounts, + contractOwnerAccount=args.contract_owner_account, accts=args.accounts, privateKeys=args.priv_keys, trxGenDurationSec=args.trx_gen_duration, logDir=args.log_dir, tpsTrxGensConfig=TpsTrxGensConfig(targetTps=args.target_tps, tpsLimitPerGenerator=args.tps_limit_per_generator)) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 0b48bb0c01..95b332d330 100755 --- 
a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -17,6 +17,7 @@ from TestHarness import Cluster, TestHelper, Utils, WalletMgr from dataclasses import dataclass, asdict, field from datetime import datetime +from pathlib import Path class PerformanceTestBasic: @dataclass @@ -107,6 +108,7 @@ class PtbConfig: quiet: bool=False delPerfLogs: bool=False expectedTransactionsSent: int = field(default_factory=int, init=False) + exerciseTrxSpecification: bool=False def __post_init__(self): self.expectedTransactionsSent = self.testTrxGenDurationSec * self.targetTps @@ -275,10 +277,15 @@ def runTpsTest(self) -> PtbTpsTestResult: self.data.startBlock = self.waitForEmptyBlocks(self.validationNode, self.emptyBlockGoal) tpsTrxGensConfig = ltg.TpsTrxGensConfig(targetTps=self.ptbConfig.targetTps, tpsLimitPerGenerator=self.ptbConfig.tpsLimitPerGenerator) + + abiFile = Path("unittests")/"contracts"/"eosio.token"/"eosio.token.abi" if self.ptbConfig.exerciseTrxSpecification else None + actionName = "transfer" if self.ptbConfig.exerciseTrxSpecification else None + actionData = f'{{"from":"{self.account1Name}","to":"{self.account2Name}","quantity":"0.0001 CUR","memo":"transaction specified"}}' if self.ptbConfig.exerciseTrxSpecification else None + trxGenLauncher = ltg.TransactionGeneratorsLauncher(chainId=chainId, lastIrreversibleBlockId=lib_id, - handlerAcct=self.cluster.eosioAccount.name, accts=f"{self.account1Name},{self.account2Name}", + contractOwnerAccount=self.cluster.eosioAccount.name, accts=f"{self.account1Name},{self.account2Name}", privateKeys=f"{self.account1PrivKey},{self.account2PrivKey}", trxGenDurationSec=self.ptbConfig.testTrxGenDurationSec, - logDir=self.trxGenLogDirPath, tpsTrxGensConfig=tpsTrxGensConfig) + logDir=self.trxGenLogDirPath, abiFile=abiFile, actionName=actionName, actionData=actionData, tpsTrxGensConfig=tpsTrxGensConfig) trxGenExitCodes = trxGenLauncher.launch() print(f"Transaction Generator exit 
codes: {trxGenExitCodes}") @@ -466,6 +473,7 @@ def createArgumentParser(): ptbParserGroup.add_argument("--target-tps", type=int, help="The target transfers per second to send during test", default=8000) ptbParserGroup.add_argument("--test-duration-sec", type=int, help="The duration of transfer trx generation for the test in seconds", default=90) + ptbParserGroup.add_argument("--exercise-trx-specification", help="Test Transaction Generator: abi, action name, action data api", action='store_true') return ptbParser @staticmethod @@ -496,7 +504,8 @@ def main(): prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs, nodeosVers=Utils.getNodeosVersion().split('.')[0]) ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=args.target_tps, testTrxGenDurationSec=args.test_duration_sec, tpsLimitPerGenerator=args.tps_limit_per_generator, - numAddlBlocksToPrune=args.num_blocks_to_prune, logDirRoot=".", delReport=args.del_report, quiet=args.quiet, delPerfLogs=args.del_perf_logs) + numAddlBlocksToPrune=args.num_blocks_to_prune, logDirRoot=".", delReport=args.del_report, quiet=args.quiet, delPerfLogs=args.del_perf_logs, + exerciseTrxSpecification=args.exercise_trx_specification) myTest = PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptbConfig=ptbConfig) testSuccessful = myTest.runTest() diff --git a/tests/trx_generator/main.cpp b/tests/trx_generator/main.cpp index 2f876af6b9..6cc4d0d2f5 100644 --- a/tests/trx_generator/main.cpp +++ b/tests/trx_generator/main.cpp @@ -28,7 +28,7 @@ int main(int argc, char** argv) { variables_map vmap; options_description cli("Transaction Generator command line options."); string chain_id_in; - string h_acct; + string contract_owner_acct; string accts; string p_keys; int64_t trx_expr; @@ -40,6 +40,10 @@ int main(int argc, char** argv) { int64_t max_lag_duration_us; string log_dir_in; + bool transaction_specified = false; + std::string action_name_in; + std::string 
action_data_file_or_str; + std::string abi_file_path_in; vector account_str_vector; vector private_keys_str_vector; @@ -47,7 +51,7 @@ int main(int argc, char** argv) { cli.add_options() ("chain-id", bpo::value(&chain_id_in), "set the chain id") - ("handler-account", bpo::value(&h_acct), "Account name of the handler account for the transfer actions") + ("contract-owner-account", bpo::value(&contract_owner_acct), "Account name of the contract account for the transaction actions") ("accounts", bpo::value(&accts), "comma-separated list of accounts that will be used for transfers. Minimum required accounts: 2.") ("priv-keys", bpo::value(&p_keys), "comma-separated list of private keys in same order of accounts list that will be used to sign transactions. Minimum required: 2.") ("trx-expiration", bpo::value(&trx_expr)->default_value(3600), "transaction expiration time in seconds. Defaults to 3,600. Maximum allowed: 3,600") @@ -58,6 +62,9 @@ int main(int argc, char** argv) { ("monitor-max-lag-percent", bpo::value(&max_lag_per)->default_value(5), "Max percentage off from expected transactions sent before being in violation. Defaults to 5.") ("monitor-max-lag-duration-us", bpo::value(&max_lag_duration_us)->default_value(1000000), "Max microseconds that transaction generation can be in violation before quitting. 
Defaults to 1000000 (1s).") ("log-dir", bpo::value(&log_dir_in), "set the logs directory") + ("action-name", bpo::value(&action_name_in), "The action name applied to the provided action data input") + ("action-data", bpo::value(&action_data_file_or_str), "The path to the json action data file or json action data description string to use") + ("abi-file", bpo::value(&abi_file_path_in), "The path to the contract abi file to use for the supplied transaction action data") ("help,h", "print this list") ; @@ -70,6 +77,17 @@ int main(int argc, char** argv) { return SUCCESS; } + if((vmap.count("action-name") || vmap.count("action-data") || vmap.count("abi-file")) && !(vmap.count("action-name") && vmap.count("action-data") && vmap.count("abi-file"))) { + ilog("Initialization error: If using action-name, action-data, or abi-file to specify a transaction type to generate, must provide all three inputs."); + cli.print(std::cerr); + return INITIALIZE_FAIL; + } + + if(vmap.count("action-name") && vmap.count("action-data") && vmap.count("abi-file")) { + ilog("Specifying transaction to generate directly using action-name, action-data, and abi-file."); + transaction_specified = true; + } + if(!vmap.count("chain-id")) { ilog("Initialization error: missing chain-id"); cli.print(std::cerr); @@ -88,35 +106,45 @@ int main(int argc, char** argv) { return INITIALIZE_FAIL; } - if(vmap.count("handler-account")) { + if(vmap.count("contract-owner-account")) { } else { - ilog("Initialization error: missing handler-account"); + ilog("Initialization error: missing contract-owner-account"); cli.print(std::cerr); return INITIALIZE_FAIL; } if(vmap.count("accounts")) { boost::split(account_str_vector, accts, boost::is_any_of(",")); - if(account_str_vector.size() < 2) { + if(!transaction_specified && account_str_vector.size() < 2) { ilog("Initialization error: requires at minimum 2 transfer accounts"); cli.print(std::cerr); return INITIALIZE_FAIL; } + if (transaction_specified && 
account_str_vector.size() < 1) { + ilog("Initialization error: Specifying transaction to generate requires at minimum 1 account."); + cli.print(std::cerr); + return INITIALIZE_FAIL; + } } else { - ilog("Initialization error: did not specify transfer accounts. requires at minimum 2 transfer accounts"); + ilog("Initialization error: did not specify transfer accounts. Auto transfer transaction generation requires at minimum 2 transfer accounts, while providing transaction action data requires at least one."); cli.print(std::cerr); return INITIALIZE_FAIL; } if(vmap.count("priv-keys")) { boost::split(private_keys_str_vector, p_keys, boost::is_any_of(",")); - if(private_keys_str_vector.size() < 2) { + if(!transaction_specified && private_keys_str_vector.size() < 2) { ilog("Initialization error: requires at minimum 2 private keys"); cli.print(std::cerr); return INITIALIZE_FAIL; } + if (transaction_specified && private_keys_str_vector.size() < 1) { + ilog("Initialization error: Specifying transaction to generate requires at minimum 1 private key"); + cli.print(std::cerr); + return INITIALIZE_FAIL; + } } else { - ilog("Initialization error: did not specify accounts' private keys. requires at minimum 2 private keys"); + ilog("Initialization error: did not specify accounts' private keys. 
Auto transfer transaction generation requires at minimum 2 private keys, while providing transaction action data requires at least one."); cli.print(std::cerr); return INITIALIZE_FAIL; } @@ -159,7 +187,7 @@ int main(int argc, char** argv) { } ilog("Initial chain id ${chainId}", ("chainId", chain_id_in)); - ilog("Handler account ${acct}", ("acct", h_acct)); + ilog("Contract owner account ${acct}", ("acct", contract_owner_acct)); ilog("Transfer accounts ${accts}", ("accts", accts)); ilog("Account private keys ${priv_keys}", ("priv_keys", p_keys)); ilog("Transaction expiration microsections ${expr}", ("expr", trx_expr)); @@ -168,14 +196,26 @@ int main(int argc, char** argv) { ilog("Target generation Transaction Per Second (TPS) ${tps}", ("tps", target_tps)); ilog("Logs directory ${logDir}", ("logDir", log_dir_in)); - auto generator = std::make_shared(chain_id_in, h_acct, - account_str_vector, trx_expr, private_keys_str_vector, lib_id_str, log_dir_in); - std::shared_ptr monitor = std::make_shared(spinup_time_us, max_lag_per, max_lag_duration_us); + std::shared_ptr monitor; + if (transaction_specified) { + auto generator = std::make_shared(chain_id_in, abi_file_path_in, contract_owner_acct, account_str_vector.at(0), action_name_in, + action_data_file_or_str, trx_expr, private_keys_str_vector.at(0), lib_id_str, log_dir_in); + monitor = std::make_shared(spinup_time_us, max_lag_per, max_lag_duration_us); + trx_tps_tester tester{generator, monitor, gen_duration, target_tps}; - trx_tps_tester tester{generator, monitor, gen_duration, target_tps}; + if (!tester.run()) { + return OTHER_FAIL; + } + } else { + auto generator = std::make_shared(chain_id_in, contract_owner_acct, account_str_vector, trx_expr, private_keys_str_vector, + lib_id_str, log_dir_in); + + monitor = std::make_shared(spinup_time_us, max_lag_per, max_lag_duration_us); + trx_tps_tester tester{generator, monitor, gen_duration, target_tps}; - if (!tester.run()) { - return OTHER_FAIL; + if (!tester.run()) { + 
return OTHER_FAIL; + } } if (monitor->terminated_early()) { diff --git a/tests/trx_generator/trx_generator.cpp b/tests/trx_generator/trx_generator.cpp index f22885e875..a8f8fe2081 100644 --- a/tests/trx_generator/trx_generator.cpp +++ b/tests/trx_generator/trx_generator.cpp @@ -7,8 +7,11 @@ #include #include #include +#include #include +#include +using namespace std; using namespace eosio::chain; using namespace eosio; using namespace appbase; @@ -69,17 +72,17 @@ namespace eosio::testing { account, "transfer"_n, make_transfer_data(from, to, quantity, std::move(memo))); } - vector create_initial_transfer_actions(const std::string& salt, const uint64_t& period, const name& handler_acct, const vector& accounts, const vector& priv_keys) { + vector create_initial_transfer_actions(const std::string& salt, const uint64_t& period, const name& contract_owner_account, const vector& accounts, const vector& priv_keys) { vector actions_pairs_vector; for(size_t i = 0; i < accounts.size(); ++i) { for(size_t j = i + 1; j < accounts.size(); ++j) { //create the actions here ilog("create_initial_transfer_actions: creating transfer from ${acctA} to ${acctB}", ("acctA", accounts.at(i))("acctB", accounts.at(j))); - action act_a_to_b = make_transfer_action(handler_acct, accounts.at(i), accounts.at(j), asset::from_string("1.0000 CUR"), salt); + action act_a_to_b = make_transfer_action(contract_owner_account, accounts.at(i), accounts.at(j), asset::from_string("1.0000 CUR"), salt); ilog("create_initial_transfer_actions: creating transfer from ${acctB} to ${acctA}", ("acctB", accounts.at(j))("acctA", accounts.at(i))); - action act_b_to_a = make_transfer_action(handler_acct, accounts.at(j), accounts.at(i), asset::from_string("1.0000 CUR"), salt); + action act_b_to_a = make_transfer_action(contract_owner_account, accounts.at(j), accounts.at(i), asset::from_string("1.0000 CUR"), salt); actions_pairs_vector.push_back(action_pair_w_keys(act_a_to_b, act_b_to_a, priv_keys.at(i), priv_keys.at(j))); 
} @@ -88,10 +91,10 @@ namespace eosio::testing { return actions_pairs_vector; } - transfer_trx_generator::transfer_trx_generator(std::string chain_id_in, std::string handler_acct, + transfer_trx_generator::transfer_trx_generator(std::string chain_id_in, std::string contract_owner_account, const std::vector& accts, int64_t trx_expr, const std::vector& private_keys_str_vector, std::string lib_id_str, std::string log_dir) : - _provider(), _chain_id(chain_id_in), _handler_acct(handler_acct), _accts(accts), + _provider(), _chain_id(chain_id_in), _contract_owner_account(contract_owner_account), _accts(accts), _trx_expiration(trx_expr*1000000), _private_keys_str_vector(private_keys_str_vector), _last_irr_block_id(fc::variant(lib_id_str).as()), _log_dir(log_dir) { } @@ -141,7 +144,7 @@ namespace eosio::testing { std::cout << "Create All Initial Transfer Action/Reaction Pairs (acct 1 -> acct 2, acct 2 -> acct 1) between all provided accounts." << std::endl; - const auto action_pairs_vector = create_initial_transfer_actions(salt, period, _handler_acct, accounts, + const auto action_pairs_vector = create_initial_transfer_actions(salt, period, _contract_owner_account, accounts, private_key_vector); std::cout @@ -202,4 +205,117 @@ namespace eosio::testing { return true; } -} \ No newline at end of file + fc::variant json_from_file_or_string(const string& file_or_str, fc::json::parse_type ptype = fc::json::parse_type::legacy_parser) + { + regex r("^[ \t]*[\{\[]"); + if ( !regex_search(file_or_str, r) && fc::is_regular_file(file_or_str) ) { + try { + return fc::json::from_file(file_or_str, ptype); + } EOS_RETHROW_EXCEPTIONS(json_parse_exception, "Fail to parse JSON from file: ${file}", ("file", file_or_str)); + + } else { + try { + return fc::json::from_string(file_or_str, ptype); + } EOS_RETHROW_EXCEPTIONS(json_parse_exception, "Fail to parse JSON from string: ${string}", ("string", file_or_str)); + } + } + + trx_generator::trx_generator(std::string chain_id_in, const 
std::string& abi_data_file, std::string contract_owner_account, std::string auth_account, std::string action_name, + const std::string& action_data_file_or_str, int64_t trx_expr, const std::string& private_key_str, std::string lib_id_str, std::string log_dir) : + _provider(), _chain_id(chain_id_in), _abi_data_file_path(abi_data_file), _contract_owner_account(contract_owner_account), _auth_account(auth_account), _action(action_name), _action_data_file_or_str(action_data_file_or_str), + _trx_expiration(trx_expr*1000000), _private_key(fc::crypto::private_key(private_key_str)), + _last_irr_block_id(fc::variant(lib_id_str).as()), _log_dir(log_dir) + { + } + + bool trx_generator::setup() { + _nonce_prefix = 0; + _nonce = static_cast(fc::time_point::now().sec_since_epoch()) << 32; + + std::cout + << "Stop Generation (form potential ongoing generation in preparation for starting new generation run)." + << std::endl; + stop_generation(); + + std::cout << "Create Initial Transaction with action data." 
<< std::endl; + abi_serializer abi = abi_serializer(fc::json::from_file(_abi_data_file_path).as(), abi_serializer::create_yield_function( abi_serializer_max_time )); + fc::variant unpacked_action_data_json = json_from_file_or_string(_action_data_file_or_str); + std::cout << "action data variant: " << fc::json::to_pretty_string(unpacked_action_data_json) << std::endl; + + bytes packed_action_data_string; + try { + auto action_type = abi.get_action_type( _action ); + FC_ASSERT( !action_type.empty(), "Unknown action ${action} in contract ${contract}", ("action", _action)( "contract", _auth_account )); + packed_action_data_string = abi.variant_to_binary( action_type, unpacked_action_data_json, abi_serializer::create_yield_function( abi_serializer_max_time ) ); + + } EOS_RETHROW_EXCEPTIONS(transaction_type_exception, "Fail to parse unpacked action data JSON") + + std::cout << fc::to_hex(packed_action_data_string.data(), packed_action_data_string.size()) << std::endl; + + eosio::chain::action act; + act.account = _contract_owner_account; + act.name = _action; + act.authorization = vector{{_auth_account, config::active_name}}; + act.data = std::move(packed_action_data_string); + + _trxs.emplace_back(create_transfer_trx_w_signer(act, _private_key, ++_nonce_prefix, _nonce, _trx_expiration, _chain_id, _last_irr_block_id)); + + std::cout << "Setup p2p transaction provider" << std::endl; + + std::cout + << "Update each trx to qualify as unique and fresh timestamps, re-sign trx, and send each updated transactions via p2p transaction provider" + << std::endl; + + _provider.setup(); + return true; + } + + bool trx_generator::tear_down() { + _provider.log_trxs(_log_dir); + _provider.teardown(); + + std::cout << "Sent transactions: " << _txcount << std::endl; + std::cout << "Tear down p2p transaction provider" << std::endl; + + //Stop & Cleanup + std::cout << "Stop Generation." 
<< std::endl; + stop_generation(); + return true; + } + + void trx_generator::push_transaction(p2p_trx_provider& provider, signed_transaction_w_signer& trx, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const chain_id_type& chain_id, const block_id_type& last_irr_block_id) { + update_resign_transaction(trx._trx, trx._signer, ++nonce_prefix, nonce, trx_expiration, chain_id, last_irr_block_id); + provider.send(trx._trx); + } + + bool trx_generator::generate_and_send() { + try { + if (_trxs.size()) { + size_t index_to_send = _txcount % _trxs.size(); + push_transaction(_provider, _trxs.at(index_to_send), ++_nonce_prefix, _nonce, _trx_expiration, _chain_id, + _last_irr_block_id); + ++_txcount; + } else { + elog("no transactions available to send"); + return false; + } + } catch (const std::exception &e) { + elog("${e}", ("e", e.what())); + return false; + } catch (...) { + elog("unknown exception"); + return false; + } + + return true; + } + + void trx_generator::stop_generation() { + ilog("Stopping transaction generation"); + + if(_txcount) { + ilog("${d} transactions executed, ${t}us / transaction", ("d", _txcount)("t", _total_us / (double) _txcount)); + _txcount = _total_us = 0; + } + } +} diff --git a/tests/trx_generator/trx_generator.hpp b/tests/trx_generator/trx_generator.hpp index 8eee26ea7f..65f54f4998 100644 --- a/tests/trx_generator/trx_generator.hpp +++ b/tests/trx_generator/trx_generator.hpp @@ -18,7 +18,7 @@ namespace eosio::testing { struct transfer_trx_generator { p2p_trx_provider _provider; eosio::chain::chain_id_type _chain_id; - eosio::chain::name _handler_acct; + eosio::chain::name _contract_owner_account; const std::vector _accts; fc::microseconds _trx_expiration; std::vector _private_keys_str_vector; @@ -33,7 +33,7 @@ namespace eosio::testing { uint64_t _nonce = 0; uint64_t _nonce_prefix = 0; - transfer_trx_generator(std::string chain_id_in, std::string handler_acct, const std::vector& accts, + 
transfer_trx_generator(std::string chain_id_in, std::string contract_owner_account, const std::vector& accts, int64_t trx_expr, const std::vector& private_keys_str_vector, std::string lib_id_str, std::string log_dir); void push_transaction(p2p_trx_provider& provider, signed_transaction_w_signer& trx, uint64_t& nonce_prefix, @@ -49,4 +49,41 @@ namespace eosio::testing { void stop_generation(); bool generate_and_send(); }; + + struct trx_generator { + p2p_trx_provider _provider; + eosio::chain::chain_id_type _chain_id; + std::string _abi_data_file_path; + eosio::chain::name _contract_owner_account; + eosio::chain::name _auth_account; + eosio::chain::name _action; + std::string _action_data_file_or_str; + fc::microseconds _trx_expiration; + fc::crypto::private_key _private_key; + eosio::chain::block_id_type _last_irr_block_id; + std::string _log_dir; + + const fc::microseconds abi_serializer_max_time = fc::seconds(10); // No risk to client side serialization taking a long time + + uint64_t _total_us = 0; + uint64_t _txcount = 0; + + std::vector _trxs; + + uint64_t _nonce = 0; + uint64_t _nonce_prefix = 0; + + trx_generator(std::string chain_id_in, const std::string& abi_data_file, std::string contract_owner_account, std::string auth_account, std::string action_name, const std::string& action_data_file_or_str, + int64_t trx_expr, const std::string& private_key_str, std::string lib_id_str, std::string log_dir); + + void push_transaction(p2p_trx_provider& provider, signed_transaction_w_signer& trx, uint64_t& nonce_prefix, + uint64_t& nonce, const fc::microseconds& trx_expiration, const eosio::chain::chain_id_type& chain_id, + const eosio::chain::block_id_type& last_irr_block_id); + + bool setup(); + bool tear_down(); + + void stop_generation(); + bool generate_and_send(); + }; } diff --git a/tests/trx_generator/trx_generator_tests.cpp b/tests/trx_generator/trx_generator_tests.cpp index cc36b9651d..c39728d236 100644 --- a/tests/trx_generator/trx_generator_tests.cpp +++ 
b/tests/trx_generator/trx_generator_tests.cpp @@ -1,4 +1,5 @@ -#include "trx_provider.hpp" +#include +#include #define BOOST_TEST_MODULE trx_generator_tests #include @@ -317,4 +318,20 @@ BOOST_AUTO_TEST_CASE(tps_cant_keep_up_monitored) BOOST_REQUIRE_LT(generator->_calls.size(), expected_trxs); } + +BOOST_AUTO_TEST_CASE(trx_generator_constructor) +{ + std::string chain_id = "999"; + std::string h_acct = "eosio"; + std::string acct = "aaa"; + std::string action_name = "transfer"; + const std::string action_data = "{\"from\":\"aaa\",\"to\":\"bbb\",\"quantity\":\"10.0000 SYS\",\"memo\":\"hello\"}"; + const std::string abi_file = "../../unittests/contracts/eosio.token/eosio.token.abi"; + int64_t trx_expr = 3600; + std::string log_dir = "."; + std::string lib_id_str = "00000062989f69fd251df3e0b274c3364ffc2f4fce73de3f1c7b5e11a4c92f21"; + std::string private_key_str = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"; + auto generator = trx_generator(chain_id, abi_file, h_acct, acct, action_name, action_data, trx_expr, private_key_str, lib_id_str, log_dir); +} + BOOST_AUTO_TEST_SUITE_END() From 00a552f1b0e2ad8eaa229c4631aa787da9f89b13 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 9 Jan 2023 13:21:19 -0600 Subject: [PATCH 059/178] Add action_name, action_data, and abi_file argument support. 
--- tests/performance_tests/launch_transaction_generators.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/performance_tests/launch_transaction_generators.py b/tests/performance_tests/launch_transaction_generators.py index a87602732f..5d58ec4519 100755 --- a/tests/performance_tests/launch_transaction_generators.py +++ b/tests/performance_tests/launch_transaction_generators.py @@ -126,6 +126,9 @@ def parseArgs(): parser.add_argument("target_tps", type=int, help="Goal transactions per second") parser.add_argument("tps_limit_per_generator", type=int, help="Maximum amount of transactions per second a single generator can have.", default=4000) parser.add_argument("log_dir", type=str, help="Path to directory where trx logs should be written.") + parser.add_argument("action_name", type=str, help="The action name applied to the provided action data input") + parser.add_argument("action_data", type=str, help="The path to the json action data file or json action data description string to use") + parser.add_argument("abi_file", type=str, help="The path to the contract abi file to use for the supplied transaction action data") args = parser.parse_args() return args @@ -135,6 +138,7 @@ def main(): trxGenLauncher = TransactionGeneratorsLauncher(chainId=args.chain_id, lastIrreversibleBlockId=args.last_irreversible_block_id, contractOwnerAccount=args.contract_owner_account, accts=args.accounts, privateKeys=args.priv_keys, trxGenDurationSec=args.trx_gen_duration, logDir=args.log_dir, + abiFile=args.abi_file, actionName=args.action_name, actionData=args.action_data, tpsTrxGensConfig=TpsTrxGensConfig(targetTps=args.target_tps, tpsLimitPerGenerator=args.tps_limit_per_generator)) From eb8337adb1f26d528f49d60cc4f1620a9c01c845 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Tue, 10 Jan 2023 12:06:13 -0600 Subject: [PATCH 060/178] use explicit names on constructor for specified contract --- tests/performance_tests/performance_test_basic.py | 4 +++- 1 file changed, 3 
insertions(+), 1 deletion(-) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index a558a480a3..9b9aa4fd28 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -523,7 +523,9 @@ def main(): extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs) testClusterConfig = PerformanceTestBasic.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs, - specifiedContract=PerformanceTestBasic.ClusterConfig.SpecifiedContract(args.account_name, args.owner_public_key, args.active_public_key, args.contract_dir, args.wasm_file, args.abi_file), + specifiedContract=PerformanceTestBasic.ClusterConfig.SpecifiedContract(accountName=args.account_name, + ownerPublicKey=args.owner_public_key, activePublicKey=args.active_public_key, contractDir=args.contract_dir, + wasmFile=args.wasm_file, abiFile=args.abi_file), nodeosVers=Utils.getNodeosVersion().split('.')[0]) ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=args.target_tps, testTrxGenDurationSec=args.test_duration_sec, tpsLimitPerGenerator=args.tps_limit_per_generator, numAddlBlocksToPrune=args.num_blocks_to_prune, logDirRoot=".", delReport=args.del_report, quiet=args.quiet, delPerfLogs=args.del_perf_logs) From 060be863a3007679b088b22caff49a1f8cb1b75d Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Wed, 11 Jan 2023 13:19:36 -0600 Subject: [PATCH 061/178] log production windows, dropped blocks, and dropped transactions in performance harness --- tests/performance_tests/README.md | 715 ++++++++++++++---- tests/performance_tests/log_reader.py | 71 +- .../performance_test_basic.py | 23 +- 3 files changed, 661 insertions(+), 148 deletions(-) diff --git a/tests/performance_tests/README.md 
b/tests/performance_tests/README.md index f510c47d9e..39a0e77573 100644 --- a/tests/performance_tests/README.md +++ b/tests/performance_tests/README.md @@ -338,6 +338,8 @@ The following scripts are typically used by the Performance Harness main script * `--quiet` Whether to quiet printing intermediate results and reports to stdout (default: False) * `--prods-enable-trace-api` Determines whether producer nodes should have eosio::trace_api_plugin enabled (default: False) +* `--print-missing-transactions` + Toggles if missing transactions are be printed upon test completion. (default: False) #### Launch Transaction Generators @@ -816,142 +818,581 @@ The Performance Test Basic generates, by default, a report that details results Expand for full sample report ``` json -{ - "completedRun": true, - "testStart": "2022-11-23T15:18:52.115767", - "testFinish": "2022-11-23T15:20:16.911367", - "Analysis": { - "BlockSize": { - "min": 1937088, - "max": 2971200, - "avg": 2493345.882352941, - "sigma": 186567.07030350564, - "emptyBlocks": 0, - "numBlocks": 17 - }, - "BlocksGuide": { - "firstBlockNum": 2, - "lastBlockNum": 165, - "totalBlocks": 164, - "testStartBlockNum": 106, - "testEndBlockNum": 149, - "setupBlocksCnt": 104, - "tearDownBlocksCnt": 16, - "leadingEmptyBlocksCnt": 1, - "trailingEmptyBlocksCnt": 22, - "configAddlDropCnt": 2, - "testAnalysisBlockCnt": 17 - }, - "TPS": { - "min": 23164, - "max": 28791, - "avg": 25986.9375, - "sigma": 1033.1693634606816, - "emptyBlocks": 0, - "numBlocks": 17, - "configTps": 26000, - "configTestDuration": 10, - "tpsPerGenerator": [ - 3714, - 3714, - 3714, - 3714, - 3714, - 3715, - 3715 - ], - "generatorCount": 7 - }, - "TrxCPU": { - "min": 7.0, - "max": 10893.0, - "avg": 17.314342307692307, - "sigma": 41.16144172726996, - "samples": 260000 - }, - "TrxLatency": { - "min": 0.0009999275207519531, - "max": 0.6380000114440918, - "avg": 0.26549454224201346, - "sigma": 0.14674558675649374, - "samples": 260000 - }, - "TrxNet": { - "min": 24.0, - 
"max": 24.0, - "avg": 24.0, - "sigma": 0.0, - "samples": 260000 - } - }, - "args": { - "killAll": false, - "dontKill": false, - "keepLogs": true, - "dumpErrorDetails": false, - "delay": 1, - "nodesFile": null, - "verbose": false, - "_killEosInstances": true, - "_killWallet": true, - "pnodes": 1, - "totalNodes": 0, - "topo": "mesh", - "extraNodeosArgs": { - "chainPluginArgs": { - "signatureCpuBillablePct": 0, - "chainStateDbSizeMb": 10240, - "chainThreads": 3, - "databaseMapMode": "mapped" - }, - "producerPluginArgs": { - "disableSubjectiveBilling": true, - "lastBlockTimeOffsetUs": 0, - "produceTimeOffsetUs": 0, - "cpuEffortPercent": 100, - "lastBlockCpuEffortPercent": 100, - "producerThreads": 6 - }, - "httpPluginArgs": { - "httpMaxResponseTimeMs": 990000 - }, - "netPluginArgs": { - "netThreads": 2 - } - }, - "useBiosBootFile": false, - "genesisPath": "tests/performance_tests/genesis.json", - "maximumP2pPerHost": 5000, - "maximumClients": 0, - "loggingDict": { - "bios": "off" - }, - "prodsEnableTraceApi": false, - "specificExtraNodeosArgs": { - "1": "--plugin eosio::trace_api_plugin" - }, - "_totalNodes": 2, - "targetTps": 26000, - "testTrxGenDurationSec": 10, - "tpsLimitPerGenerator": 4000, - "numAddlBlocksToPrune": 2, - "logDirRoot": "./performance_test/2022-11-23_12-56-58/testRunLogs", - "delReport": false, - "quiet": false, - "delPerfLogs": false, - "expectedTransactionsSent": 260000, - "logDirBase": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic", - "logDirTimestamp": "2022-11-23_15-18-52", - "logDirTimestampedOptSuffix": "-26000", - "logDirPath": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic/2022-11-23_15-18-52-26000" - }, - "env": { - "system": "Linux", - "os": "posix", - "release": "5.15.74.2-microsoft-standard-WSL2", - "logical_cpu_count": 16 - }, - "nodeosVersion": "v4.0.0-dev" -} + Report: + { + "Analysis": { + "BlockSize": { + "avg": 1920.0, + "emptyBlocks": 0, + "max": 1920, + "min": 1920, + 
"numBlocks": 177, + "sigma": 0.0 + }, + "BlocksGuide": { + "configAddlDropCnt": 2, + "firstBlockNum": 2, + "lastBlockNum": 301, + "leadingEmptyBlocksCnt": 1, + "setupBlocksCnt": 112, + "tearDownBlocksCnt": 0, + "testAnalysisBlockCnt": 177, + "testEndBlockNum": 301, + "testStartBlockNum": 114, + "totalBlocks": 300, + "trailingEmptyBlocksCnt": 6 + }, + "DroppedBlocks": {}, + "DroppedBlocksCount": 0, + "ProductionWindowsAverageSize": 0, + "ProductionWindowsMissed": 0, + "ProductionWindowsTotal": 0, + "TPS": { + "avg": 20.0, + "configTestDuration": 90, + "configTps": 20, + "emptyBlocks": 0, + "generatorCount": 2, + "max": 20, + "min": 20, + "numBlocks": 177, + "sigma": 0.0, + "tpsPerGenerator": [ + 10, + 10 + ] + }, + "TrxCPU": { + "avg": 98.16555555555556, + "max": 380.0, + "min": 8.0, + "samples": 1800, + "sigma": 53.96847158009348 + }, + "TrxLatency": { + "avg": 0.4364516670174069, + "max": 0.6380000114440918, + "min": 0.2349998950958252, + "samples": 1800, + "sigma": 0.1414206312537471 + }, + "TrxNet": { + "avg": 24.0, + "max": 24.0, + "min": 24.0, + "samples": 1800, + "sigma": 0.0 + } + }, + "args": { + "_killEosInstances": true, + "_killWallet": true, + "_totalNodes": 2, + "delPerfLogs": false, + "delReport": false, + "delay": 1, + "dontKill": false, + "dumpErrorDetails": false, + "expectedTransactionsSent": 1800, + "extraNodeosArgs": { + "chainPluginArgs": { + "_abiSerializerMaxTimeMsNodeosArg": "--abi-serializer-max-time-ms", + "_abiSerializerMaxTimeMsNodeosDefault": 15, + "_actionBlacklistNodeosArg": "--action-blacklist", + "_actionBlacklistNodeosDefault": null, + "_actorBlacklistNodeosArg": "--actor-blacklist", + "_actorBlacklistNodeosDefault": null, + "_actorWhitelistNodeosArg": "--actor-whitelist", + "_actorWhitelistNodeosDefault": null, + "_apiAcceptTransactionsNodeosArg": "--api-accept-transactions", + "_apiAcceptTransactionsNodeosDefault": 1, + "_blockLogRetainBlocksNodeosArg": "--block-log-retain-blocks", + "_blockLogRetainBlocksNodeosDefault": null, + 
"_blocksDirNodeosArg": "--blocks-dir", + "_blocksDirNodeosDefault": "\"blocks\"", + "_chainStateDbGuardSizeMbNodeosArg": "--chain-state-db-guard-size-mb", + "_chainStateDbGuardSizeMbNodeosDefault": 128, + "_chainStateDbSizeMbNodeosArg": "--chain-state-db-size-mb", + "_chainStateDbSizeMbNodeosDefault": 1024, + "_chainThreadsNodeosArg": "--chain-threads", + "_chainThreadsNodeosDefault": 2, + "_checkpointNodeosArg": "--checkpoint", + "_checkpointNodeosDefault": null, + "_contractBlacklistNodeosArg": "--contract-blacklist", + "_contractBlacklistNodeosDefault": null, + "_contractWhitelistNodeosArg": "--contract-whitelist", + "_contractWhitelistNodeosDefault": null, + "_contractsConsoleNodeosArg": "--contracts-console", + "_contractsConsoleNodeosDefault": false, + "_databaseMapModeNodeosArg": "--database-map-mode", + "_databaseMapModeNodeosDefault": "mapped", + "_deepMindNodeosArg": "--deep-mind", + "_deepMindNodeosDefault": false, + "_deleteAllBlocksNodeosArg": "--delete-all-blocks", + "_deleteAllBlocksNodeosDefault": false, + "_disableRamBillingNotifyChecksNodeosArg": "--disable-ram-billing-notify-checks", + "_disableRamBillingNotifyChecksNodeosDefault": false, + "_disableReplayOptsNodeosArg": "--disable-replay-opts", + "_disableReplayOptsNodeosDefault": false, + "_enableAccountQueriesNodeosArg": "--enable-account-queries", + "_enableAccountQueriesNodeosDefault": 0, + "_eosVmOcCacheSizeMbNodeosArg": "--eos-vm-oc-cache-size-mb", + "_eosVmOcCacheSizeMbNodeosDefault": 1024, + "_eosVmOcCompileThreadsNodeosArg": "--eos-vm-oc-compile-threads", + "_eosVmOcCompileThreadsNodeosDefault": 1, + "_eosVmOcEnableNodeosArg": "--eos-vm-oc-enable", + "_eosVmOcEnableNodeosDefault": false, + "_extractBuildInfoNodeosArg": "--extract-build-info", + "_extractBuildInfoNodeosDefault": null, + "_extractGenesisJsonNodeosArg": "--extract-genesis-json", + "_extractGenesisJsonNodeosDefault": null, + "_forceAllChecksNodeosArg": "--force-all-checks", + "_forceAllChecksNodeosDefault": false, + 
"_genesisJsonNodeosArg": "--genesis-json", + "_genesisJsonNodeosDefault": null, + "_genesisTimestampNodeosArg": "--genesis-timestamp", + "_genesisTimestampNodeosDefault": null, + "_hardReplayBlockchainNodeosArg": "--hard-replay-blockchain", + "_hardReplayBlockchainNodeosDefault": false, + "_integrityHashOnStartNodeosArg": "--integrity-hash-on-start", + "_integrityHashOnStartNodeosDefault": false, + "_integrityHashOnStopNodeosArg": "--integrity-hash-on-stop", + "_integrityHashOnStopNodeosDefault": false, + "_keyBlacklistNodeosArg": "--key-blacklist", + "_keyBlacklistNodeosDefault": null, + "_maxNonprivilegedInlineActionSizeNodeosArg": "--max-nonprivileged-inline-action-size", + "_maxNonprivilegedInlineActionSizeNodeosDefault": 4096, + "_maximumVariableSignatureLengthNodeosArg": "--maximum-variable-signature-length", + "_maximumVariableSignatureLengthNodeosDefault": 16384, + "_pluginName": "chain_plugin", + "_pluginNamespace": "eosio", + "_printBuildInfoNodeosArg": "--print-build-info", + "_printBuildInfoNodeosDefault": false, + "_printGenesisJsonNodeosArg": "--print-genesis-json", + "_printGenesisJsonNodeosDefault": false, + "_profileAccountNodeosArg": "--profile-account", + "_profileAccountNodeosDefault": null, + "_protocolFeaturesDirNodeosArg": "--protocol-features-dir", + "_protocolFeaturesDirNodeosDefault": "\"protocol_features\"", + "_readModeNodeosArg": "--read-mode", + "_readModeNodeosDefault": "head", + "_replayBlockchainNodeosArg": "--replay-blockchain", + "_replayBlockchainNodeosDefault": false, + "_senderBypassWhiteblacklistNodeosArg": "--sender-bypass-whiteblacklist", + "_senderBypassWhiteblacklistNodeosDefault": null, + "_signatureCpuBillablePctNodeosArg": "--signature-cpu-billable-pct", + "_signatureCpuBillablePctNodeosDefault": 50, + "_snapshotNodeosArg": "--snapshot", + "_snapshotNodeosDefault": null, + "_stateDirNodeosArg": "--state-dir", + "_stateDirNodeosDefault": "\"state\"", + "_terminateAtBlockNodeosArg": "--terminate-at-block", + 
"_terminateAtBlockNodeosDefault": 0, + "_transactionFinalityStatusFailureDurationSecNodeosArg": "--transaction-finality-status-failure-duration-sec", + "_transactionFinalityStatusFailureDurationSecNodeosDefault": 180, + "_transactionFinalityStatusMaxStorageSizeGbNodeosArg": "--transaction-finality-status-max-storage-size-gb", + "_transactionFinalityStatusMaxStorageSizeGbNodeosDefault": null, + "_transactionFinalityStatusSuccessDurationSecNodeosArg": "--transaction-finality-status-success-duration-sec", + "_transactionFinalityStatusSuccessDurationSecNodeosDefault": 180, + "_transactionRetryIntervalSecNodeosArg": "--transaction-retry-interval-sec", + "_transactionRetryIntervalSecNodeosDefault": 20, + "_transactionRetryMaxExpirationSecNodeosArg": "--transaction-retry-max-expiration-sec", + "_transactionRetryMaxExpirationSecNodeosDefault": 120, + "_transactionRetryMaxStorageSizeGbNodeosArg": "--transaction-retry-max-storage-size-gb", + "_transactionRetryMaxStorageSizeGbNodeosDefault": null, + "_truncateAtBlockNodeosArg": "--truncate-at-block", + "_truncateAtBlockNodeosDefault": 0, + "_trustedProducerNodeosArg": "--trusted-producer", + "_trustedProducerNodeosDefault": null, + "_validationModeNodeosArg": "--validation-mode", + "_validationModeNodeosDefault": "full", + "_wasmRuntimeNodeosArg": "--wasm-runtime", + "_wasmRuntimeNodeosDefault": "eos-vm-jit", + "abiSerializerMaxTimeMs": null, + "actionBlacklist": null, + "actorBlacklist": null, + "actorWhitelist": null, + "apiAcceptTransactions": null, + "blockLogRetainBlocks": null, + "blocksDir": null, + "chainStateDbGuardSizeMb": null, + "chainStateDbSizeMb": 10240, + "chainThreads": 2, + "checkpoint": null, + "contractBlacklist": null, + "contractWhitelist": null, + "contractsConsole": null, + "databaseMapMode": "mapped", + "deepMind": null, + "deleteAllBlocks": null, + "disableRamBillingNotifyChecks": null, + "disableReplayOpts": null, + "enableAccountQueries": null, + "eosVmOcCacheSizeMb": null, + 
"eosVmOcCompileThreads": null, + "eosVmOcEnable": null, + "extractBuildInfo": null, + "extractGenesisJson": null, + "forceAllChecks": null, + "genesisJson": null, + "genesisTimestamp": null, + "hardReplayBlockchain": null, + "integrityHashOnStart": null, + "integrityHashOnStop": null, + "keyBlacklist": null, + "maxNonprivilegedInlineActionSize": null, + "maximumVariableSignatureLength": null, + "printBuildInfo": null, + "printGenesisJson": null, + "profileAccount": null, + "protocolFeaturesDir": null, + "readMode": null, + "replayBlockchain": null, + "senderBypassWhiteblacklist": null, + "signatureCpuBillablePct": 0, + "snapshot": null, + "stateDir": null, + "terminateAtBlock": null, + "transactionFinalityStatusFailureDurationSec": null, + "transactionFinalityStatusMaxStorageSizeGb": null, + "transactionFinalityStatusSuccessDurationSec": null, + "transactionRetryIntervalSec": null, + "transactionRetryMaxExpirationSec": null, + "transactionRetryMaxStorageSizeGb": null, + "truncateAtBlock": null, + "trustedProducer": null, + "validationMode": null, + "wasmRuntime": null + }, + "httpClientPluginArgs": { + "_httpsClientRootCertNodeosArg": "--https-client-root-cert", + "_httpsClientRootCertNodeosDefault": null, + "_httpsClientValidatePeersNodeosArg": "--https-client-validate-peers", + "_httpsClientValidatePeersNodeosDefault": 1, + "_pluginName": "http_client_plugin", + "_pluginNamespace": "eosio", + "httpsClientRootCert": null, + "httpsClientValidatePeers": null + }, + "httpPluginArgs": { + "_accessControlAllowCredentialsNodeosArg": "--access-control-allow-credentials", + "_accessControlAllowCredentialsNodeosDefault": false, + "_accessControlAllowHeadersNodeosArg": "--access-control-allow-headers", + "_accessControlAllowHeadersNodeosDefault": null, + "_accessControlAllowOriginNodeosArg": "--access-control-allow-origin", + "_accessControlAllowOriginNodeosDefault": null, + "_accessControlMaxAgeNodeosArg": "--access-control-max-age", + "_accessControlMaxAgeNodeosDefault": 
null, + "_httpAliasNodeosArg": "--http-alias", + "_httpAliasNodeosDefault": null, + "_httpKeepAliveNodeosArg": "--http-keep-alive", + "_httpKeepAliveNodeosDefault": 1, + "_httpMaxBytesInFlightMbNodeosArg": "--http-max-bytes-in-flight-mb", + "_httpMaxBytesInFlightMbNodeosDefault": 500, + "_httpMaxInFlightRequestsNodeosArg": "--http-max-in-flight-requests", + "_httpMaxInFlightRequestsNodeosDefault": -1, + "_httpMaxResponseTimeMsNodeosArg": "--http-max-response-time-ms", + "_httpMaxResponseTimeMsNodeosDefault": 30, + "_httpServerAddressNodeosArg": "--http-server-address", + "_httpServerAddressNodeosDefault": "127.0.0.1:8888", + "_httpThreadsNodeosArg": "--http-threads", + "_httpThreadsNodeosDefault": 2, + "_httpValidateHostNodeosArg": "--http-validate-host", + "_httpValidateHostNodeosDefault": 1, + "_httpsCertificateChainFileNodeosArg": "--https-certificate-chain-file", + "_httpsCertificateChainFileNodeosDefault": null, + "_httpsEcdhCurveNodeosArg": "--https-ecdh-curve", + "_httpsEcdhCurveNodeosDefault": "secp384r1", + "_httpsPrivateKeyFileNodeosArg": "--https-private-key-file", + "_httpsPrivateKeyFileNodeosDefault": null, + "_httpsServerAddressNodeosArg": "--https-server-address", + "_httpsServerAddressNodeosDefault": null, + "_maxBodySizeNodeosArg": "--max-body-size", + "_maxBodySizeNodeosDefault": 2097152, + "_pluginName": "http_plugin", + "_pluginNamespace": "eosio", + "_unixSocketPathNodeosArg": "--unix-socket-path", + "_unixSocketPathNodeosDefault": null, + "_verboseHttpErrorsNodeosArg": "--verbose-http-errors", + "_verboseHttpErrorsNodeosDefault": false, + "accessControlAllowCredentials": null, + "accessControlAllowHeaders": null, + "accessControlAllowOrigin": null, + "accessControlMaxAge": null, + "httpAlias": null, + "httpKeepAlive": null, + "httpMaxBytesInFlightMb": null, + "httpMaxInFlightRequests": null, + "httpMaxResponseTimeMs": 990000, + "httpServerAddress": null, + "httpThreads": null, + "httpValidateHost": null, + "httpsCertificateChainFile": null, + 
"httpsEcdhCurve": null, + "httpsPrivateKeyFile": null, + "httpsServerAddress": null, + "maxBodySize": null, + "unixSocketPath": null, + "verboseHttpErrors": null + }, + "netPluginArgs": { + "_agentNameNodeosArg": "--agent-name", + "_agentNameNodeosDefault": "EOS Test Agent", + "_allowedConnectionNodeosArg": "--allowed-connection", + "_allowedConnectionNodeosDefault": "any", + "_connectionCleanupPeriodNodeosArg": "--connection-cleanup-period", + "_connectionCleanupPeriodNodeosDefault": 30, + "_maxCleanupTimeMsecNodeosArg": "--max-cleanup-time-msec", + "_maxCleanupTimeMsecNodeosDefault": 10, + "_maxClientsNodeosArg": "--max-clients", + "_maxClientsNodeosDefault": 25, + "_netThreadsNodeosArg": "--net-threads", + "_netThreadsNodeosDefault": 2, + "_p2pAcceptTransactionsNodeosArg": "--p2p-accept-transactions", + "_p2pAcceptTransactionsNodeosDefault": 1, + "_p2pDedupCacheExpireTimeSecNodeosArg": "--p2p-dedup-cache-expire-time-sec", + "_p2pDedupCacheExpireTimeSecNodeosDefault": 10, + "_p2pKeepaliveIntervalMsNodeosArg": "--p2p-keepalive-interval-ms", + "_p2pKeepaliveIntervalMsNodeosDefault": 10000, + "_p2pListenEndpointNodeosArg": "--p2p-listen-endpoint", + "_p2pListenEndpointNodeosDefault": "0.0.0.0:9876", + "_p2pMaxNodesPerHostNodeosArg": "--p2p-max-nodes-per-host", + "_p2pMaxNodesPerHostNodeosDefault": 1, + "_p2pPeerAddressNodeosArg": "--p2p-peer-address", + "_p2pPeerAddressNodeosDefault": null, + "_p2pServerAddressNodeosArg": "--p2p-server-address", + "_p2pServerAddressNodeosDefault": null, + "_peerKeyNodeosArg": "--peer-key", + "_peerKeyNodeosDefault": null, + "_peerLogFormatNodeosArg": "--peer-log-format", + "_peerLogFormatNodeosDefault": "[\"${_name}\" - ${_cid} ${_ip}:${_port}] ", + "_peerPrivateKeyNodeosArg": "--peer-private-key", + "_peerPrivateKeyNodeosDefault": null, + "_pluginName": "net_plugin", + "_pluginNamespace": "eosio", + "_syncFetchSpanNodeosArg": "--sync-fetch-span", + "_syncFetchSpanNodeosDefault": 100, + "_useSocketReadWatermarkNodeosArg": 
"--use-socket-read-watermark", + "_useSocketReadWatermarkNodeosDefault": 0, + "agentName": null, + "allowedConnection": null, + "connectionCleanupPeriod": null, + "maxCleanupTimeMsec": null, + "maxClients": null, + "netThreads": 2, + "p2pAcceptTransactions": null, + "p2pDedupCacheExpireTimeSec": null, + "p2pKeepaliveIntervalMs": null, + "p2pListenEndpoint": null, + "p2pMaxNodesPerHost": null, + "p2pPeerAddress": null, + "p2pServerAddress": null, + "peerKey": null, + "peerLogFormat": null, + "peerPrivateKey": null, + "syncFetchSpan": null, + "useSocketReadWatermark": null + }, + "producerPluginArgs": { + "_cpuEffortPercentNodeosArg": "--cpu-effort-percent", + "_cpuEffortPercentNodeosDefault": 80, + "_disableSubjectiveAccountBillingNodeosArg": "--disable-subjective-account-billing", + "_disableSubjectiveAccountBillingNodeosDefault": false, + "_disableSubjectiveApiBillingNodeosArg": "--disable-subjective-api-billing", + "_disableSubjectiveApiBillingNodeosDefault": 1, + "_disableSubjectiveBillingNodeosArg": "--disable-subjective-billing", + "_disableSubjectiveBillingNodeosDefault": 1, + "_disableSubjectiveP2pBillingNodeosArg": "--disable-subjective-p2p-billing", + "_disableSubjectiveP2pBillingNodeosDefault": 1, + "_enableStaleProductionNodeosArg": "--enable-stale-production", + "_enableStaleProductionNodeosDefault": false, + "_greylistAccountNodeosArg": "--greylist-account", + "_greylistAccountNodeosDefault": null, + "_greylistLimitNodeosArg": "--greylist-limit", + "_greylistLimitNodeosDefault": 1000, + "_incomingDeferRatioNodeosArg": "--incoming-defer-ratio", + "_incomingDeferRatioNodeosDefault": 1, + "_incomingTransactionQueueSizeMbNodeosArg": "--incoming-transaction-queue-size-mb", + "_incomingTransactionQueueSizeMbNodeosDefault": 1024, + "_lastBlockCpuEffortPercentNodeosArg": "--last-block-cpu-effort-percent", + "_lastBlockCpuEffortPercentNodeosDefault": 80, + "_lastBlockTimeOffsetUsNodeosArg": "--last-block-time-offset-us", + "_lastBlockTimeOffsetUsNodeosDefault": 
-200000, + "_maxBlockCpuUsageThresholdUsNodeosArg": "--max-block-cpu-usage-threshold-us", + "_maxBlockCpuUsageThresholdUsNodeosDefault": 5000, + "_maxBlockNetUsageThresholdBytesNodeosArg": "--max-block-net-usage-threshold-bytes", + "_maxBlockNetUsageThresholdBytesNodeosDefault": 1024, + "_maxIrreversibleBlockAgeNodeosArg": "--max-irreversible-block-age", + "_maxIrreversibleBlockAgeNodeosDefault": -1, + "_maxScheduledTransactionTimePerBlockMsNodeosArg": "--max-scheduled-transaction-time-per-block-ms", + "_maxScheduledTransactionTimePerBlockMsNodeosDefault": 100, + "_maxTransactionTimeNodeosArg": "--max-transaction-time", + "_maxTransactionTimeNodeosDefault": 30, + "_pauseOnStartupNodeosArg": "--pause-on-startup", + "_pauseOnStartupNodeosDefault": false, + "_pluginName": "producer_plugin", + "_pluginNamespace": "eosio", + "_privateKeyNodeosArg": "--private-key", + "_privateKeyNodeosDefault": null, + "_produceTimeOffsetUsNodeosArg": "--produce-time-offset-us", + "_produceTimeOffsetUsNodeosDefault": 0, + "_producerNameNodeosArg": "--producer-name", + "_producerNameNodeosDefault": null, + "_producerThreadsNodeosArg": "--producer-threads", + "_producerThreadsNodeosDefault": 2, + "_signatureProviderNodeosArg": "--signature-provider", + "_signatureProviderNodeosDefault": "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV=KEY:5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3", + "_snapshotsDirNodeosArg": "--snapshots-dir", + "_snapshotsDirNodeosDefault": "\"snapshots\"", + "_subjectiveAccountDecayTimeMinutesNodeosArg": "--subjective-account-decay-time-minutes", + "_subjectiveAccountDecayTimeMinutesNodeosDefault": 1440, + "_subjectiveAccountMaxFailuresNodeosArg": "--subjective-account-max-failures", + "_subjectiveAccountMaxFailuresNodeosDefault": 3, + "_subjectiveCpuLeewayUsNodeosArg": "--subjective-cpu-leeway-us", + "_subjectiveCpuLeewayUsNodeosDefault": 31000, + "cpuEffortPercent": 80, + "disableSubjectiveAccountBilling": null, + "disableSubjectiveApiBilling": null, 
+ "disableSubjectiveBilling": true, + "disableSubjectiveP2pBilling": null, + "enableStaleProduction": null, + "greylistAccount": null, + "greylistLimit": null, + "incomingDeferRatio": null, + "incomingTransactionQueueSizeMb": null, + "lastBlockCpuEffortPercent": 80, + "lastBlockTimeOffsetUs": -200000, + "maxBlockCpuUsageThresholdUs": null, + "maxBlockNetUsageThresholdBytes": null, + "maxIrreversibleBlockAge": null, + "maxScheduledTransactionTimePerBlockMs": null, + "maxTransactionTime": null, + "pauseOnStartup": null, + "privateKey": null, + "produceTimeOffsetUs": -200000, + "producerName": null, + "producerThreads": 2, + "signatureProvider": null, + "snapshotsDir": null, + "subjectiveAccountDecayTimeMinutes": null, + "subjectiveAccountMaxFailures": null, + "subjectiveCpuLeewayUs": null + }, + "resourceMonitorPluginArgs": { + "_pluginName": "resource_monitor_plugin", + "_pluginNamespace": "eosio", + "_resourceMonitorIntervalSecondsNodeosArg": "--resource-monitor-interval-seconds", + "_resourceMonitorIntervalSecondsNodeosDefault": 2, + "_resourceMonitorNotShutdownOnThresholdExceededNodeosArg": "--resource-monitor-not-shutdown-on-threshold-exceeded", + "_resourceMonitorNotShutdownOnThresholdExceededNodeosDefault": false, + "_resourceMonitorSpaceThresholdNodeosArg": "--resource-monitor-space-threshold", + "_resourceMonitorSpaceThresholdNodeosDefault": 90, + "_resourceMonitorWarningIntervalNodeosArg": "--resource-monitor-warning-interval", + "_resourceMonitorWarningIntervalNodeosDefault": 30, + "resourceMonitorIntervalSeconds": null, + "resourceMonitorNotShutdownOnThresholdExceeded": null, + "resourceMonitorSpaceThreshold": null, + "resourceMonitorWarningInterval": null + }, + "signatureProviderPluginArgs": { + "_keosdProviderTimeoutNodeosArg": "--keosd-provider-timeout", + "_keosdProviderTimeoutNodeosDefault": 5, + "_pluginName": "signature_provider_plugin", + "_pluginNamespace": "eosio", + "keosdProviderTimeout": null + }, + "stateHistoryPluginArgs": { + 
"_chainStateHistoryNodeosArg": "--chain-state-history", + "_chainStateHistoryNodeosDefault": false, + "_deleteStateHistoryNodeosArg": "--delete-state-history", + "_deleteStateHistoryNodeosDefault": false, + "_pluginName": "state_history_plugin", + "_pluginNamespace": "eosio", + "_stateHistoryDirNodeosArg": "--state-history-dir", + "_stateHistoryDirNodeosDefault": "\"state-history\"", + "_stateHistoryEndpointNodeosArg": "--state-history-endpoint", + "_stateHistoryEndpointNodeosDefault": "127.0.0.1:8080", + "_stateHistoryLogRetainBlocksNodeosArg": "--state-history-log-retain-blocks", + "_stateHistoryLogRetainBlocksNodeosDefault": null, + "_stateHistoryUnixSocketPathNodeosArg": "--state-history-unix-socket-path", + "_stateHistoryUnixSocketPathNodeosDefault": null, + "_traceHistoryDebugModeNodeosArg": "--trace-history-debug-mode", + "_traceHistoryDebugModeNodeosDefault": false, + "_traceHistoryNodeosArg": "--trace-history", + "_traceHistoryNodeosDefault": false, + "chainStateHistory": null, + "deleteStateHistory": null, + "stateHistoryDir": null, + "stateHistoryEndpoint": null, + "stateHistoryLogRetainBlocks": null, + "stateHistoryUnixSocketPath": null, + "traceHistory": null, + "traceHistoryDebugMode": null + }, + "traceApiPluginArgs": { + "_pluginName": "trace_api_plugin", + "_pluginNamespace": "eosio", + "_traceDirNodeosArg": "--trace-dir", + "_traceDirNodeosDefault": "\"traces\"", + "_traceMinimumIrreversibleHistoryBlocksNodeosArg": "--trace-minimum-irreversible-history-blocks", + "_traceMinimumIrreversibleHistoryBlocksNodeosDefault": -1, + "_traceMinimumUncompressedIrreversibleHistoryBlocksNodeosArg": "--trace-minimum-uncompressed-irreversible-history-blocks", + "_traceMinimumUncompressedIrreversibleHistoryBlocksNodeosDefault": -1, + "_traceNoAbisNodeosArg": "--trace-no-abis", + "_traceNoAbisNodeosDefault": false, + "_traceRpcAbiNodeosArg": "--trace-rpc-abi", + "_traceRpcAbiNodeosDefault": null, + "_traceSliceStrideNodeosArg": "--trace-slice-stride", + 
"_traceSliceStrideNodeosDefault": 10000, + "traceDir": null, + "traceMinimumIrreversibleHistoryBlocks": null, + "traceMinimumUncompressedIrreversibleHistoryBlocks": null, + "traceNoAbis": null, + "traceRpcAbi": null, + "traceSliceStride": null + } + }, + "genesisPath": "tests/performance_tests/genesis.json", + "keepLogs": true, + "killAll": true, + "logDirBase": "p", + "logDirPath": "p/2023-01-11_19-13-31-20", + "logDirRoot": ".", + "logDirTimestamp": "2023-01-11_19-13-31", + "logDirTimestampedOptSuffix": "-20", + "loggingDict": { + "bios": "off" + }, + "maximumClients": 0, + "maximumP2pPerHost": 5000, + "nodeosVers": "v4", + "nodesFile": null, + "numAddlBlocksToPrune": 2, + "pnodes": 1, + "printMissingTransactions": false, + "prodsEnableTraceApi": false, + "quiet": false, + "specificExtraNodeosArgs": { + "1": "--plugin eosio::trace_api_plugin" + }, + "targetTps": 20, + "testTrxGenDurationSec": 90, + "topo": "mesh", + "totalNodes": 1, + "tpsLimitPerGenerator": 10, + "useBiosBootFile": false, + "verbose": true + }, + "completedRun": true, + "env": { + "logical_cpu_count": 16, + "os": "posix", + "release": "5.10.16.3-microsoft-standard-WSL2", + "system": "Linux" + }, + "nodeosVersion": "v4.0.0-dev", + "testFinish": "2023-01-11T19:16:01.623808", + "testStart": "2023-01-11T19:13:31.346325" + } ``` diff --git a/tests/performance_tests/log_reader.py b/tests/performance_tests/log_reader.py index f04b9aaeeb..ce95ac9434 100644 --- a/tests/performance_tests/log_reader.py +++ b/tests/performance_tests/log_reader.py @@ -114,6 +114,19 @@ def timestamp(self): self._timestamp = "" self._calcdTimeEpoch = 0 +@dataclass +class productionWindow(): + producer: str = "" + startBlock: int = 0 + endBlock: int = 0 + blockCount: int = 0 + +@dataclass +class productionWindows(): + totalWindows: int = 0 + averageWindowSize: float = 0 + missedWindows: int = 0 + @dataclass class chainBlocksGuide(): firstBlockNum: int = 0 @@ -150,6 +163,7 @@ def __init__(self): self.totalElapsed = 0 
self.totalTime = 0 self.totalLatency = 0 + self.droppedBlocks = {} def __eq__(self, other): return self.startBlock == other.startBlock and\ self.ceaseBlock == other.ceaseBlock and\ @@ -179,10 +193,11 @@ def assertEquality(self, other): def selectedOpen(path): return gzip.open if path.suffix == '.gz' else open -def scrapeLog(data, path): +def scrapeLog(data: chainData, path): selectedopen = selectedOpen(path) with selectedopen(path, 'rt') as f: - blockResult = re.findall(r'Received block ([0-9a-fA-F]*).* #(\d+) .*trxs: (\d+)(.*)', f.read()) + line = f.read() + blockResult = re.findall(r'Received block ([0-9a-fA-F]*).* #(\d+) .*trxs: (\d+)(.*)', line) if data.startBlock is None: data.startBlock = 2 if data.ceaseBlock is None: @@ -201,6 +216,9 @@ def scrapeLog(data, path): data.updateTotal(int(value[2]), 0, 0, 0, 0, int(v2Logging[0])) else: print("Error: Unknown log format") + droppedBlocks = re.findall(r'dropped incoming block #(\d+) id: ([0-9a-fA-F]+)', line) + for block in droppedBlocks: + data.droppedBlocks[block[0]] = block[1] def scrapeTrxGenLog(trxSent, path): selectedopen = selectedOpen(path) @@ -233,6 +251,40 @@ def populateTrxSentTimestamp(trxSent: dict, trxDict: dict, notFound): else: notFound.append(sentTrxId) +def getProductionWindows(prodDict: dict, blockDict: dict, data: chainData): + prod = "" + count = 0 + blocksFromCurProd = 0 + numProdWindows = 0 + for k, v in blockDict.items(): + count += 1 + if prod == "": + prod = v.producer + if prod != v.producer or count+data.startBlock == data.ceaseBlock: + prodDict[str(numProdWindows)] = productionWindow(prod, count-blocksFromCurProd+data.startBlock-1, count+data.startBlock-2, blocksFromCurProd) + prod = v.producer + blocksFromCurProd = 1 + numProdWindows += 1 + else: + blocksFromCurProd += 1 + return prodDict + +def calcProductionWindows(prodDict: dict): + prodWindows = productionWindows() + prodWindows.totalWindows = len(prodDict) - 2 + totalBlocksForAverage = 0 + for k, v in prodDict.items(): + if k != 
"0" and k != str(prodWindows.totalWindows+1): + if v.blockCount < 12: + prodWindows.missedWindows += 1 + totalBlocksForAverage += v.blockCount + if prodWindows.totalWindows <= 0: + prodWindows.totalWindows = 0 + prodWindows.averageWindowSize = 0 + else: + prodWindows.averageWindowSize = totalBlocksForAverage / prodWindows.totalWindows + return prodWindows + def calcChainGuide(data: chainData, numAddlBlocksToDrop=0) -> chainBlocksGuide: """Calculates guide to understanding key points/blocks in chain data. In particular, test scenario phases like setup, teardown, etc. @@ -356,7 +408,7 @@ def calcTrxLatencyCpuNetStats(trxDict : dict, blockDict: dict): basicStats(float(np.min(npLatencyCpuNetList[:,2])), float(np.max(npLatencyCpuNetList[:,2])), float(np.average(npLatencyCpuNetList[:,2])), float(np.std(npLatencyCpuNetList[:,2])), len(npLatencyCpuNetList)) def createReport(guide: chainBlocksGuide, tpsTestConfig: TpsTestConfig, tpsStats: stats, blockSizeStats: stats, trxLatencyStats: basicStats, trxCpuStats: basicStats, - trxNetStats: basicStats, testStart: datetime, testFinish: datetime, argsDict: dict, completedRun: bool) -> dict: + trxNetStats: basicStats, droppedBlocks, prodWindows: productionWindows, testStart: datetime, testFinish: datetime, argsDict: dict, completedRun: bool) -> dict: report = {} report['completedRun'] = completedRun report['testStart'] = testStart @@ -372,6 +424,11 @@ def createReport(guide: chainBlocksGuide, tpsTestConfig: TpsTestConfig, tpsStats report['Analysis']['TrxCPU'] = asdict(trxCpuStats) report['Analysis']['TrxLatency'] = asdict(trxLatencyStats) report['Analysis']['TrxNet'] = asdict(trxNetStats) + report['Analysis']['DroppedBlocks'] = droppedBlocks + report['Analysis']['DroppedBlocksCount'] = len(droppedBlocks) + report['Analysis']['ProductionWindowsTotal'] = prodWindows.totalWindows + report['Analysis']['ProductionWindowsAverageSize'] = prodWindows.averageWindowSize + report['Analysis']['ProductionWindowsMissed'] = 
prodWindows.missedWindows report['args'] = argsDict report['env'] = {'system': system(), 'os': os.name, 'release': release(), 'logical_cpu_count': os.cpu_count()} report['nodeosVersion'] = Utils.getNodeosVersion() @@ -405,13 +462,19 @@ def calcAndReport(data: chainData, tpsTestConfig: TpsTestConfig, artifacts: Arti notFound = [] populateTrxSentTimestamp(trxSent, trxDict, notFound) + prodDict = {} + getProductionWindows(prodDict, blockDict, data) + if len(notFound) > 0: print(f"Transactions logged as sent but NOT FOUND in block!! lost {len(notFound)} out of {len(trxSent)}") + if argsDict.get("printMissingTransactions"): + print(notFound) guide = calcChainGuide(data, tpsTestConfig.numBlocksToPrune) trxLatencyStats, trxCpuStats, trxNetStats = calcTrxLatencyCpuNetStats(trxDict, blockDict) tpsStats = scoreTransfersPerSecond(data, guide) blkSizeStats = calcBlockSizeStats(data, guide) + prodWindows = calcProductionWindows(prodDict) if not tpsTestConfig.quiet: print(f"Blocks Guide: {guide}\nTPS: {tpsStats}\nBlock Size: {blkSizeStats}\nTrx Latency: {trxLatencyStats}\nTrx CPU: {trxCpuStats}\nTrx Net: {trxNetStats}") @@ -423,7 +486,7 @@ def calcAndReport(data: chainData, tpsTestConfig: TpsTestConfig, artifacts: Arti finish = datetime.utcnow() report = createReport(guide=guide, tpsTestConfig=tpsTestConfig, tpsStats=tpsStats, blockSizeStats=blkSizeStats, trxLatencyStats=trxLatencyStats, - trxCpuStats=trxCpuStats, trxNetStats=trxNetStats, testStart=start, testFinish=finish, argsDict=argsDict, completedRun=completedRun) + trxCpuStats=trxCpuStats, trxNetStats=trxNetStats, droppedBlocks=data.droppedBlocks, prodWindows=prodWindows, testStart=start, testFinish=finish, argsDict=argsDict, completedRun=completedRun) return report def exportReportAsJSON(report: json, exportPath): diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 5aa0fe953f..f6ada20cf7 100755 --- a/tests/performance_tests/performance_test_basic.py +++ 
b/tests/performance_tests/performance_test_basic.py @@ -107,6 +107,7 @@ class PtbConfig: quiet: bool=False delPerfLogs: bool=False expectedTransactionsSent: int = field(default_factory=int, init=False) + printMissingTransactions: bool=False def __post_init__(self): self.expectedTransactionsSent = self.testTrxGenDurationSec * self.targetTps @@ -440,16 +441,17 @@ def createBaseArgumentParser(): choices=["mapped", "heap", "locked"], default="mapped") ptbBaseParserGroup.add_argument("--net-threads", type=int, help="Number of worker threads in net_plugin thread pool", default=2) ptbBaseParserGroup.add_argument("--disable-subjective-billing", type=bool, help="Disable subjective CPU billing for API/P2P transactions", default=True) - ptbBaseParserGroup.add_argument("--last-block-time-offset-us", type=int, help="Offset of last block producing time in microseconds. Valid range 0 .. -block_time_interval.", default=0) - ptbBaseParserGroup.add_argument("--produce-time-offset-us", type=int, help="Offset of non last block producing time in microseconds. Valid range 0 .. -block_time_interval.", default=0) - ptbBaseParserGroup.add_argument("--cpu-effort-percent", type=int, help="Percentage of cpu block production time used to produce block. Whole number percentages, e.g. 80 for 80%%", default=100) - ptbBaseParserGroup.add_argument("--last-block-cpu-effort-percent", type=int, help="Percentage of cpu block production time used to produce last block. Whole number percentages, e.g. 80 for 80%%", default=100) + ptbBaseParserGroup.add_argument("--last-block-time-offset-us", type=int, help="Offset of last block producing time in microseconds. Valid range 0 .. -block_time_interval.", default=-200000) + ptbBaseParserGroup.add_argument("--produce-time-offset-us", type=int, help="Offset of non last block producing time in microseconds. Valid range 0 .. 
-block_time_interval.", default=-200000) + ptbBaseParserGroup.add_argument("--cpu-effort-percent", type=int, help="Percentage of cpu block production time used to produce block. Whole number percentages, e.g. 80 for 80%%", default=80) + ptbBaseParserGroup.add_argument("--last-block-cpu-effort-percent", type=int, help="Percentage of cpu block production time used to produce last block. Whole number percentages, e.g. 80 for 80%%", default=80) ptbBaseParserGroup.add_argument("--producer-threads", type=int, help="Number of worker threads in producer thread pool", default=2) ptbBaseParserGroup.add_argument("--http-max-response-time-ms", type=int, help="Maximum time for processing a request, -1 for unlimited", default=990000) ptbBaseParserGroup.add_argument("--del-perf-logs", help="Whether to delete performance test specific logs.", action='store_true') ptbBaseParserGroup.add_argument("--del-report", help="Whether to delete overarching performance run report.", action='store_true') ptbBaseParserGroup.add_argument("--quiet", help="Whether to quiet printing intermediate results and reports to stdout", action='store_true') ptbBaseParserGroup.add_argument("--prods-enable-trace-api", help="Determines whether producer nodes should have eosio::trace_api_plugin enabled", action='store_true') + ptbBaseParserGroup.add_argument("--print-missing-transactions", type=bool, help="Toggles if missing transactions are printed upon test completion.", default=False) return ptbBaseParser @staticmethod @@ -483,9 +485,15 @@ def main(): chainPluginArgs = ChainPluginArgs(signatureCpuBillablePct=args.signature_cpu_billable_pct, chainStateDbSizeMb=args.chain_state_db_size_mb, chainThreads=args.chain_threads, databaseMapMode=args.database_map_mode) + + lbto = args.last_block_time_offset_us + lbcep = args.last_block_cpu_effort_percent + if args.p > 1: + lbto = -200000 + lbcep = 80 producerPluginArgs = ProducerPluginArgs(disableSubjectiveBilling=args.disable_subjective_billing, - 
lastBlockTimeOffsetUs=args.last_block_time_offset_us, produceTimeOffsetUs=args.produce_time_offset_us, - cpuEffortPercent=args.cpu_effort_percent, lastBlockCpuEffortPercent=args.last_block_cpu_effort_percent, + lastBlockTimeOffsetUs=lbto, produceTimeOffsetUs=args.produce_time_offset_us, + cpuEffortPercent=args.cpu_effort_percent, lastBlockCpuEffortPercent=lbcep, producerThreads=args.producer_threads) httpPluginArgs = HttpPluginArgs(httpMaxResponseTimeMs=args.http_max_response_time_ms) netPluginArgs = NetPluginArgs(netThreads=args.net_threads) @@ -495,7 +503,8 @@ def main(): prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs, nodeosVers=Utils.getNodeosVersion().split('.')[0]) ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=args.target_tps, testTrxGenDurationSec=args.test_duration_sec, tpsLimitPerGenerator=args.tps_limit_per_generator, - numAddlBlocksToPrune=args.num_blocks_to_prune, logDirRoot=".", delReport=args.del_report, quiet=args.quiet, delPerfLogs=args.del_perf_logs) + numAddlBlocksToPrune=args.num_blocks_to_prune, logDirRoot=".", delReport=args.del_report, quiet=args.quiet, delPerfLogs=args.del_perf_logs, + printMissingTransactions=args.print_missing_transactions) myTest = PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptbConfig=ptbConfig) testSuccessful = myTest.runTest() From 26f0b5a1114cbcd49b3f0d27f71b534d2be545ea Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Wed, 11 Jan 2023 14:07:02 -0600 Subject: [PATCH 062/178] change production window size to constant --- tests/performance_tests/log_reader.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/performance_tests/log_reader.py b/tests/performance_tests/log_reader.py index 5b2d07def1..e4a89c4729 100644 --- a/tests/performance_tests/log_reader.py +++ b/tests/performance_tests/log_reader.py @@ -20,6 +20,8 @@ errorExit = Utils.errorExit cmdError = Utils.cmdError +COMPLETEPRODUCTIONWINDOWSIZE = 12 + 
@dataclass class ArtifactPaths: nodeosLogPath: Path = Path("") @@ -279,7 +281,7 @@ def calcProductionWindows(prodDict: dict): totalBlocksForAverage = 0 for k, v in prodDict.items(): if k != "0" and k != str(prodWindows.totalWindows+1): - if v.blockCount < 12: + if v.blockCount < COMPLETEPRODUCTIONWINDOWSIZE: prodWindows.missedWindows += 1 totalBlocksForAverage += v.blockCount if prodWindows.totalWindows <= 0: From c26432e9bea2d92178e01fd69a6f4a559214e63f Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Wed, 11 Jan 2023 14:21:05 -0600 Subject: [PATCH 063/178] more explicit constructor names --- tests/performance_tests/performance_test.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index 83b84eb6a0..7f270e0f4f 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -508,7 +508,9 @@ def main(): extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs) testClusterConfig = PerformanceTestBasic.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs, - specifiedContract=PerformanceTestBasic.ClusterConfig.SpecifiedContract(args.account_name, args.owner_public_key, args.active_public_key, args.contract_dir, args.wasm_file, args.abi_file), + specifiedContract=PerformanceTestBasic.ClusterConfig.SpecifiedContract(accountName=args.account_name, + ownerPublicKey=args.owner_public_key, activePublicKey=args.active_public_key, + contractDir=args.contract_dir, wasmFile=args.wasm_file, abiFile=args.abi_file), nodeosVers=Utils.getNodeosVersion().split('.')[0]) ptConfig = PerformanceTest.PtConfig(testDurationSec=args.test_iteration_duration_sec, From 48174929b2ba59cce7c53c7a6291843547e7d521 Mon Sep 17 
00:00:00 2001 From: Peter Oschwald Date: Wed, 11 Jan 2023 20:04:06 -0600 Subject: [PATCH 064/178] Rename for clarity as this is not a string. --- tests/trx_generator/trx_generator.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/trx_generator/trx_generator.cpp b/tests/trx_generator/trx_generator.cpp index a8f8fe2081..3dba78dcd2 100644 --- a/tests/trx_generator/trx_generator.cpp +++ b/tests/trx_generator/trx_generator.cpp @@ -242,21 +242,21 @@ namespace eosio::testing { fc::variant unpacked_action_data_json = json_from_file_or_string(_action_data_file_or_str); std::cout << "action data variant: " << fc::json::to_pretty_string(unpacked_action_data_json) << std::endl; - bytes packed_action_data_string; + bytes packed_action_data; try { auto action_type = abi.get_action_type( _action ); FC_ASSERT( !action_type.empty(), "Unknown action ${action} in contract ${contract}", ("action", _action)( "contract", _auth_account )); - packed_action_data_string = abi.variant_to_binary( action_type, unpacked_action_data_json, abi_serializer::create_yield_function( abi_serializer_max_time ) ); + packed_action_data = abi.variant_to_binary( action_type, unpacked_action_data_json, abi_serializer::create_yield_function( abi_serializer_max_time ) ); } EOS_RETHROW_EXCEPTIONS(transaction_type_exception, "Fail to parse unpacked action data JSON") - std::cout << fc::to_hex(packed_action_data_string.data(), packed_action_data_string.size()) << std::endl; + std::cout << fc::to_hex(packed_action_data.data(), packed_action_data.size()) << std::endl; eosio::chain::action act; act.account = _contract_owner_account; act.name = _action; act.authorization = vector{{_auth_account, config::active_name}}; - act.data = std::move(packed_action_data_string); + act.data = std::move(packed_action_data); _trxs.emplace_back(create_transfer_trx_w_signer(act, _private_key, ++_nonce_prefix, _nonce, _trx_expiration, _chain_id, _last_irr_block_id)); From 
e290f59aefc65bc2113d953ba732c146de5079e0 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 11 Jan 2023 20:37:14 -0600 Subject: [PATCH 065/178] Refactor user defined trx data into easily modifiable dataclass UserTrxData. Update performance_test_basic to support user specified transaction data through the use of the --user-trx-data command argument and the UserTrxData class. Now supports creation of named accounts for use in user defined transactions. Cluster supports populating wallet with named accounts. Renamed command line argument for making use of user provided trx data to --user-trx-data. --- tests/TestHarness/Cluster.py | 7 ++- tests/performance_tests/CMakeLists.txt | 2 +- .../performance_test_basic.py | 59 ++++++++++++------- 3 files changed, 45 insertions(+), 23 deletions(-) diff --git a/tests/TestHarness/Cluster.py b/tests/TestHarness/Cluster.py index be5a53b320..2142e5d675 100644 --- a/tests/TestHarness/Cluster.py +++ b/tests/TestHarness/Cluster.py @@ -700,7 +700,7 @@ def createAccountKeys(count): # create account keys and import into wallet. Wallet initialization will be user responsibility # also imports defproducera and defproducerb accounts - def populateWallet(self, accountsCount, wallet): + def populateWallet(self, accountsCount, wallet, accountNames: list=None): if self.walletMgr is None: Utils.Print("ERROR: WalletMgr hasn't been initialized.") return False @@ -723,6 +723,10 @@ def populateWallet(self, accountsCount, wallet): Utils.Print("ERROR: Failed to import key for account %s" % (self.defproducerbAccount.name)) return False + if accountNames is not None: + for idx, name in enumerate(accountNames): + accounts[idx].name = name + for account in accounts: Utils.Print("Importing keys for account %s into wallet %s." 
% (account.name, wallet.name)) if not self.walletMgr.importKey(account, wallet): @@ -1569,7 +1573,6 @@ def cleanup(self): for f in self.filesToCleanup: os.remove(f) - # Create accounts and validates that the last transaction is received on root node def createAccounts(self, creator, waitForTransBlock=True, stakedDeposit=1000, validationNodeIndex=0): if self.accounts is None: diff --git a/tests/performance_tests/CMakeLists.txt b/tests/performance_tests/CMakeLists.txt index 45e771f5f8..a0827d7b60 100644 --- a/tests/performance_tests/CMakeLists.txt +++ b/tests/performance_tests/CMakeLists.txt @@ -10,7 +10,7 @@ configure_file(genesis.json genesis.json COPYONLY) configure_file(validate_nodeos_plugin_args.py validate_nodeos_plugin_args.py COPYONLY) add_test(NAME performance_test_basic COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --clean-run WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME performance_test_basic_ex_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --clean-run --exercise-trx-specification WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic_ex_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --clean-run --user-trx-data WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME log_reader_tests COMMAND tests/performance_tests/log_reader_tests.py WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME validate_nodeos_plugin_args COMMAND tests/performance_tests/validate_nodeos_plugin_args.py WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST performance_test_basic PROPERTY LABELS nonparallelizable_tests) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 95b332d330..a5dcafb1b3 100755 --- a/tests/performance_tests/performance_test_basic.py +++ 
b/tests/performance_tests/performance_test_basic.py @@ -19,6 +19,13 @@ from datetime import datetime from pathlib import Path +@dataclass +class UserTrxData: + accounts: list = field(default_factory=lambda: ["testacct1", "testacct2"]) + abiFile: Path = Path("unittests")/"contracts"/"eosio.token"/"eosio.token.abi" + actionName: str = "transfer" + actionData: str = f'{{"from":"testacct1","to":"testacct2","quantity":"0.0001 CUR","memo":"transaction specified"}}' + class PerformanceTestBasic: @dataclass class PtbTpsTestResult: @@ -108,7 +115,7 @@ class PtbConfig: quiet: bool=False delPerfLogs: bool=False expectedTransactionsSent: int = field(default_factory=int, init=False) - exerciseTrxSpecification: bool=False + useUserTrxData: bool=False def __post_init__(self): self.expectedTransactionsSent = self.testTrxGenDurationSec * self.targetTps @@ -253,16 +260,22 @@ def launchCluster(self): specificExtraNodeosArgs=self.clusterConfig.specificExtraNodeosArgs ) - def setupWalletAndAccounts(self): + def setupWalletAndAccounts(self, accountCnt: int=2, accountNames: list=None): self.wallet = self.walletMgr.create('default') - self.cluster.populateWallet(2, self.wallet) - self.cluster.createAccounts(self.cluster.eosioAccount, stakedDeposit=0, validationNodeIndex=self.validationNodeId) - - self.account1Name = self.cluster.accounts[0].name - self.account2Name = self.cluster.accounts[1].name - - self.account1PrivKey = self.cluster.accounts[0].activePrivateKey - self.account2PrivKey = self.cluster.accounts[1].activePrivateKey + self.accountNames=[] + self.accountPrivKeys=[] + if accountNames is not None: + self.cluster.populateWallet(accountsCount=len(accountNames), wallet=self.wallet, accountNames=accountNames) + self.cluster.createAccounts(self.cluster.eosioAccount, stakedDeposit=0, validationNodeIndex=self.validationNodeId) + for index in range(0, len(accountNames)): + self.accountNames.append(self.cluster.accounts[index].name) + 
self.accountPrivKeys.append(self.cluster.accounts[index].activePrivateKey) + else: + self.cluster.populateWallet(accountsCount=accountCnt, wallet=self.wallet) + self.cluster.createAccounts(self.cluster.eosioAccount, stakedDeposit=0, validationNodeIndex=self.validationNodeId) + for index in range(0, accountCnt): + self.accountNames.append(self.cluster.accounts[index].name) + self.accountPrivKeys.append(self.cluster.accounts[index].activePrivateKey) def runTpsTest(self) -> PtbTpsTestResult: completedRun = False @@ -273,18 +286,26 @@ def runTpsTest(self) -> PtbTpsTestResult: lib_id = info['last_irreversible_block_id'] self.data = log_reader.chainData() + abiFile=None + actionName=None + actionData=None + if (self.ptbConfig.useUserTrxData): + self.userTrxData = UserTrxData() + self.setupWalletAndAccounts(accountCnt=len(self.userTrxData.accounts), accountNames=self.userTrxData.accounts) + abiFile = self.userTrxData.abiFile + actionName = self.userTrxData.actionName + actionData = self.userTrxData.actionData + else: + self.setupWalletAndAccounts() + self.cluster.biosNode.kill(signal.SIGTERM) self.data.startBlock = self.waitForEmptyBlocks(self.validationNode, self.emptyBlockGoal) tpsTrxGensConfig = ltg.TpsTrxGensConfig(targetTps=self.ptbConfig.targetTps, tpsLimitPerGenerator=self.ptbConfig.tpsLimitPerGenerator) - abiFile = Path("unittests")/"contracts"/"eosio.token"/"eosio.token.abi" if self.ptbConfig.exerciseTrxSpecification else None - actionName = "transfer" if self.ptbConfig.exerciseTrxSpecification else None - actionData = f'{{"from":"{self.account1Name}","to":"{self.account2Name}","quantity":"0.0001 CUR","memo":"transaction specified"}}' if self.ptbConfig.exerciseTrxSpecification else None - trxGenLauncher = ltg.TransactionGeneratorsLauncher(chainId=chainId, lastIrreversibleBlockId=lib_id, - contractOwnerAccount=self.cluster.eosioAccount.name, accts=f"{self.account1Name},{self.account2Name}", - privateKeys=f"{self.account1PrivKey},{self.account2PrivKey}", 
trxGenDurationSec=self.ptbConfig.testTrxGenDurationSec, + contractOwnerAccount=self.cluster.eosioAccount.name, accts=','.join(map(str, self.accountNames)), + privateKeys=','.join(map(str, self.accountPrivKeys)), trxGenDurationSec=self.ptbConfig.testTrxGenDurationSec, logDir=self.trxGenLogDirPath, abiFile=abiFile, actionName=actionName, actionData=actionData, tpsTrxGensConfig=tpsTrxGensConfig) trxGenExitCodes = trxGenLauncher.launch() @@ -366,8 +387,6 @@ def preTestSpinup(self): if self.launchCluster() == False: self.errorExit('Failed to stand up cluster.') - self.setupWalletAndAccounts() - def postTpsTestSteps(self): self.queryBlockTrxData(self.validationNode, self.blockDataPath, self.blockTrxDataPath, self.data.startBlock, self.data.ceaseBlock) @@ -473,7 +492,7 @@ def createArgumentParser(): ptbParserGroup.add_argument("--target-tps", type=int, help="The target transfers per second to send during test", default=8000) ptbParserGroup.add_argument("--test-duration-sec", type=int, help="The duration of transfer trx generation for the test in seconds", default=90) - ptbParserGroup.add_argument("--exercise-trx-specification", help="Test Transaction Generator: abi, action name, action data api", action='store_true') + ptbParserGroup.add_argument("--user-trx-data", help="Make use of user defined trx data in UserTrxData class", action='store_true') return ptbParser @staticmethod @@ -505,7 +524,7 @@ def main(): nodeosVers=Utils.getNodeosVersion().split('.')[0]) ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=args.target_tps, testTrxGenDurationSec=args.test_duration_sec, tpsLimitPerGenerator=args.tps_limit_per_generator, numAddlBlocksToPrune=args.num_blocks_to_prune, logDirRoot=".", delReport=args.del_report, quiet=args.quiet, delPerfLogs=args.del_perf_logs, - exerciseTrxSpecification=args.exercise_trx_specification) + useUserTrxData=args.user_trx_data) myTest = PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptbConfig=ptbConfig) 
testSuccessful = myTest.runTest() From 03750c994aa61d4877ae708c2920245ff8e9526b Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 11 Jan 2023 21:24:17 -0600 Subject: [PATCH 066/178] Refactor common transaction generator code into base class. Fix trx_expr to be fc::microseconds instead of int64_t --- tests/trx_generator/main.cpp | 8 +- tests/trx_generator/trx_generator.cpp | 86 ++++----------------- tests/trx_generator/trx_generator.hpp | 48 ++++-------- tests/trx_generator/trx_generator_tests.cpp | 2 +- 4 files changed, 38 insertions(+), 106 deletions(-) diff --git a/tests/trx_generator/main.cpp b/tests/trx_generator/main.cpp index 6cc4d0d2f5..b052509174 100644 --- a/tests/trx_generator/main.cpp +++ b/tests/trx_generator/main.cpp @@ -190,16 +190,18 @@ int main(int argc, char** argv) { ilog("Contract owner account ${acct}", ("acct", contract_owner_acct)); ilog("Transfer accounts ${accts}", ("accts", accts)); ilog("Account private keys ${priv_keys}", ("priv_keys", p_keys)); - ilog("Transaction expiration microsections ${expr}", ("expr", trx_expr)); + ilog("Transaction expiration seconds ${expr}", ("expr", trx_expr)); ilog("Reference LIB block id ${LIB}", ("LIB", lib_id_str)); ilog("Transaction Generation Duration (sec) ${dur}", ("dur", gen_duration)); ilog("Target generation Transaction Per Second (TPS) ${tps}", ("tps", target_tps)); ilog("Logs directory ${logDir}", ("logDir", log_dir_in)); + fc::microseconds trx_expr_ms = fc::seconds(trx_expr); + std::shared_ptr monitor; if (transaction_specified) { auto generator = std::make_shared(chain_id_in, abi_file_path_in, contract_owner_acct, account_str_vector.at(0), action_name_in, - action_data_file_or_str, trx_expr, private_keys_str_vector.at(0), lib_id_str, log_dir_in); + action_data_file_or_str, trx_expr_ms, private_keys_str_vector.at(0), lib_id_str, log_dir_in); monitor = std::make_shared(spinup_time_us, max_lag_per, max_lag_duration_us); trx_tps_tester tester{generator, monitor, gen_duration, target_tps}; @@ 
-207,7 +209,7 @@ int main(int argc, char** argv) { return OTHER_FAIL; } } else { - auto generator = std::make_shared(chain_id_in, contract_owner_acct, account_str_vector, trx_expr, private_keys_str_vector, + auto generator = std::make_shared(chain_id_in, contract_owner_acct, account_str_vector, trx_expr_ms, private_keys_str_vector, lib_id_str, log_dir_in); monitor = std::make_shared(spinup_time_us, max_lag_per, max_lag_duration_us); diff --git a/tests/trx_generator/trx_generator.cpp b/tests/trx_generator/trx_generator.cpp index 3dba78dcd2..3465eb5d21 100644 --- a/tests/trx_generator/trx_generator.cpp +++ b/tests/trx_generator/trx_generator.cpp @@ -91,27 +91,13 @@ namespace eosio::testing { return actions_pairs_vector; } - transfer_trx_generator::transfer_trx_generator(std::string chain_id_in, std::string contract_owner_account, - const std::vector& accts, int64_t trx_expr, const std::vector& private_keys_str_vector, - std::string lib_id_str, std::string log_dir) : - _provider(), _chain_id(chain_id_in), _contract_owner_account(contract_owner_account), _accts(accts), - _trx_expiration(trx_expr*1000000), _private_keys_str_vector(private_keys_str_vector), - _last_irr_block_id(fc::variant(lib_id_str).as()), _log_dir(log_dir) { - } - - void transfer_trx_generator::push_transaction(p2p_trx_provider& provider, signed_transaction_w_signer& trx, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const chain_id_type& chain_id, const block_id_type& last_irr_block_id) { - update_resign_transaction(trx._trx, trx._signer, ++nonce_prefix, nonce, trx_expiration, chain_id, last_irr_block_id); - provider.send(trx._trx); - } - - void transfer_trx_generator::stop_generation() { - ilog("Stopping transaction generation"); + trx_generator_base::trx_generator_base(std::string chain_id_in, std::string contract_owner_account, fc::microseconds trx_expr, std::string lib_id_str, std::string log_dir) + : _provider(), _chain_id(chain_id_in), 
_contract_owner_account(contract_owner_account), _trx_expiration(trx_expr), + _last_irr_block_id(fc::variant(lib_id_str).as()), _log_dir(log_dir){} - if(_txcount) { - ilog("${d} transactions executed, ${t}us / transaction", ("d", _txcount)("t", _total_us / (double) _txcount)); - _txcount = _total_us = 0; - } - } + transfer_trx_generator::transfer_trx_generator(std::string chain_id_in, std::string contract_owner_account, const std::vector& accts, + fc::microseconds trx_expr, const std::vector& private_keys_str_vector, std::string lib_id_str, std::string log_dir) + : trx_generator_base(chain_id_in, contract_owner_account, trx_expr, lib_id_str, log_dir), _accts(accts), _private_keys_str_vector(private_keys_str_vector) {} vector transfer_trx_generator::get_accounts(const vector& account_str_vector) { vector acct_name_list; @@ -170,41 +156,6 @@ namespace eosio::testing { return true; } - bool transfer_trx_generator::tear_down() { - _provider.log_trxs(_log_dir); - _provider.teardown(); - - std::cout << "Sent transactions: " << _txcount << std::endl; - std::cout << "Tear down p2p transaction provider" << std::endl; - - //Stop & Cleanup - std::cout << "Stop Generation." << std::endl; - stop_generation(); - return true; - } - - bool transfer_trx_generator::generate_and_send() { - try { - if (_trxs.size()) { - size_t index_to_send = _txcount % _trxs.size(); - push_transaction(_provider, _trxs.at(index_to_send), ++_nonce_prefix, _nonce, _trx_expiration, _chain_id, - _last_irr_block_id); - ++_txcount; - } else { - elog("no transactions available to send"); - return false; - } - } catch (const std::exception &e) { - elog("${e}", ("e", e.what())); - return false; - } catch (...) 
{ - elog("unknown exception"); - return false; - } - - return true; - } - fc::variant json_from_file_or_string(const string& file_or_str, fc::json::parse_type ptype = fc::json::parse_type::legacy_parser) { regex r("^[ \t]*[\{\[]"); @@ -221,12 +172,9 @@ namespace eosio::testing { } trx_generator::trx_generator(std::string chain_id_in, const std::string& abi_data_file, std::string contract_owner_account, std::string auth_account, std::string action_name, - const std::string& action_data_file_or_str, int64_t trx_expr, const std::string& private_key_str, std::string lib_id_str, std::string log_dir) : - _provider(), _chain_id(chain_id_in), _abi_data_file_path(abi_data_file), _contract_owner_account(contract_owner_account), _auth_account(auth_account), _action(action_name), _action_data_file_or_str(action_data_file_or_str), - _trx_expiration(trx_expr*1000000), _private_key(fc::crypto::private_key(private_key_str)), - _last_irr_block_id(fc::variant(lib_id_str).as()), _log_dir(log_dir) - { - } + const std::string& action_data_file_or_str, fc::microseconds trx_expr, const std::string& private_key_str, std::string lib_id_str, std::string log_dir) + : trx_generator_base(chain_id_in, contract_owner_account, trx_expr, lib_id_str, log_dir), _abi_data_file_path(abi_data_file), _auth_account(auth_account), + _action(action_name), _action_data_file_or_str(action_data_file_or_str), _private_key(fc::crypto::private_key(private_key_str)) {} bool trx_generator::setup() { _nonce_prefix = 0; @@ -270,7 +218,7 @@ namespace eosio::testing { return true; } - bool trx_generator::tear_down() { + bool trx_generator_base::tear_down() { _provider.log_trxs(_log_dir); _provider.teardown(); @@ -283,12 +231,7 @@ namespace eosio::testing { return true; } - void trx_generator::push_transaction(p2p_trx_provider& provider, signed_transaction_w_signer& trx, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const chain_id_type& chain_id, const block_id_type& 
last_irr_block_id) { - update_resign_transaction(trx._trx, trx._signer, ++nonce_prefix, nonce, trx_expiration, chain_id, last_irr_block_id); - provider.send(trx._trx); - } - - bool trx_generator::generate_and_send() { + bool trx_generator_base::generate_and_send() { try { if (_trxs.size()) { size_t index_to_send = _txcount % _trxs.size(); @@ -310,7 +253,12 @@ namespace eosio::testing { return true; } - void trx_generator::stop_generation() { + void trx_generator_base::push_transaction(p2p_trx_provider& provider, signed_transaction_w_signer& trx, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const chain_id_type& chain_id, const block_id_type& last_irr_block_id) { + update_resign_transaction(trx._trx, trx._signer, ++nonce_prefix, nonce, trx_expiration, chain_id, last_irr_block_id); + provider.send(trx._trx); + } + + void trx_generator_base::stop_generation() { ilog("Stopping transaction generation"); if(_txcount) { diff --git a/tests/trx_generator/trx_generator.hpp b/tests/trx_generator/trx_generator.hpp index 65f54f4998..58ac363c89 100644 --- a/tests/trx_generator/trx_generator.hpp +++ b/tests/trx_generator/trx_generator.hpp @@ -15,13 +15,11 @@ namespace eosio::testing { fc::crypto::private_key _signer; }; - struct transfer_trx_generator { + struct trx_generator_base { p2p_trx_provider _provider; eosio::chain::chain_id_type _chain_id; eosio::chain::name _contract_owner_account; - const std::vector _accts; fc::microseconds _trx_expiration; - std::vector _private_keys_str_vector; eosio::chain::block_id_type _last_irr_block_id; std::string _log_dir; @@ -33,57 +31,41 @@ namespace eosio::testing { uint64_t _nonce = 0; uint64_t _nonce_prefix = 0; - transfer_trx_generator(std::string chain_id_in, std::string contract_owner_account, const std::vector& accts, - int64_t trx_expr, const std::vector& private_keys_str_vector, std::string lib_id_str, std::string log_dir); + trx_generator_base(std::string chain_id_in, std::string 
contract_owner_account, fc::microseconds trx_expr, std::string lib_id_str, std::string log_dir); void push_transaction(p2p_trx_provider& provider, signed_transaction_w_signer& trx, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const eosio::chain::chain_id_type& chain_id, const eosio::chain::block_id_type& last_irr_block_id); + bool generate_and_send(); + bool tear_down(); + void stop_generation(); + }; + + struct transfer_trx_generator : public trx_generator_base { + const std::vector _accts; + std::vector _private_keys_str_vector; + + transfer_trx_generator(std::string chain_id_in, std::string contract_owner_account, const std::vector& accts, + fc::microseconds trx_expr, const std::vector& private_keys_str_vector, std::string lib_id_str, std::string log_dir); std::vector get_accounts(const std::vector& account_str_vector); std::vector get_private_keys(const std::vector& priv_key_str_vector); bool setup(); - bool tear_down(); - - void stop_generation(); - bool generate_and_send(); }; - struct trx_generator { - p2p_trx_provider _provider; - eosio::chain::chain_id_type _chain_id; + struct trx_generator : public trx_generator_base{ std::string _abi_data_file_path; - eosio::chain::name _contract_owner_account; eosio::chain::name _auth_account; eosio::chain::name _action; std::string _action_data_file_or_str; - fc::microseconds _trx_expiration; fc::crypto::private_key _private_key; - eosio::chain::block_id_type _last_irr_block_id; - std::string _log_dir; const fc::microseconds abi_serializer_max_time = fc::seconds(10); // No risk to client side serialization taking a long time - uint64_t _total_us = 0; - uint64_t _txcount = 0; - - std::vector _trxs; - - uint64_t _nonce = 0; - uint64_t _nonce_prefix = 0; - trx_generator(std::string chain_id_in, const std::string& abi_data_file, std::string contract_owner_account, std::string auth_account, std::string action_name, const std::string& action_data_file_or_str, - int64_t trx_expr, const 
std::string& private_key_str, std::string lib_id_str, std::string log_dir); - - void push_transaction(p2p_trx_provider& provider, signed_transaction_w_signer& trx, uint64_t& nonce_prefix, - uint64_t& nonce, const fc::microseconds& trx_expiration, const eosio::chain::chain_id_type& chain_id, - const eosio::chain::block_id_type& last_irr_block_id); + fc::microseconds trx_expr, const std::string& private_key_str, std::string lib_id_str, std::string log_dir); bool setup(); - bool tear_down(); - - void stop_generation(); - bool generate_and_send(); }; } diff --git a/tests/trx_generator/trx_generator_tests.cpp b/tests/trx_generator/trx_generator_tests.cpp index c39728d236..c4779ee0ce 100644 --- a/tests/trx_generator/trx_generator_tests.cpp +++ b/tests/trx_generator/trx_generator_tests.cpp @@ -327,7 +327,7 @@ BOOST_AUTO_TEST_CASE(trx_generator_constructor) std::string action_name = "transfer"; const std::string action_data = "{\"from\":\"aaa\",\"to\":\"bbb\",\"quantity\":\"10.0000 SYS\",\"memo\":\"hello\"}"; const std::string abi_file = "../../unittests/contracts/eosio.token/eosio.token.abi"; - int64_t trx_expr = 3600; + fc::microseconds trx_expr = fc::seconds(3600); std::string log_dir = "."; std::string lib_id_str = "00000062989f69fd251df3e0b274c3364ffc2f4fce73de3f1c7b5e11a4c92f21"; std::string private_key_str = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"; From 777d9d30bf5918a4fb6f106d45eef3f69d3b59f4 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 11 Jan 2023 22:55:12 -0600 Subject: [PATCH 067/178] Remove UserTrxData in favor of loading user data from json file. Replace prev argument --user-trx-data with --user-trx-data-file. Read user trx data in from file and use that directly in lieu of UserTrxData. Update tests to exercise this path with sample userTrxData.json file. Update report construction to support Paths. 
--- tests/performance_tests/CMakeLists.txt | 3 +- tests/performance_tests/log_reader.py | 3 ++ .../performance_test_basic.py | 32 +++++++++---------- tests/performance_tests/userTrxData.json | 12 +++++++ 4 files changed, 33 insertions(+), 17 deletions(-) create mode 100644 tests/performance_tests/userTrxData.json diff --git a/tests/performance_tests/CMakeLists.txt b/tests/performance_tests/CMakeLists.txt index a0827d7b60..7d589f42c1 100644 --- a/tests/performance_tests/CMakeLists.txt +++ b/tests/performance_tests/CMakeLists.txt @@ -8,9 +8,10 @@ configure_file(nodeos_log_2_0_14.txt.gz nodeos_log_2_0_14.txt.gz COPYONLY) configure_file(nodeos_log_3_2.txt.gz nodeos_log_3_2.txt.gz COPYONLY) configure_file(genesis.json genesis.json COPYONLY) configure_file(validate_nodeos_plugin_args.py validate_nodeos_plugin_args.py COPYONLY) +configure_file(userTrxData.json userTrxData.json COPYONLY) add_test(NAME performance_test_basic COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --clean-run WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME performance_test_basic_ex_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --clean-run --user-trx-data WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic_ex_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --clean-run --user-trx-data-file tests/performance_tests/userTrxData.json WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME log_reader_tests COMMAND tests/performance_tests/log_reader_tests.py WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME validate_nodeos_plugin_args COMMAND tests/performance_tests/validate_nodeos_plugin_args.py WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST performance_test_basic PROPERTY LABELS nonparallelizable_tests) diff --git 
a/tests/performance_tests/log_reader.py b/tests/performance_tests/log_reader.py index 4cd550bf45..b5b09ca2e5 100644 --- a/tests/performance_tests/log_reader.py +++ b/tests/performance_tests/log_reader.py @@ -17,6 +17,7 @@ from platform import release, system from datetime import datetime from typing import List +from pathlib import Path Print = Utils.Print errorExit = Utils.errorExit @@ -382,6 +383,8 @@ def default(self, obj): return obj.isoformat() if obj is None: return "Unknown" + if isinstance(obj, Path): + return str(obj) return json.JSONEncoder.default(self, obj) def reportAsJSON(report: dict) -> json: diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index a5dcafb1b3..1f6b315486 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -7,6 +7,7 @@ import sys import shutil import signal +import json import log_reader import launch_transaction_generators as ltg @@ -19,13 +20,6 @@ from datetime import datetime from pathlib import Path -@dataclass -class UserTrxData: - accounts: list = field(default_factory=lambda: ["testacct1", "testacct2"]) - abiFile: Path = Path("unittests")/"contracts"/"eosio.token"/"eosio.token.abi" - actionName: str = "transfer" - actionData: str = f'{{"from":"testacct1","to":"testacct2","quantity":"0.0001 CUR","memo":"transaction specified"}}' - class PerformanceTestBasic: @dataclass class PtbTpsTestResult: @@ -115,7 +109,7 @@ class PtbConfig: quiet: bool=False delPerfLogs: bool=False expectedTransactionsSent: int = field(default_factory=int, init=False) - useUserTrxData: bool=False + userTrxDataFile: Path=None def __post_init__(self): self.expectedTransactionsSent = self.testTrxGenDurationSec * self.targetTps @@ -277,6 +271,10 @@ def setupWalletAndAccounts(self, accountCnt: int=2, accountNames: list=None): self.accountNames.append(self.cluster.accounts[index].name) 
self.accountPrivKeys.append(self.cluster.accounts[index].activePrivateKey) + def readUserTrxDataFromFile(self, userTrxDataFile: Path): + with open(userTrxDataFile) as f: + self.userTrxDataDict = json.load(f) + def runTpsTest(self) -> PtbTpsTestResult: completedRun = False self.producerNode = self.cluster.getNode(self.producerNodeId) @@ -289,12 +287,12 @@ def runTpsTest(self) -> PtbTpsTestResult: abiFile=None actionName=None actionData=None - if (self.ptbConfig.useUserTrxData): - self.userTrxData = UserTrxData() - self.setupWalletAndAccounts(accountCnt=len(self.userTrxData.accounts), accountNames=self.userTrxData.accounts) - abiFile = self.userTrxData.abiFile - actionName = self.userTrxData.actionName - actionData = self.userTrxData.actionData + if (self.ptbConfig.userTrxDataFile is not None): + self.readUserTrxDataFromFile(self.ptbConfig.userTrxDataFile) + self.setupWalletAndAccounts(accountCnt=len(self.userTrxDataDict['accounts']), accountNames=self.userTrxDataDict['accounts']) + abiFile = self.userTrxDataDict['abiFile'] + actionName = self.userTrxDataDict['actionName'] + actionData = json.dumps(self.userTrxDataDict['actionData']) else: self.setupWalletAndAccounts() @@ -492,7 +490,8 @@ def createArgumentParser(): ptbParserGroup.add_argument("--target-tps", type=int, help="The target transfers per second to send during test", default=8000) ptbParserGroup.add_argument("--test-duration-sec", type=int, help="The duration of transfer trx generation for the test in seconds", default=90) - ptbParserGroup.add_argument("--user-trx-data", help="Make use of user defined trx data in UserTrxData class", action='store_true') + ptbParserGroup.add_argument("--user-trx-data-file", type=str, help="Path to userTrxData.json") + return ptbParser @staticmethod @@ -524,8 +523,9 @@ def main(): nodeosVers=Utils.getNodeosVersion().split('.')[0]) ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=args.target_tps, testTrxGenDurationSec=args.test_duration_sec, 
tpsLimitPerGenerator=args.tps_limit_per_generator, numAddlBlocksToPrune=args.num_blocks_to_prune, logDirRoot=".", delReport=args.del_report, quiet=args.quiet, delPerfLogs=args.del_perf_logs, - useUserTrxData=args.user_trx_data) + userTrxDataFile=Path(args.user_trx_data_file) if args.user_trx_data_file is not None else None) myTest = PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptbConfig=ptbConfig) + testSuccessful = myTest.runTest() exitCode = 0 if testSuccessful else 1 diff --git a/tests/performance_tests/userTrxData.json b/tests/performance_tests/userTrxData.json new file mode 100644 index 0000000000..b247e44dc5 --- /dev/null +++ b/tests/performance_tests/userTrxData.json @@ -0,0 +1,12 @@ +{ + "accounts": ["testacct1", "testacct2"], + "abiFile": "unittests/contracts/eosio.token/eosio.token.abi", + "actionName": "transfer", + "actionData": + { + "from":"testacct1", + "to":"testacct2", + "quantity":"0.0001 CUR", + "memo":"transaction specified" + } +} From d86782461b5914287da0e9b2ac48689013a155c4 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 12 Jan 2023 08:45:27 -0600 Subject: [PATCH 068/178] Replace cout in favor of xlog statements. --- tests/trx_generator/trx_generator.cpp | 38 ++++++++++----------------- 1 file changed, 14 insertions(+), 24 deletions(-) diff --git a/tests/trx_generator/trx_generator.cpp b/tests/trx_generator/trx_generator.cpp index 3465eb5d21..86975247c4 100644 --- a/tests/trx_generator/trx_generator.cpp +++ b/tests/trx_generator/trx_generator.cpp @@ -127,18 +127,14 @@ namespace eosio::testing { _nonce_prefix = 0; _nonce = static_cast(fc::time_point::now().sec_since_epoch()) << 32; - std::cout - << "Create All Initial Transfer Action/Reaction Pairs (acct 1 -> acct 2, acct 2 -> acct 1) between all provided accounts." 
- << std::endl; + ilog("Create All Initial Transfer Action/Reaction Pairs (acct 1 -> acct 2, acct 2 -> acct 1) between all provided accounts."); const auto action_pairs_vector = create_initial_transfer_actions(salt, period, _contract_owner_account, accounts, private_key_vector); - std::cout - << "Stop Generation (form potential ongoing generation in preparation for starting new generation run)." - << std::endl; + ilog("Stop Generation (form potential ongoing generation in preparation for starting new generation run)."); stop_generation(); - std::cout << "Create All Initial Transfer Transactions (one for each created action)." << std::endl; + ilog("Create All Initial Transfer Transactions (one for each created action)."); _trxs = create_initial_transfer_transactions(action_pairs_vector, ++_nonce_prefix, _nonce, @@ -146,11 +142,9 @@ namespace eosio::testing { _chain_id, _last_irr_block_id); - std::cout << "Setup p2p transaction provider" << std::endl; + ilog("Setup p2p transaction provider"); - std::cout - << "Update each trx to qualify as unique and fresh timestamps, re-sign trx, and send each updated transactions via p2p transaction provider" - << std::endl; + ilog("Update each trx to qualify as unique and fresh timestamps, re-sign trx, and send each updated transactions via p2p transaction provider"); _provider.setup(); return true; @@ -180,15 +174,13 @@ namespace eosio::testing { _nonce_prefix = 0; _nonce = static_cast(fc::time_point::now().sec_since_epoch()) << 32; - std::cout - << "Stop Generation (form potential ongoing generation in preparation for starting new generation run)." - << std::endl; + ilog("Stop Generation (form potential ongoing generation in preparation for starting new generation run)."); stop_generation(); - std::cout << "Create Initial Transaction with action data." 
<< std::endl; + ilog("Create Initial Transaction with action data."); abi_serializer abi = abi_serializer(fc::json::from_file(_abi_data_file_path).as(), abi_serializer::create_yield_function( abi_serializer_max_time )); fc::variant unpacked_action_data_json = json_from_file_or_string(_action_data_file_or_str); - std::cout << "action data variant: " << fc::json::to_pretty_string(unpacked_action_data_json) << std::endl; + ilog("action data variant: ${data}", ("data", fc::json::to_pretty_string(unpacked_action_data_json))); bytes packed_action_data; try { @@ -198,7 +190,7 @@ namespace eosio::testing { } EOS_RETHROW_EXCEPTIONS(transaction_type_exception, "Fail to parse unpacked action data JSON") - std::cout << fc::to_hex(packed_action_data.data(), packed_action_data.size()) << std::endl; + ilog("${packed_data}", ("packed_data", fc::to_hex(packed_action_data.data(), packed_action_data.size()))); eosio::chain::action act; act.account = _contract_owner_account; @@ -208,11 +200,9 @@ namespace eosio::testing { _trxs.emplace_back(create_transfer_trx_w_signer(act, _private_key, ++_nonce_prefix, _nonce, _trx_expiration, _chain_id, _last_irr_block_id)); - std::cout << "Setup p2p transaction provider" << std::endl; + ilog("Setup p2p transaction provider"); - std::cout - << "Update each trx to qualify as unique and fresh timestamps, re-sign trx, and send each updated transactions via p2p transaction provider" - << std::endl; + ilog("Update each trx to qualify as unique and fresh timestamps, re-sign trx, and send each updated transactions via p2p transaction provider"); _provider.setup(); return true; @@ -222,11 +212,11 @@ namespace eosio::testing { _provider.log_trxs(_log_dir); _provider.teardown(); - std::cout << "Sent transactions: " << _txcount << std::endl; - std::cout << "Tear down p2p transaction provider" << std::endl; + ilog("Sent transactions: ${cnt}", ("cnt", _txcount)); + ilog("Tear down p2p transaction provider"); //Stop & Cleanup - std::cout << "Stop Generation." 
<< std::endl; + ilog("Stop Generation."); stop_generation(); return true; } From 3775f5050aec2c55b0c7476b1f361ed0584f020c Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Thu, 12 Jan 2023 16:49:38 -0600 Subject: [PATCH 069/178] undo unintentional default change. Inform users when adjusting lbto and lbcep from defaults. Add DroppedTransactions to report --- tests/performance_tests/README.md | 1153 +++++++++-------- tests/performance_tests/log_reader.py | 6 +- .../performance_test_basic.py | 10 +- 3 files changed, 586 insertions(+), 583 deletions(-) diff --git a/tests/performance_tests/README.md b/tests/performance_tests/README.md index 39a0e77573..bceb4ff502 100644 --- a/tests/performance_tests/README.md +++ b/tests/performance_tests/README.md @@ -818,581 +818,582 @@ The Performance Test Basic generates, by default, a report that details results Expand for full sample report ``` json - Report: - { - "Analysis": { - "BlockSize": { - "avg": 1920.0, - "emptyBlocks": 0, - "max": 1920, - "min": 1920, - "numBlocks": 177, - "sigma": 0.0 - }, - "BlocksGuide": { - "configAddlDropCnt": 2, - "firstBlockNum": 2, - "lastBlockNum": 301, - "leadingEmptyBlocksCnt": 1, - "setupBlocksCnt": 112, - "tearDownBlocksCnt": 0, - "testAnalysisBlockCnt": 177, - "testEndBlockNum": 301, - "testStartBlockNum": 114, - "totalBlocks": 300, - "trailingEmptyBlocksCnt": 6 - }, - "DroppedBlocks": {}, - "DroppedBlocksCount": 0, - "ProductionWindowsAverageSize": 0, - "ProductionWindowsMissed": 0, - "ProductionWindowsTotal": 0, - "TPS": { - "avg": 20.0, - "configTestDuration": 90, - "configTps": 20, - "emptyBlocks": 0, - "generatorCount": 2, - "max": 20, - "min": 20, - "numBlocks": 177, - "sigma": 0.0, - "tpsPerGenerator": [ - 10, - 10 - ] - }, - "TrxCPU": { - "avg": 98.16555555555556, - "max": 380.0, - "min": 8.0, - "samples": 1800, - "sigma": 53.96847158009348 - }, - "TrxLatency": { - "avg": 0.4364516670174069, - "max": 0.6380000114440918, - "min": 0.2349998950958252, - "samples": 1800, - "sigma": 
0.1414206312537471 - }, - "TrxNet": { - "avg": 24.0, - "max": 24.0, - "min": 24.0, - "samples": 1800, - "sigma": 0.0 - } - }, - "args": { - "_killEosInstances": true, - "_killWallet": true, - "_totalNodes": 2, - "delPerfLogs": false, - "delReport": false, - "delay": 1, - "dontKill": false, - "dumpErrorDetails": false, - "expectedTransactionsSent": 1800, - "extraNodeosArgs": { - "chainPluginArgs": { - "_abiSerializerMaxTimeMsNodeosArg": "--abi-serializer-max-time-ms", - "_abiSerializerMaxTimeMsNodeosDefault": 15, - "_actionBlacklistNodeosArg": "--action-blacklist", - "_actionBlacklistNodeosDefault": null, - "_actorBlacklistNodeosArg": "--actor-blacklist", - "_actorBlacklistNodeosDefault": null, - "_actorWhitelistNodeosArg": "--actor-whitelist", - "_actorWhitelistNodeosDefault": null, - "_apiAcceptTransactionsNodeosArg": "--api-accept-transactions", - "_apiAcceptTransactionsNodeosDefault": 1, - "_blockLogRetainBlocksNodeosArg": "--block-log-retain-blocks", - "_blockLogRetainBlocksNodeosDefault": null, - "_blocksDirNodeosArg": "--blocks-dir", - "_blocksDirNodeosDefault": "\"blocks\"", - "_chainStateDbGuardSizeMbNodeosArg": "--chain-state-db-guard-size-mb", - "_chainStateDbGuardSizeMbNodeosDefault": 128, - "_chainStateDbSizeMbNodeosArg": "--chain-state-db-size-mb", - "_chainStateDbSizeMbNodeosDefault": 1024, - "_chainThreadsNodeosArg": "--chain-threads", - "_chainThreadsNodeosDefault": 2, - "_checkpointNodeosArg": "--checkpoint", - "_checkpointNodeosDefault": null, - "_contractBlacklistNodeosArg": "--contract-blacklist", - "_contractBlacklistNodeosDefault": null, - "_contractWhitelistNodeosArg": "--contract-whitelist", - "_contractWhitelistNodeosDefault": null, - "_contractsConsoleNodeosArg": "--contracts-console", - "_contractsConsoleNodeosDefault": false, - "_databaseMapModeNodeosArg": "--database-map-mode", - "_databaseMapModeNodeosDefault": "mapped", - "_deepMindNodeosArg": "--deep-mind", - "_deepMindNodeosDefault": false, - "_deleteAllBlocksNodeosArg": 
"--delete-all-blocks", - "_deleteAllBlocksNodeosDefault": false, - "_disableRamBillingNotifyChecksNodeosArg": "--disable-ram-billing-notify-checks", - "_disableRamBillingNotifyChecksNodeosDefault": false, - "_disableReplayOptsNodeosArg": "--disable-replay-opts", - "_disableReplayOptsNodeosDefault": false, - "_enableAccountQueriesNodeosArg": "--enable-account-queries", - "_enableAccountQueriesNodeosDefault": 0, - "_eosVmOcCacheSizeMbNodeosArg": "--eos-vm-oc-cache-size-mb", - "_eosVmOcCacheSizeMbNodeosDefault": 1024, - "_eosVmOcCompileThreadsNodeosArg": "--eos-vm-oc-compile-threads", - "_eosVmOcCompileThreadsNodeosDefault": 1, - "_eosVmOcEnableNodeosArg": "--eos-vm-oc-enable", - "_eosVmOcEnableNodeosDefault": false, - "_extractBuildInfoNodeosArg": "--extract-build-info", - "_extractBuildInfoNodeosDefault": null, - "_extractGenesisJsonNodeosArg": "--extract-genesis-json", - "_extractGenesisJsonNodeosDefault": null, - "_forceAllChecksNodeosArg": "--force-all-checks", - "_forceAllChecksNodeosDefault": false, - "_genesisJsonNodeosArg": "--genesis-json", - "_genesisJsonNodeosDefault": null, - "_genesisTimestampNodeosArg": "--genesis-timestamp", - "_genesisTimestampNodeosDefault": null, - "_hardReplayBlockchainNodeosArg": "--hard-replay-blockchain", - "_hardReplayBlockchainNodeosDefault": false, - "_integrityHashOnStartNodeosArg": "--integrity-hash-on-start", - "_integrityHashOnStartNodeosDefault": false, - "_integrityHashOnStopNodeosArg": "--integrity-hash-on-stop", - "_integrityHashOnStopNodeosDefault": false, - "_keyBlacklistNodeosArg": "--key-blacklist", - "_keyBlacklistNodeosDefault": null, - "_maxNonprivilegedInlineActionSizeNodeosArg": "--max-nonprivileged-inline-action-size", - "_maxNonprivilegedInlineActionSizeNodeosDefault": 4096, - "_maximumVariableSignatureLengthNodeosArg": "--maximum-variable-signature-length", - "_maximumVariableSignatureLengthNodeosDefault": 16384, - "_pluginName": "chain_plugin", - "_pluginNamespace": "eosio", - "_printBuildInfoNodeosArg": 
"--print-build-info", - "_printBuildInfoNodeosDefault": false, - "_printGenesisJsonNodeosArg": "--print-genesis-json", - "_printGenesisJsonNodeosDefault": false, - "_profileAccountNodeosArg": "--profile-account", - "_profileAccountNodeosDefault": null, - "_protocolFeaturesDirNodeosArg": "--protocol-features-dir", - "_protocolFeaturesDirNodeosDefault": "\"protocol_features\"", - "_readModeNodeosArg": "--read-mode", - "_readModeNodeosDefault": "head", - "_replayBlockchainNodeosArg": "--replay-blockchain", - "_replayBlockchainNodeosDefault": false, - "_senderBypassWhiteblacklistNodeosArg": "--sender-bypass-whiteblacklist", - "_senderBypassWhiteblacklistNodeosDefault": null, - "_signatureCpuBillablePctNodeosArg": "--signature-cpu-billable-pct", - "_signatureCpuBillablePctNodeosDefault": 50, - "_snapshotNodeosArg": "--snapshot", - "_snapshotNodeosDefault": null, - "_stateDirNodeosArg": "--state-dir", - "_stateDirNodeosDefault": "\"state\"", - "_terminateAtBlockNodeosArg": "--terminate-at-block", - "_terminateAtBlockNodeosDefault": 0, - "_transactionFinalityStatusFailureDurationSecNodeosArg": "--transaction-finality-status-failure-duration-sec", - "_transactionFinalityStatusFailureDurationSecNodeosDefault": 180, - "_transactionFinalityStatusMaxStorageSizeGbNodeosArg": "--transaction-finality-status-max-storage-size-gb", - "_transactionFinalityStatusMaxStorageSizeGbNodeosDefault": null, - "_transactionFinalityStatusSuccessDurationSecNodeosArg": "--transaction-finality-status-success-duration-sec", - "_transactionFinalityStatusSuccessDurationSecNodeosDefault": 180, - "_transactionRetryIntervalSecNodeosArg": "--transaction-retry-interval-sec", - "_transactionRetryIntervalSecNodeosDefault": 20, - "_transactionRetryMaxExpirationSecNodeosArg": "--transaction-retry-max-expiration-sec", - "_transactionRetryMaxExpirationSecNodeosDefault": 120, - "_transactionRetryMaxStorageSizeGbNodeosArg": "--transaction-retry-max-storage-size-gb", - 
"_transactionRetryMaxStorageSizeGbNodeosDefault": null, - "_truncateAtBlockNodeosArg": "--truncate-at-block", - "_truncateAtBlockNodeosDefault": 0, - "_trustedProducerNodeosArg": "--trusted-producer", - "_trustedProducerNodeosDefault": null, - "_validationModeNodeosArg": "--validation-mode", - "_validationModeNodeosDefault": "full", - "_wasmRuntimeNodeosArg": "--wasm-runtime", - "_wasmRuntimeNodeosDefault": "eos-vm-jit", - "abiSerializerMaxTimeMs": null, - "actionBlacklist": null, - "actorBlacklist": null, - "actorWhitelist": null, - "apiAcceptTransactions": null, - "blockLogRetainBlocks": null, - "blocksDir": null, - "chainStateDbGuardSizeMb": null, - "chainStateDbSizeMb": 10240, - "chainThreads": 2, - "checkpoint": null, - "contractBlacklist": null, - "contractWhitelist": null, - "contractsConsole": null, - "databaseMapMode": "mapped", - "deepMind": null, - "deleteAllBlocks": null, - "disableRamBillingNotifyChecks": null, - "disableReplayOpts": null, - "enableAccountQueries": null, - "eosVmOcCacheSizeMb": null, - "eosVmOcCompileThreads": null, - "eosVmOcEnable": null, - "extractBuildInfo": null, - "extractGenesisJson": null, - "forceAllChecks": null, - "genesisJson": null, - "genesisTimestamp": null, - "hardReplayBlockchain": null, - "integrityHashOnStart": null, - "integrityHashOnStop": null, - "keyBlacklist": null, - "maxNonprivilegedInlineActionSize": null, - "maximumVariableSignatureLength": null, - "printBuildInfo": null, - "printGenesisJson": null, - "profileAccount": null, - "protocolFeaturesDir": null, - "readMode": null, - "replayBlockchain": null, - "senderBypassWhiteblacklist": null, - "signatureCpuBillablePct": 0, - "snapshot": null, - "stateDir": null, - "terminateAtBlock": null, - "transactionFinalityStatusFailureDurationSec": null, - "transactionFinalityStatusMaxStorageSizeGb": null, - "transactionFinalityStatusSuccessDurationSec": null, - "transactionRetryIntervalSec": null, - "transactionRetryMaxExpirationSec": null, - 
"transactionRetryMaxStorageSizeGb": null, - "truncateAtBlock": null, - "trustedProducer": null, - "validationMode": null, - "wasmRuntime": null - }, - "httpClientPluginArgs": { - "_httpsClientRootCertNodeosArg": "--https-client-root-cert", - "_httpsClientRootCertNodeosDefault": null, - "_httpsClientValidatePeersNodeosArg": "--https-client-validate-peers", - "_httpsClientValidatePeersNodeosDefault": 1, - "_pluginName": "http_client_plugin", - "_pluginNamespace": "eosio", - "httpsClientRootCert": null, - "httpsClientValidatePeers": null - }, - "httpPluginArgs": { - "_accessControlAllowCredentialsNodeosArg": "--access-control-allow-credentials", - "_accessControlAllowCredentialsNodeosDefault": false, - "_accessControlAllowHeadersNodeosArg": "--access-control-allow-headers", - "_accessControlAllowHeadersNodeosDefault": null, - "_accessControlAllowOriginNodeosArg": "--access-control-allow-origin", - "_accessControlAllowOriginNodeosDefault": null, - "_accessControlMaxAgeNodeosArg": "--access-control-max-age", - "_accessControlMaxAgeNodeosDefault": null, - "_httpAliasNodeosArg": "--http-alias", - "_httpAliasNodeosDefault": null, - "_httpKeepAliveNodeosArg": "--http-keep-alive", - "_httpKeepAliveNodeosDefault": 1, - "_httpMaxBytesInFlightMbNodeosArg": "--http-max-bytes-in-flight-mb", - "_httpMaxBytesInFlightMbNodeosDefault": 500, - "_httpMaxInFlightRequestsNodeosArg": "--http-max-in-flight-requests", - "_httpMaxInFlightRequestsNodeosDefault": -1, - "_httpMaxResponseTimeMsNodeosArg": "--http-max-response-time-ms", - "_httpMaxResponseTimeMsNodeosDefault": 30, - "_httpServerAddressNodeosArg": "--http-server-address", - "_httpServerAddressNodeosDefault": "127.0.0.1:8888", - "_httpThreadsNodeosArg": "--http-threads", - "_httpThreadsNodeosDefault": 2, - "_httpValidateHostNodeosArg": "--http-validate-host", - "_httpValidateHostNodeosDefault": 1, - "_httpsCertificateChainFileNodeosArg": "--https-certificate-chain-file", - "_httpsCertificateChainFileNodeosDefault": null, - 
"_httpsEcdhCurveNodeosArg": "--https-ecdh-curve", - "_httpsEcdhCurveNodeosDefault": "secp384r1", - "_httpsPrivateKeyFileNodeosArg": "--https-private-key-file", - "_httpsPrivateKeyFileNodeosDefault": null, - "_httpsServerAddressNodeosArg": "--https-server-address", - "_httpsServerAddressNodeosDefault": null, - "_maxBodySizeNodeosArg": "--max-body-size", - "_maxBodySizeNodeosDefault": 2097152, - "_pluginName": "http_plugin", - "_pluginNamespace": "eosio", - "_unixSocketPathNodeosArg": "--unix-socket-path", - "_unixSocketPathNodeosDefault": null, - "_verboseHttpErrorsNodeosArg": "--verbose-http-errors", - "_verboseHttpErrorsNodeosDefault": false, - "accessControlAllowCredentials": null, - "accessControlAllowHeaders": null, - "accessControlAllowOrigin": null, - "accessControlMaxAge": null, - "httpAlias": null, - "httpKeepAlive": null, - "httpMaxBytesInFlightMb": null, - "httpMaxInFlightRequests": null, - "httpMaxResponseTimeMs": 990000, - "httpServerAddress": null, - "httpThreads": null, - "httpValidateHost": null, - "httpsCertificateChainFile": null, - "httpsEcdhCurve": null, - "httpsPrivateKeyFile": null, - "httpsServerAddress": null, - "maxBodySize": null, - "unixSocketPath": null, - "verboseHttpErrors": null - }, - "netPluginArgs": { - "_agentNameNodeosArg": "--agent-name", - "_agentNameNodeosDefault": "EOS Test Agent", - "_allowedConnectionNodeosArg": "--allowed-connection", - "_allowedConnectionNodeosDefault": "any", - "_connectionCleanupPeriodNodeosArg": "--connection-cleanup-period", - "_connectionCleanupPeriodNodeosDefault": 30, - "_maxCleanupTimeMsecNodeosArg": "--max-cleanup-time-msec", - "_maxCleanupTimeMsecNodeosDefault": 10, - "_maxClientsNodeosArg": "--max-clients", - "_maxClientsNodeosDefault": 25, - "_netThreadsNodeosArg": "--net-threads", - "_netThreadsNodeosDefault": 2, - "_p2pAcceptTransactionsNodeosArg": "--p2p-accept-transactions", - "_p2pAcceptTransactionsNodeosDefault": 1, - "_p2pDedupCacheExpireTimeSecNodeosArg": 
"--p2p-dedup-cache-expire-time-sec", - "_p2pDedupCacheExpireTimeSecNodeosDefault": 10, - "_p2pKeepaliveIntervalMsNodeosArg": "--p2p-keepalive-interval-ms", - "_p2pKeepaliveIntervalMsNodeosDefault": 10000, - "_p2pListenEndpointNodeosArg": "--p2p-listen-endpoint", - "_p2pListenEndpointNodeosDefault": "0.0.0.0:9876", - "_p2pMaxNodesPerHostNodeosArg": "--p2p-max-nodes-per-host", - "_p2pMaxNodesPerHostNodeosDefault": 1, - "_p2pPeerAddressNodeosArg": "--p2p-peer-address", - "_p2pPeerAddressNodeosDefault": null, - "_p2pServerAddressNodeosArg": "--p2p-server-address", - "_p2pServerAddressNodeosDefault": null, - "_peerKeyNodeosArg": "--peer-key", - "_peerKeyNodeosDefault": null, - "_peerLogFormatNodeosArg": "--peer-log-format", - "_peerLogFormatNodeosDefault": "[\"${_name}\" - ${_cid} ${_ip}:${_port}] ", - "_peerPrivateKeyNodeosArg": "--peer-private-key", - "_peerPrivateKeyNodeosDefault": null, - "_pluginName": "net_plugin", - "_pluginNamespace": "eosio", - "_syncFetchSpanNodeosArg": "--sync-fetch-span", - "_syncFetchSpanNodeosDefault": 100, - "_useSocketReadWatermarkNodeosArg": "--use-socket-read-watermark", - "_useSocketReadWatermarkNodeosDefault": 0, - "agentName": null, - "allowedConnection": null, - "connectionCleanupPeriod": null, - "maxCleanupTimeMsec": null, - "maxClients": null, - "netThreads": 2, - "p2pAcceptTransactions": null, - "p2pDedupCacheExpireTimeSec": null, - "p2pKeepaliveIntervalMs": null, - "p2pListenEndpoint": null, - "p2pMaxNodesPerHost": null, - "p2pPeerAddress": null, - "p2pServerAddress": null, - "peerKey": null, - "peerLogFormat": null, - "peerPrivateKey": null, - "syncFetchSpan": null, - "useSocketReadWatermark": null - }, - "producerPluginArgs": { - "_cpuEffortPercentNodeosArg": "--cpu-effort-percent", - "_cpuEffortPercentNodeosDefault": 80, - "_disableSubjectiveAccountBillingNodeosArg": "--disable-subjective-account-billing", - "_disableSubjectiveAccountBillingNodeosDefault": false, - "_disableSubjectiveApiBillingNodeosArg": 
"--disable-subjective-api-billing", - "_disableSubjectiveApiBillingNodeosDefault": 1, - "_disableSubjectiveBillingNodeosArg": "--disable-subjective-billing", - "_disableSubjectiveBillingNodeosDefault": 1, - "_disableSubjectiveP2pBillingNodeosArg": "--disable-subjective-p2p-billing", - "_disableSubjectiveP2pBillingNodeosDefault": 1, - "_enableStaleProductionNodeosArg": "--enable-stale-production", - "_enableStaleProductionNodeosDefault": false, - "_greylistAccountNodeosArg": "--greylist-account", - "_greylistAccountNodeosDefault": null, - "_greylistLimitNodeosArg": "--greylist-limit", - "_greylistLimitNodeosDefault": 1000, - "_incomingDeferRatioNodeosArg": "--incoming-defer-ratio", - "_incomingDeferRatioNodeosDefault": 1, - "_incomingTransactionQueueSizeMbNodeosArg": "--incoming-transaction-queue-size-mb", - "_incomingTransactionQueueSizeMbNodeosDefault": 1024, - "_lastBlockCpuEffortPercentNodeosArg": "--last-block-cpu-effort-percent", - "_lastBlockCpuEffortPercentNodeosDefault": 80, - "_lastBlockTimeOffsetUsNodeosArg": "--last-block-time-offset-us", - "_lastBlockTimeOffsetUsNodeosDefault": -200000, - "_maxBlockCpuUsageThresholdUsNodeosArg": "--max-block-cpu-usage-threshold-us", - "_maxBlockCpuUsageThresholdUsNodeosDefault": 5000, - "_maxBlockNetUsageThresholdBytesNodeosArg": "--max-block-net-usage-threshold-bytes", - "_maxBlockNetUsageThresholdBytesNodeosDefault": 1024, - "_maxIrreversibleBlockAgeNodeosArg": "--max-irreversible-block-age", - "_maxIrreversibleBlockAgeNodeosDefault": -1, - "_maxScheduledTransactionTimePerBlockMsNodeosArg": "--max-scheduled-transaction-time-per-block-ms", - "_maxScheduledTransactionTimePerBlockMsNodeosDefault": 100, - "_maxTransactionTimeNodeosArg": "--max-transaction-time", - "_maxTransactionTimeNodeosDefault": 30, - "_pauseOnStartupNodeosArg": "--pause-on-startup", - "_pauseOnStartupNodeosDefault": false, - "_pluginName": "producer_plugin", - "_pluginNamespace": "eosio", - "_privateKeyNodeosArg": "--private-key", - 
"_privateKeyNodeosDefault": null, - "_produceTimeOffsetUsNodeosArg": "--produce-time-offset-us", - "_produceTimeOffsetUsNodeosDefault": 0, - "_producerNameNodeosArg": "--producer-name", - "_producerNameNodeosDefault": null, - "_producerThreadsNodeosArg": "--producer-threads", - "_producerThreadsNodeosDefault": 2, - "_signatureProviderNodeosArg": "--signature-provider", - "_signatureProviderNodeosDefault": "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV=KEY:5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3", - "_snapshotsDirNodeosArg": "--snapshots-dir", - "_snapshotsDirNodeosDefault": "\"snapshots\"", - "_subjectiveAccountDecayTimeMinutesNodeosArg": "--subjective-account-decay-time-minutes", - "_subjectiveAccountDecayTimeMinutesNodeosDefault": 1440, - "_subjectiveAccountMaxFailuresNodeosArg": "--subjective-account-max-failures", - "_subjectiveAccountMaxFailuresNodeosDefault": 3, - "_subjectiveCpuLeewayUsNodeosArg": "--subjective-cpu-leeway-us", - "_subjectiveCpuLeewayUsNodeosDefault": 31000, - "cpuEffortPercent": 80, - "disableSubjectiveAccountBilling": null, - "disableSubjectiveApiBilling": null, - "disableSubjectiveBilling": true, - "disableSubjectiveP2pBilling": null, - "enableStaleProduction": null, - "greylistAccount": null, - "greylistLimit": null, - "incomingDeferRatio": null, - "incomingTransactionQueueSizeMb": null, - "lastBlockCpuEffortPercent": 80, - "lastBlockTimeOffsetUs": -200000, - "maxBlockCpuUsageThresholdUs": null, - "maxBlockNetUsageThresholdBytes": null, - "maxIrreversibleBlockAge": null, - "maxScheduledTransactionTimePerBlockMs": null, - "maxTransactionTime": null, - "pauseOnStartup": null, - "privateKey": null, - "produceTimeOffsetUs": -200000, - "producerName": null, - "producerThreads": 2, - "signatureProvider": null, - "snapshotsDir": null, - "subjectiveAccountDecayTimeMinutes": null, - "subjectiveAccountMaxFailures": null, - "subjectiveCpuLeewayUs": null - }, - "resourceMonitorPluginArgs": { - "_pluginName": 
"resource_monitor_plugin", - "_pluginNamespace": "eosio", - "_resourceMonitorIntervalSecondsNodeosArg": "--resource-monitor-interval-seconds", - "_resourceMonitorIntervalSecondsNodeosDefault": 2, - "_resourceMonitorNotShutdownOnThresholdExceededNodeosArg": "--resource-monitor-not-shutdown-on-threshold-exceeded", - "_resourceMonitorNotShutdownOnThresholdExceededNodeosDefault": false, - "_resourceMonitorSpaceThresholdNodeosArg": "--resource-monitor-space-threshold", - "_resourceMonitorSpaceThresholdNodeosDefault": 90, - "_resourceMonitorWarningIntervalNodeosArg": "--resource-monitor-warning-interval", - "_resourceMonitorWarningIntervalNodeosDefault": 30, - "resourceMonitorIntervalSeconds": null, - "resourceMonitorNotShutdownOnThresholdExceeded": null, - "resourceMonitorSpaceThreshold": null, - "resourceMonitorWarningInterval": null - }, - "signatureProviderPluginArgs": { - "_keosdProviderTimeoutNodeosArg": "--keosd-provider-timeout", - "_keosdProviderTimeoutNodeosDefault": 5, - "_pluginName": "signature_provider_plugin", - "_pluginNamespace": "eosio", - "keosdProviderTimeout": null - }, - "stateHistoryPluginArgs": { - "_chainStateHistoryNodeosArg": "--chain-state-history", - "_chainStateHistoryNodeosDefault": false, - "_deleteStateHistoryNodeosArg": "--delete-state-history", - "_deleteStateHistoryNodeosDefault": false, - "_pluginName": "state_history_plugin", - "_pluginNamespace": "eosio", - "_stateHistoryDirNodeosArg": "--state-history-dir", - "_stateHistoryDirNodeosDefault": "\"state-history\"", - "_stateHistoryEndpointNodeosArg": "--state-history-endpoint", - "_stateHistoryEndpointNodeosDefault": "127.0.0.1:8080", - "_stateHistoryLogRetainBlocksNodeosArg": "--state-history-log-retain-blocks", - "_stateHistoryLogRetainBlocksNodeosDefault": null, - "_stateHistoryUnixSocketPathNodeosArg": "--state-history-unix-socket-path", - "_stateHistoryUnixSocketPathNodeosDefault": null, - "_traceHistoryDebugModeNodeosArg": "--trace-history-debug-mode", - 
"_traceHistoryDebugModeNodeosDefault": false, - "_traceHistoryNodeosArg": "--trace-history", - "_traceHistoryNodeosDefault": false, - "chainStateHistory": null, - "deleteStateHistory": null, - "stateHistoryDir": null, - "stateHistoryEndpoint": null, - "stateHistoryLogRetainBlocks": null, - "stateHistoryUnixSocketPath": null, - "traceHistory": null, - "traceHistoryDebugMode": null - }, - "traceApiPluginArgs": { - "_pluginName": "trace_api_plugin", - "_pluginNamespace": "eosio", - "_traceDirNodeosArg": "--trace-dir", - "_traceDirNodeosDefault": "\"traces\"", - "_traceMinimumIrreversibleHistoryBlocksNodeosArg": "--trace-minimum-irreversible-history-blocks", - "_traceMinimumIrreversibleHistoryBlocksNodeosDefault": -1, - "_traceMinimumUncompressedIrreversibleHistoryBlocksNodeosArg": "--trace-minimum-uncompressed-irreversible-history-blocks", - "_traceMinimumUncompressedIrreversibleHistoryBlocksNodeosDefault": -1, - "_traceNoAbisNodeosArg": "--trace-no-abis", - "_traceNoAbisNodeosDefault": false, - "_traceRpcAbiNodeosArg": "--trace-rpc-abi", - "_traceRpcAbiNodeosDefault": null, - "_traceSliceStrideNodeosArg": "--trace-slice-stride", - "_traceSliceStrideNodeosDefault": 10000, - "traceDir": null, - "traceMinimumIrreversibleHistoryBlocks": null, - "traceMinimumUncompressedIrreversibleHistoryBlocks": null, - "traceNoAbis": null, - "traceRpcAbi": null, - "traceSliceStride": null - } - }, - "genesisPath": "tests/performance_tests/genesis.json", - "keepLogs": true, - "killAll": true, - "logDirBase": "p", - "logDirPath": "p/2023-01-11_19-13-31-20", - "logDirRoot": ".", - "logDirTimestamp": "2023-01-11_19-13-31", - "logDirTimestampedOptSuffix": "-20", - "loggingDict": { - "bios": "off" - }, - "maximumClients": 0, - "maximumP2pPerHost": 5000, - "nodeosVers": "v4", - "nodesFile": null, - "numAddlBlocksToPrune": 2, - "pnodes": 1, - "printMissingTransactions": false, - "prodsEnableTraceApi": false, - "quiet": false, - "specificExtraNodeosArgs": { - "1": "--plugin 
eosio::trace_api_plugin" - }, - "targetTps": 20, - "testTrxGenDurationSec": 90, - "topo": "mesh", - "totalNodes": 1, - "tpsLimitPerGenerator": 10, - "useBiosBootFile": false, - "verbose": true - }, - "completedRun": true, - "env": { - "logical_cpu_count": 16, - "os": "posix", - "release": "5.10.16.3-microsoft-standard-WSL2", - "system": "Linux" - }, - "nodeosVersion": "v4.0.0-dev", - "testFinish": "2023-01-11T19:16:01.623808", - "testStart": "2023-01-11T19:13:31.346325" - } +{ + "Analysis": { + "BlockSize": { + "avg": 1920.0, + "emptyBlocks": 0, + "max": 1920, + "min": 1920, + "numBlocks": 177, + "sigma": 0.0 + }, + "BlocksGuide": { + "configAddlDropCnt": 2, + "firstBlockNum": 2, + "lastBlockNum": 301, + "leadingEmptyBlocksCnt": 1, + "setupBlocksCnt": 112, + "tearDownBlocksCnt": 0, + "testAnalysisBlockCnt": 177, + "testEndBlockNum": 301, + "testStartBlockNum": 114, + "totalBlocks": 300, + "trailingEmptyBlocksCnt": 6 + }, + "DroppedBlocks": {}, + "DroppedBlocksCount": 0, + "ForkedBlocks": [], + "ForksCount": 0, + "ProductionWindowsAverageSize": 0, + "ProductionWindowsMissed": 0, + "ProductionWindowsTotal": 0, + "TPS": { + "avg": 20.0, + "configTestDuration": 90, + "configTps": 20, + "emptyBlocks": 0, + "generatorCount": 2, + "max": 20, + "min": 20, + "numBlocks": 177, + "sigma": 0.0, + "tpsPerGenerator": [ + 10, + 10 + ] + }, + "TrxCPU": { + "avg": 89.22111111111111, + "max": 404.0, + "min": 7.0, + "samples": 1800, + "sigma": 52.66483117992383 + }, + "TrxLatency": { + "avg": 0.47760056018829344, + "max": 0.6789999008178711, + "min": 0.2760000228881836, + "samples": 1800, + "sigma": 0.14143152148157506 + }, + "TrxNet": { + "avg": 24.0, + "max": 24.0, + "min": 24.0, + "samples": 1800, + "sigma": 0.0 + } + }, + "args": { + "_killEosInstances": true, + "_killWallet": true, + "_totalNodes": 2, + "delPerfLogs": false, + "delReport": false, + "delay": 1, + "dontKill": false, + "dumpErrorDetails": false, + "expectedTransactionsSent": 1800, + "extraNodeosArgs": { + 
"chainPluginArgs": { + "_abiSerializerMaxTimeMsNodeosArg": "--abi-serializer-max-time-ms", + "_abiSerializerMaxTimeMsNodeosDefault": 15, + "_actionBlacklistNodeosArg": "--action-blacklist", + "_actionBlacklistNodeosDefault": null, + "_actorBlacklistNodeosArg": "--actor-blacklist", + "_actorBlacklistNodeosDefault": null, + "_actorWhitelistNodeosArg": "--actor-whitelist", + "_actorWhitelistNodeosDefault": null, + "_apiAcceptTransactionsNodeosArg": "--api-accept-transactions", + "_apiAcceptTransactionsNodeosDefault": 1, + "_blockLogRetainBlocksNodeosArg": "--block-log-retain-blocks", + "_blockLogRetainBlocksNodeosDefault": null, + "_blocksDirNodeosArg": "--blocks-dir", + "_blocksDirNodeosDefault": "\"blocks\"", + "_chainStateDbGuardSizeMbNodeosArg": "--chain-state-db-guard-size-mb", + "_chainStateDbGuardSizeMbNodeosDefault": 128, + "_chainStateDbSizeMbNodeosArg": "--chain-state-db-size-mb", + "_chainStateDbSizeMbNodeosDefault": 1024, + "_chainThreadsNodeosArg": "--chain-threads", + "_chainThreadsNodeosDefault": 2, + "_checkpointNodeosArg": "--checkpoint", + "_checkpointNodeosDefault": null, + "_contractBlacklistNodeosArg": "--contract-blacklist", + "_contractBlacklistNodeosDefault": null, + "_contractWhitelistNodeosArg": "--contract-whitelist", + "_contractWhitelistNodeosDefault": null, + "_contractsConsoleNodeosArg": "--contracts-console", + "_contractsConsoleNodeosDefault": false, + "_databaseMapModeNodeosArg": "--database-map-mode", + "_databaseMapModeNodeosDefault": "mapped", + "_deepMindNodeosArg": "--deep-mind", + "_deepMindNodeosDefault": false, + "_deleteAllBlocksNodeosArg": "--delete-all-blocks", + "_deleteAllBlocksNodeosDefault": false, + "_disableRamBillingNotifyChecksNodeosArg": "--disable-ram-billing-notify-checks", + "_disableRamBillingNotifyChecksNodeosDefault": false, + "_disableReplayOptsNodeosArg": "--disable-replay-opts", + "_disableReplayOptsNodeosDefault": false, + "_enableAccountQueriesNodeosArg": "--enable-account-queries", + 
"_enableAccountQueriesNodeosDefault": 0, + "_eosVmOcCacheSizeMbNodeosArg": "--eos-vm-oc-cache-size-mb", + "_eosVmOcCacheSizeMbNodeosDefault": 1024, + "_eosVmOcCompileThreadsNodeosArg": "--eos-vm-oc-compile-threads", + "_eosVmOcCompileThreadsNodeosDefault": 1, + "_eosVmOcEnableNodeosArg": "--eos-vm-oc-enable", + "_eosVmOcEnableNodeosDefault": false, + "_extractBuildInfoNodeosArg": "--extract-build-info", + "_extractBuildInfoNodeosDefault": null, + "_extractGenesisJsonNodeosArg": "--extract-genesis-json", + "_extractGenesisJsonNodeosDefault": null, + "_forceAllChecksNodeosArg": "--force-all-checks", + "_forceAllChecksNodeosDefault": false, + "_genesisJsonNodeosArg": "--genesis-json", + "_genesisJsonNodeosDefault": null, + "_genesisTimestampNodeosArg": "--genesis-timestamp", + "_genesisTimestampNodeosDefault": null, + "_hardReplayBlockchainNodeosArg": "--hard-replay-blockchain", + "_hardReplayBlockchainNodeosDefault": false, + "_integrityHashOnStartNodeosArg": "--integrity-hash-on-start", + "_integrityHashOnStartNodeosDefault": false, + "_integrityHashOnStopNodeosArg": "--integrity-hash-on-stop", + "_integrityHashOnStopNodeosDefault": false, + "_keyBlacklistNodeosArg": "--key-blacklist", + "_keyBlacklistNodeosDefault": null, + "_maxNonprivilegedInlineActionSizeNodeosArg": "--max-nonprivileged-inline-action-size", + "_maxNonprivilegedInlineActionSizeNodeosDefault": 4096, + "_maximumVariableSignatureLengthNodeosArg": "--maximum-variable-signature-length", + "_maximumVariableSignatureLengthNodeosDefault": 16384, + "_pluginName": "chain_plugin", + "_pluginNamespace": "eosio", + "_printBuildInfoNodeosArg": "--print-build-info", + "_printBuildInfoNodeosDefault": false, + "_printGenesisJsonNodeosArg": "--print-genesis-json", + "_printGenesisJsonNodeosDefault": false, + "_profileAccountNodeosArg": "--profile-account", + "_profileAccountNodeosDefault": null, + "_protocolFeaturesDirNodeosArg": "--protocol-features-dir", + "_protocolFeaturesDirNodeosDefault": 
"\"protocol_features\"", + "_readModeNodeosArg": "--read-mode", + "_readModeNodeosDefault": "head", + "_replayBlockchainNodeosArg": "--replay-blockchain", + "_replayBlockchainNodeosDefault": false, + "_senderBypassWhiteblacklistNodeosArg": "--sender-bypass-whiteblacklist", + "_senderBypassWhiteblacklistNodeosDefault": null, + "_signatureCpuBillablePctNodeosArg": "--signature-cpu-billable-pct", + "_signatureCpuBillablePctNodeosDefault": 50, + "_snapshotNodeosArg": "--snapshot", + "_snapshotNodeosDefault": null, + "_stateDirNodeosArg": "--state-dir", + "_stateDirNodeosDefault": "\"state\"", + "_terminateAtBlockNodeosArg": "--terminate-at-block", + "_terminateAtBlockNodeosDefault": 0, + "_transactionFinalityStatusFailureDurationSecNodeosArg": "--transaction-finality-status-failure-duration-sec", + "_transactionFinalityStatusFailureDurationSecNodeosDefault": 180, + "_transactionFinalityStatusMaxStorageSizeGbNodeosArg": "--transaction-finality-status-max-storage-size-gb", + "_transactionFinalityStatusMaxStorageSizeGbNodeosDefault": null, + "_transactionFinalityStatusSuccessDurationSecNodeosArg": "--transaction-finality-status-success-duration-sec", + "_transactionFinalityStatusSuccessDurationSecNodeosDefault": 180, + "_transactionRetryIntervalSecNodeosArg": "--transaction-retry-interval-sec", + "_transactionRetryIntervalSecNodeosDefault": 20, + "_transactionRetryMaxExpirationSecNodeosArg": "--transaction-retry-max-expiration-sec", + "_transactionRetryMaxExpirationSecNodeosDefault": 120, + "_transactionRetryMaxStorageSizeGbNodeosArg": "--transaction-retry-max-storage-size-gb", + "_transactionRetryMaxStorageSizeGbNodeosDefault": null, + "_truncateAtBlockNodeosArg": "--truncate-at-block", + "_truncateAtBlockNodeosDefault": 0, + "_trustedProducerNodeosArg": "--trusted-producer", + "_trustedProducerNodeosDefault": null, + "_validationModeNodeosArg": "--validation-mode", + "_validationModeNodeosDefault": "full", + "_wasmRuntimeNodeosArg": "--wasm-runtime", + 
"_wasmRuntimeNodeosDefault": "eos-vm-jit", + "abiSerializerMaxTimeMs": null, + "actionBlacklist": null, + "actorBlacklist": null, + "actorWhitelist": null, + "apiAcceptTransactions": null, + "blockLogRetainBlocks": null, + "blocksDir": null, + "chainStateDbGuardSizeMb": null, + "chainStateDbSizeMb": 10240, + "chainThreads": 2, + "checkpoint": null, + "contractBlacklist": null, + "contractWhitelist": null, + "contractsConsole": null, + "databaseMapMode": "mapped", + "deepMind": null, + "deleteAllBlocks": null, + "disableRamBillingNotifyChecks": null, + "disableReplayOpts": null, + "enableAccountQueries": null, + "eosVmOcCacheSizeMb": null, + "eosVmOcCompileThreads": null, + "eosVmOcEnable": null, + "extractBuildInfo": null, + "extractGenesisJson": null, + "forceAllChecks": null, + "genesisJson": null, + "genesisTimestamp": null, + "hardReplayBlockchain": null, + "integrityHashOnStart": null, + "integrityHashOnStop": null, + "keyBlacklist": null, + "maxNonprivilegedInlineActionSize": null, + "maximumVariableSignatureLength": null, + "printBuildInfo": null, + "printGenesisJson": null, + "profileAccount": null, + "protocolFeaturesDir": null, + "readMode": null, + "replayBlockchain": null, + "senderBypassWhiteblacklist": null, + "signatureCpuBillablePct": 0, + "snapshot": null, + "stateDir": null, + "terminateAtBlock": null, + "transactionFinalityStatusFailureDurationSec": null, + "transactionFinalityStatusMaxStorageSizeGb": null, + "transactionFinalityStatusSuccessDurationSec": null, + "transactionRetryIntervalSec": null, + "transactionRetryMaxExpirationSec": null, + "transactionRetryMaxStorageSizeGb": null, + "truncateAtBlock": null, + "trustedProducer": null, + "validationMode": null, + "wasmRuntime": null + }, + "httpClientPluginArgs": { + "_httpsClientRootCertNodeosArg": "--https-client-root-cert", + "_httpsClientRootCertNodeosDefault": null, + "_httpsClientValidatePeersNodeosArg": "--https-client-validate-peers", + "_httpsClientValidatePeersNodeosDefault": 1, + 
"_pluginName": "http_client_plugin", + "_pluginNamespace": "eosio", + "httpsClientRootCert": null, + "httpsClientValidatePeers": null + }, + "httpPluginArgs": { + "_accessControlAllowCredentialsNodeosArg": "--access-control-allow-credentials", + "_accessControlAllowCredentialsNodeosDefault": false, + "_accessControlAllowHeadersNodeosArg": "--access-control-allow-headers", + "_accessControlAllowHeadersNodeosDefault": null, + "_accessControlAllowOriginNodeosArg": "--access-control-allow-origin", + "_accessControlAllowOriginNodeosDefault": null, + "_accessControlMaxAgeNodeosArg": "--access-control-max-age", + "_accessControlMaxAgeNodeosDefault": null, + "_httpAliasNodeosArg": "--http-alias", + "_httpAliasNodeosDefault": null, + "_httpKeepAliveNodeosArg": "--http-keep-alive", + "_httpKeepAliveNodeosDefault": 1, + "_httpMaxBytesInFlightMbNodeosArg": "--http-max-bytes-in-flight-mb", + "_httpMaxBytesInFlightMbNodeosDefault": 500, + "_httpMaxInFlightRequestsNodeosArg": "--http-max-in-flight-requests", + "_httpMaxInFlightRequestsNodeosDefault": -1, + "_httpMaxResponseTimeMsNodeosArg": "--http-max-response-time-ms", + "_httpMaxResponseTimeMsNodeosDefault": 30, + "_httpServerAddressNodeosArg": "--http-server-address", + "_httpServerAddressNodeosDefault": "127.0.0.1:8888", + "_httpThreadsNodeosArg": "--http-threads", + "_httpThreadsNodeosDefault": 2, + "_httpValidateHostNodeosArg": "--http-validate-host", + "_httpValidateHostNodeosDefault": 1, + "_httpsCertificateChainFileNodeosArg": "--https-certificate-chain-file", + "_httpsCertificateChainFileNodeosDefault": null, + "_httpsEcdhCurveNodeosArg": "--https-ecdh-curve", + "_httpsEcdhCurveNodeosDefault": "secp384r1", + "_httpsPrivateKeyFileNodeosArg": "--https-private-key-file", + "_httpsPrivateKeyFileNodeosDefault": null, + "_httpsServerAddressNodeosArg": "--https-server-address", + "_httpsServerAddressNodeosDefault": null, + "_maxBodySizeNodeosArg": "--max-body-size", + "_maxBodySizeNodeosDefault": 2097152, + "_pluginName": 
"http_plugin", + "_pluginNamespace": "eosio", + "_unixSocketPathNodeosArg": "--unix-socket-path", + "_unixSocketPathNodeosDefault": null, + "_verboseHttpErrorsNodeosArg": "--verbose-http-errors", + "_verboseHttpErrorsNodeosDefault": false, + "accessControlAllowCredentials": null, + "accessControlAllowHeaders": null, + "accessControlAllowOrigin": null, + "accessControlMaxAge": null, + "httpAlias": null, + "httpKeepAlive": null, + "httpMaxBytesInFlightMb": null, + "httpMaxInFlightRequests": null, + "httpMaxResponseTimeMs": 990000, + "httpServerAddress": null, + "httpThreads": null, + "httpValidateHost": null, + "httpsCertificateChainFile": null, + "httpsEcdhCurve": null, + "httpsPrivateKeyFile": null, + "httpsServerAddress": null, + "maxBodySize": null, + "unixSocketPath": null, + "verboseHttpErrors": null + }, + "netPluginArgs": { + "_agentNameNodeosArg": "--agent-name", + "_agentNameNodeosDefault": "EOS Test Agent", + "_allowedConnectionNodeosArg": "--allowed-connection", + "_allowedConnectionNodeosDefault": "any", + "_connectionCleanupPeriodNodeosArg": "--connection-cleanup-period", + "_connectionCleanupPeriodNodeosDefault": 30, + "_maxCleanupTimeMsecNodeosArg": "--max-cleanup-time-msec", + "_maxCleanupTimeMsecNodeosDefault": 10, + "_maxClientsNodeosArg": "--max-clients", + "_maxClientsNodeosDefault": 25, + "_netThreadsNodeosArg": "--net-threads", + "_netThreadsNodeosDefault": 2, + "_p2pAcceptTransactionsNodeosArg": "--p2p-accept-transactions", + "_p2pAcceptTransactionsNodeosDefault": 1, + "_p2pDedupCacheExpireTimeSecNodeosArg": "--p2p-dedup-cache-expire-time-sec", + "_p2pDedupCacheExpireTimeSecNodeosDefault": 10, + "_p2pKeepaliveIntervalMsNodeosArg": "--p2p-keepalive-interval-ms", + "_p2pKeepaliveIntervalMsNodeosDefault": 10000, + "_p2pListenEndpointNodeosArg": "--p2p-listen-endpoint", + "_p2pListenEndpointNodeosDefault": "0.0.0.0:9876", + "_p2pMaxNodesPerHostNodeosArg": "--p2p-max-nodes-per-host", + "_p2pMaxNodesPerHostNodeosDefault": 1, + 
"_p2pPeerAddressNodeosArg": "--p2p-peer-address", + "_p2pPeerAddressNodeosDefault": null, + "_p2pServerAddressNodeosArg": "--p2p-server-address", + "_p2pServerAddressNodeosDefault": null, + "_peerKeyNodeosArg": "--peer-key", + "_peerKeyNodeosDefault": null, + "_peerLogFormatNodeosArg": "--peer-log-format", + "_peerLogFormatNodeosDefault": "[\"${_name}\" - ${_cid} ${_ip}:${_port}] ", + "_peerPrivateKeyNodeosArg": "--peer-private-key", + "_peerPrivateKeyNodeosDefault": null, + "_pluginName": "net_plugin", + "_pluginNamespace": "eosio", + "_syncFetchSpanNodeosArg": "--sync-fetch-span", + "_syncFetchSpanNodeosDefault": 100, + "_useSocketReadWatermarkNodeosArg": "--use-socket-read-watermark", + "_useSocketReadWatermarkNodeosDefault": 0, + "agentName": null, + "allowedConnection": null, + "connectionCleanupPeriod": null, + "maxCleanupTimeMsec": null, + "maxClients": null, + "netThreads": 2, + "p2pAcceptTransactions": null, + "p2pDedupCacheExpireTimeSec": null, + "p2pKeepaliveIntervalMs": null, + "p2pListenEndpoint": null, + "p2pMaxNodesPerHost": null, + "p2pPeerAddress": null, + "p2pServerAddress": null, + "peerKey": null, + "peerLogFormat": null, + "peerPrivateKey": null, + "syncFetchSpan": null, + "useSocketReadWatermark": null + }, + "producerPluginArgs": { + "_cpuEffortPercentNodeosArg": "--cpu-effort-percent", + "_cpuEffortPercentNodeosDefault": 80, + "_disableSubjectiveAccountBillingNodeosArg": "--disable-subjective-account-billing", + "_disableSubjectiveAccountBillingNodeosDefault": false, + "_disableSubjectiveApiBillingNodeosArg": "--disable-subjective-api-billing", + "_disableSubjectiveApiBillingNodeosDefault": 1, + "_disableSubjectiveBillingNodeosArg": "--disable-subjective-billing", + "_disableSubjectiveBillingNodeosDefault": 1, + "_disableSubjectiveP2pBillingNodeosArg": "--disable-subjective-p2p-billing", + "_disableSubjectiveP2pBillingNodeosDefault": 1, + "_enableStaleProductionNodeosArg": "--enable-stale-production", + "_enableStaleProductionNodeosDefault": 
false, + "_greylistAccountNodeosArg": "--greylist-account", + "_greylistAccountNodeosDefault": null, + "_greylistLimitNodeosArg": "--greylist-limit", + "_greylistLimitNodeosDefault": 1000, + "_incomingDeferRatioNodeosArg": "--incoming-defer-ratio", + "_incomingDeferRatioNodeosDefault": 1, + "_incomingTransactionQueueSizeMbNodeosArg": "--incoming-transaction-queue-size-mb", + "_incomingTransactionQueueSizeMbNodeosDefault": 1024, + "_lastBlockCpuEffortPercentNodeosArg": "--last-block-cpu-effort-percent", + "_lastBlockCpuEffortPercentNodeosDefault": 80, + "_lastBlockTimeOffsetUsNodeosArg": "--last-block-time-offset-us", + "_lastBlockTimeOffsetUsNodeosDefault": -200000, + "_maxBlockCpuUsageThresholdUsNodeosArg": "--max-block-cpu-usage-threshold-us", + "_maxBlockCpuUsageThresholdUsNodeosDefault": 5000, + "_maxBlockNetUsageThresholdBytesNodeosArg": "--max-block-net-usage-threshold-bytes", + "_maxBlockNetUsageThresholdBytesNodeosDefault": 1024, + "_maxIrreversibleBlockAgeNodeosArg": "--max-irreversible-block-age", + "_maxIrreversibleBlockAgeNodeosDefault": -1, + "_maxScheduledTransactionTimePerBlockMsNodeosArg": "--max-scheduled-transaction-time-per-block-ms", + "_maxScheduledTransactionTimePerBlockMsNodeosDefault": 100, + "_maxTransactionTimeNodeosArg": "--max-transaction-time", + "_maxTransactionTimeNodeosDefault": 30, + "_pauseOnStartupNodeosArg": "--pause-on-startup", + "_pauseOnStartupNodeosDefault": false, + "_pluginName": "producer_plugin", + "_pluginNamespace": "eosio", + "_privateKeyNodeosArg": "--private-key", + "_privateKeyNodeosDefault": null, + "_produceTimeOffsetUsNodeosArg": "--produce-time-offset-us", + "_produceTimeOffsetUsNodeosDefault": 0, + "_producerNameNodeosArg": "--producer-name", + "_producerNameNodeosDefault": null, + "_producerThreadsNodeosArg": "--producer-threads", + "_producerThreadsNodeosDefault": 2, + "_signatureProviderNodeosArg": "--signature-provider", + "_signatureProviderNodeosDefault": 
"EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV=KEY:5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3", + "_snapshotsDirNodeosArg": "--snapshots-dir", + "_snapshotsDirNodeosDefault": "\"snapshots\"", + "_subjectiveAccountDecayTimeMinutesNodeosArg": "--subjective-account-decay-time-minutes", + "_subjectiveAccountDecayTimeMinutesNodeosDefault": 1440, + "_subjectiveAccountMaxFailuresNodeosArg": "--subjective-account-max-failures", + "_subjectiveAccountMaxFailuresNodeosDefault": 3, + "_subjectiveCpuLeewayUsNodeosArg": "--subjective-cpu-leeway-us", + "_subjectiveCpuLeewayUsNodeosDefault": 31000, + "cpuEffortPercent": 80, + "disableSubjectiveAccountBilling": null, + "disableSubjectiveApiBilling": null, + "disableSubjectiveBilling": true, + "disableSubjectiveP2pBilling": null, + "enableStaleProduction": null, + "greylistAccount": null, + "greylistLimit": null, + "incomingDeferRatio": null, + "incomingTransactionQueueSizeMb": null, + "lastBlockCpuEffortPercent": 80, + "lastBlockTimeOffsetUs": -200000, + "maxBlockCpuUsageThresholdUs": null, + "maxBlockNetUsageThresholdBytes": null, + "maxIrreversibleBlockAge": null, + "maxScheduledTransactionTimePerBlockMs": null, + "maxTransactionTime": null, + "pauseOnStartup": null, + "privateKey": null, + "produceTimeOffsetUs": -200000, + "producerName": null, + "producerThreads": 2, + "signatureProvider": null, + "snapshotsDir": null, + "subjectiveAccountDecayTimeMinutes": null, + "subjectiveAccountMaxFailures": null, + "subjectiveCpuLeewayUs": null + }, + "resourceMonitorPluginArgs": { + "_pluginName": "resource_monitor_plugin", + "_pluginNamespace": "eosio", + "_resourceMonitorIntervalSecondsNodeosArg": "--resource-monitor-interval-seconds", + "_resourceMonitorIntervalSecondsNodeosDefault": 2, + "_resourceMonitorNotShutdownOnThresholdExceededNodeosArg": "--resource-monitor-not-shutdown-on-threshold-exceeded", + "_resourceMonitorNotShutdownOnThresholdExceededNodeosDefault": false, + "_resourceMonitorSpaceThresholdNodeosArg": 
"--resource-monitor-space-threshold", + "_resourceMonitorSpaceThresholdNodeosDefault": 90, + "_resourceMonitorWarningIntervalNodeosArg": "--resource-monitor-warning-interval", + "_resourceMonitorWarningIntervalNodeosDefault": 30, + "resourceMonitorIntervalSeconds": null, + "resourceMonitorNotShutdownOnThresholdExceeded": null, + "resourceMonitorSpaceThreshold": null, + "resourceMonitorWarningInterval": null + }, + "signatureProviderPluginArgs": { + "_keosdProviderTimeoutNodeosArg": "--keosd-provider-timeout", + "_keosdProviderTimeoutNodeosDefault": 5, + "_pluginName": "signature_provider_plugin", + "_pluginNamespace": "eosio", + "keosdProviderTimeout": null + }, + "stateHistoryPluginArgs": { + "_chainStateHistoryNodeosArg": "--chain-state-history", + "_chainStateHistoryNodeosDefault": false, + "_deleteStateHistoryNodeosArg": "--delete-state-history", + "_deleteStateHistoryNodeosDefault": false, + "_pluginName": "state_history_plugin", + "_pluginNamespace": "eosio", + "_stateHistoryDirNodeosArg": "--state-history-dir", + "_stateHistoryDirNodeosDefault": "\"state-history\"", + "_stateHistoryEndpointNodeosArg": "--state-history-endpoint", + "_stateHistoryEndpointNodeosDefault": "127.0.0.1:8080", + "_stateHistoryLogRetainBlocksNodeosArg": "--state-history-log-retain-blocks", + "_stateHistoryLogRetainBlocksNodeosDefault": null, + "_stateHistoryUnixSocketPathNodeosArg": "--state-history-unix-socket-path", + "_stateHistoryUnixSocketPathNodeosDefault": null, + "_traceHistoryDebugModeNodeosArg": "--trace-history-debug-mode", + "_traceHistoryDebugModeNodeosDefault": false, + "_traceHistoryNodeosArg": "--trace-history", + "_traceHistoryNodeosDefault": false, + "chainStateHistory": null, + "deleteStateHistory": null, + "stateHistoryDir": null, + "stateHistoryEndpoint": null, + "stateHistoryLogRetainBlocks": null, + "stateHistoryUnixSocketPath": null, + "traceHistory": null, + "traceHistoryDebugMode": null + }, + "traceApiPluginArgs": { + "_pluginName": "trace_api_plugin", + 
"_pluginNamespace": "eosio", + "_traceDirNodeosArg": "--trace-dir", + "_traceDirNodeosDefault": "\"traces\"", + "_traceMinimumIrreversibleHistoryBlocksNodeosArg": "--trace-minimum-irreversible-history-blocks", + "_traceMinimumIrreversibleHistoryBlocksNodeosDefault": -1, + "_traceMinimumUncompressedIrreversibleHistoryBlocksNodeosArg": "--trace-minimum-uncompressed-irreversible-history-blocks", + "_traceMinimumUncompressedIrreversibleHistoryBlocksNodeosDefault": -1, + "_traceNoAbisNodeosArg": "--trace-no-abis", + "_traceNoAbisNodeosDefault": false, + "_traceRpcAbiNodeosArg": "--trace-rpc-abi", + "_traceRpcAbiNodeosDefault": null, + "_traceSliceStrideNodeosArg": "--trace-slice-stride", + "_traceSliceStrideNodeosDefault": 10000, + "traceDir": null, + "traceMinimumIrreversibleHistoryBlocks": null, + "traceMinimumUncompressedIrreversibleHistoryBlocks": null, + "traceNoAbis": null, + "traceRpcAbi": null, + "traceSliceStride": null + } + }, + "genesisPath": "tests/performance_tests/genesis.json", + "keepLogs": true, + "killAll": true, + "logDirBase": "p", + "logDirPath": "p/2023-01-11_20-01-22-20", + "logDirRoot": ".", + "logDirTimestamp": "2023-01-11_20-01-22", + "logDirTimestampedOptSuffix": "-20", + "loggingDict": { + "bios": "off" + }, + "maximumClients": 0, + "maximumP2pPerHost": 5000, + "nodeosVers": "v4", + "nodesFile": null, + "numAddlBlocksToPrune": 2, + "pnodes": 1, + "printMissingTransactions": false, + "prodsEnableTraceApi": false, + "quiet": false, + "specificExtraNodeosArgs": { + "1": "--plugin eosio::trace_api_plugin" + }, + "targetTps": 20, + "testTrxGenDurationSec": 90, + "topo": "mesh", + "totalNodes": 1, + "tpsLimitPerGenerator": 10, + "useBiosBootFile": false, + "verbose": true + }, + "completedRun": true, + "env": { + "logical_cpu_count": 16, + "os": "posix", + "release": "5.10.16.3-microsoft-standard-WSL2", + "system": "Linux" + }, + "nodeosVersion": "v4.0.0-dev", + "testFinish": "2023-01-11T20:03:53.210082", + "testStart": 
"2023-01-11T20:01:22.768422" +} ``` diff --git a/tests/performance_tests/log_reader.py b/tests/performance_tests/log_reader.py index e4a89c4729..c5545dff5d 100644 --- a/tests/performance_tests/log_reader.py +++ b/tests/performance_tests/log_reader.py @@ -414,7 +414,7 @@ def calcTrxLatencyCpuNetStats(trxDict : dict, blockDict: dict): basicStats(float(np.min(npLatencyCpuNetList[:,2])), float(np.max(npLatencyCpuNetList[:,2])), float(np.average(npLatencyCpuNetList[:,2])), float(np.std(npLatencyCpuNetList[:,2])), len(npLatencyCpuNetList)) def createReport(guide: chainBlocksGuide, tpsTestConfig: TpsTestConfig, tpsStats: stats, blockSizeStats: stats, trxLatencyStats: basicStats, trxCpuStats: basicStats, - trxNetStats: basicStats, forkedBlocks, droppedBlocks, prodWindows: productionWindows, testStart: datetime, testFinish: datetime, argsDict: dict, completedRun: bool) -> dict: + trxNetStats: basicStats, forkedBlocks, droppedBlocks, prodWindows: productionWindows, notFound: dict, testStart: datetime, testFinish: datetime, argsDict: dict, completedRun: bool) -> dict: report = {} report['completedRun'] = completedRun report['testStart'] = testStart @@ -432,6 +432,7 @@ def createReport(guide: chainBlocksGuide, tpsTestConfig: TpsTestConfig, tpsStats report['Analysis']['TrxNet'] = asdict(trxNetStats) report['Analysis']['DroppedBlocks'] = droppedBlocks report['Analysis']['DroppedBlocksCount'] = len(droppedBlocks) + report['Analysis']['DroppedTransactions'] = len(notFound) report['Analysis']['ProductionWindowsTotal'] = prodWindows.totalWindows report['Analysis']['ProductionWindowsAverageSize'] = prodWindows.averageWindowSize report['Analysis']['ProductionWindowsMissed'] = prodWindows.missedWindows @@ -494,7 +495,8 @@ def calcAndReport(data: chainData, tpsTestConfig: TpsTestConfig, artifacts: Arti finish = datetime.utcnow() report = createReport(guide=guide, tpsTestConfig=tpsTestConfig, tpsStats=tpsStats, blockSizeStats=blkSizeStats, trxLatencyStats=trxLatencyStats, - 
trxCpuStats=trxCpuStats, trxNetStats=trxNetStats, forkedBlocks=data.forkedBlocks, droppedBlocks=data.droppedBlocks, prodWindows=prodWindows, testStart=start, testFinish=finish, argsDict=argsDict, completedRun=completedRun) + trxCpuStats=trxCpuStats, trxNetStats=trxNetStats, forkedBlocks=data.forkedBlocks, droppedBlocks=data.droppedBlocks, + prodWindows=prodWindows, notFound=notFound, testStart=start, testFinish=finish, argsDict=argsDict, completedRun=completedRun) return report def exportReportAsJSON(report: json, exportPath): diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index f6ada20cf7..8692b2a780 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -441,10 +441,10 @@ def createBaseArgumentParser(): choices=["mapped", "heap", "locked"], default="mapped") ptbBaseParserGroup.add_argument("--net-threads", type=int, help="Number of worker threads in net_plugin thread pool", default=2) ptbBaseParserGroup.add_argument("--disable-subjective-billing", type=bool, help="Disable subjective CPU billing for API/P2P transactions", default=True) - ptbBaseParserGroup.add_argument("--last-block-time-offset-us", type=int, help="Offset of last block producing time in microseconds. Valid range 0 .. -block_time_interval.", default=-200000) - ptbBaseParserGroup.add_argument("--produce-time-offset-us", type=int, help="Offset of non last block producing time in microseconds. Valid range 0 .. -block_time_interval.", default=-200000) - ptbBaseParserGroup.add_argument("--cpu-effort-percent", type=int, help="Percentage of cpu block production time used to produce block. Whole number percentages, e.g. 80 for 80%%", default=80) - ptbBaseParserGroup.add_argument("--last-block-cpu-effort-percent", type=int, help="Percentage of cpu block production time used to produce last block. Whole number percentages, e.g. 
80 for 80%%", default=80) + ptbBaseParserGroup.add_argument("--last-block-time-offset-us", type=int, help="Offset of last block producing time in microseconds. Valid range 0 .. -block_time_interval.", default=0) + ptbBaseParserGroup.add_argument("--produce-time-offset-us", type=int, help="Offset of non last block producing time in microseconds. Valid range 0 .. -block_time_interval.", default=0) + ptbBaseParserGroup.add_argument("--cpu-effort-percent", type=int, help="Percentage of cpu block production time used to produce block. Whole number percentages, e.g. 80 for 80%%", default=100) + ptbBaseParserGroup.add_argument("--last-block-cpu-effort-percent", type=int, help="Percentage of cpu block production time used to produce last block. Whole number percentages, e.g. 80 for 80%%", default=100) ptbBaseParserGroup.add_argument("--producer-threads", type=int, help="Number of worker threads in producer thread pool", default=2) ptbBaseParserGroup.add_argument("--http-max-response-time-ms", type=int, help="Maximum time for processing a request, -1 for unlimited", default=990000) ptbBaseParserGroup.add_argument("--del-perf-logs", help="Whether to delete performance test specific logs.", action='store_true') @@ -488,7 +488,7 @@ def main(): lbto = args.last_block_time_offset_us lbcep = args.last_block_cpu_effort_percent - if args.p > 1: + if args.p > 1 and lbto == 0 and lbcep == 100: lbto = -200000 lbcep = 80 producerPluginArgs = ProducerPluginArgs(disableSubjectiveBilling=args.disable_subjective_billing, From 18d654f3bc26f4b999a48b875988aec119cd713f Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Thu, 12 Jan 2023 16:53:29 -0600 Subject: [PATCH 070/178] commit print statement that was unsaved in past commit --- tests/performance_tests/performance_test_basic.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 8692b2a780..81ee058313 100755 --- 
a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -489,6 +489,7 @@ def main(): lbto = args.last_block_time_offset_us lbcep = args.last_block_cpu_effort_percent if args.p > 1 and lbto == 0 and lbcep == 100: + print("Overriding defaults for last_block_time_offset_us and last_block_cpu_effort_percent to ensure proper production windows.") lbto = -200000 lbcep = 80 producerPluginArgs = ProducerPluginArgs(disableSubjectiveBilling=args.disable_subjective_billing, From bda0d82103ae257d65022783e0a208e849df485f Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Thu, 12 Jan 2023 17:04:07 -0600 Subject: [PATCH 071/178] count first and last window as complete windows if containing 12 blocks, otherwise ignore them. --- tests/performance_tests/log_reader.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/performance_tests/log_reader.py b/tests/performance_tests/log_reader.py index c5545dff5d..d14b2c1e3a 100644 --- a/tests/performance_tests/log_reader.py +++ b/tests/performance_tests/log_reader.py @@ -284,6 +284,10 @@ def calcProductionWindows(prodDict: dict): if v.blockCount < COMPLETEPRODUCTIONWINDOWSIZE: prodWindows.missedWindows += 1 totalBlocksForAverage += v.blockCount + else: + if v.blockCount == COMPLETEPRODUCTIONWINDOWSIZE: + prodWindows.totalWindows += 1 + totalBlocksForAverage += v.blockCount if prodWindows.totalWindows <= 0: prodWindows.totalWindows = 0 prodWindows.averageWindowSize = 0 From 1334957ae5667edd9f4590bd1ac4b47ea83aa14c Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 13 Jan 2023 10:13:55 -0600 Subject: [PATCH 072/178] Fixup from Merge branch 'main' into feature_performance_harness_stage_2 --- tests/TestHarness/Node.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/TestHarness/Node.py b/tests/TestHarness/Node.py index 09d6ee7815..9582e74793 100644 --- a/tests/TestHarness/Node.py +++ b/tests/TestHarness/Node.py @@ -1668,7 +1668,7 @@ def 
launchTrxGenerators(self, tpsPerGenerator: int, numGenerators: int, duration tpsTrxGensConfig = TpsTrxGensConfig(targetTps=targetTps, tpsLimitPerGenerator=tpsLimitPerGenerator) trxGenLauncher = TransactionGeneratorsLauncher(chainId=chainId, lastIrreversibleBlockId=lib_id, - handlerAcct=contractOwnerAcctName, accts=','.join(map(str, acctNamesList)), + contractOwnerAccount=contractOwnerAcctName, accts=','.join(map(str, acctNamesList)), privateKeys=','.join(map(str, acctPrivKeysList)), trxGenDurationSec=durationSec, logDir=Utils.DataDir, peerEndpoint=self.host, port=p2pListenPort, tpsTrxGensConfig=tpsTrxGensConfig) From 07436866587227dabbb6b79c98e8eaf5d86f4f5a Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 13 Jan 2023 10:57:07 -0600 Subject: [PATCH 073/178] Another fixup from Merge branch 'main' into feature_performance_harness_stage_2 --- tests/TestHarness/Node.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/TestHarness/Node.py b/tests/TestHarness/Node.py index 9582e74793..8458617cf3 100644 --- a/tests/TestHarness/Node.py +++ b/tests/TestHarness/Node.py @@ -1657,7 +1657,8 @@ def analyzeProduction(self, specificBlockNum=None, thresholdMs=500): return blockAnalysis - def launchTrxGenerators(self, tpsPerGenerator: int, numGenerators: int, durationSec: int, contractOwnerAcctName: str, acctNamesList: list, acctPrivKeysList: list, p2pListenPort: int, waitToComplete:bool=False): + def launchTrxGenerators(self, tpsPerGenerator: int, numGenerators: int, durationSec: int, contractOwnerAcctName: str, acctNamesList: list, + acctPrivKeysList: list, p2pListenPort: int, waitToComplete:bool=False, abiFile=None, actionName=None, actionData=None): Utils.Print("Configure txn generators") info = self.getInfo() chainId = info['chain_id'] @@ -1670,7 +1671,8 @@ def launchTrxGenerators(self, tpsPerGenerator: int, numGenerators: int, duration trxGenLauncher = TransactionGeneratorsLauncher(chainId=chainId, lastIrreversibleBlockId=lib_id, 
contractOwnerAccount=contractOwnerAcctName, accts=','.join(map(str, acctNamesList)), privateKeys=','.join(map(str, acctPrivKeysList)), trxGenDurationSec=durationSec, - logDir=Utils.DataDir, peerEndpoint=self.host, port=p2pListenPort, tpsTrxGensConfig=tpsTrxGensConfig) + logDir=Utils.DataDir, abiFile=abiFile, actionName=actionName, actionData=actionData, peerEndpoint=self.host, + port=p2pListenPort, tpsTrxGensConfig=tpsTrxGensConfig) Utils.Print("Launch txn generators and start generating/sending transactions") trxGenLauncher.launch(waitToComplete=waitToComplete) From a70daf9c87b35878d823148f7cf2385855400a0b Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Fri, 13 Jan 2023 11:59:47 -0600 Subject: [PATCH 074/178] remove sorted keys and regenerate report for readme to reduce diff --- tests/performance_tests/README.md | 899 +++++++++++++------------- tests/performance_tests/log_reader.py | 2 +- 2 files changed, 451 insertions(+), 450 deletions(-) diff --git a/tests/performance_tests/README.md b/tests/performance_tests/README.md index bceb4ff502..0ae6e0220f 100644 --- a/tests/performance_tests/README.md +++ b/tests/performance_tests/README.md @@ -819,581 +819,582 @@ The Performance Test Basic generates, by default, a report that details results ``` json { + "completedRun": true, + "testStart": "2023-01-13T17:51:38.885663", + "testFinish": "2023-01-13T17:54:16.588906", "Analysis": { "BlockSize": { + "min": 1152, + "max": 2688, "avg": 1920.0, + "sigma": 316.1805963235654, "emptyBlocks": 0, - "max": 1920, - "min": 1920, - "numBlocks": 177, - "sigma": 0.0 + "numBlocks": 177 }, "BlocksGuide": { - "configAddlDropCnt": 2, "firstBlockNum": 2, - "lastBlockNum": 301, + "lastBlockNum": 316, + "totalBlocks": 315, + "testStartBlockNum": 129, + "testEndBlockNum": 315, + "setupBlocksCnt": 127, + "tearDownBlocksCnt": 1, "leadingEmptyBlocksCnt": 1, - "setupBlocksCnt": 112, - "tearDownBlocksCnt": 0, - "testAnalysisBlockCnt": 177, - "testEndBlockNum": 301, - "testStartBlockNum": 
114, - "totalBlocks": 300, - "trailingEmptyBlocksCnt": 6 + "trailingEmptyBlocksCnt": 5, + "configAddlDropCnt": 2, + "testAnalysisBlockCnt": 177 }, - "DroppedBlocks": {}, - "DroppedBlocksCount": 0, - "ForkedBlocks": [], - "ForksCount": 0, - "ProductionWindowsAverageSize": 0, - "ProductionWindowsMissed": 0, - "ProductionWindowsTotal": 0, "TPS": { + "min": 16, + "max": 24, "avg": 20.0, - "configTestDuration": 90, - "configTps": 20, + "sigma": 1.651445647689541, "emptyBlocks": 0, - "generatorCount": 2, - "max": 20, - "min": 20, "numBlocks": 177, - "sigma": 0.0, + "configTps": 20, + "configTestDuration": 90, "tpsPerGenerator": [ 10, 10 - ] + ], + "generatorCount": 2 }, "TrxCPU": { - "avg": 89.22111111111111, - "max": 404.0, - "min": 7.0, - "samples": 1800, - "sigma": 52.66483117992383 + "min": 8.0, + "max": 225.0, + "avg": 65.61944444444444, + "sigma": 39.33333558929686, + "samples": 1800 }, "TrxLatency": { - "avg": 0.47760056018829344, - "max": 0.6789999008178711, - "min": 0.2760000228881836, - "samples": 1800, - "sigma": 0.14143152148157506 + "min": 0.09500002861022949, + "max": 0.6970000267028809, + "avg": 0.3131494364473555, + "sigma": 0.15184769957733368, + "samples": 1800 }, "TrxNet": { - "avg": 24.0, - "max": 24.0, "min": 24.0, - "samples": 1800, - "sigma": 0.0 - } + "max": 24.0, + "avg": 24.0, + "sigma": 0.0, + "samples": 1800 + }, + "DroppedBlocks": {}, + "DroppedBlocksCount": 0, + "DroppedTransactions": 0, + "ProductionWindowsTotal": 14, + "ProductionWindowsAverageSize": 12.0, + "ProductionWindowsMissed": 0, + "ForkedBlocks": [], + "ForksCount": 0 }, "args": { - "_killEosInstances": true, - "_killWallet": true, - "_totalNodes": 2, - "delPerfLogs": false, - "delReport": false, - "delay": 1, + "killAll": true, "dontKill": false, + "keepLogs": true, "dumpErrorDetails": false, - "expectedTransactionsSent": 1800, + "delay": 1, + "nodesFile": null, + "verbose": true, + "_killEosInstances": true, + "_killWallet": true, + "pnodes": 2, + "totalNodes": 1, + "topo": 
"mesh", "extraNodeosArgs": { "chainPluginArgs": { - "_abiSerializerMaxTimeMsNodeosArg": "--abi-serializer-max-time-ms", - "_abiSerializerMaxTimeMsNodeosDefault": 15, - "_actionBlacklistNodeosArg": "--action-blacklist", - "_actionBlacklistNodeosDefault": null, - "_actorBlacklistNodeosArg": "--actor-blacklist", - "_actorBlacklistNodeosDefault": null, - "_actorWhitelistNodeosArg": "--actor-whitelist", - "_actorWhitelistNodeosDefault": null, - "_apiAcceptTransactionsNodeosArg": "--api-accept-transactions", - "_apiAcceptTransactionsNodeosDefault": 1, - "_blockLogRetainBlocksNodeosArg": "--block-log-retain-blocks", - "_blockLogRetainBlocksNodeosDefault": null, - "_blocksDirNodeosArg": "--blocks-dir", - "_blocksDirNodeosDefault": "\"blocks\"", - "_chainStateDbGuardSizeMbNodeosArg": "--chain-state-db-guard-size-mb", - "_chainStateDbGuardSizeMbNodeosDefault": 128, - "_chainStateDbSizeMbNodeosArg": "--chain-state-db-size-mb", - "_chainStateDbSizeMbNodeosDefault": 1024, - "_chainThreadsNodeosArg": "--chain-threads", - "_chainThreadsNodeosDefault": 2, - "_checkpointNodeosArg": "--checkpoint", - "_checkpointNodeosDefault": null, - "_contractBlacklistNodeosArg": "--contract-blacklist", - "_contractBlacklistNodeosDefault": null, - "_contractWhitelistNodeosArg": "--contract-whitelist", - "_contractWhitelistNodeosDefault": null, - "_contractsConsoleNodeosArg": "--contracts-console", - "_contractsConsoleNodeosDefault": false, - "_databaseMapModeNodeosArg": "--database-map-mode", - "_databaseMapModeNodeosDefault": "mapped", - "_deepMindNodeosArg": "--deep-mind", - "_deepMindNodeosDefault": false, - "_deleteAllBlocksNodeosArg": "--delete-all-blocks", - "_deleteAllBlocksNodeosDefault": false, - "_disableRamBillingNotifyChecksNodeosArg": "--disable-ram-billing-notify-checks", - "_disableRamBillingNotifyChecksNodeosDefault": false, - "_disableReplayOptsNodeosArg": "--disable-replay-opts", - "_disableReplayOptsNodeosDefault": false, - "_enableAccountQueriesNodeosArg": 
"--enable-account-queries", - "_enableAccountQueriesNodeosDefault": 0, - "_eosVmOcCacheSizeMbNodeosArg": "--eos-vm-oc-cache-size-mb", - "_eosVmOcCacheSizeMbNodeosDefault": 1024, - "_eosVmOcCompileThreadsNodeosArg": "--eos-vm-oc-compile-threads", - "_eosVmOcCompileThreadsNodeosDefault": 1, - "_eosVmOcEnableNodeosArg": "--eos-vm-oc-enable", - "_eosVmOcEnableNodeosDefault": false, - "_extractBuildInfoNodeosArg": "--extract-build-info", - "_extractBuildInfoNodeosDefault": null, - "_extractGenesisJsonNodeosArg": "--extract-genesis-json", - "_extractGenesisJsonNodeosDefault": null, - "_forceAllChecksNodeosArg": "--force-all-checks", - "_forceAllChecksNodeosDefault": false, - "_genesisJsonNodeosArg": "--genesis-json", - "_genesisJsonNodeosDefault": null, - "_genesisTimestampNodeosArg": "--genesis-timestamp", - "_genesisTimestampNodeosDefault": null, - "_hardReplayBlockchainNodeosArg": "--hard-replay-blockchain", - "_hardReplayBlockchainNodeosDefault": false, - "_integrityHashOnStartNodeosArg": "--integrity-hash-on-start", - "_integrityHashOnStartNodeosDefault": false, - "_integrityHashOnStopNodeosArg": "--integrity-hash-on-stop", - "_integrityHashOnStopNodeosDefault": false, - "_keyBlacklistNodeosArg": "--key-blacklist", - "_keyBlacklistNodeosDefault": null, - "_maxNonprivilegedInlineActionSizeNodeosArg": "--max-nonprivileged-inline-action-size", - "_maxNonprivilegedInlineActionSizeNodeosDefault": 4096, - "_maximumVariableSignatureLengthNodeosArg": "--maximum-variable-signature-length", - "_maximumVariableSignatureLengthNodeosDefault": 16384, - "_pluginName": "chain_plugin", "_pluginNamespace": "eosio", - "_printBuildInfoNodeosArg": "--print-build-info", - "_printBuildInfoNodeosDefault": false, - "_printGenesisJsonNodeosArg": "--print-genesis-json", - "_printGenesisJsonNodeosDefault": false, - "_profileAccountNodeosArg": "--profile-account", - "_profileAccountNodeosDefault": null, - "_protocolFeaturesDirNodeosArg": "--protocol-features-dir", - 
"_protocolFeaturesDirNodeosDefault": "\"protocol_features\"", - "_readModeNodeosArg": "--read-mode", - "_readModeNodeosDefault": "head", - "_replayBlockchainNodeosArg": "--replay-blockchain", - "_replayBlockchainNodeosDefault": false, - "_senderBypassWhiteblacklistNodeosArg": "--sender-bypass-whiteblacklist", - "_senderBypassWhiteblacklistNodeosDefault": null, - "_signatureCpuBillablePctNodeosArg": "--signature-cpu-billable-pct", - "_signatureCpuBillablePctNodeosDefault": 50, - "_snapshotNodeosArg": "--snapshot", - "_snapshotNodeosDefault": null, - "_stateDirNodeosArg": "--state-dir", + "_pluginName": "chain_plugin", + "blocksDir": null, + "_blocksDirNodeosDefault": "\"blocks\"", + "_blocksDirNodeosArg": "--blocks-dir", + "stateDir": null, "_stateDirNodeosDefault": "\"state\"", - "_terminateAtBlockNodeosArg": "--terminate-at-block", - "_terminateAtBlockNodeosDefault": 0, - "_transactionFinalityStatusFailureDurationSecNodeosArg": "--transaction-finality-status-failure-duration-sec", - "_transactionFinalityStatusFailureDurationSecNodeosDefault": 180, - "_transactionFinalityStatusMaxStorageSizeGbNodeosArg": "--transaction-finality-status-max-storage-size-gb", - "_transactionFinalityStatusMaxStorageSizeGbNodeosDefault": null, - "_transactionFinalityStatusSuccessDurationSecNodeosArg": "--transaction-finality-status-success-duration-sec", - "_transactionFinalityStatusSuccessDurationSecNodeosDefault": 180, - "_transactionRetryIntervalSecNodeosArg": "--transaction-retry-interval-sec", - "_transactionRetryIntervalSecNodeosDefault": 20, - "_transactionRetryMaxExpirationSecNodeosArg": "--transaction-retry-max-expiration-sec", - "_transactionRetryMaxExpirationSecNodeosDefault": 120, - "_transactionRetryMaxStorageSizeGbNodeosArg": "--transaction-retry-max-storage-size-gb", - "_transactionRetryMaxStorageSizeGbNodeosDefault": null, - "_truncateAtBlockNodeosArg": "--truncate-at-block", - "_truncateAtBlockNodeosDefault": 0, - "_trustedProducerNodeosArg": "--trusted-producer", - 
"_trustedProducerNodeosDefault": null, - "_validationModeNodeosArg": "--validation-mode", - "_validationModeNodeosDefault": "full", - "_wasmRuntimeNodeosArg": "--wasm-runtime", + "_stateDirNodeosArg": "--state-dir", + "protocolFeaturesDir": null, + "_protocolFeaturesDirNodeosDefault": "\"protocol_features\"", + "_protocolFeaturesDirNodeosArg": "--protocol-features-dir", + "checkpoint": null, + "_checkpointNodeosDefault": null, + "_checkpointNodeosArg": "--checkpoint", + "wasmRuntime": null, "_wasmRuntimeNodeosDefault": "eos-vm-jit", + "_wasmRuntimeNodeosArg": "--wasm-runtime", + "profileAccount": null, + "_profileAccountNodeosDefault": null, + "_profileAccountNodeosArg": "--profile-account", "abiSerializerMaxTimeMs": null, - "actionBlacklist": null, - "actorBlacklist": null, - "actorWhitelist": null, - "apiAcceptTransactions": null, - "blockLogRetainBlocks": null, - "blocksDir": null, - "chainStateDbGuardSizeMb": null, + "_abiSerializerMaxTimeMsNodeosDefault": 15, + "_abiSerializerMaxTimeMsNodeosArg": "--abi-serializer-max-time-ms", "chainStateDbSizeMb": 10240, + "_chainStateDbSizeMbNodeosDefault": 1024, + "_chainStateDbSizeMbNodeosArg": "--chain-state-db-size-mb", + "chainStateDbGuardSizeMb": null, + "_chainStateDbGuardSizeMbNodeosDefault": 128, + "_chainStateDbGuardSizeMbNodeosArg": "--chain-state-db-guard-size-mb", + "signatureCpuBillablePct": 0, + "_signatureCpuBillablePctNodeosDefault": 50, + "_signatureCpuBillablePctNodeosArg": "--signature-cpu-billable-pct", "chainThreads": 2, - "checkpoint": null, - "contractBlacklist": null, - "contractWhitelist": null, + "_chainThreadsNodeosDefault": 2, + "_chainThreadsNodeosArg": "--chain-threads", "contractsConsole": null, - "databaseMapMode": "mapped", + "_contractsConsoleNodeosDefault": false, + "_contractsConsoleNodeosArg": "--contracts-console", "deepMind": null, - "deleteAllBlocks": null, + "_deepMindNodeosDefault": false, + "_deepMindNodeosArg": "--deep-mind", + "actorWhitelist": null, + 
"_actorWhitelistNodeosDefault": null, + "_actorWhitelistNodeosArg": "--actor-whitelist", + "actorBlacklist": null, + "_actorBlacklistNodeosDefault": null, + "_actorBlacklistNodeosArg": "--actor-blacklist", + "contractWhitelist": null, + "_contractWhitelistNodeosDefault": null, + "_contractWhitelistNodeosArg": "--contract-whitelist", + "contractBlacklist": null, + "_contractBlacklistNodeosDefault": null, + "_contractBlacklistNodeosArg": "--contract-blacklist", + "actionBlacklist": null, + "_actionBlacklistNodeosDefault": null, + "_actionBlacklistNodeosArg": "--action-blacklist", + "keyBlacklist": null, + "_keyBlacklistNodeosDefault": null, + "_keyBlacklistNodeosArg": "--key-blacklist", + "senderBypassWhiteblacklist": null, + "_senderBypassWhiteblacklistNodeosDefault": null, + "_senderBypassWhiteblacklistNodeosArg": "--sender-bypass-whiteblacklist", + "readMode": null, + "_readModeNodeosDefault": "head", + "_readModeNodeosArg": "--read-mode", + "apiAcceptTransactions": null, + "_apiAcceptTransactionsNodeosDefault": 1, + "_apiAcceptTransactionsNodeosArg": "--api-accept-transactions", + "validationMode": null, + "_validationModeNodeosDefault": "full", + "_validationModeNodeosArg": "--validation-mode", "disableRamBillingNotifyChecks": null, - "disableReplayOpts": null, - "enableAccountQueries": null, + "_disableRamBillingNotifyChecksNodeosDefault": false, + "_disableRamBillingNotifyChecksNodeosArg": "--disable-ram-billing-notify-checks", + "maximumVariableSignatureLength": null, + "_maximumVariableSignatureLengthNodeosDefault": 16384, + "_maximumVariableSignatureLengthNodeosArg": "--maximum-variable-signature-length", + "trustedProducer": null, + "_trustedProducerNodeosDefault": null, + "_trustedProducerNodeosArg": "--trusted-producer", + "databaseMapMode": "mapped", + "_databaseMapModeNodeosDefault": "mapped", + "_databaseMapModeNodeosArg": "--database-map-mode", "eosVmOcCacheSizeMb": null, + "_eosVmOcCacheSizeMbNodeosDefault": 1024, + "_eosVmOcCacheSizeMbNodeosArg": 
"--eos-vm-oc-cache-size-mb", "eosVmOcCompileThreads": null, + "_eosVmOcCompileThreadsNodeosDefault": 1, + "_eosVmOcCompileThreadsNodeosArg": "--eos-vm-oc-compile-threads", "eosVmOcEnable": null, - "extractBuildInfo": null, - "extractGenesisJson": null, - "forceAllChecks": null, - "genesisJson": null, - "genesisTimestamp": null, - "hardReplayBlockchain": null, - "integrityHashOnStart": null, - "integrityHashOnStop": null, - "keyBlacklist": null, + "_eosVmOcEnableNodeosDefault": false, + "_eosVmOcEnableNodeosArg": "--eos-vm-oc-enable", + "enableAccountQueries": null, + "_enableAccountQueriesNodeosDefault": 0, + "_enableAccountQueriesNodeosArg": "--enable-account-queries", "maxNonprivilegedInlineActionSize": null, - "maximumVariableSignatureLength": null, - "printBuildInfo": null, - "printGenesisJson": null, - "profileAccount": null, - "protocolFeaturesDir": null, - "readMode": null, - "replayBlockchain": null, - "senderBypassWhiteblacklist": null, - "signatureCpuBillablePct": 0, - "snapshot": null, - "stateDir": null, - "terminateAtBlock": null, - "transactionFinalityStatusFailureDurationSec": null, - "transactionFinalityStatusMaxStorageSizeGb": null, - "transactionFinalityStatusSuccessDurationSec": null, + "_maxNonprivilegedInlineActionSizeNodeosDefault": 4096, + "_maxNonprivilegedInlineActionSizeNodeosArg": "--max-nonprivileged-inline-action-size", + "transactionRetryMaxStorageSizeGb": null, + "_transactionRetryMaxStorageSizeGbNodeosDefault": null, + "_transactionRetryMaxStorageSizeGbNodeosArg": "--transaction-retry-max-storage-size-gb", "transactionRetryIntervalSec": null, + "_transactionRetryIntervalSecNodeosDefault": 20, + "_transactionRetryIntervalSecNodeosArg": "--transaction-retry-interval-sec", "transactionRetryMaxExpirationSec": null, - "transactionRetryMaxStorageSizeGb": null, + "_transactionRetryMaxExpirationSecNodeosDefault": 120, + "_transactionRetryMaxExpirationSecNodeosArg": "--transaction-retry-max-expiration-sec", + 
"transactionFinalityStatusMaxStorageSizeGb": null, + "_transactionFinalityStatusMaxStorageSizeGbNodeosDefault": null, + "_transactionFinalityStatusMaxStorageSizeGbNodeosArg": "--transaction-finality-status-max-storage-size-gb", + "transactionFinalityStatusSuccessDurationSec": null, + "_transactionFinalityStatusSuccessDurationSecNodeosDefault": 180, + "_transactionFinalityStatusSuccessDurationSecNodeosArg": "--transaction-finality-status-success-duration-sec", + "transactionFinalityStatusFailureDurationSec": null, + "_transactionFinalityStatusFailureDurationSecNodeosDefault": 180, + "_transactionFinalityStatusFailureDurationSecNodeosArg": "--transaction-finality-status-failure-duration-sec", + "integrityHashOnStart": null, + "_integrityHashOnStartNodeosDefault": false, + "_integrityHashOnStartNodeosArg": "--integrity-hash-on-start", + "integrityHashOnStop": null, + "_integrityHashOnStopNodeosDefault": false, + "_integrityHashOnStopNodeosArg": "--integrity-hash-on-stop", + "blockLogRetainBlocks": null, + "_blockLogRetainBlocksNodeosDefault": null, + "_blockLogRetainBlocksNodeosArg": "--block-log-retain-blocks", + "genesisJson": null, + "_genesisJsonNodeosDefault": null, + "_genesisJsonNodeosArg": "--genesis-json", + "genesisTimestamp": null, + "_genesisTimestampNodeosDefault": null, + "_genesisTimestampNodeosArg": "--genesis-timestamp", + "printGenesisJson": null, + "_printGenesisJsonNodeosDefault": false, + "_printGenesisJsonNodeosArg": "--print-genesis-json", + "extractGenesisJson": null, + "_extractGenesisJsonNodeosDefault": null, + "_extractGenesisJsonNodeosArg": "--extract-genesis-json", + "printBuildInfo": null, + "_printBuildInfoNodeosDefault": false, + "_printBuildInfoNodeosArg": "--print-build-info", + "extractBuildInfo": null, + "_extractBuildInfoNodeosDefault": null, + "_extractBuildInfoNodeosArg": "--extract-build-info", + "forceAllChecks": null, + "_forceAllChecksNodeosDefault": false, + "_forceAllChecksNodeosArg": "--force-all-checks", + 
"disableReplayOpts": null, + "_disableReplayOptsNodeosDefault": false, + "_disableReplayOptsNodeosArg": "--disable-replay-opts", + "replayBlockchain": null, + "_replayBlockchainNodeosDefault": false, + "_replayBlockchainNodeosArg": "--replay-blockchain", + "hardReplayBlockchain": null, + "_hardReplayBlockchainNodeosDefault": false, + "_hardReplayBlockchainNodeosArg": "--hard-replay-blockchain", + "deleteAllBlocks": null, + "_deleteAllBlocksNodeosDefault": false, + "_deleteAllBlocksNodeosArg": "--delete-all-blocks", "truncateAtBlock": null, - "trustedProducer": null, - "validationMode": null, - "wasmRuntime": null + "_truncateAtBlockNodeosDefault": 0, + "_truncateAtBlockNodeosArg": "--truncate-at-block", + "terminateAtBlock": null, + "_terminateAtBlockNodeosDefault": 0, + "_terminateAtBlockNodeosArg": "--terminate-at-block", + "snapshot": null, + "_snapshotNodeosDefault": null, + "_snapshotNodeosArg": "--snapshot" }, "httpClientPluginArgs": { - "_httpsClientRootCertNodeosArg": "--https-client-root-cert", - "_httpsClientRootCertNodeosDefault": null, - "_httpsClientValidatePeersNodeosArg": "--https-client-validate-peers", - "_httpsClientValidatePeersNodeosDefault": 1, - "_pluginName": "http_client_plugin", "_pluginNamespace": "eosio", + "_pluginName": "http_client_plugin", "httpsClientRootCert": null, - "httpsClientValidatePeers": null + "_httpsClientRootCertNodeosDefault": null, + "_httpsClientRootCertNodeosArg": "--https-client-root-cert", + "httpsClientValidatePeers": null, + "_httpsClientValidatePeersNodeosDefault": 1, + "_httpsClientValidatePeersNodeosArg": "--https-client-validate-peers" }, "httpPluginArgs": { - "_accessControlAllowCredentialsNodeosArg": "--access-control-allow-credentials", - "_accessControlAllowCredentialsNodeosDefault": false, - "_accessControlAllowHeadersNodeosArg": "--access-control-allow-headers", - "_accessControlAllowHeadersNodeosDefault": null, - "_accessControlAllowOriginNodeosArg": "--access-control-allow-origin", - 
"_accessControlAllowOriginNodeosDefault": null, - "_accessControlMaxAgeNodeosArg": "--access-control-max-age", - "_accessControlMaxAgeNodeosDefault": null, - "_httpAliasNodeosArg": "--http-alias", - "_httpAliasNodeosDefault": null, - "_httpKeepAliveNodeosArg": "--http-keep-alive", - "_httpKeepAliveNodeosDefault": 1, - "_httpMaxBytesInFlightMbNodeosArg": "--http-max-bytes-in-flight-mb", - "_httpMaxBytesInFlightMbNodeosDefault": 500, - "_httpMaxInFlightRequestsNodeosArg": "--http-max-in-flight-requests", - "_httpMaxInFlightRequestsNodeosDefault": -1, - "_httpMaxResponseTimeMsNodeosArg": "--http-max-response-time-ms", - "_httpMaxResponseTimeMsNodeosDefault": 30, - "_httpServerAddressNodeosArg": "--http-server-address", + "_pluginNamespace": "eosio", + "_pluginName": "http_plugin", + "unixSocketPath": null, + "_unixSocketPathNodeosDefault": null, + "_unixSocketPathNodeosArg": "--unix-socket-path", + "httpServerAddress": null, "_httpServerAddressNodeosDefault": "127.0.0.1:8888", - "_httpThreadsNodeosArg": "--http-threads", - "_httpThreadsNodeosDefault": 2, - "_httpValidateHostNodeosArg": "--http-validate-host", - "_httpValidateHostNodeosDefault": 1, - "_httpsCertificateChainFileNodeosArg": "--https-certificate-chain-file", + "_httpServerAddressNodeosArg": "--http-server-address", + "httpsServerAddress": null, + "_httpsServerAddressNodeosDefault": null, + "_httpsServerAddressNodeosArg": "--https-server-address", + "httpsCertificateChainFile": null, "_httpsCertificateChainFileNodeosDefault": null, - "_httpsEcdhCurveNodeosArg": "--https-ecdh-curve", - "_httpsEcdhCurveNodeosDefault": "secp384r1", - "_httpsPrivateKeyFileNodeosArg": "--https-private-key-file", + "_httpsCertificateChainFileNodeosArg": "--https-certificate-chain-file", + "httpsPrivateKeyFile": null, "_httpsPrivateKeyFileNodeosDefault": null, - "_httpsServerAddressNodeosArg": "--https-server-address", - "_httpsServerAddressNodeosDefault": null, - "_maxBodySizeNodeosArg": "--max-body-size", - 
"_maxBodySizeNodeosDefault": 2097152, - "_pluginName": "http_plugin", - "_pluginNamespace": "eosio", - "_unixSocketPathNodeosArg": "--unix-socket-path", - "_unixSocketPathNodeosDefault": null, - "_verboseHttpErrorsNodeosArg": "--verbose-http-errors", - "_verboseHttpErrorsNodeosDefault": false, - "accessControlAllowCredentials": null, - "accessControlAllowHeaders": null, + "_httpsPrivateKeyFileNodeosArg": "--https-private-key-file", + "httpsEcdhCurve": null, + "_httpsEcdhCurveNodeosDefault": "secp384r1", + "_httpsEcdhCurveNodeosArg": "--https-ecdh-curve", "accessControlAllowOrigin": null, + "_accessControlAllowOriginNodeosDefault": null, + "_accessControlAllowOriginNodeosArg": "--access-control-allow-origin", + "accessControlAllowHeaders": null, + "_accessControlAllowHeadersNodeosDefault": null, + "_accessControlAllowHeadersNodeosArg": "--access-control-allow-headers", "accessControlMaxAge": null, - "httpAlias": null, - "httpKeepAlive": null, + "_accessControlMaxAgeNodeosDefault": null, + "_accessControlMaxAgeNodeosArg": "--access-control-max-age", + "accessControlAllowCredentials": null, + "_accessControlAllowCredentialsNodeosDefault": false, + "_accessControlAllowCredentialsNodeosArg": "--access-control-allow-credentials", + "maxBodySize": null, + "_maxBodySizeNodeosDefault": 2097152, + "_maxBodySizeNodeosArg": "--max-body-size", "httpMaxBytesInFlightMb": null, + "_httpMaxBytesInFlightMbNodeosDefault": 500, + "_httpMaxBytesInFlightMbNodeosArg": "--http-max-bytes-in-flight-mb", "httpMaxInFlightRequests": null, + "_httpMaxInFlightRequestsNodeosDefault": -1, + "_httpMaxInFlightRequestsNodeosArg": "--http-max-in-flight-requests", "httpMaxResponseTimeMs": 990000, - "httpServerAddress": null, - "httpThreads": null, + "_httpMaxResponseTimeMsNodeosDefault": 30, + "_httpMaxResponseTimeMsNodeosArg": "--http-max-response-time-ms", + "verboseHttpErrors": null, + "_verboseHttpErrorsNodeosDefault": false, + "_verboseHttpErrorsNodeosArg": "--verbose-http-errors", 
"httpValidateHost": null, - "httpsCertificateChainFile": null, - "httpsEcdhCurve": null, - "httpsPrivateKeyFile": null, - "httpsServerAddress": null, - "maxBodySize": null, - "unixSocketPath": null, - "verboseHttpErrors": null + "_httpValidateHostNodeosDefault": 1, + "_httpValidateHostNodeosArg": "--http-validate-host", + "httpAlias": null, + "_httpAliasNodeosDefault": null, + "_httpAliasNodeosArg": "--http-alias", + "httpThreads": null, + "_httpThreadsNodeosDefault": 2, + "_httpThreadsNodeosArg": "--http-threads", + "httpKeepAlive": null, + "_httpKeepAliveNodeosDefault": 1, + "_httpKeepAliveNodeosArg": "--http-keep-alive" }, "netPluginArgs": { - "_agentNameNodeosArg": "--agent-name", - "_agentNameNodeosDefault": "EOS Test Agent", - "_allowedConnectionNodeosArg": "--allowed-connection", - "_allowedConnectionNodeosDefault": "any", - "_connectionCleanupPeriodNodeosArg": "--connection-cleanup-period", - "_connectionCleanupPeriodNodeosDefault": 30, - "_maxCleanupTimeMsecNodeosArg": "--max-cleanup-time-msec", - "_maxCleanupTimeMsecNodeosDefault": 10, - "_maxClientsNodeosArg": "--max-clients", - "_maxClientsNodeosDefault": 25, - "_netThreadsNodeosArg": "--net-threads", - "_netThreadsNodeosDefault": 2, - "_p2pAcceptTransactionsNodeosArg": "--p2p-accept-transactions", - "_p2pAcceptTransactionsNodeosDefault": 1, - "_p2pDedupCacheExpireTimeSecNodeosArg": "--p2p-dedup-cache-expire-time-sec", - "_p2pDedupCacheExpireTimeSecNodeosDefault": 10, - "_p2pKeepaliveIntervalMsNodeosArg": "--p2p-keepalive-interval-ms", - "_p2pKeepaliveIntervalMsNodeosDefault": 10000, - "_p2pListenEndpointNodeosArg": "--p2p-listen-endpoint", + "_pluginNamespace": "eosio", + "_pluginName": "net_plugin", + "p2pListenEndpoint": null, "_p2pListenEndpointNodeosDefault": "0.0.0.0:9876", - "_p2pMaxNodesPerHostNodeosArg": "--p2p-max-nodes-per-host", - "_p2pMaxNodesPerHostNodeosDefault": 1, - "_p2pPeerAddressNodeosArg": "--p2p-peer-address", - "_p2pPeerAddressNodeosDefault": null, - "_p2pServerAddressNodeosArg": 
"--p2p-server-address", + "_p2pListenEndpointNodeosArg": "--p2p-listen-endpoint", + "p2pServerAddress": null, "_p2pServerAddressNodeosDefault": null, - "_peerKeyNodeosArg": "--peer-key", - "_peerKeyNodeosDefault": null, - "_peerLogFormatNodeosArg": "--peer-log-format", - "_peerLogFormatNodeosDefault": "[\"${_name}\" - ${_cid} ${_ip}:${_port}] ", - "_peerPrivateKeyNodeosArg": "--peer-private-key", - "_peerPrivateKeyNodeosDefault": null, - "_pluginName": "net_plugin", - "_pluginNamespace": "eosio", - "_syncFetchSpanNodeosArg": "--sync-fetch-span", - "_syncFetchSpanNodeosDefault": 100, - "_useSocketReadWatermarkNodeosArg": "--use-socket-read-watermark", - "_useSocketReadWatermarkNodeosDefault": 0, + "_p2pServerAddressNodeosArg": "--p2p-server-address", + "p2pPeerAddress": null, + "_p2pPeerAddressNodeosDefault": null, + "_p2pPeerAddressNodeosArg": "--p2p-peer-address", + "p2pMaxNodesPerHost": null, + "_p2pMaxNodesPerHostNodeosDefault": 1, + "_p2pMaxNodesPerHostNodeosArg": "--p2p-max-nodes-per-host", + "p2pAcceptTransactions": null, + "_p2pAcceptTransactionsNodeosDefault": 1, + "_p2pAcceptTransactionsNodeosArg": "--p2p-accept-transactions", "agentName": null, + "_agentNameNodeosDefault": "EOS Test Agent", + "_agentNameNodeosArg": "--agent-name", "allowedConnection": null, + "_allowedConnectionNodeosDefault": "any", + "_allowedConnectionNodeosArg": "--allowed-connection", + "peerKey": null, + "_peerKeyNodeosDefault": null, + "_peerKeyNodeosArg": "--peer-key", + "peerPrivateKey": null, + "_peerPrivateKeyNodeosDefault": null, + "_peerPrivateKeyNodeosArg": "--peer-private-key", + "maxClients": null, + "_maxClientsNodeosDefault": 25, + "_maxClientsNodeosArg": "--max-clients", "connectionCleanupPeriod": null, + "_connectionCleanupPeriodNodeosDefault": 30, + "_connectionCleanupPeriodNodeosArg": "--connection-cleanup-period", "maxCleanupTimeMsec": null, - "maxClients": null, - "netThreads": 2, - "p2pAcceptTransactions": null, + "_maxCleanupTimeMsecNodeosDefault": 10, + 
"_maxCleanupTimeMsecNodeosArg": "--max-cleanup-time-msec", "p2pDedupCacheExpireTimeSec": null, - "p2pKeepaliveIntervalMs": null, - "p2pListenEndpoint": null, - "p2pMaxNodesPerHost": null, - "p2pPeerAddress": null, - "p2pServerAddress": null, - "peerKey": null, + "_p2pDedupCacheExpireTimeSecNodeosDefault": 10, + "_p2pDedupCacheExpireTimeSecNodeosArg": "--p2p-dedup-cache-expire-time-sec", + "netThreads": 2, + "_netThreadsNodeosDefault": 2, + "_netThreadsNodeosArg": "--net-threads", + "syncFetchSpan": null, + "_syncFetchSpanNodeosDefault": 100, + "_syncFetchSpanNodeosArg": "--sync-fetch-span", + "useSocketReadWatermark": null, + "_useSocketReadWatermarkNodeosDefault": 0, + "_useSocketReadWatermarkNodeosArg": "--use-socket-read-watermark", "peerLogFormat": null, - "peerPrivateKey": null, - "syncFetchSpan": null, - "useSocketReadWatermark": null + "_peerLogFormatNodeosDefault": "[\"${_name}\" - ${_cid} ${_ip}:${_port}] ", + "_peerLogFormatNodeosArg": "--peer-log-format", + "p2pKeepaliveIntervalMs": null, + "_p2pKeepaliveIntervalMsNodeosDefault": 10000, + "_p2pKeepaliveIntervalMsNodeosArg": "--p2p-keepalive-interval-ms" }, "producerPluginArgs": { - "_cpuEffortPercentNodeosArg": "--cpu-effort-percent", - "_cpuEffortPercentNodeosDefault": 80, - "_disableSubjectiveAccountBillingNodeosArg": "--disable-subjective-account-billing", - "_disableSubjectiveAccountBillingNodeosDefault": false, - "_disableSubjectiveApiBillingNodeosArg": "--disable-subjective-api-billing", - "_disableSubjectiveApiBillingNodeosDefault": 1, - "_disableSubjectiveBillingNodeosArg": "--disable-subjective-billing", - "_disableSubjectiveBillingNodeosDefault": 1, - "_disableSubjectiveP2pBillingNodeosArg": "--disable-subjective-p2p-billing", - "_disableSubjectiveP2pBillingNodeosDefault": 1, - "_enableStaleProductionNodeosArg": "--enable-stale-production", + "_pluginNamespace": "eosio", + "_pluginName": "producer_plugin", + "enableStaleProduction": null, "_enableStaleProductionNodeosDefault": false, - 
"_greylistAccountNodeosArg": "--greylist-account", - "_greylistAccountNodeosDefault": null, - "_greylistLimitNodeosArg": "--greylist-limit", - "_greylistLimitNodeosDefault": 1000, - "_incomingDeferRatioNodeosArg": "--incoming-defer-ratio", - "_incomingDeferRatioNodeosDefault": 1, - "_incomingTransactionQueueSizeMbNodeosArg": "--incoming-transaction-queue-size-mb", - "_incomingTransactionQueueSizeMbNodeosDefault": 1024, - "_lastBlockCpuEffortPercentNodeosArg": "--last-block-cpu-effort-percent", - "_lastBlockCpuEffortPercentNodeosDefault": 80, - "_lastBlockTimeOffsetUsNodeosArg": "--last-block-time-offset-us", - "_lastBlockTimeOffsetUsNodeosDefault": -200000, - "_maxBlockCpuUsageThresholdUsNodeosArg": "--max-block-cpu-usage-threshold-us", - "_maxBlockCpuUsageThresholdUsNodeosDefault": 5000, - "_maxBlockNetUsageThresholdBytesNodeosArg": "--max-block-net-usage-threshold-bytes", - "_maxBlockNetUsageThresholdBytesNodeosDefault": 1024, - "_maxIrreversibleBlockAgeNodeosArg": "--max-irreversible-block-age", - "_maxIrreversibleBlockAgeNodeosDefault": -1, - "_maxScheduledTransactionTimePerBlockMsNodeosArg": "--max-scheduled-transaction-time-per-block-ms", - "_maxScheduledTransactionTimePerBlockMsNodeosDefault": 100, - "_maxTransactionTimeNodeosArg": "--max-transaction-time", - "_maxTransactionTimeNodeosDefault": 30, - "_pauseOnStartupNodeosArg": "--pause-on-startup", + "_enableStaleProductionNodeosArg": "--enable-stale-production", + "pauseOnStartup": null, "_pauseOnStartupNodeosDefault": false, - "_pluginName": "producer_plugin", - "_pluginNamespace": "eosio", - "_privateKeyNodeosArg": "--private-key", - "_privateKeyNodeosDefault": null, - "_produceTimeOffsetUsNodeosArg": "--produce-time-offset-us", - "_produceTimeOffsetUsNodeosDefault": 0, - "_producerNameNodeosArg": "--producer-name", + "_pauseOnStartupNodeosArg": "--pause-on-startup", + "maxTransactionTime": null, + "_maxTransactionTimeNodeosDefault": 30, + "_maxTransactionTimeNodeosArg": "--max-transaction-time", + 
"maxIrreversibleBlockAge": null, + "_maxIrreversibleBlockAgeNodeosDefault": -1, + "_maxIrreversibleBlockAgeNodeosArg": "--max-irreversible-block-age", + "producerName": null, "_producerNameNodeosDefault": null, - "_producerThreadsNodeosArg": "--producer-threads", - "_producerThreadsNodeosDefault": 2, - "_signatureProviderNodeosArg": "--signature-provider", + "_producerNameNodeosArg": "--producer-name", + "privateKey": null, + "_privateKeyNodeosDefault": null, + "_privateKeyNodeosArg": "--private-key", + "signatureProvider": null, "_signatureProviderNodeosDefault": "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV=KEY:5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3", - "_snapshotsDirNodeosArg": "--snapshots-dir", - "_snapshotsDirNodeosDefault": "\"snapshots\"", - "_subjectiveAccountDecayTimeMinutesNodeosArg": "--subjective-account-decay-time-minutes", - "_subjectiveAccountDecayTimeMinutesNodeosDefault": 1440, - "_subjectiveAccountMaxFailuresNodeosArg": "--subjective-account-max-failures", - "_subjectiveAccountMaxFailuresNodeosDefault": 3, - "_subjectiveCpuLeewayUsNodeosArg": "--subjective-cpu-leeway-us", - "_subjectiveCpuLeewayUsNodeosDefault": 31000, - "cpuEffortPercent": 80, - "disableSubjectiveAccountBilling": null, - "disableSubjectiveApiBilling": null, - "disableSubjectiveBilling": true, - "disableSubjectiveP2pBilling": null, - "enableStaleProduction": null, + "_signatureProviderNodeosArg": "--signature-provider", "greylistAccount": null, + "_greylistAccountNodeosDefault": null, + "_greylistAccountNodeosArg": "--greylist-account", "greylistLimit": null, - "incomingDeferRatio": null, - "incomingTransactionQueueSizeMb": null, - "lastBlockCpuEffortPercent": 80, + "_greylistLimitNodeosDefault": 1000, + "_greylistLimitNodeosArg": "--greylist-limit", + "produceTimeOffsetUs": 0, + "_produceTimeOffsetUsNodeosDefault": 0, + "_produceTimeOffsetUsNodeosArg": "--produce-time-offset-us", "lastBlockTimeOffsetUs": -200000, + "_lastBlockTimeOffsetUsNodeosDefault": 
-200000, + "_lastBlockTimeOffsetUsNodeosArg": "--last-block-time-offset-us", + "cpuEffortPercent": 100, + "_cpuEffortPercentNodeosDefault": 80, + "_cpuEffortPercentNodeosArg": "--cpu-effort-percent", + "lastBlockCpuEffortPercent": 80, + "_lastBlockCpuEffortPercentNodeosDefault": 80, + "_lastBlockCpuEffortPercentNodeosArg": "--last-block-cpu-effort-percent", "maxBlockCpuUsageThresholdUs": null, + "_maxBlockCpuUsageThresholdUsNodeosDefault": 5000, + "_maxBlockCpuUsageThresholdUsNodeosArg": "--max-block-cpu-usage-threshold-us", "maxBlockNetUsageThresholdBytes": null, - "maxIrreversibleBlockAge": null, + "_maxBlockNetUsageThresholdBytesNodeosDefault": 1024, + "_maxBlockNetUsageThresholdBytesNodeosArg": "--max-block-net-usage-threshold-bytes", "maxScheduledTransactionTimePerBlockMs": null, - "maxTransactionTime": null, - "pauseOnStartup": null, - "privateKey": null, - "produceTimeOffsetUs": -200000, - "producerName": null, + "_maxScheduledTransactionTimePerBlockMsNodeosDefault": 100, + "_maxScheduledTransactionTimePerBlockMsNodeosArg": "--max-scheduled-transaction-time-per-block-ms", + "subjectiveCpuLeewayUs": null, + "_subjectiveCpuLeewayUsNodeosDefault": 31000, + "_subjectiveCpuLeewayUsNodeosArg": "--subjective-cpu-leeway-us", + "subjectiveAccountMaxFailures": null, + "_subjectiveAccountMaxFailuresNodeosDefault": 3, + "_subjectiveAccountMaxFailuresNodeosArg": "--subjective-account-max-failures", + "subjectiveAccountDecayTimeMinutes": null, + "_subjectiveAccountDecayTimeMinutesNodeosDefault": 1440, + "_subjectiveAccountDecayTimeMinutesNodeosArg": "--subjective-account-decay-time-minutes", + "incomingDeferRatio": null, + "_incomingDeferRatioNodeosDefault": 1, + "_incomingDeferRatioNodeosArg": "--incoming-defer-ratio", + "incomingTransactionQueueSizeMb": null, + "_incomingTransactionQueueSizeMbNodeosDefault": 1024, + "_incomingTransactionQueueSizeMbNodeosArg": "--incoming-transaction-queue-size-mb", + "disableSubjectiveBilling": true, + 
"_disableSubjectiveBillingNodeosDefault": 1, + "_disableSubjectiveBillingNodeosArg": "--disable-subjective-billing", + "disableSubjectiveAccountBilling": null, + "_disableSubjectiveAccountBillingNodeosDefault": false, + "_disableSubjectiveAccountBillingNodeosArg": "--disable-subjective-account-billing", + "disableSubjectiveP2pBilling": null, + "_disableSubjectiveP2pBillingNodeosDefault": 1, + "_disableSubjectiveP2pBillingNodeosArg": "--disable-subjective-p2p-billing", + "disableSubjectiveApiBilling": null, + "_disableSubjectiveApiBillingNodeosDefault": 1, + "_disableSubjectiveApiBillingNodeosArg": "--disable-subjective-api-billing", "producerThreads": 2, - "signatureProvider": null, + "_producerThreadsNodeosDefault": 2, + "_producerThreadsNodeosArg": "--producer-threads", "snapshotsDir": null, - "subjectiveAccountDecayTimeMinutes": null, - "subjectiveAccountMaxFailures": null, - "subjectiveCpuLeewayUs": null + "_snapshotsDirNodeosDefault": "\"snapshots\"", + "_snapshotsDirNodeosArg": "--snapshots-dir" }, "resourceMonitorPluginArgs": { - "_pluginName": "resource_monitor_plugin", "_pluginNamespace": "eosio", - "_resourceMonitorIntervalSecondsNodeosArg": "--resource-monitor-interval-seconds", + "_pluginName": "resource_monitor_plugin", + "resourceMonitorIntervalSeconds": null, "_resourceMonitorIntervalSecondsNodeosDefault": 2, - "_resourceMonitorNotShutdownOnThresholdExceededNodeosArg": "--resource-monitor-not-shutdown-on-threshold-exceeded", - "_resourceMonitorNotShutdownOnThresholdExceededNodeosDefault": false, - "_resourceMonitorSpaceThresholdNodeosArg": "--resource-monitor-space-threshold", + "_resourceMonitorIntervalSecondsNodeosArg": "--resource-monitor-interval-seconds", + "resourceMonitorSpaceThreshold": null, "_resourceMonitorSpaceThresholdNodeosDefault": 90, - "_resourceMonitorWarningIntervalNodeosArg": "--resource-monitor-warning-interval", - "_resourceMonitorWarningIntervalNodeosDefault": 30, - "resourceMonitorIntervalSeconds": null, + 
"_resourceMonitorSpaceThresholdNodeosArg": "--resource-monitor-space-threshold", "resourceMonitorNotShutdownOnThresholdExceeded": null, - "resourceMonitorSpaceThreshold": null, - "resourceMonitorWarningInterval": null + "_resourceMonitorNotShutdownOnThresholdExceededNodeosDefault": false, + "_resourceMonitorNotShutdownOnThresholdExceededNodeosArg": "--resource-monitor-not-shutdown-on-threshold-exceeded", + "resourceMonitorWarningInterval": null, + "_resourceMonitorWarningIntervalNodeosDefault": 30, + "_resourceMonitorWarningIntervalNodeosArg": "--resource-monitor-warning-interval" }, "signatureProviderPluginArgs": { - "_keosdProviderTimeoutNodeosArg": "--keosd-provider-timeout", - "_keosdProviderTimeoutNodeosDefault": 5, - "_pluginName": "signature_provider_plugin", "_pluginNamespace": "eosio", - "keosdProviderTimeout": null + "_pluginName": "signature_provider_plugin", + "keosdProviderTimeout": null, + "_keosdProviderTimeoutNodeosDefault": 5, + "_keosdProviderTimeoutNodeosArg": "--keosd-provider-timeout" }, "stateHistoryPluginArgs": { - "_chainStateHistoryNodeosArg": "--chain-state-history", - "_chainStateHistoryNodeosDefault": false, - "_deleteStateHistoryNodeosArg": "--delete-state-history", - "_deleteStateHistoryNodeosDefault": false, - "_pluginName": "state_history_plugin", "_pluginNamespace": "eosio", - "_stateHistoryDirNodeosArg": "--state-history-dir", + "_pluginName": "state_history_plugin", + "stateHistoryDir": null, "_stateHistoryDirNodeosDefault": "\"state-history\"", - "_stateHistoryEndpointNodeosArg": "--state-history-endpoint", - "_stateHistoryEndpointNodeosDefault": "127.0.0.1:8080", - "_stateHistoryLogRetainBlocksNodeosArg": "--state-history-log-retain-blocks", - "_stateHistoryLogRetainBlocksNodeosDefault": null, - "_stateHistoryUnixSocketPathNodeosArg": "--state-history-unix-socket-path", - "_stateHistoryUnixSocketPathNodeosDefault": null, - "_traceHistoryDebugModeNodeosArg": "--trace-history-debug-mode", - "_traceHistoryDebugModeNodeosDefault": 
false, - "_traceHistoryNodeosArg": "--trace-history", + "_stateHistoryDirNodeosArg": "--state-history-dir", + "traceHistory": null, "_traceHistoryNodeosDefault": false, + "_traceHistoryNodeosArg": "--trace-history", "chainStateHistory": null, - "deleteStateHistory": null, - "stateHistoryDir": null, + "_chainStateHistoryNodeosDefault": false, + "_chainStateHistoryNodeosArg": "--chain-state-history", "stateHistoryEndpoint": null, - "stateHistoryLogRetainBlocks": null, + "_stateHistoryEndpointNodeosDefault": "127.0.0.1:8080", + "_stateHistoryEndpointNodeosArg": "--state-history-endpoint", "stateHistoryUnixSocketPath": null, - "traceHistory": null, - "traceHistoryDebugMode": null + "_stateHistoryUnixSocketPathNodeosDefault": null, + "_stateHistoryUnixSocketPathNodeosArg": "--state-history-unix-socket-path", + "traceHistoryDebugMode": null, + "_traceHistoryDebugModeNodeosDefault": false, + "_traceHistoryDebugModeNodeosArg": "--trace-history-debug-mode", + "stateHistoryLogRetainBlocks": null, + "_stateHistoryLogRetainBlocksNodeosDefault": null, + "_stateHistoryLogRetainBlocksNodeosArg": "--state-history-log-retain-blocks", + "deleteStateHistory": null, + "_deleteStateHistoryNodeosDefault": false, + "_deleteStateHistoryNodeosArg": "--delete-state-history" }, "traceApiPluginArgs": { - "_pluginName": "trace_api_plugin", "_pluginNamespace": "eosio", - "_traceDirNodeosArg": "--trace-dir", + "_pluginName": "trace_api_plugin", + "traceDir": null, "_traceDirNodeosDefault": "\"traces\"", - "_traceMinimumIrreversibleHistoryBlocksNodeosArg": "--trace-minimum-irreversible-history-blocks", - "_traceMinimumIrreversibleHistoryBlocksNodeosDefault": -1, - "_traceMinimumUncompressedIrreversibleHistoryBlocksNodeosArg": "--trace-minimum-uncompressed-irreversible-history-blocks", - "_traceMinimumUncompressedIrreversibleHistoryBlocksNodeosDefault": -1, - "_traceNoAbisNodeosArg": "--trace-no-abis", - "_traceNoAbisNodeosDefault": false, - "_traceRpcAbiNodeosArg": "--trace-rpc-abi", - 
"_traceRpcAbiNodeosDefault": null, - "_traceSliceStrideNodeosArg": "--trace-slice-stride", + "_traceDirNodeosArg": "--trace-dir", + "traceSliceStride": null, "_traceSliceStrideNodeosDefault": 10000, - "traceDir": null, + "_traceSliceStrideNodeosArg": "--trace-slice-stride", "traceMinimumIrreversibleHistoryBlocks": null, + "_traceMinimumIrreversibleHistoryBlocksNodeosDefault": -1, + "_traceMinimumIrreversibleHistoryBlocksNodeosArg": "--trace-minimum-irreversible-history-blocks", "traceMinimumUncompressedIrreversibleHistoryBlocks": null, - "traceNoAbis": null, + "_traceMinimumUncompressedIrreversibleHistoryBlocksNodeosDefault": -1, + "_traceMinimumUncompressedIrreversibleHistoryBlocksNodeosArg": "--trace-minimum-uncompressed-irreversible-history-blocks", "traceRpcAbi": null, - "traceSliceStride": null + "_traceRpcAbiNodeosDefault": null, + "_traceRpcAbiNodeosArg": "--trace-rpc-abi", + "traceNoAbis": null, + "_traceNoAbisNodeosDefault": false, + "_traceNoAbisNodeosArg": "--trace-no-abis" } }, + "useBiosBootFile": false, "genesisPath": "tests/performance_tests/genesis.json", - "keepLogs": true, - "killAll": true, - "logDirBase": "p", - "logDirPath": "p/2023-01-11_20-01-22-20", - "logDirRoot": ".", - "logDirTimestamp": "2023-01-11_20-01-22", - "logDirTimestampedOptSuffix": "-20", + "maximumP2pPerHost": 5000, + "maximumClients": 0, "loggingDict": { "bios": "off" }, - "maximumClients": 0, - "maximumP2pPerHost": 5000, - "nodeosVers": "v4", - "nodesFile": null, - "numAddlBlocksToPrune": 2, - "pnodes": 1, - "printMissingTransactions": false, "prodsEnableTraceApi": false, - "quiet": false, + "nodeosVers": "v4", "specificExtraNodeosArgs": { - "1": "--plugin eosio::trace_api_plugin" + "2": "--plugin eosio::trace_api_plugin" }, + "_totalNodes": 3, "targetTps": 20, "testTrxGenDurationSec": 90, - "topo": "mesh", - "totalNodes": 1, "tpsLimitPerGenerator": 10, - "useBiosBootFile": false, - "verbose": true + "numAddlBlocksToPrune": 2, + "logDirRoot": ".", + "delReport": false, + 
"quiet": false, + "delPerfLogs": false, + "expectedTransactionsSent": 1800, + "printMissingTransactions": false, + "logDirBase": "p", + "logDirTimestamp": "2023-01-13_17-51-38", + "logDirTimestampedOptSuffix": "-20", + "logDirPath": "p/2023-01-13_17-51-38-20" }, - "completedRun": true, "env": { - "logical_cpu_count": 16, + "system": "Linux", "os": "posix", "release": "5.10.16.3-microsoft-standard-WSL2", - "system": "Linux" + "logical_cpu_count": 16 }, - "nodeosVersion": "v4.0.0-dev", - "testFinish": "2023-01-11T20:03:53.210082", - "testStart": "2023-01-11T20:01:22.768422" + "nodeosVersion": "v4.0.0-dev" } ``` diff --git a/tests/performance_tests/log_reader.py b/tests/performance_tests/log_reader.py index d14b2c1e3a..e57eb668d5 100644 --- a/tests/performance_tests/log_reader.py +++ b/tests/performance_tests/log_reader.py @@ -458,7 +458,7 @@ def default(self, obj): return json.JSONEncoder.default(self, obj) def reportAsJSON(report: dict) -> json: - return json.dumps(report, sort_keys=True, indent=2, cls=LogReaderEncoder) + return json.dumps(report, indent=2, cls=LogReaderEncoder) def calcAndReport(data: chainData, tpsTestConfig: TpsTestConfig, artifacts: ArtifactPaths, argsDict: dict, testStart: datetime=None, completedRun: bool=True) -> dict: scrapeLog(data, artifacts.nodeosLogPath) From f72b6fd047fe8f24c8f94e9a331257990f4ce75d Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Fri, 13 Jan 2023 12:04:05 -0600 Subject: [PATCH 075/178] redo generated report using single producer --- tests/performance_tests/README.md | 64 +++++++++++++++---------------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/tests/performance_tests/README.md b/tests/performance_tests/README.md index 0ae6e0220f..38b55570f5 100644 --- a/tests/performance_tests/README.md +++ b/tests/performance_tests/README.md @@ -820,35 +820,35 @@ The Performance Test Basic generates, by default, a report that details results ``` json { "completedRun": true, - "testStart": 
"2023-01-13T17:51:38.885663", - "testFinish": "2023-01-13T17:54:16.588906", + "testStart": "2023-01-13T18:00:42.465802", + "testFinish": "2023-01-13T18:03:11.831277", "Analysis": { "BlockSize": { - "min": 1152, - "max": 2688, + "min": 1920, + "max": 1920, "avg": 1920.0, - "sigma": 316.1805963235654, + "sigma": 0.0, "emptyBlocks": 0, "numBlocks": 177 }, "BlocksGuide": { "firstBlockNum": 2, - "lastBlockNum": 316, - "totalBlocks": 315, - "testStartBlockNum": 129, - "testEndBlockNum": 315, - "setupBlocksCnt": 127, - "tearDownBlocksCnt": 1, + "lastBlockNum": 299, + "totalBlocks": 298, + "testStartBlockNum": 112, + "testEndBlockNum": 299, + "setupBlocksCnt": 110, + "tearDownBlocksCnt": 0, "leadingEmptyBlocksCnt": 1, - "trailingEmptyBlocksCnt": 5, + "trailingEmptyBlocksCnt": 6, "configAddlDropCnt": 2, "testAnalysisBlockCnt": 177 }, "TPS": { - "min": 16, - "max": 24, + "min": 20, + "max": 20, "avg": 20.0, - "sigma": 1.651445647689541, + "sigma": 0.0, "emptyBlocks": 0, "numBlocks": 177, "configTps": 20, @@ -860,17 +860,17 @@ The Performance Test Basic generates, by default, a report that details results "generatorCount": 2 }, "TrxCPU": { - "min": 8.0, - "max": 225.0, - "avg": 65.61944444444444, - "sigma": 39.33333558929686, + "min": 11.0, + "max": 360.0, + "avg": 63.10444444444445, + "sigma": 33.234456387280126, "samples": 1800 }, "TrxLatency": { - "min": 0.09500002861022949, - "max": 0.6970000267028809, - "avg": 0.3131494364473555, - "sigma": 0.15184769957733368, + "min": 0.06500005722045898, + "max": 0.4679999351501465, + "avg": 0.26723387837409973, + "sigma": 0.1414459711179884, "samples": 1800 }, "TrxNet": { @@ -883,8 +883,8 @@ The Performance Test Basic generates, by default, a report that details results "DroppedBlocks": {}, "DroppedBlocksCount": 0, "DroppedTransactions": 0, - "ProductionWindowsTotal": 14, - "ProductionWindowsAverageSize": 12.0, + "ProductionWindowsTotal": 0, + "ProductionWindowsAverageSize": 0, "ProductionWindowsMissed": 0, "ForkedBlocks": [], 
"ForksCount": 0 @@ -899,7 +899,7 @@ The Performance Test Basic generates, by default, a report that details results "verbose": true, "_killEosInstances": true, "_killWallet": true, - "pnodes": 2, + "pnodes": 1, "totalNodes": 1, "topo": "mesh", "extraNodeosArgs": { @@ -1234,13 +1234,13 @@ The Performance Test Basic generates, by default, a report that details results "produceTimeOffsetUs": 0, "_produceTimeOffsetUsNodeosDefault": 0, "_produceTimeOffsetUsNodeosArg": "--produce-time-offset-us", - "lastBlockTimeOffsetUs": -200000, + "lastBlockTimeOffsetUs": 0, "_lastBlockTimeOffsetUsNodeosDefault": -200000, "_lastBlockTimeOffsetUsNodeosArg": "--last-block-time-offset-us", "cpuEffortPercent": 100, "_cpuEffortPercentNodeosDefault": 80, "_cpuEffortPercentNodeosArg": "--cpu-effort-percent", - "lastBlockCpuEffortPercent": 80, + "lastBlockCpuEffortPercent": 100, "_lastBlockCpuEffortPercentNodeosDefault": 80, "_lastBlockCpuEffortPercentNodeosArg": "--last-block-cpu-effort-percent", "maxBlockCpuUsageThresholdUs": null, @@ -1370,9 +1370,9 @@ The Performance Test Basic generates, by default, a report that details results "prodsEnableTraceApi": false, "nodeosVers": "v4", "specificExtraNodeosArgs": { - "2": "--plugin eosio::trace_api_plugin" + "1": "--plugin eosio::trace_api_plugin" }, - "_totalNodes": 3, + "_totalNodes": 2, "targetTps": 20, "testTrxGenDurationSec": 90, "tpsLimitPerGenerator": 10, @@ -1384,9 +1384,9 @@ The Performance Test Basic generates, by default, a report that details results "expectedTransactionsSent": 1800, "printMissingTransactions": false, "logDirBase": "p", - "logDirTimestamp": "2023-01-13_17-51-38", + "logDirTimestamp": "2023-01-13_18-00-42", "logDirTimestampedOptSuffix": "-20", - "logDirPath": "p/2023-01-13_17-51-38-20" + "logDirPath": "p/2023-01-13_18-00-42-20" }, "env": { "system": "Linux", From da7dff2c39d12b6bbb764f570c3ea12de5a1a6b5 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Fri, 13 Jan 2023 14:54:51 -0600 Subject: [PATCH 076/178] 
refactor calcProductionWindows --- tests/performance_tests/log_reader.py | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/tests/performance_tests/log_reader.py b/tests/performance_tests/log_reader.py index d7ac9b3f03..bc7ec805cd 100644 --- a/tests/performance_tests/log_reader.py +++ b/tests/performance_tests/log_reader.py @@ -278,21 +278,19 @@ def getProductionWindows(prodDict: dict, blockDict: dict, data: chainData): def calcProductionWindows(prodDict: dict): prodWindows = productionWindows() - prodWindows.totalWindows = len(prodDict) - 2 totalBlocksForAverage = 0 - for k, v in prodDict.items(): - if k != "0" and k != str(prodWindows.totalWindows+1): - if v.blockCount < COMPLETEPRODUCTIONWINDOWSIZE: - prodWindows.missedWindows += 1 + for i, (k, v) in enumerate(prodDict.items()): + if v.blockCount == COMPLETEPRODUCTIONWINDOWSIZE: + prodWindows.totalWindows += 1 totalBlocksForAverage += v.blockCount else: - if v.blockCount == COMPLETEPRODUCTIONWINDOWSIZE: - prodWindows.totalWindows += 1 + #First and last production windows are possibly incomplete but + #should not count against total or missed windows + if i != 0 and i != len(prodDict)-1: + prodWindows.missedWindows += 1 totalBlocksForAverage += v.blockCount - if prodWindows.totalWindows <= 0: - prodWindows.totalWindows = 0 - prodWindows.averageWindowSize = 0 - else: + prodWindows.totalWindows += 1 + if prodWindows.totalWindows > 0: prodWindows.averageWindowSize = totalBlocksForAverage / prodWindows.totalWindows return prodWindows From 9af6e9023d8ff0ff02415d646cb7628960ee1bd9 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Wed, 18 Jan 2023 12:29:20 -0600 Subject: [PATCH 077/178] seperate out new csv file containing data and rework as necessary --- tests/performance_tests/log_reader.py | 23 +++++++++++++++++-- .../performance_test_basic.py | 5 ++-- 2 files changed, 24 insertions(+), 4 deletions(-) diff --git a/tests/performance_tests/log_reader.py 
b/tests/performance_tests/log_reader.py index bc7ec805cd..bd0608fb63 100644 --- a/tests/performance_tests/log_reader.py +++ b/tests/performance_tests/log_reader.py @@ -29,6 +29,7 @@ class ArtifactPaths: trxGenLogDirPath: Path = Path("") blockTrxDataPath: Path = Path("") blockDataPath: Path = Path("") + csvDataPath: Path = Path("") @dataclass class TpsTestConfig: @@ -62,6 +63,8 @@ class trxData(): blockNum: int = 0 cpuUsageUs: int = 0 netUsageUs: int = 0 + blockTime: datetime = None + latency: float = 0 _sentTimestamp: str = "" _calcdTimeEpoch: float = 0 @@ -198,6 +201,7 @@ def selectedOpen(path): return gzip.open if path.suffix == '.gz' else open def scrapeLog(data: chainData, path): + #node_00/stderr.txt selectedopen = selectedOpen(path) with selectedopen(path, 'rt') as f: line = f.read() @@ -228,16 +232,19 @@ def scrapeLog(data: chainData, path): data.forkedBlocks.append(int(fork[1]) - int(fork[3]) + 1) def scrapeTrxGenLog(trxSent, path): + #trxGenLogs/trx_data_output_*.txt selectedopen = selectedOpen(path) with selectedopen(path, 'rt') as f: trxSent.update(dict([(x[0], x[1]) for x in (line.rstrip('\n').split(',') for line in f)])) def scrapeBlockTrxDataLog(trxDict, path): + #blockTrxData.txt selectedopen = selectedOpen(path) with selectedopen(path, 'rt') as f: - trxDict.update(dict([(x[0], trxData(x[1], x[2], x[3])) for x in (line.rstrip('\n').split(',') for line in f)])) + trxDict.update(dict([(x[0], trxData(blockNum=x[1], blockTime=x[2], cpuUsageUs=x[3], netUsageUs=x[4])) for x in (line.rstrip('\n').split(',') for line in f)])) def scrapeBlockDataLog(blockDict, path): + #blockData.txt selectedopen = selectedOpen(path) with selectedopen(path, 'rt') as f: blockDict.update(dict([(x[0], blkData(x[1], x[2], x[3], x[4])) for x in (line.rstrip('\n').split(',') for line in f)])) @@ -258,6 +265,16 @@ def populateTrxSentTimestamp(trxSent: dict, trxDict: dict, notFound): else: notFound.append(sentTrxId) +def populateTrxLatencies(blockDict: dict, trxDict: dict): + for 
trxId, data in trxDict.items(): + if data.calcdTimeEpoch != 0: + trxDict[trxId].latency = blockDict[data.blockNum].calcdTimeEpoch - data.calcdTimeEpoch + +def writeTransactionCsv(trxDict: dict, path): + with open(path, 'wt') as csvFile: + for trxId, data in trxDict.items(): + csvFile.write(f"{trxId},{data.blockNum},{data.blockTime},{data.cpuUsageUs},{data.netUsageUs},{data.latency},{data._sentTimestamp},{data._calcdTimeEpoch}\n") + def getProductionWindows(prodDict: dict, blockDict: dict, data: chainData): prod = "" count = 0 @@ -408,7 +425,7 @@ def calcTrxLatencyCpuNetStats(trxDict : dict, blockDict: dict): Returns: transaction latency stats as a basicStats object """ - trxLatencyCpuNetList = [((blockDict[data.blockNum].calcdTimeEpoch - data.calcdTimeEpoch), data.cpuUsageUs, data.netUsageUs) for trxId, data in trxDict.items() if data.calcdTimeEpoch != 0] + trxLatencyCpuNetList = [(data.latency, data.cpuUsageUs, data.netUsageUs) for trxId, data in trxDict.items() if data.calcdTimeEpoch != 0] npLatencyCpuNetList = np.array(trxLatencyCpuNetList, dtype=np.float) @@ -484,6 +501,8 @@ def calcAndReport(data: chainData, tpsTestConfig: TpsTestConfig, artifacts: Arti if argsDict.get("printMissingTransactions"): print(notFound) + populateTrxLatencies(blockDict, trxDict) + writeTransactionCsv(trxDict, artifacts.csvDataPath) guide = calcChainGuide(data, tpsTestConfig.numBlocksToPrune) trxLatencyStats, trxCpuStats, trxNetStats = calcTrxLatencyCpuNetStats(trxDict, blockDict) tpsStats = scoreTransfersPerSecond(data, guide) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 43334c497a..290b5c8be1 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -105,7 +105,7 @@ def __post_init__(self): self.specificExtraNodeosArgs.update({f"{node}" : '--plugin eosio::history_api_plugin --filter-on "*"' for node in range(self.pnodes, self._totalNodes)}) 
else: self.fetchBlock = lambda node, blockNum: node.processUrllibRequest("trace_api", "get_block", {"block_num":blockNum}, silentErrors=False, exitOnError=True) - self.writeTrx = lambda trxDataFile, block, blockNum: [trxDataFile.write(f"{trx['id']},{trx['block_num']},{trx['cpu_usage_us']},{trx['net_usage_words']}\n") for trx in block['payload']['transactions'] if block['payload']['transactions']] + self.writeTrx = lambda trxDataFile, block, blockNum: [trxDataFile.write(f"{trx['id']},{trx['block_num']},{trx['block_time']},{trx['cpu_usage_us']},{trx['net_usage_words']}\n") for trx in block['payload']['transactions'] if block['payload']['transactions']] self.writeBlock = lambda blockDataFile, block: blockDataFile.write(f"{block['payload']['number']},{block['payload']['id']},{block['payload']['producer']},{block['payload']['status']},{block['payload']['timestamp']}\n") self.fetchHeadBlock = lambda node, headBlock: node.processUrllibRequest("chain", "get_block_info", {"block_num":headBlock}, silentErrors=False, exitOnError=True) @@ -159,6 +159,7 @@ def __init__(self, testHelperConfig: TestHelperConfig=TestHelperConfig(), cluste self.etcEosioLogsDirPath = self.etcLogsDirPath/Path("eosio") self.blockDataLogDirPath = self.loggingConfig.logDirPath/Path("blockDataLogs") self.blockDataPath = self.blockDataLogDirPath/Path("blockData.txt") + self.csvDataPath = self.blockDataLogDirPath/Path("csv.txt") self.blockTrxDataPath = self.blockDataLogDirPath/Path("blockTrxData.txt") self.reportPath = self.loggingConfig.logDirPath/Path("data.json") @@ -385,7 +386,7 @@ def captureLowLevelArtifacts(self): def analyzeResultsAndReport(self, testResult: PtbTpsTestResult): args = self.prepArgs() artifactsLocate = log_reader.ArtifactPaths(nodeosLogPath=self.nodeosLogPath, trxGenLogDirPath=self.trxGenLogDirPath, blockTrxDataPath=self.blockTrxDataPath, - blockDataPath=self.blockDataPath) + blockDataPath=self.blockDataPath, csvDataPath=self.csvDataPath) tpsTestConfig = 
log_reader.TpsTestConfig(targetTps=self.ptbConfig.targetTps, testDurationSec=self.ptbConfig.testTrxGenDurationSec, tpsLimitPerGenerator=self.ptbConfig.tpsLimitPerGenerator, numBlocksToPrune=self.ptbConfig.numAddlBlocksToPrune, numTrxGensUsed=testResult.numGeneratorsUsed, targetTpsPerGenList=testResult.targetTpsPerGenList, quiet=self.ptbConfig.quiet) From 8f32316705e50707f885d851c5a8a46b4613444b Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Tue, 24 Jan 2023 17:25:56 -0600 Subject: [PATCH 078/178] add header to csv and filter out ungenerated transactions --- tests/performance_tests/log_reader.py | 1 + tests/performance_tests/performance_test_basic.py | 8 +++++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/tests/performance_tests/log_reader.py b/tests/performance_tests/log_reader.py index bd0608fb63..d05ef8a7cd 100644 --- a/tests/performance_tests/log_reader.py +++ b/tests/performance_tests/log_reader.py @@ -272,6 +272,7 @@ def populateTrxLatencies(blockDict: dict, trxDict: dict): def writeTransactionCsv(trxDict: dict, path): with open(path, 'wt') as csvFile: + csvFile.write("TransactionId,BlockNumber,BlockTime,CpuUsageUs,NetUsageUs,Latency,SentTimestamp,CalcdTimeEpoch\n") for trxId, data in trxDict.items(): csvFile.write(f"{trxId},{data.blockNum},{data.blockTime},{data.cpuUsageUs},{data.netUsageUs},{data.latency},{data._sentTimestamp},{data._calcdTimeEpoch}\n") diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 290b5c8be1..10036acb06 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -92,6 +92,12 @@ class SpecifiedContract: specificExtraNodeosArgs: dict = field(default_factory=dict) _totalNodes: int = 2 + def log_transactions(self, trxDataFile, block): + for trx in block['payload']['transactions']: + for actions in trx['actions']: + if actions['account'] != 'eosio' and actions['action'] != 'onblock': + 
trxDataFile.write(f"{trx['id']},{trx['block_num']},{trx['block_time']},{trx['cpu_usage_us']},{trx['net_usage_words']},{trx['actions']}\n") + def __post_init__(self): self._totalNodes = self.pnodes + 1 if self.totalNodes <= self.pnodes else self.totalNodes if not self.prodsEnableTraceApi: @@ -105,7 +111,7 @@ def __post_init__(self): self.specificExtraNodeosArgs.update({f"{node}" : '--plugin eosio::history_api_plugin --filter-on "*"' for node in range(self.pnodes, self._totalNodes)}) else: self.fetchBlock = lambda node, blockNum: node.processUrllibRequest("trace_api", "get_block", {"block_num":blockNum}, silentErrors=False, exitOnError=True) - self.writeTrx = lambda trxDataFile, block, blockNum: [trxDataFile.write(f"{trx['id']},{trx['block_num']},{trx['block_time']},{trx['cpu_usage_us']},{trx['net_usage_words']}\n") for trx in block['payload']['transactions'] if block['payload']['transactions']] + self.writeTrx = lambda trxDataFile, block, blockNum:[ self.log_transactions(trxDataFile, block) ] self.writeBlock = lambda blockDataFile, block: blockDataFile.write(f"{block['payload']['number']},{block['payload']['id']},{block['payload']['producer']},{block['payload']['status']},{block['payload']['timestamp']}\n") self.fetchHeadBlock = lambda node, headBlock: node.processUrllibRequest("chain", "get_block_info", {"block_num":headBlock}, silentErrors=False, exitOnError=True) From 5f1770b58ea22c2b29e42a57920ab1d9df5ca2a6 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Thu, 26 Jan 2023 11:13:46 -0600 Subject: [PATCH 079/178] rename csv --- tests/performance_tests/performance_test_basic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 10036acb06..4f5349ffe1 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -165,7 +165,7 @@ def __init__(self, testHelperConfig: 
TestHelperConfig=TestHelperConfig(), cluste self.etcEosioLogsDirPath = self.etcLogsDirPath/Path("eosio") self.blockDataLogDirPath = self.loggingConfig.logDirPath/Path("blockDataLogs") self.blockDataPath = self.blockDataLogDirPath/Path("blockData.txt") - self.csvDataPath = self.blockDataLogDirPath/Path("csv.txt") + self.csvDataPath = self.blockDataLogDirPath/Path("transaction_metrics.txt") self.blockTrxDataPath = self.blockDataLogDirPath/Path("blockTrxData.txt") self.reportPath = self.loggingConfig.logDirPath/Path("data.json") From 5add73c439b1791f85cec2a51ce4c99eb2bf533e Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Thu, 26 Jan 2023 15:21:21 -0600 Subject: [PATCH 080/178] change from .txt to .csv --- tests/performance_tests/performance_test_basic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 4f5349ffe1..7df97124d0 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -165,7 +165,7 @@ def __init__(self, testHelperConfig: TestHelperConfig=TestHelperConfig(), cluste self.etcEosioLogsDirPath = self.etcLogsDirPath/Path("eosio") self.blockDataLogDirPath = self.loggingConfig.logDirPath/Path("blockDataLogs") self.blockDataPath = self.blockDataLogDirPath/Path("blockData.txt") - self.csvDataPath = self.blockDataLogDirPath/Path("transaction_metrics.txt") + self.csvDataPath = self.blockDataLogDirPath/Path("transaction_metrics.csv") self.blockTrxDataPath = self.blockDataLogDirPath/Path("blockTrxData.txt") self.reportPath = self.loggingConfig.logDirPath/Path("data.json") From de7c7b0e4880d3ab6f66f5635ff2601c3051e332 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Fri, 27 Jan 2023 14:35:05 -0600 Subject: [PATCH 081/178] change csv variable names to something more proper --- tests/performance_tests/log_reader.py | 12 ++++++------ 
tests/performance_tests/performance_test_basic.py | 4 ++-- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/performance_tests/log_reader.py b/tests/performance_tests/log_reader.py index d05ef8a7cd..593d07a5f5 100644 --- a/tests/performance_tests/log_reader.py +++ b/tests/performance_tests/log_reader.py @@ -29,7 +29,7 @@ class ArtifactPaths: trxGenLogDirPath: Path = Path("") blockTrxDataPath: Path = Path("") blockDataPath: Path = Path("") - csvDataPath: Path = Path("") + transactionMetricsDataPath: Path = Path("") @dataclass class TpsTestConfig: @@ -270,11 +270,11 @@ def populateTrxLatencies(blockDict: dict, trxDict: dict): if data.calcdTimeEpoch != 0: trxDict[trxId].latency = blockDict[data.blockNum].calcdTimeEpoch - data.calcdTimeEpoch -def writeTransactionCsv(trxDict: dict, path): - with open(path, 'wt') as csvFile: - csvFile.write("TransactionId,BlockNumber,BlockTime,CpuUsageUs,NetUsageUs,Latency,SentTimestamp,CalcdTimeEpoch\n") +def writeTransactionMetrics(trxDict: dict, path): + with open(path, 'wt') as transactionMetricsFile: + transactionMetricsFile.write("TransactionId,BlockNumber,BlockTime,CpuUsageUs,NetUsageUs,Latency,SentTimestamp,CalcdTimeEpoch\n") for trxId, data in trxDict.items(): - csvFile.write(f"{trxId},{data.blockNum},{data.blockTime},{data.cpuUsageUs},{data.netUsageUs},{data.latency},{data._sentTimestamp},{data._calcdTimeEpoch}\n") + transactionMetricsFile.write(f"{trxId},{data.blockNum},{data.blockTime},{data.cpuUsageUs},{data.netUsageUs},{data.latency},{data._sentTimestamp},{data._calcdTimeEpoch}\n") def getProductionWindows(prodDict: dict, blockDict: dict, data: chainData): prod = "" @@ -503,7 +503,7 @@ def calcAndReport(data: chainData, tpsTestConfig: TpsTestConfig, artifacts: Arti print(notFound) populateTrxLatencies(blockDict, trxDict) - writeTransactionCsv(trxDict, artifacts.csvDataPath) + writeTransactionMetrics(trxDict, artifacts.transactionMetricsDataPath) guide = calcChainGuide(data, tpsTestConfig.numBlocksToPrune) 
trxLatencyStats, trxCpuStats, trxNetStats = calcTrxLatencyCpuNetStats(trxDict, blockDict) tpsStats = scoreTransfersPerSecond(data, guide) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 7df97124d0..0712730c3d 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -165,7 +165,7 @@ def __init__(self, testHelperConfig: TestHelperConfig=TestHelperConfig(), cluste self.etcEosioLogsDirPath = self.etcLogsDirPath/Path("eosio") self.blockDataLogDirPath = self.loggingConfig.logDirPath/Path("blockDataLogs") self.blockDataPath = self.blockDataLogDirPath/Path("blockData.txt") - self.csvDataPath = self.blockDataLogDirPath/Path("transaction_metrics.csv") + self.transactionMetricsDataPath = self.blockDataLogDirPath/Path("transaction_metrics.csv") self.blockTrxDataPath = self.blockDataLogDirPath/Path("blockTrxData.txt") self.reportPath = self.loggingConfig.logDirPath/Path("data.json") @@ -392,7 +392,7 @@ def captureLowLevelArtifacts(self): def analyzeResultsAndReport(self, testResult: PtbTpsTestResult): args = self.prepArgs() artifactsLocate = log_reader.ArtifactPaths(nodeosLogPath=self.nodeosLogPath, trxGenLogDirPath=self.trxGenLogDirPath, blockTrxDataPath=self.blockTrxDataPath, - blockDataPath=self.blockDataPath, csvDataPath=self.csvDataPath) + blockDataPath=self.blockDataPath, transactionMetricsDataPath=self.transactionMetricsDataPath) tpsTestConfig = log_reader.TpsTestConfig(targetTps=self.ptbConfig.targetTps, testDurationSec=self.ptbConfig.testTrxGenDurationSec, tpsLimitPerGenerator=self.ptbConfig.tpsLimitPerGenerator, numBlocksToPrune=self.ptbConfig.numAddlBlocksToPrune, numTrxGensUsed=testResult.numGeneratorsUsed, targetTpsPerGenList=testResult.targetTpsPerGenList, quiet=self.ptbConfig.quiet) From 96930a16169caf13edfbb97e19083d399c4806db Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 30 Jan 2023 11:29:56 -0600 Subject: [PATCH 
082/178] Rename in prep to support additional trx desc files. --- tests/performance_tests/CMakeLists.txt | 4 ++-- tests/performance_tests/performance_test_basic.py | 2 +- .../{userTrxData.json => userTrxDataTransfer.json} | 0 3 files changed, 3 insertions(+), 3 deletions(-) rename tests/performance_tests/{userTrxData.json => userTrxDataTransfer.json} (100%) diff --git a/tests/performance_tests/CMakeLists.txt b/tests/performance_tests/CMakeLists.txt index 3d8ecc447d..aa3d180bec 100644 --- a/tests/performance_tests/CMakeLists.txt +++ b/tests/performance_tests/CMakeLists.txt @@ -7,10 +7,10 @@ configure_file(nodeos_log_2_0_14.txt.gz nodeos_log_2_0_14.txt.gz COPYONLY) configure_file(nodeos_log_3_2.txt.gz nodeos_log_3_2.txt.gz COPYONLY) configure_file(genesis.json genesis.json COPYONLY) configure_file(validate_nodeos_plugin_args.py validate_nodeos_plugin_args.py COPYONLY) -configure_file(userTrxData.json userTrxData.json COPYONLY) +configure_file(userTrxDataTransfer.json userTrxDataTransfer.json COPYONLY) add_test(NAME performance_test_basic COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --clean-run WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME performance_test_basic_ex_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --clean-run --user-trx-data-file tests/performance_tests/userTrxData.json WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic_ex_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --clean-run --user-trx-data-file tests/performance_tests/userTrxDataTransfer.json WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME log_reader_tests COMMAND tests/performance_tests/log_reader_tests.py WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME validate_nodeos_plugin_args COMMAND 
tests/performance_tests/validate_nodeos_plugin_args.py WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST performance_test_basic PROPERTY LABELS nonparallelizable_tests) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 43334c497a..3130afd361 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -524,7 +524,7 @@ def createArgumentParser(): ptbParserGroup.add_argument("--target-tps", type=int, help="The target transfers per second to send during test", default=8000) ptbParserGroup.add_argument("--test-duration-sec", type=int, help="The duration of transfer trx generation for the test in seconds", default=90) - ptbParserGroup.add_argument("--user-trx-data-file", type=str, help="Path to userTrxData.json") + ptbParserGroup.add_argument("--user-trx-data-file", type=str, help="Path to userTrxDataTransfer.json") return ptbParser diff --git a/tests/performance_tests/userTrxData.json b/tests/performance_tests/userTrxDataTransfer.json similarity index 100% rename from tests/performance_tests/userTrxData.json rename to tests/performance_tests/userTrxDataTransfer.json From a4d6ecc40b444c2b28ad5d6289b9676ce1b37d4a Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 31 Jan 2023 04:38:27 -0600 Subject: [PATCH 083/178] Rename for generalization, not strictly for transfers. 
--- tests/trx_generator/trx_generator.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/trx_generator/trx_generator.cpp b/tests/trx_generator/trx_generator.cpp index 438a837a3b..88eeec8506 100644 --- a/tests/trx_generator/trx_generator.cpp +++ b/tests/trx_generator/trx_generator.cpp @@ -28,7 +28,7 @@ namespace eosio::testing { fc::crypto::private_key _second_act_priv_key; }; - signed_transaction_w_signer create_transfer_trx_w_signer(const action& act, const fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const chain_id_type& chain_id, const block_id_type& last_irr_block_id) { + signed_transaction_w_signer create_trx_w_action_and_signer(const action& act, const fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const chain_id_type& chain_id, const block_id_type& last_irr_block_id) { signed_transaction trx; trx.actions.push_back(act); trx.context_free_actions.emplace_back(action({}, config::null_account_name, name("nonce"), @@ -47,8 +47,8 @@ namespace eosio::testing { trxs.reserve(2 * action_pairs_vector.size()); for(action_pair_w_keys ap: action_pairs_vector) { - trxs.emplace_back(create_transfer_trx_w_signer(ap._first_act, ap._first_act_priv_key, nonce_prefix, nonce, trx_expiration, chain_id, last_irr_block_id)); - trxs.emplace_back(create_transfer_trx_w_signer(ap._second_act, ap._second_act_priv_key, nonce_prefix, nonce, trx_expiration, chain_id, last_irr_block_id)); + trxs.emplace_back(create_trx_w_action_and_signer(ap._first_act, ap._first_act_priv_key, nonce_prefix, nonce, trx_expiration, chain_id, last_irr_block_id)); + trxs.emplace_back(create_trx_w_action_and_signer(ap._second_act, ap._second_act_priv_key, nonce_prefix, nonce, trx_expiration, chain_id, last_irr_block_id)); } return trxs; @@ -199,7 +199,7 @@ namespace eosio::testing { act.authorization = vector{{_auth_account, 
config::active_name}}; act.data = std::move(packed_action_data); - _trxs.emplace_back(create_transfer_trx_w_signer(act, _private_key, ++_nonce_prefix, _nonce, _trx_expiration, _chain_id, _last_irr_block_id)); + _trxs.emplace_back(create_trx_w_action_and_signer(act, _private_key, ++_nonce_prefix, _nonce, _trx_expiration, _chain_id, _last_irr_block_id)); ilog("Setup p2p transaction provider"); From cfe83467aae5c41d93635d1f1f0fed0c4d8d2f09 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 1 Feb 2023 09:45:50 -0600 Subject: [PATCH 084/178] Add way to specify auth acct when using json trx description. Add support for actionAuthAcct and actionAuthPrivKey for more explicit control when specifying transaction directly. Add actionAuthAcct to json transaction description Add trx generator id to allow differentiation of trx generators for use in create accounts. Factored out set_transaction_headers into own helper function. --- tests/TestHarness/Cluster.py | 6 +-- .../launch_transaction_generators.py | 22 ++++++++-- .../performance_test_basic.py | 20 +++++++-- .../userTrxDataTransfer.json | 1 + tests/trx_generator/main.cpp | 41 +++++++++++++++---- tests/trx_generator/trx_generator.cpp | 41 +++++++++++-------- tests/trx_generator/trx_generator.hpp | 16 +++++--- tests/trx_generator/trx_generator_tests.cpp | 8 +++- 8 files changed, 112 insertions(+), 43 deletions(-) diff --git a/tests/TestHarness/Cluster.py b/tests/TestHarness/Cluster.py index 27ad8075cf..0c37513c05 100644 --- a/tests/TestHarness/Cluster.py +++ b/tests/TestHarness/Cluster.py @@ -1763,7 +1763,7 @@ def stripValues(lowestMaxes,greaterThan): def launchTrxGenerators(self, contractOwnerAcctName: str, acctNamesList: list, acctPrivKeysList: list, nodeId: int=0, tpsPerGenerator: int=10, numGenerators: int=1, durationSec: int=60, - waitToComplete:bool=False, abiFile=None, actionName=None, actionData=None): + waitToComplete:bool=False, abiFile=None, actionName=None, actionAuthAcct=None, actionAuthPrivKey=None, 
actionData=None): Utils.Print("Configure txn generators") node=self.getNode(nodeId) p2pListenPort = self.getNodeP2pPort(nodeId) @@ -1779,8 +1779,8 @@ def launchTrxGenerators(self, contractOwnerAcctName: str, acctNamesList: list, a tpsTrxGensConfig = TpsTrxGensConfig(targetTps=targetTps, tpsLimitPerGenerator=tpsLimitPerGenerator) self.trxGenLauncher = TransactionGeneratorsLauncher(chainId=chainId, lastIrreversibleBlockId=lib_id, contractOwnerAccount=contractOwnerAcctName, accts=','.join(map(str, acctNamesList)), - privateKeys=','.join(map(str, acctPrivKeysList)), trxGenDurationSec=durationSec, - logDir=Utils.DataDir, abiFile=abiFile, actionName=actionName, actionData=actionData, + privateKeys=','.join(map(str, acctPrivKeysList)), trxGenDurationSec=durationSec, logDir=Utils.DataDir, + abiFile=abiFile, actionName=actionName, actionAuthAcct=actionAuthAcct, actionAuthPrivKey=actionAuthPrivKey, actionData=actionData, peerEndpoint=self.host, port=p2pListenPort, tpsTrxGensConfig=tpsTrxGensConfig) Utils.Print("Launch txn generators and start generating/sending transactions") diff --git a/tests/TestHarness/launch_transaction_generators.py b/tests/TestHarness/launch_transaction_generators.py index 4faf0e7cd8..d80b0687b1 100755 --- a/tests/TestHarness/launch_transaction_generators.py +++ b/tests/TestHarness/launch_transaction_generators.py @@ -37,7 +37,8 @@ def __init__(self, targetTps: int, tpsLimitPerGenerator: int): class TransactionGeneratorsLauncher: def __init__(self, chainId: int, lastIrreversibleBlockId: int, contractOwnerAccount: str, accts: str, privateKeys: str, - trxGenDurationSec: int, logDir: str, abiFile: Path, actionName: str, actionData, peerEndpoint: str, port: int, tpsTrxGensConfig: TpsTrxGensConfig): + trxGenDurationSec: int, logDir: str, abiFile: Path, actionName: str, actionAuthAcct: str, actionAuthPrivKey: str, actionData, + peerEndpoint: str, port: int, tpsTrxGensConfig: TpsTrxGensConfig): self.chainId = chainId self.lastIrreversibleBlockId = 
lastIrreversibleBlockId self.contractOwnerAccount = contractOwnerAccount @@ -48,17 +49,20 @@ def __init__(self, chainId: int, lastIrreversibleBlockId: int, contractOwnerAcco self.logDir = logDir self.abiFile = abiFile self.actionName = actionName + self.actionAuthAcct = actionAuthAcct + self.actionAuthPrivKey = actionAuthPrivKey self.actionData = actionData self.peerEndpoint = peerEndpoint self.port = port def launch(self, waitToComplete=True): self.subprocess_ret_codes = [] - for targetTps in self.tpsTrxGensConfig.targetTpsPerGenList: - if self.abiFile is not None and self.actionName is not None and self.actionData is not None: + for id, targetTps in enumerate(self.tpsTrxGensConfig.targetTpsPerGenList): + if self.abiFile is not None and self.actionName is not None and self.actionData is not None and self.actionAuthAcct is not None and self.actionAuthPrivKey is not None: if Utils.Debug: Print( f'Running trx_generator: ./tests/trx_generator/trx_generator ' + f'--generator-id {id} ' f'--chain-id {self.chainId} ' f'--last-irreversible-block-id {self.lastIrreversibleBlockId} ' f'--contract-owner-account {self.contractOwnerAccount} ' @@ -68,6 +72,8 @@ def launch(self, waitToComplete=True): f'--target-tps {targetTps} ' f'--log-dir {self.logDir} ' f'--action-name {self.actionName} ' + f'--action-auth-acct {self.actionAuthAcct} ' + f'--action-auth-acct-priv-key {self.actionAuthPrivKey} ' f'--action-data {self.actionData} ' f'--abi-file {self.abiFile} ' f'--peer-endpoint {self.peerEndpoint} ' @@ -76,6 +82,7 @@ def launch(self, waitToComplete=True): self.subprocess_ret_codes.append( subprocess.Popen([ './tests/trx_generator/trx_generator', + '--generator-id', f'{id}', '--chain-id', f'{self.chainId}', '--last-irreversible-block-id', f'{self.lastIrreversibleBlockId}', '--contract-owner-account', f'{self.contractOwnerAccount}', @@ -85,6 +92,8 @@ def launch(self, waitToComplete=True): '--target-tps', f'{targetTps}', '--log-dir', f'{self.logDir}', '--action-name', 
f'{self.actionName}', + '--action-auth-acct', f'{self.actionAuthAcct}', + '--action-auth-acct-priv-key', f'{self.actionAuthPrivKey}', '--action-data', f'{self.actionData}', '--abi-file', f'{self.abiFile}', '--peer-endpoint', f'{self.peerEndpoint}', @@ -95,6 +104,7 @@ def launch(self, waitToComplete=True): if Utils.Debug: Print( f'Running trx_generator: ./tests/trx_generator/trx_generator ' + f'--generator-id {id} ' f'--chain-id {self.chainId} ' f'--last-irreversible-block-id {self.lastIrreversibleBlockId} ' f'--contract-owner-account {self.contractOwnerAccount} ' @@ -109,6 +119,7 @@ def launch(self, waitToComplete=True): self.subprocess_ret_codes.append( subprocess.Popen([ './tests/trx_generator/trx_generator', + '--generator-id', f'{id}', '--chain-id', f'{self.chainId}', '--last-irreversible-block-id', f'{self.lastIrreversibleBlockId}', '--contract-owner-account', f'{self.contractOwnerAccount}', @@ -145,6 +156,8 @@ def parseArgs(): parser.add_argument("tps_limit_per_generator", type=int, help="Maximum amount of transactions per second a single generator can have.", default=4000) parser.add_argument("log_dir", type=str, help="Path to directory where trx logs should be written.") parser.add_argument("action_name", type=str, help="The action name applied to the provided action data input") + parser.add_argument("action_auth_acct", type=str, help="The authorization account name used for trx action authorization") + parser.add_argument("action_auth_acct_priv_key", type=str, help="The authorization account's private key used for signing trx") parser.add_argument("action_data", type=str, help="The path to the json action data file or json action data description string to use") parser.add_argument("abi_file", type=str, help="The path to the contract abi file to use for the supplied transaction action data") parser.add_argument("peer_endpoint", type=str, help="set the peer endpoint to send transactions to", default="127.0.0.1") @@ -158,7 +171,8 @@ def main(): 
trxGenLauncher = TransactionGeneratorsLauncher(chainId=args.chain_id, lastIrreversibleBlockId=args.last_irreversible_block_id, contractOwnerAccount=args.contract_owner_account, accts=args.accounts, privateKeys=args.priv_keys, trxGenDurationSec=args.trx_gen_duration, logDir=args.log_dir, - abiFile=args.abi_file, actionName=args.action_name, actionData=args.action_data, + abiFile=args.abi_file, actionName=args.action_name, actionAuthAcct=args.action_auth_acct, + actionAuthPrivKey=args.action_auth_acct_priv_key, actionData=args.action_data, peerEndpoint=args.peer_endpoint, port=args.port, tpsTrxGensConfig=TpsTrxGensConfig(targetTps=args.target_tps, tpsLimitPerGenerator=args.tps_limit_per_generator)) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 3130afd361..23608bbb34 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -300,6 +300,7 @@ def setupContract(self): return None def runTpsTest(self) -> PtbTpsTestResult: + completedRun = False self.producerNode = self.cluster.getNode(self.producerNodeId) self.producerP2pPort = self.cluster.getNodeP2pPort(self.producerNodeId) @@ -312,13 +313,24 @@ def runTpsTest(self) -> PtbTpsTestResult: abiFile=None actionName=None + actionAuthAcct=None + actionAuthPrivKey=None actionData=None if (self.ptbConfig.userTrxDataFile is not None): self.readUserTrxDataFromFile(self.ptbConfig.userTrxDataFile) self.setupWalletAndAccounts(accountCnt=len(self.userTrxDataDict['accounts']), accountNames=self.userTrxDataDict['accounts']) abiFile = self.userTrxDataDict['abiFile'] actionName = self.userTrxDataDict['actionName'] + actionAuthAcct = self.userTrxDataDict['actionAuthAcct'] actionData = json.dumps(self.userTrxDataDict['actionData']) + + if actionAuthAcct == self.cluster.eosioAccount.name: + actionAuthPrivKey = self.cluster.eosioAccount.activePrivateKey + else: + for account in self.cluster.accounts: + if 
actionAuthAcct == account.name: + actionAuthPrivKey = account.activePrivateKey + break else: self.setupWalletAndAccounts() @@ -328,10 +340,10 @@ def runTpsTest(self) -> PtbTpsTestResult: tpsTrxGensConfig = TpsTrxGensConfig(targetTps=self.ptbConfig.targetTps, tpsLimitPerGenerator=self.ptbConfig.tpsLimitPerGenerator) trxGenLauncher = TransactionGeneratorsLauncher(chainId=chainId, lastIrreversibleBlockId=lib_id, - contractOwnerAccount=self.clusterConfig.specifiedContract.accountName, accts=','.join(map(str, self.accountNames)), - privateKeys=','.join(map(str, self.accountPrivKeys)), trxGenDurationSec=self.ptbConfig.testTrxGenDurationSec, - logDir=self.trxGenLogDirPath, abiFile=abiFile, actionName=actionName, actionData=actionData, - peerEndpoint=self.producerNode.host, port=self.producerP2pPort, tpsTrxGensConfig=tpsTrxGensConfig) + contractOwnerAccount=self.clusterConfig.specifiedContract.accountName, accts=','.join(map(str, self.accountNames)), + privateKeys=','.join(map(str, self.accountPrivKeys)), trxGenDurationSec=self.ptbConfig.testTrxGenDurationSec, logDir=self.trxGenLogDirPath, + abiFile=abiFile, actionName=actionName, actionAuthAcct=actionAuthAcct, actionAuthPrivKey=actionAuthPrivKey, actionData=actionData, + peerEndpoint=self.producerNode.host, port=self.producerP2pPort, tpsTrxGensConfig=tpsTrxGensConfig) trxGenExitCodes = trxGenLauncher.launch() print(f"Transaction Generator exit codes: {trxGenExitCodes}") diff --git a/tests/performance_tests/userTrxDataTransfer.json b/tests/performance_tests/userTrxDataTransfer.json index b247e44dc5..242652b4e9 100644 --- a/tests/performance_tests/userTrxDataTransfer.json +++ b/tests/performance_tests/userTrxDataTransfer.json @@ -2,6 +2,7 @@ "accounts": ["testacct1", "testacct2"], "abiFile": "unittests/contracts/eosio.token/eosio.token.abi", "actionName": "transfer", + "actionAuthAcct": "testacct1", "actionData": { "from":"testacct1", diff --git a/tests/trx_generator/main.cpp b/tests/trx_generator/main.cpp index 
b7a6c8282a..007be8f0a6 100644 --- a/tests/trx_generator/main.cpp +++ b/tests/trx_generator/main.cpp @@ -25,8 +25,10 @@ enum return_codes { int main(int argc, char** argv) { const int64_t TRX_EXPIRATION_MAX = 3600; + const uint16_t GENERATOR_ID_MAX = 960; variables_map vmap; options_description cli("Transaction Generator command line options."); + uint16_t gen_id; string chain_id_in; string contract_owner_acct; string accts; @@ -45,6 +47,8 @@ int main(int argc, char** argv) { bool transaction_specified = false; std::string action_name_in; + std::string action_auth_acct_in; + std::string action_auth_acct_priv_key_in; std::string action_data_file_or_str; std::string abi_file_path_in; @@ -53,6 +57,7 @@ int main(int argc, char** argv) { cli.add_options() + ("generator-id", bpo::value(&gen_id)->default_value(0), "Id for the transaction generator. Allowed range (0-960). Defaults to 0.") ("chain-id", bpo::value(&chain_id_in), "set the chain id") ("contract-owner-account", bpo::value(&contract_owner_acct), "Account name of the contract account for the transaction actions") ("accounts", bpo::value(&accts), "comma-separated list of accounts that will be used for transfers. Minimum required accounts: 2.") @@ -66,6 +71,8 @@ int main(int argc, char** argv) { ("monitor-max-lag-duration-us", bpo::value(&max_lag_duration_us)->default_value(1000000), "Max microseconds that transaction generation can be in violation before quitting. 
Defaults to 1000000 (1s).") ("log-dir", bpo::value(&log_dir_in), "set the logs directory") ("action-name", bpo::value(&action_name_in), "The action name applied to the provided action data input") + ("action-auth-acct", bpo::value(&action_auth_acct_in), "The action authorization account") + ("action-auth-acct-priv-key", bpo::value(&action_auth_acct_priv_key_in), "The action authorization account priv key for signing trxs") ("action-data", bpo::value(&action_data_file_or_str), "The path to the json action data file or json action data description string to use") ("abi-file", bpo::value(&abi_file_path_in), "The path to the contract abi file to use for the supplied transaction action data") ("stop-on-trx-failed", bpo::value(&stop_on_trx_failed)->default_value(true), "stop transaction generation if sending fails.") @@ -83,14 +90,15 @@ int main(int argc, char** argv) { return SUCCESS; } - if((vmap.count("action-name") || vmap.count("action-data") || vmap.count("abi-file")) && !(vmap.count("action-name") && vmap.count("action-data") && vmap.count("abi-file"))) { - ilog("Initialization error: If using action-name, action-data, or abi-file to specify a transaction type to generate, must provide all three inputs."); + if((vmap.count("action-name") || vmap.count("action-auth-acct") || vmap.count("action-auth-acct-priv-key") || vmap.count("action-data") || vmap.count("abi-file")) && + !(vmap.count("action-name") && vmap.count("action-auth-acct") && vmap.count("action-auth-acct-priv-key") && vmap.count("action-data") && vmap.count("abi-file"))) { + ilog("Initialization error: If using action-name, action-auth-acct, action-auth-acct-priv-key, action-data, or abi-file to specify a transaction type to generate, must provide all inputs."); cli.print(std::cerr); return INITIALIZE_FAIL; } - if(vmap.count("action-name") && vmap.count("action-data") && vmap.count("abi-file")) { - ilog("Specifying transaction to generate directly using action-name, action-data, and abi-file."); + 
if(vmap.count("action-name") && vmap.count("action-auth-acct") && vmap.count("action-auth-acct-priv-key") && vmap.count("action-data") && vmap.count("abi-file")) { + ilog("Specifying transaction to generate directly using action-name, action-auth-acct, action-auth-acct-priv-key, action-data, and abi-file."); transaction_specified = true; } @@ -155,6 +163,14 @@ int main(int argc, char** argv) { return INITIALIZE_FAIL; } + if(vmap.count("generation-id")) { + if(gen_id > GENERATOR_ID_MAX) { + ilog("Initialization error: Exceeded max value for generator id. Value must be less than ${max}.", ("max", GENERATOR_ID_MAX)); + cli.print(std::cerr); + return INITIALIZE_FAIL; + } + } + if(vmap.count("trx-expiration")) { if(trx_expr > TRX_EXPIRATION_MAX) { ilog("Initialization error: Exceeded max value for transaction expiration. Value must be less than ${max}.", ("max", TRX_EXPIRATION_MAX)); @@ -192,6 +208,7 @@ int main(int argc, char** argv) { return INITIALIZE_FAIL; } + ilog("Initial generator id ${id}", ("id", gen_id)); ilog("Initial chain id ${chainId}", ("chainId", chain_id_in)); ilog("Contract owner account ${acct}", ("acct", contract_owner_acct)); ilog("Transfer accounts ${accts}", ("accts", accts)); @@ -203,13 +220,21 @@ int main(int argc, char** argv) { ilog("Logs directory ${logDir}", ("logDir", log_dir_in)); ilog("Peer Endpoint ${peer-endpoint}:${peer-port}", ("peer-endpoint", peer_endpoint)("peer-port", port)); + if (transaction_specified) { + ilog("User Transaction Specified: Action Name ${act}", ("act", action_name_in)); + ilog("User Transaction Specified: Action Auth Acct Name ${acct}", ("acct", action_auth_acct_in)); + ilog("User Transaction Specified: Action Auth Acct Priv Key ${key}", ("key", action_auth_acct_priv_key_in)); + ilog("User Transaction Specified: Action Data File or Str ${data}", ("data", action_data_file_or_str)); + ilog("User Transaction Specified: Abi File ${abi}", ("abi", abi_file_path_in)); + } + fc::microseconds trx_expr_ms = 
fc::seconds(trx_expr); std::shared_ptr monitor; if (transaction_specified) { - auto generator = std::make_shared(chain_id_in, abi_file_path_in, contract_owner_acct, account_str_vector.at(0), action_name_in, - action_data_file_or_str, trx_expr_ms, private_keys_str_vector.at(0), lib_id_str, log_dir_in, - stop_on_trx_failed, peer_endpoint, port); + auto generator = std::make_shared(gen_id, chain_id_in, abi_file_path_in, contract_owner_acct, + action_name_in, action_auth_acct_in, action_auth_acct_priv_key_in, action_data_file_or_str, + trx_expr_ms, lib_id_str, log_dir_in, stop_on_trx_failed, peer_endpoint, port); monitor = std::make_shared(spinup_time_us, max_lag_per, max_lag_duration_us); trx_tps_tester tester{generator, monitor, gen_duration, target_tps}; @@ -217,7 +242,7 @@ int main(int argc, char** argv) { return OTHER_FAIL; } } else { - auto generator = std::make_shared(chain_id_in, contract_owner_acct, account_str_vector, trx_expr_ms, private_keys_str_vector, + auto generator = std::make_shared(gen_id, chain_id_in, contract_owner_acct, account_str_vector, trx_expr_ms, private_keys_str_vector, lib_id_str, log_dir_in, stop_on_trx_failed, peer_endpoint, port); monitor = std::make_shared(spinup_time_us, max_lag_per, max_lag_duration_us); diff --git a/tests/trx_generator/trx_generator.cpp b/tests/trx_generator/trx_generator.cpp index 88eeec8506..4205acc969 100644 --- a/tests/trx_generator/trx_generator.cpp +++ b/tests/trx_generator/trx_generator.cpp @@ -28,16 +28,23 @@ namespace eosio::testing { fc::crypto::private_key _second_act_priv_key; }; + void set_transaction_headers(transaction& trx, const block_id_type& last_irr_block_id, const fc::microseconds expiration, uint32_t delay_sec = 0) { + trx.expiration = fc::time_point::now() + expiration; + trx.set_reference_block(last_irr_block_id); + + trx.max_net_usage_words = 0;// No limit + trx.max_cpu_usage_ms = 0; // No limit + trx.delay_sec = delay_sec; + } + signed_transaction_w_signer 
create_trx_w_action_and_signer(const action& act, const fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const chain_id_type& chain_id, const block_id_type& last_irr_block_id) { signed_transaction trx; + set_transaction_headers(trx, last_irr_block_id, trx_expiration); trx.actions.push_back(act); trx.context_free_actions.emplace_back(action({}, config::null_account_name, name("nonce"), fc::raw::pack(std::to_string(nonce_prefix) + ":" + std::to_string(++nonce) + ":" + fc::time_point::now().time_since_epoch().count()))); - trx.set_reference_block(last_irr_block_id); - trx.expiration = fc::time_point::now() + trx_expiration; - trx.max_net_usage_words = 100; trx.sign(priv_key, chain_id); return signed_transaction_w_signer(trx, priv_key); } @@ -57,8 +64,7 @@ namespace eosio::testing { void update_resign_transaction(signed_transaction& trx, fc::crypto::private_key priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const chain_id_type& chain_id, const block_id_type& last_irr_block_id) { trx.context_free_actions.clear(); trx.context_free_actions.emplace_back(action({}, config::null_account_name, name("nonce"), fc::raw::pack(std::to_string(nonce_prefix) + ":" + std::to_string(++nonce) + ":" + fc::time_point::now().time_since_epoch().count()))); - trx.set_reference_block(last_irr_block_id); - trx.expiration = fc::time_point::now() + trx_expiration; + set_transaction_headers(trx, last_irr_block_id, trx_expiration); trx.signatures.clear(); trx.sign(priv_key, chain_id); } @@ -91,14 +97,14 @@ namespace eosio::testing { return actions_pairs_vector; } - trx_generator_base::trx_generator_base(std::string chain_id_in, std::string contract_owner_account, fc::microseconds trx_expr, std::string lib_id_str, std::string log_dir, + trx_generator_base::trx_generator_base(uint16_t id, std::string chain_id_in, std::string contract_owner_account, fc::microseconds trx_expr, std::string 
lib_id_str, std::string log_dir, bool stop_on_trx_failed, const std::string& peer_endpoint, unsigned short port) - : _provider(peer_endpoint, port), _chain_id(chain_id_in), _contract_owner_account(contract_owner_account), _trx_expiration(trx_expr), + : _provider(peer_endpoint, port), _id(id), _chain_id(chain_id_in), _contract_owner_account(contract_owner_account), _trx_expiration(trx_expr), _last_irr_block_id(fc::variant(lib_id_str).as()), _log_dir(log_dir), _stop_on_trx_failed(stop_on_trx_failed) {} - transfer_trx_generator::transfer_trx_generator(std::string chain_id_in, std::string contract_owner_account, const std::vector& accts, + transfer_trx_generator::transfer_trx_generator(uint16_t id, std::string chain_id_in, std::string contract_owner_account, const std::vector& accts, fc::microseconds trx_expr, const std::vector& private_keys_str_vector, std::string lib_id_str, std::string log_dir, bool stop_on_trx_failed, const std::string& peer_endpoint, unsigned short port) - : trx_generator_base(chain_id_in, contract_owner_account, trx_expr, lib_id_str, log_dir, stop_on_trx_failed, peer_endpoint, port), _accts(accts), _private_keys_str_vector(private_keys_str_vector) {} + : trx_generator_base(id, chain_id_in, contract_owner_account, trx_expr, lib_id_str, log_dir, stop_on_trx_failed, peer_endpoint, port), _accts(accts), _private_keys_str_vector(private_keys_str_vector) {} vector transfer_trx_generator::get_accounts(const vector& account_str_vector) { vector acct_name_list; @@ -166,10 +172,13 @@ namespace eosio::testing { } } - trx_generator::trx_generator(std::string chain_id_in, const std::string& abi_data_file, std::string contract_owner_account, std::string auth_account, std::string action_name, - const std::string& action_data_file_or_str, fc::microseconds trx_expr, const std::string& private_key_str, std::string lib_id_str, std::string log_dir, bool stop_on_trx_failed, const std::string& peer_endpoint, unsigned short port) - : trx_generator_base(chain_id_in, 
contract_owner_account, trx_expr, lib_id_str, log_dir, stop_on_trx_failed, peer_endpoint, port), _abi_data_file_path(abi_data_file), _auth_account(auth_account), - _action(action_name), _action_data_file_or_str(action_data_file_or_str), _private_key(fc::crypto::private_key(private_key_str)) {} + trx_generator::trx_generator(uint16_t id, std::string chain_id_in, const std::string& abi_data_file, std::string contract_owner_account, + std::string action_name, std::string action_auth_account, const std::string& private_key_str, const std::string& action_data_file_or_str, + fc::microseconds trx_expr, std::string lib_id_str, std::string log_dir, bool stop_on_trx_failed, + const std::string& peer_endpoint, unsigned short port) + : trx_generator_base(id, chain_id_in, contract_owner_account, trx_expr, lib_id_str, log_dir, stop_on_trx_failed, peer_endpoint, port), + _abi_data_file_path(abi_data_file), _action(action_name), _action_auth_account(action_auth_account), + _action_auth_priv_key(fc::crypto::private_key(private_key_str)), _action_data_file_or_str(action_data_file_or_str) {} bool trx_generator::setup() { _nonce_prefix = 0; @@ -186,7 +195,7 @@ namespace eosio::testing { bytes packed_action_data; try { auto action_type = abi.get_action_type( _action ); - FC_ASSERT( !action_type.empty(), "Unknown action ${action} in contract ${contract}", ("action", _action)( "contract", _auth_account )); + FC_ASSERT( !action_type.empty(), "Unknown action ${action} in contract ${contract}", ("action", _action)( "contract", _action_auth_account )); packed_action_data = abi.variant_to_binary( action_type, unpacked_action_data_json, abi_serializer::create_yield_function( abi_serializer_max_time ) ); } EOS_RETHROW_EXCEPTIONS(transaction_type_exception, "Fail to parse unpacked action data JSON") @@ -196,10 +205,10 @@ namespace eosio::testing { eosio::chain::action act; act.account = _contract_owner_account; act.name = _action; - act.authorization = vector{{_auth_account, 
config::active_name}}; + act.authorization = vector{{_action_auth_account, config::active_name}}; act.data = std::move(packed_action_data); - _trxs.emplace_back(create_trx_w_action_and_signer(act, _private_key, ++_nonce_prefix, _nonce, _trx_expiration, _chain_id, _last_irr_block_id)); + _trxs.emplace_back(create_trx_w_action_and_signer(act, _action_auth_priv_key, ++_nonce_prefix, _nonce, _trx_expiration, _chain_id, _last_irr_block_id)); ilog("Setup p2p transaction provider"); diff --git a/tests/trx_generator/trx_generator.hpp b/tests/trx_generator/trx_generator.hpp index db3007672c..9ae93bdbbe 100644 --- a/tests/trx_generator/trx_generator.hpp +++ b/tests/trx_generator/trx_generator.hpp @@ -17,6 +17,7 @@ namespace eosio::testing { struct trx_generator_base { p2p_trx_provider _provider; + uint16_t _id; eosio::chain::chain_id_type _chain_id; eosio::chain::name _contract_owner_account; fc::microseconds _trx_expiration; @@ -33,12 +34,14 @@ namespace eosio::testing { bool _stop_on_trx_failed = true; - trx_generator_base(std::string chain_id_in, std::string contract_owner_account, fc::microseconds trx_expr, std::string lib_id_str, std::string log_dir, bool stop_on_trx_failed, + trx_generator_base(uint16_t id, std::string chain_id_in, std::string contract_owner_account, fc::microseconds trx_expr, std::string lib_id_str, std::string log_dir, bool stop_on_trx_failed, const std::string& peer_endpoint="127.0.0.1", unsigned short port=9876); void push_transaction(p2p_trx_provider& provider, signed_transaction_w_signer& trx, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const eosio::chain::chain_id_type& chain_id, const eosio::chain::block_id_type& last_irr_block_id); + + bool generate_and_send(); bool tear_down(); void stop_generation(); @@ -49,7 +52,7 @@ namespace eosio::testing { const std::vector _accts; std::vector _private_keys_str_vector; - transfer_trx_generator(std::string chain_id_in, std::string contract_owner_account, const 
std::vector& accts, + transfer_trx_generator(uint16_t id, std::string chain_id_in, std::string contract_owner_account, const std::vector& accts, fc::microseconds trx_expr, const std::vector& private_keys_str_vector, std::string lib_id_str, std::string log_dir, bool stop_on_trx_failed, const std::string& peer_endpoint="127.0.0.1", unsigned short port=9876); @@ -61,15 +64,16 @@ namespace eosio::testing { struct trx_generator : public trx_generator_base{ std::string _abi_data_file_path; - eosio::chain::name _auth_account; eosio::chain::name _action; + eosio::chain::name _action_auth_account; + fc::crypto::private_key _action_auth_priv_key; std::string _action_data_file_or_str; - fc::crypto::private_key _private_key; const fc::microseconds abi_serializer_max_time = fc::seconds(10); // No risk to client side serialization taking a long time - trx_generator(std::string chain_id_in, const std::string& abi_data_file, std::string contract_owner_account, std::string auth_account, std::string action_name, const std::string& action_data_file_or_str, - fc::microseconds trx_expr, const std::string& private_key_str, std::string lib_id_str, std::string log_dir, bool stop_on_trx_failed, + trx_generator(uint16_t id, std::string chain_id_in, const std::string& abi_data_file, std::string contract_owner_account, + std::string action_name, std::string action_auth_account, const std::string& action_auth_priv_key_str, const std::string& action_data_file_or_str, + fc::microseconds trx_expr, std::string lib_id_str, std::string log_dir, bool stop_on_trx_failed, const std::string& peer_endpoint="127.0.0.1", unsigned short port=9876); bool setup(); diff --git a/tests/trx_generator/trx_generator_tests.cpp b/tests/trx_generator/trx_generator_tests.cpp index 9bb1a3804a..01000bd84e 100644 --- a/tests/trx_generator/trx_generator_tests.cpp +++ b/tests/trx_generator/trx_generator_tests.cpp @@ -325,21 +325,25 @@ BOOST_AUTO_TEST_CASE(tps_cant_keep_up_monitored) 
BOOST_AUTO_TEST_CASE(trx_generator_constructor) { + uint16_t id = 1; std::string chain_id = "999"; std::string contract_owner_account = "eosio"; std::string acct = "aaa"; std::string action_name = "transfer"; + std::string action_auth_acct = "aaa"; + std::string action_auth_priv_key_str = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"; const std::string action_data = "{\"from\":\"aaa\",\"to\":\"bbb\",\"quantity\":\"10.0000 SYS\",\"memo\":\"hello\"}"; const std::string abi_file = "../../unittests/contracts/eosio.token/eosio.token.abi"; fc::microseconds trx_expr = fc::seconds(3600); std::string log_dir = "."; std::string lib_id_str = "00000062989f69fd251df3e0b274c3364ffc2f4fce73de3f1c7b5e11a4c92f21"; - std::string private_key_str = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"; std::string peer_endpoint = "127.0.0.1"; unsigned short port = 9876; bool stop_on_trx_failed = true; - auto generator = trx_generator(chain_id, abi_file, contract_owner_account, acct, action_name, action_data, trx_expr, private_key_str, lib_id_str, log_dir, stop_on_trx_failed, peer_endpoint, port); + auto generator = trx_generator(id, chain_id, abi_file, contract_owner_account, + action_name, action_auth_acct, action_auth_priv_key_str, action_data, trx_expr, + lib_id_str, log_dir, stop_on_trx_failed, peer_endpoint, port); } BOOST_AUTO_TEST_SUITE_END() From caa8f41586ca1fa42c1e84ecd311c7e240cdd0bb Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 1 Feb 2023 10:35:45 -0600 Subject: [PATCH 085/178] Refactoring. Moving functions into classes. 
--- tests/trx_generator/trx_generator.cpp | 29 +++++++++------------------ tests/trx_generator/trx_generator.hpp | 27 +++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 20 deletions(-) diff --git a/tests/trx_generator/trx_generator.cpp b/tests/trx_generator/trx_generator.cpp index 4205acc969..7e0cb56284 100644 --- a/tests/trx_generator/trx_generator.cpp +++ b/tests/trx_generator/trx_generator.cpp @@ -5,9 +5,7 @@ #include #include #include -#include #include -#include #include #include @@ -18,17 +16,8 @@ using namespace appbase; namespace bpo=boost::program_options; namespace eosio::testing { - struct action_pair_w_keys { - action_pair_w_keys(eosio::chain::action first_action, eosio::chain::action second_action, fc::crypto::private_key first_act_signer, fc::crypto::private_key second_act_signer) - : _first_act(first_action), _second_act(second_action), _first_act_priv_key(first_act_signer), _second_act_priv_key(second_act_signer) {} - eosio::chain::action _first_act; - eosio::chain::action _second_act; - fc::crypto::private_key _first_act_priv_key; - fc::crypto::private_key _second_act_priv_key; - }; - - void set_transaction_headers(transaction& trx, const block_id_type& last_irr_block_id, const fc::microseconds expiration, uint32_t delay_sec = 0) { + void trx_generator_base::set_transaction_headers(transaction& trx, const block_id_type& last_irr_block_id, const fc::microseconds expiration, uint32_t delay_sec) { trx.expiration = fc::time_point::now() + expiration; trx.set_reference_block(last_irr_block_id); @@ -37,7 +26,7 @@ namespace eosio::testing { trx.delay_sec = delay_sec; } - signed_transaction_w_signer create_trx_w_action_and_signer(const action& act, const fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const chain_id_type& chain_id, const block_id_type& last_irr_block_id) { + signed_transaction_w_signer trx_generator_base::create_trx_w_action_and_signer(const action& act, const 
fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const chain_id_type& chain_id, const block_id_type& last_irr_block_id) { signed_transaction trx; set_transaction_headers(trx, last_irr_block_id, trx_expiration); trx.actions.push_back(act); @@ -49,7 +38,7 @@ namespace eosio::testing { return signed_transaction_w_signer(trx, priv_key); } - vector create_initial_transfer_transactions(const vector& action_pairs_vector, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const chain_id_type& chain_id, const block_id_type& last_irr_block_id) { + vector transfer_trx_generator::create_initial_transfer_transactions(const vector& action_pairs_vector, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const chain_id_type& chain_id, const block_id_type& last_irr_block_id) { std::vector trxs; trxs.reserve(2 * action_pairs_vector.size()); @@ -61,7 +50,7 @@ namespace eosio::testing { return trxs; } - void update_resign_transaction(signed_transaction& trx, fc::crypto::private_key priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const chain_id_type& chain_id, const block_id_type& last_irr_block_id) { + void trx_generator_base::update_resign_transaction(signed_transaction& trx, fc::crypto::private_key priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const chain_id_type& chain_id, const block_id_type& last_irr_block_id) { trx.context_free_actions.clear(); trx.context_free_actions.emplace_back(action({}, config::null_account_name, name("nonce"), fc::raw::pack(std::to_string(nonce_prefix) + ":" + std::to_string(++nonce) + ":" + fc::time_point::now().time_since_epoch().count()))); set_transaction_headers(trx, last_irr_block_id, trx_expiration); @@ -69,16 +58,16 @@ namespace eosio::testing { trx.sign(priv_key, chain_id); } - chain::bytes make_transfer_data(const chain::name& from, 
const chain::name& to, const chain::asset& quantity, const std::string&& memo) { + chain::bytes transfer_trx_generator::make_transfer_data(const chain::name& from, const chain::name& to, const chain::asset& quantity, const std::string&& memo) { return fc::raw::pack(from, to, quantity, memo); } - auto make_transfer_action(chain::name account, chain::name from, chain::name to, chain::asset quantity, std::string memo) { + auto transfer_trx_generator::make_transfer_action(chain::name account, chain::name from, chain::name to, chain::asset quantity, std::string memo) { return chain::action(std::vector{{from, chain::config::active_name}}, account, "transfer"_n, make_transfer_data(from, to, quantity, std::move(memo))); } - vector create_initial_transfer_actions(const std::string& salt, const uint64_t& period, const name& contract_owner_account, const vector& accounts, const vector& priv_keys) { + vector transfer_trx_generator::create_initial_transfer_actions(const std::string& salt, const uint64_t& period, const name& contract_owner_account, const vector& accounts, const vector& priv_keys) { vector actions_pairs_vector; for(size_t i = 0; i < accounts.size(); ++i) { @@ -157,7 +146,7 @@ namespace eosio::testing { return true; } - fc::variant json_from_file_or_string(const string& file_or_str, fc::json::parse_type ptype = fc::json::parse_type::legacy_parser) + fc::variant trx_generator::json_from_file_or_string(const string& file_or_str, fc::json::parse_type ptype) { regex r("^[ \t]*[\{\[]"); if ( !regex_search(file_or_str, r) && fc::is_regular_file(file_or_str) ) { @@ -253,7 +242,7 @@ namespace eosio::testing { return true; } - void log_first_trx(const std::string& log_dir, const chain::signed_transaction& trx) { + void trx_generator_base::log_first_trx(const std::string& log_dir, const chain::signed_transaction& trx) { std::ostringstream fileName; fileName << log_dir << "/first_trx_" << getpid() << ".txt"; std::ofstream out(fileName.str()); diff --git 
a/tests/trx_generator/trx_generator.hpp b/tests/trx_generator/trx_generator.hpp index 9ae93bdbbe..d308ab47fb 100644 --- a/tests/trx_generator/trx_generator.hpp +++ b/tests/trx_generator/trx_generator.hpp @@ -5,6 +5,8 @@ #include #include #include +#include +#include namespace eosio::testing { @@ -15,6 +17,16 @@ namespace eosio::testing { fc::crypto::private_key _signer; }; + struct action_pair_w_keys { + action_pair_w_keys(eosio::chain::action first_action, eosio::chain::action second_action, fc::crypto::private_key first_act_signer, fc::crypto::private_key second_act_signer) + : _first_act(first_action), _second_act(second_action), _first_act_priv_key(first_act_signer), _second_act_priv_key(second_act_signer) {} + + eosio::chain::action _first_act; + eosio::chain::action _second_act; + fc::crypto::private_key _first_act_priv_key; + fc::crypto::private_key _second_act_priv_key; + }; + struct trx_generator_base { p2p_trx_provider _provider; uint16_t _id; @@ -37,10 +49,17 @@ namespace eosio::testing { trx_generator_base(uint16_t id, std::string chain_id_in, std::string contract_owner_account, fc::microseconds trx_expr, std::string lib_id_str, std::string log_dir, bool stop_on_trx_failed, const std::string& peer_endpoint="127.0.0.1", unsigned short port=9876); + void update_resign_transaction(eosio::chain::signed_transaction& trx, fc::crypto::private_key priv_key, uint64_t& nonce_prefix, uint64_t& nonce, + const fc::microseconds& trx_expiration, const eosio::chain::chain_id_type& chain_id, const eosio::chain::block_id_type& last_irr_block_id); + void push_transaction(p2p_trx_provider& provider, signed_transaction_w_signer& trx, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const eosio::chain::chain_id_type& chain_id, const eosio::chain::block_id_type& last_irr_block_id); + void set_transaction_headers(eosio::chain::transaction& trx, const eosio::chain::block_id_type& last_irr_block_id, const fc::microseconds expiration, uint32_t 
delay_sec = 0); + signed_transaction_w_signer create_trx_w_action_and_signer(const eosio::chain::action& act, const fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, uint64_t& nonce, + const fc::microseconds& trx_expiration, const eosio::chain::chain_id_type& chain_id, const eosio::chain::block_id_type& last_irr_block_id); + void log_first_trx(const std::string& log_dir, const eosio::chain::signed_transaction& trx); bool generate_and_send(); bool tear_down(); @@ -59,6 +78,12 @@ namespace eosio::testing { std::vector get_accounts(const std::vector& account_str_vector); std::vector get_private_keys(const std::vector& priv_key_str_vector); + std::vector create_initial_transfer_transactions(const std::vector& action_pairs_vector, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const eosio::chain::chain_id_type& chain_id, const eosio::chain::block_id_type& last_irr_block_id); + eosio::chain::bytes make_transfer_data(const eosio::chain::name& from, const eosio::chain::name& to, const eosio::chain::asset& quantity, const std::string&& memo); + auto make_transfer_action(eosio::chain::name account, eosio::chain::name from, eosio::chain::name to, eosio::chain::asset quantity, std::string memo); + std::vector create_initial_transfer_actions(const std::string& salt, const uint64_t& period, const eosio::chain::name& contract_owner_account, + const std::vector& accounts, const std::vector& priv_keys); + bool setup(); }; @@ -76,6 +101,8 @@ namespace eosio::testing { fc::microseconds trx_expr, std::string lib_id_str, std::string log_dir, bool stop_on_trx_failed, const std::string& peer_endpoint="127.0.0.1", unsigned short port=9876); + fc::variant json_from_file_or_string(const std::string& file_or_str, fc::json::parse_type ptype = fc::json::parse_type::legacy_parser); + bool setup(); }; } From f52beda0a266398c37c4f53e7e8c86d53c207bcf Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 1 Feb 2023 10:41:10 -0600 Subject: [PATCH 
086/178] Fix compiler warnings in tests. --- tests/trx_generator/trx_generator_tests.cpp | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/tests/trx_generator/trx_generator_tests.cpp b/tests/trx_generator/trx_generator_tests.cpp index 01000bd84e..6f185c940e 100644 --- a/tests/trx_generator/trx_generator_tests.cpp +++ b/tests/trx_generator/trx_generator_tests.cpp @@ -75,8 +75,8 @@ BOOST_AUTO_TEST_CASE(tps_short_run_high_tps) constexpr uint64_t expected_runtime_us = test_duration_s * 1000000; constexpr uint64_t allowable_runtime_deviation_per = 20; constexpr uint64_t allowable_runtime_deviation_us = expected_runtime_us / allowable_runtime_deviation_per; - constexpr uint64_t minimum_runtime_us = expected_runtime_us - allowable_runtime_deviation_us; - constexpr uint64_t maximum_runtime_us = expected_runtime_us + allowable_runtime_deviation_us; + constexpr int64_t minimum_runtime_us = expected_runtime_us - allowable_runtime_deviation_us; + constexpr int64_t maximum_runtime_us = expected_runtime_us + allowable_runtime_deviation_us; std::shared_ptr generator = std::make_shared(expected_trxs); std::shared_ptr monitor = std::make_shared(expected_trxs); @@ -109,8 +109,8 @@ BOOST_AUTO_TEST_CASE(tps_short_run_med_tps_med_delay) constexpr uint64_t expected_runtime_us = test_duration_s * 1000000; constexpr uint64_t allowable_runtime_deviation_per = 20; constexpr uint64_t allowable_runtime_deviation_us = expected_runtime_us / allowable_runtime_deviation_per; - constexpr uint64_t minimum_runtime_us = expected_runtime_us - allowable_runtime_deviation_us; - constexpr uint64_t maximum_runtime_us = expected_runtime_us + allowable_runtime_deviation_us; + constexpr int64_t minimum_runtime_us = expected_runtime_us - allowable_runtime_deviation_us; + constexpr int64_t maximum_runtime_us = expected_runtime_us + allowable_runtime_deviation_us; std::shared_ptr generator = std::make_shared(expected_trxs, trx_delay_us); std::shared_ptr monitor = 
std::make_shared(expected_trxs); @@ -142,8 +142,8 @@ BOOST_AUTO_TEST_CASE(tps_med_run_med_tps_med_delay) constexpr uint64_t expected_runtime_us = test_duration_s * 1000000; constexpr uint64_t allowable_runtime_deviation_per = 20; constexpr uint64_t allowable_runtime_deviation_us = expected_runtime_us / allowable_runtime_deviation_per; - constexpr uint64_t minimum_runtime_us = expected_runtime_us - allowable_runtime_deviation_us; - constexpr uint64_t maximum_runtime_us = expected_runtime_us + allowable_runtime_deviation_us; + constexpr int64_t minimum_runtime_us = expected_runtime_us - allowable_runtime_deviation_us; + constexpr int64_t maximum_runtime_us = expected_runtime_us + allowable_runtime_deviation_us; std::shared_ptr generator = std::make_shared(expected_trxs, trx_delay_us); std::shared_ptr monitor = std::make_shared(expected_trxs); @@ -174,8 +174,8 @@ BOOST_AUTO_TEST_CASE(tps_cant_keep_up) constexpr uint64_t expected_runtime_us = test_duration_s * 1000000; constexpr uint64_t allowable_runtime_deviation_per = 20; constexpr uint64_t allowable_runtime_deviation_us = expected_runtime_us / allowable_runtime_deviation_per; - constexpr uint64_t minimum_runtime_us = expected_runtime_us - allowable_runtime_deviation_us; - constexpr uint64_t maximum_runtime_us = expected_runtime_us + allowable_runtime_deviation_us; + constexpr int64_t minimum_runtime_us = expected_runtime_us - allowable_runtime_deviation_us; + constexpr int64_t maximum_runtime_us = expected_runtime_us + allowable_runtime_deviation_us; std::shared_ptr generator = std::make_shared(expected_trxs, trx_delay_us); std::shared_ptr monitor = std::make_shared(expected_trxs); @@ -206,8 +206,8 @@ BOOST_AUTO_TEST_CASE(tps_med_run_med_tps_30us_delay) constexpr uint64_t expected_runtime_us = test_duration_s * 1000000; constexpr uint64_t allowable_runtime_deviation_per = 20; constexpr uint64_t allowable_runtime_deviation_us = expected_runtime_us / allowable_runtime_deviation_per; - constexpr uint64_t 
minimum_runtime_us = expected_runtime_us - allowable_runtime_deviation_us; - constexpr uint64_t maximum_runtime_us = expected_runtime_us + allowable_runtime_deviation_us; + constexpr int64_t minimum_runtime_us = expected_runtime_us - allowable_runtime_deviation_us; + constexpr int64_t maximum_runtime_us = expected_runtime_us + allowable_runtime_deviation_us; std::shared_ptr generator = std::make_shared(expected_trxs, trx_delay_us); std::shared_ptr monitor = std::make_shared(expected_trxs); From b35c3d6fbaa3306c277d4f8fb3339baef029ce65 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 2 Feb 2023 13:35:14 -0600 Subject: [PATCH 087/178] Add ability to configure log level for cluster via cli arg. --- tests/performance_tests/performance_test_basic.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 23608bbb34..6c5f3d8fae 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -86,6 +86,7 @@ class SpecifiedContract: genesisPath: Path = Path("tests")/"performance_tests"/"genesis.json" maximumP2pPerHost: int = 5000 maximumClients: int = 0 + loggingLevel: str = "info" loggingDict: dict = field(default_factory=lambda: { "bios": "off" }) prodsEnableTraceApi: bool = False nodeosVers: str = "" @@ -171,7 +172,7 @@ def __init__(self, testHelperConfig: TestHelperConfig=TestHelperConfig(), cluste # Setup cluster and its wallet manager self.walletMgr=WalletMgr(True) - self.cluster=Cluster(walletd=True, loggingLevel="info", loggingLevelDict=self.clusterConfig.loggingDict, + self.cluster=Cluster(walletd=True, loggingLevel=self.clusterConfig.loggingLevel, loggingLevelDict=self.clusterConfig.loggingDict, nodeosVers=self.clusterConfig.nodeosVers) self.cluster.setWalletMgr(self.walletMgr) @@ -502,6 +503,10 @@ def createBaseArgumentParser(): In \"heap\" mode database is preloaded 
in to swappable memory and will use huge pages if available. \ In \"locked\" mode database is preloaded, locked in to memory, and will use huge pages if available.", choices=["mapped", "heap", "locked"], default="mapped") + ptbBaseParserGroup.add_argument("--cluster-log-lvl", type=str, help="Cluster log level (\"all\", \"debug\", \"info\", \"warn\", \"error\", or \"off\"). \ + Performance Harness Test Basic relies on some logging at \"info\" level, so it is recommended lowest logging level to use. \ + However, there are instances where more verbose logging can be useful.", + choices=["all", "debug", "info", "warn", "error", "off"], default="info") ptbBaseParserGroup.add_argument("--net-threads", type=int, help="Number of worker threads in net_plugin thread pool", default=2) ptbBaseParserGroup.add_argument("--disable-subjective-billing", type=bool, help="Disable subjective CPU billing for API/P2P transactions", default=True) ptbBaseParserGroup.add_argument("--last-block-time-offset-us", type=int, help="Offset of last block producing time in microseconds. Valid range 0 .. 
-block_time_interval.", default=0) @@ -571,11 +576,12 @@ def main(): netPluginArgs = NetPluginArgs(netThreads=args.net_threads) ENA = PerformanceTestBasic.ClusterConfig.ExtraNodeosArgs extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs) + SC = PerformanceTestBasic.ClusterConfig.SpecifiedContract + specifiedContract=SC(accountName=args.account_name, ownerPublicKey=args.owner_public_key, activePublicKey=args.active_public_key, + contractDir=args.contract_dir, wasmFile=args.wasm_file, abiFile=args.abi_file) testClusterConfig = PerformanceTestBasic.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs, - specifiedContract=PerformanceTestBasic.ClusterConfig.SpecifiedContract(accountName=args.account_name, - ownerPublicKey=args.owner_public_key, activePublicKey=args.active_public_key, contractDir=args.contract_dir, - wasmFile=args.wasm_file, abiFile=args.abi_file), + specifiedContract=specifiedContract, loggingLevel=args.cluster_log_lvl, nodeosVers=Utils.getNodeosVersion().split('.')[0]) ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=args.target_tps, testTrxGenDurationSec=args.test_duration_sec, tpsLimitPerGenerator=args.tps_limit_per_generator, numAddlBlocksToPrune=args.num_blocks_to_prune, logDirRoot=".", delReport=args.del_report, quiet=args.quiet, delPerfLogs=args.del_perf_logs, From d8d9fa31958b7ae4e79d5e1291420fdfa2ec984b Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 8 Feb 2023 17:35:47 -0600 Subject: [PATCH 088/178] Expand transaction generator to support new account trx Update user data transaction specification to support multiple actions and automatically generating an account namer per transaction to substitute into defined actions using the 'ACCT_PER_TRX' key word. 
Removed arguments for action data pieces that have been moved into the larger transaction spec in the json files. Added/Updated tests to exercise the new feature. --- .../launch_transaction_generators.py | 33 ++-- tests/performance_tests/CMakeLists.txt | 4 +- .../performance_test_basic.py | 89 ++++++---- .../userTrxDataNewAccount.json | 53 ++++++ .../userTrxDataTransfer.json | 26 ++- tests/trx_generator/main.cpp | 41 ++--- tests/trx_generator/trx_generator.cpp | 160 +++++++++++++++--- tests/trx_generator/trx_generator.hpp | 77 ++++++++- tests/trx_generator/trx_generator_tests.cpp | 15 +- 9 files changed, 366 insertions(+), 132 deletions(-) create mode 100644 tests/performance_tests/userTrxDataNewAccount.json diff --git a/tests/TestHarness/launch_transaction_generators.py b/tests/TestHarness/launch_transaction_generators.py index d80b0687b1..2379987840 100755 --- a/tests/TestHarness/launch_transaction_generators.py +++ b/tests/TestHarness/launch_transaction_generators.py @@ -36,8 +36,8 @@ def __init__(self, targetTps: int, tpsLimitPerGenerator: int): class TransactionGeneratorsLauncher: - def __init__(self, chainId: int, lastIrreversibleBlockId: int, contractOwnerAccount: str, accts: str, privateKeys: str, - trxGenDurationSec: int, logDir: str, abiFile: Path, actionName: str, actionAuthAcct: str, actionAuthPrivKey: str, actionData, + def __init__(self, chainId: int, lastIrreversibleBlockId: int, contractOwnerAccount: str, accts: str, privateKeys: str, trxGenDurationSec: int, logDir: str, + abiFile: Path, actionsData, actionsAuths, peerEndpoint: str, port: int, tpsTrxGensConfig: TpsTrxGensConfig): self.chainId = chainId self.lastIrreversibleBlockId = lastIrreversibleBlockId @@ -48,17 +48,15 @@ def __init__(self, chainId: int, lastIrreversibleBlockId: int, contractOwnerAcco self.tpsTrxGensConfig = tpsTrxGensConfig self.logDir = logDir self.abiFile = abiFile - self.actionName = actionName - self.actionAuthAcct = actionAuthAcct - self.actionAuthPrivKey = 
actionAuthPrivKey - self.actionData = actionData + self.actionsData=actionsData + self.actionsAuths=actionsAuths self.peerEndpoint = peerEndpoint self.port = port def launch(self, waitToComplete=True): self.subprocess_ret_codes = [] for id, targetTps in enumerate(self.tpsTrxGensConfig.targetTpsPerGenList): - if self.abiFile is not None and self.actionName is not None and self.actionData is not None and self.actionAuthAcct is not None and self.actionAuthPrivKey is not None: + if self.abiFile is not None and self.actionsData is not None and self.actionsAuths is not None: if Utils.Debug: Print( f'Running trx_generator: ./tests/trx_generator/trx_generator ' @@ -71,11 +69,9 @@ def launch(self, waitToComplete=True): f'--trx-gen-duration {self.trxGenDurationSec} ' f'--target-tps {targetTps} ' f'--log-dir {self.logDir} ' - f'--action-name {self.actionName} ' - f'--action-auth-acct {self.actionAuthAcct} ' - f'--action-auth-acct-priv-key {self.actionAuthPrivKey} ' - f'--action-data {self.actionData} ' f'--abi-file {self.abiFile} ' + f'--actions-data {self.actionsData} ' + f'--actions-auths {self.actionsAuths} ' f'--peer-endpoint {self.peerEndpoint} ' f'--port {self.port}' ) @@ -91,11 +87,9 @@ def launch(self, waitToComplete=True): '--trx-gen-duration', f'{self.trxGenDurationSec}', '--target-tps', f'{targetTps}', '--log-dir', f'{self.logDir}', - '--action-name', f'{self.actionName}', - '--action-auth-acct', f'{self.actionAuthAcct}', - '--action-auth-acct-priv-key', f'{self.actionAuthPrivKey}', - '--action-data', f'{self.actionData}', '--abi-file', f'{self.abiFile}', + '--actions-data', f'{self.actionsData}', + '--actions-auths', f'{self.actionsAuths}', '--peer-endpoint', f'{self.peerEndpoint}', '--port', f'{self.port}' ]) @@ -155,11 +149,9 @@ def parseArgs(): parser.add_argument("target_tps", type=int, help="Goal transactions per second") parser.add_argument("tps_limit_per_generator", type=int, help="Maximum amount of transactions per second a single generator can have.", 
default=4000) parser.add_argument("log_dir", type=str, help="Path to directory where trx logs should be written.") - parser.add_argument("action_name", type=str, help="The action name applied to the provided action data input") - parser.add_argument("action_auth_acct", type=str, help="The authorization account name used for trx action authorization") - parser.add_argument("action_auth_acct_priv_key", type=str, help="The authorization account's private key used for signing trx") - parser.add_argument("action_data", type=str, help="The path to the json action data file or json action data description string to use") parser.add_argument("abi_file", type=str, help="The path to the contract abi file to use for the supplied transaction action data") + parser.add_argument("actions_data", type=str, help="The json actions data file or json actions data description string to use") + parser.add_argument("actions_auths", type=str, help="The json actions auth file or json actions auths description string to use, containting authAcctName to activePrivateKey pairs.") parser.add_argument("peer_endpoint", type=str, help="set the peer endpoint to send transactions to", default="127.0.0.1") parser.add_argument("port", type=int, help="set the peer endpoint port to send transactions to", default=9876) args = parser.parse_args() @@ -171,8 +163,7 @@ def main(): trxGenLauncher = TransactionGeneratorsLauncher(chainId=args.chain_id, lastIrreversibleBlockId=args.last_irreversible_block_id, contractOwnerAccount=args.contract_owner_account, accts=args.accounts, privateKeys=args.priv_keys, trxGenDurationSec=args.trx_gen_duration, logDir=args.log_dir, - abiFile=args.abi_file, actionName=args.action_name, actionAuthAcct=args.action_auth_acct, - actionAuthPrivKey=args.action_auth_acct_priv_key, actionData=args.action_data, + abiFile=args.abi_file, actionsData=args.actions_data, actionsAuths=args.actions_auths, peerEndpoint=args.peer_endpoint, port=args.port, 
tpsTrxGensConfig=TpsTrxGensConfig(targetTps=args.target_tps, tpsLimitPerGenerator=args.tps_limit_per_generator)) diff --git a/tests/performance_tests/CMakeLists.txt b/tests/performance_tests/CMakeLists.txt index aa3d180bec..b7f32ba183 100644 --- a/tests/performance_tests/CMakeLists.txt +++ b/tests/performance_tests/CMakeLists.txt @@ -8,9 +8,11 @@ configure_file(nodeos_log_3_2.txt.gz nodeos_log_3_2.txt.gz COPYONLY) configure_file(genesis.json genesis.json COPYONLY) configure_file(validate_nodeos_plugin_args.py validate_nodeos_plugin_args.py COPYONLY) configure_file(userTrxDataTransfer.json userTrxDataTransfer.json COPYONLY) +configure_file(userTrxDataNewAccount.json userTrxDataNewAccount.json COPYONLY) add_test(NAME performance_test_basic COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --clean-run WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME performance_test_basic_ex_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --clean-run --user-trx-data-file tests/performance_tests/userTrxDataTransfer.json WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic_ex_transfer_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --clean-run --user-trx-data-file tests/performance_tests/userTrxDataTransfer.json WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic_ex_new_acct_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --clean-run --user-trx-data-file tests/performance_tests/userTrxDataNewAccount.json WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME log_reader_tests COMMAND tests/performance_tests/log_reader_tests.py WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME validate_nodeos_plugin_args COMMAND 
tests/performance_tests/validate_nodeos_plugin_args.py WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST performance_test_basic PROPERTY LABELS nonparallelizable_tests) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 6c5f3d8fae..9ceb0f8f24 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -70,8 +70,10 @@ def __str__(self) -> str: @dataclass class SpecifiedContract: - accountName: str = "c" + accountName: str = "eosio" + ownerPrivateKey: str = "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" ownerPublicKey: str = "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" + activePrivateKey: str = "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" activePublicKey: str = "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" contractDir: str = "unittests/contracts/eosio.system" wasmFile: str = "eosio.system.wasm" @@ -288,20 +290,32 @@ def readUserTrxDataFromFile(self, userTrxDataFile: Path): self.userTrxDataDict = json.load(f) def setupContract(self): - specifiedAccount = Account(self.clusterConfig.specifiedContract.accountName) - specifiedAccount.ownerPublicKey = self.clusterConfig.specifiedContract.ownerPublicKey - specifiedAccount.activePublicKey = self.clusterConfig.specifiedContract.activePublicKey - self.cluster.createAccountAndVerify(specifiedAccount, self.cluster.eosioAccount, validationNodeIndex=self.validationNodeId) - print("Publishing contract") - transaction=self.cluster.biosNode.publishContract(specifiedAccount, self.clusterConfig.specifiedContract.contractDir, - self.clusterConfig.specifiedContract.wasmFile, - self.clusterConfig.specifiedContract.abiFile, waitForTransBlock=True) - if transaction is None: - print("ERROR: Failed to publish contract.") - return None + if (self.clusterConfig.specifiedContract.accountName != self.cluster.eosioAccount.name): + specifiedAccount = 
Account(self.clusterConfig.specifiedContract.accountName) + specifiedAccount.ownerPublicKey = self.clusterConfig.specifiedContract.ownerPublicKey + specifiedAccount.ownerPrivateKey = self.clusterConfig.specifiedContract.ownerPrivateKey + specifiedAccount.activePublicKey = self.clusterConfig.specifiedContract.activePublicKey + specifiedAccount.activePrivateKey = self.clusterConfig.specifiedContract.activePrivateKey + self.cluster.createAccountAndVerify(specifiedAccount, self.cluster.eosioAccount, validationNodeIndex=self.validationNodeId) + print("Publishing contract") + transaction=self.cluster.biosNode.publishContract(specifiedAccount, self.clusterConfig.specifiedContract.contractDir, + self.clusterConfig.specifiedContract.wasmFile, + self.clusterConfig.specifiedContract.abiFile, waitForTransBlock=True) + if transaction is None: + print("ERROR: Failed to publish contract.") + return None + else: + self.clusterConfig.specifiedContract.activePrivateKey = self.cluster.eosioAccount.activePrivateKey + self.clusterConfig.specifiedContract.activePublicKey = self.cluster.eosioAccount.activePublicKey + self.clusterConfig.specifiedContract.ownerPrivateKey = self.cluster.eosioAccount.ownerPrivateKey + self.clusterConfig.specifiedContract.ownerPublicKey = self.cluster.eosioAccount.ownerPublicKey + print(f"setupContract: default {self.clusterConfig.specifiedContract.accountName} \ + activePrivateKey: {self.clusterConfig.specifiedContract.activePrivateKey} \ + activePublicKey: {self.clusterConfig.specifiedContract.activePublicKey} \ + ownerPrivateKey: {self.clusterConfig.specifiedContract.ownerPrivateKey} \ + ownerPublicKey: {self.clusterConfig.specifiedContract.ownerPublicKey}") def runTpsTest(self) -> PtbTpsTestResult: - completedRun = False self.producerNode = self.cluster.getNode(self.producerNodeId) self.producerP2pPort = self.cluster.getNodeP2pPort(self.producerNodeId) @@ -313,25 +327,34 @@ def runTpsTest(self) -> PtbTpsTestResult: self.data = log_reader.chainData() 
abiFile=None - actionName=None - actionAuthAcct=None - actionAuthPrivKey=None - actionData=None + actionsDataJson=None + actionsAuthsJson=None + self.accountNames=[] + self.accountPrivKeys=[] if (self.ptbConfig.userTrxDataFile is not None): self.readUserTrxDataFromFile(self.ptbConfig.userTrxDataFile) - self.setupWalletAndAccounts(accountCnt=len(self.userTrxDataDict['accounts']), accountNames=self.userTrxDataDict['accounts']) + if self.userTrxDataDict['initAccounts']: + print(f"Creating accounts specified in userTrxData: {self.userTrxDataDict['initAccounts']}") + self.setupWalletAndAccounts(accountCnt=len(self.userTrxDataDict['initAccounts']), accountNames=self.userTrxDataDict['initAccounts']) abiFile = self.userTrxDataDict['abiFile'] - actionName = self.userTrxDataDict['actionName'] - actionAuthAcct = self.userTrxDataDict['actionAuthAcct'] - actionData = json.dumps(self.userTrxDataDict['actionData']) - if actionAuthAcct == self.cluster.eosioAccount.name: - actionAuthPrivKey = self.cluster.eosioAccount.activePrivateKey - else: - for account in self.cluster.accounts: - if actionAuthAcct == account.name: - actionAuthPrivKey = account.activePrivateKey - break + actionsDataJson = json.dumps(self.userTrxDataDict['actions']) + + authorizations={} + for act in self.userTrxDataDict['actions']: + actionAuthAcct=act["actionAuthAcct"] + actionAuthPrivKey=None + if actionAuthAcct == self.cluster.eosioAccount.name: + actionAuthPrivKey = self.cluster.eosioAccount.activePrivateKey + else: + for account in self.cluster.accounts: + if actionAuthAcct == account.name: + actionAuthPrivKey = account.activePrivateKey + break + + if actionAuthPrivKey is not None: + authorizations[actionAuthAcct]=actionAuthPrivKey + actionsAuthsJson = json.dumps(authorizations) else: self.setupWalletAndAccounts() @@ -340,10 +363,10 @@ def runTpsTest(self) -> PtbTpsTestResult: self.data.startBlock = self.waitForEmptyBlocks(self.validationNode, self.emptyBlockGoal) tpsTrxGensConfig = 
TpsTrxGensConfig(targetTps=self.ptbConfig.targetTps, tpsLimitPerGenerator=self.ptbConfig.tpsLimitPerGenerator) - trxGenLauncher = TransactionGeneratorsLauncher(chainId=chainId, lastIrreversibleBlockId=lib_id, - contractOwnerAccount=self.clusterConfig.specifiedContract.accountName, accts=','.join(map(str, self.accountNames)), - privateKeys=','.join(map(str, self.accountPrivKeys)), trxGenDurationSec=self.ptbConfig.testTrxGenDurationSec, logDir=self.trxGenLogDirPath, - abiFile=abiFile, actionName=actionName, actionAuthAcct=actionAuthAcct, actionAuthPrivKey=actionAuthPrivKey, actionData=actionData, + trxGenLauncher = TransactionGeneratorsLauncher(chainId=chainId, lastIrreversibleBlockId=lib_id, contractOwnerAccount=self.clusterConfig.specifiedContract.accountName, + accts=','.join(map(str, self.accountNames)), privateKeys=','.join(map(str, self.accountPrivKeys)), + trxGenDurationSec=self.ptbConfig.testTrxGenDurationSec, logDir=self.trxGenLogDirPath, + abiFile=abiFile, actionsData=actionsDataJson, actionsAuths=actionsAuthsJson, peerEndpoint=self.producerNode.host, port=self.producerP2pPort, tpsTrxGensConfig=tpsTrxGensConfig) trxGenExitCodes = trxGenLauncher.launch() @@ -520,7 +543,7 @@ def createBaseArgumentParser(): ptbBaseParserGroup.add_argument("--quiet", help="Whether to quiet printing intermediate results and reports to stdout", action='store_true') ptbBaseParserGroup.add_argument("--prods-enable-trace-api", help="Determines whether producer nodes should have eosio::trace_api_plugin enabled", action='store_true') ptbBaseParserGroup.add_argument("--print-missing-transactions", type=bool, help="Toggles if missing transactions are be printed upon test completion.", default=False) - ptbBaseParserGroup.add_argument("--account-name", type=str, help="Name of the account to create and assign a contract to", default="c") + ptbBaseParserGroup.add_argument("--account-name", type=str, help="Name of the account to create and assign a contract to", default="eosio") 
ptbBaseParserGroup.add_argument("--owner-public-key", type=str, help="Owner public key to use with specified account name", default="EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV") ptbBaseParserGroup.add_argument("--active-public-key", type=str, help="Active public key to use with specified account name", default="EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV") ptbBaseParserGroup.add_argument("--contract-dir", type=str, help="Path to contract dir", default="unittests/contracts/eosio.system") diff --git a/tests/performance_tests/userTrxDataNewAccount.json b/tests/performance_tests/userTrxDataNewAccount.json new file mode 100644 index 0000000000..de4cdc7647 --- /dev/null +++ b/tests/performance_tests/userTrxDataNewAccount.json @@ -0,0 +1,53 @@ +{ + "initAccounts": [], + "abiFile": "unittests/contracts/eosio.system/eosio.system.abi", + "actions": [ + { + "actionAuthAcct": "eosio", + "actionName": "newaccount", + "authorization": { + "actor": "eosio", + "permission": "active" + }, + "actionData": { + "creator": "eosio", + "name": "ACCT_PER_TRX", + "owner": { + "threshold": 1, + "keys": [ + { + "key": "EOS65rXebLhtk2aTTzP4e9x1AQZs7c5NNXJp89W8R3HyaA6Zyd4im", + "weight": 1 + } + ], + "accounts": [], + "waits": [] + }, + "active": { + "threshold": 1, + "keys": [ + { + "key": "EOS65rXebLhtk2aTTzP4e9x1AQZs7c5NNXJp89W8R3HyaA6Zyd4im", + "weight": 1 + } + ], + "accounts": [], + "waits": [] + } + } + }, + { + "actionAuthAcct": "eosio", + "actionName": "buyrambytes", + "authorization": { + "actor": "eosio", + "permission": "active" + }, + "actionData": { + "payer": "eosio", + "receiver": "ACCT_PER_TRX", + "bytes": "3000" + } + } + ] +} diff --git a/tests/performance_tests/userTrxDataTransfer.json b/tests/performance_tests/userTrxDataTransfer.json index 242652b4e9..4e17595eb1 100644 --- a/tests/performance_tests/userTrxDataTransfer.json +++ b/tests/performance_tests/userTrxDataTransfer.json @@ -1,13 +1,23 @@ { - "accounts": ["testacct1", "testacct2"], + 
"initAccounts": [ + "testacct1", + "testacct2" + ], "abiFile": "unittests/contracts/eosio.token/eosio.token.abi", - "actionName": "transfer", - "actionAuthAcct": "testacct1", - "actionData": + "actions": [ { - "from":"testacct1", - "to":"testacct2", - "quantity":"0.0001 CUR", - "memo":"transaction specified" + "actionAuthAcct": "testacct1", + "actionName": "transfer", + "authorization": { + "actor": "testacct1", + "permission": "active" + }, + "actionData": { + "from": "testacct1", + "to": "testacct2", + "quantity": "0.0001 CUR", + "memo": "transaction specified" + } } + ] } diff --git a/tests/trx_generator/main.cpp b/tests/trx_generator/main.cpp index 007be8f0a6..686b937493 100644 --- a/tests/trx_generator/main.cpp +++ b/tests/trx_generator/main.cpp @@ -46,11 +46,9 @@ int main(int argc, char** argv) { unsigned short port; bool transaction_specified = false; - std::string action_name_in; - std::string action_auth_acct_in; - std::string action_auth_acct_priv_key_in; - std::string action_data_file_or_str; std::string abi_file_path_in; + std::string actions_data_json_file_or_str; + std::string actions_auths_json_file_or_str; vector account_str_vector; vector private_keys_str_vector; @@ -70,12 +68,9 @@ int main(int argc, char** argv) { ("monitor-max-lag-percent", bpo::value(&max_lag_per)->default_value(5), "Max percentage off from expected transactions sent before being in violation. Defaults to 5.") ("monitor-max-lag-duration-us", bpo::value(&max_lag_duration_us)->default_value(1000000), "Max microseconds that transaction generation can be in violation before quitting. 
Defaults to 1000000 (1s).") ("log-dir", bpo::value(&log_dir_in), "set the logs directory") - ("action-name", bpo::value(&action_name_in), "The action name applied to the provided action data input") - ("action-auth-acct", bpo::value(&action_auth_acct_in), "The action authorization account") - ("action-auth-acct-priv-key", bpo::value(&action_auth_acct_priv_key_in), "The action authorization account priv key for signing trxs") - ("action-data", bpo::value(&action_data_file_or_str), "The path to the json action data file or json action data description string to use") ("abi-file", bpo::value(&abi_file_path_in), "The path to the contract abi file to use for the supplied transaction action data") - ("stop-on-trx-failed", bpo::value(&stop_on_trx_failed)->default_value(true), "stop transaction generation if sending fails.") + ("actions-data", bpo::value(&actions_data_json_file_or_str), "The json actions data file or json actions data description string to use") + ("actions-auths", bpo::value(&actions_auths_json_file_or_str), "The json actions auth file or json actions auths description string to use, containting authAcctName to activePrivateKey pairs.") ("peer-endpoint", bpo::value(&peer_endpoint)->default_value("127.0.0.1"), "set the peer endpoint to send transactions to") ("port", bpo::value(&port)->default_value(9876), "set the peer endpoint port to send transactions to") ("help,h", "print this list") @@ -90,15 +85,15 @@ int main(int argc, char** argv) { return SUCCESS; } - if((vmap.count("action-name") || vmap.count("action-auth-acct") || vmap.count("action-auth-acct-priv-key") || vmap.count("action-data") || vmap.count("abi-file")) && - !(vmap.count("action-name") && vmap.count("action-auth-acct") && vmap.count("action-auth-acct-priv-key") && vmap.count("action-data") && vmap.count("abi-file"))) { - ilog("Initialization error: If using action-name, action-auth-acct, action-auth-acct-priv-key, action-data, or abi-file to specify a transaction type to generate, must 
provide all inputs."); + if((vmap.count("abi-file") || vmap.count("actions-data") || vmap.count("actions-auths")) && + !(vmap.count("abi-file") && vmap.count("actions-data") && vmap.count("actions-auths"))) { + ilog("Initialization error: If using abi-file, actions-data, and actions-auths to specify a transaction type to generate, must provide all inputs."); cli.print(std::cerr); return INITIALIZE_FAIL; } - if(vmap.count("action-name") && vmap.count("action-auth-acct") && vmap.count("action-auth-acct-priv-key") && vmap.count("action-data") && vmap.count("abi-file")) { - ilog("Specifying transaction to generate directly using action-name, action-auth-acct, action-auth-acct-priv-key, action-data, and abi-file."); + if(vmap.count("abi-file") && vmap.count("actions-data") && vmap.count("actions-auths")) { + ilog("Specifying transaction to generate directly using abi-file, actions-data, and actions-auths."); transaction_specified = true; } @@ -134,11 +129,6 @@ int main(int argc, char** argv) { cli.print(std::cerr); return INITIALIZE_FAIL; } - if (transaction_specified && account_str_vector.size() < 1) { - ilog("Initialization error: Specifying transaction to generate requires at minimum 1 account."); - cli.print(std::cerr); - return INITIALIZE_FAIL; - } } else { ilog("Initialization error: did not specify transfer accounts. Auto transfer transaction generation requires at minimum 2 transfer accounts, while providing transaction action data requires at least one."); cli.print(std::cerr); @@ -152,11 +142,6 @@ int main(int argc, char** argv) { cli.print(std::cerr); return INITIALIZE_FAIL; } - if (transaction_specified && private_keys_str_vector.size() < 1) { - ilog("Initialization error: Specifying transaction to generate requires at minimum 1 private key"); - cli.print(std::cerr); - return INITIALIZE_FAIL; - } } else { ilog("Initialization error: did not specify accounts' private keys. 
Auto transfer transaction generation requires at minimum 2 private keys, while providing transaction action data requires at least one."); cli.print(std::cerr); @@ -221,11 +206,9 @@ int main(int argc, char** argv) { ilog("Peer Endpoint ${peer-endpoint}:${peer-port}", ("peer-endpoint", peer_endpoint)("peer-port", port)); if (transaction_specified) { - ilog("User Transaction Specified: Action Name ${act}", ("act", action_name_in)); - ilog("User Transaction Specified: Action Auth Acct Name ${acct}", ("acct", action_auth_acct_in)); - ilog("User Transaction Specified: Action Auth Acct Priv Key ${key}", ("key", action_auth_acct_priv_key_in)); - ilog("User Transaction Specified: Action Data File or Str ${data}", ("data", action_data_file_or_str)); ilog("User Transaction Specified: Abi File ${abi}", ("abi", abi_file_path_in)); + ilog("User Transaction Specified: Actions Data ${acts}", ("acts", actions_data_json_file_or_str)); + ilog("User Transaction Specified: Actions Auths ${auths}", ("auths", actions_auths_json_file_or_str)); } fc::microseconds trx_expr_ms = fc::seconds(trx_expr); @@ -233,7 +216,7 @@ int main(int argc, char** argv) { std::shared_ptr monitor; if (transaction_specified) { auto generator = std::make_shared(gen_id, chain_id_in, abi_file_path_in, contract_owner_acct, - action_name_in, action_auth_acct_in, action_auth_acct_priv_key_in, action_data_file_or_str, + actions_data_json_file_or_str, actions_auths_json_file_or_str, trx_expr_ms, lib_id_str, log_dir_in, stop_on_trx_failed, peer_endpoint, port); monitor = std::make_shared(spinup_time_us, max_lag_per, max_lag_duration_us); trx_tps_tester tester{generator, monitor, gen_duration, target_tps}; diff --git a/tests/trx_generator/trx_generator.cpp b/tests/trx_generator/trx_generator.cpp index 7e0cb56284..f5ec6f95bd 100644 --- a/tests/trx_generator/trx_generator.cpp +++ b/tests/trx_generator/trx_generator.cpp @@ -26,10 +26,12 @@ namespace eosio::testing { trx.delay_sec = delay_sec; } - 
signed_transaction_w_signer trx_generator_base::create_trx_w_action_and_signer(const action& act, const fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const chain_id_type& chain_id, const block_id_type& last_irr_block_id) { + signed_transaction_w_signer trx_generator_base::create_trx_w_actions_and_signer(std::vector acts, const fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const chain_id_type& chain_id, const block_id_type& last_irr_block_id) { signed_transaction trx; set_transaction_headers(trx, last_irr_block_id, trx_expiration); - trx.actions.push_back(act); + for (auto act:acts) { + trx.actions.push_back(act); + } trx.context_free_actions.emplace_back(action({}, config::null_account_name, name("nonce"), fc::raw::pack(std::to_string(nonce_prefix) + ":" + std::to_string(++nonce) + ":" + fc::time_point::now().time_since_epoch().count()))); @@ -42,9 +44,14 @@ namespace eosio::testing { std::vector trxs; trxs.reserve(2 * action_pairs_vector.size()); + std::vector act_vec; for(action_pair_w_keys ap: action_pairs_vector) { - trxs.emplace_back(create_trx_w_action_and_signer(ap._first_act, ap._first_act_priv_key, nonce_prefix, nonce, trx_expiration, chain_id, last_irr_block_id)); - trxs.emplace_back(create_trx_w_action_and_signer(ap._second_act, ap._second_act_priv_key, nonce_prefix, nonce, trx_expiration, chain_id, last_irr_block_id)); + act_vec.push_back(ap._first_act); + trxs.emplace_back(create_trx_w_actions_and_signer(act_vec, ap._first_act_priv_key, nonce_prefix, nonce, trx_expiration, chain_id, last_irr_block_id)); + act_vec.clear(); + act_vec.push_back(ap._second_act); + trxs.emplace_back(create_trx_w_actions_and_signer(act_vec, ap._second_act_priv_key, nonce_prefix, nonce, trx_expiration, chain_id, last_irr_block_id)); + act_vec.clear(); } return trxs; @@ -161,13 +168,102 @@ namespace eosio::testing { } } + void 
trx_generator::locate_key_words_in_action_mvo(std::vector& acct_gen_fields_out, fc::mutable_variant_object& action_mvo, const std::string& key_word) { + for(const mutable_variant_object::entry& e: action_mvo) { + if(e.value().get_type() == fc::variant::string_type && e.value() == key_word) { + acct_gen_fields_out.push_back(e.key()); + } else if(e.value().get_type() == fc::variant::object_type) { + auto inner_mvo = fc::mutable_variant_object(e.value()); + locate_key_words_in_action_mvo(acct_gen_fields_out, inner_mvo, key_word); + } + } + } + + void trx_generator::locate_key_words_in_action_array(std::map>& acct_gen_fields_out, fc::variants& action_array, const std::string& key_word) { + for(size_t i = 0; i < action_array.size(); ++i) { + auto action_mvo = fc::mutable_variant_object(action_array[i]); + locate_key_words_in_action_mvo(acct_gen_fields_out[i], action_mvo, key_word); + } + } + + void trx_generator::update_key_word_fields_in_sub_action(std::string key, fc::mutable_variant_object& action_mvo, std::string action_inner_key, const std::string key_word) { + auto mvo = action_mvo.find(action_inner_key); + if(mvo != action_mvo.end()) { + fc::mutable_variant_object inner_mvo = fc::mutable_variant_object(action_mvo[action_inner_key].get_object()); + if (inner_mvo.find(key) != inner_mvo.end()) { + inner_mvo.set(key, key_word); + action_mvo.set(action_inner_key, inner_mvo); + } + } + } + + void trx_generator::update_key_word_fields_in_action(std::vector& acct_gen_fields, fc::mutable_variant_object& action_mvo, const std::string key_word) { + for(auto key: acct_gen_fields) { + auto mvo = action_mvo.find(key); + if(mvo != action_mvo.end()) { + action_mvo.set(key, key_word); + } else { + for(auto e: action_mvo) { + if(e.value().get_type() == fc::variant::object_type) { + update_key_word_fields_in_sub_action(key, action_mvo, e.key(), key_word); + } + } + } + } + } + + void trx_generator::update_resign_transaction(signed_transaction& trx, fc::crypto::private_key priv_key, 
uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const chain_id_type& chain_id, const block_id_type& last_irr_block_id) { + trx.actions.clear(); + update_actions(); + for(auto act: _actions) { + trx.actions.push_back(act); + } + trx_generator_base::update_resign_transaction(trx, priv_key, nonce_prefix, nonce, trx_expiration, chain_id, last_irr_block_id); + } + trx_generator::trx_generator(uint16_t id, std::string chain_id_in, const std::string& abi_data_file, std::string contract_owner_account, - std::string action_name, std::string action_auth_account, const std::string& private_key_str, const std::string& action_data_file_or_str, + const std::string& actions_data_json_file_or_str, const std::string& actions_auths_json_file_or_str, fc::microseconds trx_expr, std::string lib_id_str, std::string log_dir, bool stop_on_trx_failed, const std::string& peer_endpoint, unsigned short port) : trx_generator_base(id, chain_id_in, contract_owner_account, trx_expr, lib_id_str, log_dir, stop_on_trx_failed, peer_endpoint, port), - _abi_data_file_path(abi_data_file), _action(action_name), _action_auth_account(action_auth_account), - _action_auth_priv_key(fc::crypto::private_key(private_key_str)), _action_data_file_or_str(action_data_file_or_str) {} + _abi_data_file_path(abi_data_file), + _actions_data_json_file_or_str(actions_data_json_file_or_str), _actions_auths_json_file_or_str(actions_auths_json_file_or_str), + _acct_name_generator() {} + + void trx_generator::update_actions() { + _actions.clear(); + + if (!_acct_gen_fields.empty()) { + std::string generated_account_name = _acct_name_generator.calcName(); + _acct_name_generator.increment(); + + for (auto const& [key, val] : _acct_gen_fields) { + update_key_word_fields_in_action(_acct_gen_fields.at(key), _unpacked_actions.at(key), generated_account_name); + } + } + + for (auto action_mvo : _unpacked_actions) { + chain::name action_name = chain::name(action_mvo["actionName"].as_string()); + 
chain::name action_auth_acct = chain::name(action_mvo["actionAuthAcct"].as_string()); + bytes packed_action_data; + try { + auto action_type = _abi.get_action_type( action_name ); + FC_ASSERT( !action_type.empty(), "Unknown action ${action} in contract ${contract}", ("action", action_name)( "contract", action_auth_acct )); + packed_action_data = _abi.variant_to_binary( action_type, action_mvo["actionData"], abi_serializer::create_yield_function( abi_serializer_max_time ) ); + } EOS_RETHROW_EXCEPTIONS(transaction_type_exception, "Fail to parse unpacked action data JSON") + + eosio::chain::action act; + act.account = _contract_owner_account; + act.name = action_name; + + chain::name auth_actor = chain::name(action_mvo["authorization"].get_object()["actor"].as_string()); + chain::name auth_perm = chain::name(action_mvo["authorization"].get_object()["permission"].as_string()); + + act.authorization = vector{{auth_actor, auth_perm}}; + act.data = std::move(packed_action_data); + _actions.push_back(act); + } + } bool trx_generator::setup() { _nonce_prefix = 0; @@ -177,31 +273,47 @@ namespace eosio::testing { stop_generation(); ilog("Create Initial Transaction with action data."); - abi_serializer abi = abi_serializer(fc::json::from_file(_abi_data_file_path).as(), abi_serializer::create_yield_function( abi_serializer_max_time )); - fc::variant unpacked_action_data_json = json_from_file_or_string(_action_data_file_or_str); - ilog("action data variant: ${data}", ("data", fc::json::to_pretty_string(unpacked_action_data_json))); + _abi = abi_serializer(fc::json::from_file(_abi_data_file_path).as(), abi_serializer::create_yield_function( abi_serializer_max_time )); + fc::variant unpacked_actions_data_json = json_from_file_or_string(_actions_data_json_file_or_str); + fc::variant unpacked_actions_auths_data_json = json_from_file_or_string(_actions_auths_json_file_or_str); + ilog("Loaded actions data: ${data}", ("data", fc::json::to_pretty_string(unpacked_actions_data_json))); + 
ilog("Loaded actions auths data: ${auths}", ("auths", fc::json::to_pretty_string(unpacked_actions_auths_data_json))); - bytes packed_action_data; - try { - auto action_type = abi.get_action_type( _action ); - FC_ASSERT( !action_type.empty(), "Unknown action ${action} in contract ${contract}", ("action", _action)( "contract", _action_auth_account )); - packed_action_data = abi.variant_to_binary( action_type, unpacked_action_data_json, abi_serializer::create_yield_function( abi_serializer_max_time ) ); + const std::string gen_acct_name_per_trx("ACCT_PER_TRX"); - } EOS_RETHROW_EXCEPTIONS(transaction_type_exception, "Fail to parse unpacked action data JSON") + auto action_array = unpacked_actions_data_json.get_array(); + for (size_t i =0; i < action_array.size(); ++i ) { + _unpacked_actions.push_back(fc::mutable_variant_object(action_array[i])); + } + locate_key_words_in_action_array(_acct_gen_fields, action_array, gen_acct_name_per_trx); - ilog("${packed_data}", ("packed_data", fc::to_hex(packed_action_data.data(), packed_action_data.size()))); + if(!_acct_gen_fields.empty()) { + ilog("Located the following account names that need to be generated and populted in each transaction:"); + for(auto e: _acct_gen_fields) { + ilog("acct_gen_fields entry: ${value}", ("value", e)); + } + ilog("Priming name generator for trx generator prefix."); + _acct_name_generator.setPrefix(_id); + } + + ilog("Setting up transaction signer."); + fc::crypto::private_key signer_key; + signer_key = fc::crypto::private_key(unpacked_actions_auths_data_json.get_object()[_unpacked_actions.at(0)["actionAuthAcct"].as_string()].as_string()); - eosio::chain::action act; - act.account = _contract_owner_account; - act.name = _action; - act.authorization = vector{{_action_auth_account, config::active_name}}; - act.data = std::move(packed_action_data); + ilog("Setting up initial transaction actions."); + update_actions(); + ilog("Initial actions (${count}):", ("count", _unpacked_actions.size())); + for 
(size_t i = 0; i < _unpacked_actions.size(); ++i) { + ilog("Initial action ${index}: ${act}", ("index", i)("act", fc::json::to_pretty_string(_unpacked_actions.at(i)))); + ilog("Initial action packed data ${index}: ${packed_data}", ("packed_data", fc::to_hex(_actions.at(i).data.data(), _actions.at(i).data.size()))); + } - _trxs.emplace_back(create_trx_w_action_and_signer(act, _action_auth_priv_key, ++_nonce_prefix, _nonce, _trx_expiration, _chain_id, _last_irr_block_id)); + ilog("Populate initial transaction."); + _trxs.emplace_back(create_trx_w_actions_and_signer(_actions, signer_key, ++_nonce_prefix, _nonce, _trx_expiration, _chain_id, _last_irr_block_id)); ilog("Setup p2p transaction provider"); - ilog("Update each trx to qualify as unique and fresh timestamps, re-sign trx, and send each updated transactions via p2p transaction provider"); + ilog("Update each trx to qualify as unique and fresh timestamps and update each action with unique generated account name if necessary, re-sign trx, and send each updated transactions via p2p transaction provider"); _provider.setup(); return true; diff --git a/tests/trx_generator/trx_generator.hpp b/tests/trx_generator/trx_generator.hpp index d308ab47fb..01180bfb0b 100644 --- a/tests/trx_generator/trx_generator.hpp +++ b/tests/trx_generator/trx_generator.hpp @@ -6,6 +6,7 @@ #include #include #include +#include #include namespace eosio::testing { @@ -27,6 +28,53 @@ namespace eosio::testing { fc::crypto::private_key _second_act_priv_key; }; + struct account_name_generator { + account_name_generator() : _name_index_vec(ACCT_NAME_LEN, 0) {} + + const char* CHARMAP = "12345abcdefghijklmnopqrstuvwxyz"; + const int ACCT_NAME_CHAR_CNT = 31; + const int ACCT_NAME_LEN = 12; + const int MAX_PREFEX = 960; + std::vector _name_index_vec; + + void increment(int index) { + _name_index_vec[index]++; + if(_name_index_vec[index] >= ACCT_NAME_CHAR_CNT) { + _name_index_vec[index] = 0; + increment(index - 1); + } + } + + void increment() { + 
increment(_name_index_vec.size() - 1); + } + + void incrementPrefix() { + increment(1); + } + + void setPrefix(int id) { + if (id > MAX_PREFEX) { + elog("Account Name Generator Prefix above allowable ${max}", ("max", MAX_PREFEX)); + return; + } + _name_index_vec[0] = 0; + _name_index_vec[1] = 0; + for(int i = 0; i < id; i++) { + incrementPrefix(); + } + }; + + std::string calcName() { + std::string name; + name.reserve(12); + for(auto i: _name_index_vec) { + name += CHARMAP[i]; + } + return name; + } + }; + struct trx_generator_base { p2p_trx_provider _provider; uint16_t _id; @@ -49,14 +97,16 @@ namespace eosio::testing { trx_generator_base(uint16_t id, std::string chain_id_in, std::string contract_owner_account, fc::microseconds trx_expr, std::string lib_id_str, std::string log_dir, bool stop_on_trx_failed, const std::string& peer_endpoint="127.0.0.1", unsigned short port=9876); - void update_resign_transaction(eosio::chain::signed_transaction& trx, fc::crypto::private_key priv_key, uint64_t& nonce_prefix, uint64_t& nonce, + virtual void update_resign_transaction(eosio::chain::signed_transaction& trx, fc::crypto::private_key priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const eosio::chain::chain_id_type& chain_id, const eosio::chain::block_id_type& last_irr_block_id); void push_transaction(p2p_trx_provider& provider, signed_transaction_w_signer& trx, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const eosio::chain::chain_id_type& chain_id, const eosio::chain::block_id_type& last_irr_block_id); + void set_transaction_headers(eosio::chain::transaction& trx, const eosio::chain::block_id_type& last_irr_block_id, const fc::microseconds expiration, uint32_t delay_sec = 0); - signed_transaction_w_signer create_trx_w_action_and_signer(const eosio::chain::action& act, const fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, uint64_t& nonce, + + signed_transaction_w_signer 
create_trx_w_actions_and_signer(std::vector act, const fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const eosio::chain::chain_id_type& chain_id, const eosio::chain::block_id_type& last_irr_block_id); void log_first_trx(const std::string& log_dir, const eosio::chain::signed_transaction& trx); @@ -89,18 +139,31 @@ namespace eosio::testing { struct trx_generator : public trx_generator_base{ std::string _abi_data_file_path; - eosio::chain::name _action; - eosio::chain::name _action_auth_account; - fc::crypto::private_key _action_auth_priv_key; - std::string _action_data_file_or_str; + std::string _actions_data_json_file_or_str; + std::string _actions_auths_json_file_or_str; + account_name_generator _acct_name_generator; + + eosio::chain::abi_serializer _abi; + std::vector _unpacked_actions; + std::map> _acct_gen_fields; + std::vector _actions; const fc::microseconds abi_serializer_max_time = fc::seconds(10); // No risk to client side serialization taking a long time trx_generator(uint16_t id, std::string chain_id_in, const std::string& abi_data_file, std::string contract_owner_account, - std::string action_name, std::string action_auth_account, const std::string& action_auth_priv_key_str, const std::string& action_data_file_or_str, + const std::string& actions_data_json_file_or_str, const std::string& actions_auths_json_file_or_str, fc::microseconds trx_expr, std::string lib_id_str, std::string log_dir, bool stop_on_trx_failed, const std::string& peer_endpoint="127.0.0.1", unsigned short port=9876); + void locate_key_words_in_action_mvo(std::vector& acctGenFieldsOut, fc::mutable_variant_object& action_mvo, const std::string& keyword); + void locate_key_words_in_action_array(std::map>& acctGenFieldsOut, fc::variants& action_array, const std::string& keyword); + void update_key_word_fields_in_sub_action(std::string key, fc::mutable_variant_object& action_mvo, std::string action_inner_key, const 
std::string keyWord); + void update_key_word_fields_in_action(std::vector& acctGenFields, fc::mutable_variant_object& action_mvo, const std::string keyWord); + + void update_actions(); + virtual void update_resign_transaction(eosio::chain::signed_transaction& trx, fc::crypto::private_key priv_key, uint64_t& nonce_prefix, uint64_t& nonce, + const fc::microseconds& trx_expiration, const eosio::chain::chain_id_type& chain_id, const eosio::chain::block_id_type& last_irr_block_id); + fc::variant json_from_file_or_string(const std::string& file_or_str, fc::json::parse_type ptype = fc::json::parse_type::legacy_parser); bool setup(); diff --git a/tests/trx_generator/trx_generator_tests.cpp b/tests/trx_generator/trx_generator_tests.cpp index 6f185c940e..99d5abdafb 100644 --- a/tests/trx_generator/trx_generator_tests.cpp +++ b/tests/trx_generator/trx_generator_tests.cpp @@ -327,23 +327,20 @@ BOOST_AUTO_TEST_CASE(trx_generator_constructor) { uint16_t id = 1; std::string chain_id = "999"; - std::string contract_owner_account = "eosio"; - std::string acct = "aaa"; - std::string action_name = "transfer"; - std::string action_auth_acct = "aaa"; - std::string action_auth_priv_key_str = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"; - const std::string action_data = "{\"from\":\"aaa\",\"to\":\"bbb\",\"quantity\":\"10.0000 SYS\",\"memo\":\"hello\"}"; const std::string abi_file = "../../unittests/contracts/eosio.token/eosio.token.abi"; + std::string contract_owner_account = "eosio"; + std::string actions_data = "[{\"actionAuthAcct\": \"testacct1\",\"actionName\": \"transfer\",\"authorization\": {\"actor\": \"testacct1\",\"permission\": \"active\"},\"actionData\": {\"from\": \"testacct1\",\"to\": \"testacct2\",\"quantity\": \"0.0001 CUR\",\"memo\": \"transaction specified\"}}]"; + std::string action_auths = 
"{\"testacct1\":\"5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3\",\"testacct2\":\"5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3\",\"eosio\":\"5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3\"}"; fc::microseconds trx_expr = fc::seconds(3600); std::string log_dir = "."; std::string lib_id_str = "00000062989f69fd251df3e0b274c3364ffc2f4fce73de3f1c7b5e11a4c92f21"; + bool stop_on_trx_failed = true; std::string peer_endpoint = "127.0.0.1"; unsigned short port = 9876; - bool stop_on_trx_failed = true; auto generator = trx_generator(id, chain_id, abi_file, contract_owner_account, - action_name, action_auth_acct, action_auth_priv_key_str, action_data, trx_expr, - lib_id_str, log_dir, stop_on_trx_failed, peer_endpoint, port); + actions_data, action_auths, + trx_expr, lib_id_str, log_dir, stop_on_trx_failed, peer_endpoint, port); } BOOST_AUTO_TEST_SUITE_END() From df12ef4835b6ebe92d39108a328ccfd25da0902d Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 9 Feb 2023 08:07:06 -0600 Subject: [PATCH 089/178] Fix argument names. 
--- tests/trx_generator/trx_generator.hpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/trx_generator/trx_generator.hpp b/tests/trx_generator/trx_generator.hpp index 01180bfb0b..6082ac5d24 100644 --- a/tests/trx_generator/trx_generator.hpp +++ b/tests/trx_generator/trx_generator.hpp @@ -155,10 +155,10 @@ namespace eosio::testing { fc::microseconds trx_expr, std::string lib_id_str, std::string log_dir, bool stop_on_trx_failed, const std::string& peer_endpoint="127.0.0.1", unsigned short port=9876); - void locate_key_words_in_action_mvo(std::vector& acctGenFieldsOut, fc::mutable_variant_object& action_mvo, const std::string& keyword); - void locate_key_words_in_action_array(std::map>& acctGenFieldsOut, fc::variants& action_array, const std::string& keyword); - void update_key_word_fields_in_sub_action(std::string key, fc::mutable_variant_object& action_mvo, std::string action_inner_key, const std::string keyWord); - void update_key_word_fields_in_action(std::vector& acctGenFields, fc::mutable_variant_object& action_mvo, const std::string keyWord); + void locate_key_words_in_action_mvo(std::vector& acct_gen_fields_out, fc::mutable_variant_object& action_mvo, const std::string& key_word); + void locate_key_words_in_action_array(std::map>& acct_gen_fields_out, fc::variants& action_array, const std::string& key_word); + void update_key_word_fields_in_sub_action(std::string key, fc::mutable_variant_object& action_mvo, std::string action_inner_key, const std::string key_word); + void update_key_word_fields_in_action(std::vector& acct_gen_fields, fc::mutable_variant_object& action_mvo, const std::string key_word); void update_actions(); virtual void update_resign_transaction(eosio::chain::signed_transaction& trx, fc::crypto::private_key priv_key, uint64_t& nonce_prefix, uint64_t& nonce, From 9a8d79212d796d340b717c03e76989a9e83f9d91 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 9 Feb 2023 08:07:44 -0600 Subject: [PATCH 090/178] 
Control test-duration-sec for these tests. Default is 90 sec, don't need that long for these tests. --- tests/performance_tests/CMakeLists.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/performance_tests/CMakeLists.txt b/tests/performance_tests/CMakeLists.txt index b7f32ba183..1fcf61f0c0 100644 --- a/tests/performance_tests/CMakeLists.txt +++ b/tests/performance_tests/CMakeLists.txt @@ -10,9 +10,9 @@ configure_file(validate_nodeos_plugin_args.py validate_nodeos_plugin_args.py COP configure_file(userTrxDataTransfer.json userTrxDataTransfer.json COPYONLY) configure_file(userTrxDataNewAccount.json userTrxDataNewAccount.json COPYONLY) -add_test(NAME performance_test_basic COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --clean-run WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME performance_test_basic_ex_transfer_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --clean-run --user-trx-data-file tests/performance_tests/userTrxDataTransfer.json WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME performance_test_basic_ex_new_acct_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --clean-run --user-trx-data-file tests/performance_tests/userTrxDataNewAccount.json WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --clean-run WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic_ex_transfer_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --clean-run --user-trx-data-file tests/performance_tests/userTrxDataTransfer.json WORKING_DIRECTORY 
${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic_ex_new_acct_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --clean-run --user-trx-data-file tests/performance_tests/userTrxDataNewAccount.json WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME log_reader_tests COMMAND tests/performance_tests/log_reader_tests.py WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME validate_nodeos_plugin_args COMMAND tests/performance_tests/validate_nodeos_plugin_args.py WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST performance_test_basic PROPERTY LABELS nonparallelizable_tests) From 3022c7c9f7526007bbab3c45bd08f8aaa71777b3 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 9 Feb 2023 09:18:16 -0600 Subject: [PATCH 091/178] Fix validate_nodeos_plugin_args test by updating NetPlugingArgs default. --- tests/performance_tests/NodeosPluginArgs/NetPluginArgs.py | 2 +- tests/performance_tests/performance_test_basic.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/performance_tests/NodeosPluginArgs/NetPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/NetPluginArgs.py index a8e8bbc962..20a090acf0 100755 --- a/tests/performance_tests/NodeosPluginArgs/NetPluginArgs.py +++ b/tests/performance_tests/NodeosPluginArgs/NetPluginArgs.py @@ -51,7 +51,7 @@ class NetPluginArgs(BasePluginArgs): _p2pDedupCacheExpireTimeSecNodeosDefault: int=10 _p2pDedupCacheExpireTimeSecNodeosArg: str="--p2p-dedup-cache-expire-time-sec" netThreads: int=None - _netThreadsNodeosDefault: int=2 + _netThreadsNodeosDefault: int=4 _netThreadsNodeosArg: str="--net-threads" syncFetchSpan: int=None _syncFetchSpanNodeosDefault: int=100 diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 0712730c3d..a7b914cdfb 100755 --- a/tests/performance_tests/performance_test_basic.py +++ 
b/tests/performance_tests/performance_test_basic.py @@ -497,7 +497,7 @@ def createBaseArgumentParser(): In \"heap\" mode database is preloaded in to swappable memory and will use huge pages if available. \ In \"locked\" mode database is preloaded, locked in to memory, and will use huge pages if available.", choices=["mapped", "heap", "locked"], default="mapped") - ptbBaseParserGroup.add_argument("--net-threads", type=int, help="Number of worker threads in net_plugin thread pool", default=2) + ptbBaseParserGroup.add_argument("--net-threads", type=int, help="Number of worker threads in net_plugin thread pool", default=4) ptbBaseParserGroup.add_argument("--disable-subjective-billing", type=bool, help="Disable subjective CPU billing for API/P2P transactions", default=True) ptbBaseParserGroup.add_argument("--last-block-time-offset-us", type=int, help="Offset of last block producing time in microseconds. Valid range 0 .. -block_time_interval.", default=0) ptbBaseParserGroup.add_argument("--produce-time-offset-us", type=int, help="Offset of non last block producing time in microseconds. Valid range 0 .. -block_time_interval.", default=0) From 68fe107e1a54bf4886911f04340505ed29b621e2 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 9 Feb 2023 10:08:32 -0600 Subject: [PATCH 092/178] Apply nonparallelizable_tests flag to new tests. 
--- tests/performance_tests/CMakeLists.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/performance_tests/CMakeLists.txt b/tests/performance_tests/CMakeLists.txt index 1fcf61f0c0..08d232f60d 100644 --- a/tests/performance_tests/CMakeLists.txt +++ b/tests/performance_tests/CMakeLists.txt @@ -16,5 +16,7 @@ add_test(NAME performance_test_basic_ex_new_acct_trx_spec COMMAND tests/performa add_test(NAME log_reader_tests COMMAND tests/performance_tests/log_reader_tests.py WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME validate_nodeos_plugin_args COMMAND tests/performance_tests/validate_nodeos_plugin_args.py WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST performance_test_basic PROPERTY LABELS nonparallelizable_tests) +set_property(TEST performance_test_basic_ex_transfer_trx_spec PROPERTY LABELS nonparallelizable_tests) +set_property(TEST performance_test_basic_ex_new_acct_trx_spec PROPERTY LABELS nonparallelizable_tests) add_subdirectory( NodeosPluginArgs ) From 310e6b8de7272d2ce3dc9174a4f356ebf6857a62 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 9 Feb 2023 10:52:39 -0600 Subject: [PATCH 093/178] Make actions data and auths const. 
--- tests/trx_generator/trx_generator_tests.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/trx_generator/trx_generator_tests.cpp b/tests/trx_generator/trx_generator_tests.cpp index 99d5abdafb..509d603383 100644 --- a/tests/trx_generator/trx_generator_tests.cpp +++ b/tests/trx_generator/trx_generator_tests.cpp @@ -329,8 +329,8 @@ BOOST_AUTO_TEST_CASE(trx_generator_constructor) std::string chain_id = "999"; const std::string abi_file = "../../unittests/contracts/eosio.token/eosio.token.abi"; std::string contract_owner_account = "eosio"; - std::string actions_data = "[{\"actionAuthAcct\": \"testacct1\",\"actionName\": \"transfer\",\"authorization\": {\"actor\": \"testacct1\",\"permission\": \"active\"},\"actionData\": {\"from\": \"testacct1\",\"to\": \"testacct2\",\"quantity\": \"0.0001 CUR\",\"memo\": \"transaction specified\"}}]"; - std::string action_auths = "{\"testacct1\":\"5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3\",\"testacct2\":\"5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3\",\"eosio\":\"5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3\"}"; + const std::string actions_data = "[{\"actionAuthAcct\": \"testacct1\",\"actionName\": \"transfer\",\"authorization\": {\"actor\": \"testacct1\",\"permission\": \"active\"},\"actionData\": {\"from\": \"testacct1\",\"to\": \"testacct2\",\"quantity\": \"0.0001 CUR\",\"memo\": \"transaction specified\"}}]"; + const std::string action_auths = "{\"testacct1\":\"5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3\",\"testacct2\":\"5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3\",\"eosio\":\"5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3\"}"; fc::microseconds trx_expr = fc::seconds(3600); std::string log_dir = "."; std::string lib_id_str = "00000062989f69fd251df3e0b274c3364ffc2f4fce73de3f1c7b5e11a4c92f21"; From 299c9e9ee0ed1fe28e1d24455cb724c27eccb8ce Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 9 Feb 2023 10:53:00 -0600 Subject: [PATCH 094/178] Fix 
Cluster's arguments for TransactionGeneratorsLauncher. --- tests/TestHarness/Cluster.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/TestHarness/Cluster.py b/tests/TestHarness/Cluster.py index 81976d24b6..c0d66391d6 100644 --- a/tests/TestHarness/Cluster.py +++ b/tests/TestHarness/Cluster.py @@ -1763,7 +1763,7 @@ def stripValues(lowestMaxes,greaterThan): def launchTrxGenerators(self, contractOwnerAcctName: str, acctNamesList: list, acctPrivKeysList: list, nodeId: int=0, tpsPerGenerator: int=10, numGenerators: int=1, durationSec: int=60, - waitToComplete:bool=False, abiFile=None, actionName=None, actionAuthAcct=None, actionAuthPrivKey=None, actionData=None): + waitToComplete:bool=False, abiFile=None, actionsData=None, actionsAuths=None): Utils.Print("Configure txn generators") node=self.getNode(nodeId) p2pListenPort = self.getNodeP2pPort(nodeId) @@ -1780,7 +1780,7 @@ def launchTrxGenerators(self, contractOwnerAcctName: str, acctNamesList: list, a self.trxGenLauncher = TransactionGeneratorsLauncher(chainId=chainId, lastIrreversibleBlockId=lib_id, contractOwnerAccount=contractOwnerAcctName, accts=','.join(map(str, acctNamesList)), privateKeys=','.join(map(str, acctPrivKeysList)), trxGenDurationSec=durationSec, logDir=Utils.DataDir, - abiFile=abiFile, actionName=actionName, actionAuthAcct=actionAuthAcct, actionAuthPrivKey=actionAuthPrivKey, actionData=actionData, + abiFile=abiFile, actionsData=actionsData, actionsAuths=actionsAuths, peerEndpoint=self.host, port=p2pListenPort, tpsTrxGensConfig=tpsTrxGensConfig) Utils.Print("Launch txn generators and start generating/sending transactions") From f6f56e5c5d2e2d174f4a544a6217c6fdc2596f57 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 9 Feb 2023 14:31:13 -0600 Subject: [PATCH 095/178] Fix logic in log_transactions. Now that default contract is loaded to eosio again, it teased out a logic error here. Should be 'or' not 'and'. 
--- tests/performance_tests/performance_test_basic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index da19eceeea..3cf9407837 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -98,7 +98,7 @@ class SpecifiedContract: def log_transactions(self, trxDataFile, block): for trx in block['payload']['transactions']: for actions in trx['actions']: - if actions['account'] != 'eosio' and actions['action'] != 'onblock': + if actions['account'] != 'eosio' or actions['action'] != 'onblock': trxDataFile.write(f"{trx['id']},{trx['block_num']},{trx['block_time']},{trx['cpu_usage_us']},{trx['net_usage_words']},{trx['actions']}\n") def __post_init__(self): From 0dc7b3ae582278575be3c2b73c74705ff486de91 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 10 Feb 2023 13:04:45 -0600 Subject: [PATCH 096/178] Rename id to generator_id to be more specific. 
--- tests/trx_generator/trx_generator.cpp | 14 +++++++------- tests/trx_generator/trx_generator.hpp | 14 +++++++------- tests/trx_generator/trx_generator_tests.cpp | 4 ++-- 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/tests/trx_generator/trx_generator.cpp b/tests/trx_generator/trx_generator.cpp index f5ec6f95bd..863f0c6f03 100644 --- a/tests/trx_generator/trx_generator.cpp +++ b/tests/trx_generator/trx_generator.cpp @@ -93,14 +93,14 @@ namespace eosio::testing { return actions_pairs_vector; } - trx_generator_base::trx_generator_base(uint16_t id, std::string chain_id_in, std::string contract_owner_account, fc::microseconds trx_expr, std::string lib_id_str, std::string log_dir, + trx_generator_base::trx_generator_base(uint16_t generator_id, std::string chain_id_in, std::string contract_owner_account, fc::microseconds trx_expr, std::string lib_id_str, std::string log_dir, bool stop_on_trx_failed, const std::string& peer_endpoint, unsigned short port) - : _provider(peer_endpoint, port), _id(id), _chain_id(chain_id_in), _contract_owner_account(contract_owner_account), _trx_expiration(trx_expr), + : _provider(peer_endpoint, port), _generator_id(generator_id), _chain_id(chain_id_in), _contract_owner_account(contract_owner_account), _trx_expiration(trx_expr), _last_irr_block_id(fc::variant(lib_id_str).as()), _log_dir(log_dir), _stop_on_trx_failed(stop_on_trx_failed) {} - transfer_trx_generator::transfer_trx_generator(uint16_t id, std::string chain_id_in, std::string contract_owner_account, const std::vector& accts, + transfer_trx_generator::transfer_trx_generator(uint16_t generator_id, std::string chain_id_in, std::string contract_owner_account, const std::vector& accts, fc::microseconds trx_expr, const std::vector& private_keys_str_vector, std::string lib_id_str, std::string log_dir, bool stop_on_trx_failed, const std::string& peer_endpoint, unsigned short port) - : trx_generator_base(id, chain_id_in, contract_owner_account, trx_expr, lib_id_str, 
log_dir, stop_on_trx_failed, peer_endpoint, port), _accts(accts), _private_keys_str_vector(private_keys_str_vector) {} + : trx_generator_base(generator_id, chain_id_in, contract_owner_account, trx_expr, lib_id_str, log_dir, stop_on_trx_failed, peer_endpoint, port), _accts(accts), _private_keys_str_vector(private_keys_str_vector) {} vector transfer_trx_generator::get_accounts(const vector& account_str_vector) { vector acct_name_list; @@ -221,11 +221,11 @@ namespace eosio::testing { trx_generator_base::update_resign_transaction(trx, priv_key, nonce_prefix, nonce, trx_expiration, chain_id, last_irr_block_id); } - trx_generator::trx_generator(uint16_t id, std::string chain_id_in, const std::string& abi_data_file, std::string contract_owner_account, + trx_generator::trx_generator(uint16_t generator_id, std::string chain_id_in, const std::string& abi_data_file, std::string contract_owner_account, const std::string& actions_data_json_file_or_str, const std::string& actions_auths_json_file_or_str, fc::microseconds trx_expr, std::string lib_id_str, std::string log_dir, bool stop_on_trx_failed, const std::string& peer_endpoint, unsigned short port) - : trx_generator_base(id, chain_id_in, contract_owner_account, trx_expr, lib_id_str, log_dir, stop_on_trx_failed, peer_endpoint, port), + : trx_generator_base(generator_id, chain_id_in, contract_owner_account, trx_expr, lib_id_str, log_dir, stop_on_trx_failed, peer_endpoint, port), _abi_data_file_path(abi_data_file), _actions_data_json_file_or_str(actions_data_json_file_or_str), _actions_auths_json_file_or_str(actions_auths_json_file_or_str), _acct_name_generator() {} @@ -293,7 +293,7 @@ namespace eosio::testing { ilog("acct_gen_fields entry: ${value}", ("value", e)); } ilog("Priming name generator for trx generator prefix."); - _acct_name_generator.setPrefix(_id); + _acct_name_generator.setPrefix(_generator_id); } ilog("Setting up transaction signer."); diff --git a/tests/trx_generator/trx_generator.hpp 
b/tests/trx_generator/trx_generator.hpp index 6082ac5d24..66ce82005f 100644 --- a/tests/trx_generator/trx_generator.hpp +++ b/tests/trx_generator/trx_generator.hpp @@ -53,14 +53,14 @@ namespace eosio::testing { increment(1); } - void setPrefix(int id) { - if (id > MAX_PREFEX) { + void setPrefix(int generator_id) { + if (generator_id > MAX_PREFEX) { elog("Account Name Generator Prefix above allowable ${max}", ("max", MAX_PREFEX)); return; } _name_index_vec[0] = 0; _name_index_vec[1] = 0; - for(int i = 0; i < id; i++) { + for(int i = 0; i < generator_id; i++) { incrementPrefix(); } }; @@ -77,7 +77,7 @@ namespace eosio::testing { struct trx_generator_base { p2p_trx_provider _provider; - uint16_t _id; + uint16_t _generator_id; eosio::chain::chain_id_type _chain_id; eosio::chain::name _contract_owner_account; fc::microseconds _trx_expiration; @@ -94,7 +94,7 @@ namespace eosio::testing { bool _stop_on_trx_failed = true; - trx_generator_base(uint16_t id, std::string chain_id_in, std::string contract_owner_account, fc::microseconds trx_expr, std::string lib_id_str, std::string log_dir, bool stop_on_trx_failed, + trx_generator_base(uint16_t generator_id, std::string chain_id_in, std::string contract_owner_account, fc::microseconds trx_expr, std::string lib_id_str, std::string log_dir, bool stop_on_trx_failed, const std::string& peer_endpoint="127.0.0.1", unsigned short port=9876); virtual void update_resign_transaction(eosio::chain::signed_transaction& trx, fc::crypto::private_key priv_key, uint64_t& nonce_prefix, uint64_t& nonce, @@ -121,7 +121,7 @@ namespace eosio::testing { const std::vector _accts; std::vector _private_keys_str_vector; - transfer_trx_generator(uint16_t id, std::string chain_id_in, std::string contract_owner_account, const std::vector& accts, + transfer_trx_generator(uint16_t generator_id, std::string chain_id_in, std::string contract_owner_account, const std::vector& accts, fc::microseconds trx_expr, const std::vector& private_keys_str_vector, 
std::string lib_id_str, std::string log_dir, bool stop_on_trx_failed, const std::string& peer_endpoint="127.0.0.1", unsigned short port=9876); @@ -150,7 +150,7 @@ namespace eosio::testing { const fc::microseconds abi_serializer_max_time = fc::seconds(10); // No risk to client side serialization taking a long time - trx_generator(uint16_t id, std::string chain_id_in, const std::string& abi_data_file, std::string contract_owner_account, + trx_generator(uint16_t generator_id, std::string chain_id_in, const std::string& abi_data_file, std::string contract_owner_account, const std::string& actions_data_json_file_or_str, const std::string& actions_auths_json_file_or_str, fc::microseconds trx_expr, std::string lib_id_str, std::string log_dir, bool stop_on_trx_failed, const std::string& peer_endpoint="127.0.0.1", unsigned short port=9876); diff --git a/tests/trx_generator/trx_generator_tests.cpp b/tests/trx_generator/trx_generator_tests.cpp index 509d603383..c92fb21f55 100644 --- a/tests/trx_generator/trx_generator_tests.cpp +++ b/tests/trx_generator/trx_generator_tests.cpp @@ -325,7 +325,7 @@ BOOST_AUTO_TEST_CASE(tps_cant_keep_up_monitored) BOOST_AUTO_TEST_CASE(trx_generator_constructor) { - uint16_t id = 1; + uint16_t generator_id = 1; std::string chain_id = "999"; const std::string abi_file = "../../unittests/contracts/eosio.token/eosio.token.abi"; std::string contract_owner_account = "eosio"; @@ -338,7 +338,7 @@ BOOST_AUTO_TEST_CASE(trx_generator_constructor) std::string peer_endpoint = "127.0.0.1"; unsigned short port = 9876; - auto generator = trx_generator(id, chain_id, abi_file, contract_owner_account, + auto generator = trx_generator(generator_id, chain_id, abi_file, contract_owner_account, actions_data, action_auths, trx_expr, lib_id_str, log_dir, stop_on_trx_failed, peer_endpoint, port); } From 1ad0888997851e98d132d614a55513117915d33f Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 10 Feb 2023 13:19:45 -0600 Subject: [PATCH 097/178] Address some peer 
review comments. --- tests/trx_generator/trx_generator.cpp | 24 ++++++++++++------------ tests/trx_generator/trx_generator.hpp | 8 ++++---- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/tests/trx_generator/trx_generator.cpp b/tests/trx_generator/trx_generator.cpp index 863f0c6f03..bac91ce5be 100644 --- a/tests/trx_generator/trx_generator.cpp +++ b/tests/trx_generator/trx_generator.cpp @@ -17,7 +17,7 @@ namespace bpo=boost::program_options; namespace eosio::testing { - void trx_generator_base::set_transaction_headers(transaction& trx, const block_id_type& last_irr_block_id, const fc::microseconds expiration, uint32_t delay_sec) { + void trx_generator_base::set_transaction_headers(transaction& trx, const block_id_type& last_irr_block_id, const fc::microseconds& expiration, uint32_t delay_sec) { trx.expiration = fc::time_point::now() + expiration; trx.set_reference_block(last_irr_block_id); @@ -186,9 +186,9 @@ namespace eosio::testing { } } - void trx_generator::update_key_word_fields_in_sub_action(std::string key, fc::mutable_variant_object& action_mvo, std::string action_inner_key, const std::string key_word) { - auto mvo = action_mvo.find(action_inner_key); - if(mvo != action_mvo.end()) { + void trx_generator::update_key_word_fields_in_sub_action(const std::string& key, fc::mutable_variant_object& action_mvo, const std::string& action_inner_key, const std::string& key_word) { + auto itr = action_mvo.find(action_inner_key); + if(itr != action_mvo.end()) { fc::mutable_variant_object inner_mvo = fc::mutable_variant_object(action_mvo[action_inner_key].get_object()); if (inner_mvo.find(key) != inner_mvo.end()) { inner_mvo.set(key, key_word); @@ -197,13 +197,13 @@ namespace eosio::testing { } } - void trx_generator::update_key_word_fields_in_action(std::vector& acct_gen_fields, fc::mutable_variant_object& action_mvo, const std::string key_word) { - for(auto key: acct_gen_fields) { - auto mvo = action_mvo.find(key); - if(mvo != action_mvo.end()) { + void 
trx_generator::update_key_word_fields_in_action(std::vector& acct_gen_fields, fc::mutable_variant_object& action_mvo, const std::string& key_word) { + for(const auto& key: acct_gen_fields) { + auto itr = action_mvo.find(key); + if(itr != action_mvo.end()) { action_mvo.set(key, key_word); } else { - for(auto e: action_mvo) { + for(const auto& e: action_mvo) { if(e.value().get_type() == fc::variant::object_type) { update_key_word_fields_in_sub_action(key, action_mvo, e.key(), key_word); } @@ -215,7 +215,7 @@ namespace eosio::testing { void trx_generator::update_resign_transaction(signed_transaction& trx, fc::crypto::private_key priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const chain_id_type& chain_id, const block_id_type& last_irr_block_id) { trx.actions.clear(); update_actions(); - for(auto act: _actions) { + for(const auto& act: _actions) { trx.actions.push_back(act); } trx_generator_base::update_resign_transaction(trx, priv_key, nonce_prefix, nonce, trx_expiration, chain_id, last_irr_block_id); @@ -242,7 +242,7 @@ namespace eosio::testing { } } - for (auto action_mvo : _unpacked_actions) { + for (const auto& action_mvo : _unpacked_actions) { chain::name action_name = chain::name(action_mvo["actionName"].as_string()); chain::name action_auth_acct = chain::name(action_mvo["actionAuthAcct"].as_string()); bytes packed_action_data; @@ -289,7 +289,7 @@ namespace eosio::testing { if(!_acct_gen_fields.empty()) { ilog("Located the following account names that need to be generated and populted in each transaction:"); - for(auto e: _acct_gen_fields) { + for(const auto& e: _acct_gen_fields) { ilog("acct_gen_fields entry: ${value}", ("value", e)); } ilog("Priming name generator for trx generator prefix."); diff --git a/tests/trx_generator/trx_generator.hpp b/tests/trx_generator/trx_generator.hpp index 66ce82005f..2f7c089a1e 100644 --- a/tests/trx_generator/trx_generator.hpp +++ b/tests/trx_generator/trx_generator.hpp @@ -77,7 
+77,7 @@ namespace eosio::testing { struct trx_generator_base { p2p_trx_provider _provider; - uint16_t _generator_id; + uint16_t _generator_id = 0; eosio::chain::chain_id_type _chain_id; eosio::chain::name _contract_owner_account; fc::microseconds _trx_expiration; @@ -104,7 +104,7 @@ namespace eosio::testing { uint64_t& nonce, const fc::microseconds& trx_expiration, const eosio::chain::chain_id_type& chain_id, const eosio::chain::block_id_type& last_irr_block_id); - void set_transaction_headers(eosio::chain::transaction& trx, const eosio::chain::block_id_type& last_irr_block_id, const fc::microseconds expiration, uint32_t delay_sec = 0); + void set_transaction_headers(eosio::chain::transaction& trx, const eosio::chain::block_id_type& last_irr_block_id, const fc::microseconds& expiration, uint32_t delay_sec = 0); signed_transaction_w_signer create_trx_w_actions_and_signer(std::vector act, const fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const eosio::chain::chain_id_type& chain_id, const eosio::chain::block_id_type& last_irr_block_id); @@ -157,8 +157,8 @@ namespace eosio::testing { void locate_key_words_in_action_mvo(std::vector& acct_gen_fields_out, fc::mutable_variant_object& action_mvo, const std::string& key_word); void locate_key_words_in_action_array(std::map>& acct_gen_fields_out, fc::variants& action_array, const std::string& key_word); - void update_key_word_fields_in_sub_action(std::string key, fc::mutable_variant_object& action_mvo, std::string action_inner_key, const std::string key_word); - void update_key_word_fields_in_action(std::vector& acct_gen_fields, fc::mutable_variant_object& action_mvo, const std::string key_word); + void update_key_word_fields_in_sub_action(const std::string& key, fc::mutable_variant_object& action_mvo, const std::string& action_inner_key, const std::string& key_word); + void update_key_word_fields_in_action(std::vector& acct_gen_fields, 
fc::mutable_variant_object& action_mvo, const std::string& key_word); void update_actions(); virtual void update_resign_transaction(eosio::chain::signed_transaction& trx, fc::crypto::private_key priv_key, uint64_t& nonce_prefix, uint64_t& nonce, From 24e376cf87f5ee0902567685644be2d9d836a34d Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 10 Feb 2023 13:51:14 -0600 Subject: [PATCH 098/178] Additional peer review comments being addressed. --- tests/trx_generator/main.cpp | 12 ++++++------ tests/trx_generator/trx_generator.cpp | 2 +- tests/trx_generator/trx_generator.hpp | 22 +++++++++++----------- 3 files changed, 18 insertions(+), 18 deletions(-) diff --git a/tests/trx_generator/main.cpp b/tests/trx_generator/main.cpp index 686b937493..a4a797aaf0 100644 --- a/tests/trx_generator/main.cpp +++ b/tests/trx_generator/main.cpp @@ -24,8 +24,8 @@ enum return_codes { }; int main(int argc, char** argv) { - const int64_t TRX_EXPIRATION_MAX = 3600; - const uint16_t GENERATOR_ID_MAX = 960; + const int64_t trx_expiration_max = 3600; + const uint16_t generator_id_max = 960; variables_map vmap; options_description cli("Transaction Generator command line options."); uint16_t gen_id; @@ -149,16 +149,16 @@ int main(int argc, char** argv) { } if(vmap.count("generation-id")) { - if(gen_id > GENERATOR_ID_MAX) { - ilog("Initialization error: Exceeded max value for generator id. Value must be less than ${max}.", ("max", GENERATOR_ID_MAX)); + if(gen_id > generator_id_max) { + ilog("Initialization error: Exceeded max value for generator id. Value must be less than ${max}.", ("max", generator_id_max)); cli.print(std::cerr); return INITIALIZE_FAIL; } } if(vmap.count("trx-expiration")) { - if(trx_expr > TRX_EXPIRATION_MAX) { - ilog("Initialization error: Exceeded max value for transaction expiration. Value must be less than ${max}.", ("max", TRX_EXPIRATION_MAX)); + if(trx_expr > trx_expiration_max) { + ilog("Initialization error: Exceeded max value for transaction expiration. 
Value must be less than ${max}.", ("max", trx_expiration_max)); cli.print(std::cerr); return INITIALIZE_FAIL; } diff --git a/tests/trx_generator/trx_generator.cpp b/tests/trx_generator/trx_generator.cpp index bac91ce5be..07c14a3721 100644 --- a/tests/trx_generator/trx_generator.cpp +++ b/tests/trx_generator/trx_generator.cpp @@ -234,7 +234,7 @@ namespace eosio::testing { _actions.clear(); if (!_acct_gen_fields.empty()) { - std::string generated_account_name = _acct_name_generator.calcName(); + std::string generated_account_name = _acct_name_generator.calc_name(); _acct_name_generator.increment(); for (auto const& [key, val] : _acct_gen_fields) { diff --git a/tests/trx_generator/trx_generator.hpp b/tests/trx_generator/trx_generator.hpp index 2f7c089a1e..d4af803518 100644 --- a/tests/trx_generator/trx_generator.hpp +++ b/tests/trx_generator/trx_generator.hpp @@ -20,7 +20,7 @@ namespace eosio::testing { struct action_pair_w_keys { action_pair_w_keys(eosio::chain::action first_action, eosio::chain::action second_action, fc::crypto::private_key first_act_signer, fc::crypto::private_key second_act_signer) - : _first_act(first_action), _second_act(second_action), _first_act_priv_key(first_act_signer), _second_act_priv_key(second_act_signer) {} + : _first_act(std::move(first_action)), _second_act(std::move(second_action)), _first_act_priv_key(std::move(first_act_signer)), _second_act_priv_key(std::move(second_act_signer)) {} eosio::chain::action _first_act; eosio::chain::action _second_act; @@ -29,17 +29,17 @@ namespace eosio::testing { }; struct account_name_generator { - account_name_generator() : _name_index_vec(ACCT_NAME_LEN, 0) {} + account_name_generator() : _name_index_vec(acct_name_len, 0) {} - const char* CHARMAP = "12345abcdefghijklmnopqrstuvwxyz"; - const int ACCT_NAME_CHAR_CNT = 31; - const int ACCT_NAME_LEN = 12; - const int MAX_PREFEX = 960; + static constexpr char char_map[] = "12345abcdefghijklmnopqrstuvwxyz"; + static constexpr int acct_name_char_cnt = 
sizeof(char_map) - 1; + const int acct_name_len = 12; + const int prefix_max = 960; std::vector _name_index_vec; void increment(int index) { _name_index_vec[index]++; - if(_name_index_vec[index] >= ACCT_NAME_CHAR_CNT) { + if(_name_index_vec[index] >= acct_name_char_cnt) { _name_index_vec[index] = 0; increment(index - 1); } @@ -54,8 +54,8 @@ namespace eosio::testing { } void setPrefix(int generator_id) { - if (generator_id > MAX_PREFEX) { - elog("Account Name Generator Prefix above allowable ${max}", ("max", MAX_PREFEX)); + if (generator_id > prefix_max) { + elog("Account Name Generator Prefix above allowable ${max}", ("max", prefix_max)); return; } _name_index_vec[0] = 0; @@ -65,11 +65,11 @@ namespace eosio::testing { } }; - std::string calcName() { + std::string calc_name() { std::string name; name.reserve(12); for(auto i: _name_index_vec) { - name += CHARMAP[i]; + name += char_map[i]; } return name; } From 12868230fbe1663b95511a8952846ac99a3c74e2 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 10 Feb 2023 15:29:34 -0600 Subject: [PATCH 099/178] Address peer review comment. Virtual destructor added. 
--- tests/trx_generator/trx_generator.hpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/trx_generator/trx_generator.hpp b/tests/trx_generator/trx_generator.hpp index d4af803518..0200be603f 100644 --- a/tests/trx_generator/trx_generator.hpp +++ b/tests/trx_generator/trx_generator.hpp @@ -97,6 +97,8 @@ namespace eosio::testing { trx_generator_base(uint16_t generator_id, std::string chain_id_in, std::string contract_owner_account, fc::microseconds trx_expr, std::string lib_id_str, std::string log_dir, bool stop_on_trx_failed, const std::string& peer_endpoint="127.0.0.1", unsigned short port=9876); + virtual ~trx_generator_base() = default; + virtual void update_resign_transaction(eosio::chain::signed_transaction& trx, fc::crypto::private_key priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const eosio::chain::chain_id_type& chain_id, const eosio::chain::block_id_type& last_irr_block_id); From 56863d0f69e19f8552330164de748661e2769297 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 10 Feb 2023 15:44:15 -0600 Subject: [PATCH 100/178] Address additional peer review comments. 
--- tests/trx_generator/trx_generator.cpp | 59 ++++++++++++--------------- 1 file changed, 26 insertions(+), 33 deletions(-) diff --git a/tests/trx_generator/trx_generator.cpp b/tests/trx_generator/trx_generator.cpp index 07c14a3721..8e4652f7a2 100644 --- a/tests/trx_generator/trx_generator.cpp +++ b/tests/trx_generator/trx_generator.cpp @@ -44,14 +44,9 @@ namespace eosio::testing { std::vector trxs; trxs.reserve(2 * action_pairs_vector.size()); - std::vector act_vec; - for(action_pair_w_keys ap: action_pairs_vector) { - act_vec.push_back(ap._first_act); - trxs.emplace_back(create_trx_w_actions_and_signer(act_vec, ap._first_act_priv_key, nonce_prefix, nonce, trx_expiration, chain_id, last_irr_block_id)); - act_vec.clear(); - act_vec.push_back(ap._second_act); - trxs.emplace_back(create_trx_w_actions_and_signer(act_vec, ap._second_act_priv_key, nonce_prefix, nonce, trx_expiration, chain_id, last_irr_block_id)); - act_vec.clear(); + for (action_pair_w_keys ap: action_pairs_vector) { + trxs.emplace_back(create_trx_w_actions_and_signer({ap._first_act}, ap._first_act_priv_key, nonce_prefix, nonce, trx_expiration, chain_id, last_irr_block_id)); + trxs.emplace_back(create_trx_w_actions_and_signer({ap._second_act}, ap._second_act_priv_key, nonce_prefix, nonce, trx_expiration, chain_id, last_irr_block_id)); } return trxs; @@ -77,8 +72,8 @@ namespace eosio::testing { vector transfer_trx_generator::create_initial_transfer_actions(const std::string& salt, const uint64_t& period, const name& contract_owner_account, const vector& accounts, const vector& priv_keys) { vector actions_pairs_vector; - for(size_t i = 0; i < accounts.size(); ++i) { - for(size_t j = i + 1; j < accounts.size(); ++j) { + for (size_t i = 0; i < accounts.size(); ++i) { + for (size_t j = i + 1; j < accounts.size(); ++j) { //create the actions here ilog("create_initial_transfer_actions: creating transfer from ${acctA} to ${acctB}", ("acctA", accounts.at(i))("acctB", accounts.at(j))); action act_a_to_b = 
make_transfer_action(contract_owner_account, accounts.at(i), accounts.at(j), asset::from_string("1.0000 CUR"), salt); @@ -104,7 +99,7 @@ namespace eosio::testing { vector transfer_trx_generator::get_accounts(const vector& account_str_vector) { vector acct_name_list; - for(string account_name: account_str_vector) { + for (string account_name: account_str_vector) { ilog("get_account about to try to create name for ${acct}", ("acct", account_name)); acct_name_list.push_back(eosio::chain::name(account_name)); } @@ -113,7 +108,7 @@ namespace eosio::testing { vector transfer_trx_generator::get_private_keys(const vector& priv_key_str_vector) { vector key_list; - for(const string& private_key: priv_key_str_vector) { + for (const string& private_key: priv_key_str_vector) { ilog("get_private_keys about to try to create private_key for ${key} : gen key ${newKey}", ("key", private_key)("newKey", fc::crypto::private_key(private_key))); key_list.push_back(fc::crypto::private_key(private_key)); } @@ -169,10 +164,10 @@ namespace eosio::testing { } void trx_generator::locate_key_words_in_action_mvo(std::vector& acct_gen_fields_out, fc::mutable_variant_object& action_mvo, const std::string& key_word) { - for(const mutable_variant_object::entry& e: action_mvo) { - if(e.value().get_type() == fc::variant::string_type && e.value() == key_word) { + for (const mutable_variant_object::entry& e: action_mvo) { + if (e.value().get_type() == fc::variant::string_type && e.value() == key_word) { acct_gen_fields_out.push_back(e.key()); - } else if(e.value().get_type() == fc::variant::object_type) { + } else if (e.value().get_type() == fc::variant::object_type) { auto inner_mvo = fc::mutable_variant_object(e.value()); locate_key_words_in_action_mvo(acct_gen_fields_out, inner_mvo, key_word); } @@ -180,31 +175,29 @@ namespace eosio::testing { } void trx_generator::locate_key_words_in_action_array(std::map>& acct_gen_fields_out, fc::variants& action_array, const std::string& key_word) { - for(size_t 
i = 0; i < action_array.size(); ++i) { + for (size_t i = 0; i < action_array.size(); ++i) { auto action_mvo = fc::mutable_variant_object(action_array[i]); locate_key_words_in_action_mvo(acct_gen_fields_out[i], action_mvo, key_word); } } void trx_generator::update_key_word_fields_in_sub_action(const std::string& key, fc::mutable_variant_object& action_mvo, const std::string& action_inner_key, const std::string& key_word) { - auto itr = action_mvo.find(action_inner_key); - if(itr != action_mvo.end()) { - fc::mutable_variant_object inner_mvo = fc::mutable_variant_object(action_mvo[action_inner_key].get_object()); - if (inner_mvo.find(key) != inner_mvo.end()) { + if (action_mvo.find(action_inner_key) != action_mvo.end()) { + if (action_mvo[action_inner_key].get_object().find(key) != action_mvo[action_inner_key].get_object().end()) { + fc::mutable_variant_object inner_mvo = fc::mutable_variant_object(action_mvo[action_inner_key].get_object()); inner_mvo.set(key, key_word); - action_mvo.set(action_inner_key, inner_mvo); + action_mvo.set(action_inner_key, std::move(inner_mvo)); } } } void trx_generator::update_key_word_fields_in_action(std::vector& acct_gen_fields, fc::mutable_variant_object& action_mvo, const std::string& key_word) { - for(const auto& key: acct_gen_fields) { - auto itr = action_mvo.find(key); - if(itr != action_mvo.end()) { + for (const auto& key: acct_gen_fields) { + if (action_mvo.find(key) != action_mvo.end()) { action_mvo.set(key, key_word); } else { - for(const auto& e: action_mvo) { - if(e.value().get_type() == fc::variant::object_type) { + for (const auto& e: action_mvo) { + if (e.value().get_type() == fc::variant::object_type) { update_key_word_fields_in_sub_action(key, action_mvo, e.key(), key_word); } } @@ -215,7 +208,7 @@ namespace eosio::testing { void trx_generator::update_resign_transaction(signed_transaction& trx, fc::crypto::private_key priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const 
chain_id_type& chain_id, const block_id_type& last_irr_block_id) { trx.actions.clear(); update_actions(); - for(const auto& act: _actions) { + for (const auto& act: _actions) { trx.actions.push_back(act); } trx_generator_base::update_resign_transaction(trx, priv_key, nonce_prefix, nonce, trx_expiration, chain_id, last_irr_block_id); @@ -261,7 +254,7 @@ namespace eosio::testing { act.authorization = vector{{auth_actor, auth_perm}}; act.data = std::move(packed_action_data); - _actions.push_back(act); + _actions.emplace_back(std::move(act)); } } @@ -283,13 +276,13 @@ namespace eosio::testing { auto action_array = unpacked_actions_data_json.get_array(); for (size_t i =0; i < action_array.size(); ++i ) { - _unpacked_actions.push_back(fc::mutable_variant_object(action_array[i])); + _unpacked_actions.emplace_back(fc::mutable_variant_object(action_array[i])); } locate_key_words_in_action_array(_acct_gen_fields, action_array, gen_acct_name_per_trx); - if(!_acct_gen_fields.empty()) { - ilog("Located the following account names that need to be generated and populted in each transaction:"); - for(const auto& e: _acct_gen_fields) { + if (!_acct_gen_fields.empty()) { + ilog("Located the following account names that need to be generated and populated in each transaction:"); + for (const auto& e: _acct_gen_fields) { ilog("acct_gen_fields entry: ${value}", ("value", e)); } ilog("Priming name generator for trx generator prefix."); @@ -374,7 +367,7 @@ namespace eosio::testing { void trx_generator_base::stop_generation() { ilog("Stopping transaction generation"); - if(_txcount) { + if (_txcount) { ilog("${d} transactions executed, ${t}us / transaction", ("d", _txcount)("t", _total_us / (double) _txcount)); _txcount = _total_us = 0; } From 1ca46a897c36bb0d6736dffb37d9cff489dbb8fe Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Fri, 10 Feb 2023 19:13:48 -0600 Subject: [PATCH 101/178] add eosiomechanics cpu run to performance test basic --- tests/TestHarness/Cluster.py | 46 
+++---- .../launch_transaction_generators.py | 20 ++- tests/performance_tests/CMakeLists.txt | 2 + .../performance_test_basic.py | 68 ++++++---- tests/trx_generator/main.cpp | 24 ++-- tests/trx_generator/trx_generator.cpp | 8 +- tests/trx_generator/trx_generator.hpp | 4 +- tests/trx_generator/trx_generator_tests.cpp | 2 +- unittests/contracts/CMakeLists.txt | 1 + unittests/contracts/eosio.mechanics/LICENSE | 21 ++++ unittests/contracts/eosio.mechanics/README.md | 28 +++++ .../contracts/eosmechanics.abi | 70 +++++++++++ .../contracts/eosmechanics.cpp | 119 ++++++++++++++++++ .../contracts/eosmechanics.wasm | Bin 0 -> 9046 bytes .../eosio.mechanics/scripts/mech_actions.sh | 16 +++ .../eosio.mechanics/scripts/mech_check.sh | 9 ++ .../eosio.mechanics/scripts/mech_loop.sh | 12 ++ 17 files changed, 380 insertions(+), 70 deletions(-) create mode 100644 unittests/contracts/eosio.mechanics/LICENSE create mode 100644 unittests/contracts/eosio.mechanics/README.md create mode 100644 unittests/contracts/eosio.mechanics/contracts/eosmechanics.abi create mode 100644 unittests/contracts/eosio.mechanics/contracts/eosmechanics.cpp create mode 100644 unittests/contracts/eosio.mechanics/contracts/eosmechanics.wasm create mode 100644 unittests/contracts/eosio.mechanics/scripts/mech_actions.sh create mode 100644 unittests/contracts/eosio.mechanics/scripts/mech_check.sh create mode 100644 unittests/contracts/eosio.mechanics/scripts/mech_loop.sh diff --git a/tests/TestHarness/Cluster.py b/tests/TestHarness/Cluster.py index 27ad8075cf..5d09743e7f 100644 --- a/tests/TestHarness/Cluster.py +++ b/tests/TestHarness/Cluster.py @@ -94,7 +94,7 @@ def __init__(self, walletd=False, localCluster=True, host="localhost", port=8888 defproduceraPrvtKey: Defproducera account private key defproducerbPrvtKey: Defproducerb account private key """ - self.accounts={} + self.accounts=[] self.nodes=[] self.unstartedNodes=[] self.localCluster=localCluster @@ -705,7 +705,9 @@ def createAccountKeys(count): # 
create account keys and import into wallet. Wallet initialization will be user responsibility # also imports defproducera and defproducerb accounts - def populateWallet(self, accountsCount, wallet, accountNames: list=None): + def populateWallet(self, accountsCount, wallet, accountNames: list=None, createProducerAccounts: bool=False): + if accountsCount == 0 and len(accountNames) == 0: + return True if self.walletMgr is None: Utils.Print("ERROR: WalletMgr hasn't been initialized.") return False @@ -718,15 +720,16 @@ def populateWallet(self, accountsCount, wallet, accountNames: list=None): Utils.Print("Account keys creation failed.") return False - Utils.Print("Importing keys for account %s into wallet %s." % (self.defproduceraAccount.name, wallet.name)) - if not self.walletMgr.importKey(self.defproduceraAccount, wallet): - Utils.Print("ERROR: Failed to import key for account %s" % (self.defproduceraAccount.name)) - return False + if createProducerAccounts: + Utils.Print("Importing keys for account %s into wallet %s." % (self.defproduceraAccount.name, wallet.name)) + if not self.walletMgr.importKey(self.defproduceraAccount, wallet): + Utils.Print("ERROR: Failed to import key for account %s" % (self.defproduceraAccount.name)) + return False - Utils.Print("Importing keys for account %s into wallet %s." % (self.defproducerbAccount.name, wallet.name)) - if not self.walletMgr.importKey(self.defproducerbAccount, wallet): - Utils.Print("ERROR: Failed to import key for account %s" % (self.defproducerbAccount.name)) - return False + Utils.Print("Importing keys for account %s into wallet %s." 
% (self.defproducerbAccount.name, wallet.name)) + if not self.walletMgr.importKey(self.defproducerbAccount, wallet): + Utils.Print("ERROR: Failed to import key for account %s" % (self.defproducerbAccount.name)) + return False if accountNames is not None: for idx, name in enumerate(accountNames): @@ -737,8 +740,8 @@ def populateWallet(self, accountsCount, wallet, accountNames: list=None): if not self.walletMgr.importKey(account, wallet): Utils.Print("ERROR: Failed to import key for account %s" % (account.name)) return False + self.accounts.append(account) - self.accounts=accounts return True def getNodeP2pPort(self, nodeId: int): @@ -1592,20 +1595,21 @@ def cleanup(self): os.remove(f) # Create accounts and validates that the last transaction is received on root node - def createAccounts(self, creator, waitForTransBlock=True, stakedDeposit=1000, validationNodeIndex=0): + def createAccounts(self, creator, nameList: list, waitForTransBlock=True, stakedDeposit=1000, validationNodeIndex=0): if self.accounts is None: return True - transId=None for account in self.accounts: - if Utils.Debug: Utils.Print("Create account %s." % (account.name)) - if Utils.Debug: Utils.Print("Validation node %s" % validationNodeIndex) - trans=self.createAccountAndVerify(account, creator, stakedDeposit, validationNodeIndex=validationNodeIndex) - if trans is None: - Utils.Print("ERROR: Failed to create account %s." % (account.name)) - return False - if Utils.Debug: Utils.Print("Account %s created." % (account.name)) - transId=Node.getTransId(trans) + ret = self.biosNode.getEosAccount(account.name) + if (len(nameList) == 0 and ret is None) or account.name in nameList: + if Utils.Debug: Utils.Print("Create account %s." % (account.name)) + if Utils.Debug: Utils.Print("Validation node %s" % validationNodeIndex) + trans=self.createAccountAndVerify(account, creator, stakedDeposit, validationNodeIndex=validationNodeIndex) + if trans is None: + Utils.Print("ERROR: Failed to create account %s." 
% (account.name)) + return False + if Utils.Debug: Utils.Print("Account %s created." % (account.name)) + transId=Node.getTransId(trans) if waitForTransBlock and transId is not None: node=self.nodes[validationNodeIndex] diff --git a/tests/TestHarness/launch_transaction_generators.py b/tests/TestHarness/launch_transaction_generators.py index 4faf0e7cd8..b822423ba2 100755 --- a/tests/TestHarness/launch_transaction_generators.py +++ b/tests/TestHarness/launch_transaction_generators.py @@ -37,7 +37,8 @@ def __init__(self, targetTps: int, tpsLimitPerGenerator: int): class TransactionGeneratorsLauncher: def __init__(self, chainId: int, lastIrreversibleBlockId: int, contractOwnerAccount: str, accts: str, privateKeys: str, - trxGenDurationSec: int, logDir: str, abiFile: Path, actionName: str, actionData, peerEndpoint: str, port: int, tpsTrxGensConfig: TpsTrxGensConfig): + trxGenDurationSec: int, logDir: str, abiFile: Path, actionName: str, actionData, peerEndpoint: str, port: int, tpsTrxGensConfig: TpsTrxGensConfig, + ownerPrivateKey: str): self.chainId = chainId self.lastIrreversibleBlockId = lastIrreversibleBlockId self.contractOwnerAccount = contractOwnerAccount @@ -51,6 +52,7 @@ def __init__(self, chainId: int, lastIrreversibleBlockId: int, contractOwnerAcco self.actionData = actionData self.peerEndpoint = peerEndpoint self.port = port + self.ownerPrivateKey=ownerPrivateKey def launch(self, waitToComplete=True): self.subprocess_ret_codes = [] @@ -71,7 +73,8 @@ def launch(self, waitToComplete=True): f'--action-data {self.actionData} ' f'--abi-file {self.abiFile} ' f'--peer-endpoint {self.peerEndpoint} ' - f'--port {self.port}' + f'--port {self.port} ' + f'--owner-private-key {self.ownerPrivateKey}' ) self.subprocess_ret_codes.append( subprocess.Popen([ @@ -88,7 +91,8 @@ def launch(self, waitToComplete=True): '--action-data', f'{self.actionData}', '--abi-file', f'{self.abiFile}', '--peer-endpoint', f'{self.peerEndpoint}', - '--port', f'{self.port}' + '--port', 
f'{self.port}', + '--owner-private-key', f'{self.ownerPrivateKey}' ]) ) else: @@ -104,7 +108,8 @@ def launch(self, waitToComplete=True): f'--target-tps {targetTps} ' f'--log-dir {self.logDir} ' f'--peer-endpoint {self.peerEndpoint} ' - f'--port {self.port}' + f'--port {self.port} ' + f'--owner-private-key {self.ownerPrivateKey}' ) self.subprocess_ret_codes.append( subprocess.Popen([ @@ -118,7 +123,8 @@ def launch(self, waitToComplete=True): '--target-tps', f'{targetTps}', '--log-dir', f'{self.logDir}', '--peer-endpoint', f'{self.peerEndpoint}', - '--port', f'{self.port}' + '--port', f'{self.port}', + '--owner-private-key', f'{self.ownerPrivateKey}' ]) ) exitCodes=None @@ -149,6 +155,7 @@ def parseArgs(): parser.add_argument("abi_file", type=str, help="The path to the contract abi file to use for the supplied transaction action data") parser.add_argument("peer_endpoint", type=str, help="set the peer endpoint to send transactions to", default="127.0.0.1") parser.add_argument("port", type=int, help="set the peer endpoint port to send transactions to", default=9876) + parser.add_argument("owner_private_key", type=str, help="ownerPrivateKey of the contract owner") args = parser.parse_args() return args @@ -160,7 +167,8 @@ def main(): privateKeys=args.priv_keys, trxGenDurationSec=args.trx_gen_duration, logDir=args.log_dir, abiFile=args.abi_file, actionName=args.action_name, actionData=args.action_data, peerEndpoint=args.peer_endpoint, port=args.port, - tpsTrxGensConfig=TpsTrxGensConfig(targetTps=args.target_tps, tpsLimitPerGenerator=args.tps_limit_per_generator)) + tpsTrxGensConfig=TpsTrxGensConfig(targetTps=args.target_tps, tpsLimitPerGenerator=args.tps_limit_per_generator), + ownerPrivateKey=args.owner_private_key) exit_codes = trxGenLauncher.launch() diff --git a/tests/performance_tests/CMakeLists.txt b/tests/performance_tests/CMakeLists.txt index 3d8ecc447d..78de81f9ca 100644 --- a/tests/performance_tests/CMakeLists.txt +++ b/tests/performance_tests/CMakeLists.txt @@ 
-8,9 +8,11 @@ configure_file(nodeos_log_3_2.txt.gz nodeos_log_3_2.txt.gz COPYONLY) configure_file(genesis.json genesis.json COPYONLY) configure_file(validate_nodeos_plugin_args.py validate_nodeos_plugin_args.py COPYONLY) configure_file(userTrxData.json userTrxData.json COPYONLY) +configure_file(cpuTrxData.json cpuTrxData.json COPYONLY) add_test(NAME performance_test_basic COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --clean-run WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME performance_test_basic_ex_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --clean-run --user-trx-data-file tests/performance_tests/userTrxData.json WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic_cpu_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --account-name "c" --clean-run --user-trx-data-file tests/performance_tests/cpuTrxData.json WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME log_reader_tests COMMAND tests/performance_tests/log_reader_tests.py WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME validate_nodeos_plugin_args COMMAND tests/performance_tests/validate_nodeos_plugin_args.py WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST performance_test_basic PROPERTY LABELS nonparallelizable_tests) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 43334c497a..c74a7ae117 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -70,12 +70,11 @@ def __str__(self) -> str: @dataclass class SpecifiedContract: - accountName: str = "c" - ownerPublicKey: str = "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - activePublicKey: str = "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" + 
accountName: str = "eosio" contractDir: str = "unittests/contracts/eosio.system" wasmFile: str = "eosio.system.wasm" abiFile: str = "eosio.system.abi" + account: Account = Account(accountName) pnodes: int = 1 totalNodes: int = 2 @@ -266,18 +265,27 @@ def launchCluster(self): ) def setupWalletAndAccounts(self, accountCnt: int=2, accountNames: list=None): - self.wallet = self.walletMgr.create('default') self.accountNames=[] + newAccountNames=[] self.accountPrivKeys=[] if accountNames is not None: - self.cluster.populateWallet(accountsCount=len(accountNames), wallet=self.wallet, accountNames=accountNames) - self.cluster.createAccounts(self.cluster.eosioAccount, stakedDeposit=0, validationNodeIndex=self.validationNodeId) - for index in range(0, len(accountNames)): - self.accountNames.append(self.cluster.accounts[index].name) - self.accountPrivKeys.append(self.cluster.accounts[index].activePrivateKey) + for name in accountNames: + if name == self.clusterConfig.specifiedContract.account.name: + self.accountNames.append(self.clusterConfig.specifiedContract.account.name) + self.accountPrivKeys.append(self.clusterConfig.specifiedContract.account.ownerPrivateKey) + else: + ret = self.cluster.biosNode.getEosAccount(name) + if ret is None: + newAccountNames.append(name) + self.cluster.populateWallet(accountsCount=len(newAccountNames), wallet=self.wallet, accountNames=newAccountNames, createProducerAccounts=True) + self.cluster.createAccounts(self.cluster.eosioAccount, newAccountNames, stakedDeposit=0, validationNodeIndex=self.validationNodeId) + if newAccountNames is not None and len(newAccountNames) != 0: + for index in range(0, len(accountNames)): + self.accountNames.append(self.cluster.accounts[index].name) + self.accountPrivKeys.append(self.cluster.accounts[index].activePrivateKey) else: - self.cluster.populateWallet(accountsCount=accountCnt, wallet=self.wallet) - self.cluster.createAccounts(self.cluster.eosioAccount, stakedDeposit=0, 
validationNodeIndex=self.validationNodeId) + self.cluster.populateWallet(accountsCount=accountCnt, wallet=self.wallet, createProducerAccounts=True) + self.cluster.createAccounts(self.cluster.eosioAccount, newAccountNames, stakedDeposit=0, validationNodeIndex=self.validationNodeId) for index in range(0, accountCnt): self.accountNames.append(self.cluster.accounts[index].name) self.accountPrivKeys.append(self.cluster.accounts[index].activePrivateKey) @@ -287,23 +295,28 @@ def readUserTrxDataFromFile(self, userTrxDataFile: Path): self.userTrxDataDict = json.load(f) def setupContract(self): - specifiedAccount = Account(self.clusterConfig.specifiedContract.accountName) - specifiedAccount.ownerPublicKey = self.clusterConfig.specifiedContract.ownerPublicKey - specifiedAccount.activePublicKey = self.clusterConfig.specifiedContract.activePublicKey - self.cluster.createAccountAndVerify(specifiedAccount, self.cluster.eosioAccount, validationNodeIndex=self.validationNodeId) - print("Publishing contract") - transaction=self.cluster.biosNode.publishContract(specifiedAccount, self.clusterConfig.specifiedContract.contractDir, - self.clusterConfig.specifiedContract.wasmFile, - self.clusterConfig.specifiedContract.abiFile, waitForTransBlock=True) - if transaction is None: - print("ERROR: Failed to publish contract.") - return None + if (self.clusterConfig.specifiedContract.accountName != self.cluster.eosioAccount.name): + self.cluster.populateWallet(accountsCount=1, wallet=self.wallet,accountNames=[self.clusterConfig.specifiedContract.accountName]) + self.cluster.createAccounts(self.cluster.eosioAccount, [self.clusterConfig.specifiedContract.accountName], stakedDeposit=0, validationNodeIndex=self.validationNodeId) + print("Publishing contract") + for index in range(0, len(self.cluster.accounts)): + if self.cluster.accounts[index].name == self.clusterConfig.specifiedContract.accountName: + self.clusterConfig.specifiedContract.account = self.cluster.accounts[index] + 
transaction=self.cluster.biosNode.publishContract(self.clusterConfig.specifiedContract.account, self.clusterConfig.specifiedContract.contractDir, + self.clusterConfig.specifiedContract.wasmFile, + self.clusterConfig.specifiedContract.abiFile, waitForTransBlock=True) + if transaction is None: + print("ERROR: Failed to publish contract.") + return None + else: + self.clusterConfig.specifiedContract.account = self.cluster.eosioAccount def runTpsTest(self) -> PtbTpsTestResult: completedRun = False self.producerNode = self.cluster.getNode(self.producerNodeId) self.producerP2pPort = self.cluster.getNodeP2pPort(self.producerNodeId) self.validationNode = self.cluster.getNode(self.validationNodeId) + self.wallet = self.walletMgr.create('default') self.setupContract() info = self.producerNode.getInfo() chainId = info['chain_id'] @@ -331,7 +344,8 @@ def runTpsTest(self) -> PtbTpsTestResult: contractOwnerAccount=self.clusterConfig.specifiedContract.accountName, accts=','.join(map(str, self.accountNames)), privateKeys=','.join(map(str, self.accountPrivKeys)), trxGenDurationSec=self.ptbConfig.testTrxGenDurationSec, logDir=self.trxGenLogDirPath, abiFile=abiFile, actionName=actionName, actionData=actionData, - peerEndpoint=self.producerNode.host, port=self.producerP2pPort, tpsTrxGensConfig=tpsTrxGensConfig) + peerEndpoint=self.producerNode.host, port=self.producerP2pPort, tpsTrxGensConfig=tpsTrxGensConfig, + ownerPrivateKey=str(self.clusterConfig.specifiedContract.account.ownerPrivateKey)) trxGenExitCodes = trxGenLauncher.launch() print(f"Transaction Generator exit codes: {trxGenExitCodes}") @@ -393,6 +407,8 @@ def analyzeResultsAndReport(self, testResult: PtbTpsTestResult): completedRun=testResult.completedRun) jsonReport = None + self.ptbConfig.quiet = True + self.ptbConfig.delReport = True if not self.ptbConfig.quiet or not self.ptbConfig.delReport: jsonReport = log_reader.reportAsJSON(self.report) @@ -503,9 +519,7 @@ def createBaseArgumentParser(): 
ptbBaseParserGroup.add_argument("--quiet", help="Whether to quiet printing intermediate results and reports to stdout", action='store_true') ptbBaseParserGroup.add_argument("--prods-enable-trace-api", help="Determines whether producer nodes should have eosio::trace_api_plugin enabled", action='store_true') ptbBaseParserGroup.add_argument("--print-missing-transactions", type=bool, help="Toggles if missing transactions are be printed upon test completion.", default=False) - ptbBaseParserGroup.add_argument("--account-name", type=str, help="Name of the account to create and assign a contract to", default="c") - ptbBaseParserGroup.add_argument("--owner-public-key", type=str, help="Owner public key to use with specified account name", default="EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV") - ptbBaseParserGroup.add_argument("--active-public-key", type=str, help="Active public key to use with specified account name", default="EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV") + ptbBaseParserGroup.add_argument("--account-name", type=str, help="Name of the account to create and assign a contract to", default="eosio") ptbBaseParserGroup.add_argument("--contract-dir", type=str, help="Path to contract dir", default="unittests/contracts/eosio.system") ptbBaseParserGroup.add_argument("--wasm-file", type=str, help="WASM file name for contract", default="eosio.system.wasm") ptbBaseParserGroup.add_argument("--abi-file", type=str, help="ABI file name for contract", default="eosio.system.abi") @@ -562,7 +576,7 @@ def main(): testClusterConfig = PerformanceTestBasic.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs, specifiedContract=PerformanceTestBasic.ClusterConfig.SpecifiedContract(accountName=args.account_name, - ownerPublicKey=args.owner_public_key, activePublicKey=args.active_public_key, contractDir=args.contract_dir, + 
contractDir=args.contract_dir, wasmFile=args.wasm_file, abiFile=args.abi_file), nodeosVers=Utils.getNodeosVersion().split('.')[0]) ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=args.target_tps, testTrxGenDurationSec=args.test_duration_sec, tpsLimitPerGenerator=args.tps_limit_per_generator, diff --git a/tests/trx_generator/main.cpp b/tests/trx_generator/main.cpp index b7a6c8282a..fef4df20b4 100644 --- a/tests/trx_generator/main.cpp +++ b/tests/trx_generator/main.cpp @@ -42,6 +42,7 @@ int main(int argc, char** argv) { bool stop_on_trx_failed; std::string peer_endpoint; unsigned short port; + string owner_private_key; bool transaction_specified = false; std::string action_name_in; @@ -71,6 +72,7 @@ int main(int argc, char** argv) { ("stop-on-trx-failed", bpo::value(&stop_on_trx_failed)->default_value(true), "stop transaction generation if sending fails.") ("peer-endpoint", bpo::value(&peer_endpoint)->default_value("127.0.0.1"), "set the peer endpoint to send transactions to") ("port", bpo::value(&port)->default_value(9876), "set the peer endpoint port to send transactions to") + ("owner-private-key", bpo::value(&owner_private_key), "ownerPrivateKey of the contract owner") ("help,h", "print this list") ; @@ -121,13 +123,8 @@ int main(int argc, char** argv) { if(vmap.count("accounts")) { boost::split(account_str_vector, accts, boost::is_any_of(",")); - if(!transaction_specified && account_str_vector.size() < 2) { - ilog("Initialization error: requires at minimum 2 transfer accounts"); - cli.print(std::cerr); - return INITIALIZE_FAIL; - } - if (transaction_specified && account_str_vector.size() < 1) { - ilog("Initialization error: Specifying transaction to generate requires at minimum 1 account."); + if(account_str_vector.size() < 1) { + ilog("Initialization error: requires at minimum 1 account"); cli.print(std::cerr); return INITIALIZE_FAIL; } @@ -186,6 +183,12 @@ int main(int argc, char** argv) { return INITIALIZE_FAIL; } } + + 
if(!vmap.count("owner-private-key")) { + ilog("Initialization error: missing owner-private-key"); + cli.print(std::cerr); + return INITIALIZE_FAIL; + } } catch(bpo::unknown_option& ex) { std::cerr << ex.what() << std::endl; cli.print(std::cerr); @@ -202,14 +205,15 @@ int main(int argc, char** argv) { ilog("Target generation Transaction Per Second (TPS) ${tps}", ("tps", target_tps)); ilog("Logs directory ${logDir}", ("logDir", log_dir_in)); ilog("Peer Endpoint ${peer-endpoint}:${peer-port}", ("peer-endpoint", peer_endpoint)("peer-port", port)); + ilog("OwnerPrivateKey ${key}", ("key", owner_private_key)); fc::microseconds trx_expr_ms = fc::seconds(trx_expr); std::shared_ptr monitor; if (transaction_specified) { - auto generator = std::make_shared(chain_id_in, abi_file_path_in, contract_owner_acct, account_str_vector.at(0), action_name_in, - action_data_file_or_str, trx_expr_ms, private_keys_str_vector.at(0), lib_id_str, log_dir_in, - stop_on_trx_failed, peer_endpoint, port); + auto generator = std::make_shared(chain_id_in, abi_file_path_in, contract_owner_acct, owner_private_key, account_str_vector.at(0), + action_name_in, action_data_file_or_str, trx_expr_ms, private_keys_str_vector.at(0), lib_id_str, + log_dir_in, stop_on_trx_failed, peer_endpoint, port); monitor = std::make_shared(spinup_time_us, max_lag_per, max_lag_duration_us); trx_tps_tester tester{generator, monitor, gen_duration, target_tps}; diff --git a/tests/trx_generator/trx_generator.cpp b/tests/trx_generator/trx_generator.cpp index 438a837a3b..c10af5279d 100644 --- a/tests/trx_generator/trx_generator.cpp +++ b/tests/trx_generator/trx_generator.cpp @@ -166,9 +166,9 @@ namespace eosio::testing { } } - trx_generator::trx_generator(std::string chain_id_in, const std::string& abi_data_file, std::string contract_owner_account, std::string auth_account, std::string action_name, + trx_generator::trx_generator(std::string chain_id_in, const std::string& abi_data_file, std::string contract_owner_account, const 
std::string& owner_private_key, std::string auth_account, std::string action_name, const std::string& action_data_file_or_str, fc::microseconds trx_expr, const std::string& private_key_str, std::string lib_id_str, std::string log_dir, bool stop_on_trx_failed, const std::string& peer_endpoint, unsigned short port) - : trx_generator_base(chain_id_in, contract_owner_account, trx_expr, lib_id_str, log_dir, stop_on_trx_failed, peer_endpoint, port), _abi_data_file_path(abi_data_file), _auth_account(auth_account), + : trx_generator_base(chain_id_in, contract_owner_account, trx_expr, lib_id_str, log_dir, stop_on_trx_failed, peer_endpoint, port), _abi_data_file_path(abi_data_file), _owner_private_key(fc::crypto::private_key(owner_private_key)), _auth_account(auth_account), _action(action_name), _action_data_file_or_str(action_data_file_or_str), _private_key(fc::crypto::private_key(private_key_str)) {} bool trx_generator::setup() { @@ -196,10 +196,10 @@ namespace eosio::testing { eosio::chain::action act; act.account = _contract_owner_account; act.name = _action; - act.authorization = vector{{_auth_account, config::active_name}}; + act.authorization = vector{{_contract_owner_account, config::owner_name}}; act.data = std::move(packed_action_data); - _trxs.emplace_back(create_transfer_trx_w_signer(act, _private_key, ++_nonce_prefix, _nonce, _trx_expiration, _chain_id, _last_irr_block_id)); + _trxs.emplace_back(create_transfer_trx_w_signer(act, _owner_private_key, ++_nonce_prefix, _nonce, _trx_expiration, _chain_id, _last_irr_block_id)); ilog("Setup p2p transaction provider"); diff --git a/tests/trx_generator/trx_generator.hpp b/tests/trx_generator/trx_generator.hpp index db3007672c..30f0ceab1b 100644 --- a/tests/trx_generator/trx_generator.hpp +++ b/tests/trx_generator/trx_generator.hpp @@ -65,10 +65,12 @@ namespace eosio::testing { eosio::chain::name _action; std::string _action_data_file_or_str; fc::crypto::private_key _private_key; + fc::crypto::private_key 
_owner_private_key; const fc::microseconds abi_serializer_max_time = fc::seconds(10); // No risk to client side serialization taking a long time - trx_generator(std::string chain_id_in, const std::string& abi_data_file, std::string contract_owner_account, std::string auth_account, std::string action_name, const std::string& action_data_file_or_str, + trx_generator(std::string chain_id_in, const std::string& abi_data_file, std::string contract_owner_account, const std::string& owner_private_key, + std::string auth_account, std::string action_name, const std::string& action_data_file_or_str, fc::microseconds trx_expr, const std::string& private_key_str, std::string lib_id_str, std::string log_dir, bool stop_on_trx_failed, const std::string& peer_endpoint="127.0.0.1", unsigned short port=9876); diff --git a/tests/trx_generator/trx_generator_tests.cpp b/tests/trx_generator/trx_generator_tests.cpp index 9bb1a3804a..1ca567ec25 100644 --- a/tests/trx_generator/trx_generator_tests.cpp +++ b/tests/trx_generator/trx_generator_tests.cpp @@ -339,7 +339,7 @@ BOOST_AUTO_TEST_CASE(trx_generator_constructor) unsigned short port = 9876; bool stop_on_trx_failed = true; - auto generator = trx_generator(chain_id, abi_file, contract_owner_account, acct, action_name, action_data, trx_expr, private_key_str, lib_id_str, log_dir, stop_on_trx_failed, peer_endpoint, port); + auto generator = trx_generator(chain_id, abi_file, contract_owner_account, private_key_str, acct, action_name, action_data, trx_expr, private_key_str, lib_id_str, log_dir, stop_on_trx_failed, peer_endpoint, port); } BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/contracts/CMakeLists.txt b/unittests/contracts/CMakeLists.txt index 1c1c300390..94eee13963 100644 --- a/unittests/contracts/CMakeLists.txt +++ b/unittests/contracts/CMakeLists.txt @@ -6,6 +6,7 @@ file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/eosio.msig/ DESTINATION ${CMAKE_CURRENT_BI file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/eosio.system/ DESTINATION 
${CMAKE_CURRENT_BINARY_DIR}/eosio.system/) file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/eosio.token/ DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/eosio.token/) file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/eosio.wrap/ DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/eosio.wrap/) +file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/eosio.mechanics/ DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/eosio.mechanics/) file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/old_versions/v1.6.0-rc3/eosio.bios/ DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/old_versions/v1.6.0-rc3/eosio.bios/) file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/old_versions/v1.7.0-develop-preactivate_feature/eosio.bios/ DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/old_versions/v1.7.0-develop-preactivate_feature/eosio.bios/) diff --git a/unittests/contracts/eosio.mechanics/LICENSE b/unittests/contracts/eosio.mechanics/LICENSE new file mode 100644 index 0000000000..764bbfaa4f --- /dev/null +++ b/unittests/contracts/eosio.mechanics/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 Aloha EOS + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/unittests/contracts/eosio.mechanics/README.md b/unittests/contracts/eosio.mechanics/README.md new file mode 100644 index 0000000000..5668289e57 --- /dev/null +++ b/unittests/contracts/eosio.mechanics/README.md @@ -0,0 +1,28 @@ +# EOS Mechanics +This is a collection of EOS contracts and utilities in use by the [EOS Mechanics](https://eosmechanics.com/) research group. + +Please visit us on [Telegram](https://t.me/EOSMechanics) for any feedback or questions. + +## Benchmarks +The benchmarks below are EOS contracts which are set on the `eosmechanics` account on Mainnet, CryptoKylin Testnet, and Jungle Testnet. They are executed during each block producers' schedule, and the timings recorded on-chain using the standard `cpu_usage_us` transaction field. The data is [freely available](https://eosflare.io/account/eosmechanics) to view and analyze, and we encourage doing so to help identify issues and improve block producer performance. + +Example tools that utilize this data: + +- [EOS Block Producer Benchmarks](https://www.alohaeos.com/tools/benchmarks) by [Aloha EOS](https://www.alohaeos.com/) +- [Block Producer Performance](https://labs.eostitan.com/#/block-producer-performance) by [EOS Titan](https://eostitan.com/) + + + +### CPU Benchmark + +This benchmark targets the CPU by calculating Mersenne prime numbers. Calculating primes is an industry standard for measuring CPU performance and it uses code operations that are common in software development. + +### RAM Benchmark + +This benchmark targets EOS RAM by rapidly writing to and reading from a RAM table. 
Due to inefficiencies within the EOSIO software this benchmark is currently CPU heavy and thus we consider it experimental and very similar to the CPU benchmark. As the software performance is improved we expect the results of this benchmark to become more meaningful. + +-- + +#### Disclaimer +*EOS Token holders should not rely on EOS Mechanics benchmark results as an absolute indication of block producers’ performance. Results are mainly intended for operators of the EOS infrastructure to have additional metrics that can aid them in testing different infrastructures, configurations and features while identifying performance differences and potential bottlenecks. These statistics are not subject to verification by other nodes on the network; therefore it is possible for block producers to manipulate them. Furthermore, running custom software or configurations may impact the measurement of these metrics.* + diff --git a/unittests/contracts/eosio.mechanics/contracts/eosmechanics.abi b/unittests/contracts/eosio.mechanics/contracts/eosmechanics.abi new file mode 100644 index 0000000000..dd5bdb0319 --- /dev/null +++ b/unittests/contracts/eosio.mechanics/contracts/eosmechanics.abi @@ -0,0 +1,70 @@ +{ + "____comment": "This file was generated with eosio-abigen. 
DO NOT EDIT ", + "version": "eosio::abi/1.2", + "types": [], + "structs": [ + { + "name": "cpu", + "base": "", + "fields": [] + }, + { + "name": "net", + "base": "", + "fields": [ + { + "name": "input", + "type": "string" + } + ] + }, + { + "name": "ram", + "base": "", + "fields": [] + }, + { + "name": "ramdata", + "base": "", + "fields": [ + { + "name": "id", + "type": "uint64" + }, + { + "name": "one", + "type": "string" + } + ] + } + ], + "actions": [ + { + "name": "cpu", + "type": "cpu", + "ricardian_contract": "" + }, + { + "name": "net", + "type": "net", + "ricardian_contract": "" + }, + { + "name": "ram", + "type": "ram", + "ricardian_contract": "" + } + ], + "tables": [ + { + "name": "ramdata", + "type": "ramdata", + "index_type": "i64", + "key_names": [], + "key_types": [] + } + ], + "ricardian_clauses": [], + "variants": [], + "action_results": [] +} \ No newline at end of file diff --git a/unittests/contracts/eosio.mechanics/contracts/eosmechanics.cpp b/unittests/contracts/eosio.mechanics/contracts/eosmechanics.cpp new file mode 100644 index 0000000000..edaaf1d69f --- /dev/null +++ b/unittests/contracts/eosio.mechanics/contracts/eosmechanics.cpp @@ -0,0 +1,119 @@ +#include +//#include +#include +#pragma precision=log10l(ULLONG_MAX)/2 +typedef enum { FALSE=0, TRUE=1 } BOOL; + +// Max when calculating primes in cpu test +#define CPU_PRIME_MAX 375 + +// Number of rows to write/read in ram test +#define RAM_ROWS 75 + +using namespace eosio; + +CONTRACT eosmechanics : public eosio::contract { + public: + using contract::contract; + + /** + * Simple CPU benchmark that calculates Mersenne prime numbers. + */ + [[eosio::action]] void cpu() { + // Only let us run this + require_auth(_self); + + int p; + + //eosio::print_f("Mersenne primes:\n"); + for (p = 2; p <= CPU_PRIME_MAX; p += 1) { + if (is_prime(p) && is_mersenne_prime(p)) { + // We need to keep an eye on this to make sure it doesn't get optimized out. So far so good. 
+ //eosio::print_f(" %u", p); + } + } + } + + /** + * Simple EOS RAM benchmark which reads and writes a table. + */ + [[eosio::action]] void ram() { + ramdata_index ramdata(_self, _self.value); + + // Only let us run this + require_auth(_self); + + int i; + + // Write + for (i = 0; i < RAM_ROWS; i++) { + ramdata.emplace(_self, [&](auto& row) { + row.id = i; + row.one = "aloha"; + }); + } + + // Read + for (const auto& row: ramdata) { + //eosio::print_f("read %d: %s\n", row.id, row.one); + i = row.id; + } + + // Delete + for(auto itr = ramdata.begin(); itr != ramdata.end();) { + itr = ramdata.erase(itr); + } + } + + /** + * Simple EOS Net benchmark which just accepts any string passed in. + */ + [[eosio::action]] void net(std::string input) { + // Only let us run this + require_auth(_self); + } + + private: + + BOOL is_prime(int p) { + if (p == 2) { + return TRUE; + } else if (p <= 1 || p % 2 == 0) { + return FALSE; + } + + BOOL prime = TRUE; + const int to = sqrt(p); + int i; + for (i = 3; i <= to; i += 2) { + if (!((prime = BOOL(p)) % i)) break; + } + return prime; + } + + BOOL is_mersenne_prime(int p) { + if (p == 2) return TRUE; + + const long long unsigned m_p = (1LLU << p) - 1; + long long unsigned s = 4; + int i; + for (i = 3; i <= p; i++) { + s = (s * s - 2) % m_p; + } + return BOOL(s == 0); + } + + // @abi table ramdata i64 + struct [[eosio::table]] ramdata { + uint64_t id; + std::string one; + + auto primary_key()const { return id; } + EOSLIB_SERIALIZE(ramdata, (id)(one)) + }; + + typedef eosio::multi_index<"ramdata"_n, ramdata> ramdata_index; + +}; + +EOSIO_DISPATCH(eosmechanics, (cpu)(ram)(net)) diff --git a/unittests/contracts/eosio.mechanics/contracts/eosmechanics.wasm b/unittests/contracts/eosio.mechanics/contracts/eosmechanics.wasm new file mode 100644 index 0000000000000000000000000000000000000000..2bcf9cfa62514e4908a46e4449aab3a38b1131f2 GIT binary patch literal 9046 zcmb`NUyNMWUB}Nk_uiS=oppTeILX#_n=?12tV1Fh8m*%uHG3`6CM7^y5U=a;dYbI4 
zy?kX|S`uxwGEi-ye*|X{dE`aC2k# zD=srOFMb%CT+<%bYP+MI-JSixb=_RQwZT%Z(95EKZ3pH~_idQp+&LH|?n@iDMx(*@ zczwKibKqKgBd=yNN0Dnxjo)rzmeoDHEU;k{d#}gU*F&St$}NcK{*=q zuN#NvoipO4(O~b^=4i0qzcqfzbu{zT^=s=lc3vKguI=2~zP`S>`jlJHc$e|*!R@h) z81boDl-4(Pt`FRztu>W@G}ss*@hIuA1cUwY4vUFrS2GtGl4cq(U{5eG8VCv-^I6q% zOP+sAK0lHBj(^~!FMTLayyKJcdv4*;k7jgLtT!e`(=@H?}wTnJvTky<+WdP_2hGUTfctKmA)IZ zZgAaI?n^PXcf|fK>qc9qcF%DA`dar>K1>kJM|ZC0!&(Oa@}bI| z@U`o~XeIpHS{0r>;bS=E+>+~dqwBU{Qa;@Mjz`hxM5bT3WP8t^aJqZap?&0c>m&pw zx;WbeA3d*o!BncU9;}}|5sbC3UqMNmnGY+E>{X~I`=D_Dr$2dlxL4nO^><#otwjU` zqkfNNye%WDC!IgW_k>#tNSxJ^VK3v##BSlr!gc>5$$rIM4oF#dFpE%=@RF`J#<0E^ zY=(sG43UQ^5u<9_O2{GF{WVFj5<0!Cfg#l0=NZF_X4Sl0Mn#@iL(V0y{m7U^zZyDA zSkH_+{*}IF|e3cHqen9aAf#SFEJO( zPPg9aEilE`|8f8R{kB}I+gp$U#?@j(In?K}`20CEnw7q{(2WZy5FpOQImAxPB^3j$ zG>{KtJGPp!EEZe%biP%!58t+iA{xemiOn=$FMbwoo$_O*KS)G}zO7-ECzuu*m;!{R zd(!-Gyw4|i=-FPV7lpWKT>q5lC&wVqxHxR+Unfwh&+~;MN8pAR- zW+MyMdZJN&|ES{QTghXuJO$TWnWI;j*wUPbJZ8NXa_g2b%IfX`VU*WfL%pgW8dt3z ziZHpG)HTVRxmxJ86J-HqAxzN|iQ}F~mYbs!f(mBZ!k-Me>& zJaYBVorC5R2k?Xfi60(A0K#rm<{8WUIpkJ9BjKnWZ~BOGi#Uq|4{%;4@42v01t>Ll@@g z;4@3NJWEGfIb2U^Xcidw%+ftFLkHN0bOQbuog8e&5D(AL&Ddei4D;rDXohOW2y-@= zGr^e|q8SU!8DI|o2WN<8;GaW(4*b(IL^IgWK|hE5sTraf;OB6kgS|W=nn8UY^f}B= z9uXZuejM?6z#ljwIl}um+;eE3IHEWL`#9EfP%j-(93g!i=sBFbM-)deA4ho(-_M3`Ux{DUgabw&9gp6A z?XP-{V%PnFPd(@*T7V0boidhZ;P0dRlC_25iWKMPU;be7#p5IY_&trp!x?}6x6mld z({Vi^HCDq47-Sc-mCG3o7r$BgGiPb<(wqxi=&)j%6EA0-G>5KD(*&Sco#&FRhUGME z`Eph^)5>AFa+U&E_Ly;Lc)@l5(C6$0C;A`V#mtpUzvR--TF=*^7qT@gF+q(?NF~bb z=gX?fzR^s6uimik;;mk;GAZRTygGyo=c%)v!}uY(T~%#|^Hj8mFR0K)->~@aXWr=H z2j+0~j{|)`xBGYA7wYySNUKJJcAy_s?WIZWwIoQnShcOZ2Ua%8+V!isTGMvF(!#lGg8N}q$th{_&{g6rM%Acd9BZo|vSqUqZ`gd~!)PF)fx;o0-nqg$l>X!gS z7K6;7M?7aF>ahv`w6kC-CRyLE4}4D!DM(jr`A7p#!0MgHoQk9cQh^0f_eUWsFtu$| zU-uv2yXZ;pC-0pKJoTH8;J5g}b1|3CtRY)+;iq-C8lFm*OL*QT;F56V6KA>U@U#-1 zmOIGXSFmvM_WWXYsp=#r$K{t{0AmFJgORDgj?2sWdH4r6<$B&WTOSk*dHo(U7s~Th zL0|-I67whB)dt{#tsCH_!7ns}3x|VKC=^0z&RR1E6`nJT)UW##SlinS~DlG 
z%U`gqRoW>c9J;N9XhsklaM6B4mm|l`jvUvVQ%-U@A9~ zI=5|AtF|KmSO1wrkhLzTW6;K}!v-B@+`t{S!o8y~DdYvJo0!cmGWUX_!Tcjx8qm&T zX6n+BnIV&znc7K-WjWcNq%dn{Co^a1Lpe5?85`O;LavG-uj(PL6eH;-6LVTK%}q*u zz;3BWAE}VXqWkSE3mDl>L}dgG0tG)Q<#|XnHzkOWrTg+8Iez;9+fK6k-0w=F}^sLjyLWG<-TQ{GN%R zoVr{-*vNrZ1+G`pmVYHDfggzuU~mss02suOOSa7aK)THkDC&Shmm{=_BeaTy)-Il` zWY~m?cojT!hDe*5F4?C_316$QrU_M;+0eC%FexImJicNQvF?Y*V_Vv*?V&v(^1E+j zhaCJadXY0B0*qL* zQ=GtI)AZmb9on@x^CBD+TdE?h!_z3dzhhb|x7eASb^^@JbWs_Yuz6KQM0bwtY{8=w zMPIvL)B&^=OnPk{>M^EOnn@BK!&3v&3)R*bWC6^f4#rdiUd#iZ{}=uefL2MFB5e(X zq#bGDla@>5Ac3gH5}bb0bDbk~`hw%}rbT{?+^_b8vVGWRaPDI5W#f^w*->ZHTX(u_O02go>@bUsRefh@?KNfl z1=|F%MZ>D*ZN*HJO*vLq*lL<=b=BRk`cy+{v}dY=v^?Hge`H-P#Q_BARiQdPx_3}+hnZ@#-zWdg@YB=k+bkiKMfH8>Jw7mZ3x88kR^+1_I z$JP2agD*nbQnbf8ONym-y?leBz}DS-M7^w1uwsWd6~;0=7}Jr0C2SfVI@vTyYK^hnFg3<0|5K>b0=xhS2oArS^2S7co=%5U zQPYw>`IQxHX+)pm%13$ImP%kOBZGl#2Umm!QaI4=AW>Tqsc-3d?xi z*@|*bLg@a~TQg?ceT4!g1w>Q*t1|g3AA9HCo`ymu2Y)kFV>vjRyDt$K?QzNjqffh& z@HdNQTeZFnIrL8pCHvkl$j|*zSjsm*lqT9@Wkv$r<^awFSLxw1V4 zt`!S*@$a+N`@}zK!5o%6@cZGI%XX3%V-pkwxO9LC_>91P&ttkj$X=_@|vu1wBJ4#odc zXPIQdB5H1?GN_UW+av}1cpZIe(c0@c0$bWQm-d?PtfTpreZ!aBi>?ZzTc?9Z=nFNu zV)rM36v$WF-CzH5fAhxRdK~Y>{tf=TqCXzQ-TsS%eYery-rgC<4gUUsv7Kwf!Nzz$ zZf?hM|Jn@(z8w3!e`zp^8#~+M5r4hlra$!$;rx5S9{V?TUg|sjQN#WiRySjRJRaQK z9j7&49&Or!`{M|!IM}}K2BXo=DC(~sHn(4lydL%W+k+3}povI4!yt*OG5&Z%ios!j z5I6Uu^g)4}w{DC#*EhGX4{lGjOjACRl8yXgoc`dU!3G8T/dev/null 2>&1 || $SCRIPT_DIR/mech_loop.sh & + diff --git a/unittests/contracts/eosio.mechanics/scripts/mech_loop.sh b/unittests/contracts/eosio.mechanics/scripts/mech_loop.sh new file mode 100644 index 0000000000..bec88eb91d --- /dev/null +++ b/unittests/contracts/eosio.mechanics/scripts/mech_loop.sh @@ -0,0 +1,12 @@ +#!/bin/sh +# +# EOS Mechanics loop script. Loop and run the actions with a random sleep. 
+# + +SCRIPT_DIR="/path/to/mech/scripts" +ACTIONS_LOG="/path/to/actions.log" + +while :; do + $SCRIPT_DIR/mech_actions.sh >>$ACTIONS_LOG 2>&1 + sleep $(shuf -i 12-18 -n 1) +done From dd4fb9143fe47456bd90bb3df02e6dc98403e528 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 13 Feb 2023 11:20:10 -0600 Subject: [PATCH 102/178] Add commentary and unit tests for account name generator. --- tests/trx_generator/trx_generator.hpp | 20 +++ tests/trx_generator/trx_generator_tests.cpp | 151 ++++++++++++++++---- 2 files changed, 147 insertions(+), 24 deletions(-) diff --git a/tests/trx_generator/trx_generator.hpp b/tests/trx_generator/trx_generator.hpp index 0200be603f..dc09c03b95 100644 --- a/tests/trx_generator/trx_generator.hpp +++ b/tests/trx_generator/trx_generator.hpp @@ -29,11 +29,31 @@ namespace eosio::testing { }; struct account_name_generator { + // This account_name_generator provides the means to generate 12 character account names where the left-most 2 characters are reserved + // to identify the trx generator. The right 10 characters are determined based on incrementing through the allowed char_map one at a + // before incrementing the next character to the left. + // The _name_index_vec tracks the index into the char_map for each of the 12 characters of the name. + // For example: + // Transaction Generators would create new account names as follows: + // generator ID: 5 generator ID: 41 + // 1a1111111111 2f1111111111 + // 1a1111111112 2f1111111112 + // 1a1111111113 2f1111111113 + // 1a1111111114 2f1111111114 + // 1a1111111115 2f1111111115 + // ... ... 
+ // 1a111111111z 2f111111111z + // 1a1111111121 2f1111111121 + // 1a1111111122 2f1111111122 account_name_generator() : _name_index_vec(acct_name_len, 0) {} static constexpr char char_map[] = "12345abcdefghijklmnopqrstuvwxyz"; static constexpr int acct_name_char_cnt = sizeof(char_map) - 1; const int acct_name_len = 12; + + // Reserving the first 2 characters in the 12 char account name to identify the transaction generator sending the trx to create the new account. + // So 31 ^ 2 gives 961 (so w/0 based index, 960) since more than 31 generators may be desired, but > 961 is likely unwarranted. + // This provides an easy way to deduplicate the names being generated by the parallel trx generators. const int prefix_max = 960; std::vector _name_index_vec; diff --git a/tests/trx_generator/trx_generator_tests.cpp b/tests/trx_generator/trx_generator_tests.cpp index c92fb21f55..c5bb3832fd 100644 --- a/tests/trx_generator/trx_generator_tests.cpp +++ b/tests/trx_generator/trx_generator_tests.cpp @@ -60,7 +60,7 @@ BOOST_AUTO_TEST_CASE(tps_short_run_low_tps) fc::time_point start = fc::time_point::now(); t1.run(); fc::time_point end = fc::time_point::now(); - fc::microseconds runtime_us = end.time_since_epoch() - start.time_since_epoch() ; + fc::microseconds runtime_us = end.time_since_epoch() - start.time_since_epoch(); BOOST_REQUIRE_EQUAL(generator->_calls.size(), expected_trxs); BOOST_REQUIRE_GT(runtime_us.count(), minimum_runtime_us); @@ -87,17 +87,16 @@ BOOST_AUTO_TEST_CASE(tps_short_run_high_tps) fc::time_point start = fc::time_point::now(); t1.run(); fc::time_point end = fc::time_point::now(); - fc::microseconds runtime_us = end.time_since_epoch() - start.time_since_epoch() ; + fc::microseconds runtime_us = end.time_since_epoch() - start.time_since_epoch(); BOOST_REQUIRE_EQUAL(generator->_calls.size(), expected_trxs); BOOST_REQUIRE_GT(runtime_us.count(), minimum_runtime_us); if (runtime_us.count() > maximum_runtime_us) { ilog("couldn't sustain transaction rate. 
ran ${rt}us vs expected max ${mx}us", - ("rt", runtime_us.count())("mx", maximum_runtime_us ) ); + ("rt", runtime_us.count())("mx", maximum_runtime_us)); BOOST_REQUIRE_LT(monitor->_calls.back().time_to_next_trx_us, 0); } - } BOOST_AUTO_TEST_CASE(tps_short_run_med_tps_med_delay) @@ -121,14 +120,14 @@ BOOST_AUTO_TEST_CASE(tps_short_run_med_tps_med_delay) fc::time_point start = fc::time_point::now(); t1.run(); fc::time_point end = fc::time_point::now(); - fc::microseconds runtime_us = end.time_since_epoch() - start.time_since_epoch() ; + fc::microseconds runtime_us = end.time_since_epoch() - start.time_since_epoch(); BOOST_REQUIRE_EQUAL(generator->_calls.size(), expected_trxs); BOOST_REQUIRE_GT(runtime_us.count(), minimum_runtime_us); if (runtime_us.count() > maximum_runtime_us) { ilog("couldn't sustain transaction rate. ran ${rt}us vs expected max ${mx}us", - ("rt", runtime_us.count())("mx", maximum_runtime_us ) ); + ("rt", runtime_us.count())("mx", maximum_runtime_us)); BOOST_REQUIRE_LT(monitor->_calls.back().time_to_next_trx_us, 0); } } @@ -154,17 +153,18 @@ BOOST_AUTO_TEST_CASE(tps_med_run_med_tps_med_delay) fc::time_point start = fc::time_point::now(); t1.run(); fc::time_point end = fc::time_point::now(); - fc::microseconds runtime_us = end.time_since_epoch() - start.time_since_epoch() ; + fc::microseconds runtime_us = end.time_since_epoch() - start.time_since_epoch(); BOOST_REQUIRE_EQUAL(generator->_calls.size(), expected_trxs); BOOST_REQUIRE_GT(runtime_us.count(), minimum_runtime_us); if (runtime_us.count() > maximum_runtime_us) { ilog("couldn't sustain transaction rate. 
ran ${rt}us vs expected max ${mx}us", - ("rt", runtime_us.count())("mx", maximum_runtime_us ) ); + ("rt", runtime_us.count())("mx", maximum_runtime_us)); BOOST_REQUIRE_LT(monitor->_calls.back().time_to_next_trx_us, 0); } } + BOOST_AUTO_TEST_CASE(tps_cant_keep_up) { constexpr uint32_t test_duration_s = 5; @@ -186,17 +186,18 @@ BOOST_AUTO_TEST_CASE(tps_cant_keep_up) fc::time_point start = fc::time_point::now(); t1.run(); fc::time_point end = fc::time_point::now(); - fc::microseconds runtime_us = end.time_since_epoch() - start.time_since_epoch() ; + fc::microseconds runtime_us = end.time_since_epoch() - start.time_since_epoch(); BOOST_REQUIRE_EQUAL(generator->_calls.size(), expected_trxs); BOOST_REQUIRE_GT(runtime_us.count(), minimum_runtime_us); if (runtime_us.count() > maximum_runtime_us) { ilog("couldn't sustain transaction rate. ran ${rt}us vs expected max ${mx}us", - ("rt", runtime_us.count())("mx", maximum_runtime_us ) ); + ("rt", runtime_us.count())("mx", maximum_runtime_us)); BOOST_REQUIRE_LT(monitor->_calls.back().time_to_next_trx_us, 0); } } + BOOST_AUTO_TEST_CASE(tps_med_run_med_tps_30us_delay) { constexpr uint32_t test_duration_s = 15; @@ -218,17 +219,16 @@ BOOST_AUTO_TEST_CASE(tps_med_run_med_tps_30us_delay) fc::time_point start = fc::time_point::now(); t1.run(); fc::time_point end = fc::time_point::now(); - fc::microseconds runtime_us = end.time_since_epoch() - start.time_since_epoch() ; + fc::microseconds runtime_us = end.time_since_epoch() - start.time_since_epoch(); BOOST_REQUIRE_EQUAL(generator->_calls.size(), expected_trxs); BOOST_REQUIRE_GT(runtime_us.count(), minimum_runtime_us); if (runtime_us.count() > maximum_runtime_us) { ilog("couldn't sustain transaction rate. 
ran ${rt}us vs expected max ${mx}us", - ("rt", runtime_us.count())("mx", maximum_runtime_us ) ); + ("rt", runtime_us.count())("mx", maximum_runtime_us)); BOOST_REQUIRE_LT(monitor->_calls.back().time_to_next_trx_us, 0); } - } BOOST_AUTO_TEST_CASE(tps_performance_monitor_during_spin_up) @@ -241,11 +241,11 @@ BOOST_AUTO_TEST_CASE(tps_performance_monitor_during_spin_up) stats.trxs_sent = 90; // behind, but still within spin up window - stats.last_run = fc::time_point{fc::microseconds{100000}}; + stats.last_run = fc::time_point{fc::microseconds{100000}}; BOOST_REQUIRE(monitor.monitor_test(stats)); // violation, but still within spin up window - stats.last_run = fc::time_point{fc::microseconds{1100000}}; + stats.last_run = fc::time_point{fc::microseconds{1100000}}; BOOST_REQUIRE(monitor.monitor_test(stats)); } @@ -259,11 +259,11 @@ BOOST_AUTO_TEST_CASE(tps_performance_monitor_outside_spin_up) stats.trxs_sent = 90; // behind, out of spin up window - stats.last_run = fc::time_point{fc::microseconds{5500000}}; + stats.last_run = fc::time_point{fc::microseconds{5500000}}; BOOST_REQUIRE(monitor.monitor_test(stats)); // violation, out of spin up window - stats.last_run = fc::time_point{fc::microseconds{6600000}}; + stats.last_run = fc::time_point{fc::microseconds{6600000}}; BOOST_REQUIRE(!monitor.monitor_test(stats)); } @@ -277,25 +277,25 @@ BOOST_AUTO_TEST_CASE(tps_performance_monitor_outside_spin_up_within_limit) stats.trxs_sent = 90; // outside of limit, out of spin up window - stats.last_run = fc::time_point{fc::microseconds{5500000}}; + stats.last_run = fc::time_point{fc::microseconds{5500000}}; BOOST_REQUIRE(monitor.monitor_test(stats)); // outside of limit, less than max violation duration - stats.last_run = fc::time_point{fc::microseconds{6000000}}; + stats.last_run = fc::time_point{fc::microseconds{6000000}}; BOOST_REQUIRE(monitor.monitor_test(stats)); stats.trxs_sent = 98; // behind, but within limit, out of spin up window - stats.last_run = 
fc::time_point{fc::microseconds{6600000}}; + stats.last_run = fc::time_point{fc::microseconds{6600000}}; BOOST_REQUIRE(monitor.monitor_test(stats)); stats.expected_sent = 150; // outside of limit again, out of spin up window - stats.last_run = fc::time_point{fc::microseconds{7000000}}; + stats.last_run = fc::time_point{fc::microseconds{7000000}}; BOOST_REQUIRE(monitor.monitor_test(stats)); // outside of limit for too long - stats.last_run = fc::time_point{fc::microseconds{8100000}}; + stats.last_run = fc::time_point{fc::microseconds{8100000}}; BOOST_REQUIRE(!monitor.monitor_test(stats)); } @@ -316,11 +316,10 @@ BOOST_AUTO_TEST_CASE(tps_cant_keep_up_monitored) fc::time_point start = fc::time_point::now(); t1.run(); fc::time_point end = fc::time_point::now(); - fc::microseconds runtime_us = end.time_since_epoch() - start.time_since_epoch() ; + fc::microseconds runtime_us = end.time_since_epoch() - start.time_since_epoch(); BOOST_REQUIRE_LT(runtime_us.count(), expected_runtime_us); BOOST_REQUIRE_LT(generator->_calls.size(), expected_trxs); - } BOOST_AUTO_TEST_CASE(trx_generator_constructor) @@ -343,4 +342,108 @@ BOOST_AUTO_TEST_CASE(trx_generator_constructor) trx_expr, lib_id_str, log_dir, stop_on_trx_failed, peer_endpoint, port); } +BOOST_AUTO_TEST_CASE(account_name_generator_tests) +{ + auto acct_gen = account_name_generator(); + BOOST_REQUIRE_EQUAL(acct_gen.calc_name(), "111111111111"); + + //Test account name prefixes for differentiating between transaction generator instances + acct_gen.setPrefix(1); + BOOST_REQUIRE_EQUAL(acct_gen.calc_name(), "121111111111"); + acct_gen.setPrefix(30); + BOOST_REQUIRE_EQUAL(acct_gen.calc_name(), "1z1111111111"); + acct_gen.setPrefix(31); + BOOST_REQUIRE_EQUAL(acct_gen.calc_name(), "211111111111"); + acct_gen.setPrefix(960); + BOOST_REQUIRE_EQUAL(acct_gen.calc_name(), "zz1111111111"); + + //Test account name generation + std::vector expected = { + "zz1111111111", + "zz1111111112", + "zz1111111113", + "zz1111111114", + 
"zz1111111115", + "zz111111111a", + "zz111111111b", + "zz111111111c", + "zz111111111d", + "zz111111111e", + "zz111111111f", + "zz111111111g", + "zz111111111h", + "zz111111111i", + "zz111111111j", + "zz111111111k", + "zz111111111l", + "zz111111111m", + "zz111111111n", + "zz111111111o", + "zz111111111p", + "zz111111111q", + "zz111111111r", + "zz111111111s", + "zz111111111t", + "zz111111111u", + "zz111111111v", + "zz111111111w", + "zz111111111x", + "zz111111111y", + "zz111111111z", + "zz1111111121", + "zz1111111122"}; + for(size_t i = 0; i < expected.size(); ++i) { + BOOST_REQUIRE_EQUAL(acct_gen.calc_name(), expected.at(i)); + acct_gen.increment(); + } + + + //Test account name generation starting at 31 ^ 5 - 1 = 28629150 + std::vector expected2 = { + "1211111zzzzz", + "121111211111", + "121111211112", + "121111211113", + "121111211114", + "121111211115", + "12111121111a", + "12111121111b", + "12111121111c", + "12111121111d", + "12111121111e", + "12111121111f", + "12111121111g", + "12111121111h", + "12111121111i", + "12111121111j", + "12111121111k", + "12111121111l", + "12111121111m", + "12111121111n", + "12111121111o", + "12111121111p", + "12111121111q", + "12111121111r", + "12111121111s", + "12111121111t", + "12111121111u", + "12111121111v", + "12111121111w", + "12111121111x", + "12111121111y", + "12111121111z", + "121111211121", + "121111211122"}; + auto acct_gen2 = account_name_generator(); + acct_gen2.setPrefix(1); + int initialVal = 28629150; + for(int i = 0; i < initialVal; ++i) { + acct_gen2.increment(); + } + for(size_t i = 0; i < expected2.size(); ++i) { + BOOST_REQUIRE_EQUAL(acct_gen2.calc_name(), expected2.at(i)); + acct_gen2.increment(); + } +} + BOOST_AUTO_TEST_SUITE_END() From 77ba8eacfe643c9afa9da479e1a187ff75ff7f10 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 13 Feb 2023 11:42:34 -0600 Subject: [PATCH 103/178] Fix typo in argument. 
--- tests/trx_generator/main.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/trx_generator/main.cpp b/tests/trx_generator/main.cpp index a4a797aaf0..56951f0c51 100644 --- a/tests/trx_generator/main.cpp +++ b/tests/trx_generator/main.cpp @@ -148,7 +148,7 @@ int main(int argc, char** argv) { return INITIALIZE_FAIL; } - if(vmap.count("generation-id")) { + if(vmap.count("generator-id")) { if(gen_id > generator_id_max) { ilog("Initialization error: Exceeded max value for generator id. Value must be less than ${max}.", ("max", generator_id_max)); cli.print(std::cerr); From 43844554811e50d22db56d466e4805850a812da5 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 13 Feb 2023 12:03:59 -0600 Subject: [PATCH 104/178] Make private helper to protect usage. --- tests/trx_generator/trx_generator.hpp | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/tests/trx_generator/trx_generator.hpp b/tests/trx_generator/trx_generator.hpp index dc09c03b95..fe5e74cd53 100644 --- a/tests/trx_generator/trx_generator.hpp +++ b/tests/trx_generator/trx_generator.hpp @@ -57,14 +57,6 @@ namespace eosio::testing { const int prefix_max = 960; std::vector _name_index_vec; - void increment(int index) { - _name_index_vec[index]++; - if(_name_index_vec[index] >= acct_name_char_cnt) { - _name_index_vec[index] = 0; - increment(index - 1); - } - } - void increment() { increment(_name_index_vec.size() - 1); } @@ -93,6 +85,15 @@ namespace eosio::testing { } return name; } + + private: + void increment(int index) { + _name_index_vec[index]++; + if(_name_index_vec[index] >= acct_name_char_cnt) { + _name_index_vec[index] = 0; + increment(index - 1); + } + } }; struct trx_generator_base { From 66fc7ddeecfe193f989c6a77eb758c267466eac0 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 13 Feb 2023 12:04:40 -0600 Subject: [PATCH 105/178] Correct the default priv keys to be valid key format. 
--- tests/performance_tests/performance_test_basic.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 3cf9407837..b2d1df31bd 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -71,9 +71,9 @@ def __str__(self) -> str: @dataclass class SpecifiedContract: accountName: str = "eosio" - ownerPrivateKey: str = "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" + ownerPrivateKey: str = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3" ownerPublicKey: str = "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - activePrivateKey: str = "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" + activePrivateKey: str = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3" activePublicKey: str = "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" contractDir: str = "unittests/contracts/eosio.system" wasmFile: str = "eosio.system.wasm" From 239fa1fdc80f7daf8ff619a0dd78d991b7cc40d1 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 13 Feb 2023 14:09:20 -0600 Subject: [PATCH 106/178] Addressing peer review comments. 
--- tests/trx_generator/trx_generator.cpp | 10 +++++----- tests/trx_generator/trx_generator.hpp | 10 +++++----- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/tests/trx_generator/trx_generator.cpp b/tests/trx_generator/trx_generator.cpp index 8e4652f7a2..2663787095 100644 --- a/tests/trx_generator/trx_generator.cpp +++ b/tests/trx_generator/trx_generator.cpp @@ -88,13 +88,13 @@ namespace eosio::testing { return actions_pairs_vector; } - trx_generator_base::trx_generator_base(uint16_t generator_id, std::string chain_id_in, std::string contract_owner_account, fc::microseconds trx_expr, std::string lib_id_str, std::string log_dir, + trx_generator_base::trx_generator_base(uint16_t generator_id, const std::string& chain_id_in, const std::string& contract_owner_account, const fc::microseconds& trx_expr, const std::string& lib_id_str, const std::string& log_dir, bool stop_on_trx_failed, const std::string& peer_endpoint, unsigned short port) : _provider(peer_endpoint, port), _generator_id(generator_id), _chain_id(chain_id_in), _contract_owner_account(contract_owner_account), _trx_expiration(trx_expr), _last_irr_block_id(fc::variant(lib_id_str).as()), _log_dir(log_dir), _stop_on_trx_failed(stop_on_trx_failed) {} - transfer_trx_generator::transfer_trx_generator(uint16_t generator_id, std::string chain_id_in, std::string contract_owner_account, const std::vector& accts, - fc::microseconds trx_expr, const std::vector& private_keys_str_vector, std::string lib_id_str, std::string log_dir, bool stop_on_trx_failed, const std::string& peer_endpoint, unsigned short port) + transfer_trx_generator::transfer_trx_generator(uint16_t generator_id, const std::string& chain_id_in, const std::string& contract_owner_account, const std::vector& accts, + const fc::microseconds& trx_expr, const std::vector& private_keys_str_vector, const std::string& lib_id_str, const std::string& log_dir, bool stop_on_trx_failed, const std::string& peer_endpoint, unsigned short port) : 
trx_generator_base(generator_id, chain_id_in, contract_owner_account, trx_expr, lib_id_str, log_dir, stop_on_trx_failed, peer_endpoint, port), _accts(accts), _private_keys_str_vector(private_keys_str_vector) {} vector transfer_trx_generator::get_accounts(const vector& account_str_vector) { @@ -214,9 +214,9 @@ namespace eosio::testing { trx_generator_base::update_resign_transaction(trx, priv_key, nonce_prefix, nonce, trx_expiration, chain_id, last_irr_block_id); } - trx_generator::trx_generator(uint16_t generator_id, std::string chain_id_in, const std::string& abi_data_file, std::string contract_owner_account, + trx_generator::trx_generator(uint16_t generator_id, const std::string& chain_id_in, const std::string& abi_data_file, const std::string& contract_owner_account, const std::string& actions_data_json_file_or_str, const std::string& actions_auths_json_file_or_str, - fc::microseconds trx_expr, std::string lib_id_str, std::string log_dir, bool stop_on_trx_failed, + const fc::microseconds& trx_expr, const std::string& lib_id_str, const std::string& log_dir, bool stop_on_trx_failed, const std::string& peer_endpoint, unsigned short port) : trx_generator_base(generator_id, chain_id_in, contract_owner_account, trx_expr, lib_id_str, log_dir, stop_on_trx_failed, peer_endpoint, port), _abi_data_file_path(abi_data_file), diff --git a/tests/trx_generator/trx_generator.hpp b/tests/trx_generator/trx_generator.hpp index fe5e74cd53..c53b7ebdd3 100644 --- a/tests/trx_generator/trx_generator.hpp +++ b/tests/trx_generator/trx_generator.hpp @@ -115,7 +115,7 @@ namespace eosio::testing { bool _stop_on_trx_failed = true; - trx_generator_base(uint16_t generator_id, std::string chain_id_in, std::string contract_owner_account, fc::microseconds trx_expr, std::string lib_id_str, std::string log_dir, bool stop_on_trx_failed, + trx_generator_base(uint16_t generator_id, const std::string& chain_id_in, const std::string& contract_owner_account, const fc::microseconds& trx_expr, const 
std::string& lib_id_str, const std::string& log_dir, bool stop_on_trx_failed, const std::string& peer_endpoint="127.0.0.1", unsigned short port=9876); virtual ~trx_generator_base() = default; @@ -144,8 +144,8 @@ namespace eosio::testing { const std::vector _accts; std::vector _private_keys_str_vector; - transfer_trx_generator(uint16_t generator_id, std::string chain_id_in, std::string contract_owner_account, const std::vector& accts, - fc::microseconds trx_expr, const std::vector& private_keys_str_vector, std::string lib_id_str, std::string log_dir, bool stop_on_trx_failed, + transfer_trx_generator(uint16_t generator_id, const std::string& chain_id_in, const std::string& contract_owner_account, const std::vector& accts, + const fc::microseconds& trx_expr, const std::vector& private_keys_str_vector, const std::string& lib_id_str, const std::string& log_dir, bool stop_on_trx_failed, const std::string& peer_endpoint="127.0.0.1", unsigned short port=9876); std::vector get_accounts(const std::vector& account_str_vector); @@ -173,9 +173,9 @@ namespace eosio::testing { const fc::microseconds abi_serializer_max_time = fc::seconds(10); // No risk to client side serialization taking a long time - trx_generator(uint16_t generator_id, std::string chain_id_in, const std::string& abi_data_file, std::string contract_owner_account, + trx_generator(uint16_t generator_id, const std::string& chain_id_in, const std::string& abi_data_file, const std::string& contract_owner_account, const std::string& actions_data_json_file_or_str, const std::string& actions_auths_json_file_or_str, - fc::microseconds trx_expr, std::string lib_id_str, std::string log_dir, bool stop_on_trx_failed, + const fc::microseconds& trx_expr, const std::string& lib_id_str, const std::string& log_dir, bool stop_on_trx_failed, const std::string& peer_endpoint="127.0.0.1", unsigned short port=9876); void locate_key_words_in_action_mvo(std::vector& acct_gen_fields_out, fc::mutable_variant_object& action_mvo, const 
std::string& key_word); From f25de1b5a8e69588160184bdd4b1316433a776c2 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 13 Feb 2023 15:34:21 -0600 Subject: [PATCH 107/178] Addressing peer review comments. --- tests/trx_generator/main.cpp | 18 +++++++++--------- tests/trx_generator/trx_generator.cpp | 25 +++++++++++++------------ tests/trx_generator/trx_generator.hpp | 6 +++--- 3 files changed, 25 insertions(+), 24 deletions(-) diff --git a/tests/trx_generator/main.cpp b/tests/trx_generator/main.cpp index 56951f0c51..20fea0444d 100644 --- a/tests/trx_generator/main.cpp +++ b/tests/trx_generator/main.cpp @@ -28,22 +28,22 @@ int main(int argc, char** argv) { const uint16_t generator_id_max = 960; variables_map vmap; options_description cli("Transaction Generator command line options."); - uint16_t gen_id; + uint16_t gen_id = 0; string chain_id_in; string contract_owner_acct; string accts; string p_keys; - int64_t trx_expr; - uint32_t gen_duration; - uint32_t target_tps; + int64_t trx_expr = 3600; + uint32_t gen_duration = 60; + uint32_t target_tps = 1; string lib_id_str; - int64_t spinup_time_us; - uint32_t max_lag_per; - int64_t max_lag_duration_us; + int64_t spinup_time_us = 1000000; + uint32_t max_lag_per = 5; + int64_t max_lag_duration_us = 1000000; string log_dir_in; bool stop_on_trx_failed; - std::string peer_endpoint; - unsigned short port; + std::string peer_endpoint = "127.0.0.1"; + unsigned short port = 9876; bool transaction_specified = false; std::string abi_file_path_in; diff --git a/tests/trx_generator/trx_generator.cpp b/tests/trx_generator/trx_generator.cpp index 2663787095..9df763505a 100644 --- a/tests/trx_generator/trx_generator.cpp +++ b/tests/trx_generator/trx_generator.cpp @@ -29,8 +29,8 @@ namespace eosio::testing { signed_transaction_w_signer trx_generator_base::create_trx_w_actions_and_signer(std::vector acts, const fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const 
chain_id_type& chain_id, const block_id_type& last_irr_block_id) { signed_transaction trx; set_transaction_headers(trx, last_irr_block_id, trx_expiration); - for (auto act:acts) { - trx.actions.push_back(act); + for (auto& act : acts) { + trx.actions.emplace_back(std::move(act)); } trx.context_free_actions.emplace_back(action({}, config::null_account_name, name("nonce"), fc::raw::pack(std::to_string(nonce_prefix) + ":" + std::to_string(++nonce) + ":" + @@ -44,7 +44,7 @@ namespace eosio::testing { std::vector trxs; trxs.reserve(2 * action_pairs_vector.size()); - for (action_pair_w_keys ap: action_pairs_vector) { + for (const action_pair_w_keys& ap : action_pairs_vector) { trxs.emplace_back(create_trx_w_actions_and_signer({ap._first_act}, ap._first_act_priv_key, nonce_prefix, nonce, trx_expiration, chain_id, last_irr_block_id)); trxs.emplace_back(create_trx_w_actions_and_signer({ap._second_act}, ap._second_act_priv_key, nonce_prefix, nonce, trx_expiration, chain_id, last_irr_block_id)); } @@ -52,7 +52,8 @@ namespace eosio::testing { return trxs; } - void trx_generator_base::update_resign_transaction(signed_transaction& trx, fc::crypto::private_key priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const chain_id_type& chain_id, const block_id_type& last_irr_block_id) { + void trx_generator_base::update_resign_transaction(signed_transaction& trx, const fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, + const chain_id_type& chain_id, const block_id_type& last_irr_block_id) { trx.context_free_actions.clear(); trx.context_free_actions.emplace_back(action({}, config::null_account_name, name("nonce"), fc::raw::pack(std::to_string(nonce_prefix) + ":" + std::to_string(++nonce) + ":" + fc::time_point::now().time_since_epoch().count()))); set_transaction_headers(trx, last_irr_block_id, trx_expiration); @@ -60,7 +61,7 @@ namespace eosio::testing { trx.sign(priv_key, 
chain_id); } - chain::bytes transfer_trx_generator::make_transfer_data(const chain::name& from, const chain::name& to, const chain::asset& quantity, const std::string&& memo) { + chain::bytes transfer_trx_generator::make_transfer_data(const chain::name& from, const chain::name& to, const chain::asset& quantity, const std::string& memo) { return fc::raw::pack(from, to, quantity, memo); } @@ -81,7 +82,7 @@ namespace eosio::testing { ilog("create_initial_transfer_actions: creating transfer from ${acctB} to ${acctA}", ("acctB", accounts.at(j))("acctA", accounts.at(i))); action act_b_to_a = make_transfer_action(contract_owner_account, accounts.at(j), accounts.at(i), asset::from_string("1.0000 CUR"), salt); - actions_pairs_vector.push_back(action_pair_w_keys(act_a_to_b, act_b_to_a, priv_keys.at(i), priv_keys.at(j))); + actions_pairs_vector.emplace_back(action_pair_w_keys(act_a_to_b, act_b_to_a, priv_keys.at(i), priv_keys.at(j))); } } ilog("create_initial_transfer_actions: total action pairs created: ${pairs}", ("pairs", actions_pairs_vector.size())); @@ -99,9 +100,9 @@ namespace eosio::testing { vector transfer_trx_generator::get_accounts(const vector& account_str_vector) { vector acct_name_list; - for (string account_name: account_str_vector) { + for (const string& account_name : account_str_vector) { ilog("get_account about to try to create name for ${acct}", ("acct", account_name)); - acct_name_list.push_back(eosio::chain::name(account_name)); + acct_name_list.emplace_back(eosio::chain::name(account_name)); } return acct_name_list; } @@ -110,7 +111,7 @@ namespace eosio::testing { vector key_list; for (const string& private_key: priv_key_str_vector) { ilog("get_private_keys about to try to create private_key for ${key} : gen key ${newKey}", ("key", private_key)("newKey", fc::crypto::private_key(private_key))); - key_list.push_back(fc::crypto::private_key(private_key)); + key_list.emplace_back(fc::crypto::private_key(private_key)); } return key_list; } @@ -166,7 +167,7 
@@ namespace eosio::testing { void trx_generator::locate_key_words_in_action_mvo(std::vector& acct_gen_fields_out, fc::mutable_variant_object& action_mvo, const std::string& key_word) { for (const mutable_variant_object::entry& e: action_mvo) { if (e.value().get_type() == fc::variant::string_type && e.value() == key_word) { - acct_gen_fields_out.push_back(e.key()); + acct_gen_fields_out.emplace_back(e.key()); } else if (e.value().get_type() == fc::variant::object_type) { auto inner_mvo = fc::mutable_variant_object(e.value()); locate_key_words_in_action_mvo(acct_gen_fields_out, inner_mvo, key_word); @@ -205,11 +206,11 @@ namespace eosio::testing { } } - void trx_generator::update_resign_transaction(signed_transaction& trx, fc::crypto::private_key priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const chain_id_type& chain_id, const block_id_type& last_irr_block_id) { + void trx_generator::update_resign_transaction(signed_transaction& trx, const fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const chain_id_type& chain_id, const block_id_type& last_irr_block_id) { trx.actions.clear(); update_actions(); for (const auto& act: _actions) { - trx.actions.push_back(act); + trx.actions.emplace_back(act); } trx_generator_base::update_resign_transaction(trx, priv_key, nonce_prefix, nonce, trx_expiration, chain_id, last_irr_block_id); } diff --git a/tests/trx_generator/trx_generator.hpp b/tests/trx_generator/trx_generator.hpp index c53b7ebdd3..c00dea2805 100644 --- a/tests/trx_generator/trx_generator.hpp +++ b/tests/trx_generator/trx_generator.hpp @@ -120,7 +120,7 @@ namespace eosio::testing { virtual ~trx_generator_base() = default; - virtual void update_resign_transaction(eosio::chain::signed_transaction& trx, fc::crypto::private_key priv_key, uint64_t& nonce_prefix, uint64_t& nonce, + virtual void update_resign_transaction(eosio::chain::signed_transaction& trx, const 
fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const eosio::chain::chain_id_type& chain_id, const eosio::chain::block_id_type& last_irr_block_id); void push_transaction(p2p_trx_provider& provider, signed_transaction_w_signer& trx, uint64_t& nonce_prefix, @@ -152,7 +152,7 @@ namespace eosio::testing { std::vector get_private_keys(const std::vector& priv_key_str_vector); std::vector create_initial_transfer_transactions(const std::vector& action_pairs_vector, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const eosio::chain::chain_id_type& chain_id, const eosio::chain::block_id_type& last_irr_block_id); - eosio::chain::bytes make_transfer_data(const eosio::chain::name& from, const eosio::chain::name& to, const eosio::chain::asset& quantity, const std::string&& memo); + eosio::chain::bytes make_transfer_data(const eosio::chain::name& from, const eosio::chain::name& to, const eosio::chain::asset& quantity, const std::string& memo); auto make_transfer_action(eosio::chain::name account, eosio::chain::name from, eosio::chain::name to, eosio::chain::asset quantity, std::string memo); std::vector create_initial_transfer_actions(const std::string& salt, const uint64_t& period, const eosio::chain::name& contract_owner_account, const std::vector& accounts, const std::vector& priv_keys); @@ -184,7 +184,7 @@ namespace eosio::testing { void update_key_word_fields_in_action(std::vector& acct_gen_fields, fc::mutable_variant_object& action_mvo, const std::string& key_word); void update_actions(); - virtual void update_resign_transaction(eosio::chain::signed_transaction& trx, fc::crypto::private_key priv_key, uint64_t& nonce_prefix, uint64_t& nonce, + virtual void update_resign_transaction(eosio::chain::signed_transaction& trx, const fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const eosio::chain::chain_id_type& 
chain_id, const eosio::chain::block_id_type& last_irr_block_id); fc::variant json_from_file_or_string(const std::string& file_or_str, fc::json::parse_type ptype = fc::json::parse_type::legacy_parser); From d00908e0f4e459faa57d30b38f1d468169f70548 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Wed, 15 Feb 2023 16:18:44 -0600 Subject: [PATCH 108/178] add variable to be saved in report which captures arg string in full --- tests/performance_tests/performance_test_basic.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index b2d1df31bd..18c287fdef 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -131,6 +131,7 @@ class PtbConfig: expectedTransactionsSent: int = field(default_factory=int, init=False) printMissingTransactions: bool=False userTrxDataFile: Path=None + argsString: str="" def __post_init__(self): self.expectedTransactionsSent = self.testTrxGenDurationSec * self.targetTps @@ -616,7 +617,7 @@ def main(): ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=args.target_tps, testTrxGenDurationSec=args.test_duration_sec, tpsLimitPerGenerator=args.tps_limit_per_generator, numAddlBlocksToPrune=args.num_blocks_to_prune, logDirRoot=".", delReport=args.del_report, quiet=args.quiet, delPerfLogs=args.del_perf_logs, printMissingTransactions=args.print_missing_transactions, - userTrxDataFile=Path(args.user_trx_data_file) if args.user_trx_data_file is not None else None) + userTrxDataFile=Path(args.user_trx_data_file) if args.user_trx_data_file is not None else None, argsString=' '.join(sys.argv[1:])) myTest = PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptbConfig=ptbConfig) testSuccessful = myTest.runTest() From 1b7705198725c1d8b9c5d0c7063e639467828444 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Wed, 15 Feb 2023 
16:47:33 -0600 Subject: [PATCH 109/178] improve reporting for command line in performance tests. Replace README output with new data. --- tests/performance_tests/README.md | 71 +++++++++++-------- tests/performance_tests/performance_test.py | 1 + .../performance_test_basic.py | 4 +- 3 files changed, 45 insertions(+), 31 deletions(-) diff --git a/tests/performance_tests/README.md b/tests/performance_tests/README.md index 6956758633..96e7536488 100644 --- a/tests/performance_tests/README.md +++ b/tests/performance_tests/README.md @@ -832,8 +832,8 @@ The Performance Test Basic generates, by default, a report that details results ``` json { "completedRun": true, - "testStart": "2023-01-13T18:00:42.465802", - "testFinish": "2023-01-13T18:03:11.831277", + "testStart": "2023-02-15T22:32:36.946671", + "testFinish": "2023-02-15T22:33:41.280861", "Analysis": { "BlockSize": { "min": 1920, @@ -841,20 +841,20 @@ The Performance Test Basic generates, by default, a report that details results "avg": 1920.0, "sigma": 0.0, "emptyBlocks": 0, - "numBlocks": 177 + "numBlocks": 7 }, "BlocksGuide": { "firstBlockNum": 2, - "lastBlockNum": 299, - "totalBlocks": 298, - "testStartBlockNum": 112, - "testEndBlockNum": 299, - "setupBlocksCnt": 110, + "lastBlockNum": 129, + "totalBlocks": 128, + "testStartBlockNum": 113, + "testEndBlockNum": 129, + "setupBlocksCnt": 111, "tearDownBlocksCnt": 0, "leadingEmptyBlocksCnt": 1, - "trailingEmptyBlocksCnt": 6, + "trailingEmptyBlocksCnt": 5, "configAddlDropCnt": 2, - "testAnalysisBlockCnt": 177 + "testAnalysisBlockCnt": 7 }, "TPS": { "min": 20, @@ -862,9 +862,9 @@ The Performance Test Basic generates, by default, a report that details results "avg": 20.0, "sigma": 0.0, "emptyBlocks": 0, - "numBlocks": 177, + "numBlocks": 7, "configTps": 20, - "configTestDuration": 90, + "configTestDuration": 5, "tpsPerGenerator": [ 10, 10 @@ -872,25 +872,25 @@ The Performance Test Basic generates, by default, a report that details results "generatorCount": 2 }, 
"TrxCPU": { - "min": 11.0, - "max": 360.0, - "avg": 63.10444444444445, - "sigma": 33.234456387280126, - "samples": 1800 + "min": 20.0, + "max": 141.0, + "avg": 72.15, + "sigma": 31.078408903932, + "samples": 100 }, "TrxLatency": { - "min": 0.06500005722045898, - "max": 0.4679999351501465, - "avg": 0.26723387837409973, - "sigma": 0.1414459711179884, - "samples": 1800 + "min": 0.0409998893737793, + "max": 0.4419999122619629, + "avg": 0.24149999618530274, + "sigma": 0.14142224015850113, + "samples": 100 }, "TrxNet": { "min": 24.0, "max": 24.0, "avg": 24.0, "sigma": 0.0, - "samples": 1800 + "samples": 100 }, "DroppedBlocks": {}, "DroppedBlocksCount": 0, @@ -902,6 +902,7 @@ The Performance Test Basic generates, by default, a report that details results "ForksCount": 0 }, "args": { + "rawCmdLine ": "/home/leap/build/tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --clean-run", "killAll": true, "dontKill": false, "keepLogs": true, @@ -1197,8 +1198,8 @@ The Performance Test Basic generates, by default, a report that details results "p2pDedupCacheExpireTimeSec": null, "_p2pDedupCacheExpireTimeSecNodeosDefault": 10, "_p2pDedupCacheExpireTimeSecNodeosArg": "--p2p-dedup-cache-expire-time-sec", - "netThreads": 2, - "_netThreadsNodeosDefault": 2, + "netThreads": 4, + "_netThreadsNodeosDefault": 4, "_netThreadsNodeosArg": "--net-threads", "syncFetchSpan": null, "_syncFetchSpanNodeosDefault": 100, @@ -1372,10 +1373,21 @@ The Performance Test Basic generates, by default, a report that details results "_traceNoAbisNodeosArg": "--trace-no-abis" } }, + "specifiedContract": { + "accountName": "eosio", + "ownerPrivateKey": "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3", + "ownerPublicKey": "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "activePrivateKey": "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3", + "activePublicKey": "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + 
"contractDir": "unittests/contracts/eosio.system", + "wasmFile": "eosio.system.wasm", + "abiFile": "eosio.system.abi" + }, "useBiosBootFile": false, "genesisPath": "tests/performance_tests/genesis.json", "maximumP2pPerHost": 5000, "maximumClients": 0, + "loggingLevel": "info", "loggingDict": { "bios": "off" }, @@ -1386,19 +1398,20 @@ The Performance Test Basic generates, by default, a report that details results }, "_totalNodes": 2, "targetTps": 20, - "testTrxGenDurationSec": 90, + "testTrxGenDurationSec": 5, "tpsLimitPerGenerator": 10, "numAddlBlocksToPrune": 2, "logDirRoot": ".", "delReport": false, "quiet": false, "delPerfLogs": false, - "expectedTransactionsSent": 1800, + "expectedTransactionsSent": 100, "printMissingTransactions": false, + "userTrxDataFile": null, "logDirBase": "p", - "logDirTimestamp": "2023-01-13_18-00-42", + "logDirTimestamp": "2023-02-15_22-32-36", "logDirTimestampedOptSuffix": "-20", - "logDirPath": "p/2023-01-13_18-00-42-20" + "logDirPath": "p/2023-02-15_22-32-36-20" }, "env": { "system": "Linux", diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index 7f270e0f4f..47b6369dcd 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -341,6 +341,7 @@ def createArtifactsDir(path): def prepArgsDict(self) -> dict: argsDict = {} + argsDict.update({"rawCmdLine ": ' '.join(sys.argv[0:])}) argsDict.update(asdict(self.testHelperConfig)) argsDict.update(asdict(self.clusterConfig)) argsDict.update(asdict(self.ptConfig)) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 18c287fdef..a8c6941d05 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -131,7 +131,6 @@ class PtbConfig: expectedTransactionsSent: int = field(default_factory=int, init=False) printMissingTransactions: bool=False userTrxDataFile: Path=None 
- argsString: str="" def __post_init__(self): self.expectedTransactionsSent = self.testTrxGenDurationSec * self.targetTps @@ -398,6 +397,7 @@ def runTpsTest(self) -> PtbTpsTestResult: def prepArgs(self) -> dict: args = {} + args.update({"rawCmdLine ": ' '.join(sys.argv[0:])}) args.update(asdict(self.testHelperConfig)) args.update(asdict(self.clusterConfig)) args.update(asdict(self.ptbConfig)) @@ -617,7 +617,7 @@ def main(): ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=args.target_tps, testTrxGenDurationSec=args.test_duration_sec, tpsLimitPerGenerator=args.tps_limit_per_generator, numAddlBlocksToPrune=args.num_blocks_to_prune, logDirRoot=".", delReport=args.del_report, quiet=args.quiet, delPerfLogs=args.del_perf_logs, printMissingTransactions=args.print_missing_transactions, - userTrxDataFile=Path(args.user_trx_data_file) if args.user_trx_data_file is not None else None, argsString=' '.join(sys.argv[1:])) + userTrxDataFile=Path(args.user_trx_data_file) if args.user_trx_data_file is not None else None) myTest = PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptbConfig=ptbConfig) testSuccessful = myTest.runTest() From 94d4b4be24bf00311a74c6c5412a62118dbc050c Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Thu, 16 Feb 2023 11:03:18 -0600 Subject: [PATCH 110/178] add missing cpuTrxData.json file --- tests/performance_tests/cpuTrxData.json | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 tests/performance_tests/cpuTrxData.json diff --git a/tests/performance_tests/cpuTrxData.json b/tests/performance_tests/cpuTrxData.json new file mode 100644 index 0000000000..d79b0c185d --- /dev/null +++ b/tests/performance_tests/cpuTrxData.json @@ -0,0 +1,16 @@ +{ + "initAccounts": ["c"], + "abiFile": "unittests/contracts/eosio.mechanics/contracts/eosmechanics.abi", + "actions": [ + { + "actionName": "cpu", + "actionData": { + }, + "actionAuthAcct": "c", + "authorization": { + "actor": "c", + "permission": 
"active" + } + } + ] +} From 96ae1ec49a0e210cae78771099941408356fcc70 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Thu, 16 Feb 2023 15:48:43 -0600 Subject: [PATCH 111/178] revert some no longer necessary changes following merge and resolve test failures in doing so. --- tests/TestHarness/Cluster.py | 23 +++++++++---------- tests/performance_tests/CMakeLists.txt | 1 + .../performance_test_basic.py | 8 +++---- tests/trx_generator/main.cpp | 2 +- 4 files changed, 17 insertions(+), 17 deletions(-) diff --git a/tests/TestHarness/Cluster.py b/tests/TestHarness/Cluster.py index b79bd539c9..9f77547aaa 100644 --- a/tests/TestHarness/Cluster.py +++ b/tests/TestHarness/Cluster.py @@ -705,7 +705,7 @@ def createAccountKeys(count): # create account keys and import into wallet. Wallet initialization will be user responsibility # also imports defproducera and defproducerb accounts - def populateWallet(self, accountsCount, wallet, accountNames: list=None, createProducerAccounts: bool=False): + def populateWallet(self, accountsCount, wallet, accountNames: list=None): if accountsCount == 0 and len(accountNames) == 0: return True if self.walletMgr is None: @@ -720,16 +720,15 @@ def populateWallet(self, accountsCount, wallet, accountNames: list=None, createP Utils.Print("Account keys creation failed.") return False - if createProducerAccounts: - Utils.Print("Importing keys for account %s into wallet %s." % (self.defproduceraAccount.name, wallet.name)) - if not self.walletMgr.importKey(self.defproduceraAccount, wallet): - Utils.Print("ERROR: Failed to import key for account %s" % (self.defproduceraAccount.name)) - return False + Utils.Print("Importing keys for account %s into wallet %s." % (self.defproduceraAccount.name, wallet.name)) + if not self.walletMgr.importKey(self.defproduceraAccount, wallet): + Utils.Print("ERROR: Failed to import key for account %s" % (self.defproduceraAccount.name)) + return False - Utils.Print("Importing keys for account %s into wallet %s." 
% (self.defproducerbAccount.name, wallet.name)) - if not self.walletMgr.importKey(self.defproducerbAccount, wallet): - Utils.Print("ERROR: Failed to import key for account %s" % (self.defproducerbAccount.name)) - return False + Utils.Print("Importing keys for account %s into wallet %s." % (self.defproducerbAccount.name, wallet.name)) + if not self.walletMgr.importKey(self.defproducerbAccount, wallet): + Utils.Print("ERROR: Failed to import key for account %s" % (self.defproducerbAccount.name)) + return False if accountNames is not None: for idx, name in enumerate(accountNames): @@ -1595,13 +1594,13 @@ def cleanup(self): os.remove(f) # Create accounts and validates that the last transaction is received on root node - def createAccounts(self, creator, nameList: list, waitForTransBlock=True, stakedDeposit=1000, validationNodeIndex=0): + def createAccounts(self, creator, waitForTransBlock=True, stakedDeposit=1000, validationNodeIndex=0): if self.accounts is None: return True transId=None for account in self.accounts: ret = self.biosNode.getEosAccount(account.name) - if (len(nameList) == 0 and ret is None) or account.name in nameList: + if ret is None: if Utils.Debug: Utils.Print("Create account %s." 
% (account.name)) if Utils.Debug: Utils.Print("Validation node %s" % validationNodeIndex) trans=self.createAccountAndVerify(account, creator, stakedDeposit, validationNodeIndex=validationNodeIndex) diff --git a/tests/performance_tests/CMakeLists.txt b/tests/performance_tests/CMakeLists.txt index 5233d33bbd..bc93792b42 100644 --- a/tests/performance_tests/CMakeLists.txt +++ b/tests/performance_tests/CMakeLists.txt @@ -20,5 +20,6 @@ add_test(NAME validate_nodeos_plugin_args COMMAND tests/performance_tests/valida set_property(TEST performance_test_basic PROPERTY LABELS nonparallelizable_tests) set_property(TEST performance_test_basic_ex_transfer_trx_spec PROPERTY LABELS nonparallelizable_tests) set_property(TEST performance_test_basic_ex_new_acct_trx_spec PROPERTY LABELS nonparallelizable_tests) +set_property(TEST performance_test_basic_ex_cpu_trx_spec PROPERTY LABELS nonparallelizable_tests) add_subdirectory( NodeosPluginArgs ) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index e514a75e0b..cf5c79d4bf 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -291,16 +291,16 @@ def setupWalletAndAccounts(self, accountCnt: int=2, accountNames: list=None): ret = self.cluster.biosNode.getEosAccount(name) if ret is None: newAccountNames.append(name) - self.cluster.populateWallet(accountsCount=len(newAccountNames), wallet=self.wallet, accountNames=newAccountNames, createProducerAccounts=True) - self.cluster.createAccounts(self.cluster.eosioAccount, newAccountNames, stakedDeposit=0, validationNodeIndex=self.validationNodeId) + self.cluster.populateWallet(accountsCount=len(newAccountNames), wallet=self.wallet, accountNames=newAccountNames) + self.cluster.createAccounts(self.cluster.eosioAccount, stakedDeposit=0, validationNodeIndex=self.validationNodeId) if len(newAccountNames) != 0: for index in range(len(self.accountNames), 
len(accountNames)): self.accountNames.append(self.cluster.accounts[index].name) self.accountPrivKeys.append(self.cluster.accounts[index].activePrivateKey) self.accountPrivKeys.append(self.cluster.accounts[index].ownerPrivateKey) else: - self.cluster.populateWallet(accountsCount=accountCnt, wallet=self.wallet, createProducerAccounts=True) - self.cluster.createAccounts(self.cluster.eosioAccount, newAccountNames, stakedDeposit=0, validationNodeIndex=self.validationNodeId) + self.cluster.populateWallet(accountsCount=accountCnt, wallet=self.wallet) + self.cluster.createAccounts(self.cluster.eosioAccount, stakedDeposit=0, validationNodeIndex=self.validationNodeId) for index in range(0, accountCnt): self.accountNames.append(self.cluster.accounts[index].name) self.accountPrivKeys.append(self.cluster.accounts[index].activePrivateKey) diff --git a/tests/trx_generator/main.cpp b/tests/trx_generator/main.cpp index 16482bfa8b..5ed4374350 100644 --- a/tests/trx_generator/main.cpp +++ b/tests/trx_generator/main.cpp @@ -124,7 +124,7 @@ int main(int argc, char** argv) { if(vmap.count("accounts")) { boost::split(account_str_vector, accts, boost::is_any_of(",")); - if(account_str_vector.size() < 1) { + if(!transaction_specified && account_str_vector.size() < 1) { ilog("Initialization error: requires at minimum 1 account"); cli.print(std::cerr); return INITIALIZE_FAIL; From 724ba6cdae9cf9c3f7a7a1c8a92c88e468b4da3a Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Thu, 16 Feb 2023 16:29:44 -0600 Subject: [PATCH 112/178] readd test-duration-sec to cpu performance test, remove lingering commas, set back assert in trx_generator main --- tests/TestHarness/launch_transaction_generators.py | 4 ++-- tests/performance_tests/CMakeLists.txt | 2 +- tests/trx_generator/main.cpp | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/TestHarness/launch_transaction_generators.py b/tests/TestHarness/launch_transaction_generators.py index 256530b8b3..6498765c3a 100755 --- 
a/tests/TestHarness/launch_transaction_generators.py +++ b/tests/TestHarness/launch_transaction_generators.py @@ -91,7 +91,7 @@ def launch(self, waitToComplete=True): '--actions-data', f'{self.actionsData}', '--actions-auths', f'{self.actionsAuths}', '--peer-endpoint', f'{self.peerEndpoint}', - '--port', f'{self.port}', + '--port', f'{self.port}' ]) ) else: @@ -123,7 +123,7 @@ def launch(self, waitToComplete=True): '--target-tps', f'{targetTps}', '--log-dir', f'{self.logDir}', '--peer-endpoint', f'{self.peerEndpoint}', - '--port', f'{self.port}', + '--port', f'{self.port}' ]) ) exitCodes=None diff --git a/tests/performance_tests/CMakeLists.txt b/tests/performance_tests/CMakeLists.txt index bc93792b42..162e23831c 100644 --- a/tests/performance_tests/CMakeLists.txt +++ b/tests/performance_tests/CMakeLists.txt @@ -14,7 +14,7 @@ configure_file(userTrxDataNewAccount.json userTrxDataNewAccount.json COPYONLY) add_test(NAME performance_test_basic COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --clean-run WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME performance_test_basic_ex_transfer_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --clean-run --user-trx-data-file tests/performance_tests/userTrxDataTransfer.json WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME performance_test_basic_ex_new_acct_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --clean-run --user-trx-data-file tests/performance_tests/userTrxDataNewAccount.json WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME performance_test_basic_ex_cpu_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --clean-run --user-trx-data-file tests/performance_tests/cpuTrxData.json 
WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic_ex_cpu_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --clean-run --user-trx-data-file tests/performance_tests/cpuTrxData.json WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME log_reader_tests COMMAND tests/performance_tests/log_reader_tests.py WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME validate_nodeos_plugin_args COMMAND tests/performance_tests/validate_nodeos_plugin_args.py WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST performance_test_basic PROPERTY LABELS nonparallelizable_tests) diff --git a/tests/trx_generator/main.cpp b/tests/trx_generator/main.cpp index 5ed4374350..fcaee83277 100644 --- a/tests/trx_generator/main.cpp +++ b/tests/trx_generator/main.cpp @@ -124,8 +124,8 @@ int main(int argc, char** argv) { if(vmap.count("accounts")) { boost::split(account_str_vector, accts, boost::is_any_of(",")); - if(!transaction_specified && account_str_vector.size() < 1) { - ilog("Initialization error: requires at minimum 1 account"); + if(!transaction_specified && account_str_vector.size() < 2) { + ilog("Initialization error: requires at minimum 2 accounts"); cli.print(std::cerr); return INITIALIZE_FAIL; } From 5a67397e1c584af844ba68d7188d8cf73361ee97 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Thu, 16 Feb 2023 17:10:08 -0600 Subject: [PATCH 113/178] reduce size of specifiedContract through use of .account --- .../performance_test_basic.py | 43 ++++++------------- 1 file changed, 12 insertions(+), 31 deletions(-) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index cf5c79d4bf..87dfee09d4 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -70,15 +70,10 @@ def __str__(self) -> str: @dataclass class SpecifiedContract: - 
accountName: str = "eosio" - ownerPrivateKey: str = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3" - ownerPublicKey: str = "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - activePrivateKey: str = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3" - activePublicKey: str = "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" contractDir: str = "unittests/contracts/eosio.system" wasmFile: str = "eosio.system.wasm" abiFile: str = "eosio.system.abi" - account: Account = Account(accountName) + account: Account = Account("eosio") pnodes: int = 1 totalNodes: int = 2 @@ -282,7 +277,7 @@ def setupWalletAndAccounts(self, accountCnt: int=2, accountNames: list=None): self.accountPrivKeys=[] if accountNames is not None: for name in accountNames: - if name == self.clusterConfig.specifiedContract.accountName: + if name == self.clusterConfig.specifiedContract.account.name: self.cluster.accounts.append(self.clusterConfig.specifiedContract.account) self.accountNames.append(self.clusterConfig.specifiedContract.account.name) self.accountPrivKeys.append(self.clusterConfig.specifiedContract.account.ownerPrivateKey) @@ -310,17 +305,10 @@ def readUserTrxDataFromFile(self, userTrxDataFile: Path): self.userTrxDataDict = json.load(f) def setupContract(self): - if self.clusterConfig.specifiedContract.accountName != self.cluster.eosioAccount.name: - specifiedAccount = Account(self.clusterConfig.specifiedContract.accountName) - specifiedAccount.name = self.clusterConfig.specifiedContract.accountName - specifiedAccount.ownerPublicKey = self.clusterConfig.specifiedContract.ownerPublicKey - specifiedAccount.ownerPrivateKey = self.clusterConfig.specifiedContract.ownerPrivateKey - specifiedAccount.activePublicKey = self.clusterConfig.specifiedContract.activePublicKey - specifiedAccount.activePrivateKey = self.clusterConfig.specifiedContract.activePrivateKey - self.cluster.createAccountAndVerify(specifiedAccount, self.cluster.eosioAccount, 
validationNodeIndex=self.validationNodeId) - self.clusterConfig.specifiedContract.account = specifiedAccount + if self.clusterConfig.specifiedContract.account.name != self.cluster.eosioAccount.name: + self.cluster.createAccountAndVerify(self.clusterConfig.specifiedContract.account, self.cluster.eosioAccount, validationNodeIndex=self.validationNodeId) print("Publishing contract") - transaction=self.cluster.biosNode.publishContract(specifiedAccount, self.clusterConfig.specifiedContract.contractDir, + transaction=self.cluster.biosNode.publishContract(self.clusterConfig.specifiedContract.account, self.clusterConfig.specifiedContract.contractDir, self.clusterConfig.specifiedContract.wasmFile, self.clusterConfig.specifiedContract.abiFile, waitForTransBlock=True) @@ -328,15 +316,11 @@ def setupContract(self): print("ERROR: Failed to publish contract.") return None else: - self.clusterConfig.specifiedContract.activePrivateKey = self.cluster.eosioAccount.activePrivateKey - self.clusterConfig.specifiedContract.activePublicKey = self.cluster.eosioAccount.activePublicKey - self.clusterConfig.specifiedContract.ownerPrivateKey = self.cluster.eosioAccount.ownerPrivateKey - self.clusterConfig.specifiedContract.ownerPublicKey = self.cluster.eosioAccount.ownerPublicKey - print(f"setupContract: default {self.clusterConfig.specifiedContract.accountName} \ - activePrivateKey: {self.clusterConfig.specifiedContract.activePrivateKey} \ - activePublicKey: {self.clusterConfig.specifiedContract.activePublicKey} \ - ownerPrivateKey: {self.clusterConfig.specifiedContract.ownerPrivateKey} \ - ownerPublicKey: {self.clusterConfig.specifiedContract.ownerPublicKey}") + print(f"setupContract: default {self.clusterConfig.specifiedContract.account.name} \ + activePrivateKey: {self.clusterConfig.specifiedContract.account.activePrivateKey} \ + activePublicKey: {self.clusterConfig.specifiedContract.account.activePublicKey} \ + ownerPrivateKey: 
{self.clusterConfig.specifiedContract.account.ownerPrivateKey} \ + ownerPublicKey: {self.clusterConfig.specifiedContract.account.ownerPublicKey}") def runTpsTest(self) -> PtbTpsTestResult: completedRun = False @@ -385,7 +369,7 @@ def runTpsTest(self) -> PtbTpsTestResult: self.data.startBlock = self.waitForEmptyBlocks(self.validationNode, self.emptyBlockGoal) tpsTrxGensConfig = TpsTrxGensConfig(targetTps=self.ptbConfig.targetTps, tpsLimitPerGenerator=self.ptbConfig.tpsLimitPerGenerator) - trxGenLauncher = TransactionGeneratorsLauncher(chainId=chainId, lastIrreversibleBlockId=lib_id, contractOwnerAccount=self.clusterConfig.specifiedContract.accountName, + trxGenLauncher = TransactionGeneratorsLauncher(chainId=chainId, lastIrreversibleBlockId=lib_id, contractOwnerAccount=self.clusterConfig.specifiedContract.account.name, accts=','.join(map(str, self.accountNames)), privateKeys=','.join(map(str, self.accountPrivKeys)), trxGenDurationSec=self.ptbConfig.testTrxGenDurationSec, logDir=self.trxGenLogDirPath, abiFile=abiFile, actionsData=actionsDataJson, actionsAuths=actionsAuthsJson, @@ -567,8 +551,6 @@ def createBaseArgumentParser(): ptbBaseParserGroup.add_argument("--prods-enable-trace-api", help="Determines whether producer nodes should have eosio::trace_api_plugin enabled", action='store_true') ptbBaseParserGroup.add_argument("--print-missing-transactions", type=bool, help="Toggles if missing transactions are be printed upon test completion.", default=False) ptbBaseParserGroup.add_argument("--account-name", type=str, help="Name of the account to create and assign a contract to", default="eosio") - ptbBaseParserGroup.add_argument("--owner-public-key", type=str, help="Owner public key to use with specified account name", default="EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV") - ptbBaseParserGroup.add_argument("--active-public-key", type=str, help="Active public key to use with specified account name", 
default="EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV") ptbBaseParserGroup.add_argument("--contract-dir", type=str, help="Path to contract dir", default="unittests/contracts/eosio.system") ptbBaseParserGroup.add_argument("--wasm-file", type=str, help="WASM file name for contract", default="eosio.system.wasm") ptbBaseParserGroup.add_argument("--abi-file", type=str, help="ABI file name for contract", default="eosio.system.abi") @@ -623,8 +605,7 @@ def main(): ENA = PerformanceTestBasic.ClusterConfig.ExtraNodeosArgs extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs) SC = PerformanceTestBasic.ClusterConfig.SpecifiedContract - specifiedContract=SC(accountName=args.account_name, ownerPublicKey=args.owner_public_key, activePublicKey=args.active_public_key, - contractDir=args.contract_dir, wasmFile=args.wasm_file, abiFile=args.abi_file) + specifiedContract=SC(contractDir=args.contract_dir, wasmFile=args.wasm_file, abiFile=args.abi_file, account=Account(args.account_name)) testClusterConfig = PerformanceTestBasic.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs, specifiedContract=specifiedContract, loggingLevel=args.cluster_log_lvl, From e27ebfa1b808803c7e046e2f7a9462a382e886a2 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Fri, 17 Feb 2023 13:49:15 -0600 Subject: [PATCH 114/178] revert a number of inadvertent whitespace changes due to removed code from merge. Fix printing of account type in json.
--- tests/performance_tests/log_reader.py | 4 +++- tests/performance_tests/performance_test_basic.py | 6 +++--- tests/trx_generator/main.cpp | 2 +- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/tests/performance_tests/log_reader.py b/tests/performance_tests/log_reader.py index 593d07a5f5..4e097614f7 100644 --- a/tests/performance_tests/log_reader.py +++ b/tests/performance_tests/log_reader.py @@ -10,7 +10,7 @@ from pathlib import Path, PurePath sys.path.append(str(PurePath(PurePath(Path(__file__).absolute()).parent).parent)) -from TestHarness import Utils +from TestHarness import Utils, Account from dataclasses import dataclass, asdict, field from platform import release, system from datetime import datetime @@ -474,6 +474,8 @@ def default(self, obj): return "Unknown" if isinstance(obj, Path): return str(obj) + if isinstance(obj, Account): + return str(obj) return json.JSONEncoder.default(self, obj) def reportAsJSON(report: dict) -> json: diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 87dfee09d4..1afd9b0557 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -311,7 +311,6 @@ def setupContract(self): transaction=self.cluster.biosNode.publishContract(self.clusterConfig.specifiedContract.account, self.clusterConfig.specifiedContract.contractDir, self.clusterConfig.specifiedContract.wasmFile, self.clusterConfig.specifiedContract.abiFile, waitForTransBlock=True) - if transaction is None: print("ERROR: Failed to publish contract.") return None @@ -345,6 +344,7 @@ def runTpsTest(self) -> PtbTpsTestResult: print(f"Creating accounts specified in userTrxData: {self.userTrxDataDict['initAccounts']}") self.setupWalletAndAccounts(accountCnt=len(self.userTrxDataDict['initAccounts']), accountNames=self.userTrxDataDict['initAccounts']) abiFile = self.userTrxDataDict['abiFile'] + actionsDataJson = 
json.dumps(self.userTrxDataDict['actions']) authorizations={} @@ -369,11 +369,13 @@ def runTpsTest(self) -> PtbTpsTestResult: self.data.startBlock = self.waitForEmptyBlocks(self.validationNode, self.emptyBlockGoal) tpsTrxGensConfig = TpsTrxGensConfig(targetTps=self.ptbConfig.targetTps, tpsLimitPerGenerator=self.ptbConfig.tpsLimitPerGenerator) + trxGenLauncher = TransactionGeneratorsLauncher(chainId=chainId, lastIrreversibleBlockId=lib_id, contractOwnerAccount=self.clusterConfig.specifiedContract.account.name, accts=','.join(map(str, self.accountNames)), privateKeys=','.join(map(str, self.accountPrivKeys)), trxGenDurationSec=self.ptbConfig.testTrxGenDurationSec, logDir=self.trxGenLogDirPath, abiFile=abiFile, actionsData=actionsDataJson, actionsAuths=actionsAuthsJson, peerEndpoint=self.producerNode.host, port=self.producerP2pPort, tpsTrxGensConfig=tpsTrxGensConfig) + trxGenExitCodes = trxGenLauncher.launch() print(f"Transaction Generator exit codes: {trxGenExitCodes}") for exitCode in trxGenExitCodes: @@ -434,8 +436,6 @@ def analyzeResultsAndReport(self, testResult: PtbTpsTestResult): completedRun=testResult.completedRun) jsonReport = None - self.ptbConfig.quiet = True - self.ptbConfig.delReport = True if not self.ptbConfig.quiet or not self.ptbConfig.delReport: jsonReport = log_reader.reportAsJSON(self.report) diff --git a/tests/trx_generator/main.cpp b/tests/trx_generator/main.cpp index fcaee83277..20fea0444d 100644 --- a/tests/trx_generator/main.cpp +++ b/tests/trx_generator/main.cpp @@ -125,7 +125,7 @@ int main(int argc, char** argv) { if(vmap.count("accounts")) { boost::split(account_str_vector, accts, boost::is_any_of(",")); if(!transaction_specified && account_str_vector.size() < 2) { - ilog("Initialization error: requires at minimum 2 accounts"); + ilog("Initialization error: requires at minimum 2 transfer accounts"); cli.print(std::cerr); return INITIALIZE_FAIL; } From 270ea091f9fb4a66d3b24a0170a9d5c91786ac47 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese 
Date: Fri, 17 Feb 2023 14:44:10 -0600 Subject: [PATCH 115/178] improve visibility of TypeErrors and still output JSON when one occurs --- tests/performance_tests/log_reader.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/performance_tests/log_reader.py b/tests/performance_tests/log_reader.py index 4e097614f7..c5ae3232a6 100644 --- a/tests/performance_tests/log_reader.py +++ b/tests/performance_tests/log_reader.py @@ -476,7 +476,12 @@ def default(self, obj): return str(obj) if isinstance(obj, Account): return str(obj) - return json.JSONEncoder.default(self, obj) + defaultStr = "" + try: + defaultStr = json.JSONEncoder.default(self, obj) + except TypeError as err: + defaultStr = f"ERROR: {str(err)}" + return defaultStr def reportAsJSON(report: dict) -> json: return json.dumps(report, indent=2, cls=LogReaderEncoder) From 726a0333e78143f472a3d1ed743b07a51b056c44 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Fri, 17 Feb 2023 19:28:21 -0600 Subject: [PATCH 116/178] rerun test and update readme --- tests/performance_tests/README.md | 117 ++++++++++++++++-------------- 1 file changed, 64 insertions(+), 53 deletions(-) diff --git a/tests/performance_tests/README.md b/tests/performance_tests/README.md index 96e7536488..9919bae1d2 100644 --- a/tests/performance_tests/README.md +++ b/tests/performance_tests/README.md @@ -832,69 +832,80 @@ The Performance Test Basic generates, by default, a report that details results ``` json { "completedRun": true, - "testStart": "2023-02-15T22:32:36.946671", - "testFinish": "2023-02-15T22:33:41.280861", + "testStart": "2023-02-17T22:00:41.305618", + "testFinish": "2023-02-17T22:02:21.597430", "Analysis": { "BlockSize": { - "min": 1920, - "max": 1920, - "avg": 1920.0, - "sigma": 0.0, + "min": 925248, + "max": 1551936, + "avg": 1332244.3636363635, + "sigma": 144713.34505483133, "emptyBlocks": 0, - "numBlocks": 7 + "numBlocks": 44 }, "BlocksGuide": { "firstBlockNum": 2, - "lastBlockNum": 129, - 
"totalBlocks": 128, - "testStartBlockNum": 113, - "testEndBlockNum": 129, - "setupBlocksCnt": 111, - "tearDownBlocksCnt": 0, + "lastBlockNum": 193, + "totalBlocks": 192, + "testStartBlockNum": 112, + "testEndBlockNum": 160, + "setupBlocksCnt": 110, + "tearDownBlocksCnt": 33, "leadingEmptyBlocksCnt": 1, - "trailingEmptyBlocksCnt": 5, + "trailingEmptyBlocksCnt": 0, "configAddlDropCnt": 2, - "testAnalysisBlockCnt": 7 + "testAnalysisBlockCnt": 44 }, "TPS": { - "min": 20, - "max": 20, - "avg": 20.0, - "sigma": 0.0, + "min": 10265, + "max": 15774, + "avg": 13882.232558139534, + "sigma": 1454.0837894863364, "emptyBlocks": 0, - "numBlocks": 7, - "configTps": 20, - "configTestDuration": 5, + "numBlocks": 44, + "configTps": 50000, + "configTestDuration": 10, "tpsPerGenerator": [ - 10, - 10 + 3846, + 3846, + 3846, + 3846, + 3846, + 3846, + 3846, + 3846, + 3846, + 3846, + 3846, + 3847, + 3847 ], - "generatorCount": 2 + "generatorCount": 13 }, "TrxCPU": { - "min": 20.0, - "max": 141.0, - "avg": 72.15, - "sigma": 31.078408903932, - "samples": 100 + "min": 6.0, + "max": 15292.0, + "avg": 25.024962251222377, + "sigma": 49.9778703823556, + "samples": 322527 }, "TrxLatency": { - "min": 0.0409998893737793, - "max": 0.4419999122619629, - "avg": 0.24149999618530274, - "sigma": 0.14142224015850113, - "samples": 100 + "min": 0.11500000953674316, + "max": 16.91100001335144, + "avg": 8.950405516519615, + "sigma": 4.844012708597167, + "samples": 322527 }, "TrxNet": { "min": 24.0, "max": 24.0, "avg": 24.0, "sigma": 0.0, - "samples": 100 + "samples": 322527 }, "DroppedBlocks": {}, "DroppedBlocksCount": 0, - "DroppedTransactions": 0, + "DroppedTransactions": 177473, "ProductionWindowsTotal": 0, "ProductionWindowsAverageSize": 0, "ProductionWindowsMissed": 0, @@ -902,18 +913,18 @@ The Performance Test Basic generates, by default, a report that details results "ForksCount": 0 }, "args": { - "rawCmdLine ": "/home/leap/build/tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 
--target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --clean-run", - "killAll": true, + "rawCmdLine ": "tests/performance_tests/performance_test.py --test-iteration-duration-sec 10 --final-iterations-duration-sec 30 --calc-producer-threads lmax --calc-chain-threads lmax --calc-net-threads lmax", + "killAll": false, "dontKill": false, - "keepLogs": true, + "keepLogs": false, "dumpErrorDetails": false, "delay": 1, "nodesFile": null, - "verbose": true, + "verbose": false, "_killEosInstances": true, "_killWallet": true, "pnodes": 1, - "totalNodes": 1, + "totalNodes": 0, "topo": "mesh", "extraNodeosArgs": { "chainPluginArgs": { @@ -1397,21 +1408,21 @@ The Performance Test Basic generates, by default, a report that details results "1": "--plugin eosio::trace_api_plugin" }, "_totalNodes": 2, - "targetTps": 20, - "testTrxGenDurationSec": 5, - "tpsLimitPerGenerator": 10, + "targetTps": 50000, + "testTrxGenDurationSec": 10, + "tpsLimitPerGenerator": 4000, "numAddlBlocksToPrune": 2, - "logDirRoot": ".", - "delReport": false, + "logDirRoot": "p/2023-02-17_22-00-41/pluginThreadOptRunLogs", + "delReport": true, "quiet": false, - "delPerfLogs": false, - "expectedTransactionsSent": 100, + "delPerfLogs": true, + "expectedTransactionsSent": 500000, "printMissingTransactions": false, "userTrxDataFile": null, - "logDirBase": "p", - "logDirTimestamp": "2023-02-15_22-32-36", - "logDirTimestampedOptSuffix": "-20", - "logDirPath": "p/2023-02-15_22-32-36-20" + "logDirBase": "p/2023-02-17_22-00-41/pluginThreadOptRunLogs/p", + "logDirTimestamp": "2023-02-17_22-00-41", + "logDirTimestampedOptSuffix": "-50000", + "logDirPath": "p/2023-02-17_22-00-41/pluginThreadOptRunLogs/p/2023-02-17_22-00-41-50000" }, "env": { "system": "Linux", From 8f75c30e2ee5147d9da99b70feccd6f0c4bc6297 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Mon, 20 Feb 2023 16:14:05 -0600 Subject: [PATCH 117/178] add ram performance test. Rework how keys are put into contract account for performance test. 
revert the reversion of the setupWallet change --- tests/TestHarness/Cluster.py | 19 ++++++++++--------- .../launch_transaction_generators.py | 4 ++-- tests/performance_tests/CMakeLists.txt | 5 ++++- .../performance_test_basic.py | 4 +++- tests/performance_tests/ramTrxData.json | 16 ++++++++++++++++ 5 files changed, 35 insertions(+), 13 deletions(-) create mode 100644 tests/performance_tests/ramTrxData.json diff --git a/tests/TestHarness/Cluster.py b/tests/TestHarness/Cluster.py index 9f77547aaa..bf9e153394 100644 --- a/tests/TestHarness/Cluster.py +++ b/tests/TestHarness/Cluster.py @@ -705,7 +705,7 @@ def createAccountKeys(count): # create account keys and import into wallet. Wallet initialization will be user responsibility # also imports defproducera and defproducerb accounts - def populateWallet(self, accountsCount, wallet, accountNames: list=None): + def populateWallet(self, accountsCount, wallet, accountNames: list=None, createProducerAccounts: bool=True): if accountsCount == 0 and len(accountNames) == 0: return True if self.walletMgr is None: @@ -720,15 +720,16 @@ def populateWallet(self, accountsCount, wallet, accountNames: list=None): Utils.Print("Account keys creation failed.") return False - Utils.Print("Importing keys for account %s into wallet %s." % (self.defproduceraAccount.name, wallet.name)) - if not self.walletMgr.importKey(self.defproduceraAccount, wallet): - Utils.Print("ERROR: Failed to import key for account %s" % (self.defproduceraAccount.name)) - return False + if createProducerAccounts: + Utils.Print("Importing keys for account %s into wallet %s." % (self.defproduceraAccount.name, wallet.name)) + if not self.walletMgr.importKey(self.defproduceraAccount, wallet): + Utils.Print("ERROR: Failed to import key for account %s" % (self.defproduceraAccount.name)) + return False - Utils.Print("Importing keys for account %s into wallet %s." 
% (self.defproducerbAccount.name, wallet.name)) - if not self.walletMgr.importKey(self.defproducerbAccount, wallet): - Utils.Print("ERROR: Failed to import key for account %s" % (self.defproducerbAccount.name)) - return False + Utils.Print("Importing keys for account %s into wallet %s." % (self.defproducerbAccount.name, wallet.name)) + if not self.walletMgr.importKey(self.defproducerbAccount, wallet): + Utils.Print("ERROR: Failed to import key for account %s" % (self.defproducerbAccount.name)) + return False if accountNames is not None: for idx, name in enumerate(accountNames): diff --git a/tests/TestHarness/launch_transaction_generators.py b/tests/TestHarness/launch_transaction_generators.py index 6498765c3a..2379987840 100755 --- a/tests/TestHarness/launch_transaction_generators.py +++ b/tests/TestHarness/launch_transaction_generators.py @@ -73,7 +73,7 @@ def launch(self, waitToComplete=True): f'--actions-data {self.actionsData} ' f'--actions-auths {self.actionsAuths} ' f'--peer-endpoint {self.peerEndpoint} ' - f'--port {self.port} ' + f'--port {self.port}' ) self.subprocess_ret_codes.append( subprocess.Popen([ @@ -108,7 +108,7 @@ def launch(self, waitToComplete=True): f'--target-tps {targetTps} ' f'--log-dir {self.logDir} ' f'--peer-endpoint {self.peerEndpoint} ' - f'--port {self.port} ' + f'--port {self.port}' ) self.subprocess_ret_codes.append( subprocess.Popen([ diff --git a/tests/performance_tests/CMakeLists.txt b/tests/performance_tests/CMakeLists.txt index 162e23831c..c36dc5dfd4 100644 --- a/tests/performance_tests/CMakeLists.txt +++ b/tests/performance_tests/CMakeLists.txt @@ -8,18 +8,21 @@ configure_file(nodeos_log_3_2.txt.gz nodeos_log_3_2.txt.gz COPYONLY) configure_file(genesis.json genesis.json COPYONLY) configure_file(validate_nodeos_plugin_args.py validate_nodeos_plugin_args.py COPYONLY) configure_file(cpuTrxData.json cpuTrxData.json COPYONLY) +configure_file(ramTrxData.json ramTrxData.json COPYONLY) configure_file(userTrxDataTransfer.json 
userTrxDataTransfer.json COPYONLY) configure_file(userTrxDataNewAccount.json userTrxDataNewAccount.json COPYONLY) add_test(NAME performance_test_basic COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --clean-run WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME performance_test_basic_ex_transfer_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --clean-run --user-trx-data-file tests/performance_tests/userTrxDataTransfer.json WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME performance_test_basic_ex_new_acct_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --clean-run --user-trx-data-file tests/performance_tests/userTrxDataNewAccount.json WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME performance_test_basic_ex_cpu_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --clean-run --user-trx-data-file tests/performance_tests/cpuTrxData.json WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic_ex_cpu_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --clean-run --account-name "c" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics/contracts --user-trx-data-file tests/performance_tests/cpuTrxData.json WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic_ex_ram_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --clean-run --account-name "r" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir 
unittests/contracts/eosio.mechanics/contracts --user-trx-data-file tests/performance_tests/ramTrxData.json WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME log_reader_tests COMMAND tests/performance_tests/log_reader_tests.py WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME validate_nodeos_plugin_args COMMAND tests/performance_tests/validate_nodeos_plugin_args.py WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST performance_test_basic PROPERTY LABELS nonparallelizable_tests) set_property(TEST performance_test_basic_ex_transfer_trx_spec PROPERTY LABELS nonparallelizable_tests) set_property(TEST performance_test_basic_ex_new_acct_trx_spec PROPERTY LABELS nonparallelizable_tests) set_property(TEST performance_test_basic_ex_cpu_trx_spec PROPERTY LABELS nonparallelizable_tests) +set_property(TEST performance_test_basic_ex_ram_trx_spec PROPERTY LABELS nonparallelizable_tests) add_subdirectory( NodeosPluginArgs ) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 1afd9b0557..d56ee7ff59 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -306,7 +306,9 @@ def readUserTrxDataFromFile(self, userTrxDataFile: Path): def setupContract(self): if self.clusterConfig.specifiedContract.account.name != self.cluster.eosioAccount.name: - self.cluster.createAccountAndVerify(self.clusterConfig.specifiedContract.account, self.cluster.eosioAccount, validationNodeIndex=self.validationNodeId) + self.cluster.populateWallet(accountsCount=1, wallet=self.wallet, accountNames=[self.clusterConfig.specifiedContract.account.name], createProducerAccounts=False) + self.cluster.createAccounts(self.cluster.eosioAccount, stakedDeposit=0, validationNodeIndex=self.validationNodeId) + self.clusterConfig.specifiedContract.account = self.cluster.accounts[0] print("Publishing contract") 
transaction=self.cluster.biosNode.publishContract(self.clusterConfig.specifiedContract.account, self.clusterConfig.specifiedContract.contractDir, self.clusterConfig.specifiedContract.wasmFile, diff --git a/tests/performance_tests/ramTrxData.json b/tests/performance_tests/ramTrxData.json new file mode 100644 index 0000000000..0968a4dfd3 --- /dev/null +++ b/tests/performance_tests/ramTrxData.json @@ -0,0 +1,16 @@ +{ + "initAccounts": ["r"], + "abiFile": "unittests/contracts/eosio.mechanics/contracts/eosmechanics.abi", + "actions": [ + { + "actionName": "ram", + "actionData": { + }, + "actionAuthAcct": "r", + "authorization": { + "actor": "r", + "permission": "active" + } + } + ] +} From 074471b413fef8671b4f7523f48dd25ba26acb60 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Tue, 21 Feb 2023 15:40:45 -0600 Subject: [PATCH 118/178] address PR comment to make contracts folder consistent. --- tests/performance_tests/CMakeLists.txt | 6 +++--- unittests/contracts/eosio.mechanics/CMakeLists.txt | 7 ++++++- .../eosio.mechanics/contracts/CMakeLists.txt | 6 ------ .../{contracts => }/eosmechanics.abi | 0 .../{contracts => }/eosmechanics.cpp | 0 .../{contracts => }/eosmechanics.wasm | Bin .../eosio.mechanics/{scripts => }/mech_actions.sh | 0 .../eosio.mechanics/{scripts => }/mech_check.sh | 0 .../eosio.mechanics/{scripts => }/mech_loop.sh | 0 9 files changed, 9 insertions(+), 10 deletions(-) delete mode 100644 unittests/contracts/eosio.mechanics/contracts/CMakeLists.txt rename unittests/contracts/eosio.mechanics/{contracts => }/eosmechanics.abi (100%) rename unittests/contracts/eosio.mechanics/{contracts => }/eosmechanics.cpp (100%) rename unittests/contracts/eosio.mechanics/{contracts => }/eosmechanics.wasm (100%) rename unittests/contracts/eosio.mechanics/{scripts => }/mech_actions.sh (100%) rename unittests/contracts/eosio.mechanics/{scripts => }/mech_check.sh (100%) rename unittests/contracts/eosio.mechanics/{scripts => }/mech_loop.sh (100%) diff --git 
a/tests/performance_tests/CMakeLists.txt b/tests/performance_tests/CMakeLists.txt index c36dc5dfd4..44bb03ba7c 100644 --- a/tests/performance_tests/CMakeLists.txt +++ b/tests/performance_tests/CMakeLists.txt @@ -12,11 +12,11 @@ configure_file(ramTrxData.json ramTrxData.json COPYONLY) configure_file(userTrxDataTransfer.json userTrxDataTransfer.json COPYONLY) configure_file(userTrxDataNewAccount.json userTrxDataNewAccount.json COPYONLY) -add_test(NAME performance_test_basic COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --clean-run WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --clean-run WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME performance_test_basic_ex_transfer_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --clean-run --user-trx-data-file tests/performance_tests/userTrxDataTransfer.json WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME performance_test_basic_ex_new_acct_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --clean-run --user-trx-data-file tests/performance_tests/userTrxDataNewAccount.json WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME performance_test_basic_ex_cpu_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --clean-run --account-name "c" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics/contracts --user-trx-data-file tests/performance_tests/cpuTrxData.json WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME 
performance_test_basic_ex_ram_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --clean-run --account-name "r" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics/contracts --user-trx-data-file tests/performance_tests/ramTrxData.json WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic_ex_cpu_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --clean-run --account-name "c" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/performance_tests/cpuTrxData.json WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic_ex_ram_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --clean-run --account-name "r" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/performance_tests/ramTrxData.json WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME log_reader_tests COMMAND tests/performance_tests/log_reader_tests.py WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME validate_nodeos_plugin_args COMMAND tests/performance_tests/validate_nodeos_plugin_args.py WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST performance_test_basic PROPERTY LABELS nonparallelizable_tests) diff --git a/unittests/contracts/eosio.mechanics/CMakeLists.txt b/unittests/contracts/eosio.mechanics/CMakeLists.txt index f8dca9d757..bef432cb15 100644 --- a/unittests/contracts/eosio.mechanics/CMakeLists.txt +++ b/unittests/contracts/eosio.mechanics/CMakeLists.txt @@ -1 +1,6 @@ -file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/contracts/ DESTINATION 
${CMAKE_CURRENT_BINARY_DIR}/contracts/) +if( EOSIO_COMPILE_TEST_CONTRACTS ) + add_contract( eosmechanics eosmechanics eosmechanics.cpp ) +else() + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/eosmechanics.wasm ${CMAKE_CURRENT_BINARY_DIR}/eosmechanics.wasm COPYONLY ) + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/eosmechanics.abi ${CMAKE_CURRENT_BINARY_DIR}/eosmechanics.abi COPYONLY ) +endif() diff --git a/unittests/contracts/eosio.mechanics/contracts/CMakeLists.txt b/unittests/contracts/eosio.mechanics/contracts/CMakeLists.txt deleted file mode 100644 index 2eea697b25..0000000000 --- a/unittests/contracts/eosio.mechanics/contracts/CMakeLists.txt +++ /dev/null @@ -1,6 +0,0 @@ -if( EOSIO_COMPILE_TEST_CONTRACTS ) - add_contract( eosio.mechanics eosmechanics eosmechanics.cpp ) -else() - configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/eosmechanics.wasm ${CMAKE_CURRENT_BINARY_DIR}/eosmechanics.wasm COPYONLY ) - configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/eosmechanics.abi ${CMAKE_CURRENT_BINARY_DIR}/eosmechanics.abi COPYONLY ) -endif() diff --git a/unittests/contracts/eosio.mechanics/contracts/eosmechanics.abi b/unittests/contracts/eosio.mechanics/eosmechanics.abi similarity index 100% rename from unittests/contracts/eosio.mechanics/contracts/eosmechanics.abi rename to unittests/contracts/eosio.mechanics/eosmechanics.abi diff --git a/unittests/contracts/eosio.mechanics/contracts/eosmechanics.cpp b/unittests/contracts/eosio.mechanics/eosmechanics.cpp similarity index 100% rename from unittests/contracts/eosio.mechanics/contracts/eosmechanics.cpp rename to unittests/contracts/eosio.mechanics/eosmechanics.cpp diff --git a/unittests/contracts/eosio.mechanics/contracts/eosmechanics.wasm b/unittests/contracts/eosio.mechanics/eosmechanics.wasm similarity index 100% rename from unittests/contracts/eosio.mechanics/contracts/eosmechanics.wasm rename to unittests/contracts/eosio.mechanics/eosmechanics.wasm diff --git a/unittests/contracts/eosio.mechanics/scripts/mech_actions.sh 
b/unittests/contracts/eosio.mechanics/mech_actions.sh similarity index 100% rename from unittests/contracts/eosio.mechanics/scripts/mech_actions.sh rename to unittests/contracts/eosio.mechanics/mech_actions.sh diff --git a/unittests/contracts/eosio.mechanics/scripts/mech_check.sh b/unittests/contracts/eosio.mechanics/mech_check.sh similarity index 100% rename from unittests/contracts/eosio.mechanics/scripts/mech_check.sh rename to unittests/contracts/eosio.mechanics/mech_check.sh diff --git a/unittests/contracts/eosio.mechanics/scripts/mech_loop.sh b/unittests/contracts/eosio.mechanics/mech_loop.sh similarity index 100% rename from unittests/contracts/eosio.mechanics/scripts/mech_loop.sh rename to unittests/contracts/eosio.mechanics/mech_loop.sh From 1cf32d615def1b19aecd8e393b2c2593650dd360 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Tue, 21 Feb 2023 15:42:48 -0600 Subject: [PATCH 119/178] give ability to cleanup transaction generator in TestHelper shutdown --- tests/TestHarness/TestHelper.py | 8 +++++++- tests/nodeos_contrl_c_test.py | 4 ++-- tests/nodeos_snapshot_diff_test.py | 2 +- tests/nodeos_startup_catchup.py | 2 +- tests/performance_tests/log_reader.py | 2 +- tests/performance_tests/performance_test_basic.py | 10 +++------- 6 files changed, 15 insertions(+), 13 deletions(-) diff --git a/tests/TestHarness/TestHelper.py b/tests/TestHarness/TestHelper.py index 01874c6be0..68e4d353b2 100644 --- a/tests/TestHarness/TestHelper.py +++ b/tests/TestHarness/TestHelper.py @@ -1,6 +1,7 @@ from .testUtils import Utils from .Cluster import Cluster from .WalletMgr import WalletMgr +from .launch_transaction_generators import TransactionGeneratorsLauncher from datetime import datetime import platform @@ -148,12 +149,14 @@ def printSystemInfo(prefix): @staticmethod # pylint: disable=too-many-arguments - def shutdown(cluster, walletMgr, testSuccessful=True, killEosInstances=True, killWallet=True, keepLogs=False, cleanRun=True, dumpErrorDetails=False): + def 
shutdown(cluster, walletMgr, trxGenLauncher=None, testSuccessful=True, killEosInstances=True, killWallet=True, keepLogs=False, cleanRun=True, dumpErrorDetails=False): """Cluster and WalletMgr shutdown and cleanup.""" assert(cluster) assert(isinstance(cluster, Cluster)) if walletMgr: assert(isinstance(walletMgr, WalletMgr)) + if trxGenLauncher is not None: + assert(isinstance(trxGenLauncher, TransactionGeneratorsLauncher)) assert(isinstance(testSuccessful, bool)) assert(isinstance(killEosInstances, bool)) assert(isinstance(killWallet, bool)) @@ -199,3 +202,6 @@ def reportProductionAnalysis(thresholdMs): Utils.Print("Cleanup wallet data.") walletMgr.cleanup() + if trxGenLauncher is not None: + Utils.Print("Shut down the TransactionGeneratorsLauncher and TransactionGenerators") + trxGenLauncher.killAll() diff --git a/tests/nodeos_contrl_c_test.py b/tests/nodeos_contrl_c_test.py index 2960b339ec..d71b7786ab 100755 --- a/tests/nodeos_contrl_c_test.py +++ b/tests/nodeos_contrl_c_test.py @@ -112,11 +112,11 @@ testSuccessful = nonProdNode.kill(signal.SIGTERM) if not testSuccessful: - TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=True, killWallet=True, keepLogs=True, cleanRun=True, dumpErrorDetails=True) + TestHelper.shutdown(cluster, walletMgr, cluster.trxGenLauncher, testSuccessful=testSuccessful, killEosInstances=True, killWallet=True, keepLogs=True, cleanRun=True, dumpErrorDetails=True) errorExit("Failed to kill the seed node") finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=True, killWallet=True, keepLogs=True, cleanRun=True, dumpErrorDetails=True) + TestHelper.shutdown(cluster, walletMgr, cluster.trxGenLauncher, testSuccessful=testSuccessful, killEosInstances=True, killWallet=True, keepLogs=True, cleanRun=True, dumpErrorDetails=True) errorCode = 0 if testSuccessful else 1 exit(errorCode) \ No newline at end of file diff --git a/tests/nodeos_snapshot_diff_test.py 
b/tests/nodeos_snapshot_diff_test.py index 13f2570d77..e46f30bf48 100755 --- a/tests/nodeos_snapshot_diff_test.py +++ b/tests/nodeos_snapshot_diff_test.py @@ -199,7 +199,7 @@ def waitForBlock(node, blockNum, blockType=BlockType.head, timeout=None, reportI testSuccessful=True finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails) + TestHelper.shutdown(cluster, walletMgr, cluster.trxGenLauncher, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails) exitCode = 0 if testSuccessful else 1 exit(exitCode) \ No newline at end of file diff --git a/tests/nodeos_startup_catchup.py b/tests/nodeos_startup_catchup.py index a358057061..164ebd60e0 100755 --- a/tests/nodeos_startup_catchup.py +++ b/tests/nodeos_startup_catchup.py @@ -201,7 +201,7 @@ def waitForNodeStarted(node): testSuccessful=True finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails) + TestHelper.shutdown(cluster, walletMgr, cluster.trxGenLauncher, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails) exitCode = 0 if testSuccessful else 1 exit(exitCode) \ No newline at end of file diff --git a/tests/performance_tests/log_reader.py b/tests/performance_tests/log_reader.py index 593d07a5f5..5448003f70 100644 --- a/tests/performance_tests/log_reader.py +++ b/tests/performance_tests/log_reader.py @@ -10,7 +10,7 @@ from pathlib import Path, PurePath sys.path.append(str(PurePath(PurePath(Path(__file__).absolute()).parent).parent)) -from TestHarness import Utils +from TestHarness import Utils, 
TransactionGeneratorsLauncher from dataclasses import dataclass, asdict, field from platform import release, system from datetime import datetime diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index b2d1df31bd..b9ae851999 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -370,13 +370,13 @@ def runTpsTest(self) -> PtbTpsTestResult: self.data.startBlock = self.waitForEmptyBlocks(self.validationNode, self.emptyBlockGoal) tpsTrxGensConfig = TpsTrxGensConfig(targetTps=self.ptbConfig.targetTps, tpsLimitPerGenerator=self.ptbConfig.tpsLimitPerGenerator) - trxGenLauncher = TransactionGeneratorsLauncher(chainId=chainId, lastIrreversibleBlockId=lib_id, contractOwnerAccount=self.clusterConfig.specifiedContract.accountName, + self.cluster.trxGenLauncher = TransactionGeneratorsLauncher(chainId=chainId, lastIrreversibleBlockId=lib_id, contractOwnerAccount=self.clusterConfig.specifiedContract.accountName, accts=','.join(map(str, self.accountNames)), privateKeys=','.join(map(str, self.accountPrivKeys)), trxGenDurationSec=self.ptbConfig.testTrxGenDurationSec, logDir=self.trxGenLogDirPath, abiFile=abiFile, actionsData=actionsDataJson, actionsAuths=actionsAuthsJson, peerEndpoint=self.producerNode.host, port=self.producerP2pPort, tpsTrxGensConfig=tpsTrxGensConfig) - trxGenExitCodes = trxGenLauncher.launch() + trxGenExitCodes = self.cluster.trxGenLauncher.launch() print(f"Transaction Generator exit codes: {trxGenExitCodes}") for exitCode in trxGenExitCodes: if exitCode != 0: @@ -460,7 +460,6 @@ def postTpsTestSteps(self): def runTest(self) -> bool: testSuccessful = False - completedRun = False try: # Kill any existing instances and launch cluster @@ -488,6 +487,7 @@ def runTest(self) -> bool: TestHelper.shutdown( self.cluster, self.walletMgr, + self.cluster.trxGenLauncher, testSuccessful, self.testHelperConfig._killEosInstances, 
self.testHelperConfig._killWallet, @@ -499,10 +499,6 @@ def runTest(self) -> bool: if not self.ptbConfig.delPerfLogs: self.captureLowLevelArtifacts() - if not completedRun: - os.system("pkill trx_generator") - print("Test run cancelled early via SIGINT") - if self.ptbConfig.delPerfLogs: print(f"Cleaning up logs directory: {self.loggingConfig.logDirPath}") self.testDirsCleanup(self.ptbConfig.delReport) From a2ee13ad39bb311a96874ff1de1523b4490e54e0 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Tue, 21 Feb 2023 20:40:44 -0600 Subject: [PATCH 120/178] rearrange shutdown arguments to accomodate the 20 or so tests using shutdown without keyword arguments --- tests/TestHarness/TestHelper.py | 2 +- tests/nodeos_contrl_c_test.py | 4 ++-- tests/nodeos_snapshot_diff_test.py | 2 +- tests/nodeos_startup_catchup.py | 2 +- tests/performance_tests/performance_test_basic.py | 4 ++-- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/TestHarness/TestHelper.py b/tests/TestHarness/TestHelper.py index 68e4d353b2..f1504f8a60 100644 --- a/tests/TestHarness/TestHelper.py +++ b/tests/TestHarness/TestHelper.py @@ -149,7 +149,7 @@ def printSystemInfo(prefix): @staticmethod # pylint: disable=too-many-arguments - def shutdown(cluster, walletMgr, trxGenLauncher=None, testSuccessful=True, killEosInstances=True, killWallet=True, keepLogs=False, cleanRun=True, dumpErrorDetails=False): + def shutdown(cluster, walletMgr, testSuccessful=True, killEosInstances=True, killWallet=True, keepLogs=False, cleanRun=True, dumpErrorDetails=False, trxGenLauncher=None): """Cluster and WalletMgr shutdown and cleanup.""" assert(cluster) assert(isinstance(cluster, Cluster)) diff --git a/tests/nodeos_contrl_c_test.py b/tests/nodeos_contrl_c_test.py index d71b7786ab..9ccaf631dd 100755 --- a/tests/nodeos_contrl_c_test.py +++ b/tests/nodeos_contrl_c_test.py @@ -112,11 +112,11 @@ testSuccessful = nonProdNode.kill(signal.SIGTERM) if not testSuccessful: - TestHelper.shutdown(cluster, walletMgr, 
cluster.trxGenLauncher, testSuccessful=testSuccessful, killEosInstances=True, killWallet=True, keepLogs=True, cleanRun=True, dumpErrorDetails=True) + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=True, killWallet=True, keepLogs=True, cleanRun=True, dumpErrorDetails=True, trxGenLauncher=cluster.trxGenLauncher) errorExit("Failed to kill the seed node") finally: - TestHelper.shutdown(cluster, walletMgr, cluster.trxGenLauncher, testSuccessful=testSuccessful, killEosInstances=True, killWallet=True, keepLogs=True, cleanRun=True, dumpErrorDetails=True) + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=True, killWallet=True, keepLogs=True, cleanRun=True, dumpErrorDetails=True, trxGenLauncher=cluster.trxGenLauncher) errorCode = 0 if testSuccessful else 1 exit(errorCode) \ No newline at end of file diff --git a/tests/nodeos_snapshot_diff_test.py b/tests/nodeos_snapshot_diff_test.py index e46f30bf48..caba2a91c4 100755 --- a/tests/nodeos_snapshot_diff_test.py +++ b/tests/nodeos_snapshot_diff_test.py @@ -199,7 +199,7 @@ def waitForBlock(node, blockNum, blockType=BlockType.head, timeout=None, reportI testSuccessful=True finally: - TestHelper.shutdown(cluster, walletMgr, cluster.trxGenLauncher, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails) + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails, trxGenLauncher=cluster.trxGenLauncher) exitCode = 0 if testSuccessful else 1 exit(exitCode) \ No newline at end of file diff --git a/tests/nodeos_startup_catchup.py b/tests/nodeos_startup_catchup.py index 164ebd60e0..0660807da4 100755 --- a/tests/nodeos_startup_catchup.py +++ b/tests/nodeos_startup_catchup.py @@ -201,7 +201,7 @@ def 
waitForNodeStarted(node): testSuccessful=True finally: - TestHelper.shutdown(cluster, walletMgr, cluster.trxGenLauncher, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails) + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails, trxGenLauncher=cluster.trxGenLauncher) exitCode = 0 if testSuccessful else 1 exit(exitCode) \ No newline at end of file diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index b9ae851999..cb8cacd1c3 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -487,13 +487,13 @@ def runTest(self) -> bool: TestHelper.shutdown( self.cluster, self.walletMgr, - self.cluster.trxGenLauncher, testSuccessful, self.testHelperConfig._killEosInstances, self.testHelperConfig._killWallet, self.testHelperConfig.keepLogs, self.testHelperConfig.killAll, - self.testHelperConfig.dumpErrorDetails + self.testHelperConfig.dumpErrorDetails, + trxGenLauncher=self.cluster.trxGenLauncher ) if not self.ptbConfig.delPerfLogs: From fd4cdbd52cfffacb4d9f5d11ffdcf8617ca91e48 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 22 Feb 2023 11:18:14 -0600 Subject: [PATCH 121/178] Update README for new command line arguments to Performance Harness scripts. --- tests/performance_tests/README.md | 182 +++++++++++++++++++++--------- 1 file changed, 126 insertions(+), 56 deletions(-) diff --git a/tests/performance_tests/README.md b/tests/performance_tests/README.md index 9919bae1d2..c525779933 100644 --- a/tests/performance_tests/README.md +++ b/tests/performance_tests/README.md @@ -198,6 +198,10 @@ The Performance Harness main script `performance_test.py` can be configured usin
Expand Argument List +Test Helper Arguments: + Test Helper configuration items used to configure and spin up the regression test framework and blockchain environment. + +* `-?` show this help message and exit * `-p P` producing nodes count (default: 1) * `-n N` total nodes (default: 0) * `-d D` delay between nodes startup (default: 1) @@ -208,15 +212,10 @@ The Performance Harness main script `performance_test.py` can be configured usin * `-v` verbose logging (default: False) * `--leave-running` Leave cluster running after test finishes (default: False) * `--clean-run` Kill all nodeos and keosd instances (default: False) -* `--max-tps-to-test MAX_TPS_TO_TEST` - The max target transfers realistic as ceiling of test range (default: 50000) -* `--test-iteration-duration-sec TEST_ITERATION_DURATION_SEC` - The duration of transfer trx generation for each iteration of the test during the initial search (seconds) (default: 30) -* `--test-iteration-min-step TEST_ITERATION_MIN_STEP` - The step size determining granularity of tps result during initial search (default: 500) -* `--final-iterations-duration-sec FINAL_ITERATIONS_DURATION_SEC` - The duration of transfer trx generation for each final longer run iteration of the test during - the final search (seconds) (default: 90) + +Performance Test Basic Base: + Performance Test Basic base configuration items. + * `--tps-limit-per-generator TPS_LIMIT_PER_GENERATOR` Maximum amount of transactions per second a single generator can have. 
(default: 4000) * `--genesis GENESIS` Path to genesis.json (default: tests/performance_tests/genesis.json) @@ -229,14 +228,18 @@ The Performance Harness main script `performance_test.py` can be configured usin * `--chain-state-db-size-mb CHAIN_STATE_DB_SIZE_MB` Maximum size (in MiB) of the chain state database (default: 10240) * `--chain-threads CHAIN_THREADS` - Number of worker threads in controller thread pool (default: 3) + Number of worker threads in controller thread pool (default: 2) * `--database-map-mode {mapped,heap,locked}` Database map mode ("mapped", "heap", or "locked"). In "mapped" mode database is memory mapped as a file. In "heap" mode database is preloaded in to swappable memory and will use huge pages if available. In "locked" mode database is preloaded, locked in to memory, and will use huge pages if available. (default: mapped) +* `--cluster-log-lvl {all,debug,info,warn,error,off}` + Cluster log level ("all", "debug", "info", "warn", "error", or "off"). Performance Harness Test Basic relies on some logging at + "info" level, so it is recommended lowest logging level to use. However, there are instances where more verbose logging can be + useful. (default: info) * `--net-threads NET_THREADS` - Number of worker threads in net_plugin thread pool (default: 2) + Number of worker threads in net_plugin thread pool (default: 4) * `--disable-subjective-billing DISABLE_SUBJECTIVE_BILLING` Disable subjective CPU billing for API/P2P transactions (default: True) * `--last-block-time-offset-us LAST_BLOCK_TIME_OFFSET_US` @@ -248,15 +251,30 @@ The Performance Harness main script `performance_test.py` can be configured usin * `--last-block-cpu-effort-percent LAST_BLOCK_CPU_EFFORT_PERCENT` Percentage of cpu block production time used to produce last block. Whole number percentages, e.g. 
80 for 80% (default: 100) * `--producer-threads PRODUCER_THREADS` - Number of worker threads in producer thread pool (default: 6) + Number of worker threads in producer thread pool (default: 2) * `--http-max-response-time-ms HTTP_MAX_RESPONSE_TIME_MS` Maximum time for processing a request, -1 for unlimited (default: 990000) * `--del-perf-logs` Whether to delete performance test specific logs. (default: False) * `--del-report` Whether to delete overarching performance run report. (default: False) -* `--del-test-report` Whether to save json reports from each test scenario. (default: False) * `--quiet` Whether to quiet printing intermediate results and reports to stdout (default: False) * `--prods-enable-trace-api` Determines whether producer nodes should have eosio::trace_api_plugin enabled (default: False) +* `--print-missing-transactions PRINT_MISSING_TRANSACTIONS` + Toggles if missing transactions are be printed upon test completion. (default: False) +* `--account-name ACCOUNT_NAME` + Name of the account to create and assign a contract to (default: eosio) +* `--owner-public-key OWNER_PUBLIC_KEY` + Owner public key to use with specified account name (default: EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV) +* `--active-public-key ACTIVE_PUBLIC_KEY` + Active public key to use with specified account name (default: EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV) +* `--contract-dir CONTRACT_DIR` + Path to contract dir (default: unittests/contracts/eosio.system) +* `--wasm-file WASM_FILE` WASM file name for contract (default: eosio.system.wasm) +* `--abi-file ABI_FILE` ABI file name for contract (default: eosio.system.abi) + +Performance Harness: + Performance Harness testing configuration items. + * `--skip-tps-test` Determines whether to skip the max TPS measurement tests (default: False) * `--calc-producer-threads {none,lmax,full}` Determines whether to calculate number of worker threads to use in producer thread pool ("none", "lmax", or "full"). 
@@ -273,12 +291,20 @@ The Performance Harness main script `performance_test.py` can be configured usin In "none" mode, the default, no calculation will be attempted and default configured --net-threads value will be used. In "lmax" mode, producer threads will incrementally be tested until the performance rate ceases to increase with the addition of additional threads. In "full" mode producer threads will incrementally be tested from 2..num logical processors, recording each performance and choosing the local max performance (same value as would be discovered in "lmax" mode). Useful for graphing the full performance impact of each available thread. (default: none) -* `--account-name` Name of the account to create and assign a contract to -* `--owner-public-key` Owner public key to use with specified account name -* `--active-public-key` Active public key to use with specified account name -* `--contract-dir` Path to contract dir -* `--wasm-file` WASM file name for contract -* `--abi-file` ABI file name for contract +* `--del-test-report` Whether to save json reports from each test scenario. (default: False) + +Performance Harness - TPS Test Config: + TPS Performance Test configuration items. + +* `--max-tps-to-test MAX_TPS_TO_TEST` + The max target transfers realistic as ceiling of test range (default: 50000) +* `--test-iteration-duration-sec TEST_ITERATION_DURATION_SEC` + The duration of transfer trx generation for each iteration of the test during the initial search (seconds) (default: 150) +* `--test-iteration-min-step TEST_ITERATION_MIN_STEP` + The step size determining granularity of tps result during initial search (default: 500) +* `--final-iterations-duration-sec FINAL_ITERATIONS_DURATION_SEC` + The duration of transfer trx generation for each final longer run iteration of the test during the final search (seconds) + (default: 300)
### Support Scripts @@ -292,6 +318,10 @@ The following scripts are typically used by the Performance Harness main script
Expand Argument List +Test Helper Arguments: + Test Helper configuration items used to configure and spin up the regression test framework and blockchain environment. + +* `-?` show this help message and exit * `-p P` producing nodes count (default: 1) * `-n N` total nodes (default: 0) * `-d D` delay between nodes startup (default: 1) @@ -302,12 +332,12 @@ The following scripts are typically used by the Performance Harness main script * `-v` verbose logging (default: False) * `--leave-running` Leave cluster running after test finishes (default: False) * `--clean-run` Kill all nodeos and keosd instances (default: False) -* `--target-tps TARGET_TPS` - The target transfers per second to send during test (default: 8000) + +Performance Test Basic Base: + Performance Test Basic base configuration items. + * `--tps-limit-per-generator TPS_LIMIT_PER_GENERATOR` Maximum amount of transactions per second a single generator can have. (default: 4000) -* `--test-duration-sec TEST_DURATION_SEC` - The duration of transfer trx generation for the test in seconds (default: 30) * `--genesis GENESIS` Path to genesis.json (default: tests/performance_tests/genesis.json) * `--num-blocks-to-prune NUM_BLOCKS_TO_PRUNE` The number of potentially non-empty blocks, in addition to leading and trailing size 0 blocks, to prune from the beginning and end @@ -317,14 +347,18 @@ The following scripts are typically used by the Performance Harness main script * `--chain-state-db-size-mb CHAIN_STATE_DB_SIZE_MB` Maximum size (in MiB) of the chain state database (default: 10240) * `--chain-threads CHAIN_THREADS` - Number of worker threads in controller thread pool (default: 3) + Number of worker threads in controller thread pool (default: 2) * `--database-map-mode {mapped,heap,locked}` Database map mode ("mapped", "heap", or "locked"). In "mapped" mode database is memory mapped as a file. In "heap" mode database is preloaded in to swappable memory and will use huge pages if available. 
In "locked" mode database is preloaded, locked in to memory, and will use huge pages if available. (default: mapped) +* `--cluster-log-lvl {all,debug,info,warn,error,off}` + Cluster log level ("all", "debug", "info", "warn", "error", or "off"). Performance Harness Test Basic relies on some logging at + "info" level, so it is recommended lowest logging level to use. However, there are instances where more verbose logging can be + useful. (default: info) * `--net-threads NET_THREADS` - Number of worker threads in net_plugin thread pool (default: 2) + Number of worker threads in net_plugin thread pool (default: 4) * `--disable-subjective-billing DISABLE_SUBJECTIVE_BILLING` Disable subjective CPU billing for API/P2P transactions (default: True) * `--last-block-time-offset-us LAST_BLOCK_TIME_OFFSET_US` @@ -336,7 +370,7 @@ The following scripts are typically used by the Performance Harness main script * `--last-block-cpu-effort-percent LAST_BLOCK_CPU_EFFORT_PERCENT` Percentage of cpu block production time used to produce last block. Whole number percentages, e.g. 80 for 80% (default: 100) * `--producer-threads PRODUCER_THREADS` - Number of worker threads in producer thread pool (default: 6) + Number of worker threads in producer thread pool (default: 2) * `--http-max-response-time-ms HTTP_MAX_RESPONSE_TIME_MS` Maximum time for processing a request, -1 for unlimited (default: 990000) * `--del-perf-logs` Whether to delete performance test specific logs. (default: False) @@ -344,14 +378,30 @@ The following scripts are typically used by the Performance Harness main script * `--quiet` Whether to quiet printing intermediate results and reports to stdout (default: False) * `--prods-enable-trace-api` Determines whether producer nodes should have eosio::trace_api_plugin enabled (default: False) -* `--print-missing-transactions` +* `--print-missing-transactions PRINT_MISSING_TRANSACTIONS` Toggles if missing transactions are be printed upon test completion. 
(default: False) -* `--account-name` Name of the account to create and assign a contract to -* `--owner-public-key` Owner public key to use with specified account name -* `--active-public-key` Active public key to use with specified account name -* `--contract-dir` Path to contract dir -* `--wasm-file` WASM file name for contract -* `--abi-file` ABI file name for contract +* `--account-name ACCOUNT_NAME` + Name of the account to create and assign a contract to (default: eosio) +* `--owner-public-key OWNER_PUBLIC_KEY` + Owner public key to use with specified account name (default: EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV) +* `--active-public-key ACTIVE_PUBLIC_KEY` + Active public key to use with specified account name (default: EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV) +* `--contract-dir CONTRACT_DIR` + Path to contract dir (default: unittests/contracts/eosio.system) +* `--wasm-file WASM_FILE` + WASM file name for contract (default: eosio.system.wasm) +* `--abi-file ABI_FILE` ABI file name for contract (default: eosio.system.abi) + +Performance Test Basic Single Test: + Performance Test Basic single test configuration items. Useful for running a single test directly. These items may not be directly configurable from + higher level scripts as the scripts themselves may configure these internally. + +* `--target-tps TARGET_TPS` + The target transfers per second to send during test (default: 8000) +* `--test-duration-sec TEST_DURATION_SEC` + The duration of transfer trx generation for the test in seconds (default: 90) +* `--user-trx-data-file USER_TRX_DATA_FILE` + Path to userTrxDataTransfer.json (default: None)
#### Launch Transaction Generators (TestHarness) @@ -360,7 +410,6 @@ The following scripts are typically used by the Performance Harness main script
Expand Argument List - * `chain_id` set the chain id * `last_irreversible_block_id` Current last-irreversible-block-id (LIB ID) to use for transactions. * `contract_owner_account` Account name of the contract owner account for the transfer actions @@ -370,6 +419,11 @@ The following scripts are typically used by the Performance Harness main script * `target_tps` Target transactions per second to generate/send. * `tps_limit_per_generator` Maximum amount of transactions per second a single generator can have. * `log_dir` set the logs directory +* `abi_file` The path to the contract abi file to use for the supplied transaction action data +* `actions_data` The json actions data file or json actions data description string to use +* `actions_auths` The json actions auth file or json actions auths description string to use, containting authAcctName to activePrivateKey pairs. +* `peer_endpoint` set the peer endpoint to send transactions to, default="127.0.0.1" +* `port` set the peer endpoint port to send transactions to, default=9876
#### Transaction Generator @@ -378,38 +432,54 @@ The following scripts are typically used by the Performance Harness main script
Expand Argument List +* `--generator-id arg` (=0) Id for the transaction generator. + Allowed range (0-960). Defaults to 0. * `--chain-id arg` set the chain id -* `--contract-owner-account arg` Account name of the contract owner account for - the transfer actions -* `--accounts arg` comma-separated list of accounts that - will be used for transfers. Minimum +* `--contract-owner-account arg` Account name of the contract account for + the transaction actions +* `--accounts arg` comma-separated list of accounts that + will be used for transfers. Minimum required accounts: 2. * `--priv-keys arg` comma-separated list of private keys in - same order of accounts list that will - be used to sign transactions. Minimum + same order of accounts list that will + be used to sign transactions. Minimum required: 2. * `--trx-expiration arg` (=3600) transaction expiration time in seconds. - Defaults to 3,600. Maximum allowed: + Defaults to 3,600. Maximum allowed: 3,600 -* `--trx-gen-duration arg` (=60) Transaction generation duration +* `--trx-gen-duration arg` (=60) Transaction generation duration (seconds). Defaults to 60 seconds. -* `--target-tps arg` (=1) Target transactions per second to - generate/send. Defaults to 1 +* `--target-tps arg` (=1) Target transactions per second to + generate/send. Defaults to 1 transaction per second. -* `--last-irreversible-block-id arg` Current last-irreversible-block-id (LIB - ID) to use for transactions. +* `--last-irreversible-block-id arg` Current last-irreversible-block-id (LIB + ID) to use for transactions. * `--monitor-spinup-time-us arg` (=1000000) - Number of microseconds to wait before - monitoring TPS. Defaults to 1000000 - (1s). -* `--monitor-max-lag-percent arg` (=5) Max percentage off from expected - transactions sent before being in - violation. Defaults to 5. + Number of microseconds to wait before + monitoring TPS. Defaults to 1000000 + (1s). 
+* `--monitor-max-lag-percent arg` (=5) Max percentage off from expected + transactions sent before being in + violation. Defaults to 5. * `--monitor-max-lag-duration-us arg` (=1000000) - Max microseconds that transaction - generation can be in violation before - quitting. Defaults to 1000000 (1s). -* `--log-dir arg` set the logs directory + Max microseconds that transaction + generation can be in violation before + quitting. Defaults to 1000000 (1s). +* `--log-dir arg` set the logs directory +* `--abi-file arg` The path to the contract abi file to + use for the supplied transaction action + data +* `--actions-data arg` The json actions data file or json + actions data description string to use +* `--actions-auths arg` The json actions auth file or json + actions auths description string to + use, containting authAcctName to + activePrivateKey pairs. +* `--peer-endpoint arg` (=127.0.0.1) set the peer endpoint to send + transactions to +* `--port arg` (=9876) set the peer endpoint port to send + transactions to +* `-h [ --help ]` print this list
## Result Reports From 97ed6b61180632f948de2cf57987d51200201b94 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 22 Feb 2023 11:31:41 -0600 Subject: [PATCH 122/178] Update test report examples in README. --- tests/performance_tests/README.md | 845 ++++++++++++++++++++++-------- 1 file changed, 623 insertions(+), 222 deletions(-) diff --git a/tests/performance_tests/README.md b/tests/performance_tests/README.md index c525779933..450e2154ad 100644 --- a/tests/performance_tests/README.md +++ b/tests/performance_tests/README.md @@ -505,23 +505,23 @@ Next, a summary of the search scenario conducted and respective results is inclu Expand Search Scenario Summary Example ``` json - "1": { + "2": { "success": true, - "searchTarget": 26000, + "searchTarget": 12500, "searchFloor": 0, - "searchCeiling": 26500, + "searchCeiling": 24500, "basicTestResult": { - "targetTPS": 26000, - "resultAvgTps": 25986.9375, - "expectedTxns": 260000, - "resultTxns": 260000, + "targetTPS": 12500, + "resultAvgTps": 12499.8125, + "expectedTxns": 125000, + "resultTxns": 125000, "tpsExpectMet": true, "trxExpectMet": true, "basicTestSuccess": true, "testAnalysisBlockCnt": 17, - "logsDir": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic/2022-11-23_15-18-52-26000", - "testStart": "2022-11-23T15:18:52.115767", - "testEnd": "2022-11-23T15:20:16.911367" + "logsDir": "p/2023-02-22_15-17-12/testRunLogs/p/2023-02-22_17-07-47-12500", + "testStart": "2023-02-22T17:07:47.714382", + "testEnd": "2023-02-22T17:09:00.351289" } } ``` @@ -554,12 +554,12 @@ Finally, the full detail test report for each of the determined max TPS throughp ``` json { - "perfTestsBegin": "2022-11-23T12:56:58.699686", - "perfTestsFinish": "2022-11-23T15:20:16.979815", - "InitialMaxTpsAchieved": 26500, - "LongRunningMaxTpsAchieved": 26000, - "tpsTestStart": "2022-11-23T15:05:42.005050", - "tpsTestFinish": "2022-11-23T15:20:16.979800", + "perfTestsBegin": "2023-02-22T15:17:12.080867", + "perfTestsFinish": 
"2023-02-22T17:15:43.310101", + "InitialMaxTpsAchieved": 15500, + "LongRunningMaxTpsAchieved": 15500, + "tpsTestStart": "2023-02-22T17:04:36.629749", + "tpsTestFinish": "2023-02-22T17:15:43.310092", "InitialSearchResults": { "0": { "success": false, @@ -568,149 +568,130 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchCeiling": 50000, "basicTestResult": { "targetTPS": 50000, - "resultAvgTps": 23784.324324324323, + "resultAvgTps": 14888.279069767443, "expectedTxns": 500000, - "resultTxns": 500000, + "resultTxns": 342429, "tpsExpectMet": false, - "trxExpectMet": true, - "basicTestSuccess": true, - "testAnalysisBlockCnt": 38, - "logsDir": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic/2022-11-23_15-05-42-50000", - "testStart": "2022-11-23T15:05:42.005080", - "testEnd": "2022-11-23T15:07:24.111044" + "trxExpectMet": false, + "basicTestSuccess": false, + "testAnalysisBlockCnt": 44, + "logsDir": "p/2023-02-22_15-17-12/testRunLogs/p/2023-02-22_17-04-36-50000", + "testStart": "2023-02-22T17:04:36.629809", + "testEnd": "2023-02-22T17:06:15.893419" } }, "1": { - "success": true, + "success": false, "searchTarget": 25000, "searchFloor": 0, "searchCeiling": 49500, "basicTestResult": { "targetTPS": 25000, - "resultAvgTps": 25013.3125, + "resultAvgTps": 15541.464285714286, "expectedTxns": 250000, "resultTxns": 250000, - "tpsExpectMet": true, + "tpsExpectMet": false, "trxExpectMet": true, "basicTestSuccess": true, - "testAnalysisBlockCnt": 17, - "logsDir": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic/2022-11-23_15-07-24-25000", - "testStart": "2022-11-23T15:07:24.225706", - "testEnd": "2022-11-23T15:08:47.510691" + "testAnalysisBlockCnt": 29, + "logsDir": "p/2023-02-22_15-17-12/testRunLogs/p/2023-02-22_17-06-16-25000", + "testStart": "2023-02-22T17:06:16.000708", + "testEnd": "2023-02-22T17:07:47.634132" } }, "2": { - "success": false, - "searchTarget": 37500, - "searchFloor": 
25500, - "searchCeiling": 49500, + "success": true, + "searchTarget": 12500, + "searchFloor": 0, + "searchCeiling": 24500, "basicTestResult": { - "targetTPS": 37500, - "resultAvgTps": 24912.576923076922, - "expectedTxns": 375000, - "resultTxns": 375000, - "tpsExpectMet": false, + "targetTPS": 12500, + "resultAvgTps": 12499.8125, + "expectedTxns": 125000, + "resultTxns": 125000, + "tpsExpectMet": true, "trxExpectMet": true, "basicTestSuccess": true, - "testAnalysisBlockCnt": 27, - "logsDir": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic/2022-11-23_15-08-47-37500", - "testStart": "2022-11-23T15:08:47.579754", - "testEnd": "2022-11-23T15:10:23.342881" + "testAnalysisBlockCnt": 17, + "logsDir": "p/2023-02-22_15-17-12/testRunLogs/p/2023-02-22_17-07-47-12500", + "testStart": "2023-02-22T17:07:47.714382", + "testEnd": "2023-02-22T17:09:00.351289" } }, "3": { "success": false, - "searchTarget": 31500, - "searchFloor": 25500, - "searchCeiling": 37000, + "searchTarget": 19000, + "searchFloor": 13000, + "searchCeiling": 24500, "basicTestResult": { - "targetTPS": 31500, - "resultAvgTps": 24525.095238095237, - "expectedTxns": 315000, - "resultTxns": 315000, + "targetTPS": 19000, + "resultAvgTps": 15566.0, + "expectedTxns": 190000, + "resultTxns": 190000, "tpsExpectMet": false, "trxExpectMet": true, "basicTestSuccess": true, "testAnalysisBlockCnt": 22, - "logsDir": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic/2022-11-23_15-10-23-31500", - "testStart": "2022-11-23T15:10:23.432821", - "testEnd": "2022-11-23T15:11:53.366694" + "logsDir": "p/2023-02-22_15-17-12/testRunLogs/p/2023-02-22_17-09-00-19000", + "testStart": "2023-02-22T17:09:00.404183", + "testEnd": "2023-02-22T17:10:24.711309" } }, "4": { "success": false, - "searchTarget": 28500, - "searchFloor": 25500, - "searchCeiling": 31000, + "searchTarget": 16000, + "searchFloor": 13000, + "searchCeiling": 18500, "basicTestResult": { - "targetTPS": 28500, - "resultAvgTps": 
25896.666666666668, - "expectedTxns": 285000, - "resultTxns": 285000, + "targetTPS": 16000, + "resultAvgTps": 14776.235294117647, + "expectedTxns": 160000, + "resultTxns": 160000, "tpsExpectMet": false, "trxExpectMet": true, "basicTestSuccess": true, - "testAnalysisBlockCnt": 19, - "logsDir": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic/2022-11-23_15-11-53-28500", - "testStart": "2022-11-23T15:11:53.448449", - "testEnd": "2022-11-23T15:13:17.714663" + "testAnalysisBlockCnt": 18, + "logsDir": "p/2023-02-22_15-17-12/testRunLogs/p/2023-02-22_17-10-24-16000", + "testStart": "2023-02-22T17:10:24.776702", + "testEnd": "2023-02-22T17:11:46.433363" } }, "5": { - "success": false, - "searchTarget": 27000, - "searchFloor": 25500, - "searchCeiling": 28000, - "basicTestResult": { - "targetTPS": 27000, - "resultAvgTps": 26884.625, - "expectedTxns": 270000, - "resultTxns": 270000, - "tpsExpectMet": false, - "trxExpectMet": true, - "basicTestSuccess": true, - "testAnalysisBlockCnt": 17, - "logsDir": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic/2022-11-23_15-13-17-27000", - "testStart": "2022-11-23T15:13:17.787205", - "testEnd": "2022-11-23T15:14:40.753850" - } - }, - "6": { "success": true, - "searchTarget": 26000, - "searchFloor": 25500, - "searchCeiling": 26500, + "searchTarget": 14500, + "searchFloor": 13000, + "searchCeiling": 15500, "basicTestResult": { - "targetTPS": 26000, - "resultAvgTps": 25959.0, - "expectedTxns": 260000, - "resultTxns": 260000, + "targetTPS": 14500, + "resultAvgTps": 14570.125, + "expectedTxns": 145000, + "resultTxns": 145000, "tpsExpectMet": true, "trxExpectMet": true, "basicTestSuccess": true, "testAnalysisBlockCnt": 17, - "logsDir": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic/2022-11-23_15-14-40-26000", - "testStart": "2022-11-23T15:14:40.823681", - "testEnd": "2022-11-23T15:16:02.884525" + "logsDir": 
"p/2023-02-22_15-17-12/testRunLogs/p/2023-02-22_17-11-46-14500", + "testStart": "2023-02-22T17:11:46.494769", + "testEnd": "2023-02-22T17:13:06.882690" } }, - "7": { + "6": { "success": true, - "searchTarget": 26500, - "searchFloor": 26500, - "searchCeiling": 26500, + "searchTarget": 15500, + "searchFloor": 15000, + "searchCeiling": 15500, "basicTestResult": { - "targetTPS": 26500, - "resultAvgTps": 26400.5625, - "expectedTxns": 265000, - "resultTxns": 265000, + "targetTPS": 15500, + "resultAvgTps": 15448.4375, + "expectedTxns": 155000, + "resultTxns": 155000, "tpsExpectMet": true, "trxExpectMet": true, "basicTestSuccess": true, "testAnalysisBlockCnt": 17, - "logsDir": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic/2022-11-23_15-16-02-26500", - "testStart": "2022-11-23T15:16:02.953195", - "testEnd": "2022-11-23T15:17:28.412837" + "logsDir": "p/2023-02-22_15-17-12/testRunLogs/p/2023-02-22_17-13-06-15500", + "testStart": "2023-02-22T17:13:06.948372", + "testEnd": "2023-02-22T17:14:24.937779" } } }, @@ -729,41 +710,22 @@ Finally, the full detail test report for each of the determined max TPS throughp }, "LongRunningSearchResults": { "0": { - "success": false, - "searchTarget": 26500, - "searchFloor": 0, - "searchCeiling": 26500, - "basicTestResult": { - "targetTPS": 26500, - "resultAvgTps": 22554.42105263158, - "expectedTxns": 265000, - "resultTxns": 265000, - "tpsExpectMet": false, - "trxExpectMet": true, - "basicTestSuccess": true, - "testAnalysisBlockCnt": 20, - "logsDir": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic/2022-11-23_15-17-28-26500", - "testStart": "2022-11-23T15:17:28.483195", - "testEnd": "2022-11-23T15:18:52.048868" - } - }, - "1": { "success": true, - "searchTarget": 26000, + "searchTarget": 15500, "searchFloor": 0, - "searchCeiling": 26500, + "searchCeiling": 15500, "basicTestResult": { - "targetTPS": 26000, - "resultAvgTps": 25986.9375, - "expectedTxns": 260000, - "resultTxns": 260000, + 
"targetTPS": 15500, + "resultAvgTps": 15482.375, + "expectedTxns": 155000, + "resultTxns": 155000, "tpsExpectMet": true, "trxExpectMet": true, "basicTestSuccess": true, "testAnalysisBlockCnt": 17, - "logsDir": "./performance_test/2022-11-23_12-56-58/testRunLogs/performance_test_basic/2022-11-23_15-18-52-26000", - "testStart": "2022-11-23T15:18:52.115767", - "testEnd": "2022-11-23T15:20:16.911367" + "logsDir": "p/2023-02-22_15-17-12/testRunLogs/p/2023-02-22_17-14-24-15500", + "testStart": "2023-02-22T17:14:24.998249", + "testEnd": "2023-02-22T17:15:43.248732" } } }, @@ -784,35 +746,35 @@ Finally, the full detail test report for each of the determined max TPS throughp "recommendedThreadCount": 6, "threadToMaxTpsDict": { "2": 16000, - "3": 21000, - "4": 24000, - "5": 25500, - "6": 27000, - "7": 26000 + "3": 18000, + "4": 20000, + "5": 22000, + "6": 22500, + "7": 22000 }, - "analysisStart": "2022-11-23T12:56:58.730271", - "analysisFinish": "2022-11-23T14:05:45.727625" + "analysisStart": "2023-02-22T15:17:12.124072", + "analysisFinish": "2023-02-22T16:25:05.332487" }, "ChainThreadAnalysis": { - "recommendedThreadCount": 3, + "recommendedThreadCount": 2, "threadToMaxTpsDict": { - "2": 25000, - "3": 26500, - "4": 26500 + "2": 15500, + "3": 15500 }, - "analysisStart": "2022-11-23T14:05:45.728348", - "analysisFinish": "2022-11-23T14:41:43.721885" + "analysisStart": "2023-02-22T16:25:05.333425", + "analysisFinish": "2023-02-22T16:44:52.600693" }, "NetThreadAnalysis": { "recommendedThreadCount": 2, "threadToMaxTpsDict": { - "2": 25500, - "3": 25000 + "2": 15500, + "3": 15500 }, - "analysisStart": "2022-11-23T14:41:43.722862", - "analysisFinish": "2022-11-23T15:05:42.004421" + "analysisStart": "2023-02-22T16:44:52.601488", + "analysisFinish": "2023-02-22T17:04:36.629427" }, "args": { + "rawCmdLine ": "./tests/performance_tests/performance_test.py --test-iteration-duration-sec 10 --final-iterations-duration-sec 30 --calc-producer-threads lmax --calc-chain-threads lmax 
--calc-net-threads lmax", "killAll": false, "dontKill": false, "keepLogs": true, @@ -827,34 +789,482 @@ Finally, the full detail test report for each of the determined max TPS throughp "topo": "mesh", "extraNodeosArgs": { "chainPluginArgs": { - "signatureCpuBillablePct": 0, + "_pluginNamespace": "eosio", + "_pluginName": "chain_plugin", + "blocksDir": null, + "_blocksDirNodeosDefault": "\"blocks\"", + "_blocksDirNodeosArg": "--blocks-dir", + "stateDir": null, + "_stateDirNodeosDefault": "\"state\"", + "_stateDirNodeosArg": "--state-dir", + "protocolFeaturesDir": null, + "_protocolFeaturesDirNodeosDefault": "\"protocol_features\"", + "_protocolFeaturesDirNodeosArg": "--protocol-features-dir", + "checkpoint": null, + "_checkpointNodeosDefault": null, + "_checkpointNodeosArg": "--checkpoint", + "wasmRuntime": null, + "_wasmRuntimeNodeosDefault": "eos-vm-jit", + "_wasmRuntimeNodeosArg": "--wasm-runtime", + "profileAccount": null, + "_profileAccountNodeosDefault": null, + "_profileAccountNodeosArg": "--profile-account", + "abiSerializerMaxTimeMs": null, + "_abiSerializerMaxTimeMsNodeosDefault": 15, + "_abiSerializerMaxTimeMsNodeosArg": "--abi-serializer-max-time-ms", "chainStateDbSizeMb": 10240, - "chainThreads": 3, - "databaseMapMode": "mapped" + "_chainStateDbSizeMbNodeosDefault": 1024, + "_chainStateDbSizeMbNodeosArg": "--chain-state-db-size-mb", + "chainStateDbGuardSizeMb": null, + "_chainStateDbGuardSizeMbNodeosDefault": 128, + "_chainStateDbGuardSizeMbNodeosArg": "--chain-state-db-guard-size-mb", + "signatureCpuBillablePct": 0, + "_signatureCpuBillablePctNodeosDefault": 50, + "_signatureCpuBillablePctNodeosArg": "--signature-cpu-billable-pct", + "chainThreads": 2, + "_chainThreadsNodeosDefault": 2, + "_chainThreadsNodeosArg": "--chain-threads", + "contractsConsole": null, + "_contractsConsoleNodeosDefault": false, + "_contractsConsoleNodeosArg": "--contracts-console", + "deepMind": null, + "_deepMindNodeosDefault": false, + "_deepMindNodeosArg": "--deep-mind", + 
"actorWhitelist": null, + "_actorWhitelistNodeosDefault": null, + "_actorWhitelistNodeosArg": "--actor-whitelist", + "actorBlacklist": null, + "_actorBlacklistNodeosDefault": null, + "_actorBlacklistNodeosArg": "--actor-blacklist", + "contractWhitelist": null, + "_contractWhitelistNodeosDefault": null, + "_contractWhitelistNodeosArg": "--contract-whitelist", + "contractBlacklist": null, + "_contractBlacklistNodeosDefault": null, + "_contractBlacklistNodeosArg": "--contract-blacklist", + "actionBlacklist": null, + "_actionBlacklistNodeosDefault": null, + "_actionBlacklistNodeosArg": "--action-blacklist", + "keyBlacklist": null, + "_keyBlacklistNodeosDefault": null, + "_keyBlacklistNodeosArg": "--key-blacklist", + "senderBypassWhiteblacklist": null, + "_senderBypassWhiteblacklistNodeosDefault": null, + "_senderBypassWhiteblacklistNodeosArg": "--sender-bypass-whiteblacklist", + "readMode": null, + "_readModeNodeosDefault": "head", + "_readModeNodeosArg": "--read-mode", + "apiAcceptTransactions": null, + "_apiAcceptTransactionsNodeosDefault": 1, + "_apiAcceptTransactionsNodeosArg": "--api-accept-transactions", + "validationMode": null, + "_validationModeNodeosDefault": "full", + "_validationModeNodeosArg": "--validation-mode", + "disableRamBillingNotifyChecks": null, + "_disableRamBillingNotifyChecksNodeosDefault": false, + "_disableRamBillingNotifyChecksNodeosArg": "--disable-ram-billing-notify-checks", + "maximumVariableSignatureLength": null, + "_maximumVariableSignatureLengthNodeosDefault": 16384, + "_maximumVariableSignatureLengthNodeosArg": "--maximum-variable-signature-length", + "trustedProducer": null, + "_trustedProducerNodeosDefault": null, + "_trustedProducerNodeosArg": "--trusted-producer", + "databaseMapMode": "mapped", + "_databaseMapModeNodeosDefault": "mapped", + "_databaseMapModeNodeosArg": "--database-map-mode", + "eosVmOcCacheSizeMb": null, + "_eosVmOcCacheSizeMbNodeosDefault": 1024, + "_eosVmOcCacheSizeMbNodeosArg": "--eos-vm-oc-cache-size-mb", + 
"eosVmOcCompileThreads": null, + "_eosVmOcCompileThreadsNodeosDefault": 1, + "_eosVmOcCompileThreadsNodeosArg": "--eos-vm-oc-compile-threads", + "eosVmOcEnable": null, + "_eosVmOcEnableNodeosDefault": false, + "_eosVmOcEnableNodeosArg": "--eos-vm-oc-enable", + "enableAccountQueries": null, + "_enableAccountQueriesNodeosDefault": 0, + "_enableAccountQueriesNodeosArg": "--enable-account-queries", + "maxNonprivilegedInlineActionSize": null, + "_maxNonprivilegedInlineActionSizeNodeosDefault": 4096, + "_maxNonprivilegedInlineActionSizeNodeosArg": "--max-nonprivileged-inline-action-size", + "transactionRetryMaxStorageSizeGb": null, + "_transactionRetryMaxStorageSizeGbNodeosDefault": null, + "_transactionRetryMaxStorageSizeGbNodeosArg": "--transaction-retry-max-storage-size-gb", + "transactionRetryIntervalSec": null, + "_transactionRetryIntervalSecNodeosDefault": 20, + "_transactionRetryIntervalSecNodeosArg": "--transaction-retry-interval-sec", + "transactionRetryMaxExpirationSec": null, + "_transactionRetryMaxExpirationSecNodeosDefault": 120, + "_transactionRetryMaxExpirationSecNodeosArg": "--transaction-retry-max-expiration-sec", + "transactionFinalityStatusMaxStorageSizeGb": null, + "_transactionFinalityStatusMaxStorageSizeGbNodeosDefault": null, + "_transactionFinalityStatusMaxStorageSizeGbNodeosArg": "--transaction-finality-status-max-storage-size-gb", + "transactionFinalityStatusSuccessDurationSec": null, + "_transactionFinalityStatusSuccessDurationSecNodeosDefault": 180, + "_transactionFinalityStatusSuccessDurationSecNodeosArg": "--transaction-finality-status-success-duration-sec", + "transactionFinalityStatusFailureDurationSec": null, + "_transactionFinalityStatusFailureDurationSecNodeosDefault": 180, + "_transactionFinalityStatusFailureDurationSecNodeosArg": "--transaction-finality-status-failure-duration-sec", + "integrityHashOnStart": null, + "_integrityHashOnStartNodeosDefault": false, + "_integrityHashOnStartNodeosArg": "--integrity-hash-on-start", + 
"integrityHashOnStop": null, + "_integrityHashOnStopNodeosDefault": false, + "_integrityHashOnStopNodeosArg": "--integrity-hash-on-stop", + "blockLogRetainBlocks": null, + "_blockLogRetainBlocksNodeosDefault": null, + "_blockLogRetainBlocksNodeosArg": "--block-log-retain-blocks", + "genesisJson": null, + "_genesisJsonNodeosDefault": null, + "_genesisJsonNodeosArg": "--genesis-json", + "genesisTimestamp": null, + "_genesisTimestampNodeosDefault": null, + "_genesisTimestampNodeosArg": "--genesis-timestamp", + "printGenesisJson": null, + "_printGenesisJsonNodeosDefault": false, + "_printGenesisJsonNodeosArg": "--print-genesis-json", + "extractGenesisJson": null, + "_extractGenesisJsonNodeosDefault": null, + "_extractGenesisJsonNodeosArg": "--extract-genesis-json", + "printBuildInfo": null, + "_printBuildInfoNodeosDefault": false, + "_printBuildInfoNodeosArg": "--print-build-info", + "extractBuildInfo": null, + "_extractBuildInfoNodeosDefault": null, + "_extractBuildInfoNodeosArg": "--extract-build-info", + "forceAllChecks": null, + "_forceAllChecksNodeosDefault": false, + "_forceAllChecksNodeosArg": "--force-all-checks", + "disableReplayOpts": null, + "_disableReplayOptsNodeosDefault": false, + "_disableReplayOptsNodeosArg": "--disable-replay-opts", + "replayBlockchain": null, + "_replayBlockchainNodeosDefault": false, + "_replayBlockchainNodeosArg": "--replay-blockchain", + "hardReplayBlockchain": null, + "_hardReplayBlockchainNodeosDefault": false, + "_hardReplayBlockchainNodeosArg": "--hard-replay-blockchain", + "deleteAllBlocks": null, + "_deleteAllBlocksNodeosDefault": false, + "_deleteAllBlocksNodeosArg": "--delete-all-blocks", + "truncateAtBlock": null, + "_truncateAtBlockNodeosDefault": 0, + "_truncateAtBlockNodeosArg": "--truncate-at-block", + "terminateAtBlock": null, + "_terminateAtBlockNodeosDefault": 0, + "_terminateAtBlockNodeosArg": "--terminate-at-block", + "snapshot": null, + "_snapshotNodeosDefault": null, + "_snapshotNodeosArg": "--snapshot" + }, + 
"httpClientPluginArgs": { + "_pluginNamespace": "eosio", + "_pluginName": "http_client_plugin", + "httpsClientRootCert": null, + "_httpsClientRootCertNodeosDefault": null, + "_httpsClientRootCertNodeosArg": "--https-client-root-cert", + "httpsClientValidatePeers": null, + "_httpsClientValidatePeersNodeosDefault": 1, + "_httpsClientValidatePeersNodeosArg": "--https-client-validate-peers" + }, + "httpPluginArgs": { + "_pluginNamespace": "eosio", + "_pluginName": "http_plugin", + "unixSocketPath": null, + "_unixSocketPathNodeosDefault": null, + "_unixSocketPathNodeosArg": "--unix-socket-path", + "httpServerAddress": null, + "_httpServerAddressNodeosDefault": "127.0.0.1:8888", + "_httpServerAddressNodeosArg": "--http-server-address", + "httpsServerAddress": null, + "_httpsServerAddressNodeosDefault": null, + "_httpsServerAddressNodeosArg": "--https-server-address", + "httpsCertificateChainFile": null, + "_httpsCertificateChainFileNodeosDefault": null, + "_httpsCertificateChainFileNodeosArg": "--https-certificate-chain-file", + "httpsPrivateKeyFile": null, + "_httpsPrivateKeyFileNodeosDefault": null, + "_httpsPrivateKeyFileNodeosArg": "--https-private-key-file", + "httpsEcdhCurve": null, + "_httpsEcdhCurveNodeosDefault": "secp384r1", + "_httpsEcdhCurveNodeosArg": "--https-ecdh-curve", + "accessControlAllowOrigin": null, + "_accessControlAllowOriginNodeosDefault": null, + "_accessControlAllowOriginNodeosArg": "--access-control-allow-origin", + "accessControlAllowHeaders": null, + "_accessControlAllowHeadersNodeosDefault": null, + "_accessControlAllowHeadersNodeosArg": "--access-control-allow-headers", + "accessControlMaxAge": null, + "_accessControlMaxAgeNodeosDefault": null, + "_accessControlMaxAgeNodeosArg": "--access-control-max-age", + "accessControlAllowCredentials": null, + "_accessControlAllowCredentialsNodeosDefault": false, + "_accessControlAllowCredentialsNodeosArg": "--access-control-allow-credentials", + "maxBodySize": null, + "_maxBodySizeNodeosDefault": 
2097152, + "_maxBodySizeNodeosArg": "--max-body-size", + "httpMaxBytesInFlightMb": null, + "_httpMaxBytesInFlightMbNodeosDefault": 500, + "_httpMaxBytesInFlightMbNodeosArg": "--http-max-bytes-in-flight-mb", + "httpMaxInFlightRequests": null, + "_httpMaxInFlightRequestsNodeosDefault": -1, + "_httpMaxInFlightRequestsNodeosArg": "--http-max-in-flight-requests", + "httpMaxResponseTimeMs": 990000, + "_httpMaxResponseTimeMsNodeosDefault": 30, + "_httpMaxResponseTimeMsNodeosArg": "--http-max-response-time-ms", + "verboseHttpErrors": null, + "_verboseHttpErrorsNodeosDefault": false, + "_verboseHttpErrorsNodeosArg": "--verbose-http-errors", + "httpValidateHost": null, + "_httpValidateHostNodeosDefault": 1, + "_httpValidateHostNodeosArg": "--http-validate-host", + "httpAlias": null, + "_httpAliasNodeosDefault": null, + "_httpAliasNodeosArg": "--http-alias", + "httpThreads": null, + "_httpThreadsNodeosDefault": 2, + "_httpThreadsNodeosArg": "--http-threads", + "httpKeepAlive": null, + "_httpKeepAliveNodeosDefault": 1, + "_httpKeepAliveNodeosArg": "--http-keep-alive" + }, + "netPluginArgs": { + "_pluginNamespace": "eosio", + "_pluginName": "net_plugin", + "p2pListenEndpoint": null, + "_p2pListenEndpointNodeosDefault": "0.0.0.0:9876", + "_p2pListenEndpointNodeosArg": "--p2p-listen-endpoint", + "p2pServerAddress": null, + "_p2pServerAddressNodeosDefault": null, + "_p2pServerAddressNodeosArg": "--p2p-server-address", + "p2pPeerAddress": null, + "_p2pPeerAddressNodeosDefault": null, + "_p2pPeerAddressNodeosArg": "--p2p-peer-address", + "p2pMaxNodesPerHost": null, + "_p2pMaxNodesPerHostNodeosDefault": 1, + "_p2pMaxNodesPerHostNodeosArg": "--p2p-max-nodes-per-host", + "p2pAcceptTransactions": null, + "_p2pAcceptTransactionsNodeosDefault": 1, + "_p2pAcceptTransactionsNodeosArg": "--p2p-accept-transactions", + "agentName": null, + "_agentNameNodeosDefault": "EOS Test Agent", + "_agentNameNodeosArg": "--agent-name", + "allowedConnection": null, + "_allowedConnectionNodeosDefault": 
"any", + "_allowedConnectionNodeosArg": "--allowed-connection", + "peerKey": null, + "_peerKeyNodeosDefault": null, + "_peerKeyNodeosArg": "--peer-key", + "peerPrivateKey": null, + "_peerPrivateKeyNodeosDefault": null, + "_peerPrivateKeyNodeosArg": "--peer-private-key", + "maxClients": null, + "_maxClientsNodeosDefault": 25, + "_maxClientsNodeosArg": "--max-clients", + "connectionCleanupPeriod": null, + "_connectionCleanupPeriodNodeosDefault": 30, + "_connectionCleanupPeriodNodeosArg": "--connection-cleanup-period", + "maxCleanupTimeMsec": null, + "_maxCleanupTimeMsecNodeosDefault": 10, + "_maxCleanupTimeMsecNodeosArg": "--max-cleanup-time-msec", + "p2pDedupCacheExpireTimeSec": null, + "_p2pDedupCacheExpireTimeSecNodeosDefault": 10, + "_p2pDedupCacheExpireTimeSecNodeosArg": "--p2p-dedup-cache-expire-time-sec", + "netThreads": 4, + "_netThreadsNodeosDefault": 4, + "_netThreadsNodeosArg": "--net-threads", + "syncFetchSpan": null, + "_syncFetchSpanNodeosDefault": 100, + "_syncFetchSpanNodeosArg": "--sync-fetch-span", + "useSocketReadWatermark": null, + "_useSocketReadWatermarkNodeosDefault": 0, + "_useSocketReadWatermarkNodeosArg": "--use-socket-read-watermark", + "peerLogFormat": null, + "_peerLogFormatNodeosDefault": "[\"${_name}\" - ${_cid} ${_ip}:${_port}] ", + "_peerLogFormatNodeosArg": "--peer-log-format", + "p2pKeepaliveIntervalMs": null, + "_p2pKeepaliveIntervalMsNodeosDefault": 10000, + "_p2pKeepaliveIntervalMsNodeosArg": "--p2p-keepalive-interval-ms" }, "producerPluginArgs": { - "disableSubjectiveBilling": true, - "lastBlockTimeOffsetUs": 0, + "_pluginNamespace": "eosio", + "_pluginName": "producer_plugin", + "enableStaleProduction": null, + "_enableStaleProductionNodeosDefault": false, + "_enableStaleProductionNodeosArg": "--enable-stale-production", + "pauseOnStartup": null, + "_pauseOnStartupNodeosDefault": false, + "_pauseOnStartupNodeosArg": "--pause-on-startup", + "maxTransactionTime": null, + "_maxTransactionTimeNodeosDefault": 30, + 
"_maxTransactionTimeNodeosArg": "--max-transaction-time", + "maxIrreversibleBlockAge": null, + "_maxIrreversibleBlockAgeNodeosDefault": -1, + "_maxIrreversibleBlockAgeNodeosArg": "--max-irreversible-block-age", + "producerName": null, + "_producerNameNodeosDefault": null, + "_producerNameNodeosArg": "--producer-name", + "privateKey": null, + "_privateKeyNodeosDefault": null, + "_privateKeyNodeosArg": "--private-key", + "signatureProvider": null, + "_signatureProviderNodeosDefault": "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV=KEY:5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3", + "_signatureProviderNodeosArg": "--signature-provider", + "greylistAccount": null, + "_greylistAccountNodeosDefault": null, + "_greylistAccountNodeosArg": "--greylist-account", + "greylistLimit": null, + "_greylistLimitNodeosDefault": 1000, + "_greylistLimitNodeosArg": "--greylist-limit", "produceTimeOffsetUs": 0, + "_produceTimeOffsetUsNodeosDefault": 0, + "_produceTimeOffsetUsNodeosArg": "--produce-time-offset-us", + "lastBlockTimeOffsetUs": 0, + "_lastBlockTimeOffsetUsNodeosDefault": -200000, + "_lastBlockTimeOffsetUsNodeosArg": "--last-block-time-offset-us", "cpuEffortPercent": 100, + "_cpuEffortPercentNodeosDefault": 80, + "_cpuEffortPercentNodeosArg": "--cpu-effort-percent", "lastBlockCpuEffortPercent": 100, - "producerThreads": 6 + "_lastBlockCpuEffortPercentNodeosDefault": 80, + "_lastBlockCpuEffortPercentNodeosArg": "--last-block-cpu-effort-percent", + "maxBlockCpuUsageThresholdUs": null, + "_maxBlockCpuUsageThresholdUsNodeosDefault": 5000, + "_maxBlockCpuUsageThresholdUsNodeosArg": "--max-block-cpu-usage-threshold-us", + "maxBlockNetUsageThresholdBytes": null, + "_maxBlockNetUsageThresholdBytesNodeosDefault": 1024, + "_maxBlockNetUsageThresholdBytesNodeosArg": "--max-block-net-usage-threshold-bytes", + "maxScheduledTransactionTimePerBlockMs": null, + "_maxScheduledTransactionTimePerBlockMsNodeosDefault": 100, + "_maxScheduledTransactionTimePerBlockMsNodeosArg": 
"--max-scheduled-transaction-time-per-block-ms", + "subjectiveCpuLeewayUs": null, + "_subjectiveCpuLeewayUsNodeosDefault": 31000, + "_subjectiveCpuLeewayUsNodeosArg": "--subjective-cpu-leeway-us", + "subjectiveAccountMaxFailures": null, + "_subjectiveAccountMaxFailuresNodeosDefault": 3, + "_subjectiveAccountMaxFailuresNodeosArg": "--subjective-account-max-failures", + "subjectiveAccountDecayTimeMinutes": null, + "_subjectiveAccountDecayTimeMinutesNodeosDefault": 1440, + "_subjectiveAccountDecayTimeMinutesNodeosArg": "--subjective-account-decay-time-minutes", + "incomingDeferRatio": null, + "_incomingDeferRatioNodeosDefault": 1, + "_incomingDeferRatioNodeosArg": "--incoming-defer-ratio", + "incomingTransactionQueueSizeMb": null, + "_incomingTransactionQueueSizeMbNodeosDefault": 1024, + "_incomingTransactionQueueSizeMbNodeosArg": "--incoming-transaction-queue-size-mb", + "disableSubjectiveBilling": true, + "_disableSubjectiveBillingNodeosDefault": 1, + "_disableSubjectiveBillingNodeosArg": "--disable-subjective-billing", + "disableSubjectiveAccountBilling": null, + "_disableSubjectiveAccountBillingNodeosDefault": false, + "_disableSubjectiveAccountBillingNodeosArg": "--disable-subjective-account-billing", + "disableSubjectiveP2pBilling": null, + "_disableSubjectiveP2pBillingNodeosDefault": 1, + "_disableSubjectiveP2pBillingNodeosArg": "--disable-subjective-p2p-billing", + "disableSubjectiveApiBilling": null, + "_disableSubjectiveApiBillingNodeosDefault": 1, + "_disableSubjectiveApiBillingNodeosArg": "--disable-subjective-api-billing", + "producerThreads": 2, + "_producerThreadsNodeosDefault": 2, + "_producerThreadsNodeosArg": "--producer-threads", + "snapshotsDir": null, + "_snapshotsDirNodeosDefault": "\"snapshots\"", + "_snapshotsDirNodeosArg": "--snapshots-dir" }, - "httpPluginArgs": { - "httpMaxResponseTimeMs": 990000 + "resourceMonitorPluginArgs": { + "_pluginNamespace": "eosio", + "_pluginName": "resource_monitor_plugin", + "resourceMonitorIntervalSeconds": 
null, + "_resourceMonitorIntervalSecondsNodeosDefault": 2, + "_resourceMonitorIntervalSecondsNodeosArg": "--resource-monitor-interval-seconds", + "resourceMonitorSpaceThreshold": null, + "_resourceMonitorSpaceThresholdNodeosDefault": 90, + "_resourceMonitorSpaceThresholdNodeosArg": "--resource-monitor-space-threshold", + "resourceMonitorNotShutdownOnThresholdExceeded": null, + "_resourceMonitorNotShutdownOnThresholdExceededNodeosDefault": false, + "_resourceMonitorNotShutdownOnThresholdExceededNodeosArg": "--resource-monitor-not-shutdown-on-threshold-exceeded", + "resourceMonitorWarningInterval": null, + "_resourceMonitorWarningIntervalNodeosDefault": 30, + "_resourceMonitorWarningIntervalNodeosArg": "--resource-monitor-warning-interval" }, - "netPluginArgs": { - "netThreads": 2 + "signatureProviderPluginArgs": { + "_pluginNamespace": "eosio", + "_pluginName": "signature_provider_plugin", + "keosdProviderTimeout": null, + "_keosdProviderTimeoutNodeosDefault": 5, + "_keosdProviderTimeoutNodeosArg": "--keosd-provider-timeout" + }, + "stateHistoryPluginArgs": { + "_pluginNamespace": "eosio", + "_pluginName": "state_history_plugin", + "stateHistoryDir": null, + "_stateHistoryDirNodeosDefault": "\"state-history\"", + "_stateHistoryDirNodeosArg": "--state-history-dir", + "traceHistory": null, + "_traceHistoryNodeosDefault": false, + "_traceHistoryNodeosArg": "--trace-history", + "chainStateHistory": null, + "_chainStateHistoryNodeosDefault": false, + "_chainStateHistoryNodeosArg": "--chain-state-history", + "stateHistoryEndpoint": null, + "_stateHistoryEndpointNodeosDefault": "127.0.0.1:8080", + "_stateHistoryEndpointNodeosArg": "--state-history-endpoint", + "stateHistoryUnixSocketPath": null, + "_stateHistoryUnixSocketPathNodeosDefault": null, + "_stateHistoryUnixSocketPathNodeosArg": "--state-history-unix-socket-path", + "traceHistoryDebugMode": null, + "_traceHistoryDebugModeNodeosDefault": false, + "_traceHistoryDebugModeNodeosArg": "--trace-history-debug-mode", + 
"stateHistoryLogRetainBlocks": null, + "_stateHistoryLogRetainBlocksNodeosDefault": null, + "_stateHistoryLogRetainBlocksNodeosArg": "--state-history-log-retain-blocks", + "deleteStateHistory": null, + "_deleteStateHistoryNodeosDefault": false, + "_deleteStateHistoryNodeosArg": "--delete-state-history" + }, + "traceApiPluginArgs": { + "_pluginNamespace": "eosio", + "_pluginName": "trace_api_plugin", + "traceDir": null, + "_traceDirNodeosDefault": "\"traces\"", + "_traceDirNodeosArg": "--trace-dir", + "traceSliceStride": null, + "_traceSliceStrideNodeosDefault": 10000, + "_traceSliceStrideNodeosArg": "--trace-slice-stride", + "traceMinimumIrreversibleHistoryBlocks": null, + "_traceMinimumIrreversibleHistoryBlocksNodeosDefault": -1, + "_traceMinimumIrreversibleHistoryBlocksNodeosArg": "--trace-minimum-irreversible-history-blocks", + "traceMinimumUncompressedIrreversibleHistoryBlocks": null, + "_traceMinimumUncompressedIrreversibleHistoryBlocksNodeosDefault": -1, + "_traceMinimumUncompressedIrreversibleHistoryBlocksNodeosArg": "--trace-minimum-uncompressed-irreversible-history-blocks", + "traceRpcAbi": null, + "_traceRpcAbiNodeosDefault": null, + "_traceRpcAbiNodeosArg": "--trace-rpc-abi", + "traceNoAbis": null, + "_traceNoAbisNodeosDefault": false, + "_traceNoAbisNodeosArg": "--trace-no-abis" } }, + "specifiedContract": { + "accountName": "eosio", + "ownerPrivateKey": "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3", + "ownerPublicKey": "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "activePrivateKey": "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3", + "activePublicKey": "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "contractDir": "unittests/contracts/eosio.system", + "wasmFile": "eosio.system.wasm", + "abiFile": "eosio.system.abi" + }, "useBiosBootFile": false, "genesisPath": "tests/performance_tests/genesis.json", "maximumP2pPerHost": 5000, "maximumClients": 0, + "loggingLevel": "info", "loggingDict": { "bios": "off" }, 
"prodsEnableTraceApi": false, + "nodeosVers": "v4", "specificExtraNodeosArgs": { "1": "--plugin eosio::trace_api_plugin" }, @@ -874,16 +1284,16 @@ Finally, the full detail test report for each of the determined max TPS throughp "calcProducerThreads": "lmax", "calcChainThreads": "lmax", "calcNetThreads": "lmax", - "logDirBase": "./performance_test", - "logDirTimestamp": "2022-11-23_12-56-58", - "logDirPath": "./performance_test/2022-11-23_12-56-58", - "ptbLogsDirPath": "./performance_test/2022-11-23_12-56-58/testRunLogs", - "pluginThreadOptLogsDirPath": "./performance_test/2022-11-23_12-56-58/pluginThreadOptRunLogs" + "logDirBase": "p", + "logDirTimestamp": "2023-02-22_15-17-12", + "logDirPath": "p/2023-02-22_15-17-12", + "ptbLogsDirPath": "p/2023-02-22_15-17-12/testRunLogs", + "pluginThreadOptLogsDirPath": "p/2023-02-22_15-17-12/pluginThreadOptRunLogs" }, "env": { "system": "Linux", "os": "posix", - "release": "5.15.74.2-microsoft-standard-WSL2", + "release": "5.15.79.1-microsoft-standard-WSL2", "logical_cpu_count": 16 }, "nodeosVersion": "v4.0.0-dev" @@ -902,80 +1312,71 @@ The Performance Test Basic generates, by default, a report that details results ``` json { "completedRun": true, - "testStart": "2023-02-17T22:00:41.305618", - "testFinish": "2023-02-17T22:02:21.597430", + "testStart": "2023-02-22T17:14:24.998249", + "testFinish": "2023-02-22T17:15:43.248732", "Analysis": { "BlockSize": { - "min": 925248, - "max": 1551936, - "avg": 1332244.3636363635, - "sigma": 144713.34505483133, + "min": 1310400, + "max": 1619520, + "avg": 1484092.2352941176, + "sigma": 67283.84495512008, "emptyBlocks": 0, - "numBlocks": 44 + "numBlocks": 17 }, "BlocksGuide": { "firstBlockNum": 2, - "lastBlockNum": 193, - "totalBlocks": 192, + "lastBlockNum": 153, + "totalBlocks": 152, "testStartBlockNum": 112, - "testEndBlockNum": 160, + "testEndBlockNum": 142, "setupBlocksCnt": 110, - "tearDownBlocksCnt": 33, + "tearDownBlocksCnt": 11, "leadingEmptyBlocksCnt": 1, - "trailingEmptyBlocksCnt": 
0, + "trailingEmptyBlocksCnt": 9, "configAddlDropCnt": 2, - "testAnalysisBlockCnt": 44 + "testAnalysisBlockCnt": 17 }, "TPS": { - "min": 10265, - "max": 15774, - "avg": 13882.232558139534, - "sigma": 1454.0837894863364, + "min": 14579, + "max": 16242, + "avg": 15482.375, + "sigma": 381.4460766281389, "emptyBlocks": 0, - "numBlocks": 44, - "configTps": 50000, + "numBlocks": 17, + "configTps": 15500, "configTestDuration": 10, "tpsPerGenerator": [ - 3846, - 3846, - 3846, - 3846, - 3846, - 3846, - 3846, - 3846, - 3846, - 3846, - 3846, - 3847, - 3847 + 3875, + 3875, + 3875, + 3875 ], - "generatorCount": 13 + "generatorCount": 4 }, "TrxCPU": { "min": 6.0, - "max": 15292.0, - "avg": 25.024962251222377, - "sigma": 49.9778703823556, - "samples": 322527 + "max": 3501.0, + "avg": 22.077154838709678, + "sigma": 15.627253758549179, + "samples": 155000 }, "TrxLatency": { - "min": 0.11500000953674316, - "max": 16.91100001335144, - "avg": 8.950405516519615, - "sigma": 4.844012708597167, - "samples": 322527 + "min": 0.0009999275207519531, + "max": 0.5750000476837158, + "avg": 0.2690663419123619, + "sigma": 0.14536933582820064, + "samples": 155000 }, "TrxNet": { "min": 24.0, "max": 24.0, "avg": 24.0, "sigma": 0.0, - "samples": 322527 + "samples": 155000 }, "DroppedBlocks": {}, "DroppedBlocksCount": 0, - "DroppedTransactions": 177473, + "DroppedTransactions": 0, "ProductionWindowsTotal": 0, "ProductionWindowsAverageSize": 0, "ProductionWindowsMissed": 0, @@ -983,10 +1384,10 @@ The Performance Test Basic generates, by default, a report that details results "ForksCount": 0 }, "args": { - "rawCmdLine ": "tests/performance_tests/performance_test.py --test-iteration-duration-sec 10 --final-iterations-duration-sec 30 --calc-producer-threads lmax --calc-chain-threads lmax --calc-net-threads lmax", + "rawCmdLine ": "./tests/performance_tests/performance_test.py --test-iteration-duration-sec 10 --final-iterations-duration-sec 30 --calc-producer-threads lmax --calc-chain-threads lmax 
--calc-net-threads lmax", "killAll": false, "dontKill": false, - "keepLogs": false, + "keepLogs": true, "dumpErrorDetails": false, "delay": 1, "nodesFile": null, @@ -1478,26 +1879,26 @@ The Performance Test Basic generates, by default, a report that details results "1": "--plugin eosio::trace_api_plugin" }, "_totalNodes": 2, - "targetTps": 50000, + "targetTps": 15500, "testTrxGenDurationSec": 10, "tpsLimitPerGenerator": 4000, "numAddlBlocksToPrune": 2, - "logDirRoot": "p/2023-02-17_22-00-41/pluginThreadOptRunLogs", - "delReport": true, + "logDirRoot": "p/2023-02-22_15-17-12/testRunLogs", + "delReport": false, "quiet": false, - "delPerfLogs": true, - "expectedTransactionsSent": 500000, + "delPerfLogs": false, + "expectedTransactionsSent": 155000, "printMissingTransactions": false, "userTrxDataFile": null, - "logDirBase": "p/2023-02-17_22-00-41/pluginThreadOptRunLogs/p", - "logDirTimestamp": "2023-02-17_22-00-41", - "logDirTimestampedOptSuffix": "-50000", - "logDirPath": "p/2023-02-17_22-00-41/pluginThreadOptRunLogs/p/2023-02-17_22-00-41-50000" + "logDirBase": "p/2023-02-22_15-17-12/testRunLogs/p", + "logDirTimestamp": "2023-02-22_17-14-24", + "logDirTimestampedOptSuffix": "-15500", + "logDirPath": "p/2023-02-22_15-17-12/testRunLogs/p/2023-02-22_17-14-24-15500" }, "env": { "system": "Linux", "os": "posix", - "release": "5.10.16.3-microsoft-standard-WSL2", + "release": "5.15.79.1-microsoft-standard-WSL2", "logical_cpu_count": 16 }, "nodeosVersion": "v4.0.0-dev" From 3fe0fdf357bdade06d7b8e4c5ec2c22bb0cbad86 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 22 Feb 2023 12:19:51 -0600 Subject: [PATCH 123/178] Update docs for trx_generator README. 
--- tests/trx_generator/README.md | 50 +++++++++++++++++++++++------------ 1 file changed, 33 insertions(+), 17 deletions(-) diff --git a/tests/trx_generator/README.md b/tests/trx_generator/README.md index 1d9006a98e..26a15bdec7 100644 --- a/tests/trx_generator/README.md +++ b/tests/trx_generator/README.md @@ -12,36 +12,52 @@ The Transaction Generator logs each transaction's id and sent timestamp at the m
Expand Argument List +* `--generator-id arg` (=0) Id for the transaction generator. + Allowed range (0-960). Defaults to 0. * `--chain-id arg` set the chain id -* `--handler-account arg` Account name of the handler account for - the transfer actions -* `--accounts arg` comma-separated list of accounts that - will be used for transfers. Minimum +* `--contract-owner-account arg` Account name of the contract account + for the transaction actions +* `--accounts arg` comma-separated list of accounts that + will be used for transfers. Minimum required accounts: 2. * `--priv-keys arg` comma-separated list of private keys in - same order of accounts list that will - be used to sign transactions. Minimum + same order of accounts list that will + be used to sign transactions. Minimum required: 2. * `--trx-expiration arg` (=3600) transaction expiration time in seconds. - Defaults to 3,600. Maximum allowed: + Defaults to 3,600. Maximum allowed: 3,600 -* `--trx-gen-duration arg` (=60) Transaction generation duration +* `--trx-gen-duration arg` (=60) Transaction generation duration (seconds). Defaults to 60 seconds. -* `--target-tps arg` (=1) Target transactions per second to - generate/send. Defaults to 1 +* `--target-tps arg` (=1) Target transactions per second to + generate/send. Defaults to 1 transaction per second. * `--last-irreversible-block-id arg` Current last-irreversible-block-id (LIB ID) to use for transactions. * `--monitor-spinup-time-us arg` (=1000000) - Number of microseconds to wait before - monitoring TPS. Defaults to 1000000 + Number of microseconds to wait before + monitoring TPS. Defaults to 1000000 (1s). -* `--monitor-max-lag-percent arg` (=5) Max percentage off from expected - transactions sent before being in +* `--monitor-max-lag-percent arg` (=5) Max percentage off from expected + transactions sent before being in violation. Defaults to 5. 
* `--monitor-max-lag-duration-us arg` (=1000000) - Max microseconds that transaction - generation can be in violation before + Max microseconds that transaction + generation can be in violation before quitting. Defaults to 1000000 (1s). * `--log-dir arg` set the logs directory -
\ No newline at end of file +* `--abi-file arg` The path to the contract abi file to + use for the supplied transaction action + data +* `--actions-data arg` The json actions data file or json + actions data description string to use +* `--actions-auths arg` The json actions auth file or json + actions auths description string to + use, containting authAcctName to + activePrivateKey pairs. +* `--peer-endpoint arg` (=127.0.0.1) set the peer endpoint to send + transactions to +* `--port arg` (=9876) set the peer endpoint port to send + transactions to +* `-h [ --help ]` print this list + From 2fd6ad6057ce86df41f2e74bf810c87a9f604b15 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Wed, 22 Feb 2023 12:37:52 -0600 Subject: [PATCH 124/178] Unwind shutdown changes as cluster killall will handle the transactiongeneratorlauncher now that it is set --- tests/TestHarness/TestHelper.py | 7 +------ tests/nodeos_contrl_c_test.py | 4 ++-- tests/nodeos_snapshot_diff_test.py | 2 +- tests/performance_tests/log_reader.py | 2 +- tests/performance_tests/performance_test_basic.py | 3 +-- 5 files changed, 6 insertions(+), 12 deletions(-) diff --git a/tests/TestHarness/TestHelper.py b/tests/TestHarness/TestHelper.py index f1504f8a60..2fac4e6919 100644 --- a/tests/TestHarness/TestHelper.py +++ b/tests/TestHarness/TestHelper.py @@ -149,14 +149,12 @@ def printSystemInfo(prefix): @staticmethod # pylint: disable=too-many-arguments - def shutdown(cluster, walletMgr, testSuccessful=True, killEosInstances=True, killWallet=True, keepLogs=False, cleanRun=True, dumpErrorDetails=False, trxGenLauncher=None): + def shutdown(cluster, walletMgr, testSuccessful=True, killEosInstances=True, killWallet=True, keepLogs=False, cleanRun=True, dumpErrorDetails=False): """Cluster and WalletMgr shutdown and cleanup.""" assert(cluster) assert(isinstance(cluster, Cluster)) if walletMgr: assert(isinstance(walletMgr, WalletMgr)) - if trxGenLauncher is not None: - assert(isinstance(trxGenLauncher, 
TransactionGeneratorsLauncher)) assert(isinstance(testSuccessful, bool)) assert(isinstance(killEosInstances, bool)) assert(isinstance(killWallet, bool)) @@ -202,6 +200,3 @@ def reportProductionAnalysis(thresholdMs): Utils.Print("Cleanup wallet data.") walletMgr.cleanup() - if trxGenLauncher is not None: - Utils.Print("Shut down the TransactionGeneratorsLauncher and TransactionGenerators") - trxGenLauncher.killAll() diff --git a/tests/nodeos_contrl_c_test.py b/tests/nodeos_contrl_c_test.py index 9ccaf631dd..2960b339ec 100755 --- a/tests/nodeos_contrl_c_test.py +++ b/tests/nodeos_contrl_c_test.py @@ -112,11 +112,11 @@ testSuccessful = nonProdNode.kill(signal.SIGTERM) if not testSuccessful: - TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=True, killWallet=True, keepLogs=True, cleanRun=True, dumpErrorDetails=True, trxGenLauncher=cluster.trxGenLauncher) + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=True, killWallet=True, keepLogs=True, cleanRun=True, dumpErrorDetails=True) errorExit("Failed to kill the seed node") finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=True, killWallet=True, keepLogs=True, cleanRun=True, dumpErrorDetails=True, trxGenLauncher=cluster.trxGenLauncher) + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=True, killWallet=True, keepLogs=True, cleanRun=True, dumpErrorDetails=True) errorCode = 0 if testSuccessful else 1 exit(errorCode) \ No newline at end of file diff --git a/tests/nodeos_snapshot_diff_test.py b/tests/nodeos_snapshot_diff_test.py index caba2a91c4..13f2570d77 100755 --- a/tests/nodeos_snapshot_diff_test.py +++ b/tests/nodeos_snapshot_diff_test.py @@ -199,7 +199,7 @@ def waitForBlock(node, blockNum, blockType=BlockType.head, timeout=None, reportI testSuccessful=True finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, 
killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails, trxGenLauncher=cluster.trxGenLauncher) + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails) exitCode = 0 if testSuccessful else 1 exit(exitCode) \ No newline at end of file diff --git a/tests/performance_tests/log_reader.py b/tests/performance_tests/log_reader.py index 5448003f70..593d07a5f5 100644 --- a/tests/performance_tests/log_reader.py +++ b/tests/performance_tests/log_reader.py @@ -10,7 +10,7 @@ from pathlib import Path, PurePath sys.path.append(str(PurePath(PurePath(Path(__file__).absolute()).parent).parent)) -from TestHarness import Utils, TransactionGeneratorsLauncher +from TestHarness import Utils from dataclasses import dataclass, asdict, field from platform import release, system from datetime import datetime diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index cb8cacd1c3..cb847f0f8a 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -492,8 +492,7 @@ def runTest(self) -> bool: self.testHelperConfig._killWallet, self.testHelperConfig.keepLogs, self.testHelperConfig.killAll, - self.testHelperConfig.dumpErrorDetails, - trxGenLauncher=self.cluster.trxGenLauncher + self.testHelperConfig.dumpErrorDetails ) if not self.ptbConfig.delPerfLogs: From 1935103562e6850a1d6bf16b44290f5fd5399198 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Wed, 22 Feb 2023 12:42:38 -0600 Subject: [PATCH 125/178] remove shutdown change from nodeos_startup_catchup --- tests/nodeos_startup_catchup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/nodeos_startup_catchup.py b/tests/nodeos_startup_catchup.py index 0660807da4..a358057061 100755 
--- a/tests/nodeos_startup_catchup.py +++ b/tests/nodeos_startup_catchup.py @@ -201,7 +201,7 @@ def waitForNodeStarted(node): testSuccessful=True finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails, trxGenLauncher=cluster.trxGenLauncher) + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails) exitCode = 0 if testSuccessful else 1 exit(exitCode) \ No newline at end of file From c15ed53794df431e3c832a937e21e8226c74a01d Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Wed, 22 Feb 2023 12:43:10 -0600 Subject: [PATCH 126/178] remove unused import --- tests/TestHarness/TestHelper.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/TestHarness/TestHelper.py b/tests/TestHarness/TestHelper.py index 2fac4e6919..01874c6be0 100644 --- a/tests/TestHarness/TestHelper.py +++ b/tests/TestHarness/TestHelper.py @@ -1,7 +1,6 @@ from .testUtils import Utils from .Cluster import Cluster from .WalletMgr import WalletMgr -from .launch_transaction_generators import TransactionGeneratorsLauncher from datetime import datetime import platform From 3f191a9adc38afc1994f240820fc13d8916585b3 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Wed, 22 Feb 2023 14:11:03 -0600 Subject: [PATCH 127/178] update transaction datafiles to match new eosio.mechanics directory layout --- tests/performance_tests/cpuTrxData.json | 2 +- tests/performance_tests/ramTrxData.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/performance_tests/cpuTrxData.json b/tests/performance_tests/cpuTrxData.json index d79b0c185d..6c335b838a 100644 --- a/tests/performance_tests/cpuTrxData.json +++ b/tests/performance_tests/cpuTrxData.json @@ -1,6 +1,6 @@ { "initAccounts": ["c"], - 
"abiFile": "unittests/contracts/eosio.mechanics/contracts/eosmechanics.abi", + "abiFile": "unittests/contracts/eosio.mechanics/eosmechanics.abi", "actions": [ { "actionName": "cpu", diff --git a/tests/performance_tests/ramTrxData.json b/tests/performance_tests/ramTrxData.json index 0968a4dfd3..c14d5d3ae5 100644 --- a/tests/performance_tests/ramTrxData.json +++ b/tests/performance_tests/ramTrxData.json @@ -1,6 +1,6 @@ { "initAccounts": ["r"], - "abiFile": "unittests/contracts/eosio.mechanics/contracts/eosmechanics.abi", + "abiFile": "unittests/contracts/eosio.mechanics/eosmechanics.abi", "actions": [ { "actionName": "ram", From 4f90bf6ff0a63d51f160f40ef974e22921d59345 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 22 Feb 2023 14:18:16 -0600 Subject: [PATCH 128/178] Update log directory structure example. --- tests/performance_tests/README.md | 331 ++++++++++++++++-------------- 1 file changed, 181 insertions(+), 150 deletions(-) diff --git a/tests/performance_tests/README.md b/tests/performance_tests/README.md index 450e2154ad..bf5666263e 100644 --- a/tests/performance_tests/README.md +++ b/tests/performance_tests/README.md @@ -34,158 +34,189 @@ Please refer to [Leap: Build and Install from Source](https://github.com/Antelop Expand Example Directory Structure ``` bash - performance_test/ - └── 2022-10-27_15-28-09 - ├── report.json + p/ + └── 2023-02-22_15-17-12 ├── pluginThreadOptRunLogs - │ ├── performance_test_basic - │ ├── chainThreadResults.txt - │ ├── netThreadResults.txt - │ └── producerThreadResults.txt + │   ├── chainThreadResults.txt + │   ├── netThreadResults.txt + │   ├── p + │   └── producerThreadResults.txt + ├── report.json └── testRunLogs - └── performance_test_basic - └── 2022-10-19_10-29-07 - ├── blockDataLogs - │ ├── blockData.txt - │ └── blockTrxData.txt - ├── data.json - ├── etc - │ └── eosio - │ ├── launcher - │ │ └── testnet.template - │ ├── node_00 - │ │ ├── config.ini - │ │ ├── genesis.json - │ │ ├── logging.json - │ │ └── 
protocol_features - │ │ ├── BUILTIN-ACTION_RETURN_VALUE.json - │ │ ├── BUILTIN-BLOCKCHAIN_PARAMETERS.json - │ │ ├── BUILTIN-CONFIGURABLE_WASM_LIMITS2.json - │ │ ├── BUILTIN-CRYPTO_PRIMITIVES.json - │ │ ├── BUILTIN-DISALLOW_EMPTY_PRODUCER_SCHEDULE.json - │ │ ├── BUILTIN-FIX_LINKAUTH_RESTRICTION.json - │ │ ├── BUILTIN-FORWARD_SETCODE.json - │ │ ├── BUILTIN-GET_BLOCK_NUM.json - │ │ ├── BUILTIN-GET_CODE_HASH.json - │ │ ├── BUILTIN-GET_SENDER.json - │ │ ├── BUILTIN-NO_DUPLICATE_DEFERRED_ID.json - │ │ ├── BUILTIN-ONLY_BILL_FIRST_AUTHORIZER.json - │ │ ├── BUILTIN-ONLY_LINK_TO_EXISTING_PERMISSION.json - │ │ ├── BUILTIN-PREACTIVATE_FEATURE.json - │ │ ├── BUILTIN-RAM_RESTRICTIONS.json - │ │ ├── BUILTIN-REPLACE_DEFERRED.json - │ │ ├── BUILTIN-RESTRICT_ACTION_TO_SELF.json - │ │ ├── BUILTIN-WEBAUTHN_KEY.json - │ │ └── BUILTIN-WTMSIG_BLOCK_SIGNATURES.json - │ ├── node_01 - │ │ ├── config.ini - │ │ ├── genesis.json - │ │ ├── logging.json - │ │ └── protocol_features - │ │ ├── BUILTIN-ACTION_RETURN_VALUE.json - │ │ ├── BUILTIN-BLOCKCHAIN_PARAMETERS.json - │ │ ├── BUILTIN-CONFIGURABLE_WASM_LIMITS2.json - │ │ ├── BUILTIN-CRYPTO_PRIMITIVES.json - │ │ ├── BUILTIN-DISALLOW_EMPTY_PRODUCER_SCHEDULE.json - │ │ ├── BUILTIN-FIX_LINKAUTH_RESTRICTION.json - │ │ ├── BUILTIN-FORWARD_SETCODE.json - │ │ ├── BUILTIN-GET_BLOCK_NUM.json - │ │ ├── BUILTIN-GET_CODE_HASH.json - │ │ ├── BUILTIN-GET_SENDER.json - │ │ ├── BUILTIN-NO_DUPLICATE_DEFERRED_ID.json - │ │ ├── BUILTIN-ONLY_BILL_FIRST_AUTHORIZER.json - │ │ ├── BUILTIN-ONLY_LINK_TO_EXISTING_PERMISSION.json - │ │ ├── BUILTIN-PREACTIVATE_FEATURE.json - │ │ ├── BUILTIN-RAM_RESTRICTIONS.json - │ │ ├── BUILTIN-REPLACE_DEFERRED.json - │ │ ├── BUILTIN-RESTRICT_ACTION_TO_SELF.json - │ │ ├── BUILTIN-WEBAUTHN_KEY.json - │ │ └── BUILTIN-WTMSIG_BLOCK_SIGNATURES.json - │ └── node_bios - │ ├── config.ini - │ ├── genesis.json - │ ├── logging.json - │ └── protocol_features - │ ├── BUILTIN-ACTION_RETURN_VALUE.json - │ ├── BUILTIN-BLOCKCHAIN_PARAMETERS.json - │ ├── 
BUILTIN-CONFIGURABLE_WASM_LIMITS2.json - │ ├── BUILTIN-CRYPTO_PRIMITIVES.json - │ ├── BUILTIN-DISALLOW_EMPTY_PRODUCER_SCHEDULE.json - │ ├── BUILTIN-FIX_LINKAUTH_RESTRICTION.json - │ ├── BUILTIN-FORWARD_SETCODE.json - │ ├── BUILTIN-GET_BLOCK_NUM.json - │ ├── BUILTIN-GET_CODE_HASH.json - │ ├── BUILTIN-GET_SENDER.json - │ ├── BUILTIN-NO_DUPLICATE_DEFERRED_ID.json - │ ├── BUILTIN-ONLY_BILL_FIRST_AUTHORIZER.json - │ ├── BUILTIN-ONLY_LINK_TO_EXISTING_PERMISSION.json - │ ├── BUILTIN-PREACTIVATE_FEATURE.json - │ ├── BUILTIN-RAM_RESTRICTIONS.json - │ ├── BUILTIN-REPLACE_DEFERRED.json - │ ├── BUILTIN-RESTRICT_ACTION_TO_SELF.json - │ ├── BUILTIN-WEBAUTHN_KEY.json - │ └── BUILTIN-WTMSIG_BLOCK_SIGNATURES.json - ├── trxGenLogs - │ ├── trx_data_output_26451.txt - │ ├── trx_data_output_26452.txt - │ ├── trx_data_output_26453.txt - │ └── trx_data_output_26454.txt - └── var - └── var - ├── lib - │ ├── node_00 - │ │ ├── blocks - │ │ │ ├── blocks.index - │ │ │ ├── blocks.log - │ │ │ └── reversible - │ │ ├── nodeos.pid - │ │ ├── snapshots - │ │ ├── state - │ │ │ └── shared_memory.bin - │ │ ├── stderr.2022_10_27_10_49_01.txt - │ │ ├── stderr.txt -> stderr.2022_10_27_10_49_01.txt - │ │ └── stdout.txt - │ ├── node_01 - │ │ ├── blocks - │ │ │ ├── blocks.index - │ │ │ ├── blocks.log - │ │ │ └── reversible - │ │ ├── nodeos.pid - │ │ ├── snapshots - │ │ ├── state - │ │ │ └── shared_memory.bin - │ │ ├── stderr.2022_10_27_10_49_01.txt - │ │ ├── stderr.txt -> stderr.2022_10_27_10_49_01.txt - │ │ ├── stdout.txt - │ │ └── traces - │ │ ├── trace_0000000000-0000010000.log - │ │ ├── trace_index_0000000000-0000010000.log - │ │ └── trace_trx_id_0000000000-0000010000.log - │ └── node_bios - │ ├── blocks - │ │ ├── blocks.index - │ │ ├── blocks.log - │ │ └── reversible - │ │ └── fork_db.dat - │ ├── nodeos.pid - │ ├── snapshots - │ ├── state - │ │ └── shared_memory.bin - │ ├── stderr.2022_10_27_10_49_01.txt - │ ├── stderr.txt -> stderr.2022_10_27_10_49_01.txt - │ ├── stdout.txt - │ └── traces - │ ├── 
trace_0000000000-0000010000.log - │ ├── trace_index_0000000000-0000010000.log - │ └── trace_trx_id_0000000000-0000010000.log - ├── test_keosd_err.log - ├── test_keosd_out.log - └── test_wallet_0 - ├── config.ini - ├── default.wallet - ├── ignition.wallet - ├── keosd.sock - └── wallet.lock + └── p + ├── 2023-02-22_17-04-36-50000 + │   ├── blockDataLogs + │   │   ├── blockData.txt + │   │   ├── blockTrxData.txt + │   │   └── transaction_metrics.csv + │   ├── data.json + │   ├── etc + │   │   └── eosio + │   │   ├── launcher + │   │   │   └── testnet.template + │   │   ├── node_00 + │   │   │   ├── config.ini + │   │   │   ├── genesis.json + │   │   │   ├── logging.json + │   │   │   └── protocol_features + │   │   │   ├── BUILTIN-ACTION_RETURN_VALUE.json + │   │   │   ├── BUILTIN-BLOCKCHAIN_PARAMETERS.json + │   │   │   ├── BUILTIN-CONFIGURABLE_WASM_LIMITS2.json + │   │   │   ├── BUILTIN-CRYPTO_PRIMITIVES.json + │   │   │   ├── BUILTIN-DISALLOW_EMPTY_PRODUCER_SCHEDULE.json + │   │   │   ├── BUILTIN-FIX_LINKAUTH_RESTRICTION.json + │   │   │   ├── BUILTIN-FORWARD_SETCODE.json + │   │   │   ├── BUILTIN-GET_BLOCK_NUM.json + │   │   │   ├── BUILTIN-GET_CODE_HASH.json + │   │   │   ├── BUILTIN-GET_SENDER.json + │   │   │   ├── BUILTIN-NO_DUPLICATE_DEFERRED_ID.json + │   │   │   ├── BUILTIN-ONLY_BILL_FIRST_AUTHORIZER.json + │   │   │   ├── BUILTIN-ONLY_LINK_TO_EXISTING_PERMISSION.json + │   │   │   ├── BUILTIN-PREACTIVATE_FEATURE.json + │   │   │   ├── BUILTIN-RAM_RESTRICTIONS.json + │   │   │   ├── BUILTIN-REPLACE_DEFERRED.json + │   │   │   ├── BUILTIN-RESTRICT_ACTION_TO_SELF.json + │   │   │   ├── BUILTIN-WEBAUTHN_KEY.json + │   │   │   └── BUILTIN-WTMSIG_BLOCK_SIGNATURES.json + │   │   ├── node_01 + │   │   │   ├── config.ini + │   │   │   ├── genesis.json + │   │   │   ├── logging.json + │   │   │   └── protocol_features + │   │   │   ├── BUILTIN-ACTION_RETURN_VALUE.json + │   │   │   ├── BUILTIN-BLOCKCHAIN_PARAMETERS.json + │   │   │   ├── 
BUILTIN-CONFIGURABLE_WASM_LIMITS2.json + │   │   │   ├── BUILTIN-CRYPTO_PRIMITIVES.json + │   │   │   ├── BUILTIN-DISALLOW_EMPTY_PRODUCER_SCHEDULE.json + │   │   │   ├── BUILTIN-FIX_LINKAUTH_RESTRICTION.json + │   │   │   ├── BUILTIN-FORWARD_SETCODE.json + │   │   │   ├── BUILTIN-GET_BLOCK_NUM.json + │   │   │   ├── BUILTIN-GET_CODE_HASH.json + │   │   │   ├── BUILTIN-GET_SENDER.json + │   │   │   ├── BUILTIN-NO_DUPLICATE_DEFERRED_ID.json + │   │   │   ├── BUILTIN-ONLY_BILL_FIRST_AUTHORIZER.json + │   │   │   ├── BUILTIN-ONLY_LINK_TO_EXISTING_PERMISSION.json + │   │   │   ├── BUILTIN-PREACTIVATE_FEATURE.json + │   │   │   ├── BUILTIN-RAM_RESTRICTIONS.json + │   │   │   ├── BUILTIN-REPLACE_DEFERRED.json + │   │   │   ├── BUILTIN-RESTRICT_ACTION_TO_SELF.json + │   │   │   ├── BUILTIN-WEBAUTHN_KEY.json + │   │   │   └── BUILTIN-WTMSIG_BLOCK_SIGNATURES.json + │   │   └── node_bios + │   │   ├── config.ini + │   │   ├── genesis.json + │   │   ├── logging.json + │   │   └── protocol_features + │   │   ├── BUILTIN-ACTION_RETURN_VALUE.json + │   │   ├── BUILTIN-BLOCKCHAIN_PARAMETERS.json + │   │   ├── BUILTIN-CONFIGURABLE_WASM_LIMITS2.json + │   │   ├── BUILTIN-CRYPTO_PRIMITIVES.json + │   │   ├── BUILTIN-DISALLOW_EMPTY_PRODUCER_SCHEDULE.json + │   │   ├── BUILTIN-FIX_LINKAUTH_RESTRICTION.json + │   │   ├── BUILTIN-FORWARD_SETCODE.json + │   │   ├── BUILTIN-GET_BLOCK_NUM.json + │   │   ├── BUILTIN-GET_CODE_HASH.json + │   │   ├── BUILTIN-GET_SENDER.json + │   │   ├── BUILTIN-NO_DUPLICATE_DEFERRED_ID.json + │   │   ├── BUILTIN-ONLY_BILL_FIRST_AUTHORIZER.json + │   │   ├── BUILTIN-ONLY_LINK_TO_EXISTING_PERMISSION.json + │   │   ├── BUILTIN-PREACTIVATE_FEATURE.json + │   │   ├── BUILTIN-RAM_RESTRICTIONS.json + │   │   ├── BUILTIN-REPLACE_DEFERRED.json + │   │   ├── BUILTIN-RESTRICT_ACTION_TO_SELF.json + │   │   ├── BUILTIN-WEBAUTHN_KEY.json + │   │   └── BUILTIN-WTMSIG_BLOCK_SIGNATURES.json + │   ├── trxGenLogs + │   │   ├── first_trx_12330.txt + │   │   ├── 
first_trx_12331.txt + │   │   ├── first_trx_12332.txt + │   │   ├── first_trx_12333.txt + │   │   ├── first_trx_12334.txt + │   │   ├── first_trx_12335.txt + │   │   ├── first_trx_12336.txt + │   │   ├── first_trx_12337.txt + │   │   ├── first_trx_12338.txt + │   │   ├── first_trx_12339.txt + │   │   ├── first_trx_12340.txt + │   │   ├── first_trx_12341.txt + │   │   ├── first_trx_12342.txt + │   │   ├── trx_data_output_12330.txt + │   │   ├── trx_data_output_12331.txt + │   │   ├── trx_data_output_12332.txt + │   │   ├── trx_data_output_12333.txt + │   │   ├── trx_data_output_12334.txt + │   │   ├── trx_data_output_12335.txt + │   │   ├── trx_data_output_12336.txt + │   │   ├── trx_data_output_12337.txt + │   │   ├── trx_data_output_12338.txt + │   │   ├── trx_data_output_12339.txt + │   │   ├── trx_data_output_12340.txt + │   │   ├── trx_data_output_12341.txt + │   │   └── trx_data_output_12342.txt + │   └── var + │   └── var + │   ├── lib + │   │   ├── node_00 + │   │   │   ├── blocks + │   │   │   │   ├── blocks.index + │   │   │   │   ├── blocks.log + │   │   │   │   └── reversible + │   │   │   ├── nodeos.pid + │   │   │   ├── snapshots + │   │   │   ├── state + │   │   │   │   └── shared_memory.bin + │   │   │   ├── stderr.2023_02_22_11_04_36.txt + │   │   │   ├── stderr.txt -> stderr.2023_02_22_11_04_36.txt + │   │   │   └── stdout.txt + │   │   ├── node_01 + │   │   │   ├── blocks + │   │   │   │   ├── blocks.index + │   │   │   │   ├── blocks.log + │   │   │   │   └── reversible + │   │   │   ├── nodeos.pid + │   │   │   ├── snapshots + │   │   │   ├── state + │   │   │   │   └── shared_memory.bin + │   │   │   ├── stderr.2023_02_22_11_04_36.txt + │   │   │   ├── stderr.txt -> stderr.2023_02_22_11_04_36.txt + │   │   │   ├── stdout.txt + │   │   │   └── traces + │   │   │   ├── trace_0000000000-0000010000.log + │   │   │   ├── trace_index_0000000000-0000010000.log + │   │   │   └── trace_trx_id_0000000000-0000010000.log + │   │   └── node_bios + │   │   
├── blocks + │   │   │   ├── blocks.index + │   │   │   ├── blocks.log + │   │   │   └── reversible + │   │   │   └── fork_db.dat + │   │   ├── nodeos.pid + │   │   ├── snapshots + │   │   ├── state + │   │   │   └── shared_memory.bin + │   │   ├── stderr.2023_02_22_11_04_36.txt + │   │   ├── stderr.txt -> stderr.2023_02_22_11_04_36.txt + │   │   ├── stdout.txt + │   │   └── traces + │   │   ├── trace_0000000000-0000010000.log + │   │   ├── trace_index_0000000000-0000010000.log + │   │   └── trace_trx_id_0000000000-0000010000.log + │   ├── subprocess_results.log + │   ├── test_keosd_err.log + │   ├── test_keosd_out.log + │   └── test_wallet_0 + │   ├── config.ini + │   ├── default.wallet + │   ├── ignition.wallet + │   ├── keosd.sock + │   └── wallet.lock + ├── 2023-02-22_17-06-16-25000 + ├── 2023-02-22_17-07-47-12500 + ├── 2023-02-22_17-09-00-19000 + ├── 2023-02-22_17-10-24-16000 + ├── 2023-02-22_17-11-46-14500 + ├── 2023-02-22_17-13-06-15500 + └── 2023-02-22_17-14-24-15500 ``` From fd0c702d18331334ae73e20113989c0ce238c242 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Wed, 22 Feb 2023 14:20:25 -0600 Subject: [PATCH 129/178] remove unavailable options from README for performance test --- tests/performance_tests/README.md | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tests/performance_tests/README.md b/tests/performance_tests/README.md index 6956758633..e078ae46db 100644 --- a/tests/performance_tests/README.md +++ b/tests/performance_tests/README.md @@ -274,8 +274,6 @@ The Performance Harness main script `performance_test.py` can be configured usin In "lmax" mode, producer threads will incrementally be tested until the performance rate ceases to increase with the addition of additional threads. In "full" mode producer threads will incrementally be tested from 2..num logical processors, recording each performance and choosing the local max performance (same value as would be discovered in "lmax" mode). 
Useful for graphing the full performance impact of each available thread. (default: none) * `--account-name` Name of the account to create and assign a contract to -* `--owner-public-key` Owner public key to use with specified account name -* `--active-public-key` Active public key to use with specified account name * `--contract-dir` Path to contract dir * `--wasm-file` WASM file name for contract * `--abi-file` ABI file name for contract @@ -347,8 +345,6 @@ The following scripts are typically used by the Performance Harness main script * `--print-missing-transactions` Toggles if missing transactions are be printed upon test completion. (default: False) * `--account-name` Name of the account to create and assign a contract to -* `--owner-public-key` Owner public key to use with specified account name -* `--active-public-key` Active public key to use with specified account name * `--contract-dir` Path to contract dir * `--wasm-file` WASM file name for contract * `--abi-file` ABI file name for contract From b1ce2718b784e8015226fd238e75e5fb4a6bf0f8 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Wed, 22 Feb 2023 15:40:31 -0600 Subject: [PATCH 130/178] re-report the readme report due to changes, and update performance test to accomodate chanes --- tests/performance_tests/README.md | 72 ++++++++++----------- tests/performance_tests/performance_test.py | 5 +- 2 files changed, 36 insertions(+), 41 deletions(-) diff --git a/tests/performance_tests/README.md b/tests/performance_tests/README.md index dd9604ce4c..152ae14105 100644 --- a/tests/performance_tests/README.md +++ b/tests/performance_tests/README.md @@ -828,37 +828,37 @@ The Performance Test Basic generates, by default, a report that details results ``` json { "completedRun": true, - "testStart": "2023-02-17T22:00:41.305618", - "testFinish": "2023-02-17T22:02:21.597430", + "testStart": "2023-02-22T21:28:58.867999", + "testFinish": "2023-02-22T21:30:39.932852", "Analysis": { "BlockSize": { - "min": 925248, - 
"max": 1551936, - "avg": 1332244.3636363635, - "sigma": 144713.34505483133, + "min": 1069248, + "max": 1521216, + "avg": 1335995.7333333334, + "sigma": 107425.6639278012, "emptyBlocks": 0, - "numBlocks": 44 + "numBlocks": 45 }, "BlocksGuide": { "firstBlockNum": 2, - "lastBlockNum": 193, - "totalBlocks": 192, + "lastBlockNum": 194, + "totalBlocks": 193, "testStartBlockNum": 112, - "testEndBlockNum": 160, + "testEndBlockNum": 161, "setupBlocksCnt": 110, "tearDownBlocksCnt": 33, "leadingEmptyBlocksCnt": 1, "trailingEmptyBlocksCnt": 0, "configAddlDropCnt": 2, - "testAnalysisBlockCnt": 44 + "testAnalysisBlockCnt": 45 }, "TPS": { - "min": 10265, - "max": 15774, - "avg": 13882.232558139534, - "sigma": 1454.0837894863364, + "min": 11982, + "max": 15496, + "avg": 13922.954545454546, + "sigma": 1026.4435279720471, "emptyBlocks": 0, - "numBlocks": 44, + "numBlocks": 45, "configTps": 50000, "configTestDuration": 10, "tpsPerGenerator": [ @@ -879,29 +879,29 @@ The Performance Test Basic generates, by default, a report that details results "generatorCount": 13 }, "TrxCPU": { - "min": 6.0, - "max": 15292.0, - "avg": 25.024962251222377, - "sigma": 49.9778703823556, - "samples": 322527 + "min": 7.0, + "max": 8186.0, + "avg": 24.41423694803685, + "sigma": 32.88703511468667, + "samples": 326924 }, "TrxLatency": { - "min": 0.11500000953674316, - "max": 16.91100001335144, - "avg": 8.950405516519615, - "sigma": 4.844012708597167, - "samples": 322527 + "min": 0.023999929428100586, + "max": 17.144999980926514, + "avg": 9.035915723544147, + "sigma": 4.873430717683421, + "samples": 326924 }, "TrxNet": { "min": 24.0, "max": 24.0, "avg": 24.0, "sigma": 0.0, - "samples": 322527 + "samples": 326924 }, "DroppedBlocks": {}, "DroppedBlocksCount": 0, - "DroppedTransactions": 177473, + "DroppedTransactions": 173076, "ProductionWindowsTotal": 0, "ProductionWindowsAverageSize": 0, "ProductionWindowsMissed": 0, @@ -1381,14 +1381,10 @@ The Performance Test Basic generates, by default, a report that 
details results } }, "specifiedContract": { - "accountName": "eosio", - "ownerPrivateKey": "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3", - "ownerPublicKey": "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "activePrivateKey": "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3", - "activePublicKey": "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", "contractDir": "unittests/contracts/eosio.system", "wasmFile": "eosio.system.wasm", - "abiFile": "eosio.system.abi" + "abiFile": "eosio.system.abi", + "account": "Name: eosio" }, "useBiosBootFile": false, "genesisPath": "tests/performance_tests/genesis.json", @@ -1408,17 +1404,17 @@ The Performance Test Basic generates, by default, a report that details results "testTrxGenDurationSec": 10, "tpsLimitPerGenerator": 4000, "numAddlBlocksToPrune": 2, - "logDirRoot": "p/2023-02-17_22-00-41/pluginThreadOptRunLogs", + "logDirRoot": "p/2023-02-22_21-28-58/pluginThreadOptRunLogs", "delReport": true, "quiet": false, "delPerfLogs": true, "expectedTransactionsSent": 500000, "printMissingTransactions": false, "userTrxDataFile": null, - "logDirBase": "p/2023-02-17_22-00-41/pluginThreadOptRunLogs/p", - "logDirTimestamp": "2023-02-17_22-00-41", + "logDirBase": "p/2023-02-22_21-28-58/pluginThreadOptRunLogs/p", + "logDirTimestamp": "2023-02-22_21-28-58", "logDirTimestampedOptSuffix": "-50000", - "logDirPath": "p/2023-02-17_22-00-41/pluginThreadOptRunLogs/p/2023-02-17_22-00-41-50000" + "logDirPath": "p/2023-02-22_21-28-58/pluginThreadOptRunLogs/p/2023-02-22_21-28-58-50000" }, "env": { "system": "Linux", diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index 47b6369dcd..c99132ad2a 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -12,7 +12,7 @@ sys.path.append(str(PurePath(PurePath(Path(__file__).absolute()).parent).parent)) from NodeosPluginArgs import ChainPluginArgs, HttpPluginArgs, NetPluginArgs, 
ProducerPluginArgs -from TestHarness import TestHelper, Utils +from TestHarness import TestHelper, Utils, Account from performance_test_basic import PerformanceTestBasic, PtbArgumentsHandler from platform import release, system from dataclasses import dataclass, asdict, field @@ -509,8 +509,7 @@ def main(): extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs) testClusterConfig = PerformanceTestBasic.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs, - specifiedContract=PerformanceTestBasic.ClusterConfig.SpecifiedContract(accountName=args.account_name, - ownerPublicKey=args.owner_public_key, activePublicKey=args.active_public_key, + specifiedContract=PerformanceTestBasic.ClusterConfig.SpecifiedContract(account=Account(args.account_name), contractDir=args.contract_dir, wasmFile=args.wasm_file, abiFile=args.abi_file), nodeosVers=Utils.getNodeosVersion().split('.')[0]) From db731420950bc396c4c5a9cdb97e926b0705ae70 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Wed, 22 Feb 2023 18:24:25 -0600 Subject: [PATCH 131/178] Expose a number of nodeos options via the CLI in performance_test_basic. Also hardcode some others. 
--- tests/TestHarness/Cluster.py | 11 +++++- tests/performance_tests/README.md | 38 +++++++++++++++++-- .../performance_test_basic.py | 27 +++++++++---- 3 files changed, 63 insertions(+), 13 deletions(-) diff --git a/tests/TestHarness/Cluster.py b/tests/TestHarness/Cluster.py index c0d66391d6..3d6e29b5bd 100644 --- a/tests/TestHarness/Cluster.py +++ b/tests/TestHarness/Cluster.py @@ -257,8 +257,15 @@ def launch(self, pnodes=1, unstartedNodes=0, totalNodes=1, prodCount=1, topo="me cmdArr=cmd.split() if self.staging: cmdArr.append("--nogen") - - nodeosArgs="--max-transaction-time -1 --abi-serializer-max-time-ms 990000 --p2p-max-nodes-per-host %d --max-clients %d" % (maximumP2pPerHost, maximumClients) + nodeosArgs="" + if extraNodeosArgs.find("--max-transaction-time") == -1: + nodeosArgs += " --max-transaction-time -1" + if extraNodeosArgs.find("--abi-serializer-max-time-ms") == -1: + nodeosArgs += " --abi-serializer-max-time-ms 990000" + if extraNodeosArgs.find("--p2p-max-nodes-per-host") == -1: + nodeosArgs += f" --p2p-max-nodes-per-host {maximumP2pPerHost}" + if extraNodeosArgs.find("--max-clients") == -1: + nodeosArgs += f" --max-clients {maximumClients}" if not self.walletd: nodeosArgs += " --plugin eosio::wallet_api_plugin" if Utils.Debug: diff --git a/tests/performance_tests/README.md b/tests/performance_tests/README.md index 9919bae1d2..543a4c90f7 100644 --- a/tests/performance_tests/README.md +++ b/tests/performance_tests/README.md @@ -226,8 +226,6 @@ The Performance Harness main script `performance_test.py` can be configured usin (default: 2) * `--signature-cpu-billable-pct SIGNATURE_CPU_BILLABLE_PCT` Percentage of actual signature recovery cpu to bill. Whole number percentages, e.g. 
50 for 50% (default: 0) -* `--chain-state-db-size-mb CHAIN_STATE_DB_SIZE_MB` - Maximum size (in MiB) of the chain state database (default: 10240) * `--chain-threads CHAIN_THREADS` Number of worker threads in controller thread pool (default: 3) * `--database-map-mode {mapped,heap,locked}` @@ -279,6 +277,23 @@ The Performance Harness main script `performance_test.py` can be configured usin * `--contract-dir` Path to contract dir * `--wasm-file` WASM file name for contract * `--abi-file` ABI file name for contract +* `--wasm-runtime RUNTIME` + Override default WASM runtime ("eos-vm-jit", "eos-vm") + "eos-vm-jit" : A WebAssembly runtime that compiles WebAssembly code to native x86 code prior to + execution. "eos-vm" : A WebAssembly interpreter. (default: eos-vm-jit) +* `--contracts-console` print contract's output to console (default: False) +* `--eos-vm-oc-cache-size-mb CACHE_SIZE_MiB` + Maximum size (in MiB) of the EOS VM OC code cache (default: 1024) +* `--eos-vm-oc-compile-threads COMPILE_THREADS` + Number of threads to use for EOS VM OC tier-up (default: 1) +* `--eos-vm-oc-enable` + Enable EOS VM OC tier-up runtime (default: False) +* `--block-log-retain-blocks BLOCKS_TO_RETAIN` + If set to greater than 0, periodically prune the block log to + store only configured number of most recent blocks. If set to 0, no blocks are be written to the block log; + block log file is removed after startup. (default: None) +* `--http-threads HTTP_THREADS` + Number of worker threads in http thread pool (default: 2) ### Support Scripts @@ -314,8 +329,6 @@ The following scripts are typically used by the Performance Harness main script of the range of blocks of interest for evaluation. (default: 2) * `--signature-cpu-billable-pct SIGNATURE_CPU_BILLABLE_PCT` Percentage of actual signature recovery cpu to bill. Whole number percentages, e.g. 
50 for 50% (default: 0) -* `--chain-state-db-size-mb CHAIN_STATE_DB_SIZE_MB` - Maximum size (in MiB) of the chain state database (default: 10240) * `--chain-threads CHAIN_THREADS` Number of worker threads in controller thread pool (default: 3) * `--database-map-mode {mapped,heap,locked}` @@ -352,6 +365,23 @@ The following scripts are typically used by the Performance Harness main script * `--contract-dir` Path to contract dir * `--wasm-file` WASM file name for contract * `--abi-file` ABI file name for contract +* `--wasm-runtime RUNTIME` + Override default WASM runtime ("eos-vm-jit", "eos-vm") + "eos-vm-jit" : A WebAssembly runtime that compiles WebAssembly code to native x86 code prior to + execution. "eos-vm" : A WebAssembly interpreter. (default: eos-vm-jit) +* `--contracts-console` print contract's output to console (default: False) +* `--eos-vm-oc-cache-size-mb CACHE_SIZE_MiB` + Maximum size (in MiB) of the EOS VM OC code cache (default: 1024) +* `--eos-vm-oc-compile-threads COMPILE_THREADS` + Number of threads to use for EOS VM OC tier-up (default: 1) +* `--eos-vm-oc-enable` + Enable EOS VM OC tier-up runtime (default: False) +* `--block-log-retain-blocks BLOCKS_TO_RETAIN` + If set to greater than 0, periodically prune the block log to + store only configured number of most recent blocks. If set to 0, no blocks are be written to the block log; + block log file is removed after startup. 
(default: None) +* `--http-threads HTTP_THREADS` + Number of worker threads in http thread pool (default: 2) #### Launch Transaction Generators (TestHarness) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index a8c6941d05..c2f1581c42 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -527,7 +527,6 @@ def createBaseArgumentParser(): ptbBaseParserGroup.add_argument("--num-blocks-to-prune", type=int, help=("The number of potentially non-empty blocks, in addition to leading and trailing size 0 blocks, " "to prune from the beginning and end of the range of blocks of interest for evaluation."), default=2) ptbBaseParserGroup.add_argument("--signature-cpu-billable-pct", type=int, help="Percentage of actual signature recovery cpu to bill. Whole number percentages, e.g. 50 for 50%%", default=0) - ptbBaseParserGroup.add_argument("--chain-state-db-size-mb", type=int, help="Maximum size (in MiB) of the chain state database", default=10*1024) ptbBaseParserGroup.add_argument("--chain-threads", type=int, help="Number of worker threads in controller thread pool", default=2) ptbBaseParserGroup.add_argument("--database-map-mode", type=str, help="Database map mode (\"mapped\", \"heap\", or \"locked\"). \ In \"mapped\" mode database is memory mapped as a file. 
\ @@ -557,6 +556,18 @@ def createBaseArgumentParser(): ptbBaseParserGroup.add_argument("--contract-dir", type=str, help="Path to contract dir", default="unittests/contracts/eosio.system") ptbBaseParserGroup.add_argument("--wasm-file", type=str, help="WASM file name for contract", default="eosio.system.wasm") ptbBaseParserGroup.add_argument("--abi-file", type=str, help="ABI file name for contract", default="eosio.system.abi") + ptbBaseParserGroup.add_argument("--wasm-runtime", type=str, help="Override default WASM runtime (\"eos-vm-jit\", \"eos-vm\")\ + \"eos-vm-jit\" : A WebAssembly runtime that compiles WebAssembly code to native x86 code prior to\ + execution. \"eos-vm\" : A WebAssembly interpreter.", default="eos-vm-jit") + ptbBaseParserGroup.add_argument("--contracts-console", type=bool, help="print contract's output to console", default=False) + ptbBaseParserGroup.add_argument("--eos-vm-oc-cache-size-mb", type=int, help="Maximum size (in MiB) of the EOS VM OC code cache", default=1024) + ptbBaseParserGroup.add_argument("--eos-vm-oc-compile-threads", type=int, help="Number of threads to use for EOS VM OC tier-up", default=1) + ptbBaseParserGroup.add_argument("--eos-vm-oc-enable", type=bool, help="Enable EOS VM OC tier-up runtime", default=False) + ptbBaseParserGroup.add_argument("--block-log-retain-blocks", type=int, help="If set to greater than 0, periodically prune the block log to\ + store only configured number of most recent blocks. 
If set to 0, no blocks are be written to the block log;\ + block log file is removed after startup.", default=None) + ptbBaseParserGroup.add_argument("--http-threads", type=int, help="Number of worker threads in http thread pool", default=2) + return ptbBaseParser @staticmethod @@ -580,6 +591,8 @@ def createArgumentParser(): def parseArgs(): ptbParser=PtbArgumentsHandler.createArgumentParser() args=ptbParser.parse_args() + if args.contracts_console: + print("Enabling contracts-console will not print anything unless debug level is 'debug' or higher.") return args def main(): @@ -590,8 +603,7 @@ def main(): testHelperConfig = PerformanceTestBasic.TestHelperConfig(killAll=args.clean_run, dontKill=args.leave_running, keepLogs=not args.del_perf_logs, dumpErrorDetails=args.dump_error_details, delay=args.d, nodesFile=args.nodes_file, verbose=args.v) - chainPluginArgs = ChainPluginArgs(signatureCpuBillablePct=args.signature_cpu_billable_pct, chainStateDbSizeMb=args.chain_state_db_size_mb, - chainThreads=args.chain_threads, databaseMapMode=args.database_map_mode) + chainPluginArgs = ChainPluginArgs(chainThreads=args.chain_threads, databaseMapMode=args.database_map_mode, abiSerializerMaxTimeMs=990000, chainStateDbSizeMb=256000) lbto = args.last_block_time_offset_us lbcep = args.last_block_cpu_effort_percent @@ -602,11 +614,12 @@ def main(): producerPluginArgs = ProducerPluginArgs(disableSubjectiveBilling=args.disable_subjective_billing, lastBlockTimeOffsetUs=lbto, produceTimeOffsetUs=args.produce_time_offset_us, cpuEffortPercent=args.cpu_effort_percent, lastBlockCpuEffortPercent=lbcep, - producerThreads=args.producer_threads) - httpPluginArgs = HttpPluginArgs(httpMaxResponseTimeMs=args.http_max_response_time_ms) - netPluginArgs = NetPluginArgs(netThreads=args.net_threads) + producerThreads=args.producer_threads, maxTransactionTime=-1) + httpPluginArgs = HttpPluginArgs(httpMaxBytesInFlightMb=-1, httpMaxResponseTimeMs=-1) + netPluginArgs = 
NetPluginArgs(netThreads=args.net_threads, maxClients=0) + resourceMonitorPluginArgs = ResourceMonitorPluginArgs(resourceMonitorNotShutdownOnThresholdExceeded=True) ENA = PerformanceTestBasic.ClusterConfig.ExtraNodeosArgs - extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs) + extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs, resourceMonitorPluginArgs=resourceMonitorPluginArgs) SC = PerformanceTestBasic.ClusterConfig.SpecifiedContract specifiedContract=SC(accountName=args.account_name, ownerPublicKey=args.owner_public_key, activePublicKey=args.active_public_key, contractDir=args.contract_dir, wasmFile=args.wasm_file, abiFile=args.abi_file) From dcd8bc6df16b25067571682e66258d087e446f3c Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Thu, 23 Feb 2023 14:52:42 -0600 Subject: [PATCH 132/178] Restore accidentally removed config option. Added new option and updated README. Use choices for wasm-runtime. Add new options to the *Args constructors in both performance tests. 
--- tests/TestHarness/Cluster.py | 10 ++++---- tests/performance_tests/README.md | 10 ++++++-- tests/performance_tests/performance_test.py | 21 ++++++++++------ .../performance_test_basic.py | 25 +++++++++++++------ 4 files changed, 45 insertions(+), 21 deletions(-) diff --git a/tests/TestHarness/Cluster.py b/tests/TestHarness/Cluster.py index 004cd40d66..edba218d8b 100644 --- a/tests/TestHarness/Cluster.py +++ b/tests/TestHarness/Cluster.py @@ -258,17 +258,17 @@ def launch(self, pnodes=1, unstartedNodes=0, totalNodes=1, prodCount=1, topo="me if self.staging: cmdArr.append("--nogen") nodeosArgs="" - if extraNodeosArgs.find("--max-transaction-time") == -1: + if "--max-transaction-time" not in extraNodeosArgs: nodeosArgs += " --max-transaction-time -1" - if extraNodeosArgs.find("--abi-serializer-max-time-ms") == -1: + if "--abi-serializer-max-time-ms" not in extraNodeosArgs: nodeosArgs += " --abi-serializer-max-time-ms 990000" - if extraNodeosArgs.find("--p2p-max-nodes-per-host") == -1: + if "--p2p-max-nodes-per-host" not in extraNodeosArgs: nodeosArgs += f" --p2p-max-nodes-per-host {maximumP2pPerHost}" - if extraNodeosArgs.find("--max-clients") == -1: + if "--max-clients" not in extraNodeosArgs: nodeosArgs += f" --max-clients {maximumClients}" if not self.walletd: nodeosArgs += " --plugin eosio::wallet_api_plugin" - if Utils.Debug: + if Utils.Debug and "--contracts-console" not in extraNodeosArgs: nodeosArgs += " --contracts-console" if PFSetupPolicy.hasPreactivateFeature(pfSetupPolicy): nodeosArgs += " --plugin eosio::producer_api_plugin" diff --git a/tests/performance_tests/README.md b/tests/performance_tests/README.md index 9c7357704a..0b3ce2a951 100644 --- a/tests/performance_tests/README.md +++ b/tests/performance_tests/README.md @@ -248,7 +248,10 @@ The Performance Harness main script `performance_test.py` can be configured usin * `--producer-threads PRODUCER_THREADS` Number of worker threads in producer thread pool (default: 6) * `--http-max-response-time-ms 
HTTP_MAX_RESPONSE_TIME_MS` - Maximum time for processing a request, -1 for unlimited (default: 990000) + Maximum time for processing a request, -1 for unlimited (default: -1) +* `--http-max-bytes-inflight-mb HTTP_MAX_BYTES_IN FLIGHT` + Maximum size in megabytes http_plugin should use for processing http requests. -1 for unlimited. + 429 error response when exceeded. (default: -1) * `--del-perf-logs` Whether to delete performance test specific logs. (default: False) * `--del-report` Whether to delete overarching performance run report. (default: False) * `--del-test-report` Whether to save json reports from each test scenario. (default: False) @@ -349,7 +352,10 @@ The following scripts are typically used by the Performance Harness main script * `--producer-threads PRODUCER_THREADS` Number of worker threads in producer thread pool (default: 6) * `--http-max-response-time-ms HTTP_MAX_RESPONSE_TIME_MS` - Maximum time for processing a request, -1 for unlimited (default: 990000) + Maximum time for processing a request, -1 for unlimited (default: -1) +* `--http-max-bytes-inflight-mb HTTP_MAX_BYTES_IN FLIGHT` + Maximum size in megabytes http_plugin should use for processing http requests. -1 for unlimited. + 429 error response when exceeded. (default: -1) * `--del-perf-logs` Whether to delete performance test specific logs. (default: False) * `--del-report` Whether to delete overarching performance run report. 
(default: False) * `--quiet` Whether to quiet printing intermediate results and reports to stdout (default: False) diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index c99132ad2a..e20c0ca710 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -11,7 +11,7 @@ from pathlib import Path, PurePath sys.path.append(str(PurePath(PurePath(Path(__file__).absolute()).parent).parent)) -from NodeosPluginArgs import ChainPluginArgs, HttpPluginArgs, NetPluginArgs, ProducerPluginArgs +from NodeosPluginArgs import ChainPluginArgs, HttpPluginArgs, NetPluginArgs, ProducerPluginArgs, ResourceMonitorPluginArgs from TestHarness import TestHelper, Utils, Account from performance_test_basic import PerformanceTestBasic, PtbArgumentsHandler from platform import release, system @@ -498,15 +498,22 @@ def main(): verbose=args.v) ENA = PerformanceTestBasic.ClusterConfig.ExtraNodeosArgs - chainPluginArgs = ChainPluginArgs(signatureCpuBillablePct=args.signature_cpu_billable_pct, chainStateDbSizeMb=args.chain_state_db_size_mb, - chainThreads=args.chain_threads, databaseMapMode=args.database_map_mode) + chainPluginArgs = ChainPluginArgs(signatureCpuBillablePct=args.signature_cpu_billable_pct, + chainThreads=args.chain_threads, databaseMapMode=args.database_map_mode, + wasmRuntime=args.wasm_runtime, contractsConsole=args.contracts_console, + eosVmOcCacheSizeMb=args.eos_vm_oc_cache_size_mb, eosVmOcCompileThreads=args.eos_vm_oc_compile_threads, + eosVmOcEnable=args.eos_vm_oc_enable, blockLogRetainBlocks=args.block_log_retain_blocks, + abiSerializerMaxTimeMs=990000, chainStateDbSizeMb=256000) producerPluginArgs = ProducerPluginArgs(disableSubjectiveBilling=args.disable_subjective_billing, lastBlockTimeOffsetUs=args.last_block_time_offset_us, produceTimeOffsetUs=args.produce_time_offset_us, cpuEffortPercent=args.cpu_effort_percent, lastBlockCpuEffortPercent=args.last_block_cpu_effort_percent, 
- producerThreads=args.producer_threads) - httpPluginArgs = HttpPluginArgs(httpMaxResponseTimeMs=args.http_max_response_time_ms) - netPluginArgs = NetPluginArgs(netThreads=args.net_threads) - extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs) + producerThreads=args.producer_threads, maxTransactionTime=-1) + httpPluginArgs = HttpPluginArgs(httpMaxResponseTimeMs=args.http_max_response_time_ms, httpMaxBytesInFlightMb=args.http_max_bytes_in_flight_mb, + httpThreads=args.http_threads) + netPluginArgs = NetPluginArgs(netThreads=args.net_threads, maxClients=0) + resourceMonitorPluginArgs = ResourceMonitorPluginArgs(resourceMonitorNotShutdownOnThresholdExceeded=True) + extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs, + resourceMonitorPluginArgs=resourceMonitorPluginArgs) testClusterConfig = PerformanceTestBasic.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs, specifiedContract=PerformanceTestBasic.ClusterConfig.SpecifiedContract(account=Account(args.account_name), diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index eb3a38758d..57c6024f2c 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -541,7 +541,9 @@ def createBaseArgumentParser(): ptbBaseParserGroup.add_argument("--cpu-effort-percent", type=int, help="Percentage of cpu block production time used to produce block. Whole number percentages, e.g. 80 for 80%%", default=100) ptbBaseParserGroup.add_argument("--last-block-cpu-effort-percent", type=int, help="Percentage of cpu block production time used to produce last block. Whole number percentages, e.g. 
80 for 80%%", default=100) ptbBaseParserGroup.add_argument("--producer-threads", type=int, help="Number of worker threads in producer thread pool", default=2) - ptbBaseParserGroup.add_argument("--http-max-response-time-ms", type=int, help="Maximum time for processing a request, -1 for unlimited", default=990000) + ptbBaseParserGroup.add_argument("--http-max-response-time-ms", type=int, help="Maximum time for processing a request, -1 for unlimited", default=-1) + ptbBaseParserGroup.add_argument("--http-max-bytes-in-flight-mb", type=int, help="Maximum size in megabytes http_plugin should use for processing http requests. -1 for unlimited. 429\ + error response when exceeded.", default=-1) ptbBaseParserGroup.add_argument("--del-perf-logs", help="Whether to delete performance test specific logs.", action='store_true') ptbBaseParserGroup.add_argument("--del-report", help="Whether to delete overarching performance run report.", action='store_true') ptbBaseParserGroup.add_argument("--quiet", help="Whether to quiet printing intermediate results and reports to stdout", action='store_true') @@ -553,7 +555,8 @@ def createBaseArgumentParser(): ptbBaseParserGroup.add_argument("--abi-file", type=str, help="ABI file name for contract", default="eosio.system.abi") ptbBaseParserGroup.add_argument("--wasm-runtime", type=str, help="Override default WASM runtime (\"eos-vm-jit\", \"eos-vm\")\ \"eos-vm-jit\" : A WebAssembly runtime that compiles WebAssembly code to native x86 code prior to\ - execution. \"eos-vm\" : A WebAssembly interpreter.", default="eos-vm-jit") + execution. 
\"eos-vm\" : A WebAssembly interpreter.", + choices=["eos-vm-jit", "eos-vm", "eos-vm-jit"], default="eos-vm-jit") ptbBaseParserGroup.add_argument("--contracts-console", type=bool, help="print contract's output to console", default=False) ptbBaseParserGroup.add_argument("--eos-vm-oc-cache-size-mb", type=int, help="Maximum size (in MiB) of the EOS VM OC code cache", default=1024) ptbBaseParserGroup.add_argument("--eos-vm-oc-compile-threads", type=int, help="Number of threads to use for EOS VM OC tier-up", default=1) @@ -586,8 +589,6 @@ def createArgumentParser(): def parseArgs(): ptbParser=PtbArgumentsHandler.createArgumentParser() args=ptbParser.parse_args() - if args.contracts_console: - print("Enabling contracts-console will not print anything unless debug level is 'debug' or higher.") return args def main(): @@ -598,7 +599,12 @@ def main(): testHelperConfig = PerformanceTestBasic.TestHelperConfig(killAll=args.clean_run, dontKill=args.leave_running, keepLogs=not args.del_perf_logs, dumpErrorDetails=args.dump_error_details, delay=args.d, nodesFile=args.nodes_file, verbose=args.v) - chainPluginArgs = ChainPluginArgs(chainThreads=args.chain_threads, databaseMapMode=args.database_map_mode, abiSerializerMaxTimeMs=990000, chainStateDbSizeMb=256000) + chainPluginArgs = ChainPluginArgs(signatureCpuBillablePct=args.signature_cpu_billable_pct, + chainThreads=args.chain_threads, databaseMapMode=args.database_map_mode, + wasmRuntime=args.wasm_runtime, contractsConsole=args.contracts_console, + eosVmOcCacheSizeMb=args.eos_vm_oc_cache_size_mb, eosVmOcCompileThreads=args.eos_vm_oc_compile_threads, + eosVmOcEnable=args.eos_vm_oc_enable, blockLogRetainBlocks=args.block_log_retain_blocks, + abiSerializerMaxTimeMs=990000, chainStateDbSizeMb=256000) lbto = args.last_block_time_offset_us lbcep = args.last_block_cpu_effort_percent @@ -610,17 +616,22 @@ def main(): lastBlockTimeOffsetUs=lbto, produceTimeOffsetUs=args.produce_time_offset_us, cpuEffortPercent=args.cpu_effort_percent, 
lastBlockCpuEffortPercent=lbcep, producerThreads=args.producer_threads, maxTransactionTime=-1) - httpPluginArgs = HttpPluginArgs(httpMaxBytesInFlightMb=-1, httpMaxResponseTimeMs=-1) + httpPluginArgs = HttpPluginArgs(httpMaxResponseTimeMs=args.http_max_response_time_ms, httpMaxBytesInFlightMb=args.http_max_bytes_in_flight_mb, + httpThreads=args.http_threads) netPluginArgs = NetPluginArgs(netThreads=args.net_threads, maxClients=0) resourceMonitorPluginArgs = ResourceMonitorPluginArgs(resourceMonitorNotShutdownOnThresholdExceeded=True) ENA = PerformanceTestBasic.ClusterConfig.ExtraNodeosArgs - extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs, resourceMonitorPluginArgs=resourceMonitorPluginArgs) + extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs, + resourceMonitorPluginArgs=resourceMonitorPluginArgs) SC = PerformanceTestBasic.ClusterConfig.SpecifiedContract specifiedContract=SC(contractDir=args.contract_dir, wasmFile=args.wasm_file, abiFile=args.abi_file, account=Account(args.account_name)) testClusterConfig = PerformanceTestBasic.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs, specifiedContract=specifiedContract, loggingLevel=args.cluster_log_lvl, nodeosVers=Utils.getNodeosVersion().split('.')[0]) + if args.contracts_console and testClusterConfig.loggingLevel != "debug" and testClusterConfig.loggingLevel != "all": + print("Enabling contracts-console will not print anything unless debug level is 'debug' or higher." 
+ f" Current debug level is: {testClusterConfig.loggingLevel}") ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=args.target_tps, testTrxGenDurationSec=args.test_duration_sec, tpsLimitPerGenerator=args.tps_limit_per_generator, numAddlBlocksToPrune=args.num_blocks_to_prune, logDirRoot=".", delReport=args.del_report, quiet=args.quiet, delPerfLogs=args.del_perf_logs, printMissingTransactions=args.print_missing_transactions, From 481811969efa19bd9215c261afc5b7447ee82aa6 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Thu, 23 Feb 2023 14:55:50 -0600 Subject: [PATCH 133/178] fix some README issues in performance test --- tests/performance_tests/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/performance_tests/README.md b/tests/performance_tests/README.md index 0b3ce2a951..7aaac9a321 100644 --- a/tests/performance_tests/README.md +++ b/tests/performance_tests/README.md @@ -249,7 +249,7 @@ The Performance Harness main script `performance_test.py` can be configured usin Number of worker threads in producer thread pool (default: 6) * `--http-max-response-time-ms HTTP_MAX_RESPONSE_TIME_MS` Maximum time for processing a request, -1 for unlimited (default: -1) -* `--http-max-bytes-inflight-mb HTTP_MAX_BYTES_IN FLIGHT` +* `--http-max-bytes-in-flight-mb HTTP_MAX_IN_FLIGHT_BYTES` Maximum size in megabytes http_plugin should use for processing http requests. -1 for unlimited. 429 error response when exceeded. (default: -1) * `--del-perf-logs` Whether to delete performance test specific logs. 
(default: False) @@ -353,7 +353,7 @@ The following scripts are typically used by the Performance Harness main script Number of worker threads in producer thread pool (default: 6) * `--http-max-response-time-ms HTTP_MAX_RESPONSE_TIME_MS` Maximum time for processing a request, -1 for unlimited (default: -1) -* `--http-max-bytes-inflight-mb HTTP_MAX_BYTES_IN FLIGHT` +* `--http-max-bytes-in-flight-mb HTTP_MAX_IN_FLIGHT_BYTES` Maximum size in megabytes http_plugin should use for processing http requests. -1 for unlimited. 429 error response when exceeded. (default: -1) * `--del-perf-logs` Whether to delete performance test specific logs. (default: False) From a13f48ba3c4c7335220d22eeebefb674e3c40c3a Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Fri, 24 Feb 2023 13:17:55 -0600 Subject: [PATCH 134/178] merge from main and rerun generate_nodeos_plugin_args --- .../NodeosPluginArgs/ResourceMonitorPluginArgs.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/performance_tests/NodeosPluginArgs/ResourceMonitorPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/ResourceMonitorPluginArgs.py index cc731d9133..54d3e54031 100755 --- a/tests/performance_tests/NodeosPluginArgs/ResourceMonitorPluginArgs.py +++ b/tests/performance_tests/NodeosPluginArgs/ResourceMonitorPluginArgs.py @@ -17,6 +17,9 @@ class ResourceMonitorPluginArgs(BasePluginArgs): resourceMonitorSpaceThreshold: int=None _resourceMonitorSpaceThresholdNodeosDefault: int=90 _resourceMonitorSpaceThresholdNodeosArg: str="--resource-monitor-space-threshold" + resourceMonitorSpaceAbsoluteGb: str=None + _resourceMonitorSpaceAbsoluteGbNodeosDefault: str=None + _resourceMonitorSpaceAbsoluteGbNodeosArg: str="--resource-monitor-space-absolute-gb" resourceMonitorNotShutdownOnThresholdExceeded: bool=None _resourceMonitorNotShutdownOnThresholdExceededNodeosDefault: bool=False _resourceMonitorNotShutdownOnThresholdExceededNodeosArg: str="--resource-monitor-not-shutdown-on-threshold-exceeded" From 
93f7a50d1f060267225a76f681d5f1758d322818 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Mon, 27 Feb 2023 11:57:59 -0600 Subject: [PATCH 135/178] add a sanity check for number of transactions scraped from trx gen --- tests/performance_tests/performance_test_basic.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 42eb222913..cae8365076 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -390,6 +390,8 @@ def runTpsTest(self) -> PtbTpsTestResult: # Get stats after transaction generation stops trxSent = {} log_reader.scrapeTrxGenTrxSentDataLogs(trxSent, self.trxGenLogDirPath, self.ptbConfig.quiet) + if len(trxSent) != self.ptbConfig.expectedTransactionsSent: + print(f"ERROR: Transactions generated: {len(trxSent)} does not match the expected number of transactions: {self.ptbConfig.expectedTransactionsSent}") blocksToWait = 2 * self.ptbConfig.testTrxGenDurationSec + 10 trxSent = self.validationNode.waitForTransactionsInBlockRange(trxSent, self.data.startBlock, blocksToWait) self.data.ceaseBlock = self.validationNode.getHeadBlockNum() From 6dfb66edaae0d93e48c551fe14ea648b19de7612 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Mon, 27 Feb 2023 15:44:53 -0600 Subject: [PATCH 136/178] resolve test failure for ram and cpu performance tests due to id collision --- tests/TestHarness/Node.py | 14 +++++++------- tests/trx_generator/trx_generator.cpp | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/TestHarness/Node.py b/tests/TestHarness/Node.py index a2d8c1bd91..7833fbadb2 100644 --- a/tests/TestHarness/Node.py +++ b/tests/TestHarness/Node.py @@ -518,18 +518,18 @@ def checkBlockForTransactions(self, transIds, blockNum): return transIds def waitForTransactionsInBlockRange(self, transIds, startBlock=2, maxFutureBlocks=0): - lastBlockProcessed = startBlock - 
overallFinalBlock = startBlock + maxFutureBlocks + nextBlockToProcess = startBlock + overallEndBlock = startBlock + maxFutureBlocks while len(transIds) > 0: currentLoopEndBlock = self.getHeadBlockNum() - if currentLoopEndBlock > overallFinalBlock: - currentLoopEndBlock = overallFinalBlock - for blockNum in range(currentLoopEndBlock, lastBlockProcessed - 1, -1): + if currentLoopEndBlock > overallEndBlock: + currentLoopEndBlock = overallEndBlock + for blockNum in range(nextBlockToProcess, currentLoopEndBlock + 1): transIds = self.checkBlockForTransactions(transIds, blockNum) if len(transIds) == 0: return transIds - lastBlockProcessed = currentLoopEndBlock - if currentLoopEndBlock == overallFinalBlock: + nextBlockToProcess = currentLoopEndBlock + 1 + if currentLoopEndBlock == overallEndBlock: Utils.Print("ERROR: Transactions were missing upon expiration of waitOnblockTransactions") break self.waitForHeadToAdvance() diff --git a/tests/trx_generator/trx_generator.cpp b/tests/trx_generator/trx_generator.cpp index 9df763505a..97ec8d948a 100644 --- a/tests/trx_generator/trx_generator.cpp +++ b/tests/trx_generator/trx_generator.cpp @@ -33,7 +33,7 @@ namespace eosio::testing { trx.actions.emplace_back(std::move(act)); } trx.context_free_actions.emplace_back(action({}, config::null_account_name, name("nonce"), - fc::raw::pack(std::to_string(nonce_prefix) + ":" + std::to_string(++nonce) + ":" + + fc::raw::pack(std::to_string(_generator_id) + ":" + std::to_string(nonce_prefix) + ":" + std::to_string(++nonce) + ":" + fc::time_point::now().time_since_epoch().count()))); trx.sign(priv_key, chain_id); From 1ce9036357e409fa47096941d94f4d1fb8760eb2 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Mon, 27 Feb 2023 16:36:38 -0600 Subject: [PATCH 137/178] change enable args in performance_test_basic to be store_true instead of bools. Put eos-vm-oc-enable in ptb only onto non producer nodes. 
Remove duplicate choice in wasm-runtime --- .../performance_tests/performance_test_basic.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 57c6024f2c..6ed9eb7777 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -90,6 +90,7 @@ class SpecifiedContract: nodeosVers: str = "" specificExtraNodeosArgs: dict = field(default_factory=dict) _totalNodes: int = 2 + nonprodsEnableEosVmOcEnable: bool = False def log_transactions(self, trxDataFile, block): for trx in block['payload']['transactions']: @@ -101,6 +102,8 @@ def __post_init__(self): self._totalNodes = self.pnodes + 1 if self.totalNodes <= self.pnodes else self.totalNodes if not self.prodsEnableTraceApi: self.specificExtraNodeosArgs.update({f"{node}" : "--plugin eosio::trace_api_plugin" for node in range(self.pnodes, self._totalNodes)}) + if self.nonprodsEnableEosVmOcEnable: + self.specificExtraNodeosArgs.update({f"{node}" : "--eos-vm-oc-enable" for node in range(self.pnodes, self._totalNodes)}) assert self.nodeosVers != "v1" and self.nodeosVers != "v0", f"nodeos version {Utils.getNodeosVersion().split('.')[0]} is unsupported by performance test" if self.nodeosVers == "v2": self.fetchBlock = lambda node, blockNum: node.processUrllibRequest("chain", "get_block", {"block_num_or_id":blockNum}, silentErrors=False, exitOnError=True) @@ -548,7 +551,7 @@ def createBaseArgumentParser(): ptbBaseParserGroup.add_argument("--del-report", help="Whether to delete overarching performance run report.", action='store_true') ptbBaseParserGroup.add_argument("--quiet", help="Whether to quiet printing intermediate results and reports to stdout", action='store_true') ptbBaseParserGroup.add_argument("--prods-enable-trace-api", help="Determines whether producer nodes should have eosio::trace_api_plugin enabled", 
action='store_true') - ptbBaseParserGroup.add_argument("--print-missing-transactions", type=bool, help="Toggles if missing transactions are be printed upon test completion.", default=False) + ptbBaseParserGroup.add_argument("--print-missing-transactions", help="Toggles if missing transactions are be printed upon test completion.", action='store_true') ptbBaseParserGroup.add_argument("--account-name", type=str, help="Name of the account to create and assign a contract to", default="eosio") ptbBaseParserGroup.add_argument("--contract-dir", type=str, help="Path to contract dir", default="unittests/contracts/eosio.system") ptbBaseParserGroup.add_argument("--wasm-file", type=str, help="WASM file name for contract", default="eosio.system.wasm") @@ -556,11 +559,11 @@ def createBaseArgumentParser(): ptbBaseParserGroup.add_argument("--wasm-runtime", type=str, help="Override default WASM runtime (\"eos-vm-jit\", \"eos-vm\")\ \"eos-vm-jit\" : A WebAssembly runtime that compiles WebAssembly code to native x86 code prior to\ execution. 
\"eos-vm\" : A WebAssembly interpreter.", - choices=["eos-vm-jit", "eos-vm", "eos-vm-jit"], default="eos-vm-jit") - ptbBaseParserGroup.add_argument("--contracts-console", type=bool, help="print contract's output to console", default=False) + choices=["eos-vm-jit", "eos-vm"], default="eos-vm-jit") + ptbBaseParserGroup.add_argument("--contracts-console", help="print contract's output to console", action='store_true') ptbBaseParserGroup.add_argument("--eos-vm-oc-cache-size-mb", type=int, help="Maximum size (in MiB) of the EOS VM OC code cache", default=1024) ptbBaseParserGroup.add_argument("--eos-vm-oc-compile-threads", type=int, help="Number of threads to use for EOS VM OC tier-up", default=1) - ptbBaseParserGroup.add_argument("--eos-vm-oc-enable", type=bool, help="Enable EOS VM OC tier-up runtime", default=False) + ptbBaseParserGroup.add_argument("--eos-vm-oc-enable", help="Enable EOS VM OC tier-up runtime", action='store_true') ptbBaseParserGroup.add_argument("--block-log-retain-blocks", type=int, help="If set to greater than 0, periodically prune the block log to\ store only configured number of most recent blocks. 
If set to 0, no blocks are be written to the block log;\ block log file is removed after startup.", default=None) @@ -602,8 +605,7 @@ def main(): chainPluginArgs = ChainPluginArgs(signatureCpuBillablePct=args.signature_cpu_billable_pct, chainThreads=args.chain_threads, databaseMapMode=args.database_map_mode, wasmRuntime=args.wasm_runtime, contractsConsole=args.contracts_console, - eosVmOcCacheSizeMb=args.eos_vm_oc_cache_size_mb, eosVmOcCompileThreads=args.eos_vm_oc_compile_threads, - eosVmOcEnable=args.eos_vm_oc_enable, blockLogRetainBlocks=args.block_log_retain_blocks, + eosVmOcCacheSizeMb=args.eos_vm_oc_cache_size_mb, eosVmOcCompileThreads=args.eos_vm_oc_compile_threads, blockLogRetainBlocks=args.block_log_retain_blocks, abiSerializerMaxTimeMs=990000, chainStateDbSizeMb=256000) lbto = args.last_block_time_offset_us @@ -628,7 +630,8 @@ def main(): testClusterConfig = PerformanceTestBasic.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs, specifiedContract=specifiedContract, loggingLevel=args.cluster_log_lvl, - nodeosVers=Utils.getNodeosVersion().split('.')[0]) + nodeosVers=Utils.getNodeosVersion().split('.')[0], nonprodsEnableEosVmOcEnable=args.eos_vm_oc_enable) + if args.contracts_console and testClusterConfig.loggingLevel != "debug" and testClusterConfig.loggingLevel != "all": print("Enabling contracts-console will not print anything unless debug level is 'debug' or higher." f" Current debug level is: {testClusterConfig.loggingLevel}") From 2940d5aa92e8fa2adfec78e0bb0bd518318d1c93 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 28 Feb 2023 10:18:19 -0600 Subject: [PATCH 138/178] Updates per peer review. 
--- tests/performance_tests/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/performance_tests/README.md b/tests/performance_tests/README.md index bf5666263e..f4c789a903 100644 --- a/tests/performance_tests/README.md +++ b/tests/performance_tests/README.md @@ -267,7 +267,7 @@ Performance Test Basic Base: In "locked" mode database is preloaded, locked in to memory, and will use huge pages if available. (default: mapped) * `--cluster-log-lvl {all,debug,info,warn,error,off}` Cluster log level ("all", "debug", "info", "warn", "error", or "off"). Performance Harness Test Basic relies on some logging at - "info" level, so it is recommended lowest logging level to use. However, there are instances where more verbose logging can be + "info" level, so it is the lowest recommended logging level to use. However, there are instances where more verbose logging can be useful. (default: info) * `--net-threads NET_THREADS` Number of worker threads in net_plugin thread pool (default: 4) @@ -386,7 +386,7 @@ Performance Test Basic Base: In "locked" mode database is preloaded, locked in to memory, and will use huge pages if available. (default: mapped) * `--cluster-log-lvl {all,debug,info,warn,error,off}` Cluster log level ("all", "debug", "info", "warn", "error", or "off"). Performance Harness Test Basic relies on some logging at - "info" level, so it is recommended lowest logging level to use. However, there are instances where more verbose logging can be + "info" level, so it is the lowest recommended logging level to use. However, there are instances where more verbose logging can be useful. 
(default: info) * `--net-threads NET_THREADS` Number of worker threads in net_plugin thread pool (default: 4) @@ -432,7 +432,7 @@ Performance Test Basic Single Test: * `--test-duration-sec TEST_DURATION_SEC` The duration of transfer trx generation for the test in seconds (default: 90) * `--user-trx-data-file USER_TRX_DATA_FILE` - Path to userTrxDataTransfer.json (default: None) + Path to transaction data JSON file (default: None) #### Launch Transaction Generators (TestHarness) From 078197250dd1007e5f5e1e6639666e94961fcb1e Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 28 Feb 2023 13:36:13 -0600 Subject: [PATCH 139/178] Update reports in readme. --- tests/performance_tests/README.md | 281 +++++++++++++++++------------- 1 file changed, 160 insertions(+), 121 deletions(-) diff --git a/tests/performance_tests/README.md b/tests/performance_tests/README.md index a3f3ad41fd..5abcf8e20d 100644 --- a/tests/performance_tests/README.md +++ b/tests/performance_tests/README.md @@ -535,16 +535,16 @@ Next, a summary of the search scenario conducted and respective results is inclu "searchCeiling": 24500, "basicTestResult": { "targetTPS": 12500, - "resultAvgTps": 12499.8125, + "resultAvgTps": 12507.6875, "expectedTxns": 125000, "resultTxns": 125000, "tpsExpectMet": true, "trxExpectMet": true, "basicTestSuccess": true, "testAnalysisBlockCnt": 17, - "logsDir": "p/2023-02-22_15-17-12/testRunLogs/p/2023-02-22_17-07-47-12500", - "testStart": "2023-02-22T17:07:47.714382", - "testEnd": "2023-02-22T17:09:00.351289" + "logsDir": "p/2023-02-28_17-10-36/testRunLogs/p/2023-02-28_19-15-15-12500", + "testStart": "2023-02-28T19:15:15.406134", + "testEnd": "2023-02-28T19:16:34.379216" } } ``` @@ -577,12 +577,12 @@ Finally, the full detail test report for each of the determined max TPS throughp ``` json { - "perfTestsBegin": "2023-02-22T15:17:12.080867", - "perfTestsFinish": "2023-02-22T17:15:43.310101", - "InitialMaxTpsAchieved": 15500, - "LongRunningMaxTpsAchieved": 15500, - 
"tpsTestStart": "2023-02-22T17:04:36.629749", - "tpsTestFinish": "2023-02-22T17:15:43.310092", + "perfTestsBegin": "2023-02-28T17:10:36.281418", + "perfTestsFinish": "2023-02-28T19:26:06.224176", + "InitialMaxTpsAchieved": 15000, + "LongRunningMaxTpsAchieved": 14500, + "tpsTestStart": "2023-02-28T19:12:06.501739", + "tpsTestFinish": "2023-02-28T19:26:06.224167", "InitialSearchResults": { "0": { "success": false, @@ -591,16 +591,16 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchCeiling": 50000, "basicTestResult": { "targetTPS": 50000, - "resultAvgTps": 14888.279069767443, + "resultAvgTps": 14271.463414634147, "expectedTxns": 500000, - "resultTxns": 342429, + "resultTxns": 315135, "tpsExpectMet": false, "trxExpectMet": false, "basicTestSuccess": false, - "testAnalysisBlockCnt": 44, - "logsDir": "p/2023-02-22_15-17-12/testRunLogs/p/2023-02-22_17-04-36-50000", - "testStart": "2023-02-22T17:04:36.629809", - "testEnd": "2023-02-22T17:06:15.893419" + "testAnalysisBlockCnt": 42, + "logsDir": "p/2023-02-28_17-10-36/testRunLogs/p/2023-02-28_19-12-06-50000", + "testStart": "2023-02-28T19:12:06.501793", + "testEnd": "2023-02-28T19:13:45.664215" } }, "1": { @@ -610,16 +610,16 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchCeiling": 49500, "basicTestResult": { "targetTPS": 25000, - "resultAvgTps": 15541.464285714286, + "resultAvgTps": 14964.896551724138, "expectedTxns": 250000, "resultTxns": 250000, "tpsExpectMet": false, "trxExpectMet": true, "basicTestSuccess": true, - "testAnalysisBlockCnt": 29, - "logsDir": "p/2023-02-22_15-17-12/testRunLogs/p/2023-02-22_17-06-16-25000", - "testStart": "2023-02-22T17:06:16.000708", - "testEnd": "2023-02-22T17:07:47.634132" + "testAnalysisBlockCnt": 30, + "logsDir": "p/2023-02-28_17-10-36/testRunLogs/p/2023-02-28_19-13-45-25000", + "testStart": "2023-02-28T19:13:45.773450", + "testEnd": "2023-02-28T19:15:15.330054" } }, "2": { @@ -629,16 +629,16 @@ 
Finally, the full detail test report for each of the determined max TPS throughp "searchCeiling": 24500, "basicTestResult": { "targetTPS": 12500, - "resultAvgTps": 12499.8125, + "resultAvgTps": 12507.6875, "expectedTxns": 125000, "resultTxns": 125000, "tpsExpectMet": true, "trxExpectMet": true, "basicTestSuccess": true, "testAnalysisBlockCnt": 17, - "logsDir": "p/2023-02-22_15-17-12/testRunLogs/p/2023-02-22_17-07-47-12500", - "testStart": "2023-02-22T17:07:47.714382", - "testEnd": "2023-02-22T17:09:00.351289" + "logsDir": "p/2023-02-28_17-10-36/testRunLogs/p/2023-02-28_19-15-15-12500", + "testStart": "2023-02-28T19:15:15.406134", + "testEnd": "2023-02-28T19:16:34.379216" } }, "3": { @@ -648,16 +648,16 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchCeiling": 24500, "basicTestResult": { "targetTPS": 19000, - "resultAvgTps": 15566.0, + "resultAvgTps": 14874.90909090909, "expectedTxns": 190000, "resultTxns": 190000, "tpsExpectMet": false, "trxExpectMet": true, "basicTestSuccess": true, - "testAnalysisBlockCnt": 22, - "logsDir": "p/2023-02-22_15-17-12/testRunLogs/p/2023-02-22_17-09-00-19000", - "testStart": "2023-02-22T17:09:00.404183", - "testEnd": "2023-02-22T17:10:24.711309" + "testAnalysisBlockCnt": 23, + "logsDir": "p/2023-02-28_17-10-36/testRunLogs/p/2023-02-28_19-16-34-19000", + "testStart": "2023-02-28T19:16:34.432286", + "testEnd": "2023-02-28T19:17:59.828271" } }, "4": { @@ -667,16 +667,16 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchCeiling": 18500, "basicTestResult": { "targetTPS": 16000, - "resultAvgTps": 14776.235294117647, + "resultAvgTps": 15246.941176470587, "expectedTxns": 160000, "resultTxns": 160000, "tpsExpectMet": false, "trxExpectMet": true, "basicTestSuccess": true, "testAnalysisBlockCnt": 18, - "logsDir": "p/2023-02-22_15-17-12/testRunLogs/p/2023-02-22_17-10-24-16000", - "testStart": "2023-02-22T17:10:24.776702", - "testEnd": "2023-02-22T17:11:46.433363" + 
"logsDir": "p/2023-02-28_17-10-36/testRunLogs/p/2023-02-28_19-17-59-16000", + "testStart": "2023-02-28T19:17:59.893538", + "testEnd": "2023-02-28T19:19:21.997058" } }, "5": { @@ -686,35 +686,54 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchCeiling": 15500, "basicTestResult": { "targetTPS": 14500, - "resultAvgTps": 14570.125, + "resultAvgTps": 14543.125, "expectedTxns": 145000, "resultTxns": 145000, "tpsExpectMet": true, "trxExpectMet": true, "basicTestSuccess": true, "testAnalysisBlockCnt": 17, - "logsDir": "p/2023-02-22_15-17-12/testRunLogs/p/2023-02-22_17-11-46-14500", - "testStart": "2023-02-22T17:11:46.494769", - "testEnd": "2023-02-22T17:13:06.882690" + "logsDir": "p/2023-02-28_17-10-36/testRunLogs/p/2023-02-28_19-19-22-14500", + "testStart": "2023-02-28T19:19:22.056683", + "testEnd": "2023-02-28T19:20:39.705683" } }, "6": { - "success": true, + "success": false, "searchTarget": 15500, "searchFloor": 15000, "searchCeiling": 15500, "basicTestResult": { "targetTPS": 15500, - "resultAvgTps": 15448.4375, + "resultAvgTps": 15353.4375, "expectedTxns": 155000, "resultTxns": 155000, - "tpsExpectMet": true, + "tpsExpectMet": false, "trxExpectMet": true, "basicTestSuccess": true, "testAnalysisBlockCnt": 17, - "logsDir": "p/2023-02-22_15-17-12/testRunLogs/p/2023-02-22_17-13-06-15500", - "testStart": "2023-02-22T17:13:06.948372", - "testEnd": "2023-02-22T17:14:24.937779" + "logsDir": "p/2023-02-28_17-10-36/testRunLogs/p/2023-02-28_19-20-39-15500", + "testStart": "2023-02-28T19:20:39.761125", + "testEnd": "2023-02-28T19:22:01.537270" + } + }, + "7": { + "success": true, + "searchTarget": 15000, + "searchFloor": 15000, + "searchCeiling": 15000, + "basicTestResult": { + "targetTPS": 15000, + "resultAvgTps": 14963.529411764706, + "expectedTxns": 150000, + "resultTxns": 150000, + "tpsExpectMet": true, + "trxExpectMet": true, + "basicTestSuccess": true, + "testAnalysisBlockCnt": 18, + "logsDir": 
"p/2023-02-28_17-10-36/testRunLogs/p/2023-02-28_19-22-01-15000", + "testStart": "2023-02-28T19:22:01.594970", + "testEnd": "2023-02-28T19:23:22.901483" } } }, @@ -733,22 +752,41 @@ Finally, the full detail test report for each of the determined max TPS throughp }, "LongRunningSearchResults": { "0": { + "success": false, + "searchTarget": 15000, + "searchFloor": 0, + "searchCeiling": 15000, + "basicTestResult": { + "targetTPS": 15000, + "resultAvgTps": 14361.529411764706, + "expectedTxns": 150000, + "resultTxns": 150000, + "tpsExpectMet": false, + "trxExpectMet": true, + "basicTestSuccess": true, + "testAnalysisBlockCnt": 18, + "logsDir": "p/2023-02-28_17-10-36/testRunLogs/p/2023-02-28_19-23-22-15000", + "testStart": "2023-02-28T19:23:22.962336", + "testEnd": "2023-02-28T19:24:44.753772" + } + }, + "1": { "success": true, - "searchTarget": 15500, + "searchTarget": 14500, "searchFloor": 0, - "searchCeiling": 15500, + "searchCeiling": 15000, "basicTestResult": { - "targetTPS": 15500, - "resultAvgTps": 15482.375, - "expectedTxns": 155000, - "resultTxns": 155000, + "targetTPS": 14500, + "resultAvgTps": 14546.0625, + "expectedTxns": 145000, + "resultTxns": 145000, "tpsExpectMet": true, "trxExpectMet": true, "basicTestSuccess": true, "testAnalysisBlockCnt": 17, - "logsDir": "p/2023-02-22_15-17-12/testRunLogs/p/2023-02-22_17-14-24-15500", - "testStart": "2023-02-22T17:14:24.998249", - "testEnd": "2023-02-22T17:15:43.248732" + "logsDir": "p/2023-02-28_17-10-36/testRunLogs/p/2023-02-28_19-24-44-14500", + "testStart": "2023-02-28T19:24:44.811953", + "testEnd": "2023-02-28T19:26:06.165715" } } }, @@ -768,33 +806,34 @@ Finally, the full detail test report for each of the determined max TPS throughp "ProducerThreadAnalysis": { "recommendedThreadCount": 6, "threadToMaxTpsDict": { - "2": 16000, - "3": 18000, - "4": 20000, - "5": 22000, - "6": 22500, - "7": 22000 + "2": 12000, + "3": 14000, + "4": 19000, + "5": 20500, + "6": 21500, + "7": 21500 }, - "analysisStart": 
"2023-02-22T15:17:12.124072", - "analysisFinish": "2023-02-22T16:25:05.332487" + "analysisStart": "2023-02-28T17:10:36.313384", + "analysisFinish": "2023-02-28T18:15:53.250540" }, "ChainThreadAnalysis": { - "recommendedThreadCount": 2, + "recommendedThreadCount": 3, "threadToMaxTpsDict": { - "2": 15500, - "3": 15500 + "2": 14000, + "3": 15000, + "4": 13500 }, - "analysisStart": "2023-02-22T16:25:05.333425", - "analysisFinish": "2023-02-22T16:44:52.600693" + "analysisStart": "2023-02-28T18:15:53.251366", + "analysisFinish": "2023-02-28T18:49:30.383395" }, "NetThreadAnalysis": { "recommendedThreadCount": 2, "threadToMaxTpsDict": { - "2": 15500, - "3": 15500 + "2": 14000, + "3": 13500 }, - "analysisStart": "2023-02-22T16:44:52.601488", - "analysisFinish": "2023-02-22T17:04:36.629427" + "analysisStart": "2023-02-28T18:49:30.384564", + "analysisFinish": "2023-02-28T19:12:06.501003" }, "args": { "rawCmdLine ": "./tests/performance_tests/performance_test.py --test-iteration-duration-sec 10 --final-iterations-duration-sec 30 --calc-producer-threads lmax --calc-chain-threads lmax --calc-net-threads lmax", @@ -1203,6 +1242,9 @@ Finally, the full detail test report for each of the determined max TPS throughp "resourceMonitorSpaceThreshold": null, "_resourceMonitorSpaceThresholdNodeosDefault": 90, "_resourceMonitorSpaceThresholdNodeosArg": "--resource-monitor-space-threshold", + "resourceMonitorSpaceAbsoluteGb": null, + "_resourceMonitorSpaceAbsoluteGbNodeosDefault": null, + "_resourceMonitorSpaceAbsoluteGbNodeosArg": "--resource-monitor-space-absolute-gb", "resourceMonitorNotShutdownOnThresholdExceeded": null, "_resourceMonitorNotShutdownOnThresholdExceededNodeosDefault": false, "_resourceMonitorNotShutdownOnThresholdExceededNodeosArg": "--resource-monitor-not-shutdown-on-threshold-exceeded", @@ -1297,8 +1339,6 @@ Finally, the full detail test report for each of the determined max TPS throughp "delReport": false, "delTestReport": false, "numAddlBlocksToPrune": 2, - 
"logDirRoot": "p/2023-02-22_21-28-58/pluginThreadOptRunLogs", - "delReport": true, "quiet": false, "logDirRoot": ".", "skipTpsTests": false, @@ -1306,10 +1346,10 @@ Finally, the full detail test report for each of the determined max TPS throughp "calcChainThreads": "lmax", "calcNetThreads": "lmax", "logDirBase": "p", - "logDirTimestamp": "2023-02-22_15-17-12", - "logDirPath": "p/2023-02-22_15-17-12", - "ptbLogsDirPath": "p/2023-02-22_15-17-12/testRunLogs", - "pluginThreadOptLogsDirPath": "p/2023-02-22_15-17-12/pluginThreadOptRunLogs" + "logDirTimestamp": "2023-02-28_17-10-36", + "logDirPath": "p/2023-02-28_17-10-36", + "ptbLogsDirPath": "p/2023-02-28_17-10-36/testRunLogs", + "pluginThreadOptLogsDirPath": "p/2023-02-28_17-10-36/pluginThreadOptRunLogs" }, "env": { "system": "Linux", @@ -1333,67 +1373,67 @@ The Performance Test Basic generates, by default, a report that details results ``` json { "completedRun": true, - "testStart": "2023-02-22T17:14:24.998249", - "testFinish": "2023-02-22T17:15:43.248732", + "testStart": "2023-02-28T19:24:44.811953", + "testFinish": "2023-02-28T19:26:06.165715", "Analysis": { "BlockSize": { - "min": 1310400, - "max": 1619520, - "avg": 1484092.2352941176, - "sigma": 67283.84495512008, + "min": 1148352, + "max": 1557888, + "avg": 1396653.1764705882, + "sigma": 80740.60358240586, "emptyBlocks": 0, "numBlocks": 17 }, "BlocksGuide": { "firstBlockNum": 2, - "lastBlockNum": 153, - "totalBlocks": 152, - "testStartBlockNum": 112, - "testEndBlockNum": 142, - "setupBlocksCnt": 110, - "tearDownBlocksCnt": 11, + "lastBlockNum": 159, + "totalBlocks": 158, + "testStartBlockNum": 113, + "testEndBlockNum": 149, + "setupBlocksCnt": 111, + "tearDownBlocksCnt": 10, "leadingEmptyBlocksCnt": 1, - "trailingEmptyBlocksCnt": 9, + "trailingEmptyBlocksCnt": 15, "configAddlDropCnt": 2, "testAnalysisBlockCnt": 17 }, "TPS": { - "min": 14579, - "max": 16242, - "avg": 15482.375, - "sigma": 381.4460766281389, + "min": 13737, + "max": 15776, + "avg": 14546.0625, + 
"sigma": 428.6321950037701, "emptyBlocks": 0, "numBlocks": 17, - "configTps": 15500, + "configTps": 14500, "configTestDuration": 10, "tpsPerGenerator": [ - 3875, - 3875, - 3875, - 3875 + 3625, + 3625, + 3625, + 3625 ], "generatorCount": 4 }, "TrxCPU": { - "min": 6.0, - "max": 3501.0, - "avg": 22.077154838709678, - "sigma": 15.627253758549179, - "samples": 155000 + "min": 7.0, + "max": 924.0, + "avg": 23.99186896551724, + "sigma": 13.466278551411643, + "samples": 145000 }, "TrxLatency": { "min": 0.0009999275207519531, - "max": 0.5750000476837158, - "avg": 0.2690663419123619, - "sigma": 0.14536933582820064, - "samples": 155000 + "max": 0.5899999141693115, + "avg": 0.2662433517719137, + "sigma": 0.146137230822956, + "samples": 145000 }, "TrxNet": { "min": 24.0, "max": 24.0, "avg": 24.0, "sigma": 0.0, - "samples": 155000 + "samples": 145000 }, "DroppedBlocks": {}, "DroppedBlocksCount": 0, @@ -1811,6 +1851,9 @@ The Performance Test Basic generates, by default, a report that details results "resourceMonitorSpaceThreshold": null, "_resourceMonitorSpaceThresholdNodeosDefault": 90, "_resourceMonitorSpaceThresholdNodeosArg": "--resource-monitor-space-threshold", + "resourceMonitorSpaceAbsoluteGb": null, + "_resourceMonitorSpaceAbsoluteGbNodeosDefault": null, + "_resourceMonitorSpaceAbsoluteGbNodeosArg": "--resource-monitor-space-absolute-gb", "resourceMonitorNotShutdownOnThresholdExceeded": null, "_resourceMonitorNotShutdownOnThresholdExceededNodeosDefault": false, "_resourceMonitorNotShutdownOnThresholdExceededNodeosArg": "--resource-monitor-not-shutdown-on-threshold-exceeded", @@ -1877,14 +1920,10 @@ The Performance Test Basic generates, by default, a report that details results } }, "specifiedContract": { - "accountName": "eosio", - "ownerPrivateKey": "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3", - "ownerPublicKey": "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "activePrivateKey": "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3", - 
"activePublicKey": "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", "contractDir": "unittests/contracts/eosio.system", "wasmFile": "eosio.system.wasm", - "abiFile": "eosio.system.abi" + "abiFile": "eosio.system.abi", + "account": "Name: eosio" }, "useBiosBootFile": false, "genesisPath": "tests/performance_tests/genesis.json", @@ -1900,21 +1939,21 @@ The Performance Test Basic generates, by default, a report that details results "1": "--plugin eosio::trace_api_plugin" }, "_totalNodes": 2, - "targetTps": 15500, + "targetTps": 14500, "testTrxGenDurationSec": 10, "tpsLimitPerGenerator": 4000, "numAddlBlocksToPrune": 2, - "logDirRoot": "p/2023-02-22_15-17-12/testRunLogs", + "logDirRoot": "p/2023-02-28_17-10-36/testRunLogs", "delReport": false, "quiet": false, "delPerfLogs": false, - "expectedTransactionsSent": 155000, + "expectedTransactionsSent": 145000, "printMissingTransactions": false, "userTrxDataFile": null, - "logDirBase": "p/2023-02-22_15-17-12/testRunLogs/p", - "logDirTimestamp": "2023-02-22_17-14-24", - "logDirTimestampedOptSuffix": "-15500", - "logDirPath": "p/2023-02-22_15-17-12/testRunLogs/p/2023-02-22_17-14-24-15500" + "logDirBase": "p/2023-02-28_17-10-36/testRunLogs/p", + "logDirTimestamp": "2023-02-28_19-24-44", + "logDirTimestampedOptSuffix": "-14500", + "logDirPath": "p/2023-02-28_17-10-36/testRunLogs/p/2023-02-28_19-24-44-14500" }, "env": { "system": "Linux", From 0d30973b85b9b2667f57761e5d6cbc42478be1fa Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Tue, 28 Feb 2023 14:39:06 -0600 Subject: [PATCH 140/178] rename eos-vm-oc-enable in performance test to represent it only affecting non producer nodes. Resolve related test failure. 
--- tests/performance_tests/README.md | 8 ++++---- tests/performance_tests/performance_test.py | 2 +- .../performance_tests/performance_test_basic.py | 17 ++++++++++------- 3 files changed, 15 insertions(+), 12 deletions(-) diff --git a/tests/performance_tests/README.md b/tests/performance_tests/README.md index 7aaac9a321..6bc9a24c44 100644 --- a/tests/performance_tests/README.md +++ b/tests/performance_tests/README.md @@ -287,8 +287,8 @@ The Performance Harness main script `performance_test.py` can be configured usin Maximum size (in MiB) of the EOS VM OC code cache (default: 1024) * `--eos-vm-oc-compile-threads COMPILE_THREADS` Number of threads to use for EOS VM OC tier-up (default: 1) -* `--eos-vm-oc-enable` - Enable EOS VM OC tier-up runtime (default: False) +* `--non-prods-eos-vm-oc-enable` + Enable EOS VM OC tier-up runtime on non producer nodes (default: False) * `--block-log-retain-blocks BLOCKS_TO_RETAIN` If set to greater than 0, periodically prune the block log to store only configured number of most recent blocks. If set to 0, no blocks are be written to the block log; @@ -376,8 +376,8 @@ The following scripts are typically used by the Performance Harness main script Maximum size (in MiB) of the EOS VM OC code cache (default: 1024) * `--eos-vm-oc-compile-threads COMPILE_THREADS` Number of threads to use for EOS VM OC tier-up (default: 1) -* `--eos-vm-oc-enable` - Enable EOS VM OC tier-up runtime (default: False) +* `--non-prods-eos-vm-oc-enable` + Enable EOS VM OC tier-up runtime on non producer nodes (default: False) * `--block-log-retain-blocks BLOCKS_TO_RETAIN` If set to greater than 0, periodically prune the block log to store only configured number of most recent blocks. 
If set to 0, no blocks are be written to the block log; diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index e20c0ca710..a6cf6a846c 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -502,7 +502,7 @@ def main(): chainThreads=args.chain_threads, databaseMapMode=args.database_map_mode, wasmRuntime=args.wasm_runtime, contractsConsole=args.contracts_console, eosVmOcCacheSizeMb=args.eos_vm_oc_cache_size_mb, eosVmOcCompileThreads=args.eos_vm_oc_compile_threads, - eosVmOcEnable=args.eos_vm_oc_enable, blockLogRetainBlocks=args.block_log_retain_blocks, + blockLogRetainBlocks=args.block_log_retain_blocks, abiSerializerMaxTimeMs=990000, chainStateDbSizeMb=256000) producerPluginArgs = ProducerPluginArgs(disableSubjectiveBilling=args.disable_subjective_billing, lastBlockTimeOffsetUs=args.last_block_time_offset_us, produceTimeOffsetUs=args.produce_time_offset_us, diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 6ed9eb7777..d002c1500f 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -90,7 +90,7 @@ class SpecifiedContract: nodeosVers: str = "" specificExtraNodeosArgs: dict = field(default_factory=dict) _totalNodes: int = 2 - nonprodsEnableEosVmOcEnable: bool = False + nonProdsEosVmOcEnable: bool = False def log_transactions(self, trxDataFile, block): for trx in block['payload']['transactions']: @@ -100,10 +100,12 @@ def log_transactions(self, trxDataFile, block): def __post_init__(self): self._totalNodes = self.pnodes + 1 if self.totalNodes <= self.pnodes else self.totalNodes + nonProdsSpecificNodeosStr = "" if not self.prodsEnableTraceApi: - self.specificExtraNodeosArgs.update({f"{node}" : "--plugin eosio::trace_api_plugin" for node in range(self.pnodes, self._totalNodes)}) - if self.nonprodsEnableEosVmOcEnable: - 
self.specificExtraNodeosArgs.update({f"{node}" : "--eos-vm-oc-enable" for node in range(self.pnodes, self._totalNodes)}) + nonProdsSpecificNodeosStr += "--plugin eosio::trace_api_plugin " + if self.nonProdsEosVmOcEnable: + nonProdsSpecificNodeosStr += "--eos-vm-oc-enable " + self.specificExtraNodeosArgs.update({f"{node}" : nonProdsSpecificNodeosStr for node in range(self.pnodes, self._totalNodes)}) assert self.nodeosVers != "v1" and self.nodeosVers != "v0", f"nodeos version {Utils.getNodeosVersion().split('.')[0]} is unsupported by performance test" if self.nodeosVers == "v2": self.fetchBlock = lambda node, blockNum: node.processUrllibRequest("chain", "get_block", {"block_num_or_id":blockNum}, silentErrors=False, exitOnError=True) @@ -563,7 +565,7 @@ def createBaseArgumentParser(): ptbBaseParserGroup.add_argument("--contracts-console", help="print contract's output to console", action='store_true') ptbBaseParserGroup.add_argument("--eos-vm-oc-cache-size-mb", type=int, help="Maximum size (in MiB) of the EOS VM OC code cache", default=1024) ptbBaseParserGroup.add_argument("--eos-vm-oc-compile-threads", type=int, help="Number of threads to use for EOS VM OC tier-up", default=1) - ptbBaseParserGroup.add_argument("--eos-vm-oc-enable", help="Enable EOS VM OC tier-up runtime", action='store_true') + ptbBaseParserGroup.add_argument("--non-prods-eos-vm-oc-enable", help="Enable EOS VM OC tier-up runtime on non producer nodes", action='store_true') ptbBaseParserGroup.add_argument("--block-log-retain-blocks", type=int, help="If set to greater than 0, periodically prune the block log to\ store only configured number of most recent blocks. 
If set to 0, no blocks are be written to the block log;\ block log file is removed after startup.", default=None) @@ -605,7 +607,8 @@ def main(): chainPluginArgs = ChainPluginArgs(signatureCpuBillablePct=args.signature_cpu_billable_pct, chainThreads=args.chain_threads, databaseMapMode=args.database_map_mode, wasmRuntime=args.wasm_runtime, contractsConsole=args.contracts_console, - eosVmOcCacheSizeMb=args.eos_vm_oc_cache_size_mb, eosVmOcCompileThreads=args.eos_vm_oc_compile_threads, blockLogRetainBlocks=args.block_log_retain_blocks, + eosVmOcCacheSizeMb=args.eos_vm_oc_cache_size_mb, eosVmOcCompileThreads=args.eos_vm_oc_compile_threads, + blockLogRetainBlocks=args.block_log_retain_blocks, abiSerializerMaxTimeMs=990000, chainStateDbSizeMb=256000) lbto = args.last_block_time_offset_us @@ -630,7 +633,7 @@ def main(): testClusterConfig = PerformanceTestBasic.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs, specifiedContract=specifiedContract, loggingLevel=args.cluster_log_lvl, - nodeosVers=Utils.getNodeosVersion().split('.')[0], nonprodsEnableEosVmOcEnable=args.eos_vm_oc_enable) + nodeosVers=Utils.getNodeosVersion().split('.')[0], nonProdsEosVmOcEnable=args.non_prods_eos_vm_oc_enable) if args.contracts_console and testClusterConfig.loggingLevel != "debug" and testClusterConfig.loggingLevel != "all": print("Enabling contracts-console will not print anything unless debug level is 'debug' or higher." 
From 70754472657f8c0b80d6d976534b8b403d652b54 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Tue, 28 Feb 2023 17:28:03 -0600 Subject: [PATCH 141/178] add fix for duplicate transaction in transaction generators when resigning transactions --- tests/trx_generator/trx_generator.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/trx_generator/trx_generator.cpp b/tests/trx_generator/trx_generator.cpp index 97ec8d948a..1f8a8af891 100644 --- a/tests/trx_generator/trx_generator.cpp +++ b/tests/trx_generator/trx_generator.cpp @@ -55,7 +55,9 @@ namespace eosio::testing { void trx_generator_base::update_resign_transaction(signed_transaction& trx, const fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const chain_id_type& chain_id, const block_id_type& last_irr_block_id) { trx.context_free_actions.clear(); - trx.context_free_actions.emplace_back(action({}, config::null_account_name, name("nonce"), fc::raw::pack(std::to_string(nonce_prefix) + ":" + std::to_string(++nonce) + ":" + fc::time_point::now().time_since_epoch().count()))); + trx.context_free_actions.emplace_back(action({}, config::null_account_name, name("nonce"), + fc::raw::pack(std::to_string(_generator_id) + ":" + std::to_string(nonce_prefix) + ":" + std::to_string(++nonce) + ":" + + fc::time_point::now().time_since_epoch().count()))); set_transaction_headers(trx, last_irr_block_id, trx_expiration); trx.signatures.clear(); trx.sign(priv_key, chain_id); From 48022c93b64e758499c98762532e86244df906f4 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 1 Mar 2023 13:03:33 -0600 Subject: [PATCH 142/178] Update ChainPluginArgs to capture arguments added to chain plugin. 
--- .../NodeosPluginArgs/ChainPluginArgs.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tests/performance_tests/NodeosPluginArgs/ChainPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/ChainPluginArgs.py index e3cd77c64c..22ed36c15d 100755 --- a/tests/performance_tests/NodeosPluginArgs/ChainPluginArgs.py +++ b/tests/performance_tests/NodeosPluginArgs/ChainPluginArgs.py @@ -14,6 +14,18 @@ class ChainPluginArgs(BasePluginArgs): blocksDir: str=None _blocksDirNodeosDefault: str='"blocks"' _blocksDirNodeosArg: str="--blocks-dir" + blocksLogStride: str=None + _blocksLogStrideNodeosDefault: str=None + _blocksLogStrideNodeosArg: str="--blocks-log-stride" + maxRetainedBlockFiles: str=None + _maxRetainedBlockFilesNodeosDefault: str=None + _maxRetainedBlockFilesNodeosArg: str="--max-retained-block-files" + blocksRetainedDir: str=None + _blocksRetainedDirNodeosDefault: str=None + _blocksRetainedDirNodeosArg: str="--blocks-retained-dir" + blocksArchiveDir: str=None + _blocksArchiveDirNodeosDefault: str=None + _blocksArchiveDirNodeosArg: str="--blocks-archive-dir" stateDir: str=None _stateDirNodeosDefault: str='"state"' _stateDirNodeosArg: str="--state-dir" From ec5db248b7777837a748b974952520cf47ca8a55 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 1 Mar 2023 15:26:10 -0600 Subject: [PATCH 143/178] Update the min threads to start at when calculating number of threads to give to plugins to the plugin default. 
--- tests/performance_tests/README.md | 18 +++++++------- tests/performance_tests/performance_test.py | 27 ++++++++++++--------- 2 files changed, 24 insertions(+), 21 deletions(-) diff --git a/tests/performance_tests/README.md b/tests/performance_tests/README.md index ab3a53befc..c860fc0361 100644 --- a/tests/performance_tests/README.md +++ b/tests/performance_tests/README.md @@ -323,19 +323,19 @@ Performance Harness: * `--skip-tps-test` Determines whether to skip the max TPS measurement tests (default: False) * `--calc-producer-threads {none,lmax,full}` Determines whether to calculate number of worker threads to use in producer thread pool ("none", "lmax", or "full"). - In "none" mode, the default, no calculation will be attempted and default configured --producer-threads value will be used. - In "lmax" mode, producer threads will incrementally be tested until the performance rate ceases to increase with the addition of additional threads. - In "full" mode producer threads will incrementally be tested from 2..num logical processors, recording each performance and choosing the local max performance (same value as would be discovered in "lmax" mode). Useful for graphing the full performance impact of each available thread. (default: none) + In "none" mode, the default, no calculation will be attempted and the configured --producer-threads value will be used. + In "lmax" mode, producer threads will incrementally be tested, starting at plugin default, until the performance rate ceases to increase with the addition of additional threads. + In "full" mode producer threads will incrementally be tested from plugin default..num logical processors, recording each performance and choosing the local max performance (same value as would be discovered in "lmax" mode). Useful for graphing the full performance impact of each available thread. 
(default: none) * `--calc-chain-threads {none,lmax,full}` Determines whether to calculate number of worker threads to use in chain thread pool ("none", "lmax", or "full"). - In "none" mode, the default, no calculation will be attempted and default configured --chain-threads value will be used. - In "lmax" mode, producer threads will incrementally be tested until the performance rate ceases to increase with the addition of additional threads. - In "full" mode producer threads will incrementally be tested from 2..num logical processors, recording each performance and choosing the local max performance (same value as would be discovered in "lmax" mode). Useful for graphing the full performance impact of each available thread. (default: none) + In "none" mode, the default, no calculation will be attempted and the configured --chain-threads value will be used. + In "lmax" mode, producer threads will incrementally be tested, starting at plugin default, until the performance rate ceases to increase with the addition of additional threads. + In "full" mode producer threads will incrementally be tested from plugin default..num logical processors, recording each performance and choosing the local max performance (same value as would be discovered in "lmax" mode). Useful for graphing the full performance impact of each available thread. (default: none) * `--calc-net-threads {none,lmax,full}` Determines whether to calculate number of worker threads to use in net thread pool ("none", "lmax", or "full"). - In "none" mode, the default, no calculation will be attempted and default configured --net-threads value will be used. - In "lmax" mode, producer threads will incrementally be tested until the performance rate ceases to increase with the addition of additional threads. 
- In "full" mode producer threads will incrementally be tested from 2..num logical processors, recording each performance and choosing the local max performance (same value as would be discovered in "lmax" mode). Useful for graphing the full performance impact of each available thread. (default: none) + In "none" mode, the default, no calculation will be attempted and the configured --net-threads value will be used. + In "lmax" mode, producer threads will incrementally be tested, starting at plugin default, until the performance rate ceases to increase with the addition of additional threads. + In "full" mode producer threads will incrementally be tested from plugin default..num logical processors, recording each performance and choosing the local max performance (same value as would be discovered in "lmax" mode). Useful for graphing the full performance impact of each available thread. (default: none) * `--del-test-report` Whether to save json reports from each test scenario. (default: False) Performance Harness - TPS Test Config: diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index a6cf6a846c..9d9e904d41 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -391,7 +391,8 @@ def runTest(self): optType = PerformanceTest.PluginThreadOptRunType.FULL else: optType = PerformanceTest.PluginThreadOptRunType.LOCAL_MAX - prodResults = self.optimizePluginThreadCount(optPlugin=PerformanceTest.PluginThreadOpt.PRODUCER, optType=optType) + prodResults = self.optimizePluginThreadCount(optPlugin=PerformanceTest.PluginThreadOpt.PRODUCER, optType=optType, + minThreadCount=self.clusterConfig.extraNodeosArgs.producerPluginArgs._producerThreadsNodeosDefault) print(f"Producer Thread Optimization results: {prodResults}") self.clusterConfig.extraNodeosArgs.producerPluginArgs.threads = prodResults.recommendedThreadCount @@ -402,7 +403,8 @@ def runTest(self): optType = 
PerformanceTest.PluginThreadOptRunType.FULL else: optType = PerformanceTest.PluginThreadOptRunType.LOCAL_MAX - chainResults = self.optimizePluginThreadCount(optPlugin=PerformanceTest.PluginThreadOpt.CHAIN, optType=optType) + chainResults = self.optimizePluginThreadCount(optPlugin=PerformanceTest.PluginThreadOpt.CHAIN, optType=optType, + minThreadCount=self.clusterConfig.extraNodeosArgs.chainPluginArgs._chainThreadsNodeosDefault) print(f"Chain Thread Optimization results: {chainResults}") self.clusterConfig.extraNodeosArgs.chainPluginArgs.threads = chainResults.recommendedThreadCount @@ -413,7 +415,8 @@ def runTest(self): optType = PerformanceTest.PluginThreadOptRunType.FULL else: optType = PerformanceTest.PluginThreadOptRunType.LOCAL_MAX - netResults = self.optimizePluginThreadCount(optPlugin=PerformanceTest.PluginThreadOpt.NET, optType=optType) + netResults = self.optimizePluginThreadCount(optPlugin=PerformanceTest.PluginThreadOpt.NET, optType=optType, + minThreadCount=self.clusterConfig.extraNodeosArgs.netPluginArgs._netThreadsNodeosDefault) print(f"Net Thread Optimization results: {netResults}") self.clusterConfig.extraNodeosArgs.netPluginArgs.threads = netResults.recommendedThreadCount @@ -452,21 +455,21 @@ def createArgumentParser(): ptParserGroup = ptParser.add_argument_group(title=ptGrpTitle, description=ptGrpDescription) ptParserGroup.add_argument("--skip-tps-test", help="Determines whether to skip the max TPS measurement tests", action='store_true') ptParserGroup.add_argument("--calc-producer-threads", type=str, help="Determines whether to calculate number of worker threads to use in producer thread pool (\"none\", \"lmax\", or \"full\"). \ - In \"none\" mode, the default, no calculation will be attempted and default configured --producer-threads value will be used. \ - In \"lmax\" mode, producer threads will incrementally be tested until the performance rate ceases to increase with the addition of additional threads. 
\ - In \"full\" mode producer threads will incrementally be tested from 2..num logical processors, recording each performance and choosing the local max performance (same value as would be discovered in \"lmax\" mode). \ + In \"none\" mode, the default, no calculation will be attempted and the configured --producer-threads value will be used. \ + In \"lmax\" mode, producer threads will incrementally be tested, starting at plugin default, until the performance rate ceases to increase with the addition of additional threads. \ + In \"full\" mode producer threads will incrementally be tested from plugin default..num logical processors, recording each performance and choosing the local max performance (same value as would be discovered in \"lmax\" mode). \ Useful for graphing the full performance impact of each available thread.", choices=["none", "lmax", "full"], default="none") ptParserGroup.add_argument("--calc-chain-threads", type=str, help="Determines whether to calculate number of worker threads to use in chain thread pool (\"none\", \"lmax\", or \"full\"). \ - In \"none\" mode, the default, no calculation will be attempted and default configured --chain-threads value will be used. \ - In \"lmax\" mode, producer threads will incrementally be tested until the performance rate ceases to increase with the addition of additional threads. \ - In \"full\" mode producer threads will incrementally be tested from 2..num logical processors, recording each performance and choosing the local max performance (same value as would be discovered in \"lmax\" mode). \ + In \"none\" mode, the default, no calculation will be attempted and the configured --chain-threads value will be used. \ + In \"lmax\" mode, producer threads will incrementally be tested, starting at plugin default, until the performance rate ceases to increase with the addition of additional threads. 
\ + In \"full\" mode producer threads will incrementally be tested from plugin default..num logical processors, recording each performance and choosing the local max performance (same value as would be discovered in \"lmax\" mode). \ Useful for graphing the full performance impact of each available thread.", choices=["none", "lmax", "full"], default="none") ptParserGroup.add_argument("--calc-net-threads", type=str, help="Determines whether to calculate number of worker threads to use in net thread pool (\"none\", \"lmax\", or \"full\"). \ - In \"none\" mode, the default, no calculation will be attempted and default configured --net-threads value will be used. \ - In \"lmax\" mode, producer threads will incrementally be tested until the performance rate ceases to increase with the addition of additional threads. \ - In \"full\" mode producer threads will incrementally be tested from 2..num logical processors, recording each performance and choosing the local max performance (same value as would be discovered in \"lmax\" mode). \ + In \"none\" mode, the default, no calculation will be attempted and the configured --net-threads value will be used. \ + In \"lmax\" mode, producer threads will incrementally be tested, starting at plugin default, until the performance rate ceases to increase with the addition of additional threads. \ + In \"full\" mode producer threads will incrementally be tested from plugin default..num logical processors, recording each performance and choosing the local max performance (same value as would be discovered in \"lmax\" mode). 
\ Useful for graphing the full performance impact of each available thread.", choices=["none", "lmax", "full"], default="none") ptParserGroup.add_argument("--del-test-report", help="Whether to save json reports from each test scenario.", action='store_true') From bad5a9bc912b057735d00f03930160d534a0e3be Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Wed, 1 Mar 2023 15:27:55 -0600 Subject: [PATCH 144/178] expose --user-trx-data-file to performance test --- tests/performance_tests/README.md | 4 ++-- tests/performance_tests/performance_test.py | 17 ++++++++++------- .../performance_tests/performance_test_basic.py | 2 +- 3 files changed, 13 insertions(+), 10 deletions(-) diff --git a/tests/performance_tests/README.md b/tests/performance_tests/README.md index ab3a53befc..dfe9cc473f 100644 --- a/tests/performance_tests/README.md +++ b/tests/performance_tests/README.md @@ -433,6 +433,8 @@ Performance Test Basic Base: * `--wasm-file WASM_FILE` WASM file name for contract (default: eosio.system.wasm) * `--abi-file ABI_FILE` ABI file name for contract (default: eosio.system.abi) +* `--user-trx-data-file USER_TRX_DATA_FILE` + Path to transaction data JSON file (default: None) * `--wasm-runtime RUNTIME` Override default WASM runtime ("eos-vm-jit", "eos-vm") "eos-vm-jit" : A WebAssembly runtime that compiles WebAssembly code to native x86 code prior to @@ -459,8 +461,6 @@ Performance Test Basic Single Test: The target transfers per second to send during test (default: 8000) * `--test-duration-sec TEST_DURATION_SEC` The duration of transfer trx generation for the test in seconds (default: 90) -* `--user-trx-data-file USER_TRX_DATA_FILE` - Path to transaction data JSON file (default: None) #### Launch Transaction Generators (TestHarness) diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index a6cf6a846c..6e25369b49 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py 
@@ -61,6 +61,7 @@ class PtConfig: calcProducerThreads: str="none" calcChainThreads: str="none" calcNetThreads: str="none" + userTrxDataFile: Path=None @dataclass @@ -101,7 +102,7 @@ def __init__(self, testHelperConfig: PerformanceTestBasic.TestHelperConfig=Perfo self.loggingConfig = PerformanceTest.LoggingConfig(logDirBase=Path(self.ptConfig.logDirRoot)/PurePath(PurePath(__file__).name).stem[0], logDirTimestamp=f"{self.testsStart.strftime('%Y-%m-%d_%H-%M-%S')}") - def performPtbBinarySearch(self, clusterConfig: PerformanceTestBasic.ClusterConfig, logDirRoot: Path, delReport: bool, quiet: bool, delPerfLogs: bool) -> TpsTestResult.PerfTestSearchResults: + def performPtbBinarySearch(self, clusterConfig: PerformanceTestBasic.ClusterConfig, logDirRoot: Path, delReport: bool, quiet: bool, delPerfLogs: bool, userTrxDataFile: Path) -> TpsTestResult.PerfTestSearchResults: floor = 0 ceiling = self.ptConfig.maxTpsToTest binSearchTarget = self.ptConfig.maxTpsToTest @@ -117,7 +118,7 @@ def performPtbBinarySearch(self, clusterConfig: PerformanceTestBasic.ClusterConf scenarioResult = PerformanceTest.PerfTestSearchIndivResult(success=False, searchTarget=binSearchTarget, searchFloor=floor, searchCeiling=ceiling, basicTestResult=ptbResult) ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=binSearchTarget, testTrxGenDurationSec=self.ptConfig.testDurationSec, tpsLimitPerGenerator=self.ptConfig.tpsLimitPerGenerator, numAddlBlocksToPrune=self.ptConfig.numAddlBlocksToPrune, logDirRoot=logDirRoot, delReport=delReport, - quiet=quiet, delPerfLogs=delPerfLogs) + quiet=quiet, userTrxDataFile=userTrxDataFile) myTest = PerformanceTestBasic(testHelperConfig=self.testHelperConfig, clusterConfig=clusterConfig, ptbConfig=ptbConfig) testSuccessful = myTest.runTest() @@ -233,7 +234,7 @@ def optimizePluginThreadCount(self, optPlugin: PluginThreadOpt, optType: Plugin setattr(getattr(clusterConfig.extraNodeosArgs, optPlugin.value + 'PluginArgs'), f"{optPlugin.value}Threads", threadCount) 
binSearchResults = self.performPtbBinarySearch(clusterConfig=clusterConfig, logDirRoot=self.loggingConfig.pluginThreadOptLogsDirPath, - delReport=True, quiet=False, delPerfLogs=True) + delReport=True, quiet=False, delPerfLogs=True, userTrxDataFile=self.ptConfig.userTrxDataFile) threadToMaxTpsDict[threadCount] = binSearchResults.maxTpsAchieved if not self.ptConfig.quiet: @@ -353,7 +354,7 @@ def performTpsTest(self) -> TpsTestResult: perfRunSuccessful = False binSearchResults = self.performPtbBinarySearch(clusterConfig=self.clusterConfig, logDirRoot=self.loggingConfig.ptbLogsDirPath, - delReport=self.ptConfig.delReport, quiet=self.ptConfig.quiet, delPerfLogs=self.ptConfig.delPerfLogs) + delReport=self.ptConfig.delReport, quiet=self.ptConfig.quiet, delPerfLogs=self.ptConfig.delPerfLogs, userTrxDataFile=self.ptConfig.userTrxDataFile) print(f"Successful rate of: {binSearchResults.maxTpsAchieved}") @@ -514,10 +515,11 @@ def main(): resourceMonitorPluginArgs = ResourceMonitorPluginArgs(resourceMonitorNotShutdownOnThresholdExceeded=True) extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs, resourceMonitorPluginArgs=resourceMonitorPluginArgs) + SC = PerformanceTestBasic.ClusterConfig.SpecifiedContract + specifiedContract=SC(contractDir=args.contract_dir, wasmFile=args.wasm_file, abiFile=args.abi_file, account=Account(args.account_name)) testClusterConfig = PerformanceTestBasic.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs, - specifiedContract=PerformanceTestBasic.ClusterConfig.SpecifiedContract(account=Account(args.account_name), - contractDir=args.contract_dir, wasmFile=args.wasm_file, abiFile=args.abi_file), + specifiedContract=specifiedContract, nodeosVers=Utils.getNodeosVersion().split('.')[0]) ptConfig = 
PerformanceTest.PtConfig(testDurationSec=args.test_iteration_duration_sec, @@ -534,7 +536,8 @@ def main(): skipTpsTests=args.skip_tps_test, calcProducerThreads=args.calc_producer_threads, calcChainThreads=args.calc_chain_threads, - calcNetThreads=args.calc_net_threads) + calcNetThreads=args.calc_net_threads, + userTrxDataFile=Path(args.user_trx_data_file) if args.user_trx_data_file is not None else None) myTest = PerformanceTest(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptConfig=ptConfig) perfRunSuccessful = myTest.runTest() diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index a1a2b34da7..0bd4beb704 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -560,6 +560,7 @@ def createBaseArgumentParser(): ptbBaseParserGroup.add_argument("--contract-dir", type=str, help="Path to contract dir", default="unittests/contracts/eosio.system") ptbBaseParserGroup.add_argument("--wasm-file", type=str, help="WASM file name for contract", default="eosio.system.wasm") ptbBaseParserGroup.add_argument("--abi-file", type=str, help="ABI file name for contract", default="eosio.system.abi") + ptbBaseParserGroup.add_argument("--user-trx-data-file", type=str, help="Path to transaction data JSON file") ptbBaseParserGroup.add_argument("--wasm-runtime", type=str, help="Override default WASM runtime (\"eos-vm-jit\", \"eos-vm\")\ \"eos-vm-jit\" : A WebAssembly runtime that compiles WebAssembly code to native x86 code prior to\ execution. 
\"eos-vm\" : A WebAssembly interpreter.", @@ -588,7 +589,6 @@ def createArgumentParser(): ptbParserGroup.add_argument("--target-tps", type=int, help="The target transfers per second to send during test", default=8000) ptbParserGroup.add_argument("--test-duration-sec", type=int, help="The duration of transfer trx generation for the test in seconds", default=90) - ptbParserGroup.add_argument("--user-trx-data-file", type=str, help="Path to userTrxDataTransfer.json") return ptbParser From ddd64861b0abee5b059cec16d159ab007be6b6f6 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Wed, 1 Mar 2023 15:39:54 -0600 Subject: [PATCH 145/178] remove unneeded arg in performPtbBinarySearch --- tests/performance_tests/performance_test.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index 6e25369b49..94bf72f28d 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -102,7 +102,7 @@ def __init__(self, testHelperConfig: PerformanceTestBasic.TestHelperConfig=Perfo self.loggingConfig = PerformanceTest.LoggingConfig(logDirBase=Path(self.ptConfig.logDirRoot)/PurePath(PurePath(__file__).name).stem[0], logDirTimestamp=f"{self.testsStart.strftime('%Y-%m-%d_%H-%M-%S')}") - def performPtbBinarySearch(self, clusterConfig: PerformanceTestBasic.ClusterConfig, logDirRoot: Path, delReport: bool, quiet: bool, delPerfLogs: bool, userTrxDataFile: Path) -> TpsTestResult.PerfTestSearchResults: + def performPtbBinarySearch(self, clusterConfig: PerformanceTestBasic.ClusterConfig, logDirRoot: Path, delReport: bool, quiet: bool, delPerfLogs: bool) -> TpsTestResult.PerfTestSearchResults: floor = 0 ceiling = self.ptConfig.maxTpsToTest binSearchTarget = self.ptConfig.maxTpsToTest @@ -118,7 +118,7 @@ def performPtbBinarySearch(self, clusterConfig: PerformanceTestBasic.ClusterConf scenarioResult = 
PerformanceTest.PerfTestSearchIndivResult(success=False, searchTarget=binSearchTarget, searchFloor=floor, searchCeiling=ceiling, basicTestResult=ptbResult) ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=binSearchTarget, testTrxGenDurationSec=self.ptConfig.testDurationSec, tpsLimitPerGenerator=self.ptConfig.tpsLimitPerGenerator, numAddlBlocksToPrune=self.ptConfig.numAddlBlocksToPrune, logDirRoot=logDirRoot, delReport=delReport, - quiet=quiet, userTrxDataFile=userTrxDataFile) + quiet=quiet, userTrxDataFile=self.ptConfig.userTrxDataFile) myTest = PerformanceTestBasic(testHelperConfig=self.testHelperConfig, clusterConfig=clusterConfig, ptbConfig=ptbConfig) testSuccessful = myTest.runTest() @@ -234,7 +234,7 @@ def optimizePluginThreadCount(self, optPlugin: PluginThreadOpt, optType: Plugin setattr(getattr(clusterConfig.extraNodeosArgs, optPlugin.value + 'PluginArgs'), f"{optPlugin.value}Threads", threadCount) binSearchResults = self.performPtbBinarySearch(clusterConfig=clusterConfig, logDirRoot=self.loggingConfig.pluginThreadOptLogsDirPath, - delReport=True, quiet=False, delPerfLogs=True, userTrxDataFile=self.ptConfig.userTrxDataFile) + delReport=True, quiet=False, delPerfLogs=True) threadToMaxTpsDict[threadCount] = binSearchResults.maxTpsAchieved if not self.ptConfig.quiet: @@ -354,7 +354,7 @@ def performTpsTest(self) -> TpsTestResult: perfRunSuccessful = False binSearchResults = self.performPtbBinarySearch(clusterConfig=self.clusterConfig, logDirRoot=self.loggingConfig.ptbLogsDirPath, - delReport=self.ptConfig.delReport, quiet=self.ptConfig.quiet, delPerfLogs=self.ptConfig.delPerfLogs, userTrxDataFile=self.ptConfig.userTrxDataFile) + delReport=self.ptConfig.delReport, quiet=self.ptConfig.quiet, delPerfLogs=self.ptConfig.delPerfLogs) print(f"Successful rate of: {binSearchResults.maxTpsAchieved}") From b152475c8f4a8a88ad9e2c34d427c3422e84510e Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Wed, 1 Mar 2023 16:02:27 -0600 Subject: [PATCH 146/178] also fix 
linearsearch test case for performance test. --- tests/performance_tests/performance_test.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index 94bf72f28d..2f8842e8b8 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -159,7 +159,8 @@ def performPtbReverseLinearSearch(self, tpsInitial: int) -> TpsTestResult.PerfTe ptbResult = PerformanceTest.PerfTestSearchIndivResult.PerfTestBasicResult() scenarioResult = PerformanceTest.PerfTestSearchIndivResult(success=False, searchTarget=searchTarget, searchFloor=absFloor, searchCeiling=absCeiling, basicTestResult=ptbResult) ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=searchTarget, testTrxGenDurationSec=self.ptConfig.testDurationSec, tpsLimitPerGenerator=self.ptConfig.tpsLimitPerGenerator, - numAddlBlocksToPrune=self.ptConfig.numAddlBlocksToPrune, logDirRoot=self.loggingConfig.ptbLogsDirPath, delReport=self.ptConfig.delReport, quiet=self.ptConfig.quiet, delPerfLogs=self.ptConfig.delPerfLogs) + numAddlBlocksToPrune=self.ptConfig.numAddlBlocksToPrune, logDirRoot=self.loggingConfig.ptbLogsDirPath, delReport=self.ptConfig.delReport, + quiet=self.ptConfig.quiet, delPerfLogs=self.ptConfig.delPerfLogs, userTrxDataFile=self.ptConfig.userTrxDataFile) myTest = PerformanceTestBasic(testHelperConfig=self.testHelperConfig, clusterConfig=self.clusterConfig, ptbConfig=ptbConfig) testSuccessful = myTest.runTest() From 576ba161f259cf811ed5d8bc3ca36fad39ba1125 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 1 Mar 2023 18:18:03 -0600 Subject: [PATCH 147/178] Handle exceptions occurring during runTest.
--- tests/performance_tests/performance_test_basic.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 0bd4beb704..5276e84527 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -10,6 +10,7 @@ import json import log_reader import inspect +import traceback from pathlib import Path, PurePath sys.path.append(str(PurePath(PurePath(Path(__file__).absolute()).parent).parent)) @@ -493,6 +494,9 @@ def runTest(self) -> bool: testSuccessful = False print(f"Error: Transactions received: {self.data.totalTransactions} did not match expected total: {self.ptbConfig.expectedTransactionsSent}") + except: + traceback.print_exc() + finally: TestHelper.shutdown( self.cluster, From 85eecb0961769ed70c0299a162348d8753c34714 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 2 Mar 2023 09:01:24 -0600 Subject: [PATCH 148/178] Update ProducerPluginArgs for new arguments introduced on main. 
--- tests/performance_tests/NodeosPluginArgs/ProducerPluginArgs.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/performance_tests/NodeosPluginArgs/ProducerPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/ProducerPluginArgs.py index 98a234e788..6cee80285b 100755 --- a/tests/performance_tests/NodeosPluginArgs/ProducerPluginArgs.py +++ b/tests/performance_tests/NodeosPluginArgs/ProducerPluginArgs.py @@ -92,6 +92,9 @@ class ProducerPluginArgs(BasePluginArgs): snapshotsDir: str=None _snapshotsDirNodeosDefault: str='"snapshots"' _snapshotsDirNodeosArg: str="--snapshots-dir" + maxReadOnlyTransactionTime: int=None + _maxReadOnlyTransactionTimeNodeosDefault: int=150 + _maxReadOnlyTransactionTimeNodeosArg: str="--max-read-only-transaction-time" def main(): pluginArgs = ProducerPluginArgs() From 9cf5ffe5c03ea76de1daaaaba1e4f1fac0888033 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Thu, 2 Mar 2023 13:12:17 -0600 Subject: [PATCH 149/178] set cmake test runs of performance_test_basic to a lower value for chain-state-db-size-mb to reduce allocated space --- tests/performance_tests/CMakeLists.txt | 10 +++++----- tests/performance_tests/README.md | 4 ++++ tests/performance_tests/performance_test_basic.py | 3 ++- 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/tests/performance_tests/CMakeLists.txt b/tests/performance_tests/CMakeLists.txt index 44bb03ba7c..c00efcc18f 100644 --- a/tests/performance_tests/CMakeLists.txt +++ b/tests/performance_tests/CMakeLists.txt @@ -12,11 +12,11 @@ configure_file(ramTrxData.json ramTrxData.json COPYONLY) configure_file(userTrxDataTransfer.json userTrxDataTransfer.json COPYONLY) configure_file(userTrxDataNewAccount.json userTrxDataNewAccount.json COPYONLY) -add_test(NAME performance_test_basic COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --clean-run WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME 
performance_test_basic_ex_transfer_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --clean-run --user-trx-data-file tests/performance_tests/userTrxDataTransfer.json WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME performance_test_basic_ex_new_acct_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --clean-run --user-trx-data-file tests/performance_tests/userTrxDataNewAccount.json WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME performance_test_basic_ex_cpu_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --clean-run --account-name "c" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/performance_tests/cpuTrxData.json WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME performance_test_basic_ex_ram_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --clean-run --account-name "r" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/performance_tests/ramTrxData.json WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --clean-run --chain-state-db-size-mb 200 WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic_ex_transfer_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --clean-run --chain-state-db-size-mb 200 
--user-trx-data-file tests/performance_tests/userTrxDataTransfer.json WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic_ex_new_acct_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --clean-run --chain-state-db-size-mb 200 --user-trx-data-file tests/performance_tests/userTrxDataNewAccount.json WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic_ex_cpu_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --clean-run --chain-state-db-size-mb 200 --account-name "c" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/performance_tests/cpuTrxData.json WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic_ex_ram_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --clean-run --chain-state-db-size-mb 200 --account-name "r" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/performance_tests/ramTrxData.json WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME log_reader_tests COMMAND tests/performance_tests/log_reader_tests.py WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME validate_nodeos_plugin_args COMMAND tests/performance_tests/validate_nodeos_plugin_args.py WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST performance_test_basic PROPERTY LABELS nonparallelizable_tests) diff --git a/tests/performance_tests/README.md b/tests/performance_tests/README.md index 8e453d3026..e82ae30b28 100644 --- a/tests/performance_tests/README.md +++ b/tests/performance_tests/README.md @@ -316,6 +316,8 @@ Performance Test Basic Base: block log file is 
removed after startup. (default: None) * `--http-threads HTTP_THREADS` Number of worker threads in http thread pool (default: 2) +* `--chain-state-db-size-mb DB_SIZE_MiB` + Maximum size (in MiB) of the chain state database (default: 25600) Performance Harness: Performance Harness testing configuration items. @@ -452,6 +454,8 @@ Performance Test Basic Base: block log file is removed after startup. (default: None) * `--http-threads HTTP_THREADS` Number of worker threads in http thread pool (default: 2) +* `--chain-state-db-size-mb DB_SIZE_MiB` + Maximum size (in MiB) of the chain state database (default: 25600) Performance Test Basic Single Test: Performance Test Basic single test configuration items. Useful for running a single test directly. These items may not be directly configurable from diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 5276e84527..e97d88a61a 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -577,6 +577,7 @@ def createBaseArgumentParser(): store only configured number of most recent blocks. 
If set to 0, no blocks are be written to the block log;\ block log file is removed after startup.", default=None) ptbBaseParserGroup.add_argument("--http-threads", type=int, help="Number of worker threads in http thread pool", default=2) + ptbBaseParserGroup.add_argument("--chain-state-db-size-mb", type=int, help="Maximum size (in MiB) of the chain state database", default=25600) return ptbBaseParser @@ -615,7 +616,7 @@ def main(): wasmRuntime=args.wasm_runtime, contractsConsole=args.contracts_console, eosVmOcCacheSizeMb=args.eos_vm_oc_cache_size_mb, eosVmOcCompileThreads=args.eos_vm_oc_compile_threads, blockLogRetainBlocks=args.block_log_retain_blocks, - abiSerializerMaxTimeMs=990000, chainStateDbSizeMb=256000) + chainStateDbSizeMb=args.chain_state_db_size_mb, abiSerializerMaxTimeMs=990000) lbto = args.last_block_time_offset_us lbcep = args.last_block_cpu_effort_percent From 3f007ba932138b189b2642af85ae0dc8164cb89a Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Thu, 2 Mar 2023 15:26:25 -0600 Subject: [PATCH 150/178] fix introduced compatibility issues for nodeos 2.0 in performance tests --- tests/performance_tests/log_reader.py | 5 ++++- tests/performance_tests/performance_test.py | 8 ++++++-- tests/performance_tests/performance_test_basic.py | 8 ++++++-- 3 files changed, 16 insertions(+), 5 deletions(-) diff --git a/tests/performance_tests/log_reader.py b/tests/performance_tests/log_reader.py index c5ae3232a6..786e632d6b 100644 --- a/tests/performance_tests/log_reader.py +++ b/tests/performance_tests/log_reader.py @@ -241,7 +241,10 @@ def scrapeBlockTrxDataLog(trxDict, path): #blockTrxData.txt selectedopen = selectedOpen(path) with selectedopen(path, 'rt') as f: - trxDict.update(dict([(x[0], trxData(blockNum=x[1], blockTime=x[2], cpuUsageUs=x[3], netUsageUs=x[4])) for x in (line.rstrip('\n').split(',') for line in f)])) + if Utils.getNodeosVersion().split('.')[0] == "v2": + trxDict.update(dict([(x[0], trxData(blockNum=x[1], cpuUsageUs=x[2], 
netUsageUs=x[3])) for x in (line.rstrip('\n').split(',') for line in f)])) + else: + trxDict.update(dict([(x[0], trxData(blockNum=x[1], blockTime=x[2], cpuUsageUs=x[3], netUsageUs=x[4])) for x in (line.rstrip('\n').split(',') for line in f)])) def scrapeBlockDataLog(blockDict, path): #blockData.txt diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index 5a9882f540..da8b8d0354 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -516,7 +516,11 @@ def main(): httpPluginArgs = HttpPluginArgs(httpMaxResponseTimeMs=args.http_max_response_time_ms, httpMaxBytesInFlightMb=args.http_max_bytes_in_flight_mb, httpThreads=args.http_threads) netPluginArgs = NetPluginArgs(netThreads=args.net_threads, maxClients=0) - resourceMonitorPluginArgs = ResourceMonitorPluginArgs(resourceMonitorNotShutdownOnThresholdExceeded=True) + nodeosVers=Utils.getNodeosVersion().split('.')[0] + if nodeosVers == "v2": + resourceMonitorPluginArgs = ResourceMonitorPluginArgs() + else: + resourceMonitorPluginArgs = ResourceMonitorPluginArgs(resourceMonitorNotShutdownOnThresholdExceeded=True) extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs, resourceMonitorPluginArgs=resourceMonitorPluginArgs) SC = PerformanceTestBasic.ClusterConfig.SpecifiedContract @@ -524,7 +528,7 @@ def main(): testClusterConfig = PerformanceTestBasic.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs, specifiedContract=specifiedContract, - nodeosVers=Utils.getNodeosVersion().split('.')[0]) + nodeosVers=nodeosVers) ptConfig = PerformanceTest.PtConfig(testDurationSec=args.test_iteration_duration_sec, finalDurationSec=args.final_iterations_duration_sec, diff --git 
a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index e97d88a61a..306605a1fe 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -631,7 +631,11 @@ def main(): httpPluginArgs = HttpPluginArgs(httpMaxResponseTimeMs=args.http_max_response_time_ms, httpMaxBytesInFlightMb=args.http_max_bytes_in_flight_mb, httpThreads=args.http_threads) netPluginArgs = NetPluginArgs(netThreads=args.net_threads, maxClients=0) - resourceMonitorPluginArgs = ResourceMonitorPluginArgs(resourceMonitorNotShutdownOnThresholdExceeded=True) + nodeosVers=Utils.getNodeosVersion().split('.')[0] + if nodeosVers == "v2": + resourceMonitorPluginArgs = ResourceMonitorPluginArgs() + else: + resourceMonitorPluginArgs = ResourceMonitorPluginArgs(resourceMonitorNotShutdownOnThresholdExceeded=True) ENA = PerformanceTestBasic.ClusterConfig.ExtraNodeosArgs extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs, resourceMonitorPluginArgs=resourceMonitorPluginArgs) @@ -640,7 +644,7 @@ def main(): testClusterConfig = PerformanceTestBasic.ClusterConfig(pnodes=args.p, totalNodes=args.n, topo=args.s, genesisPath=args.genesis, prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs, specifiedContract=specifiedContract, loggingLevel=args.cluster_log_lvl, - nodeosVers=Utils.getNodeosVersion().split('.')[0], nonProdsEosVmOcEnable=args.non_prods_eos_vm_oc_enable) + nodeosVers=nodeosVers, nonProdsEosVmOcEnable=args.non_prods_eos_vm_oc_enable) if args.contracts_console and testClusterConfig.loggingLevel != "debug" and testClusterConfig.loggingLevel != "all": print("Enabling contracts-console will not print anything unless debug level is 'debug' or higher." 
From 480eb3126075d9251816e4b5e7ef1e548ef6802b Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Thu, 2 Mar 2023 15:55:22 -0600 Subject: [PATCH 151/178] simplify version check for resource monitor plugin constructor in performance tests --- tests/performance_tests/performance_test.py | 5 +---- tests/performance_tests/performance_test_basic.py | 5 +---- 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index da8b8d0354..cf21169d32 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -517,10 +517,7 @@ def main(): httpThreads=args.http_threads) netPluginArgs = NetPluginArgs(netThreads=args.net_threads, maxClients=0) nodeosVers=Utils.getNodeosVersion().split('.')[0] - if nodeosVers == "v2": - resourceMonitorPluginArgs = ResourceMonitorPluginArgs() - else: - resourceMonitorPluginArgs = ResourceMonitorPluginArgs(resourceMonitorNotShutdownOnThresholdExceeded=True) + resourceMonitorPluginArgs = ResourceMonitorPluginArgs(resourceMonitorNotShutdownOnThresholdExceeded=not nodeosVers == "v2") extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs, resourceMonitorPluginArgs=resourceMonitorPluginArgs) SC = PerformanceTestBasic.ClusterConfig.SpecifiedContract diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 306605a1fe..e665ed4ca9 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -632,10 +632,7 @@ def main(): httpThreads=args.http_threads) netPluginArgs = NetPluginArgs(netThreads=args.net_threads, maxClients=0) nodeosVers=Utils.getNodeosVersion().split('.')[0] - if nodeosVers == "v2": - resourceMonitorPluginArgs = ResourceMonitorPluginArgs() - else: - resourceMonitorPluginArgs = 
ResourceMonitorPluginArgs(resourceMonitorNotShutdownOnThresholdExceeded=True) + resourceMonitorPluginArgs = ResourceMonitorPluginArgs(resourceMonitorNotShutdownOnThresholdExceeded=not nodeosVers == "v2") ENA = PerformanceTestBasic.ClusterConfig.ExtraNodeosArgs extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs, resourceMonitorPluginArgs=resourceMonitorPluginArgs) From 1e36c5123f9ce62a68106aa40f49f681774ff0ab Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 3 Mar 2023 09:14:12 -0600 Subject: [PATCH 152/178] Update StateHistoryPluginArgs for new arguments introduced on main. --- .../NodeosPluginArgs/StateHistoryPluginArgs.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tests/performance_tests/NodeosPluginArgs/StateHistoryPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/StateHistoryPluginArgs.py index 732909a2b8..c7d8d6fbc9 100755 --- a/tests/performance_tests/NodeosPluginArgs/StateHistoryPluginArgs.py +++ b/tests/performance_tests/NodeosPluginArgs/StateHistoryPluginArgs.py @@ -14,6 +14,18 @@ class StateHistoryPluginArgs(BasePluginArgs): stateHistoryDir: str=None _stateHistoryDirNodeosDefault: str='"state-history"' _stateHistoryDirNodeosArg: str="--state-history-dir" + stateHistoryRetainedDir: str=None + _stateHistoryRetainedDirNodeosDefault: str=None + _stateHistoryRetainedDirNodeosArg: str="--state-history-retained-dir" + stateHistoryArchiveDir: str=None + _stateHistoryArchiveDirNodeosDefault: str=None + _stateHistoryArchiveDirNodeosArg: str="--state-history-archive-dir" + stateHistoryStride: str=None + _stateHistoryStrideNodeosDefault: str=None + _stateHistoryStrideNodeosArg: str="--state-history-stride" + maxRetainedHistoryFiles: str=None + _maxRetainedHistoryFilesNodeosDefault: str=None + _maxRetainedHistoryFilesNodeosArg: str="--max-retained-history-files" traceHistory: bool=None _traceHistoryNodeosDefault: bool=False 
_traceHistoryNodeosArg: str="--trace-history" From e6bfc9e214822c05aba777fee346a3a5c98cc2b5 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Fri, 3 Mar 2023 14:27:21 -0600 Subject: [PATCH 153/178] Add new section to log_reader_tests to ensure 2.0 compatibility doesn't get broken by changes to scrapeBlockTrxDataLog in the future. --- tests/performance_tests/CMakeLists.txt | 1 + .../block_trx_data_log_2_0_14.txt.gz | Bin 0 -> 152 bytes tests/performance_tests/log_reader.py | 4 ++-- tests/performance_tests/log_reader_tests.py | 8 ++++++++ 4 files changed, 11 insertions(+), 2 deletions(-) create mode 100644 tests/performance_tests/block_trx_data_log_2_0_14.txt.gz diff --git a/tests/performance_tests/CMakeLists.txt b/tests/performance_tests/CMakeLists.txt index c00efcc18f..75a51afcae 100644 --- a/tests/performance_tests/CMakeLists.txt +++ b/tests/performance_tests/CMakeLists.txt @@ -5,6 +5,7 @@ configure_file(read_log_data.py read_log_data.py COPYONLY) configure_file(log_reader_tests.py log_reader_tests.py COPYONLY) configure_file(nodeos_log_2_0_14.txt.gz nodeos_log_2_0_14.txt.gz COPYONLY) configure_file(nodeos_log_3_2.txt.gz nodeos_log_3_2.txt.gz COPYONLY) +configure_file(block_trx_data_log_2_0_14.txt.gz block_trx_data_log_2_0_14.txt.gz COPYONLY) configure_file(genesis.json genesis.json COPYONLY) configure_file(validate_nodeos_plugin_args.py validate_nodeos_plugin_args.py COPYONLY) configure_file(cpuTrxData.json cpuTrxData.json COPYONLY) diff --git a/tests/performance_tests/block_trx_data_log_2_0_14.txt.gz b/tests/performance_tests/block_trx_data_log_2_0_14.txt.gz new file mode 100644 index 0000000000000000000000000000000000000000..006f693786508fed8b42e84af99ef84d5b058e49 GIT binary patch literal 152 zcmV;J0B8RniwFpnQvzfF17d7%V{2b@a(G{4VRT_%Y;R{@GG8!XF*Gi8cys_Q%(o2# zK@0%FT(x{ab9Cj<9Q!XpFF?UY3(UdR=_G-TvNN%%>1dYG&H-IGfJI0=y6!Az#c%FOWTANB>@4w7V< G0001vWj$E{ literal 0 HcmV?d00001 diff --git a/tests/performance_tests/log_reader.py 
b/tests/performance_tests/log_reader.py index 786e632d6b..b84dcc237e 100644 --- a/tests/performance_tests/log_reader.py +++ b/tests/performance_tests/log_reader.py @@ -237,11 +237,11 @@ def scrapeTrxGenLog(trxSent, path): with selectedopen(path, 'rt') as f: trxSent.update(dict([(x[0], x[1]) for x in (line.rstrip('\n').split(',') for line in f)])) -def scrapeBlockTrxDataLog(trxDict, path): +def scrapeBlockTrxDataLog(trxDict, path, nodeosVersion=None): #blockTrxData.txt selectedopen = selectedOpen(path) with selectedopen(path, 'rt') as f: - if Utils.getNodeosVersion().split('.')[0] == "v2": + if Utils.getNodeosVersion().split('.')[0] == "v2" or nodeosVersion == "v2": trxDict.update(dict([(x[0], trxData(blockNum=x[1], cpuUsageUs=x[2], netUsageUs=x[3])) for x in (line.rstrip('\n').split(',') for line in f)])) else: trxDict.update(dict([(x[0], trxData(blockNum=x[1], blockTime=x[2], cpuUsageUs=x[3], netUsageUs=x[4])) for x in (line.rstrip('\n').split(',') for line in f)])) diff --git a/tests/performance_tests/log_reader_tests.py b/tests/performance_tests/log_reader_tests.py index fc9cdf8765..73be98bb2f 100755 --- a/tests/performance_tests/log_reader_tests.py +++ b/tests/performance_tests/log_reader_tests.py @@ -192,6 +192,14 @@ expectedTpsStats = log_reader.stats(41, 41, 41, 0.0, 0, 2) assert expectedTpsStats == stats , f"Error: Stats calculated: {stats} did not match expected stats: {expectedTpsStats}" +#ensure that scraping of trxDataLog is compatible with 2.0 +trxDict = {} +log_reader.scrapeBlockTrxDataLog(trxDict=trxDict, path=Path("tests")/"performance_tests"/"block_trx_data_log_2_0_14.txt", nodeosVersion="v2") +expectedDict = {} +expectedDict["41c6dca250f9b74d9fa6a8177a9c8390cb1d01b2123d6f88354f571f0053df72"] = log_reader.trxData(blockNum='112',cpuUsageUs='1253',netUsageUs='19') +expectedDict["fa17f9033589bb8757be009af46d465f0d903e26b7d198ea0fb6a3cbed93c2e6"] = log_reader.trxData(blockNum='112',cpuUsageUs='1263',netUsageUs='19') +assert trxDict == expectedDict, 
f"Scraped transaction dictionary: {trxDict} did not match expected dictionary : {expectedDict}" + testSuccessful = True exitCode = 0 if testSuccessful else 1 From cfeaa64b41ba8b36262f10efab74a63ceb6161e1 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Fri, 3 Mar 2023 15:00:41 -0600 Subject: [PATCH 154/178] refactor log_reader use of nodeos version so it isn't recalculated multiple times during a test run --- tests/performance_tests/log_reader.py | 16 +++++++++------- tests/performance_tests/log_reader_tests.py | 2 +- .../performance_tests/performance_test_basic.py | 2 +- 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/tests/performance_tests/log_reader.py b/tests/performance_tests/log_reader.py index b84dcc237e..1687a1a686 100644 --- a/tests/performance_tests/log_reader.py +++ b/tests/performance_tests/log_reader.py @@ -237,11 +237,11 @@ def scrapeTrxGenLog(trxSent, path): with selectedopen(path, 'rt') as f: trxSent.update(dict([(x[0], x[1]) for x in (line.rstrip('\n').split(',') for line in f)])) -def scrapeBlockTrxDataLog(trxDict, path, nodeosVersion=None): +def scrapeBlockTrxDataLog(trxDict, path, nodeosVers): #blockTrxData.txt selectedopen = selectedOpen(path) with selectedopen(path, 'rt') as f: - if Utils.getNodeosVersion().split('.')[0] == "v2" or nodeosVersion == "v2": + if nodeosVers == "v2": trxDict.update(dict([(x[0], trxData(blockNum=x[1], cpuUsageUs=x[2], netUsageUs=x[3])) for x in (line.rstrip('\n').split(',') for line in f)])) else: trxDict.update(dict([(x[0], trxData(blockNum=x[1], blockTime=x[2], cpuUsageUs=x[3], netUsageUs=x[4])) for x in (line.rstrip('\n').split(',') for line in f)])) @@ -438,7 +438,8 @@ def calcTrxLatencyCpuNetStats(trxDict : dict, blockDict: dict): basicStats(float(np.min(npLatencyCpuNetList[:,2])), float(np.max(npLatencyCpuNetList[:,2])), float(np.average(npLatencyCpuNetList[:,2])), float(np.std(npLatencyCpuNetList[:,2])), len(npLatencyCpuNetList)) def createReport(guide: chainBlocksGuide, tpsTestConfig: 
TpsTestConfig, tpsStats: stats, blockSizeStats: stats, trxLatencyStats: basicStats, trxCpuStats: basicStats, - trxNetStats: basicStats, forkedBlocks, droppedBlocks, prodWindows: productionWindows, notFound: dict, testStart: datetime, testFinish: datetime, argsDict: dict, completedRun: bool) -> dict: + trxNetStats: basicStats, forkedBlocks, droppedBlocks, prodWindows: productionWindows, notFound: dict, testStart: datetime, testFinish: datetime, + argsDict: dict, completedRun: bool, nodeosVers: str) -> dict: report = {} report['completedRun'] = completedRun report['testStart'] = testStart @@ -464,7 +465,7 @@ def createReport(guide: chainBlocksGuide, tpsTestConfig: TpsTestConfig, tpsStats report['Analysis']['ForksCount'] = len(forkedBlocks) report['args'] = argsDict report['env'] = {'system': system(), 'os': os.name, 'release': release(), 'logical_cpu_count': os.cpu_count()} - report['nodeosVersion'] = Utils.getNodeosVersion() + report['nodeosVersion'] = nodeosVers return report class LogReaderEncoder(json.JSONEncoder): @@ -489,14 +490,14 @@ def default(self, obj): def reportAsJSON(report: dict) -> json: return json.dumps(report, indent=2, cls=LogReaderEncoder) -def calcAndReport(data: chainData, tpsTestConfig: TpsTestConfig, artifacts: ArtifactPaths, argsDict: dict, testStart: datetime=None, completedRun: bool=True) -> dict: +def calcAndReport(data: chainData, tpsTestConfig: TpsTestConfig, artifacts: ArtifactPaths, argsDict: dict, testStart: datetime=None, completedRun: bool=True, nodeosVers: str="") -> dict: scrapeLog(data, artifacts.nodeosLogPath) trxSent = {} scrapeTrxGenTrxSentDataLogs(trxSent, artifacts.trxGenLogDirPath, tpsTestConfig.quiet) trxDict = {} - scrapeBlockTrxDataLog(trxDict, artifacts.blockTrxDataPath) + scrapeBlockTrxDataLog(trxDict, artifacts.blockTrxDataPath, nodeosVers) blockDict = {} scrapeBlockDataLog(blockDict, artifacts.blockDataPath) @@ -531,7 +532,8 @@ def calcAndReport(data: chainData, tpsTestConfig: TpsTestConfig, artifacts: Arti report = 
createReport(guide=guide, tpsTestConfig=tpsTestConfig, tpsStats=tpsStats, blockSizeStats=blkSizeStats, trxLatencyStats=trxLatencyStats, trxCpuStats=trxCpuStats, trxNetStats=trxNetStats, forkedBlocks=data.forkedBlocks, droppedBlocks=data.droppedBlocks, - prodWindows=prodWindows, notFound=notFound, testStart=start, testFinish=finish, argsDict=argsDict, completedRun=completedRun) + prodWindows=prodWindows, notFound=notFound, testStart=start, testFinish=finish, argsDict=argsDict, completedRun=completedRun, + nodeosVers=nodeosVers) return report def exportReportAsJSON(report: json, exportPath): diff --git a/tests/performance_tests/log_reader_tests.py b/tests/performance_tests/log_reader_tests.py index 73be98bb2f..37684dc4da 100755 --- a/tests/performance_tests/log_reader_tests.py +++ b/tests/performance_tests/log_reader_tests.py @@ -194,7 +194,7 @@ #ensure that scraping of trxDataLog is compatible with 2.0 trxDict = {} -log_reader.scrapeBlockTrxDataLog(trxDict=trxDict, path=Path("tests")/"performance_tests"/"block_trx_data_log_2_0_14.txt", nodeosVersion="v2") +log_reader.scrapeBlockTrxDataLog(trxDict=trxDict, path=Path("tests")/"performance_tests"/"block_trx_data_log_2_0_14.txt", nodeosVers="v2") expectedDict = {} expectedDict["41c6dca250f9b74d9fa6a8177a9c8390cb1d01b2123d6f88354f571f0053df72"] = log_reader.trxData(blockNum='112',cpuUsageUs='1253',netUsageUs='19') expectedDict["fa17f9033589bb8757be009af46d465f0d903e26b7d198ea0fb6a3cbed93c2e6"] = log_reader.trxData(blockNum='112',cpuUsageUs='1263',netUsageUs='19') diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index e665ed4ca9..b1fbf6364b 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -444,7 +444,7 @@ def analyzeResultsAndReport(self, testResult: PtbTpsTestResult): numBlocksToPrune=self.ptbConfig.numAddlBlocksToPrune, numTrxGensUsed=testResult.numGeneratorsUsed, 
targetTpsPerGenList=testResult.targetTpsPerGenList, quiet=self.ptbConfig.quiet) self.report = log_reader.calcAndReport(data=self.data, tpsTestConfig=tpsTestConfig, artifacts=artifactsLocate, argsDict=args, testStart=self.testStart, - completedRun=testResult.completedRun) + completedRun=testResult.completedRun,nodeosVers=self.clusterConfig.nodeosVers) jsonReport = None if not self.ptbConfig.quiet or not self.ptbConfig.delReport: From a631088f4d2952ed01568b597ceb0498c386d7e5 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Fri, 3 Mar 2023 15:23:38 -0600 Subject: [PATCH 155/178] pass along nodeos version in performance test rather than calculating in multiple places --- tests/performance_tests/performance_test.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index cf21169d32..0aff352dd4 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -281,7 +281,8 @@ def createTpsTestReport(self, tpsTestResult: TpsTestResult) -> dict: report['LongRunningMaxTpsReport'] = tpsTestResult.longRunningSearchResults.maxTpsReport return report - def createReport(self,producerThreadResult: PluginThreadOptResult=None, chainThreadResult: PluginThreadOptResult=None, netThreadResult: PluginThreadOptResult=None, tpsTestResult: dict=None) -> dict: + def createReport(self, producerThreadResult: PluginThreadOptResult=None, chainThreadResult: PluginThreadOptResult=None, netThreadResult: PluginThreadOptResult=None, + tpsTestResult: dict=None, nodeosVers: str="") -> dict: report = {} report['perfTestsBegin'] = self.testsStart report['perfTestsFinish'] = self.testsFinish @@ -299,7 +300,7 @@ def createReport(self,producerThreadResult: PluginThreadOptResult=None, chainThr report['args'] = self.prepArgsDict() report['env'] = {'system': system(), 'os': os.name, 'release': release(), 'logical_cpu_count': os.cpu_count()} - 
report['nodeosVersion'] = Utils.getNodeosVersion() + report['nodeosVersion'] = nodeosVers return report def reportAsJSON(self, report: dict) -> json: @@ -431,7 +432,7 @@ def runTest(self): self.testsFinish = datetime.utcnow() - self.report = self.createReport(producerThreadResult=prodResults, chainThreadResult=chainResults, netThreadResult=netResults, tpsTestResult=tpsTestResult) + self.report = self.createReport(producerThreadResult=prodResults, chainThreadResult=chainResults, netThreadResult=netResults, tpsTestResult=tpsTestResult, nodeosVers=self.clusterConfig.nodeosVers) jsonReport = self.reportAsJSON(self.report) if not self.ptConfig.quiet: From 60299d5cb5cf8cdfb394746fe58d6c2bd7e9a815 Mon Sep 17 00:00:00 2001 From: Clayton Calabrese Date: Fri, 3 Mar 2023 15:41:35 -0600 Subject: [PATCH 156/178] rename file passed into log_reader_tests to a proper .gz suffix --- tests/performance_tests/log_reader_tests.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/performance_tests/log_reader_tests.py b/tests/performance_tests/log_reader_tests.py index 37684dc4da..be4c09557f 100755 --- a/tests/performance_tests/log_reader_tests.py +++ b/tests/performance_tests/log_reader_tests.py @@ -194,7 +194,7 @@ #ensure that scraping of trxDataLog is compatible with 2.0 trxDict = {} -log_reader.scrapeBlockTrxDataLog(trxDict=trxDict, path=Path("tests")/"performance_tests"/"block_trx_data_log_2_0_14.txt", nodeosVers="v2") +log_reader.scrapeBlockTrxDataLog(trxDict=trxDict, path=Path("tests")/"performance_tests"/"block_trx_data_log_2_0_14.txt.gz", nodeosVers="v2") expectedDict = {} expectedDict["41c6dca250f9b74d9fa6a8177a9c8390cb1d01b2123d6f88354f571f0053df72"] = log_reader.trxData(blockNum='112',cpuUsageUs='1253',netUsageUs='19') expectedDict["fa17f9033589bb8757be009af46d465f0d903e26b7d198ea0fb6a3cbed93c2e6"] = log_reader.trxData(blockNum='112',cpuUsageUs='1263',netUsageUs='19') From 71a155b9cf51b8b1baf3785ee56adcb27198f6fa Mon Sep 17 00:00:00 2001 From: Peter 
Oschwald Date: Tue, 7 Mar 2023 12:20:41 -0600 Subject: [PATCH 157/178] Updates for changes in nodeos plugin args from merge from main. --- .../NodeosPluginArgs/CMakeLists.txt | 1 - .../NodeosPluginArgs/HttpClientPluginArgs.py | 27 ---------- .../NodeosPluginArgs/HttpPluginArgs.py | 12 ----- .../NodeosPluginArgs/NetPluginArgs.py | 3 ++ .../NodeosPluginArgs/__init__.py | 3 +- ...generate_nodeos_plugin_args_class_files.py | 2 - tests/performance_tests/README.md | 50 ------------------- .../performance_test_basic.py | 3 +- .../validate_nodeos_plugin_args.py | 6 +-- 9 files changed, 8 insertions(+), 99 deletions(-) delete mode 100755 tests/performance_tests/NodeosPluginArgs/HttpClientPluginArgs.py diff --git a/tests/performance_tests/NodeosPluginArgs/CMakeLists.txt b/tests/performance_tests/NodeosPluginArgs/CMakeLists.txt index 57833a3942..087be0e7a2 100644 --- a/tests/performance_tests/NodeosPluginArgs/CMakeLists.txt +++ b/tests/performance_tests/NodeosPluginArgs/CMakeLists.txt @@ -1,7 +1,6 @@ configure_file(__init__.py __init__.py COPYONLY) configure_file(BasePluginArgs.py BasePluginArgs.py COPYONLY) configure_file(ChainPluginArgs.py ChainPluginArgs.py COPYONLY) -configure_file(HttpClientPluginArgs.py HttpClientPluginArgs.py COPYONLY) configure_file(HttpPluginArgs.py HttpPluginArgs.py COPYONLY) configure_file(NetPluginArgs.py NetPluginArgs.py COPYONLY) configure_file(ProducerPluginArgs.py ProducerPluginArgs.py COPYONLY) diff --git a/tests/performance_tests/NodeosPluginArgs/HttpClientPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/HttpClientPluginArgs.py deleted file mode 100755 index 1a3bd81a1b..0000000000 --- a/tests/performance_tests/NodeosPluginArgs/HttpClientPluginArgs.py +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env python3 - -from dataclasses import dataclass -from .BasePluginArgs import BasePluginArgs - -""" -This file/class was generated by generate_nodeos_plugin_args_class_files.py -""" - -@dataclass -class HttpClientPluginArgs(BasePluginArgs): - 
_pluginNamespace: str="eosio" - _pluginName: str="http_client_plugin" - httpsClientRootCert: str=None - _httpsClientRootCertNodeosDefault: str=None - _httpsClientRootCertNodeosArg: str="--https-client-root-cert" - httpsClientValidatePeers: int=None - _httpsClientValidatePeersNodeosDefault: int=1 - _httpsClientValidatePeersNodeosArg: str="--https-client-validate-peers" - -def main(): - pluginArgs = HttpClientPluginArgs() - print(pluginArgs.supportedNodeosArgs()) - exit(0) - -if __name__ == '__main__': - main() diff --git a/tests/performance_tests/NodeosPluginArgs/HttpPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/HttpPluginArgs.py index 25b2336dc4..48f08f5204 100755 --- a/tests/performance_tests/NodeosPluginArgs/HttpPluginArgs.py +++ b/tests/performance_tests/NodeosPluginArgs/HttpPluginArgs.py @@ -17,18 +17,6 @@ class HttpPluginArgs(BasePluginArgs): httpServerAddress: str=None _httpServerAddressNodeosDefault: str="127.0.0.1:8888" _httpServerAddressNodeosArg: str="--http-server-address" - httpsServerAddress: str=None - _httpsServerAddressNodeosDefault: str=None - _httpsServerAddressNodeosArg: str="--https-server-address" - httpsCertificateChainFile: str=None - _httpsCertificateChainFileNodeosDefault: str=None - _httpsCertificateChainFileNodeosArg: str="--https-certificate-chain-file" - httpsPrivateKeyFile: str=None - _httpsPrivateKeyFileNodeosDefault: str=None - _httpsPrivateKeyFileNodeosArg: str="--https-private-key-file" - httpsEcdhCurve: str=None - _httpsEcdhCurveNodeosDefault: str="secp384r1" - _httpsEcdhCurveNodeosArg: str="--https-ecdh-curve" accessControlAllowOrigin: str=None _accessControlAllowOriginNodeosDefault: str=None _accessControlAllowOriginNodeosArg: str="--access-control-allow-origin" diff --git a/tests/performance_tests/NodeosPluginArgs/NetPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/NetPluginArgs.py index 20a090acf0..9c7bf8c668 100755 --- a/tests/performance_tests/NodeosPluginArgs/NetPluginArgs.py +++ 
b/tests/performance_tests/NodeosPluginArgs/NetPluginArgs.py @@ -26,6 +26,9 @@ class NetPluginArgs(BasePluginArgs): p2pAcceptTransactions: int=None _p2pAcceptTransactionsNodeosDefault: int=1 _p2pAcceptTransactionsNodeosArg: str="--p2p-accept-transactions" + p2pAutoBpPeer: str=None + _p2pAutoBpPeerNodeosDefault: str=None + _p2pAutoBpPeerNodeosArg: str="--p2p-auto-bp-peer" agentName: str=None _agentNameNodeosDefault: str="EOS Test Agent" _agentNameNodeosArg: str="--agent-name" diff --git a/tests/performance_tests/NodeosPluginArgs/__init__.py b/tests/performance_tests/NodeosPluginArgs/__init__.py index af58fe3fdd..9608f422f1 100644 --- a/tests/performance_tests/NodeosPluginArgs/__init__.py +++ b/tests/performance_tests/NodeosPluginArgs/__init__.py @@ -1,8 +1,7 @@ -__all__ = ['BasePluginArgs', 'ChainPluginArgs', 'HttpClientPluginArgs', 'HttpPluginArgs', 'NetPluginArgs', 'ProducerPluginArgs', 'ResourceMonitorPluginArgs', 'SignatureProviderPluginArgs', 'StateHistoryPluginArgs', 'TraceApiPluginArgs'] +__all__ = ['BasePluginArgs', 'ChainPluginArgs', 'HttpPluginArgs', 'NetPluginArgs', 'ProducerPluginArgs', 'ResourceMonitorPluginArgs', 'SignatureProviderPluginArgs', 'StateHistoryPluginArgs', 'TraceApiPluginArgs'] from .BasePluginArgs import BasePluginArgs from .ChainPluginArgs import ChainPluginArgs -from .HttpClientPluginArgs import HttpClientPluginArgs from .HttpPluginArgs import HttpPluginArgs from .NetPluginArgs import NetPluginArgs from .ProducerPluginArgs import ProducerPluginArgs diff --git a/tests/performance_tests/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py b/tests/performance_tests/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py index 1c200d2481..3c4550527b 100755 --- a/tests/performance_tests/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py +++ b/tests/performance_tests/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py @@ -14,7 +14,6 @@ Currently it generates the following scripts: - ChainPluginArgs.py -- 
HttpClientPluginArgs.py - HttpPluginArgs.py - NetPluginArgs.py - ProducerPluginArgs.py @@ -189,7 +188,6 @@ def writeHelpers(pluginName: str) -> str: dataclassFile.write(writeHelpers(f"{newPlugin}Args")) writeDataclass(plugin="chain_plugin:", dataFieldDict=newDict, pluginOptsDict=pluginOptsDict) - writeDataclass(plugin="http_client_plugin:", dataFieldDict=newDict, pluginOptsDict=pluginOptsDict) writeDataclass(plugin="http_plugin:", dataFieldDict=newDict, pluginOptsDict=pluginOptsDict) writeDataclass(plugin="net_plugin:", dataFieldDict=newDict, pluginOptsDict=pluginOptsDict) writeDataclass(plugin="producer_plugin:", dataFieldDict=newDict, pluginOptsDict=pluginOptsDict) diff --git a/tests/performance_tests/README.md b/tests/performance_tests/README.md index e82ae30b28..40d95bf109 100644 --- a/tests/performance_tests/README.md +++ b/tests/performance_tests/README.md @@ -1059,37 +1059,12 @@ Finally, the full detail test report for each of the determined max TPS throughp "_snapshotNodeosDefault": null, "_snapshotNodeosArg": "--snapshot" }, - "httpClientPluginArgs": { - "_pluginNamespace": "eosio", - "_pluginName": "http_client_plugin", - "httpsClientRootCert": null, - "_httpsClientRootCertNodeosDefault": null, - "_httpsClientRootCertNodeosArg": "--https-client-root-cert", - "httpsClientValidatePeers": null, - "_httpsClientValidatePeersNodeosDefault": 1, - "_httpsClientValidatePeersNodeosArg": "--https-client-validate-peers" - }, "httpPluginArgs": { "_pluginNamespace": "eosio", "_pluginName": "http_plugin", "unixSocketPath": null, "_unixSocketPathNodeosDefault": null, "_unixSocketPathNodeosArg": "--unix-socket-path", - "httpServerAddress": null, - "_httpServerAddressNodeosDefault": "127.0.0.1:8888", - "_httpServerAddressNodeosArg": "--http-server-address", - "httpsServerAddress": null, - "_httpsServerAddressNodeosDefault": null, - "_httpsServerAddressNodeosArg": "--https-server-address", - "httpsCertificateChainFile": null, - "_httpsCertificateChainFileNodeosDefault": 
null, - "_httpsCertificateChainFileNodeosArg": "--https-certificate-chain-file", - "httpsPrivateKeyFile": null, - "_httpsPrivateKeyFileNodeosDefault": null, - "_httpsPrivateKeyFileNodeosArg": "--https-private-key-file", - "httpsEcdhCurve": null, - "_httpsEcdhCurveNodeosDefault": "secp384r1", - "_httpsEcdhCurveNodeosArg": "--https-ecdh-curve", "accessControlAllowOrigin": null, "_accessControlAllowOriginNodeosDefault": null, "_accessControlAllowOriginNodeosArg": "--access-control-allow-origin", @@ -1668,37 +1643,12 @@ The Performance Test Basic generates, by default, a report that details results "_snapshotNodeosDefault": null, "_snapshotNodeosArg": "--snapshot" }, - "httpClientPluginArgs": { - "_pluginNamespace": "eosio", - "_pluginName": "http_client_plugin", - "httpsClientRootCert": null, - "_httpsClientRootCertNodeosDefault": null, - "_httpsClientRootCertNodeosArg": "--https-client-root-cert", - "httpsClientValidatePeers": null, - "_httpsClientValidatePeersNodeosDefault": 1, - "_httpsClientValidatePeersNodeosArg": "--https-client-validate-peers" - }, "httpPluginArgs": { "_pluginNamespace": "eosio", "_pluginName": "http_plugin", "unixSocketPath": null, "_unixSocketPathNodeosDefault": null, "_unixSocketPathNodeosArg": "--unix-socket-path", - "httpServerAddress": null, - "_httpServerAddressNodeosDefault": "127.0.0.1:8888", - "_httpServerAddressNodeosArg": "--http-server-address", - "httpsServerAddress": null, - "_httpsServerAddressNodeosDefault": null, - "_httpsServerAddressNodeosArg": "--https-server-address", - "httpsCertificateChainFile": null, - "_httpsCertificateChainFileNodeosDefault": null, - "_httpsCertificateChainFileNodeosArg": "--https-certificate-chain-file", - "httpsPrivateKeyFile": null, - "_httpsPrivateKeyFileNodeosDefault": null, - "_httpsPrivateKeyFileNodeosArg": "--https-private-key-file", - "httpsEcdhCurve": null, - "_httpsEcdhCurveNodeosDefault": "secp384r1", - "_httpsEcdhCurveNodeosArg": "--https-ecdh-curve", "accessControlAllowOrigin": null, 
"_accessControlAllowOriginNodeosDefault": null, "_accessControlAllowOriginNodeosArg": "--access-control-allow-origin", diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index b1fbf6364b..56f2cbd71f 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -15,7 +15,7 @@ from pathlib import Path, PurePath sys.path.append(str(PurePath(PurePath(Path(__file__).absolute()).parent).parent)) -from NodeosPluginArgs import ChainPluginArgs, HttpClientPluginArgs, HttpPluginArgs, NetPluginArgs, ProducerPluginArgs, ResourceMonitorPluginArgs, SignatureProviderPluginArgs, StateHistoryPluginArgs, TraceApiPluginArgs +from NodeosPluginArgs import ChainPluginArgs, HttpPluginArgs, NetPluginArgs, ProducerPluginArgs, ResourceMonitorPluginArgs, SignatureProviderPluginArgs, StateHistoryPluginArgs, TraceApiPluginArgs from TestHarness import Account, Cluster, TestHelper, Utils, WalletMgr, TransactionGeneratorsLauncher, TpsTrxGensConfig from TestHarness.TestHelper import AppArgs from dataclasses import dataclass, asdict, field @@ -52,7 +52,6 @@ class ClusterConfig: class ExtraNodeosArgs: chainPluginArgs: ChainPluginArgs = ChainPluginArgs() - httpClientPluginArgs: HttpClientPluginArgs = HttpClientPluginArgs() httpPluginArgs: HttpPluginArgs = HttpPluginArgs() netPluginArgs: NetPluginArgs = NetPluginArgs() producerPluginArgs: ProducerPluginArgs = ProducerPluginArgs() diff --git a/tests/performance_tests/validate_nodeos_plugin_args.py b/tests/performance_tests/validate_nodeos_plugin_args.py index 47be3f08ef..1ff0ce8b30 100755 --- a/tests/performance_tests/validate_nodeos_plugin_args.py +++ b/tests/performance_tests/validate_nodeos_plugin_args.py @@ -3,7 +3,7 @@ import re import subprocess -from NodeosPluginArgs import ChainPluginArgs, HttpClientPluginArgs, HttpPluginArgs, NetPluginArgs, ProducerPluginArgs, ResourceMonitorPluginArgs, SignatureProviderPluginArgs, 
StateHistoryPluginArgs, TraceApiPluginArgs +from NodeosPluginArgs import ChainPluginArgs, HttpPluginArgs, NetPluginArgs, ProducerPluginArgs, ResourceMonitorPluginArgs, SignatureProviderPluginArgs, StateHistoryPluginArgs, TraceApiPluginArgs testSuccessful = False @@ -65,7 +65,7 @@ def pairwise(iterable): nodeosPluginOptsDict = parseNodeosConfigOptions() -curListOfSupportedPlugins = [ChainPluginArgs(), HttpClientPluginArgs(), HttpPluginArgs(), NetPluginArgs(), ProducerPluginArgs(), +curListOfSupportedPlugins = [ChainPluginArgs(), HttpPluginArgs(), NetPluginArgs(), ProducerPluginArgs(), ResourceMonitorPluginArgs(), SignatureProviderPluginArgs(), StateHistoryPluginArgs(), TraceApiPluginArgs()] curListOfUnsupportedOptionGroups = ["txn_test_gen_plugin", "Application Config Options", "Application Command Line Options"] @@ -81,7 +81,7 @@ def argStrToAttrName(argStr: str) -> str: for supportedPlugin in curListOfSupportedPlugins: #Check whether nodeos has removed any plugin configuration sections - assert supportedPlugin._pluginName in nodeosPluginOptsDict, f"ERROR: Supported config section \"{supportedPlugin}\" no longer supported by nodeos. {regenSuggestion}" + assert supportedPlugin._pluginName in nodeosPluginOptsDict, f"ERROR: Supported config section \"{supportedPlugin._pluginName}\" no longer supported by nodeos. {regenSuggestion}" for opt in supportedPlugin.supportedNodeosArgs(): #Check whether nodeos has removed any arguments in a plugin From fe8e3dfca32bdb95015e3d5353e8cd5a63dbf8ca Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 7 Mar 2023 13:20:23 -0600 Subject: [PATCH 158/178] Clean up argument handling per peer review suggestions. 
--- tests/trx_generator/main.cpp | 79 +++++++++++++----------------------- 1 file changed, 28 insertions(+), 51 deletions(-) diff --git a/tests/trx_generator/main.cpp b/tests/trx_generator/main.cpp index 20fea0444d..19f7bb3262 100644 --- a/tests/trx_generator/main.cpp +++ b/tests/trx_generator/main.cpp @@ -97,95 +97,72 @@ int main(int argc, char** argv) { transaction_specified = true; } - if(!vmap.count("chain-id")) { + if(chain_id_in.empty()) { ilog("Initialization error: missing chain-id"); cli.print(std::cerr); return INITIALIZE_FAIL; } - if(!vmap.count("log-dir")) { + if(log_dir_in.empty()) { ilog("Initialization error: missing log-dir"); cli.print(std::cerr); return INITIALIZE_FAIL; } - if(!vmap.count("last-irreversible-block-id")) { + if(lib_id_str.empty()) { ilog("Initialization error: missing last-irreversible-block-id"); cli.print(std::cerr); return INITIALIZE_FAIL; } - if(vmap.count("contract-owner-account")) { - } else { + if(contract_owner_acct.empty()) { ilog("Initialization error: missing contract-owner-account"); cli.print(std::cerr); return INITIALIZE_FAIL; } - if(vmap.count("accounts")) { - boost::split(account_str_vector, accts, boost::is_any_of(",")); - if(!transaction_specified && account_str_vector.size() < 2) { - ilog("Initialization error: requires at minimum 2 transfer accounts"); - cli.print(std::cerr); - return INITIALIZE_FAIL; - } - } else { + boost::split(account_str_vector, accts, boost::is_any_of(",")); + if(account_str_vector.size() < 1 || (!transaction_specified && account_str_vector.size() < 2)) { ilog("Initialization error: did not specify transfer accounts. 
Auto transfer transaction generation requires at minimum 2 transfer accounts, while providing transaction action data requires at least one."); cli.print(std::cerr); return INITIALIZE_FAIL; } - if(vmap.count("priv-keys")) { - boost::split(private_keys_str_vector, p_keys, boost::is_any_of(",")); - if(!transaction_specified && private_keys_str_vector.size() < 2) { - ilog("Initialization error: requires at minimum 2 private keys"); - cli.print(std::cerr); - return INITIALIZE_FAIL; - } - } else { + boost::split(private_keys_str_vector, p_keys, boost::is_any_of(",")); + if(private_keys_str_vector.size() < 1 || (!transaction_specified && private_keys_str_vector.size() < 2)) { ilog("Initialization error: did not specify accounts' private keys. Auto transfer transaction generation requires at minimum 2 private keys, while providing transaction action data requires at least one."); cli.print(std::cerr); return INITIALIZE_FAIL; } - if(vmap.count("generator-id")) { - if(gen_id > generator_id_max) { - ilog("Initialization error: Exceeded max value for generator id. Value must be less than ${max}.", ("max", generator_id_max)); - cli.print(std::cerr); - return INITIALIZE_FAIL; - } + if(gen_id > generator_id_max) { + ilog("Initialization error: Exceeded max value for generator id. Value must be less than ${max}.", ("max", generator_id_max)); + cli.print(std::cerr); + return INITIALIZE_FAIL; } - if(vmap.count("trx-expiration")) { - if(trx_expr > trx_expiration_max) { - ilog("Initialization error: Exceeded max value for transaction expiration. Value must be less than ${max}.", ("max", trx_expiration_max)); - cli.print(std::cerr); - return INITIALIZE_FAIL; - } + if(trx_expr > trx_expiration_max) { + ilog("Initialization error: Exceeded max value for transaction expiration. 
Value must be less than ${max}.", ("max", trx_expiration_max)); + cli.print(std::cerr); + return INITIALIZE_FAIL; } - if(vmap.count("spinup-time-us")) { - if(spinup_time_us < 0) { - ilog("Initialization error: spinup-time-us cannot be negative"); - cli.print(std::cerr); - return INITIALIZE_FAIL; - } + if(spinup_time_us < 0) { + ilog("Initialization error: spinup-time-us cannot be negative"); + cli.print(std::cerr); + return INITIALIZE_FAIL; } - if(vmap.count("max-lag-duration-us")) { - if(max_lag_duration_us < 0) { - ilog("Initialization error: max-lag-duration-us cannot be negative"); - cli.print(std::cerr); - return INITIALIZE_FAIL; - } + if(max_lag_duration_us < 0) { + ilog("Initialization error: max-lag-duration-us cannot be negative"); + cli.print(std::cerr); + return INITIALIZE_FAIL; } - if(vmap.count("max-lag-percent")) { - if(max_lag_per > 100) { - ilog("Initialization error: max-lag-percent must be between 0 and 100"); - cli.print(std::cerr); - return INITIALIZE_FAIL; - } + if(max_lag_per > 100) { + ilog("Initialization error: max-lag-percent must be between 0 and 100"); + cli.print(std::cerr); + return INITIALIZE_FAIL; } } catch(bpo::unknown_option& ex) { std::cerr << ex.what() << std::endl; From 23670d00ca58bf6ed496e42b1782981bc4e398cb Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 7 Mar 2023 15:25:22 -0600 Subject: [PATCH 159/178] Reintroduce stop-on-trx-failed argument. Inadvertently removed in commit d8d9fa3. --- tests/trx_generator/main.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/trx_generator/main.cpp b/tests/trx_generator/main.cpp index 19f7bb3262..e9408d2e92 100644 --- a/tests/trx_generator/main.cpp +++ b/tests/trx_generator/main.cpp @@ -68,6 +68,7 @@ int main(int argc, char** argv) { ("monitor-max-lag-percent", bpo::value(&max_lag_per)->default_value(5), "Max percentage off from expected transactions sent before being in violation. 
Defaults to 5.") ("monitor-max-lag-duration-us", bpo::value(&max_lag_duration_us)->default_value(1000000), "Max microseconds that transaction generation can be in violation before quitting. Defaults to 1000000 (1s).") ("log-dir", bpo::value(&log_dir_in), "set the logs directory") + ("stop-on-trx-failed", bpo::value(&stop_on_trx_failed)->default_value(true), "stop transaction generation if sending fails.") ("abi-file", bpo::value(&abi_file_path_in), "The path to the contract abi file to use for the supplied transaction action data") ("actions-data", bpo::value(&actions_data_json_file_or_str), "The json actions data file or json actions data description string to use") ("actions-auths", bpo::value(&actions_auths_json_file_or_str), "The json actions auth file or json actions auths description string to use, containting authAcctName to activePrivateKey pairs.") From eed59be1347f0e1a7b047f2f7b0538c04255fdc1 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 8 Mar 2023 11:39:18 -0600 Subject: [PATCH 160/178] Work to coalesce arguments into config objects. Additional work to clean up argument handling in trx generator main. Drop a couple helper functions that were not needed. Don't pass arguments into member functions that could simply access member variables.
--- tests/trx_generator/main.cpp | 128 ++++++++++---------- tests/trx_generator/trx_generator.cpp | 113 ++++++----------- tests/trx_generator/trx_generator.hpp | 99 ++++++++++----- tests/trx_generator/trx_generator_tests.cpp | 32 ++--- tests/trx_generator/trx_provider.cpp | 10 +- tests/trx_generator/trx_provider.hpp | 51 +++++--- 6 files changed, 223 insertions(+), 210 deletions(-) diff --git a/tests/trx_generator/main.cpp b/tests/trx_generator/main.cpp index e9408d2e92..9f468dca2e 100644 --- a/tests/trx_generator/main.cpp +++ b/tests/trx_generator/main.cpp @@ -24,56 +24,48 @@ enum return_codes { }; int main(int argc, char** argv) { + provider_base_config provider_config; + trx_generator_base_config trx_gen_base_config; + user_specified_trx_config user_trx_config; + accounts_config accts_config; + trx_tps_tester_config tester_config; + const int64_t trx_expiration_max = 3600; const uint16_t generator_id_max = 960; variables_map vmap; options_description cli("Transaction Generator command line options."); - uint16_t gen_id = 0; - string chain_id_in; - string contract_owner_acct; + std::string chain_id_in; + std::string contract_owner_account_in; + std::string lib_id_str; string accts; string p_keys; - int64_t trx_expr = 3600; - uint32_t gen_duration = 60; - uint32_t target_tps = 1; - string lib_id_str; int64_t spinup_time_us = 1000000; uint32_t max_lag_per = 5; int64_t max_lag_duration_us = 1000000; - string log_dir_in; - bool stop_on_trx_failed; - std::string peer_endpoint = "127.0.0.1"; - unsigned short port = 9876; + int64_t trx_expr = 3600; bool transaction_specified = false; - std::string abi_file_path_in; - std::string actions_data_json_file_or_str; - std::string actions_auths_json_file_or_str; - - vector account_str_vector; - vector private_keys_str_vector; - cli.add_options() - ("generator-id", bpo::value(&gen_id)->default_value(0), "Id for the transaction generator. Allowed range (0-960). 
Defaults to 0.") + ("generator-id", bpo::value(&trx_gen_base_config._generator_id)->default_value(0), "Id for the transaction generator. Allowed range (0-960). Defaults to 0.") ("chain-id", bpo::value(&chain_id_in), "set the chain id") - ("contract-owner-account", bpo::value(&contract_owner_acct), "Account name of the contract account for the transaction actions") + ("contract-owner-account", bpo::value(&contract_owner_account_in), "Account name of the contract account for the transaction actions") ("accounts", bpo::value(&accts), "comma-separated list of accounts that will be used for transfers. Minimum required accounts: 2.") ("priv-keys", bpo::value(&p_keys), "comma-separated list of private keys in same order of accounts list that will be used to sign transactions. Minimum required: 2.") ("trx-expiration", bpo::value(&trx_expr)->default_value(3600), "transaction expiration time in seconds. Defaults to 3,600. Maximum allowed: 3,600") - ("trx-gen-duration", bpo::value(&gen_duration)->default_value(60), "Transaction generation duration (seconds). Defaults to 60 seconds.") - ("target-tps", bpo::value(&target_tps)->default_value(1), "Target transactions per second to generate/send. Defaults to 1 transaction per second.") + ("trx-gen-duration", bpo::value(&tester_config._gen_duration_seconds)->default_value(60), "Transaction generation duration (seconds). Defaults to 60 seconds.") + ("target-tps", bpo::value(&tester_config._target_tps)->default_value(1), "Target transactions per second to generate/send. Defaults to 1 transaction per second.") ("last-irreversible-block-id", bpo::value(&lib_id_str), "Current last-irreversible-block-id (LIB ID) to use for transactions.") ("monitor-spinup-time-us", bpo::value(&spinup_time_us)->default_value(1000000), "Number of microseconds to wait before monitoring TPS. 
Defaults to 1000000 (1s).") ("monitor-max-lag-percent", bpo::value(&max_lag_per)->default_value(5), "Max percentage off from expected transactions sent before being in violation. Defaults to 5.") ("monitor-max-lag-duration-us", bpo::value(&max_lag_duration_us)->default_value(1000000), "Max microseconds that transaction generation can be in violation before quitting. Defaults to 1000000 (1s).") - ("log-dir", bpo::value(&log_dir_in), "set the logs directory") - ("stop-on-trx-failed", bpo::value(&stop_on_trx_failed)->default_value(true), "stop transaction generation if sending fails.") - ("abi-file", bpo::value(&abi_file_path_in), "The path to the contract abi file to use for the supplied transaction action data") - ("actions-data", bpo::value(&actions_data_json_file_or_str), "The json actions data file or json actions data description string to use") - ("actions-auths", bpo::value(&actions_auths_json_file_or_str), "The json actions auth file or json actions auths description string to use, containting authAcctName to activePrivateKey pairs.") - ("peer-endpoint", bpo::value(&peer_endpoint)->default_value("127.0.0.1"), "set the peer endpoint to send transactions to") - ("port", bpo::value(&port)->default_value(9876), "set the peer endpoint port to send transactions to") + ("log-dir", bpo::value(&trx_gen_base_config._log_dir), "set the logs directory") + ("stop-on-trx-failed", bpo::value(&trx_gen_base_config._stop_on_trx_failed)->default_value(true), "stop transaction generation if sending fails.") + ("abi-file", bpo::value(&user_trx_config._abi_data_file_path), "The path to the contract abi file to use for the supplied transaction action data") + ("actions-data", bpo::value(&user_trx_config._actions_data_json_file_or_str), "The json actions data file or json actions data description string to use") + ("actions-auths", bpo::value(&user_trx_config._actions_auths_json_file_or_str), "The json actions auth file or json actions auths description string to use, containting 
authAcctName to activePrivateKey pairs.") + ("peer-endpoint", bpo::value(&provider_config._peer_endpoint)->default_value("127.0.0.1"), "set the peer endpoint to send transactions to") + ("port", bpo::value(&provider_config._port)->default_value(9876), "set the peer endpoint port to send transactions to") ("help,h", "print this list") ; @@ -86,25 +78,24 @@ int main(int argc, char** argv) { return SUCCESS; } - if((vmap.count("abi-file") || vmap.count("actions-data") || vmap.count("actions-auths")) && - !(vmap.count("abi-file") && vmap.count("actions-data") && vmap.count("actions-auths"))) { + if(user_trx_config.fully_configured()) { + ilog("Specifying transaction to generate directly using abi-file, actions-data, and actions-auths."); + transaction_specified = true; + } else if(user_trx_config.partially_configured()) { ilog("Initialization error: If using abi-file, actions-data, and actions-auths to specify a transaction type to generate, must provide all inputs."); cli.print(std::cerr); return INITIALIZE_FAIL; } - if(vmap.count("abi-file") && vmap.count("actions-data") && vmap.count("actions-auths")) { - ilog("Specifying transaction to generate directly using abi-file, actions-data, and actions-auths."); - transaction_specified = true; - } - if(chain_id_in.empty()) { ilog("Initialization error: missing chain-id"); cli.print(std::cerr); return INITIALIZE_FAIL; + } else { + trx_gen_base_config._chain_id = chain_id_type(chain_id_in); } - if(log_dir_in.empty()) { + if(trx_gen_base_config._log_dir.empty()) { ilog("Initialization error: missing log-dir"); cli.print(std::cerr); return INITIALIZE_FAIL; @@ -114,29 +105,45 @@ int main(int argc, char** argv) { ilog("Initialization error: missing last-irreversible-block-id"); cli.print(std::cerr); return INITIALIZE_FAIL; + } else { + trx_gen_base_config._last_irr_block_id = fc::variant(lib_id_str).as(); } - if(contract_owner_acct.empty()) { + if(contract_owner_account_in.empty()) { ilog("Initialization error: missing 
contract-owner-account"); cli.print(std::cerr); return INITIALIZE_FAIL; + } else { + trx_gen_base_config._contract_owner_account = name(contract_owner_account_in); } + std::vector account_str_vector; boost::split(account_str_vector, accts, boost::is_any_of(",")); - if(account_str_vector.size() < 1 || (!transaction_specified && account_str_vector.size() < 2)) { - ilog("Initialization error: did not specify transfer accounts. Auto transfer transaction generation requires at minimum 2 transfer accounts, while providing transaction action data requires at least one."); + if(!transaction_specified && account_str_vector.size() < 2) { + ilog("Initialization error: did not specify transfer accounts. Auto transfer transaction generation requires at minimum 2 transfer accounts."); cli.print(std::cerr); return INITIALIZE_FAIL; + } else if (!accts.empty() && !account_str_vector.empty()) { + for(const string& account_name: account_str_vector) { + ilog("Initializing accounts. Attempt to create name for ${acct}", ("acct", account_name)); + accts_config._acct_name_vec.emplace_back(account_name); + } } + std::vector private_keys_str_vector; boost::split(private_keys_str_vector, p_keys, boost::is_any_of(",")); - if(private_keys_str_vector.size() < 1 || (!transaction_specified && private_keys_str_vector.size() < 2)) { - ilog("Initialization error: did not specify accounts' private keys. Auto transfer transaction generation requires at minimum 2 private keys, while providing transaction action data requires at least one."); + if(!transaction_specified && private_keys_str_vector.size() < 2) { + ilog("Initialization error: did not specify accounts' private keys. Auto transfer transaction generation requires at minimum 2 private keys."); cli.print(std::cerr); return INITIALIZE_FAIL; + } else if (!p_keys.empty() && !private_keys_str_vector.empty()) { + for(const string& private_key: private_keys_str_vector) { + ilog("Initializing private keys. 
Attempt to create private_key for ${key} : gen key ${newKey}", ("key", private_key)("newKey", fc::crypto::private_key(private_key))); + accts_config._priv_keys_vec.emplace_back(private_key); + } } - if(gen_id > generator_id_max) { + if(trx_gen_base_config._generator_id > generator_id_max) { ilog("Initialization error: Exceeded max value for generator id. Value must be less than ${max}.", ("max", generator_id_max)); cli.print(std::cerr); return INITIALIZE_FAIL; @@ -146,6 +153,8 @@ int main(int argc, char** argv) { ilog("Initialization error: Exceeded max value for transaction expiration. Value must be less than ${max}.", ("max", trx_expiration_max)); cli.print(std::cerr); return INITIALIZE_FAIL; + } else { + trx_gen_base_config._trx_expiration_us = fc::seconds(trx_expr); } if(spinup_time_us < 0) { @@ -171,43 +180,30 @@ int main(int argc, char** argv) { return INITIALIZE_FAIL; } - ilog("Initial generator id ${id}", ("id", gen_id)); - ilog("Initial chain id ${chainId}", ("chainId", chain_id_in)); - ilog("Contract owner account ${acct}", ("acct", contract_owner_acct)); - ilog("Transfer accounts ${accts}", ("accts", accts)); - ilog("Account private keys ${priv_keys}", ("priv_keys", p_keys)); - ilog("Transaction expiration seconds ${expr}", ("expr", trx_expr)); - ilog("Reference LIB block id ${LIB}", ("LIB", lib_id_str)); - ilog("Transaction Generation Duration (sec) ${dur}", ("dur", gen_duration)); - ilog("Target generation Transaction Per Second (TPS) ${tps}", ("tps", target_tps)); - ilog("Logs directory ${logDir}", ("logDir", log_dir_in)); - ilog("Peer Endpoint ${peer-endpoint}:${peer-port}", ("peer-endpoint", peer_endpoint)("peer-port", port)); + ilog("Initial Trx Generator config: ${config}", ("config", trx_gen_base_config.to_string())); + ilog("Initial Provider config: ${config}", ("config", provider_config.to_string())); + ilog("Initial Accounts config: ${config}", ("config", accts_config.to_string())); + ilog("Transaction TPS Tester config: ${config}", ("config", 
tester_config.to_string())); if (transaction_specified) { - ilog("User Transaction Specified: Abi File ${abi}", ("abi", abi_file_path_in)); - ilog("User Transaction Specified: Actions Data ${acts}", ("acts", actions_data_json_file_or_str)); - ilog("User Transaction Specified: Actions Auths ${auths}", ("auths", actions_auths_json_file_or_str)); + ilog("User Transaction Specified: ${config}", ("config", user_trx_config.to_string())); } - fc::microseconds trx_expr_ms = fc::seconds(trx_expr); - std::shared_ptr monitor; if (transaction_specified) { - auto generator = std::make_shared(gen_id, chain_id_in, abi_file_path_in, contract_owner_acct, - actions_data_json_file_or_str, actions_auths_json_file_or_str, - trx_expr_ms, lib_id_str, log_dir_in, stop_on_trx_failed, peer_endpoint, port); + auto generator = std::make_shared(trx_gen_base_config, provider_config, user_trx_config); + monitor = std::make_shared(spinup_time_us, max_lag_per, max_lag_duration_us); - trx_tps_tester tester{generator, monitor, gen_duration, target_tps}; + trx_tps_tester tester{generator, monitor, tester_config}; if (!tester.run()) { return OTHER_FAIL; } } else { - auto generator = std::make_shared(gen_id, chain_id_in, contract_owner_acct, account_str_vector, trx_expr_ms, private_keys_str_vector, - lib_id_str, log_dir_in, stop_on_trx_failed, peer_endpoint, port); + auto generator = std::make_shared(trx_gen_base_config, provider_config, accts_config); monitor = std::make_shared(spinup_time_us, max_lag_per, max_lag_duration_us); - trx_tps_tester tester{generator, monitor, gen_duration, target_tps}; + trx_tps_tester tester{generator, monitor, tester_config}; if (!tester.run()) { return OTHER_FAIL; diff --git a/tests/trx_generator/trx_generator.cpp b/tests/trx_generator/trx_generator.cpp index 1f8a8af891..081caa6606 100644 --- a/tests/trx_generator/trx_generator.cpp +++ b/tests/trx_generator/trx_generator.cpp @@ -33,30 +33,28 @@ namespace eosio::testing { trx.actions.emplace_back(std::move(act)); } 
trx.context_free_actions.emplace_back(action({}, config::null_account_name, name("nonce"), - fc::raw::pack(std::to_string(_generator_id) + ":" + std::to_string(nonce_prefix) + ":" + std::to_string(++nonce) + ":" + + fc::raw::pack(std::to_string(_config._generator_id) + ":" + std::to_string(nonce_prefix) + ":" + std::to_string(++nonce) + ":" + fc::time_point::now().time_since_epoch().count()))); trx.sign(priv_key, chain_id); return signed_transaction_w_signer(trx, priv_key); } - vector transfer_trx_generator::create_initial_transfer_transactions(const vector& action_pairs_vector, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const chain_id_type& chain_id, const block_id_type& last_irr_block_id) { + void transfer_trx_generator::create_initial_transfer_transactions(uint64_t& nonce_prefix, uint64_t& nonce) { std::vector trxs; - trxs.reserve(2 * action_pairs_vector.size()); + _trxs.reserve(2 * _action_pairs_vector.size()); - for (const action_pair_w_keys& ap : action_pairs_vector) { - trxs.emplace_back(create_trx_w_actions_and_signer({ap._first_act}, ap._first_act_priv_key, nonce_prefix, nonce, trx_expiration, chain_id, last_irr_block_id)); - trxs.emplace_back(create_trx_w_actions_and_signer({ap._second_act}, ap._second_act_priv_key, nonce_prefix, nonce, trx_expiration, chain_id, last_irr_block_id)); + for (const action_pair_w_keys& ap : _action_pairs_vector) { + _trxs.emplace_back(create_trx_w_actions_and_signer({ap._first_act}, ap._first_act_priv_key, nonce_prefix, nonce, _config._trx_expiration_us, _config._chain_id, _config._last_irr_block_id)); + _trxs.emplace_back(create_trx_w_actions_and_signer({ap._second_act}, ap._second_act_priv_key, nonce_prefix, nonce, _config._trx_expiration_us, _config._chain_id, _config._last_irr_block_id)); } - - return trxs; } void trx_generator_base::update_resign_transaction(signed_transaction& trx, const fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const 
fc::microseconds& trx_expiration, const chain_id_type& chain_id, const block_id_type& last_irr_block_id) { trx.context_free_actions.clear(); trx.context_free_actions.emplace_back(action({}, config::null_account_name, name("nonce"), - fc::raw::pack(std::to_string(_generator_id) + ":" + std::to_string(nonce_prefix) + ":" + std::to_string(++nonce) + ":" + + fc::raw::pack(std::to_string(_config._generator_id) + ":" + std::to_string(nonce_prefix) + ":" + std::to_string(++nonce) + ":" + fc::time_point::now().time_since_epoch().count()))); set_transaction_headers(trx, last_irr_block_id, trx_expiration); trx.signatures.clear(); @@ -72,76 +70,47 @@ namespace eosio::testing { account, "transfer"_n, make_transfer_data(from, to, quantity, std::move(memo))); } - vector transfer_trx_generator::create_initial_transfer_actions(const std::string& salt, const uint64_t& period, const name& contract_owner_account, const vector& accounts, const vector& priv_keys) { - vector actions_pairs_vector; + void transfer_trx_generator::create_initial_transfer_actions(const std::string& salt, const uint64_t& period) { - for (size_t i = 0; i < accounts.size(); ++i) { - for (size_t j = i + 1; j < accounts.size(); ++j) { + for (size_t i = 0; i < _accts_config._acct_name_vec.size(); ++i) { + for (size_t j = i + 1; j < _accts_config._acct_name_vec.size(); ++j) { //create the actions here - ilog("create_initial_transfer_actions: creating transfer from ${acctA} to ${acctB}", ("acctA", accounts.at(i))("acctB", accounts.at(j))); - action act_a_to_b = make_transfer_action(contract_owner_account, accounts.at(i), accounts.at(j), asset::from_string("1.0000 CUR"), salt); + ilog("create_initial_transfer_actions: creating transfer from ${acctA} to ${acctB}", ("acctA", _accts_config._acct_name_vec.at(i))("acctB", _accts_config._acct_name_vec.at(j))); + action act_a_to_b = make_transfer_action(_config._contract_owner_account, _accts_config._acct_name_vec.at(i), _accts_config._acct_name_vec.at(j), 
asset::from_string("1.0000 CUR"), salt); - ilog("create_initial_transfer_actions: creating transfer from ${acctB} to ${acctA}", ("acctB", accounts.at(j))("acctA", accounts.at(i))); - action act_b_to_a = make_transfer_action(contract_owner_account, accounts.at(j), accounts.at(i), asset::from_string("1.0000 CUR"), salt); + ilog("create_initial_transfer_actions: creating transfer from ${acctB} to ${acctA}", ("acctB", _accts_config._acct_name_vec.at(j))("acctA", _accts_config._acct_name_vec.at(i))); + action act_b_to_a = make_transfer_action(_config._contract_owner_account, _accts_config._acct_name_vec.at(j), _accts_config._acct_name_vec.at(i), asset::from_string("1.0000 CUR"), salt); - actions_pairs_vector.emplace_back(action_pair_w_keys(act_a_to_b, act_b_to_a, priv_keys.at(i), priv_keys.at(j))); + _action_pairs_vector.emplace_back(act_a_to_b, act_b_to_a, _accts_config._priv_keys_vec.at(i), _accts_config._priv_keys_vec.at(j)); } } - ilog("create_initial_transfer_actions: total action pairs created: ${pairs}", ("pairs", actions_pairs_vector.size())); - return actions_pairs_vector; + ilog("create_initial_transfer_actions: total action pairs created: ${pairs}", ("pairs", _action_pairs_vector.size())); } - trx_generator_base::trx_generator_base(uint16_t generator_id, const std::string& chain_id_in, const std::string& contract_owner_account, const fc::microseconds& trx_expr, const std::string& lib_id_str, const std::string& log_dir, - bool stop_on_trx_failed, const std::string& peer_endpoint, unsigned short port) - : _provider(peer_endpoint, port), _generator_id(generator_id), _chain_id(chain_id_in), _contract_owner_account(contract_owner_account), _trx_expiration(trx_expr), - _last_irr_block_id(fc::variant(lib_id_str).as()), _log_dir(log_dir), _stop_on_trx_failed(stop_on_trx_failed) {} + trx_generator_base::trx_generator_base(const trx_generator_base_config& trx_gen_base_config, const provider_base_config& provider_config) + : _config(trx_gen_base_config), 
_provider(provider_config) {} - transfer_trx_generator::transfer_trx_generator(uint16_t generator_id, const std::string& chain_id_in, const std::string& contract_owner_account, const std::vector& accts, - const fc::microseconds& trx_expr, const std::vector& private_keys_str_vector, const std::string& lib_id_str, const std::string& log_dir, bool stop_on_trx_failed, const std::string& peer_endpoint, unsigned short port) - : trx_generator_base(generator_id, chain_id_in, contract_owner_account, trx_expr, lib_id_str, log_dir, stop_on_trx_failed, peer_endpoint, port), _accts(accts), _private_keys_str_vector(private_keys_str_vector) {} + transfer_trx_generator::transfer_trx_generator(const trx_generator_base_config& trx_gen_base_config, const provider_base_config& provider_config, const accounts_config& accts_config) + : trx_generator_base(trx_gen_base_config, provider_config), _accts_config(accts_config) {} - vector transfer_trx_generator::get_accounts(const vector& account_str_vector) { - vector acct_name_list; - for (const string& account_name : account_str_vector) { - ilog("get_account about to try to create name for ${acct}", ("acct", account_name)); - acct_name_list.emplace_back(eosio::chain::name(account_name)); - } - return acct_name_list; - } - vector transfer_trx_generator::get_private_keys(const vector& priv_key_str_vector) { - vector key_list; - for (const string& private_key: priv_key_str_vector) { - ilog("get_private_keys about to try to create private_key for ${key} : gen key ${newKey}", ("key", private_key)("newKey", fc::crypto::private_key(private_key))); - key_list.emplace_back(fc::crypto::private_key(private_key)); - } - return key_list; - } - bool transfer_trx_generator::setup() { - const vector accounts = get_accounts(_accts); - const vector private_key_vector = get_private_keys(_private_keys_str_vector); + bool transfer_trx_generator::setup() { const std::string salt = std::to_string(getpid()); const uint64_t &period = 20; _nonce_prefix = 0; _nonce = 
static_cast(fc::time_point::now().sec_since_epoch()) << 32; - ilog("Create All Initial Transfer Action/Reaction Pairs (acct 1 -> acct 2, acct 2 -> acct 1) between all provided accounts."); - const auto action_pairs_vector = create_initial_transfer_actions(salt, period, _contract_owner_account, accounts, - private_key_vector); - ilog("Stop Generation (form potential ongoing generation in preparation for starting new generation run)."); stop_generation(); + ilog("Create All Initial Transfer Action/Reaction Pairs (acct 1 -> acct 2, acct 2 -> acct 1) between all provided accounts."); + create_initial_transfer_actions(salt, period); + ilog("Create All Initial Transfer Transactions (one for each created action)."); - _trxs = create_initial_transfer_transactions(action_pairs_vector, - ++_nonce_prefix, - _nonce, - _trx_expiration, - _chain_id, - _last_irr_block_id); + create_initial_transfer_transactions(++_nonce_prefix, _nonce); ilog("Setup p2p transaction provider"); @@ -217,14 +186,8 @@ namespace eosio::testing { trx_generator_base::update_resign_transaction(trx, priv_key, nonce_prefix, nonce, trx_expiration, chain_id, last_irr_block_id); } - trx_generator::trx_generator(uint16_t generator_id, const std::string& chain_id_in, const std::string& abi_data_file, const std::string& contract_owner_account, - const std::string& actions_data_json_file_or_str, const std::string& actions_auths_json_file_or_str, - const fc::microseconds& trx_expr, const std::string& lib_id_str, const std::string& log_dir, bool stop_on_trx_failed, - const std::string& peer_endpoint, unsigned short port) - : trx_generator_base(generator_id, chain_id_in, contract_owner_account, trx_expr, lib_id_str, log_dir, stop_on_trx_failed, peer_endpoint, port), - _abi_data_file_path(abi_data_file), - _actions_data_json_file_or_str(actions_data_json_file_or_str), _actions_auths_json_file_or_str(actions_auths_json_file_or_str), - _acct_name_generator() {} + trx_generator::trx_generator(const 
trx_generator_base_config& trx_gen_base_config, const provider_base_config& provider_config, const user_specified_trx_config& usr_trx_config) + : trx_generator_base(trx_gen_base_config, provider_config), _usr_trx_config(usr_trx_config), _acct_name_generator() {} void trx_generator::update_actions() { _actions.clear(); @@ -249,7 +212,7 @@ namespace eosio::testing { } EOS_RETHROW_EXCEPTIONS(transaction_type_exception, "Fail to parse unpacked action data JSON") eosio::chain::action act; - act.account = _contract_owner_account; + act.account = _config._contract_owner_account; act.name = action_name; chain::name auth_actor = chain::name(action_mvo["authorization"].get_object()["actor"].as_string()); @@ -269,9 +232,9 @@ namespace eosio::testing { stop_generation(); ilog("Create Initial Transaction with action data."); - _abi = abi_serializer(fc::json::from_file(_abi_data_file_path).as(), abi_serializer::create_yield_function( abi_serializer_max_time )); - fc::variant unpacked_actions_data_json = json_from_file_or_string(_actions_data_json_file_or_str); - fc::variant unpacked_actions_auths_data_json = json_from_file_or_string(_actions_auths_json_file_or_str); + _abi = abi_serializer(fc::json::from_file(_usr_trx_config._abi_data_file_path).as(), abi_serializer::create_yield_function( abi_serializer_max_time )); + fc::variant unpacked_actions_data_json = json_from_file_or_string(_usr_trx_config._actions_data_json_file_or_str); + fc::variant unpacked_actions_auths_data_json = json_from_file_or_string(_usr_trx_config._actions_auths_json_file_or_str); ilog("Loaded actions data: ${data}", ("data", fc::json::to_pretty_string(unpacked_actions_data_json))); ilog("Loaded actions auths data: ${auths}", ("auths", fc::json::to_pretty_string(unpacked_actions_auths_data_json))); @@ -289,7 +252,7 @@ namespace eosio::testing { ilog("acct_gen_fields entry: ${value}", ("value", e)); } ilog("Priming name generator for trx generator prefix."); - _acct_name_generator.setPrefix(_generator_id); 
+ _acct_name_generator.setPrefix(_config._generator_id); } ilog("Setting up transaction signer."); @@ -305,7 +268,7 @@ namespace eosio::testing { } ilog("Populate initial transaction."); - _trxs.emplace_back(create_trx_w_actions_and_signer(_actions, signer_key, ++_nonce_prefix, _nonce, _trx_expiration, _chain_id, _last_irr_block_id)); + _trxs.emplace_back(create_trx_w_actions_and_signer(_actions, signer_key, ++_nonce_prefix, _nonce, _config._trx_expiration_us, _config._chain_id, _config._last_irr_block_id)); ilog("Setup p2p transaction provider"); @@ -316,7 +279,7 @@ namespace eosio::testing { } bool trx_generator_base::tear_down() { - _provider.log_trxs(_log_dir); + _provider.log_trxs(_config._log_dir); _provider.teardown(); ilog("Sent transactions: ${cnt}", ("cnt", _txcount)); @@ -332,8 +295,8 @@ namespace eosio::testing { try { if (_trxs.size()) { size_t index_to_send = _txcount % _trxs.size(); - push_transaction(_provider, _trxs.at(index_to_send), ++_nonce_prefix, _nonce, _trx_expiration, _chain_id, - _last_irr_block_id); + push_transaction(_provider, _trxs.at(index_to_send), ++_nonce_prefix, _nonce, _config._trx_expiration_us, _config._chain_id, + _config._last_irr_block_id); ++_txcount; } else { elog("no transactions available to send"); @@ -362,7 +325,7 @@ namespace eosio::testing { void trx_generator_base::push_transaction(p2p_trx_provider& provider, signed_transaction_w_signer& trx, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const chain_id_type& chain_id, const block_id_type& last_irr_block_id) { update_resign_transaction(trx._trx, trx._signer, ++nonce_prefix, nonce, trx_expiration, chain_id, last_irr_block_id); if (_txcount == 0) { - log_first_trx(_log_dir, trx._trx); + log_first_trx(_config._log_dir, trx._trx); } provider.send(trx._trx); } @@ -377,6 +340,6 @@ namespace eosio::testing { } bool trx_generator_base::stop_on_trx_fail() { - return _stop_on_trx_failed; + return _config._stop_on_trx_failed; } } diff --git 
a/tests/trx_generator/trx_generator.hpp b/tests/trx_generator/trx_generator.hpp index 7db6c8bef2..8dc20d24c8 100644 --- a/tests/trx_generator/trx_generator.hpp +++ b/tests/trx_generator/trx_generator.hpp @@ -95,27 +95,84 @@ namespace eosio::testing { } }; + struct trx_generator_base_config { + uint16_t _generator_id = 0; + eosio::chain::chain_id_type _chain_id = eosio::chain::chain_id_type::empty_chain_id(); + eosio::chain::name _contract_owner_account = eosio::chain::name(); + fc::microseconds _trx_expiration_us = fc::seconds(3600); + eosio::chain::block_id_type _last_irr_block_id = eosio::chain::block_id_type(); + std::string _log_dir = "."; + bool _stop_on_trx_failed = true; + + std::string to_string() const { + std::ostringstream ss; + ss << " generator id: " << _generator_id << " chain id: " << std::string(_chain_id) << " contract owner account: " + << _contract_owner_account << " trx expiration seconds: " << _trx_expiration_us.to_seconds() << " lib id: " << std::string(_last_irr_block_id) + << " log dir: " << _log_dir << " stop on trx failed: " << _stop_on_trx_failed; + return std::move(ss).str(); + }; + }; + + struct user_specified_trx_config { + std::string _abi_data_file_path; + std::string _actions_data_json_file_or_str; + std::string _actions_auths_json_file_or_str; + + bool fully_configured() const { + return !_abi_data_file_path.empty() && !_actions_data_json_file_or_str.empty() && !_actions_auths_json_file_or_str.empty(); + } + + bool partially_configured() const { + return !fully_configured() && (!_abi_data_file_path.empty() || !_actions_data_json_file_or_str.empty() || !_actions_auths_json_file_or_str.empty()); + } + + std::string to_string() const { + std::ostringstream ss; + ss << "User Transaction Specified: Abi File: " << _abi_data_file_path << " Actions Data: " << _actions_data_json_file_or_str << " Actions Auths: " << _actions_auths_json_file_or_str; + return std::move(ss).str(); + }; + }; + + struct accounts_config { + std::vector 
_acct_name_vec; + std::vector _priv_keys_vec; + + std::string to_string() const { + std::ostringstream ss; + ss << "Accounts Specified: accounts: [ "; + for(size_t i = 0; i < _acct_name_vec.size(); ++i) { + ss << _acct_name_vec.at(i); + if(i < _acct_name_vec.size() - 1) { + ss << ", "; + } + } + ss << " ] keys: [ "; + for(size_t i = 0; i < _priv_keys_vec.size(); ++i) { + ss << _priv_keys_vec.at(i).to_string(); + if(i < _priv_keys_vec.size() - 1) { + ss << ", "; + } + } + ss << " ]"; + return std::move(ss).str(); + }; + }; + struct trx_generator_base { + const trx_generator_base_config& _config; p2p_trx_provider _provider; - uint16_t _generator_id = 0; - eosio::chain::chain_id_type _chain_id; - eosio::chain::name _contract_owner_account; - fc::microseconds _trx_expiration; - eosio::chain::block_id_type _last_irr_block_id; - std::string _log_dir; uint64_t _total_us = 0; uint64_t _txcount = 0; std::vector _trxs; + std::vector _action_pairs_vector; uint64_t _nonce = 0; uint64_t _nonce_prefix = 0; - bool _stop_on_trx_failed = true; - trx_generator_base(uint16_t generator_id, const std::string& chain_id_in, const std::string& contract_owner_account, const fc::microseconds& trx_expr, const std::string& lib_id_str, const std::string& log_dir, bool stop_on_trx_failed, - const std::string& peer_endpoint="127.0.0.1", unsigned short port=9876); + trx_generator_base(const trx_generator_base_config& trx_gen_base_config, const provider_base_config& provider_config); virtual ~trx_generator_base() = default; @@ -140,29 +197,20 @@ namespace eosio::testing { }; struct transfer_trx_generator : public trx_generator_base { - const std::vector _accts; - std::vector _private_keys_str_vector; - - transfer_trx_generator(uint16_t generator_id, const std::string& chain_id_in, const std::string& contract_owner_account, const std::vector& accts, - const fc::microseconds& trx_expr, const std::vector& private_keys_str_vector, const std::string& lib_id_str, const std::string& log_dir, bool 
stop_on_trx_failed, - const std::string& peer_endpoint="127.0.0.1", unsigned short port=9876); + accounts_config _accts_config; - std::vector get_accounts(const std::vector& account_str_vector); - std::vector get_private_keys(const std::vector& priv_key_str_vector); + transfer_trx_generator(const trx_generator_base_config& trx_gen_base_config, const provider_base_config& provider_config, const accounts_config& accts_config); - std::vector create_initial_transfer_transactions(const std::vector& action_pairs_vector, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const eosio::chain::chain_id_type& chain_id, const eosio::chain::block_id_type& last_irr_block_id); + void create_initial_transfer_transactions(uint64_t& nonce_prefix, uint64_t& nonce); eosio::chain::bytes make_transfer_data(const eosio::chain::name& from, const eosio::chain::name& to, const eosio::chain::asset& quantity, const std::string& memo); auto make_transfer_action(eosio::chain::name account, eosio::chain::name from, eosio::chain::name to, eosio::chain::asset quantity, std::string memo); - std::vector create_initial_transfer_actions(const std::string& salt, const uint64_t& period, const eosio::chain::name& contract_owner_account, - const std::vector& accounts, const std::vector& priv_keys); + void create_initial_transfer_actions(const std::string& salt, const uint64_t& period); bool setup(); }; struct trx_generator : public trx_generator_base{ - std::string _abi_data_file_path; - std::string _actions_data_json_file_or_str; - std::string _actions_auths_json_file_or_str; + user_specified_trx_config _usr_trx_config; account_name_generator _acct_name_generator; eosio::chain::abi_serializer _abi; @@ -172,10 +220,7 @@ namespace eosio::testing { const fc::microseconds abi_serializer_max_time = fc::seconds(10); // No risk to client side serialization taking a long time - trx_generator(uint16_t generator_id, const std::string& chain_id_in, const std::string& abi_data_file, 
const std::string& contract_owner_account, - const std::string& actions_data_json_file_or_str, const std::string& actions_auths_json_file_or_str, - const fc::microseconds& trx_expr, const std::string& lib_id_str, const std::string& log_dir, bool stop_on_trx_failed, - const std::string& peer_endpoint="127.0.0.1", unsigned short port=9876); + trx_generator(const trx_generator_base_config& trx_gen_base_config, const provider_base_config& provider_config, const user_specified_trx_config& usr_trx_config); void locate_key_words_in_action_mvo(std::vector& acct_gen_fields_out, fc::mutable_variant_object& action_mvo, const std::string& key_word); void locate_key_words_in_action_array(std::map>& acct_gen_fields_out, fc::variants& action_array, const std::string& key_word); diff --git a/tests/trx_generator/trx_generator_tests.cpp b/tests/trx_generator/trx_generator_tests.cpp index c5bb3832fd..01fcfe8016 100644 --- a/tests/trx_generator/trx_generator_tests.cpp +++ b/tests/trx_generator/trx_generator_tests.cpp @@ -55,7 +55,7 @@ BOOST_AUTO_TEST_CASE(tps_short_run_low_tps) std::shared_ptr generator = std::make_shared(expected_trxs); std::shared_ptr monitor = std::make_shared(expected_trxs); - trx_tps_tester t1(generator, monitor, test_duration_s, test_tps); + trx_tps_tester t1(generator, monitor, {test_duration_s, test_tps}); fc::time_point start = fc::time_point::now(); t1.run(); @@ -82,7 +82,7 @@ BOOST_AUTO_TEST_CASE(tps_short_run_high_tps) std::shared_ptr monitor = std::make_shared(expected_trxs); - trx_tps_tester t1(generator, monitor, test_duration_s, test_tps); + trx_tps_tester t1(generator, monitor, {test_duration_s, test_tps}); fc::time_point start = fc::time_point::now(); t1.run(); @@ -115,7 +115,7 @@ BOOST_AUTO_TEST_CASE(tps_short_run_med_tps_med_delay) std::shared_ptr monitor = std::make_shared(expected_trxs); - trx_tps_tester t1(generator, monitor, test_duration_s, test_tps); + trx_tps_tester t1(generator, monitor, {test_duration_s, test_tps}); fc::time_point start = 
fc::time_point::now(); t1.run(); @@ -148,7 +148,7 @@ BOOST_AUTO_TEST_CASE(tps_med_run_med_tps_med_delay) std::shared_ptr monitor = std::make_shared(expected_trxs); - trx_tps_tester t1(generator, monitor, test_duration_s, test_tps); + trx_tps_tester t1(generator, monitor, {test_duration_s, test_tps}); fc::time_point start = fc::time_point::now(); t1.run(); @@ -181,7 +181,7 @@ BOOST_AUTO_TEST_CASE(tps_cant_keep_up) std::shared_ptr monitor = std::make_shared(expected_trxs); - trx_tps_tester t1(generator, monitor, test_duration_s, test_tps); + trx_tps_tester t1(generator, monitor, {test_duration_s, test_tps}); fc::time_point start = fc::time_point::now(); t1.run(); @@ -214,7 +214,7 @@ BOOST_AUTO_TEST_CASE(tps_med_run_med_tps_30us_delay) std::shared_ptr monitor = std::make_shared(expected_trxs); - trx_tps_tester t1(generator, monitor, test_duration_s, test_tps); + trx_tps_tester t1(generator, monitor, {test_duration_s, test_tps}); fc::time_point start = fc::time_point::now(); t1.run(); @@ -311,7 +311,7 @@ BOOST_AUTO_TEST_CASE(tps_cant_keep_up_monitored) std::shared_ptr monitor = std::make_shared(); - trx_tps_tester t1(generator, monitor, test_duration_s, test_tps); + trx_tps_tester t1(generator, monitor, {test_duration_s, test_tps}); fc::time_point start = fc::time_point::now(); t1.run(); @@ -324,22 +324,14 @@ BOOST_AUTO_TEST_CASE(tps_cant_keep_up_monitored) BOOST_AUTO_TEST_CASE(trx_generator_constructor) { - uint16_t generator_id = 1; - std::string chain_id = "999"; + trx_generator_base_config tg_config{1, chain_id_type("999"), name("eosio"), fc::seconds(3600), fc::variant("00000062989f69fd251df3e0b274c3364ffc2f4fce73de3f1c7b5e11a4c92f21").as(), ".", true}; + provider_base_config p_config{"127.0.0.1", 9876}; const std::string abi_file = "../../unittests/contracts/eosio.token/eosio.token.abi"; - std::string contract_owner_account = "eosio"; const std::string actions_data = "[{\"actionAuthAcct\": \"testacct1\",\"actionName\": \"transfer\",\"authorization\": {\"actor\": 
\"testacct1\",\"permission\": \"active\"},\"actionData\": {\"from\": \"testacct1\",\"to\": \"testacct2\",\"quantity\": \"0.0001 CUR\",\"memo\": \"transaction specified\"}}]"; const std::string action_auths = "{\"testacct1\":\"5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3\",\"testacct2\":\"5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3\",\"eosio\":\"5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3\"}"; - fc::microseconds trx_expr = fc::seconds(3600); - std::string log_dir = "."; - std::string lib_id_str = "00000062989f69fd251df3e0b274c3364ffc2f4fce73de3f1c7b5e11a4c92f21"; - bool stop_on_trx_failed = true; - std::string peer_endpoint = "127.0.0.1"; - unsigned short port = 9876; - - auto generator = trx_generator(generator_id, chain_id, abi_file, contract_owner_account, - actions_data, action_auths, - trx_expr, lib_id_str, log_dir, stop_on_trx_failed, peer_endpoint, port); + user_specified_trx_config trx_config{abi_file, actions_data, action_auths}; + + auto generator = trx_generator(tg_config, p_config, trx_config); } BOOST_AUTO_TEST_CASE(account_name_generator_tests) diff --git a/tests/trx_generator/trx_provider.cpp b/tests/trx_generator/trx_provider.cpp index 84fe1c3eb0..f0027578df 100644 --- a/tests/trx_generator/trx_provider.cpp +++ b/tests/trx_generator/trx_provider.cpp @@ -37,12 +37,12 @@ namespace eosio::testing { } void p2p_connection::connect() { - ilog("Attempting P2P connection to ${ip}:${port}.", ("ip", _peer_endpoint)("port", _peer_port)); + ilog("Attempting P2P connection to ${ip}:${port}.", ("ip", _config._peer_endpoint)("port", _config._port)); tcp::resolver r(_p2p_service); - tcp::resolver::query q(tcp::v4(), _peer_endpoint, std::to_string(_peer_port)); + tcp::resolver::query q(tcp::v4(), _config._peer_endpoint, std::to_string(_config._port)); auto i = r.resolve(q); boost::asio::connect(_p2p_socket, i); - ilog("Connected to ${ip}:${port}.", ("ip", _peer_endpoint)("port", _peer_port)); + ilog("Connected to ${ip}:${port}.", ("ip", 
_config._peer_endpoint)("port", _config._port)); } void p2p_connection::disconnect() { @@ -56,8 +56,8 @@ namespace eosio::testing { _p2p_socket.send(boost::asio::buffer(*msg)); } - p2p_trx_provider::p2p_trx_provider(const std::string& peer_endpoint, unsigned short peer_port) : - _peer_connection(peer_endpoint, peer_port) { + p2p_trx_provider::p2p_trx_provider(const provider_base_config& provider_config) : + _peer_connection(provider_config) { } diff --git a/tests/trx_generator/trx_provider.hpp b/tests/trx_generator/trx_provider.hpp index 94380ff045..71c35188b7 100644 --- a/tests/trx_generator/trx_provider.hpp +++ b/tests/trx_generator/trx_provider.hpp @@ -22,14 +22,24 @@ namespace eosio::testing { _trx_id(trx_id), _sent_timestamp(sent) {} }; + struct provider_base_config { + std::string _peer_endpoint = "127.0.0.1"; + unsigned short _port = 9876; + + std::string to_string() const { + std::ostringstream ss; + ss << "peer_endpoint: " << _peer_endpoint << " port: " << _port; + return std::move(ss).str(); + } + }; + struct p2p_connection { - std::string _peer_endpoint; + const provider_base_config& _config; boost::asio::io_service _p2p_service; boost::asio::ip::tcp::socket _p2p_socket; - unsigned short _peer_port; - p2p_connection(const std::string& peer_endpoint, unsigned short peer_port) : - _peer_endpoint(peer_endpoint), _p2p_service(), _p2p_socket(_p2p_service), _peer_port(peer_port) {} + p2p_connection(const provider_base_config& provider_config) : + _config(provider_config), _p2p_service(), _p2p_socket(_p2p_service) {} void connect(); void disconnect(); @@ -37,7 +47,7 @@ namespace eosio::testing { }; struct p2p_trx_provider { - p2p_trx_provider(const std::string& peer_endpoint="127.0.0.1", unsigned short port=9876); + p2p_trx_provider(const provider_base_config& provider_config); void setup(); void send(const std::vector& trxs); @@ -89,23 +99,30 @@ namespace eosio::testing { bool terminated_early() {return _terminated_early;} }; + struct trx_tps_tester_config { + 
uint32_t _gen_duration_seconds; + uint32_t _target_tps; + + std::string to_string() const { + std::ostringstream ss; + ss << "Trx Tps Tester Config: duration: " << _gen_duration_seconds << " target tps: " << _target_tps; + return std::move(ss).str(); + }; + }; + template struct trx_tps_tester { std::shared_ptr _generator; std::shared_ptr _monitor; + trx_tps_tester_config _config; - uint32_t _gen_duration_seconds; - uint32_t _target_tps; - - trx_tps_tester(std::shared_ptr generator, std::shared_ptr monitor, uint32_t gen_duration_seconds, uint32_t target_tps) : - _generator(generator), _monitor(monitor), - _gen_duration_seconds(gen_duration_seconds), _target_tps(target_tps) { - + trx_tps_tester(std::shared_ptr generator, std::shared_ptr monitor, const trx_tps_tester_config& tester_config) : + _generator(generator), _monitor(monitor), _config(tester_config) { } bool run() { - if ((_target_tps) < 1 || (_gen_duration_seconds < 1)) { - elog("target tps (${tps}) and duration (${dur}) must both be 1+", ("tps", _target_tps)("dur", _gen_duration_seconds)); + if ((_config._target_tps) < 1 || (_config._gen_duration_seconds < 1)) { + elog("target tps (${tps}) and duration (${dur}) must both be 1+", ("tps", _config._target_tps)("dur", _config._gen_duration_seconds)); return false; } @@ -114,12 +131,12 @@ namespace eosio::testing { } tps_test_stats stats; - stats.trx_interval = fc::microseconds(std::chrono::microseconds(1s).count() / _target_tps); + stats.trx_interval = fc::microseconds(std::chrono::microseconds(1s).count() / _config._target_tps); - stats.total_trxs = _gen_duration_seconds * _target_tps; + stats.total_trxs = _config._gen_duration_seconds * _config._target_tps; stats.trxs_left = stats.total_trxs; stats.start_time = fc::time_point::now(); - stats.expected_end_time = stats.start_time + fc::microseconds{_gen_duration_seconds * std::chrono::microseconds(1s).count()}; + stats.expected_end_time = stats.start_time + fc::microseconds{_config._gen_duration_seconds * 
std::chrono::microseconds(1s).count()}; stats.time_to_next_trx_us = 0; bool keep_running = true; From 2d31597efcb36a9c05d60a91bf7e5ab07f3c04b6 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 8 Mar 2023 11:45:41 -0600 Subject: [PATCH 161/178] Put back in blank line so list will render properly. --- tests/performance_tests/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/performance_tests/README.md b/tests/performance_tests/README.md index 40d95bf109..a795252270 100644 --- a/tests/performance_tests/README.md +++ b/tests/performance_tests/README.md @@ -473,6 +473,7 @@ Performance Test Basic Single Test:
Expand Argument List + * `chain_id` set the chain id * `last_irreversible_block_id` Current last-irreversible-block-id (LIB ID) to use for transactions. * `contract_owner_account` Account name of the contract owner account for the transfer actions From 3cd1b3609c1bb3b458dfd32399ef7bc81cc55bb3 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 8 Mar 2023 13:01:51 -0600 Subject: [PATCH 162/178] Addressing peer review comments Updates to emplace_back, push_back, transform usage. --- tests/trx_generator/trx_generator.cpp | 84 +++++++++++++-------------- tests/trx_generator/trx_generator.hpp | 5 +- 2 files changed, 42 insertions(+), 47 deletions(-) diff --git a/tests/trx_generator/trx_generator.cpp b/tests/trx_generator/trx_generator.cpp index 081caa6606..edb8529606 100644 --- a/tests/trx_generator/trx_generator.cpp +++ b/tests/trx_generator/trx_generator.cpp @@ -26,15 +26,13 @@ namespace eosio::testing { trx.delay_sec = delay_sec; } - signed_transaction_w_signer trx_generator_base::create_trx_w_actions_and_signer(std::vector acts, const fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const chain_id_type& chain_id, const block_id_type& last_irr_block_id) { + signed_transaction_w_signer trx_generator_base::create_trx_w_actions_and_signer(std::vector&& acts, const fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const chain_id_type& chain_id, const block_id_type& last_irr_block_id) { signed_transaction trx; set_transaction_headers(trx, last_irr_block_id, trx_expiration); - for (auto& act : acts) { - trx.actions.emplace_back(std::move(act)); - } - trx.context_free_actions.emplace_back(action({}, config::null_account_name, name("nonce"), + trx.actions = std::move(acts); + trx.context_free_actions.emplace_back(vector(), config::null_account_name, name("nonce"), fc::raw::pack(std::to_string(_config._generator_id) + ":" + 
std::to_string(nonce_prefix) + ":" + std::to_string(++nonce) + ":" + - fc::time_point::now().time_since_epoch().count()))); + fc::time_point::now().time_since_epoch().count())); trx.sign(priv_key, chain_id); return signed_transaction_w_signer(trx, priv_key); @@ -45,17 +43,17 @@ namespace eosio::testing { _trxs.reserve(2 * _action_pairs_vector.size()); for (const action_pair_w_keys& ap : _action_pairs_vector) { - _trxs.emplace_back(create_trx_w_actions_and_signer({ap._first_act}, ap._first_act_priv_key, nonce_prefix, nonce, _config._trx_expiration_us, _config._chain_id, _config._last_irr_block_id)); - _trxs.emplace_back(create_trx_w_actions_and_signer({ap._second_act}, ap._second_act_priv_key, nonce_prefix, nonce, _config._trx_expiration_us, _config._chain_id, _config._last_irr_block_id)); + _trxs.push_back(create_trx_w_actions_and_signer({ap._first_act}, ap._first_act_priv_key, nonce_prefix, nonce, _config._trx_expiration_us, _config._chain_id, _config._last_irr_block_id)); + _trxs.push_back(create_trx_w_actions_and_signer({ap._second_act}, ap._second_act_priv_key, nonce_prefix, nonce, _config._trx_expiration_us, _config._chain_id, _config._last_irr_block_id)); } } void trx_generator_base::update_resign_transaction(signed_transaction& trx, const fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const chain_id_type& chain_id, const block_id_type& last_irr_block_id) { trx.context_free_actions.clear(); - trx.context_free_actions.emplace_back(action({}, config::null_account_name, name("nonce"), + trx.context_free_actions.emplace_back(vector(), config::null_account_name, name("nonce"), fc::raw::pack(std::to_string(_config._generator_id) + ":" + std::to_string(nonce_prefix) + ":" + std::to_string(++nonce) + ":" + - fc::time_point::now().time_since_epoch().count()))); + fc::time_point::now().time_since_epoch().count())); set_transaction_headers(trx, last_irr_block_id, trx_expiration); 
trx.signatures.clear(); trx.sign(priv_key, chain_id); @@ -138,7 +136,7 @@ namespace eosio::testing { void trx_generator::locate_key_words_in_action_mvo(std::vector& acct_gen_fields_out, fc::mutable_variant_object& action_mvo, const std::string& key_word) { for (const mutable_variant_object::entry& e: action_mvo) { if (e.value().get_type() == fc::variant::string_type && e.value() == key_word) { - acct_gen_fields_out.emplace_back(e.key()); + acct_gen_fields_out.push_back(e.key()); } else if (e.value().get_type() == fc::variant::object_type) { auto inner_mvo = fc::mutable_variant_object(e.value()); locate_key_words_in_action_mvo(acct_gen_fields_out, inner_mvo, key_word); @@ -179,18 +177,16 @@ namespace eosio::testing { void trx_generator::update_resign_transaction(signed_transaction& trx, const fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const chain_id_type& chain_id, const block_id_type& last_irr_block_id) { trx.actions.clear(); - update_actions(); - for (const auto& act: _actions) { - trx.actions.emplace_back(act); - } + trx.actions = generate_actions(); trx_generator_base::update_resign_transaction(trx, priv_key, nonce_prefix, nonce, trx_expiration, chain_id, last_irr_block_id); } trx_generator::trx_generator(const trx_generator_base_config& trx_gen_base_config, const provider_base_config& provider_config, const user_specified_trx_config& usr_trx_config) : trx_generator_base(trx_gen_base_config, provider_config), _usr_trx_config(usr_trx_config), _acct_name_generator() {} - void trx_generator::update_actions() { - _actions.clear(); + std::vector trx_generator::generate_actions() { + std::vector actions; + actions.reserve(_unpacked_actions.size()); if (!_acct_gen_fields.empty()) { std::string generated_account_name = _acct_name_generator.calc_name(); @@ -201,27 +197,25 @@ namespace eosio::testing { } } - for (const auto& action_mvo : _unpacked_actions) { - chain::name action_name = 
chain::name(action_mvo["actionName"].as_string()); - chain::name action_auth_acct = chain::name(action_mvo["actionAuthAcct"].as_string()); - bytes packed_action_data; - try { - auto action_type = _abi.get_action_type( action_name ); - FC_ASSERT( !action_type.empty(), "Unknown action ${action} in contract ${contract}", ("action", action_name)( "contract", action_auth_acct )); - packed_action_data = _abi.variant_to_binary( action_type, action_mvo["actionData"], abi_serializer::create_yield_function( abi_serializer_max_time ) ); - } EOS_RETHROW_EXCEPTIONS(transaction_type_exception, "Fail to parse unpacked action data JSON") - - eosio::chain::action act; - act.account = _config._contract_owner_account; - act.name = action_name; - - chain::name auth_actor = chain::name(action_mvo["authorization"].get_object()["actor"].as_string()); - chain::name auth_perm = chain::name(action_mvo["authorization"].get_object()["permission"].as_string()); - - act.authorization = vector{{auth_actor, auth_perm}}; - act.data = std::move(packed_action_data); - _actions.emplace_back(std::move(act)); - } + std::transform(_unpacked_actions.begin(), _unpacked_actions.end(), std::back_inserter(actions), + [&](const auto& action_mvo) { + chain::name action_name = chain::name(action_mvo["actionName"].as_string()); + chain::name action_auth_acct = chain::name(action_mvo["actionAuthAcct"].as_string()); + bytes packed_action_data; + try { + auto action_type = _abi.get_action_type(action_name); + FC_ASSERT(!action_type.empty(), "Unknown action ${action} in contract ${contract}", ("action", action_name)("contract", action_auth_acct)); + packed_action_data = _abi.variant_to_binary(action_type, action_mvo["actionData"], abi_serializer::create_yield_function(abi_serializer_max_time)); + } + EOS_RETHROW_EXCEPTIONS(transaction_type_exception, "Fail to parse unpacked action data JSON") + + chain::name auth_actor = chain::name(action_mvo["authorization"].get_object()["actor"].as_string()); + chain::name 
auth_perm = chain::name(action_mvo["authorization"].get_object()["permission"].as_string()); + + return eosio::chain::action({{auth_actor, auth_perm}}, _config._contract_owner_account, action_name, std::move(packed_action_data)); + }); + + return actions; } bool trx_generator::setup() { @@ -241,9 +235,11 @@ namespace eosio::testing { const std::string gen_acct_name_per_trx("ACCT_PER_TRX"); auto action_array = unpacked_actions_data_json.get_array(); - for (size_t i =0; i < action_array.size(); ++i ) { - _unpacked_actions.emplace_back(fc::mutable_variant_object(action_array[i])); - } + _unpacked_actions.reserve(action_array.size()); + std::transform(action_array.begin(), action_array.end(), std::back_inserter(_unpacked_actions), + [&](const auto& var) { + return fc::mutable_variant_object(var); + }); locate_key_words_in_action_array(_acct_gen_fields, action_array, gen_acct_name_per_trx); if (!_acct_gen_fields.empty()) { @@ -260,15 +256,15 @@ namespace eosio::testing { signer_key = fc::crypto::private_key(unpacked_actions_auths_data_json.get_object()[_unpacked_actions.at(0)["actionAuthAcct"].as_string()].as_string()); ilog("Setting up initial transaction actions."); - update_actions(); + auto actions = generate_actions(); ilog("Initial actions (${count}):", ("count", _unpacked_actions.size())); for (size_t i = 0; i < _unpacked_actions.size(); ++i) { ilog("Initial action ${index}: ${act}", ("index", i)("act", fc::json::to_pretty_string(_unpacked_actions.at(i)))); - ilog("Initial action packed data ${index}: ${packed_data}", ("packed_data", fc::to_hex(_actions.at(i).data.data(), _actions.at(i).data.size()))); + ilog("Initial action packed data ${index}: ${packed_data}", ("packed_data", fc::to_hex(actions.at(i).data.data(), actions.at(i).data.size()))); } ilog("Populate initial transaction."); - _trxs.emplace_back(create_trx_w_actions_and_signer(_actions, signer_key, ++_nonce_prefix, _nonce, _config._trx_expiration_us, _config._chain_id, _config._last_irr_block_id)); + 
_trxs.push_back(create_trx_w_actions_and_signer(std::move(actions), signer_key, ++_nonce_prefix, _nonce, _config._trx_expiration_us, _config._chain_id, _config._last_irr_block_id)); ilog("Setup p2p transaction provider"); diff --git a/tests/trx_generator/trx_generator.hpp b/tests/trx_generator/trx_generator.hpp index 8dc20d24c8..71f04ab340 100644 --- a/tests/trx_generator/trx_generator.hpp +++ b/tests/trx_generator/trx_generator.hpp @@ -185,7 +185,7 @@ namespace eosio::testing { void set_transaction_headers(eosio::chain::transaction& trx, const eosio::chain::block_id_type& last_irr_block_id, const fc::microseconds& expiration, uint32_t delay_sec = 0); - signed_transaction_w_signer create_trx_w_actions_and_signer(std::vector act, const fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, uint64_t& nonce, + signed_transaction_w_signer create_trx_w_actions_and_signer(std::vector&& act, const fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const eosio::chain::chain_id_type& chain_id, const eosio::chain::block_id_type& last_irr_block_id); void log_first_trx(const std::string& log_dir, const eosio::chain::signed_transaction& trx); @@ -216,7 +216,6 @@ namespace eosio::testing { eosio::chain::abi_serializer _abi; std::vector _unpacked_actions; std::map> _acct_gen_fields; - std::vector _actions; const fc::microseconds abi_serializer_max_time = fc::seconds(10); // No risk to client side serialization taking a long time @@ -227,7 +226,7 @@ namespace eosio::testing { void update_key_word_fields_in_sub_action(const std::string& key, fc::mutable_variant_object& action_mvo, const std::string& action_inner_key, const std::string& key_word); void update_key_word_fields_in_action(std::vector& acct_gen_fields, fc::mutable_variant_object& action_mvo, const std::string& key_word); - void update_actions(); + std::vector generate_actions(); virtual void update_resign_transaction(eosio::chain::signed_transaction& 
trx, const fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const eosio::chain::chain_id_type& chain_id, const eosio::chain::block_id_type& last_irr_block_id); From 778ff304e6d596a8cb535bf5a53df745f4d4d760 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 8 Mar 2023 16:43:56 -0600 Subject: [PATCH 163/178] Cleaning up using namespace statements and namespace qualifiers. --- tests/trx_generator/main.cpp | 67 ++++++++++----------- tests/trx_generator/trx_generator.cpp | 64 +++++++++----------- tests/trx_generator/trx_generator_tests.cpp | 15 ++--- tests/trx_generator/trx_provider.cpp | 8 +-- 4 files changed, 69 insertions(+), 85 deletions(-) diff --git a/tests/trx_generator/main.cpp b/tests/trx_generator/main.cpp index 9f468dca2e..eaa7bd1dea 100644 --- a/tests/trx_generator/main.cpp +++ b/tests/trx_generator/main.cpp @@ -7,9 +7,8 @@ #include #include -using namespace eosio::testing; -using namespace eosio::chain; -using namespace eosio; +namespace bpo = boost::program_options; +namespace et = eosio::testing; enum return_codes { TERMINATED_EARLY = -3, @@ -24,21 +23,21 @@ enum return_codes { }; int main(int argc, char** argv) { - provider_base_config provider_config; - trx_generator_base_config trx_gen_base_config; - user_specified_trx_config user_trx_config; - accounts_config accts_config; - trx_tps_tester_config tester_config; + et::provider_base_config provider_config; + et::trx_generator_base_config trx_gen_base_config; + et::user_specified_trx_config user_trx_config; + et::accounts_config accts_config; + et::trx_tps_tester_config tester_config; const int64_t trx_expiration_max = 3600; const uint16_t generator_id_max = 960; - variables_map vmap; - options_description cli("Transaction Generator command line options."); + bpo::variables_map vmap; + bpo::options_description cli("Transaction Generator command line options."); std::string chain_id_in; std::string contract_owner_account_in; 
std::string lib_id_str; - string accts; - string p_keys; + std::string accts; + std::string p_keys; int64_t spinup_time_us = 1000000; uint32_t max_lag_per = 5; int64_t max_lag_duration_us = 1000000; @@ -48,23 +47,23 @@ int main(int argc, char** argv) { cli.add_options() ("generator-id", bpo::value(&trx_gen_base_config._generator_id)->default_value(0), "Id for the transaction generator. Allowed range (0-960). Defaults to 0.") - ("chain-id", bpo::value(&chain_id_in), "set the chain id") - ("contract-owner-account", bpo::value(&contract_owner_account_in), "Account name of the contract account for the transaction actions") - ("accounts", bpo::value(&accts), "comma-separated list of accounts that will be used for transfers. Minimum required accounts: 2.") - ("priv-keys", bpo::value(&p_keys), "comma-separated list of private keys in same order of accounts list that will be used to sign transactions. Minimum required: 2.") + ("chain-id", bpo::value(&chain_id_in), "set the chain id") + ("contract-owner-account", bpo::value(&contract_owner_account_in), "Account name of the contract account for the transaction actions") + ("accounts", bpo::value(&accts), "comma-separated list of accounts that will be used for transfers. Minimum required accounts: 2.") + ("priv-keys", bpo::value(&p_keys), "comma-separated list of private keys in same order of accounts list that will be used to sign transactions. Minimum required: 2.") ("trx-expiration", bpo::value(&trx_expr)->default_value(3600), "transaction expiration time in seconds. Defaults to 3,600. Maximum allowed: 3,600") ("trx-gen-duration", bpo::value(&tester_config._gen_duration_seconds)->default_value(60), "Transaction generation duration (seconds). Defaults to 60 seconds.") ("target-tps", bpo::value(&tester_config._target_tps)->default_value(1), "Target transactions per second to generate/send. 
Defaults to 1 transaction per second.") - ("last-irreversible-block-id", bpo::value(&lib_id_str), "Current last-irreversible-block-id (LIB ID) to use for transactions.") + ("last-irreversible-block-id", bpo::value(&lib_id_str), "Current last-irreversible-block-id (LIB ID) to use for transactions.") ("monitor-spinup-time-us", bpo::value(&spinup_time_us)->default_value(1000000), "Number of microseconds to wait before monitoring TPS. Defaults to 1000000 (1s).") ("monitor-max-lag-percent", bpo::value(&max_lag_per)->default_value(5), "Max percentage off from expected transactions sent before being in violation. Defaults to 5.") ("monitor-max-lag-duration-us", bpo::value(&max_lag_duration_us)->default_value(1000000), "Max microseconds that transaction generation can be in violation before quitting. Defaults to 1000000 (1s).") - ("log-dir", bpo::value(&trx_gen_base_config._log_dir), "set the logs directory") + ("log-dir", bpo::value(&trx_gen_base_config._log_dir), "set the logs directory") ("stop-on-trx-failed", bpo::value(&trx_gen_base_config._stop_on_trx_failed)->default_value(true), "stop transaction generation if sending fails.") - ("abi-file", bpo::value(&user_trx_config._abi_data_file_path), "The path to the contract abi file to use for the supplied transaction action data") - ("actions-data", bpo::value(&user_trx_config._actions_data_json_file_or_str), "The json actions data file or json actions data description string to use") - ("actions-auths", bpo::value(&user_trx_config._actions_auths_json_file_or_str), "The json actions auth file or json actions auths description string to use, containting authAcctName to activePrivateKey pairs.") - ("peer-endpoint", bpo::value(&provider_config._peer_endpoint)->default_value("127.0.0.1"), "set the peer endpoint to send transactions to") + ("abi-file", bpo::value(&user_trx_config._abi_data_file_path), "The path to the contract abi file to use for the supplied transaction action data") + ("actions-data", 
bpo::value(&user_trx_config._actions_data_json_file_or_str), "The json actions data file or json actions data description string to use") + ("actions-auths", bpo::value(&user_trx_config._actions_auths_json_file_or_str), "The json actions auth file or json actions auths description string to use, containting authAcctName to activePrivateKey pairs.") + ("peer-endpoint", bpo::value(&provider_config._peer_endpoint)->default_value("127.0.0.1"), "set the peer endpoint to send transactions to") ("port", bpo::value(&provider_config._port)->default_value(9876), "set the peer endpoint port to send transactions to") ("help,h", "print this list") ; @@ -92,7 +91,7 @@ int main(int argc, char** argv) { cli.print(std::cerr); return INITIALIZE_FAIL; } else { - trx_gen_base_config._chain_id = chain_id_type(chain_id_in); + trx_gen_base_config._chain_id = eosio::chain::chain_id_type(chain_id_in); } if(trx_gen_base_config._log_dir.empty()) { @@ -106,7 +105,7 @@ int main(int argc, char** argv) { cli.print(std::cerr); return INITIALIZE_FAIL; } else { - trx_gen_base_config._last_irr_block_id = fc::variant(lib_id_str).as(); + trx_gen_base_config._last_irr_block_id = fc::variant(lib_id_str).as(); } if(contract_owner_account_in.empty()) { @@ -114,7 +113,7 @@ int main(int argc, char** argv) { cli.print(std::cerr); return INITIALIZE_FAIL; } else { - trx_gen_base_config._contract_owner_account = name(contract_owner_account_in); + trx_gen_base_config._contract_owner_account = eosio::chain::name(contract_owner_account_in); } std::vector account_str_vector; @@ -124,7 +123,7 @@ int main(int argc, char** argv) { cli.print(std::cerr); return INITIALIZE_FAIL; } else if (!accts.empty() && !account_str_vector.empty()) { - for(const string& account_name: account_str_vector) { + for(const std::string& account_name: account_str_vector) { ilog("Initializing accounts. 
Attempt to create name for ${acct}", ("acct", account_name)); accts_config._acct_name_vec.emplace_back(account_name); } @@ -137,7 +136,7 @@ int main(int argc, char** argv) { cli.print(std::cerr); return INITIALIZE_FAIL; } else if (!p_keys.empty() && !private_keys_str_vector.empty()) { - for(const string& private_key: private_keys_str_vector) { + for(const std::string& private_key: private_keys_str_vector) { ilog("Initializing private keys. Attempt to create private_key for ${key} : gen key ${newKey}", ("key", private_key)("newKey", fc::crypto::private_key(private_key))); accts_config._priv_keys_vec.emplace_back(private_key); } @@ -189,21 +188,21 @@ int main(int argc, char** argv) { ilog("User Transaction Specified: ${config}", ("config", user_trx_config.to_string())); } - std::shared_ptr monitor; + std::shared_ptr monitor; if (transaction_specified) { - auto generator = std::make_shared(trx_gen_base_config, provider_config, user_trx_config); + auto generator = std::make_shared(trx_gen_base_config, provider_config, user_trx_config); - monitor = std::make_shared(spinup_time_us, max_lag_per, max_lag_duration_us); - trx_tps_tester tester{generator, monitor, tester_config}; + monitor = std::make_shared(spinup_time_us, max_lag_per, max_lag_duration_us); + et::trx_tps_tester tester{generator, monitor, tester_config}; if (!tester.run()) { return OTHER_FAIL; } } else { - auto generator = std::make_shared(trx_gen_base_config, provider_config, accts_config); + auto generator = std::make_shared(trx_gen_base_config, provider_config, accts_config); - monitor = std::make_shared(spinup_time_us, max_lag_per, max_lag_duration_us); - trx_tps_tester tester{generator, monitor, tester_config}; + monitor = std::make_shared(spinup_time_us, max_lag_per, max_lag_duration_us); + et::trx_tps_tester tester{generator, monitor, tester_config}; if (!tester.run()) { return OTHER_FAIL; diff --git a/tests/trx_generator/trx_generator.cpp b/tests/trx_generator/trx_generator.cpp index 
edb8529606..5c57266494 100644 --- a/tests/trx_generator/trx_generator.cpp +++ b/tests/trx_generator/trx_generator.cpp @@ -9,15 +9,11 @@ #include #include -using namespace std; -using namespace eosio::chain; -using namespace eosio; -using namespace appbase; -namespace bpo=boost::program_options; - namespace eosio::testing { + using namespace chain::literals; + namespace chain = eosio::chain; - void trx_generator_base::set_transaction_headers(transaction& trx, const block_id_type& last_irr_block_id, const fc::microseconds& expiration, uint32_t delay_sec) { + void trx_generator_base::set_transaction_headers(chain::transaction& trx, const chain::block_id_type& last_irr_block_id, const fc::microseconds& expiration, uint32_t delay_sec) { trx.expiration = fc::time_point::now() + expiration; trx.set_reference_block(last_irr_block_id); @@ -26,13 +22,13 @@ namespace eosio::testing { trx.delay_sec = delay_sec; } - signed_transaction_w_signer trx_generator_base::create_trx_w_actions_and_signer(std::vector&& acts, const fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const chain_id_type& chain_id, const block_id_type& last_irr_block_id) { - signed_transaction trx; + signed_transaction_w_signer trx_generator_base::create_trx_w_actions_and_signer(std::vector&& acts, const fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const chain::chain_id_type& chain_id, const chain::block_id_type& last_irr_block_id) { + chain::signed_transaction trx; set_transaction_headers(trx, last_irr_block_id, trx_expiration); trx.actions = std::move(acts); - trx.context_free_actions.emplace_back(vector(), config::null_account_name, name("nonce"), + trx.context_free_actions.emplace_back(std::vector(), chain::config::null_account_name, chain::name("nonce"), fc::raw::pack(std::to_string(_config._generator_id) + ":" + std::to_string(nonce_prefix) + ":" + 
std::to_string(++nonce) + ":" + - fc::time_point::now().time_since_epoch().count())); + std::to_string(fc::time_point::now().time_since_epoch().count()))); trx.sign(priv_key, chain_id); return signed_transaction_w_signer(trx, priv_key); @@ -48,19 +44,19 @@ namespace eosio::testing { } } - void trx_generator_base::update_resign_transaction(signed_transaction& trx, const fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, - const chain_id_type& chain_id, const block_id_type& last_irr_block_id) { + void trx_generator_base::update_resign_transaction(chain::signed_transaction& trx, const fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, + const chain::chain_id_type& chain_id, const chain::block_id_type& last_irr_block_id) { trx.context_free_actions.clear(); - trx.context_free_actions.emplace_back(vector(), config::null_account_name, name("nonce"), + trx.context_free_actions.emplace_back(std::vector(), chain::config::null_account_name, chain::name("nonce"), fc::raw::pack(std::to_string(_config._generator_id) + ":" + std::to_string(nonce_prefix) + ":" + std::to_string(++nonce) + ":" + - fc::time_point::now().time_since_epoch().count())); + std::to_string(fc::time_point::now().time_since_epoch().count()))); set_transaction_headers(trx, last_irr_block_id, trx_expiration); trx.signatures.clear(); trx.sign(priv_key, chain_id); } chain::bytes transfer_trx_generator::make_transfer_data(const chain::name& from, const chain::name& to, const chain::asset& quantity, const std::string& memo) { - return fc::raw::pack(from, to, quantity, memo); + return fc::raw::pack< chain::name>(from, to, quantity, memo); } auto transfer_trx_generator::make_transfer_action(chain::name account, chain::name from, chain::name to, chain::asset quantity, std::string memo) { @@ -74,10 +70,10 @@ namespace eosio::testing { for (size_t j = i + 1; j < 
_accts_config._acct_name_vec.size(); ++j) { //create the actions here ilog("create_initial_transfer_actions: creating transfer from ${acctA} to ${acctB}", ("acctA", _accts_config._acct_name_vec.at(i))("acctB", _accts_config._acct_name_vec.at(j))); - action act_a_to_b = make_transfer_action(_config._contract_owner_account, _accts_config._acct_name_vec.at(i), _accts_config._acct_name_vec.at(j), asset::from_string("1.0000 CUR"), salt); + chain::action act_a_to_b = make_transfer_action(_config._contract_owner_account, _accts_config._acct_name_vec.at(i), _accts_config._acct_name_vec.at(j), chain::asset::from_string("1.0000 CUR"), salt); ilog("create_initial_transfer_actions: creating transfer from ${acctB} to ${acctA}", ("acctB", _accts_config._acct_name_vec.at(j))("acctA", _accts_config._acct_name_vec.at(i))); - action act_b_to_a = make_transfer_action(_config._contract_owner_account, _accts_config._acct_name_vec.at(j), _accts_config._acct_name_vec.at(i), asset::from_string("1.0000 CUR"), salt); + chain::action act_b_to_a = make_transfer_action(_config._contract_owner_account, _accts_config._acct_name_vec.at(j), _accts_config._acct_name_vec.at(i), chain::asset::from_string("1.0000 CUR"), salt); _action_pairs_vector.emplace_back(act_a_to_b, act_b_to_a, _accts_config._priv_keys_vec.at(i), _accts_config._priv_keys_vec.at(j)); } @@ -91,10 +87,6 @@ namespace eosio::testing { transfer_trx_generator::transfer_trx_generator(const trx_generator_base_config& trx_gen_base_config, const provider_base_config& provider_config, const accounts_config& accts_config) : trx_generator_base(trx_gen_base_config, provider_config), _accts_config(accts_config) {} - - - - bool transfer_trx_generator::setup() { const std::string salt = std::to_string(getpid()); const uint64_t &period = 20; @@ -118,23 +110,23 @@ namespace eosio::testing { return true; } - fc::variant trx_generator::json_from_file_or_string(const string& file_or_str, fc::json::parse_type ptype) + fc::variant 
trx_generator::json_from_file_or_string(const std::string& file_or_str, fc::json::parse_type ptype) { - regex r("^[ \t]*[\{\[]"); + std::regex r("^[ \t]*[\{\[]"); if ( !regex_search(file_or_str, r) && fc::is_regular_file(file_or_str) ) { try { return fc::json::from_file(file_or_str, ptype); - } EOS_RETHROW_EXCEPTIONS(json_parse_exception, "Fail to parse JSON from file: ${file}", ("file", file_or_str)); + } EOS_RETHROW_EXCEPTIONS(chain::json_parse_exception, "Fail to parse JSON from file: ${file}", ("file", file_or_str)); } else { try { return fc::json::from_string(file_or_str, ptype); - } EOS_RETHROW_EXCEPTIONS(json_parse_exception, "Fail to parse JSON from string: ${string}", ("string", file_or_str)); + } EOS_RETHROW_EXCEPTIONS(chain::json_parse_exception, "Fail to parse JSON from string: ${string}", ("string", file_or_str)); } } void trx_generator::locate_key_words_in_action_mvo(std::vector& acct_gen_fields_out, fc::mutable_variant_object& action_mvo, const std::string& key_word) { - for (const mutable_variant_object::entry& e: action_mvo) { + for (const fc::mutable_variant_object::entry& e: action_mvo) { if (e.value().get_type() == fc::variant::string_type && e.value() == key_word) { acct_gen_fields_out.push_back(e.key()); } else if (e.value().get_type() == fc::variant::object_type) { @@ -175,7 +167,7 @@ namespace eosio::testing { } } - void trx_generator::update_resign_transaction(signed_transaction& trx, const fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const chain_id_type& chain_id, const block_id_type& last_irr_block_id) { + void trx_generator::update_resign_transaction(chain::signed_transaction& trx, const fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const chain::chain_id_type& chain_id, const chain::block_id_type& last_irr_block_id) { trx.actions.clear(); trx.actions = generate_actions(); 
trx_generator_base::update_resign_transaction(trx, priv_key, nonce_prefix, nonce, trx_expiration, chain_id, last_irr_block_id); @@ -184,8 +176,8 @@ namespace eosio::testing { trx_generator::trx_generator(const trx_generator_base_config& trx_gen_base_config, const provider_base_config& provider_config, const user_specified_trx_config& usr_trx_config) : trx_generator_base(trx_gen_base_config, provider_config), _usr_trx_config(usr_trx_config), _acct_name_generator() {} - std::vector trx_generator::generate_actions() { - std::vector actions; + std::vector trx_generator::generate_actions() { + std::vector actions; actions.reserve(_unpacked_actions.size()); if (!_acct_gen_fields.empty()) { @@ -201,18 +193,18 @@ namespace eosio::testing { [&](const auto& action_mvo) { chain::name action_name = chain::name(action_mvo["actionName"].as_string()); chain::name action_auth_acct = chain::name(action_mvo["actionAuthAcct"].as_string()); - bytes packed_action_data; + chain::bytes packed_action_data; try { auto action_type = _abi.get_action_type(action_name); FC_ASSERT(!action_type.empty(), "Unknown action ${action} in contract ${contract}", ("action", action_name)("contract", action_auth_acct)); - packed_action_data = _abi.variant_to_binary(action_type, action_mvo["actionData"], abi_serializer::create_yield_function(abi_serializer_max_time)); + packed_action_data = _abi.variant_to_binary(action_type, action_mvo["actionData"], chain::abi_serializer::create_yield_function(abi_serializer_max_time)); } - EOS_RETHROW_EXCEPTIONS(transaction_type_exception, "Fail to parse unpacked action data JSON") + EOS_RETHROW_EXCEPTIONS(chain::transaction_type_exception, "Fail to parse unpacked action data JSON") chain::name auth_actor = chain::name(action_mvo["authorization"].get_object()["actor"].as_string()); chain::name auth_perm = chain::name(action_mvo["authorization"].get_object()["permission"].as_string()); - return eosio::chain::action({{auth_actor, auth_perm}}, 
_config._contract_owner_account, action_name, std::move(packed_action_data)); + return chain::action({{auth_actor, auth_perm}}, _config._contract_owner_account, action_name, std::move(packed_action_data)); }); return actions; @@ -314,11 +306,11 @@ namespace eosio::testing { fileName << log_dir << "/first_trx_" << getpid() << ".txt"; std::ofstream out(fileName.str()); - out << fc::string(trx.id()) << "\n"; + out << std::string(trx.id()) << "\n"; out.close(); } - void trx_generator_base::push_transaction(p2p_trx_provider& provider, signed_transaction_w_signer& trx, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const chain_id_type& chain_id, const block_id_type& last_irr_block_id) { + void trx_generator_base::push_transaction(p2p_trx_provider& provider, signed_transaction_w_signer& trx, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const chain::chain_id_type& chain_id, const chain::block_id_type& last_irr_block_id) { update_resign_transaction(trx._trx, trx._signer, ++nonce_prefix, nonce, trx_expiration, chain_id, last_irr_block_id); if (_txcount == 0) { log_first_trx(_config._log_dir, trx._trx); diff --git a/tests/trx_generator/trx_generator_tests.cpp b/tests/trx_generator/trx_generator_tests.cpp index 01fcfe8016..11180a9f05 100644 --- a/tests/trx_generator/trx_generator_tests.cpp +++ b/tests/trx_generator/trx_generator_tests.cpp @@ -81,7 +81,6 @@ BOOST_AUTO_TEST_CASE(tps_short_run_high_tps) std::shared_ptr generator = std::make_shared(expected_trxs); std::shared_ptr monitor = std::make_shared(expected_trxs); - trx_tps_tester t1(generator, monitor, {test_duration_s, test_tps}); fc::time_point start = fc::time_point::now(); @@ -114,7 +113,6 @@ BOOST_AUTO_TEST_CASE(tps_short_run_med_tps_med_delay) std::shared_ptr generator = std::make_shared(expected_trxs, trx_delay_us); std::shared_ptr monitor = std::make_shared(expected_trxs); - trx_tps_tester t1(generator, monitor, {test_duration_s, test_tps}); 
fc::time_point start = fc::time_point::now(); @@ -147,7 +145,6 @@ BOOST_AUTO_TEST_CASE(tps_med_run_med_tps_med_delay) std::shared_ptr generator = std::make_shared(expected_trxs, trx_delay_us); std::shared_ptr monitor = std::make_shared(expected_trxs); - trx_tps_tester t1(generator, monitor, {test_duration_s, test_tps}); fc::time_point start = fc::time_point::now(); @@ -180,7 +177,6 @@ BOOST_AUTO_TEST_CASE(tps_cant_keep_up) std::shared_ptr generator = std::make_shared(expected_trxs, trx_delay_us); std::shared_ptr monitor = std::make_shared(expected_trxs); - trx_tps_tester t1(generator, monitor, {test_duration_s, test_tps}); fc::time_point start = fc::time_point::now(); @@ -213,7 +209,6 @@ BOOST_AUTO_TEST_CASE(tps_med_run_med_tps_30us_delay) std::shared_ptr generator = std::make_shared(expected_trxs, trx_delay_us); std::shared_ptr monitor = std::make_shared(expected_trxs); - trx_tps_tester t1(generator, monitor, {test_duration_s, test_tps}); fc::time_point start = fc::time_point::now(); @@ -310,7 +305,6 @@ BOOST_AUTO_TEST_CASE(tps_cant_keep_up_monitored) std::shared_ptr generator = std::make_shared(expected_trxs, trx_delay_us); std::shared_ptr monitor = std::make_shared(); - trx_tps_tester t1(generator, monitor, {test_duration_s, test_tps}); fc::time_point start = fc::time_point::now(); @@ -324,11 +318,14 @@ BOOST_AUTO_TEST_CASE(tps_cant_keep_up_monitored) BOOST_AUTO_TEST_CASE(trx_generator_constructor) { - trx_generator_base_config tg_config{1, chain_id_type("999"), name("eosio"), fc::seconds(3600), fc::variant("00000062989f69fd251df3e0b274c3364ffc2f4fce73de3f1c7b5e11a4c92f21").as(), ".", true}; + trx_generator_base_config tg_config{1, chain::chain_id_type("999"), chain::name("eosio"), fc::seconds(3600), + fc::variant("00000062989f69fd251df3e0b274c3364ffc2f4fce73de3f1c7b5e11a4c92f21").as(), ".", true}; provider_base_config p_config{"127.0.0.1", 9876}; const std::string abi_file = "../../unittests/contracts/eosio.token/eosio.token.abi"; - const std::string 
actions_data = "[{\"actionAuthAcct\": \"testacct1\",\"actionName\": \"transfer\",\"authorization\": {\"actor\": \"testacct1\",\"permission\": \"active\"},\"actionData\": {\"from\": \"testacct1\",\"to\": \"testacct2\",\"quantity\": \"0.0001 CUR\",\"memo\": \"transaction specified\"}}]"; - const std::string action_auths = "{\"testacct1\":\"5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3\",\"testacct2\":\"5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3\",\"eosio\":\"5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3\"}"; + const std::string actions_data = "[{\"actionAuthAcct\": \"testacct1\",\"actionName\": \"transfer\",\"authorization\": {\"actor\": \"testacct1\",\"permission\": \"active\"}," + "\"actionData\": {\"from\": \"testacct1\",\"to\": \"testacct2\",\"quantity\": \"0.0001 CUR\",\"memo\": \"transaction specified\"}}]"; + const std::string action_auths = "{\"testacct1\":\"5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3\",\"testacct2\":\"5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3\"," + "\"eosio\":\"5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3\"}"; user_specified_trx_config trx_config{abi_file, actions_data, action_auths}; auto generator = trx_generator(tg_config, p_config, trx_config); diff --git a/tests/trx_generator/trx_provider.cpp b/tests/trx_generator/trx_provider.cpp index f0027578df..67e569a5e1 100644 --- a/tests/trx_generator/trx_provider.cpp +++ b/tests/trx_generator/trx_provider.cpp @@ -8,10 +8,6 @@ #include #include -using std::string; -using std::vector; -using namespace eosio; - namespace eosio::testing { using namespace boost::asio; using ip::tcp; @@ -27,7 +23,7 @@ namespace eosio::testing { const char* const header = reinterpret_cast(&payload_size); // avoid variable size encoding of uint32_t - auto send_buffer = std::make_shared>(buffer_size); + auto send_buffer = std::make_shared>(buffer_size); fc::datastream ds( send_buffer->data(), buffer_size); ds.write( header, message_header_size ); fc::raw::pack( ds, 
fc::unsigned_int(packed_trx_which)); @@ -83,7 +79,7 @@ namespace eosio::testing { std::ofstream out(fileName.str()); for (logged_trx_data data : _sent_trx_data) { - out << fc::string(data._trx_id) << ","<< std::string(data._sent_timestamp) << "\n"; + out << std::string(data._trx_id) << ","<< std::string(data._sent_timestamp) << "\n"; } out.close(); } From 173fff951f877924ec1cea56000ae182241450ad Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 9 Mar 2023 08:36:59 -0600 Subject: [PATCH 164/178] Fix braces. --- tests/trx_generator/trx_generator.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/trx_generator/trx_generator.cpp b/tests/trx_generator/trx_generator.cpp index 5c57266494..a2e8fb3535 100644 --- a/tests/trx_generator/trx_generator.cpp +++ b/tests/trx_generator/trx_generator.cpp @@ -110,8 +110,7 @@ namespace eosio::testing { return true; } - fc::variant trx_generator::json_from_file_or_string(const std::string& file_or_str, fc::json::parse_type ptype) - { + fc::variant trx_generator::json_from_file_or_string(const std::string& file_or_str, fc::json::parse_type ptype) { std::regex r("^[ \t]*[\{\[]"); if ( !regex_search(file_or_str, r) && fc::is_regular_file(file_or_str) ) { try { From bafd7ddd698672a99859c412124b84c13174d074 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 9 Mar 2023 08:53:17 -0600 Subject: [PATCH 165/178] Address peer review comments. Break up some rather long lines of code. 
--- tests/trx_generator/trx_generator.cpp | 46 +++++++++++++++++---------- tests/trx_generator/trx_generator.hpp | 5 +-- 2 files changed, 33 insertions(+), 18 deletions(-) diff --git a/tests/trx_generator/trx_generator.cpp b/tests/trx_generator/trx_generator.cpp index a2e8fb3535..66d1eece48 100644 --- a/tests/trx_generator/trx_generator.cpp +++ b/tests/trx_generator/trx_generator.cpp @@ -22,7 +22,9 @@ namespace eosio::testing { trx.delay_sec = delay_sec; } - signed_transaction_w_signer trx_generator_base::create_trx_w_actions_and_signer(std::vector&& acts, const fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const chain::chain_id_type& chain_id, const chain::block_id_type& last_irr_block_id) { + signed_transaction_w_signer trx_generator_base::create_trx_w_actions_and_signer(std::vector&& acts, const fc::crypto::private_key& priv_key, + uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, + const chain::chain_id_type& chain_id, const chain::block_id_type& last_irr_block_id) { chain::signed_transaction trx; set_transaction_headers(trx, last_irr_block_id, trx_expiration); trx.actions = std::move(acts); @@ -38,14 +40,16 @@ namespace eosio::testing { std::vector trxs; _trxs.reserve(2 * _action_pairs_vector.size()); - for (const action_pair_w_keys& ap : _action_pairs_vector) { - _trxs.push_back(create_trx_w_actions_and_signer({ap._first_act}, ap._first_act_priv_key, nonce_prefix, nonce, _config._trx_expiration_us, _config._chain_id, _config._last_irr_block_id)); - _trxs.push_back(create_trx_w_actions_and_signer({ap._second_act}, ap._second_act_priv_key, nonce_prefix, nonce, _config._trx_expiration_us, _config._chain_id, _config._last_irr_block_id)); + for(const action_pair_w_keys& ap: _action_pairs_vector) { + _trxs.push_back(create_trx_w_actions_and_signer({ap._first_act}, ap._first_act_priv_key, nonce_prefix, nonce, _config._trx_expiration_us, _config._chain_id, + 
_config._last_irr_block_id)); + _trxs.push_back(create_trx_w_actions_and_signer({ap._second_act}, ap._second_act_priv_key, nonce_prefix, nonce, _config._trx_expiration_us, _config._chain_id, + _config._last_irr_block_id)); } } - void trx_generator_base::update_resign_transaction(chain::signed_transaction& trx, const fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, - const chain::chain_id_type& chain_id, const chain::block_id_type& last_irr_block_id) { + void trx_generator_base::update_resign_transaction(chain::signed_transaction& trx, const fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, uint64_t& nonce, + const fc::microseconds& trx_expiration, const chain::chain_id_type& chain_id, const chain::block_id_type& last_irr_block_id) { trx.context_free_actions.clear(); trx.context_free_actions.emplace_back(std::vector(), chain::config::null_account_name, chain::name("nonce"), fc::raw::pack(std::to_string(_config._generator_id) + ":" + std::to_string(nonce_prefix) + ":" + std::to_string(++nonce) + ":" + @@ -69,11 +73,15 @@ namespace eosio::testing { for (size_t i = 0; i < _accts_config._acct_name_vec.size(); ++i) { for (size_t j = i + 1; j < _accts_config._acct_name_vec.size(); ++j) { //create the actions here - ilog("create_initial_transfer_actions: creating transfer from ${acctA} to ${acctB}", ("acctA", _accts_config._acct_name_vec.at(i))("acctB", _accts_config._acct_name_vec.at(j))); - chain::action act_a_to_b = make_transfer_action(_config._contract_owner_account, _accts_config._acct_name_vec.at(i), _accts_config._acct_name_vec.at(j), chain::asset::from_string("1.0000 CUR"), salt); + ilog("create_initial_transfer_actions: creating transfer from ${acctA} to ${acctB}", + ("acctA", _accts_config._acct_name_vec.at(i))("acctB", _accts_config._acct_name_vec.at(j))); + chain::action act_a_to_b = make_transfer_action(_config._contract_owner_account, _accts_config._acct_name_vec.at(i), 
_accts_config._acct_name_vec.at(j), + chain::asset::from_string("1.0000 CUR"), salt); - ilog("create_initial_transfer_actions: creating transfer from ${acctB} to ${acctA}", ("acctB", _accts_config._acct_name_vec.at(j))("acctA", _accts_config._acct_name_vec.at(i))); - chain::action act_b_to_a = make_transfer_action(_config._contract_owner_account, _accts_config._acct_name_vec.at(j), _accts_config._acct_name_vec.at(i), chain::asset::from_string("1.0000 CUR"), salt); + ilog("create_initial_transfer_actions: creating transfer from ${acctB} to ${acctA}", + ("acctB", _accts_config._acct_name_vec.at(j))("acctA", _accts_config._acct_name_vec.at(i))); + chain::action act_b_to_a = make_transfer_action(_config._contract_owner_account, _accts_config._acct_name_vec.at(j), _accts_config._acct_name_vec.at(i), + chain::asset::from_string("1.0000 CUR"), salt); _action_pairs_vector.emplace_back(act_a_to_b, act_b_to_a, _accts_config._priv_keys_vec.at(i), _accts_config._priv_keys_vec.at(j)); } @@ -84,7 +92,8 @@ namespace eosio::testing { trx_generator_base::trx_generator_base(const trx_generator_base_config& trx_gen_base_config, const provider_base_config& provider_config) : _config(trx_gen_base_config), _provider(provider_config) {} - transfer_trx_generator::transfer_trx_generator(const trx_generator_base_config& trx_gen_base_config, const provider_base_config& provider_config, const accounts_config& accts_config) + transfer_trx_generator::transfer_trx_generator(const trx_generator_base_config& trx_gen_base_config, const provider_base_config& provider_config, + const accounts_config& accts_config) : trx_generator_base(trx_gen_base_config, provider_config), _accts_config(accts_config) {} bool transfer_trx_generator::setup() { @@ -142,7 +151,8 @@ namespace eosio::testing { } } - void trx_generator::update_key_word_fields_in_sub_action(const std::string& key, fc::mutable_variant_object& action_mvo, const std::string& action_inner_key, const std::string& key_word) { + void 
trx_generator::update_key_word_fields_in_sub_action(const std::string& key, fc::mutable_variant_object& action_mvo, const std::string& action_inner_key, + const std::string& key_word) { if (action_mvo.find(action_inner_key) != action_mvo.end()) { if (action_mvo[action_inner_key].get_object().find(key) != action_mvo[action_inner_key].get_object().end()) { fc::mutable_variant_object inner_mvo = fc::mutable_variant_object(action_mvo[action_inner_key].get_object()); @@ -166,7 +176,8 @@ namespace eosio::testing { } } - void trx_generator::update_resign_transaction(chain::signed_transaction& trx, const fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const chain::chain_id_type& chain_id, const chain::block_id_type& last_irr_block_id) { + void trx_generator::update_resign_transaction(chain::signed_transaction& trx, const fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, uint64_t& nonce, + const fc::microseconds& trx_expiration, const chain::chain_id_type& chain_id, const chain::block_id_type& last_irr_block_id) { trx.actions.clear(); trx.actions = generate_actions(); trx_generator_base::update_resign_transaction(trx, priv_key, nonce_prefix, nonce, trx_expiration, chain_id, last_irr_block_id); @@ -255,11 +266,13 @@ namespace eosio::testing { } ilog("Populate initial transaction."); - _trxs.push_back(create_trx_w_actions_and_signer(std::move(actions), signer_key, ++_nonce_prefix, _nonce, _config._trx_expiration_us, _config._chain_id, _config._last_irr_block_id)); + _trxs.push_back(create_trx_w_actions_and_signer(std::move(actions), signer_key, ++_nonce_prefix, _nonce, _config._trx_expiration_us, _config._chain_id, + _config._last_irr_block_id)); ilog("Setup p2p transaction provider"); - ilog("Update each trx to qualify as unique and fresh timestamps and update each action with unique generated account name if necessary, re-sign trx, and send each updated transactions via p2p transaction provider"); 
+ ilog("Update each trx to qualify as unique and fresh timestamps and update each action with unique generated account name if necessary," + " re-sign trx, and send each updated transactions via p2p transaction provider"); _provider.setup(); return true; @@ -309,7 +322,8 @@ namespace eosio::testing { out.close(); } - void trx_generator_base::push_transaction(p2p_trx_provider& provider, signed_transaction_w_signer& trx, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const chain::chain_id_type& chain_id, const chain::block_id_type& last_irr_block_id) { + void trx_generator_base::push_transaction(p2p_trx_provider& provider, signed_transaction_w_signer& trx, uint64_t& nonce_prefix, uint64_t& nonce, + const fc::microseconds& trx_expiration, const chain::chain_id_type& chain_id, const chain::block_id_type& last_irr_block_id) { update_resign_transaction(trx._trx, trx._signer, ++nonce_prefix, nonce, trx_expiration, chain_id, last_irr_block_id); if (_txcount == 0) { log_first_trx(_config._log_dir, trx._trx); diff --git a/tests/trx_generator/trx_generator.hpp b/tests/trx_generator/trx_generator.hpp index 71f04ab340..58be9efa5d 100644 --- a/tests/trx_generator/trx_generator.hpp +++ b/tests/trx_generator/trx_generator.hpp @@ -185,8 +185,9 @@ namespace eosio::testing { void set_transaction_headers(eosio::chain::transaction& trx, const eosio::chain::block_id_type& last_irr_block_id, const fc::microseconds& expiration, uint32_t delay_sec = 0); - signed_transaction_w_signer create_trx_w_actions_and_signer(std::vector&& act, const fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, uint64_t& nonce, - const fc::microseconds& trx_expiration, const eosio::chain::chain_id_type& chain_id, const eosio::chain::block_id_type& last_irr_block_id); + signed_transaction_w_signer create_trx_w_actions_and_signer(std::vector&& act, const fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, + uint64_t& nonce, const fc::microseconds& trx_expiration, 
const eosio::chain::chain_id_type& chain_id, + const eosio::chain::block_id_type& last_irr_block_id); void log_first_trx(const std::string& log_dir, const eosio::chain::signed_transaction& trx); From 72451aad536aa2b6e9f13253b463b6e031fe4cfd Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 9 Mar 2023 08:57:14 -0600 Subject: [PATCH 166/178] Address peer review comments. Combine pieces of argument onto same line. Remove trailing whitespace. --- tests/performance_tests/performance_test_basic.py | 2 +- tests/trx_generator/trx_generator.cpp | 6 ++---- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 56f2cbd71f..1e92b1fdfb 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -377,7 +377,7 @@ def runTpsTest(self) -> PtbTpsTestResult: self.data.startBlock = self.waitForEmptyBlocks(self.validationNode, self.emptyBlockGoal) tpsTrxGensConfig = TpsTrxGensConfig(targetTps=self.ptbConfig.targetTps, tpsLimitPerGenerator=self.ptbConfig.tpsLimitPerGenerator) - self.cluster.trxGenLauncher = TransactionGeneratorsLauncher(chainId=chainId, lastIrreversibleBlockId=lib_id, contractOwnerAccount=self.clusterConfig.specifiedContract.account.name, + self.cluster.trxGenLauncher = TransactionGeneratorsLauncher(chainId=chainId, lastIrreversibleBlockId=lib_id, contractOwnerAccount=self.clusterConfig.specifiedContract.account.name, accts=','.join(map(str, self.accountNames)), privateKeys=','.join(map(str, self.accountPrivKeys)), trxGenDurationSec=self.ptbConfig.testTrxGenDurationSec, logDir=self.trxGenLogDirPath, abiFile=abiFile, actionsData=actionsDataJson, actionsAuths=actionsAuthsJson, diff --git a/tests/trx_generator/trx_generator.cpp b/tests/trx_generator/trx_generator.cpp index 66d1eece48..79f0d7feba 100644 --- a/tests/trx_generator/trx_generator.cpp +++ b/tests/trx_generator/trx_generator.cpp @@ 
-29,8 +29,7 @@ namespace eosio::testing { set_transaction_headers(trx, last_irr_block_id, trx_expiration); trx.actions = std::move(acts); trx.context_free_actions.emplace_back(std::vector(), chain::config::null_account_name, chain::name("nonce"), - fc::raw::pack(std::to_string(_config._generator_id) + ":" + std::to_string(nonce_prefix) + ":" + std::to_string(++nonce) + ":" + - std::to_string(fc::time_point::now().time_since_epoch().count()))); + fc::raw::pack(std::to_string(_config._generator_id) + ":" + std::to_string(nonce_prefix) + ":" + std::to_string(++nonce) + ":" + std::to_string(fc::time_point::now().time_since_epoch().count()))); trx.sign(priv_key, chain_id); return signed_transaction_w_signer(trx, priv_key); @@ -52,8 +51,7 @@ namespace eosio::testing { const fc::microseconds& trx_expiration, const chain::chain_id_type& chain_id, const chain::block_id_type& last_irr_block_id) { trx.context_free_actions.clear(); trx.context_free_actions.emplace_back(std::vector(), chain::config::null_account_name, chain::name("nonce"), - fc::raw::pack(std::to_string(_config._generator_id) + ":" + std::to_string(nonce_prefix) + ":" + std::to_string(++nonce) + ":" + - std::to_string(fc::time_point::now().time_since_epoch().count()))); + fc::raw::pack(std::to_string(_config._generator_id) + ":" + std::to_string(nonce_prefix) + ":" + std::to_string(++nonce) + ":" + std::to_string(fc::time_point::now().time_since_epoch().count()))); set_transaction_headers(trx, last_irr_block_id, trx_expiration); trx.signatures.clear(); trx.sign(priv_key, chain_id); From c6717c40548adfd6b5960ae82a618aa6f008eb7c Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 9 Mar 2023 10:23:26 -0600 Subject: [PATCH 167/178] Peer review comment. Use variable to make this easier to read and follow. 
--- tests/trx_generator/trx_generator.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/trx_generator/trx_generator.cpp b/tests/trx_generator/trx_generator.cpp index 79f0d7feba..e356a2b2b6 100644 --- a/tests/trx_generator/trx_generator.cpp +++ b/tests/trx_generator/trx_generator.cpp @@ -152,8 +152,9 @@ namespace eosio::testing { void trx_generator::update_key_word_fields_in_sub_action(const std::string& key, fc::mutable_variant_object& action_mvo, const std::string& action_inner_key, const std::string& key_word) { if (action_mvo.find(action_inner_key) != action_mvo.end()) { - if (action_mvo[action_inner_key].get_object().find(key) != action_mvo[action_inner_key].get_object().end()) { - fc::mutable_variant_object inner_mvo = fc::mutable_variant_object(action_mvo[action_inner_key].get_object()); + auto inner = action_mvo[action_inner_key].get_object(); + if (inner.find(key) != inner.end()) { + fc::mutable_variant_object inner_mvo = fc::mutable_variant_object(inner); inner_mvo.set(key, key_word); action_mvo.set(action_inner_key, std::move(inner_mvo)); } From 045db81995649b32b0a82759eab20d63d234ce7a Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 9 Mar 2023 12:51:16 -0600 Subject: [PATCH 168/178] Addressing peer review comments. Newer versions of Python, e.g. 3.11 require use of default_factory in dataclasses. Newer versions of python/numpy no longer require np predicate before float, and can use float directly. 
--- tests/performance_tests/log_reader.py | 2 +- .../performance_test_basic.py | 20 +++++++++---------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/tests/performance_tests/log_reader.py b/tests/performance_tests/log_reader.py index 1687a1a686..348b1a2af2 100644 --- a/tests/performance_tests/log_reader.py +++ b/tests/performance_tests/log_reader.py @@ -431,7 +431,7 @@ def calcTrxLatencyCpuNetStats(trxDict : dict, blockDict: dict): """ trxLatencyCpuNetList = [(data.latency, data.cpuUsageUs, data.netUsageUs) for trxId, data in trxDict.items() if data.calcdTimeEpoch != 0] - npLatencyCpuNetList = np.array(trxLatencyCpuNetList, dtype=np.float) + npLatencyCpuNetList = np.array(trxLatencyCpuNetList, dtype=float) return basicStats(float(np.min(npLatencyCpuNetList[:,0])), float(np.max(npLatencyCpuNetList[:,0])), float(np.average(npLatencyCpuNetList[:,0])), float(np.std(npLatencyCpuNetList[:,0])), len(npLatencyCpuNetList)), \ basicStats(float(np.min(npLatencyCpuNetList[:,1])), float(np.max(npLatencyCpuNetList[:,1])), float(np.average(npLatencyCpuNetList[:,1])), float(np.std(npLatencyCpuNetList[:,1])), len(npLatencyCpuNetList)), \ diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 1e92b1fdfb..6cb2569b7b 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -51,14 +51,14 @@ class ClusterConfig: @dataclass class ExtraNodeosArgs: - chainPluginArgs: ChainPluginArgs = ChainPluginArgs() - httpPluginArgs: HttpPluginArgs = HttpPluginArgs() - netPluginArgs: NetPluginArgs = NetPluginArgs() - producerPluginArgs: ProducerPluginArgs = ProducerPluginArgs() - resourceMonitorPluginArgs: ResourceMonitorPluginArgs = ResourceMonitorPluginArgs() - signatureProviderPluginArgs: SignatureProviderPluginArgs = SignatureProviderPluginArgs() - stateHistoryPluginArgs: StateHistoryPluginArgs = StateHistoryPluginArgs() - traceApiPluginArgs: 
TraceApiPluginArgs = TraceApiPluginArgs() + chainPluginArgs: ChainPluginArgs = field(default_factory=ChainPluginArgs) + httpPluginArgs: HttpPluginArgs = field(default_factory=HttpPluginArgs) + netPluginArgs: NetPluginArgs = field(default_factory=NetPluginArgs) + producerPluginArgs: ProducerPluginArgs = field(default_factory=ProducerPluginArgs) + resourceMonitorPluginArgs: ResourceMonitorPluginArgs = field(default_factory=ResourceMonitorPluginArgs) + signatureProviderPluginArgs: SignatureProviderPluginArgs = field(default_factory=SignatureProviderPluginArgs) + stateHistoryPluginArgs: StateHistoryPluginArgs = field(default_factory=StateHistoryPluginArgs) + traceApiPluginArgs: TraceApiPluginArgs = field(default_factory=TraceApiPluginArgs) def __str__(self) -> str: args = [] @@ -78,8 +78,8 @@ class SpecifiedContract: pnodes: int = 1 totalNodes: int = 2 topo: str = "mesh" - extraNodeosArgs: ExtraNodeosArgs = ExtraNodeosArgs() - specifiedContract: SpecifiedContract = SpecifiedContract() + extraNodeosArgs: ExtraNodeosArgs = field(default_factory=ExtraNodeosArgs) + specifiedContract: SpecifiedContract = field(default_factory=SpecifiedContract) useBiosBootFile: bool = False genesisPath: Path = Path("tests")/"performance_tests"/"genesis.json" maximumP2pPerHost: int = 5000 From 1c612e81899b89aa9041250eb84765dd3ca63317 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 9 Mar 2023 13:03:36 -0600 Subject: [PATCH 169/178] Update ProducerPluginArgs for newly added arguments. 
--- tests/performance_tests/NodeosPluginArgs/ProducerPluginArgs.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/performance_tests/NodeosPluginArgs/ProducerPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/ProducerPluginArgs.py index 6cee80285b..e214f2bade 100755 --- a/tests/performance_tests/NodeosPluginArgs/ProducerPluginArgs.py +++ b/tests/performance_tests/NodeosPluginArgs/ProducerPluginArgs.py @@ -65,6 +65,9 @@ class ProducerPluginArgs(BasePluginArgs): subjectiveAccountMaxFailures: int=None _subjectiveAccountMaxFailuresNodeosDefault: int=3 _subjectiveAccountMaxFailuresNodeosArg: str="--subjective-account-max-failures" + subjectiveAccountMaxFailuresWindowSize: int=None + _subjectiveAccountMaxFailuresWindowSizeNodeosDefault: int=1 + _subjectiveAccountMaxFailuresWindowSizeNodeosArg: str="--subjective-account-max-failures-window-size" subjectiveAccountDecayTimeMinutes: int=None _subjectiveAccountDecayTimeMinutesNodeosDefault: int=1440 _subjectiveAccountDecayTimeMinutesNodeosArg: str="--subjective-account-decay-time-minutes" From 500814876e1e0ee0b3976fc2218e4080585d53e8 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 9 Mar 2023 13:26:13 -0600 Subject: [PATCH 170/178] Addressing peer review comments. --- tests/TestHarness/Cluster.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tests/TestHarness/Cluster.py b/tests/TestHarness/Cluster.py index edba218d8b..4d6cf68175 100644 --- a/tests/TestHarness/Cluster.py +++ b/tests/TestHarness/Cluster.py @@ -713,12 +713,15 @@ def createAccountKeys(count): # create account keys and import into wallet. 
Wallet initialization will be user responsibility # also imports defproducera and defproducerb accounts def populateWallet(self, accountsCount, wallet, accountNames: list=None, createProducerAccounts: bool=True): - if accountsCount == 0 and len(accountNames) == 0: + if accountsCount == 0 and (accountNames is None or len(accountNames) == 0): return True if self.walletMgr is None: Utils.Print("ERROR: WalletMgr hasn't been initialized.") return False + if accountNames is not None: + assert(len(accountNames) <= accountsCount) + accounts=None if accountsCount > 0: Utils.Print ("Create account keys.") @@ -1601,7 +1604,7 @@ def cleanup(self): for f in self.filesToCleanup: os.remove(f) - # Create accounts and validates that the last transaction is received on root node + # Create accounts, if account does not already exist, and validates that the last transaction is received on root node def createAccounts(self, creator, waitForTransBlock=True, stakedDeposit=1000, validationNodeIndex=0): if self.accounts is None: return True From 5ec692a2bedbd2b497f32f397ee846fa80711198 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 9 Mar 2023 13:54:14 -0600 Subject: [PATCH 171/178] Fixup documentation for missing arguments. 
--- tests/performance_tests/README.md | 2 ++ tests/trx_generator/README.md | 2 ++ 2 files changed, 4 insertions(+) diff --git a/tests/performance_tests/README.md b/tests/performance_tests/README.md index a795252270..af4c18a109 100644 --- a/tests/performance_tests/README.md +++ b/tests/performance_tests/README.md @@ -299,6 +299,8 @@ Performance Test Basic Base: Path to contract dir (default: unittests/contracts/eosio.system) * `--wasm-file WASM_FILE` WASM file name for contract (default: eosio.system.wasm) * `--abi-file ABI_FILE` ABI file name for contract (default: eosio.system.abi) +* `--user-trx-data-file USER_TRX_DATA_FILE` + Path to transaction data JSON file (default: None) * `--wasm-runtime RUNTIME` Override default WASM runtime ("eos-vm-jit", "eos-vm") "eos-vm-jit" : A WebAssembly runtime that compiles WebAssembly code to native x86 code prior to diff --git a/tests/trx_generator/README.md b/tests/trx_generator/README.md index 26a15bdec7..f05cb15c80 100644 --- a/tests/trx_generator/README.md +++ b/tests/trx_generator/README.md @@ -46,6 +46,8 @@ The Transaction Generator logs each transaction's id and sent timestamp at the m generation can be in violation before quitting. Defaults to 1000000 (1s). * `--log-dir arg` set the logs directory +* `--stop-on-trx-failed arg` (=1) stop transaction generation if sending + fails. * `--abi-file arg` The path to the contract abi file to use for the supplied transaction action data From 6ba406ff2dea25d819d7daa17210cbc3d265b63b Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 9 Mar 2023 14:31:25 -0600 Subject: [PATCH 172/178] Address peer review comments. Moving some functions out of trx_generator since they don't need to be member functions. Addressing a couple missed const qualifiers on arguments. 
--- tests/trx_generator/trx_generator.cpp | 10 +++++----- tests/trx_generator/trx_generator.hpp | 11 ++++++----- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/tests/trx_generator/trx_generator.cpp b/tests/trx_generator/trx_generator.cpp index e356a2b2b6..d647ef7f08 100644 --- a/tests/trx_generator/trx_generator.cpp +++ b/tests/trx_generator/trx_generator.cpp @@ -117,7 +117,7 @@ namespace eosio::testing { return true; } - fc::variant trx_generator::json_from_file_or_string(const std::string& file_or_str, fc::json::parse_type ptype) { + fc::variant json_from_file_or_string(const std::string& file_or_str, fc::json::parse_type ptype) { std::regex r("^[ \t]*[\{\[]"); if ( !regex_search(file_or_str, r) && fc::is_regular_file(file_or_str) ) { try { @@ -131,7 +131,7 @@ namespace eosio::testing { } } - void trx_generator::locate_key_words_in_action_mvo(std::vector& acct_gen_fields_out, fc::mutable_variant_object& action_mvo, const std::string& key_word) { + void locate_key_words_in_action_mvo(std::vector& acct_gen_fields_out, const fc::mutable_variant_object& action_mvo, const std::string& key_word) { for (const fc::mutable_variant_object::entry& e: action_mvo) { if (e.value().get_type() == fc::variant::string_type && e.value() == key_word) { acct_gen_fields_out.push_back(e.key()); @@ -142,14 +142,14 @@ namespace eosio::testing { } } - void trx_generator::locate_key_words_in_action_array(std::map>& acct_gen_fields_out, fc::variants& action_array, const std::string& key_word) { + void locate_key_words_in_action_array(std::map>& acct_gen_fields_out, const fc::variants& action_array, const std::string& key_word) { for (size_t i = 0; i < action_array.size(); ++i) { auto action_mvo = fc::mutable_variant_object(action_array[i]); locate_key_words_in_action_mvo(acct_gen_fields_out[i], action_mvo, key_word); } } - void trx_generator::update_key_word_fields_in_sub_action(const std::string& key, fc::mutable_variant_object& action_mvo, const std::string& 
action_inner_key, + void update_key_word_fields_in_sub_action(const std::string& key, fc::mutable_variant_object& action_mvo, const std::string& action_inner_key, const std::string& key_word) { if (action_mvo.find(action_inner_key) != action_mvo.end()) { auto inner = action_mvo[action_inner_key].get_object(); @@ -161,7 +161,7 @@ namespace eosio::testing { } } - void trx_generator::update_key_word_fields_in_action(std::vector& acct_gen_fields, fc::mutable_variant_object& action_mvo, const std::string& key_word) { + void update_key_word_fields_in_action(std::vector& acct_gen_fields, fc::mutable_variant_object& action_mvo, const std::string& key_word) { for (const auto& key: acct_gen_fields) { if (action_mvo.find(key) != action_mvo.end()) { action_mvo.set(key, key_word); diff --git a/tests/trx_generator/trx_generator.hpp b/tests/trx_generator/trx_generator.hpp index 58be9efa5d..ddbc8a2f46 100644 --- a/tests/trx_generator/trx_generator.hpp +++ b/tests/trx_generator/trx_generator.hpp @@ -210,6 +210,12 @@ namespace eosio::testing { bool setup(); }; + void locate_key_words_in_action_mvo(std::vector& acct_gen_fields_out, const fc::mutable_variant_object& action_mvo, const std::string& key_word); + void locate_key_words_in_action_array(std::map>& acct_gen_fields_out, const fc::variants& action_array, const std::string& key_word); + void update_key_word_fields_in_sub_action(const std::string& key, fc::mutable_variant_object& action_mvo, const std::string& action_inner_key, const std::string& key_word); + void update_key_word_fields_in_action(std::vector& acct_gen_fields, fc::mutable_variant_object& action_mvo, const std::string& key_word); + fc::variant json_from_file_or_string(const std::string& file_or_str, fc::json::parse_type ptype = fc::json::parse_type::legacy_parser); + struct trx_generator : public trx_generator_base{ user_specified_trx_config _usr_trx_config; account_name_generator _acct_name_generator; @@ -222,16 +228,11 @@ namespace eosio::testing { 
trx_generator(const trx_generator_base_config& trx_gen_base_config, const provider_base_config& provider_config, const user_specified_trx_config& usr_trx_config); - void locate_key_words_in_action_mvo(std::vector& acct_gen_fields_out, fc::mutable_variant_object& action_mvo, const std::string& key_word); - void locate_key_words_in_action_array(std::map>& acct_gen_fields_out, fc::variants& action_array, const std::string& key_word); - void update_key_word_fields_in_sub_action(const std::string& key, fc::mutable_variant_object& action_mvo, const std::string& action_inner_key, const std::string& key_word); - void update_key_word_fields_in_action(std::vector& acct_gen_fields, fc::mutable_variant_object& action_mvo, const std::string& key_word); std::vector generate_actions(); virtual void update_resign_transaction(eosio::chain::signed_transaction& trx, const fc::crypto::private_key& priv_key, uint64_t& nonce_prefix, uint64_t& nonce, const fc::microseconds& trx_expiration, const eosio::chain::chain_id_type& chain_id, const eosio::chain::block_id_type& last_irr_block_id); - fc::variant json_from_file_or_string(const std::string& file_or_str, fc::json::parse_type ptype = fc::json::parse_type::legacy_parser); bool setup(); }; From df957cd13d81a7e89454daa2cc758edbc40cbae8 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 9 Mar 2023 15:04:20 -0600 Subject: [PATCH 173/178] Addressing peer review comment to remove unused code. 
--- .../generate_nodeos_plugin_args_class_files.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/tests/performance_tests/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py b/tests/performance_tests/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py index 3c4550527b..b597726c60 100755 --- a/tests/performance_tests/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py +++ b/tests/performance_tests/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py @@ -61,11 +61,6 @@ def main(): myStr = re.sub("Application Options:\n",'', string=myStr) pluginSections = re.split("(@@@.*?@@@\n)", string=myStr) - - sec=0 - for section in pluginSections: - sec=sec+1 - def pairwise(iterable): "s -> (s0, s1), (s2, s3), (s4, s5), ..." a = iter(iterable) From 9063dd87e1b346d04d4b49a8337630622ac75a22 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 9 Mar 2023 15:05:53 -0600 Subject: [PATCH 174/178] Addressing peer review comments. Add long form for help message. Make sure launch_transaction_generators is added to the module. Remove executable bits from scripts internal to the module. --- tests/TestHarness/TestHelper.py | 3 +-- tests/TestHarness/__init__.py | 2 +- tests/TestHarness/launch_transaction_generators.py | 4 +--- tests/TestHarness/testUtils.py | 0 tests/performance_tests/README.md | 4 ++-- 5 files changed, 5 insertions(+), 8 deletions(-) mode change 100755 => 100644 tests/TestHarness/launch_transaction_generators.py mode change 100755 => 100644 tests/TestHarness/testUtils.py diff --git a/tests/TestHarness/TestHelper.py b/tests/TestHarness/TestHelper.py index 01874c6be0..4cc0571d4d 100644 --- a/tests/TestHarness/TestHelper.py +++ b/tests/TestHarness/TestHelper.py @@ -47,8 +47,7 @@ def createArgumentParser(includeArgs, applicationSpecificArgs=AppArgs()) -> argp thGrpTitle = "Test Helper Arguments" thGrpDescription="Test Helper configuration items used to configure and spin up the regression test framework and blockchain environment." 
thGrp = thParser.add_argument_group(title=thGrpTitle, description=thGrpDescription) - thGrp.add_argument('-?', action='help', default=argparse.SUPPRESS, - help=argparse._('show this help message and exit')) + thGrp.add_argument('-?', '--help', action='help', default=argparse.SUPPRESS, help=argparse._('show this help message and exit')) if "-p" in includeArgs: thGrp.add_argument("-p", type=int, help="producing nodes count", default=1) diff --git a/tests/TestHarness/__init__.py b/tests/TestHarness/__init__.py index 580c755279..694a874dc8 100644 --- a/tests/TestHarness/__init__.py +++ b/tests/TestHarness/__init__.py @@ -1,4 +1,4 @@ -__all__ = ['Node', 'Cluster', 'WalletMgr', 'testUtils', 'TestHelper', 'TransactionGeneratorsLauncher', 'TpsTrxGensConfig'] +__all__ = ['Node', 'Cluster', 'WalletMgr', 'testUtils', 'TestHelper', 'launch_transaction_generators', 'TransactionGeneratorsLauncher', 'TpsTrxGensConfig'] from .Cluster import Cluster from .Node import Node diff --git a/tests/TestHarness/launch_transaction_generators.py b/tests/TestHarness/launch_transaction_generators.py old mode 100755 new mode 100644 index 2379987840..960ceca042 --- a/tests/TestHarness/launch_transaction_generators.py +++ b/tests/TestHarness/launch_transaction_generators.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python3 - from dataclasses import dataclass import os import sys @@ -139,7 +137,7 @@ def killAll(self): def parseArgs(): parser = argparse.ArgumentParser(add_help=False) - parser.add_argument('-?', action='help', default=argparse.SUPPRESS, help=argparse._('show this help message and exit')) + parser.add_argument('-?', '--help', action='help', default=argparse.SUPPRESS, help=argparse._('show this help message and exit')) parser.add_argument("chain_id", type=str, help="Chain ID") parser.add_argument("last_irreversible_block_id", type=str, help="Last irreversible block ID") parser.add_argument("contract_owner_account", type=str, help="Cluster contract owner account name") diff --git 
a/tests/TestHarness/testUtils.py b/tests/TestHarness/testUtils.py old mode 100755 new mode 100644 diff --git a/tests/performance_tests/README.md b/tests/performance_tests/README.md index af4c18a109..fdca07ab57 100644 --- a/tests/performance_tests/README.md +++ b/tests/performance_tests/README.md @@ -232,7 +232,7 @@ The Performance Harness main script `performance_test.py` can be configured usin Test Helper Arguments: Test Helper configuration items used to configure and spin up the regression test framework and blockchain environment. -* `-?` show this help message and exit +* `-?, --help` show this help message and exit * `-p P` producing nodes count (default: 1) * `-n N` total nodes (default: 0) * `-d D` delay between nodes startup (default: 1) @@ -370,7 +370,7 @@ The following scripts are typically used by the Performance Harness main script Test Helper Arguments: Test Helper configuration items used to configure and spin up the regression test framework and blockchain environment. -* `-?` show this help message and exit +* `-?, --help` show this help message and exit * `-p P` producing nodes count (default: 1) * `-n N` total nodes (default: 0) * `-d D` delay between nodes startup (default: 1) From 7ee35bb31d53ee5768d4aabb5113c6c7b54bb266 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 9 Mar 2023 16:14:01 -0600 Subject: [PATCH 175/178] Generate *PluginArgs.py files directly. Since they are now being generated they will always be kept in sync with nodeos arguments and the validation test is no longer needed. 
--- tests/performance_tests/CMakeLists.txt | 2 - .../NodeosPluginArgs/CMakeLists.txt | 30 ++- .../NodeosPluginArgs/ChainPluginArgs.py | 198 ------------------ .../NodeosPluginArgs/HttpPluginArgs.py | 66 ------ .../NodeosPluginArgs/NetPluginArgs.py | 78 ------- .../NodeosPluginArgs/ProducerPluginArgs.py | 108 ---------- .../ResourceMonitorPluginArgs.py | 36 ---- .../SignatureProviderPluginArgs.py | 24 --- .../StateHistoryPluginArgs.py | 57 ----- .../NodeosPluginArgs/TraceApiPluginArgs.py | 39 ---- ...generate_nodeos_plugin_args_class_files.py | 4 +- .../validate_nodeos_plugin_args.py | 110 ---------- 12 files changed, 24 insertions(+), 728 deletions(-) delete mode 100755 tests/performance_tests/NodeosPluginArgs/ChainPluginArgs.py delete mode 100755 tests/performance_tests/NodeosPluginArgs/HttpPluginArgs.py delete mode 100755 tests/performance_tests/NodeosPluginArgs/NetPluginArgs.py delete mode 100755 tests/performance_tests/NodeosPluginArgs/ProducerPluginArgs.py delete mode 100755 tests/performance_tests/NodeosPluginArgs/ResourceMonitorPluginArgs.py delete mode 100755 tests/performance_tests/NodeosPluginArgs/SignatureProviderPluginArgs.py delete mode 100755 tests/performance_tests/NodeosPluginArgs/StateHistoryPluginArgs.py delete mode 100755 tests/performance_tests/NodeosPluginArgs/TraceApiPluginArgs.py delete mode 100755 tests/performance_tests/validate_nodeos_plugin_args.py diff --git a/tests/performance_tests/CMakeLists.txt b/tests/performance_tests/CMakeLists.txt index 75a51afcae..c2367abd9c 100644 --- a/tests/performance_tests/CMakeLists.txt +++ b/tests/performance_tests/CMakeLists.txt @@ -7,7 +7,6 @@ configure_file(nodeos_log_2_0_14.txt.gz nodeos_log_2_0_14.txt.gz COPYONLY) configure_file(nodeos_log_3_2.txt.gz nodeos_log_3_2.txt.gz COPYONLY) configure_file(block_trx_data_log_2_0_14.txt.gz block_trx_data_log_2_0_14.txt.gz COPYONLY) configure_file(genesis.json genesis.json COPYONLY) -configure_file(validate_nodeos_plugin_args.py validate_nodeos_plugin_args.py 
COPYONLY) configure_file(cpuTrxData.json cpuTrxData.json COPYONLY) configure_file(ramTrxData.json ramTrxData.json COPYONLY) configure_file(userTrxDataTransfer.json userTrxDataTransfer.json COPYONLY) @@ -19,7 +18,6 @@ add_test(NAME performance_test_basic_ex_new_acct_trx_spec COMMAND tests/performa add_test(NAME performance_test_basic_ex_cpu_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --clean-run --chain-state-db-size-mb 200 --account-name "c" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/performance_tests/cpuTrxData.json WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME performance_test_basic_ex_ram_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v -p 1 -n 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --clean-run --chain-state-db-size-mb 200 --account-name "r" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/performance_tests/ramTrxData.json WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME log_reader_tests COMMAND tests/performance_tests/log_reader_tests.py WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME validate_nodeos_plugin_args COMMAND tests/performance_tests/validate_nodeos_plugin_args.py WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST performance_test_basic PROPERTY LABELS nonparallelizable_tests) set_property(TEST performance_test_basic_ex_transfer_trx_spec PROPERTY LABELS nonparallelizable_tests) set_property(TEST performance_test_basic_ex_new_acct_trx_spec PROPERTY LABELS nonparallelizable_tests) diff --git a/tests/performance_tests/NodeosPluginArgs/CMakeLists.txt b/tests/performance_tests/NodeosPluginArgs/CMakeLists.txt index 087be0e7a2..4fd7fbcc78 100644 --- a/tests/performance_tests/NodeosPluginArgs/CMakeLists.txt 
+++ b/tests/performance_tests/NodeosPluginArgs/CMakeLists.txt @@ -1,10 +1,24 @@ configure_file(__init__.py __init__.py COPYONLY) configure_file(BasePluginArgs.py BasePluginArgs.py COPYONLY) -configure_file(ChainPluginArgs.py ChainPluginArgs.py COPYONLY) -configure_file(HttpPluginArgs.py HttpPluginArgs.py COPYONLY) -configure_file(NetPluginArgs.py NetPluginArgs.py COPYONLY) -configure_file(ProducerPluginArgs.py ProducerPluginArgs.py COPYONLY) -configure_file(ResourceMonitorPluginArgs.py ResourceMonitorPluginArgs.py COPYONLY) -configure_file(SignatureProviderPluginArgs.py SignatureProviderPluginArgs.py COPYONLY) -configure_file(StateHistoryPluginArgs.py StateHistoryPluginArgs.py COPYONLY) -configure_file(TraceApiPluginArgs.py TraceApiPluginArgs.py COPYONLY) + +find_package(PythonInterp REQUIRED) +find_package(Python3 REQUIRED) + +set(GEN_FILES + ChainPluginArgs.py + HttpPluginArgs.py + NetPluginArgs.py + ProducerPluginArgs.py + ResourceMonitorPluginArgs.py + SignatureProviderPluginArgs.py + StateHistoryPluginArgs.py + TraceApiPluginArgs.py) + +add_custom_command(COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/generate_nodeos_plugin_args_class_files.py + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + DEPENDS ${NODE_EXECUTABLE_NAME} + OUTPUT ${GEN_FILES}) + +add_custom_target(GenNodeosPluginArgs ALL + DEPENDS ${GEN_FILES} + COMMENT "Generating NodeosPluginArgs files") diff --git a/tests/performance_tests/NodeosPluginArgs/ChainPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/ChainPluginArgs.py deleted file mode 100755 index 22ed36c15d..0000000000 --- a/tests/performance_tests/NodeosPluginArgs/ChainPluginArgs.py +++ /dev/null @@ -1,198 +0,0 @@ -#!/usr/bin/env python3 - -from dataclasses import dataclass -from .BasePluginArgs import BasePluginArgs - -""" -This file/class was generated by generate_nodeos_plugin_args_class_files.py -""" - -@dataclass -class ChainPluginArgs(BasePluginArgs): - _pluginNamespace: str="eosio" - _pluginName: str="chain_plugin" - 
blocksDir: str=None - _blocksDirNodeosDefault: str='"blocks"' - _blocksDirNodeosArg: str="--blocks-dir" - blocksLogStride: str=None - _blocksLogStrideNodeosDefault: str=None - _blocksLogStrideNodeosArg: str="--blocks-log-stride" - maxRetainedBlockFiles: str=None - _maxRetainedBlockFilesNodeosDefault: str=None - _maxRetainedBlockFilesNodeosArg: str="--max-retained-block-files" - blocksRetainedDir: str=None - _blocksRetainedDirNodeosDefault: str=None - _blocksRetainedDirNodeosArg: str="--blocks-retained-dir" - blocksArchiveDir: str=None - _blocksArchiveDirNodeosDefault: str=None - _blocksArchiveDirNodeosArg: str="--blocks-archive-dir" - stateDir: str=None - _stateDirNodeosDefault: str='"state"' - _stateDirNodeosArg: str="--state-dir" - protocolFeaturesDir: str=None - _protocolFeaturesDirNodeosDefault: str='"protocol_features"' - _protocolFeaturesDirNodeosArg: str="--protocol-features-dir" - checkpoint: str=None - _checkpointNodeosDefault: str=None - _checkpointNodeosArg: str="--checkpoint" - wasmRuntime: str=None - _wasmRuntimeNodeosDefault: str="eos-vm-jit" - _wasmRuntimeNodeosArg: str="--wasm-runtime" - profileAccount: str=None - _profileAccountNodeosDefault: str=None - _profileAccountNodeosArg: str="--profile-account" - abiSerializerMaxTimeMs: int=None - _abiSerializerMaxTimeMsNodeosDefault: int=15 - _abiSerializerMaxTimeMsNodeosArg: str="--abi-serializer-max-time-ms" - chainStateDbSizeMb: int=None - _chainStateDbSizeMbNodeosDefault: int=1024 - _chainStateDbSizeMbNodeosArg: str="--chain-state-db-size-mb" - chainStateDbGuardSizeMb: int=None - _chainStateDbGuardSizeMbNodeosDefault: int=128 - _chainStateDbGuardSizeMbNodeosArg: str="--chain-state-db-guard-size-mb" - signatureCpuBillablePct: int=None - _signatureCpuBillablePctNodeosDefault: int=50 - _signatureCpuBillablePctNodeosArg: str="--signature-cpu-billable-pct" - chainThreads: int=None - _chainThreadsNodeosDefault: int=2 - _chainThreadsNodeosArg: str="--chain-threads" - contractsConsole: bool=None - 
_contractsConsoleNodeosDefault: bool=False - _contractsConsoleNodeosArg: str="--contracts-console" - deepMind: bool=None - _deepMindNodeosDefault: bool=False - _deepMindNodeosArg: str="--deep-mind" - actorWhitelist: str=None - _actorWhitelistNodeosDefault: str=None - _actorWhitelistNodeosArg: str="--actor-whitelist" - actorBlacklist: str=None - _actorBlacklistNodeosDefault: str=None - _actorBlacklistNodeosArg: str="--actor-blacklist" - contractWhitelist: str=None - _contractWhitelistNodeosDefault: str=None - _contractWhitelistNodeosArg: str="--contract-whitelist" - contractBlacklist: str=None - _contractBlacklistNodeosDefault: str=None - _contractBlacklistNodeosArg: str="--contract-blacklist" - actionBlacklist: str=None - _actionBlacklistNodeosDefault: str=None - _actionBlacklistNodeosArg: str="--action-blacklist" - keyBlacklist: str=None - _keyBlacklistNodeosDefault: str=None - _keyBlacklistNodeosArg: str="--key-blacklist" - senderBypassWhiteblacklist: str=None - _senderBypassWhiteblacklistNodeosDefault: str=None - _senderBypassWhiteblacklistNodeosArg: str="--sender-bypass-whiteblacklist" - readMode: str=None - _readModeNodeosDefault: str="head" - _readModeNodeosArg: str="--read-mode" - apiAcceptTransactions: int=None - _apiAcceptTransactionsNodeosDefault: int=1 - _apiAcceptTransactionsNodeosArg: str="--api-accept-transactions" - validationMode: str=None - _validationModeNodeosDefault: str="full" - _validationModeNodeosArg: str="--validation-mode" - disableRamBillingNotifyChecks: bool=None - _disableRamBillingNotifyChecksNodeosDefault: bool=False - _disableRamBillingNotifyChecksNodeosArg: str="--disable-ram-billing-notify-checks" - maximumVariableSignatureLength: int=None - _maximumVariableSignatureLengthNodeosDefault: int=16384 - _maximumVariableSignatureLengthNodeosArg: str="--maximum-variable-signature-length" - trustedProducer: str=None - _trustedProducerNodeosDefault: str=None - _trustedProducerNodeosArg: str="--trusted-producer" - databaseMapMode: str=None - 
_databaseMapModeNodeosDefault: str="mapped" - _databaseMapModeNodeosArg: str="--database-map-mode" - eosVmOcCacheSizeMb: int=None - _eosVmOcCacheSizeMbNodeosDefault: int=1024 - _eosVmOcCacheSizeMbNodeosArg: str="--eos-vm-oc-cache-size-mb" - eosVmOcCompileThreads: int=None - _eosVmOcCompileThreadsNodeosDefault: int=1 - _eosVmOcCompileThreadsNodeosArg: str="--eos-vm-oc-compile-threads" - eosVmOcEnable: bool=None - _eosVmOcEnableNodeosDefault: bool=False - _eosVmOcEnableNodeosArg: str="--eos-vm-oc-enable" - enableAccountQueries: int=None - _enableAccountQueriesNodeosDefault: int=0 - _enableAccountQueriesNodeosArg: str="--enable-account-queries" - maxNonprivilegedInlineActionSize: int=None - _maxNonprivilegedInlineActionSizeNodeosDefault: int=4096 - _maxNonprivilegedInlineActionSizeNodeosArg: str="--max-nonprivileged-inline-action-size" - transactionRetryMaxStorageSizeGb: int=None - _transactionRetryMaxStorageSizeGbNodeosDefault: int=None - _transactionRetryMaxStorageSizeGbNodeosArg: str="--transaction-retry-max-storage-size-gb" - transactionRetryIntervalSec: int=None - _transactionRetryIntervalSecNodeosDefault: int=20 - _transactionRetryIntervalSecNodeosArg: str="--transaction-retry-interval-sec" - transactionRetryMaxExpirationSec: int=None - _transactionRetryMaxExpirationSecNodeosDefault: int=120 - _transactionRetryMaxExpirationSecNodeosArg: str="--transaction-retry-max-expiration-sec" - transactionFinalityStatusMaxStorageSizeGb: int=None - _transactionFinalityStatusMaxStorageSizeGbNodeosDefault: int=None - _transactionFinalityStatusMaxStorageSizeGbNodeosArg: str="--transaction-finality-status-max-storage-size-gb" - transactionFinalityStatusSuccessDurationSec: int=None - _transactionFinalityStatusSuccessDurationSecNodeosDefault: int=180 - _transactionFinalityStatusSuccessDurationSecNodeosArg: str="--transaction-finality-status-success-duration-sec" - transactionFinalityStatusFailureDurationSec: int=None - _transactionFinalityStatusFailureDurationSecNodeosDefault: 
int=180 - _transactionFinalityStatusFailureDurationSecNodeosArg: str="--transaction-finality-status-failure-duration-sec" - integrityHashOnStart: bool=None - _integrityHashOnStartNodeosDefault: bool=False - _integrityHashOnStartNodeosArg: str="--integrity-hash-on-start" - integrityHashOnStop: bool=None - _integrityHashOnStopNodeosDefault: bool=False - _integrityHashOnStopNodeosArg: str="--integrity-hash-on-stop" - blockLogRetainBlocks: int=None - _blockLogRetainBlocksNodeosDefault: int=None - _blockLogRetainBlocksNodeosArg: str="--block-log-retain-blocks" - genesisJson: str=None - _genesisJsonNodeosDefault: str=None - _genesisJsonNodeosArg: str="--genesis-json" - genesisTimestamp: str=None - _genesisTimestampNodeosDefault: str=None - _genesisTimestampNodeosArg: str="--genesis-timestamp" - printGenesisJson: bool=None - _printGenesisJsonNodeosDefault: bool=False - _printGenesisJsonNodeosArg: str="--print-genesis-json" - extractGenesisJson: str=None - _extractGenesisJsonNodeosDefault: str=None - _extractGenesisJsonNodeosArg: str="--extract-genesis-json" - printBuildInfo: bool=None - _printBuildInfoNodeosDefault: bool=False - _printBuildInfoNodeosArg: str="--print-build-info" - extractBuildInfo: str=None - _extractBuildInfoNodeosDefault: str=None - _extractBuildInfoNodeosArg: str="--extract-build-info" - forceAllChecks: bool=None - _forceAllChecksNodeosDefault: bool=False - _forceAllChecksNodeosArg: str="--force-all-checks" - disableReplayOpts: bool=None - _disableReplayOptsNodeosDefault: bool=False - _disableReplayOptsNodeosArg: str="--disable-replay-opts" - replayBlockchain: bool=None - _replayBlockchainNodeosDefault: bool=False - _replayBlockchainNodeosArg: str="--replay-blockchain" - hardReplayBlockchain: bool=None - _hardReplayBlockchainNodeosDefault: bool=False - _hardReplayBlockchainNodeosArg: str="--hard-replay-blockchain" - deleteAllBlocks: bool=None - _deleteAllBlocksNodeosDefault: bool=False - _deleteAllBlocksNodeosArg: str="--delete-all-blocks" - 
truncateAtBlock: int=None - _truncateAtBlockNodeosDefault: int=0 - _truncateAtBlockNodeosArg: str="--truncate-at-block" - terminateAtBlock: int=None - _terminateAtBlockNodeosDefault: int=0 - _terminateAtBlockNodeosArg: str="--terminate-at-block" - snapshot: str=None - _snapshotNodeosDefault: str=None - _snapshotNodeosArg: str="--snapshot" - -def main(): - pluginArgs = ChainPluginArgs() - print(pluginArgs.supportedNodeosArgs()) - exit(0) - -if __name__ == '__main__': - main() diff --git a/tests/performance_tests/NodeosPluginArgs/HttpPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/HttpPluginArgs.py deleted file mode 100755 index 48f08f5204..0000000000 --- a/tests/performance_tests/NodeosPluginArgs/HttpPluginArgs.py +++ /dev/null @@ -1,66 +0,0 @@ -#!/usr/bin/env python3 - -from dataclasses import dataclass -from .BasePluginArgs import BasePluginArgs - -""" -This file/class was generated by generate_nodeos_plugin_args_class_files.py -""" - -@dataclass -class HttpPluginArgs(BasePluginArgs): - _pluginNamespace: str="eosio" - _pluginName: str="http_plugin" - unixSocketPath: str=None - _unixSocketPathNodeosDefault: str=None - _unixSocketPathNodeosArg: str="--unix-socket-path" - httpServerAddress: str=None - _httpServerAddressNodeosDefault: str="127.0.0.1:8888" - _httpServerAddressNodeosArg: str="--http-server-address" - accessControlAllowOrigin: str=None - _accessControlAllowOriginNodeosDefault: str=None - _accessControlAllowOriginNodeosArg: str="--access-control-allow-origin" - accessControlAllowHeaders: str=None - _accessControlAllowHeadersNodeosDefault: str=None - _accessControlAllowHeadersNodeosArg: str="--access-control-allow-headers" - accessControlMaxAge: int=None - _accessControlMaxAgeNodeosDefault: int=None - _accessControlMaxAgeNodeosArg: str="--access-control-max-age" - accessControlAllowCredentials: bool=None - _accessControlAllowCredentialsNodeosDefault: bool=False - _accessControlAllowCredentialsNodeosArg: str="--access-control-allow-credentials" - 
maxBodySize: int=None - _maxBodySizeNodeosDefault: int=2097152 - _maxBodySizeNodeosArg: str="--max-body-size" - httpMaxBytesInFlightMb: int=None - _httpMaxBytesInFlightMbNodeosDefault: int=500 - _httpMaxBytesInFlightMbNodeosArg: str="--http-max-bytes-in-flight-mb" - httpMaxInFlightRequests: int=None - _httpMaxInFlightRequestsNodeosDefault: int=-1 - _httpMaxInFlightRequestsNodeosArg: str="--http-max-in-flight-requests" - httpMaxResponseTimeMs: int=None - _httpMaxResponseTimeMsNodeosDefault: int=30 - _httpMaxResponseTimeMsNodeosArg: str="--http-max-response-time-ms" - verboseHttpErrors: bool=None - _verboseHttpErrorsNodeosDefault: bool=False - _verboseHttpErrorsNodeosArg: str="--verbose-http-errors" - httpValidateHost: int=None - _httpValidateHostNodeosDefault: int=1 - _httpValidateHostNodeosArg: str="--http-validate-host" - httpAlias: str=None - _httpAliasNodeosDefault: str=None - _httpAliasNodeosArg: str="--http-alias" - httpThreads: int=None - _httpThreadsNodeosDefault: int=2 - _httpThreadsNodeosArg: str="--http-threads" - httpKeepAlive: int=None - _httpKeepAliveNodeosDefault: int=1 - _httpKeepAliveNodeosArg: str="--http-keep-alive" - -def main(): - pluginArgs = HttpPluginArgs() - print(pluginArgs.supportedNodeosArgs()) - exit(0) - -if __name__ == '__main__': - main() diff --git a/tests/performance_tests/NodeosPluginArgs/NetPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/NetPluginArgs.py deleted file mode 100755 index 9c7bf8c668..0000000000 --- a/tests/performance_tests/NodeosPluginArgs/NetPluginArgs.py +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env python3 - -from dataclasses import dataclass -from .BasePluginArgs import BasePluginArgs - -""" -This file/class was generated by generate_nodeos_plugin_args_class_files.py -""" - -@dataclass -class NetPluginArgs(BasePluginArgs): - _pluginNamespace: str="eosio" - _pluginName: str="net_plugin" - p2pListenEndpoint: str=None - _p2pListenEndpointNodeosDefault: str="0.0.0.0:9876" - _p2pListenEndpointNodeosArg: 
str="--p2p-listen-endpoint" - p2pServerAddress: str=None - _p2pServerAddressNodeosDefault: str=None - _p2pServerAddressNodeosArg: str="--p2p-server-address" - p2pPeerAddress: str=None - _p2pPeerAddressNodeosDefault: str=None - _p2pPeerAddressNodeosArg: str="--p2p-peer-address" - p2pMaxNodesPerHost: int=None - _p2pMaxNodesPerHostNodeosDefault: int=1 - _p2pMaxNodesPerHostNodeosArg: str="--p2p-max-nodes-per-host" - p2pAcceptTransactions: int=None - _p2pAcceptTransactionsNodeosDefault: int=1 - _p2pAcceptTransactionsNodeosArg: str="--p2p-accept-transactions" - p2pAutoBpPeer: str=None - _p2pAutoBpPeerNodeosDefault: str=None - _p2pAutoBpPeerNodeosArg: str="--p2p-auto-bp-peer" - agentName: str=None - _agentNameNodeosDefault: str="EOS Test Agent" - _agentNameNodeosArg: str="--agent-name" - allowedConnection: str=None - _allowedConnectionNodeosDefault: str="any" - _allowedConnectionNodeosArg: str="--allowed-connection" - peerKey: str=None - _peerKeyNodeosDefault: str=None - _peerKeyNodeosArg: str="--peer-key" - peerPrivateKey: str=None - _peerPrivateKeyNodeosDefault: str=None - _peerPrivateKeyNodeosArg: str="--peer-private-key" - maxClients: int=None - _maxClientsNodeosDefault: int=25 - _maxClientsNodeosArg: str="--max-clients" - connectionCleanupPeriod: int=None - _connectionCleanupPeriodNodeosDefault: int=30 - _connectionCleanupPeriodNodeosArg: str="--connection-cleanup-period" - maxCleanupTimeMsec: int=None - _maxCleanupTimeMsecNodeosDefault: int=10 - _maxCleanupTimeMsecNodeosArg: str="--max-cleanup-time-msec" - p2pDedupCacheExpireTimeSec: int=None - _p2pDedupCacheExpireTimeSecNodeosDefault: int=10 - _p2pDedupCacheExpireTimeSecNodeosArg: str="--p2p-dedup-cache-expire-time-sec" - netThreads: int=None - _netThreadsNodeosDefault: int=4 - _netThreadsNodeosArg: str="--net-threads" - syncFetchSpan: int=None - _syncFetchSpanNodeosDefault: int=100 - _syncFetchSpanNodeosArg: str="--sync-fetch-span" - useSocketReadWatermark: int=None - _useSocketReadWatermarkNodeosDefault: int=0 - 
_useSocketReadWatermarkNodeosArg: str="--use-socket-read-watermark" - peerLogFormat: str=None - _peerLogFormatNodeosDefault: str='["${_name}" - ${_cid} ${_ip}:${_port}] ' - _peerLogFormatNodeosArg: str="--peer-log-format" - p2pKeepaliveIntervalMs: int=None - _p2pKeepaliveIntervalMsNodeosDefault: int=10000 - _p2pKeepaliveIntervalMsNodeosArg: str="--p2p-keepalive-interval-ms" - -def main(): - pluginArgs = NetPluginArgs() - print(pluginArgs.supportedNodeosArgs()) - exit(0) - -if __name__ == '__main__': - main() diff --git a/tests/performance_tests/NodeosPluginArgs/ProducerPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/ProducerPluginArgs.py deleted file mode 100755 index e214f2bade..0000000000 --- a/tests/performance_tests/NodeosPluginArgs/ProducerPluginArgs.py +++ /dev/null @@ -1,108 +0,0 @@ -#!/usr/bin/env python3 - -from dataclasses import dataclass -from .BasePluginArgs import BasePluginArgs - -""" -This file/class was generated by generate_nodeos_plugin_args_class_files.py -""" - -@dataclass -class ProducerPluginArgs(BasePluginArgs): - _pluginNamespace: str="eosio" - _pluginName: str="producer_plugin" - enableStaleProduction: bool=None - _enableStaleProductionNodeosDefault: bool=False - _enableStaleProductionNodeosArg: str="--enable-stale-production" - pauseOnStartup: bool=None - _pauseOnStartupNodeosDefault: bool=False - _pauseOnStartupNodeosArg: str="--pause-on-startup" - maxTransactionTime: int=None - _maxTransactionTimeNodeosDefault: int=30 - _maxTransactionTimeNodeosArg: str="--max-transaction-time" - maxIrreversibleBlockAge: int=None - _maxIrreversibleBlockAgeNodeosDefault: int=-1 - _maxIrreversibleBlockAgeNodeosArg: str="--max-irreversible-block-age" - producerName: str=None - _producerNameNodeosDefault: str=None - _producerNameNodeosArg: str="--producer-name" - privateKey: str=None - _privateKeyNodeosDefault: str=None - _privateKeyNodeosArg: str="--private-key" - signatureProvider: str=None - _signatureProviderNodeosDefault: 
str="EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV=KEY:5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3" - _signatureProviderNodeosArg: str="--signature-provider" - greylistAccount: str=None - _greylistAccountNodeosDefault: str=None - _greylistAccountNodeosArg: str="--greylist-account" - greylistLimit: int=None - _greylistLimitNodeosDefault: int=1000 - _greylistLimitNodeosArg: str="--greylist-limit" - produceTimeOffsetUs: int=None - _produceTimeOffsetUsNodeosDefault: int=0 - _produceTimeOffsetUsNodeosArg: str="--produce-time-offset-us" - lastBlockTimeOffsetUs: int=None - _lastBlockTimeOffsetUsNodeosDefault: int=-200000 - _lastBlockTimeOffsetUsNodeosArg: str="--last-block-time-offset-us" - cpuEffortPercent: int=None - _cpuEffortPercentNodeosDefault: int=80 - _cpuEffortPercentNodeosArg: str="--cpu-effort-percent" - lastBlockCpuEffortPercent: int=None - _lastBlockCpuEffortPercentNodeosDefault: int=80 - _lastBlockCpuEffortPercentNodeosArg: str="--last-block-cpu-effort-percent" - maxBlockCpuUsageThresholdUs: int=None - _maxBlockCpuUsageThresholdUsNodeosDefault: int=5000 - _maxBlockCpuUsageThresholdUsNodeosArg: str="--max-block-cpu-usage-threshold-us" - maxBlockNetUsageThresholdBytes: int=None - _maxBlockNetUsageThresholdBytesNodeosDefault: int=1024 - _maxBlockNetUsageThresholdBytesNodeosArg: str="--max-block-net-usage-threshold-bytes" - maxScheduledTransactionTimePerBlockMs: int=None - _maxScheduledTransactionTimePerBlockMsNodeosDefault: int=100 - _maxScheduledTransactionTimePerBlockMsNodeosArg: str="--max-scheduled-transaction-time-per-block-ms" - subjectiveCpuLeewayUs: int=None - _subjectiveCpuLeewayUsNodeosDefault: int=31000 - _subjectiveCpuLeewayUsNodeosArg: str="--subjective-cpu-leeway-us" - subjectiveAccountMaxFailures: int=None - _subjectiveAccountMaxFailuresNodeosDefault: int=3 - _subjectiveAccountMaxFailuresNodeosArg: str="--subjective-account-max-failures" - subjectiveAccountMaxFailuresWindowSize: int=None - 
_subjectiveAccountMaxFailuresWindowSizeNodeosDefault: int=1 - _subjectiveAccountMaxFailuresWindowSizeNodeosArg: str="--subjective-account-max-failures-window-size" - subjectiveAccountDecayTimeMinutes: int=None - _subjectiveAccountDecayTimeMinutesNodeosDefault: int=1440 - _subjectiveAccountDecayTimeMinutesNodeosArg: str="--subjective-account-decay-time-minutes" - incomingDeferRatio: int=None - _incomingDeferRatioNodeosDefault: int=1 - _incomingDeferRatioNodeosArg: str="--incoming-defer-ratio" - incomingTransactionQueueSizeMb: int=None - _incomingTransactionQueueSizeMbNodeosDefault: int=1024 - _incomingTransactionQueueSizeMbNodeosArg: str="--incoming-transaction-queue-size-mb" - disableSubjectiveBilling: int=None - _disableSubjectiveBillingNodeosDefault: int=1 - _disableSubjectiveBillingNodeosArg: str="--disable-subjective-billing" - disableSubjectiveAccountBilling: bool=None - _disableSubjectiveAccountBillingNodeosDefault: bool=False - _disableSubjectiveAccountBillingNodeosArg: str="--disable-subjective-account-billing" - disableSubjectiveP2pBilling: int=None - _disableSubjectiveP2pBillingNodeosDefault: int=1 - _disableSubjectiveP2pBillingNodeosArg: str="--disable-subjective-p2p-billing" - disableSubjectiveApiBilling: int=None - _disableSubjectiveApiBillingNodeosDefault: int=1 - _disableSubjectiveApiBillingNodeosArg: str="--disable-subjective-api-billing" - producerThreads: int=None - _producerThreadsNodeosDefault: int=2 - _producerThreadsNodeosArg: str="--producer-threads" - snapshotsDir: str=None - _snapshotsDirNodeosDefault: str='"snapshots"' - _snapshotsDirNodeosArg: str="--snapshots-dir" - maxReadOnlyTransactionTime: int=None - _maxReadOnlyTransactionTimeNodeosDefault: int=150 - _maxReadOnlyTransactionTimeNodeosArg: str="--max-read-only-transaction-time" - -def main(): - pluginArgs = ProducerPluginArgs() - print(pluginArgs.supportedNodeosArgs()) - exit(0) - -if __name__ == '__main__': - main() diff --git 
a/tests/performance_tests/NodeosPluginArgs/ResourceMonitorPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/ResourceMonitorPluginArgs.py deleted file mode 100755 index 54d3e54031..0000000000 --- a/tests/performance_tests/NodeosPluginArgs/ResourceMonitorPluginArgs.py +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env python3 - -from dataclasses import dataclass -from .BasePluginArgs import BasePluginArgs - -""" -This file/class was generated by generate_nodeos_plugin_args_class_files.py -""" - -@dataclass -class ResourceMonitorPluginArgs(BasePluginArgs): - _pluginNamespace: str="eosio" - _pluginName: str="resource_monitor_plugin" - resourceMonitorIntervalSeconds: int=None - _resourceMonitorIntervalSecondsNodeosDefault: int=2 - _resourceMonitorIntervalSecondsNodeosArg: str="--resource-monitor-interval-seconds" - resourceMonitorSpaceThreshold: int=None - _resourceMonitorSpaceThresholdNodeosDefault: int=90 - _resourceMonitorSpaceThresholdNodeosArg: str="--resource-monitor-space-threshold" - resourceMonitorSpaceAbsoluteGb: str=None - _resourceMonitorSpaceAbsoluteGbNodeosDefault: str=None - _resourceMonitorSpaceAbsoluteGbNodeosArg: str="--resource-monitor-space-absolute-gb" - resourceMonitorNotShutdownOnThresholdExceeded: bool=None - _resourceMonitorNotShutdownOnThresholdExceededNodeosDefault: bool=False - _resourceMonitorNotShutdownOnThresholdExceededNodeosArg: str="--resource-monitor-not-shutdown-on-threshold-exceeded" - resourceMonitorWarningInterval: int=None - _resourceMonitorWarningIntervalNodeosDefault: int=30 - _resourceMonitorWarningIntervalNodeosArg: str="--resource-monitor-warning-interval" - -def main(): - pluginArgs = ResourceMonitorPluginArgs() - print(pluginArgs.supportedNodeosArgs()) - exit(0) - -if __name__ == '__main__': - main() diff --git a/tests/performance_tests/NodeosPluginArgs/SignatureProviderPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/SignatureProviderPluginArgs.py deleted file mode 100755 index 39d9ddf0dd..0000000000 --- 
a/tests/performance_tests/NodeosPluginArgs/SignatureProviderPluginArgs.py +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env python3 - -from dataclasses import dataclass -from .BasePluginArgs import BasePluginArgs - -""" -This file/class was generated by generate_nodeos_plugin_args_class_files.py -""" - -@dataclass -class SignatureProviderPluginArgs(BasePluginArgs): - _pluginNamespace: str="eosio" - _pluginName: str="signature_provider_plugin" - keosdProviderTimeout: int=None - _keosdProviderTimeoutNodeosDefault: int=5 - _keosdProviderTimeoutNodeosArg: str="--keosd-provider-timeout" - -def main(): - pluginArgs = SignatureProviderPluginArgs() - print(pluginArgs.supportedNodeosArgs()) - exit(0) - -if __name__ == '__main__': - main() diff --git a/tests/performance_tests/NodeosPluginArgs/StateHistoryPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/StateHistoryPluginArgs.py deleted file mode 100755 index c7d8d6fbc9..0000000000 --- a/tests/performance_tests/NodeosPluginArgs/StateHistoryPluginArgs.py +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/env python3 - -from dataclasses import dataclass -from .BasePluginArgs import BasePluginArgs - -""" -This file/class was generated by generate_nodeos_plugin_args_class_files.py -""" - -@dataclass -class StateHistoryPluginArgs(BasePluginArgs): - _pluginNamespace: str="eosio" - _pluginName: str="state_history_plugin" - stateHistoryDir: str=None - _stateHistoryDirNodeosDefault: str='"state-history"' - _stateHistoryDirNodeosArg: str="--state-history-dir" - stateHistoryRetainedDir: str=None - _stateHistoryRetainedDirNodeosDefault: str=None - _stateHistoryRetainedDirNodeosArg: str="--state-history-retained-dir" - stateHistoryArchiveDir: str=None - _stateHistoryArchiveDirNodeosDefault: str=None - _stateHistoryArchiveDirNodeosArg: str="--state-history-archive-dir" - stateHistoryStride: str=None - _stateHistoryStrideNodeosDefault: str=None - _stateHistoryStrideNodeosArg: str="--state-history-stride" - maxRetainedHistoryFiles: str=None - 
_maxRetainedHistoryFilesNodeosDefault: str=None - _maxRetainedHistoryFilesNodeosArg: str="--max-retained-history-files" - traceHistory: bool=None - _traceHistoryNodeosDefault: bool=False - _traceHistoryNodeosArg: str="--trace-history" - chainStateHistory: bool=None - _chainStateHistoryNodeosDefault: bool=False - _chainStateHistoryNodeosArg: str="--chain-state-history" - stateHistoryEndpoint: str=None - _stateHistoryEndpointNodeosDefault: str="127.0.0.1:8080" - _stateHistoryEndpointNodeosArg: str="--state-history-endpoint" - stateHistoryUnixSocketPath: str=None - _stateHistoryUnixSocketPathNodeosDefault: str=None - _stateHistoryUnixSocketPathNodeosArg: str="--state-history-unix-socket-path" - traceHistoryDebugMode: bool=None - _traceHistoryDebugModeNodeosDefault: bool=False - _traceHistoryDebugModeNodeosArg: str="--trace-history-debug-mode" - stateHistoryLogRetainBlocks: int=None - _stateHistoryLogRetainBlocksNodeosDefault: int=None - _stateHistoryLogRetainBlocksNodeosArg: str="--state-history-log-retain-blocks" - deleteStateHistory: bool=None - _deleteStateHistoryNodeosDefault: bool=False - _deleteStateHistoryNodeosArg: str="--delete-state-history" - -def main(): - pluginArgs = StateHistoryPluginArgs() - print(pluginArgs.supportedNodeosArgs()) - exit(0) - -if __name__ == '__main__': - main() diff --git a/tests/performance_tests/NodeosPluginArgs/TraceApiPluginArgs.py b/tests/performance_tests/NodeosPluginArgs/TraceApiPluginArgs.py deleted file mode 100755 index 9ef9f5f222..0000000000 --- a/tests/performance_tests/NodeosPluginArgs/TraceApiPluginArgs.py +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env python3 - -from dataclasses import dataclass -from .BasePluginArgs import BasePluginArgs - -""" -This file/class was generated by generate_nodeos_plugin_args_class_files.py -""" - -@dataclass -class TraceApiPluginArgs(BasePluginArgs): - _pluginNamespace: str="eosio" - _pluginName: str="trace_api_plugin" - traceDir: str=None - _traceDirNodeosDefault: str='"traces"' - 
_traceDirNodeosArg: str="--trace-dir" - traceSliceStride: int=None - _traceSliceStrideNodeosDefault: int=10000 - _traceSliceStrideNodeosArg: str="--trace-slice-stride" - traceMinimumIrreversibleHistoryBlocks: int=None - _traceMinimumIrreversibleHistoryBlocksNodeosDefault: int=-1 - _traceMinimumIrreversibleHistoryBlocksNodeosArg: str="--trace-minimum-irreversible-history-blocks" - traceMinimumUncompressedIrreversibleHistoryBlocks: int=None - _traceMinimumUncompressedIrreversibleHistoryBlocksNodeosDefault: int=-1 - _traceMinimumUncompressedIrreversibleHistoryBlocksNodeosArg: str="--trace-minimum-uncompressed-irreversible-history-blocks" - traceRpcAbi: str=None - _traceRpcAbiNodeosDefault: str=None - _traceRpcAbiNodeosArg: str="--trace-rpc-abi" - traceNoAbis: bool=None - _traceNoAbisNodeosDefault: bool=False - _traceNoAbisNodeosArg: str="--trace-no-abis" - -def main(): - pluginArgs = TraceApiPluginArgs() - print(pluginArgs.supportedNodeosArgs()) - exit(0) - -if __name__ == '__main__': - main() diff --git a/tests/performance_tests/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py b/tests/performance_tests/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py index b597726c60..89b743b6e1 100755 --- a/tests/performance_tests/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py +++ b/tests/performance_tests/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py @@ -52,7 +52,7 @@ def main(): - result = subprocess.run(["programs/nodeos/nodeos", "--help"], capture_output=True, text=True) + result = subprocess.run(["../../../bin/nodeos", "--help"], capture_output=True, text=True) myStr = result.stdout myStr = myStr.rstrip("\n") @@ -104,7 +104,7 @@ def pairwise(iterable): def writeDataclass(plugin:str, dataFieldDict:dict, pluginOptsDict:dict): newPlugin="".join([x.capitalize() for x in plugin.split('_')]).replace(":","") - pluginArgsFile=f"../tests/performance_tests/NodeosPluginArgs/{newPlugin}Args.py" + pluginArgsFile=f"./{newPlugin}Args.py" with 
open(pluginArgsFile, 'w') as dataclassFile: chainPluginArgs = dataFieldDict[newPlugin] diff --git a/tests/performance_tests/validate_nodeos_plugin_args.py b/tests/performance_tests/validate_nodeos_plugin_args.py deleted file mode 100755 index 1ff0ce8b30..0000000000 --- a/tests/performance_tests/validate_nodeos_plugin_args.py +++ /dev/null @@ -1,110 +0,0 @@ -#!/usr/bin/env python3 - -import re -import subprocess - -from NodeosPluginArgs import ChainPluginArgs, HttpPluginArgs, NetPluginArgs, ProducerPluginArgs, ResourceMonitorPluginArgs, SignatureProviderPluginArgs, StateHistoryPluginArgs, TraceApiPluginArgs - -testSuccessful = False - -regenSuggestion = "Try updating *PluginArgs classes to nodeos's current config options by running the script: generate_nodeos_plugin_args_class_files.py. \ - Updates to generation script may be required if a plugin was added/removed or in some default parameter cases." - -def parseNodeosConfigOptions() -> dict: - result = subprocess.run(["programs/nodeos/nodeos", "--help"], capture_output=True, text=True) - - myStr = result.stdout - myStr = myStr.rstrip("\n") - myStr = re.sub(":\n\s+-",':@@@\n -', string=myStr) - myStr = re.sub("\n\n",'\n@@@', string=myStr) - myStr = re.sub("Application Options:\n",'', string=myStr) - pluginSections = re.split("(@@@.*?@@@\n)", string=myStr) - - def pairwise(iterable): - "s -> (s0, s1), (s2, s3), (s4, s5), ..." 
- a = iter(iterable) - return zip(a, a) - - pluginOptsDict = {} - for section, options in pairwise(pluginSections[1:]): - myOpts = re.sub("\s+", " ", options) - myOpts = re.sub("\n", " ", myOpts) - myOpts = re.sub(" --", "\n--",string = myOpts) - splitOpts=re.split("\n", myOpts) - - argDefaultsDict = {} - for opt in splitOpts[1:]: - secondSplit = re.split("(--[\w\-]+)", opt)[1:] - argument=secondSplit[0] - argDefaultDesc=secondSplit[1].lstrip("\s") - default = None - match = re.search("\(=.*?\)", argDefaultDesc) - if match is not None: - value = match.group(0)[2:-1] - try: - default = int(value) - except ValueError: - default = str(value) - argDefaultsDict[argument] = default - - section=re.sub("@@@", "", section) - section=re.sub("\n", "", section) - sectionSplit=re.split("::", section) - configSection = section - if len(sectionSplit) > 1: - configSection=sectionSplit[1] - - if configSection[-1] == ":": - configSection = configSection[:-1] - - if pluginOptsDict.get(configSection) is not None: - pluginOptsDict[configSection].update(argDefaultsDict) - else: - pluginOptsDict[configSection] = argDefaultsDict - return pluginOptsDict - -nodeosPluginOptsDict = parseNodeosConfigOptions() - -curListOfSupportedPlugins = [ChainPluginArgs(), HttpPluginArgs(), NetPluginArgs(), ProducerPluginArgs(), - ResourceMonitorPluginArgs(), SignatureProviderPluginArgs(), StateHistoryPluginArgs(), TraceApiPluginArgs()] - -curListOfUnsupportedOptionGroups = ["txn_test_gen_plugin", "Application Config Options", "Application Command Line Options"] - -#Check whether nodeos has added any plugin configuration sections -for confSection in nodeosPluginOptsDict.keys(): - assert confSection in [paClass._pluginName for paClass in curListOfSupportedPlugins] or confSection in curListOfUnsupportedOptionGroups, f"ERROR: New config section \"{confSection}\" added to nodeos which may require updates. 
{regenSuggestion}" - -def argStrToAttrName(argStr: str) -> str: - attrName="".join([x.capitalize() for x in argStr.split('-')]).replace('--','') - attrName="".join([attrName[0].lower(), attrName[1:]]) - return attrName - -for supportedPlugin in curListOfSupportedPlugins: - #Check whether nodeos has removed any plugin configuration sections - assert supportedPlugin._pluginName in nodeosPluginOptsDict, f"ERROR: Supported config section \"{supportedPlugin._pluginName}\" no longer supported by nodeos. {regenSuggestion}" - - for opt in supportedPlugin.supportedNodeosArgs(): - #Check whether nodeos has removed any arguments in a plugin - assert opt in nodeosPluginOptsDict[supportedPlugin._pluginName].keys(), f"ERROR: nodeos no longer supports \"{opt}\" in \"{supportedPlugin._pluginName}\". {regenSuggestion}" - - - ourDefault = getattr(supportedPlugin, f"_{argStrToAttrName(opt)}NodeosDefault") - nodeosCurDefault = nodeosPluginOptsDict[supportedPlugin._pluginName][opt] - if type(ourDefault) == bool and nodeosCurDefault is None: - nodeosCurDefault=False - #Check whether our defaults no longer match nodeos's - assert ourDefault == nodeosCurDefault, f"ERROR: {type(supportedPlugin)}'s default for \"{opt}\" is {ourDefault} and no longer matches nodeos's default {nodeosCurDefault} in \"{supportedPlugin._pluginName}\". {regenSuggestion}" - - #Check whether nodeos has added/updated any argument defaults - for nodeosOpt, defaultValue in nodeosPluginOptsDict[supportedPlugin._pluginName].items(): - assert nodeosOpt in supportedPlugin.supportedNodeosArgs(), f"ERROR: New nodeos option \"{nodeosOpt}\". Support for this option needs to be added to {type(supportedPlugin)}. 
{regenSuggestion}" - - ourDefault = getattr(supportedPlugin, f"_{argStrToAttrName(nodeosOpt)}NodeosDefault") - if type(ourDefault) == bool and defaultValue is None: - defaultValue=False - assert defaultValue == ourDefault, f"ERROR: nodeos's default for \"{nodeosOpt}\" is {nodeosCurDefault} and no longer matches {type(supportedPlugin)}'s default: {ourDefault} in \"{supportedPlugin._pluginName}\". {regenSuggestion}" - -testSuccessful = True - -exitCode = 0 if testSuccessful else 1 -exit(exitCode) From 93f52ea34f3ec8a09b4e7ad5949d32dd6181aef5 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 9 Mar 2023 16:20:50 -0600 Subject: [PATCH 176/178] Don't require these find packages, which are already being done higher up. --- tests/performance_tests/NodeosPluginArgs/CMakeLists.txt | 3 --- 1 file changed, 3 deletions(-) diff --git a/tests/performance_tests/NodeosPluginArgs/CMakeLists.txt b/tests/performance_tests/NodeosPluginArgs/CMakeLists.txt index 4fd7fbcc78..48bed00ded 100644 --- a/tests/performance_tests/NodeosPluginArgs/CMakeLists.txt +++ b/tests/performance_tests/NodeosPluginArgs/CMakeLists.txt @@ -1,9 +1,6 @@ configure_file(__init__.py __init__.py COPYONLY) configure_file(BasePluginArgs.py BasePluginArgs.py COPYONLY) -find_package(PythonInterp REQUIRED) -find_package(Python3 REQUIRED) - set(GEN_FILES ChainPluginArgs.py HttpPluginArgs.py From a24b05cd4ed26cafd46a19c4a2e381aa54d55a8b Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 9 Mar 2023 16:33:03 -0600 Subject: [PATCH 177/178] Switch to support older versions of python. capture_output not added until python 3.7. Ubuntu 18 build still running on older version of python. 
--- .../NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/performance_tests/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py b/tests/performance_tests/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py index 89b743b6e1..6968ec0140 100755 --- a/tests/performance_tests/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py +++ b/tests/performance_tests/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py @@ -52,7 +52,7 @@ def main(): - result = subprocess.run(["../../../bin/nodeos", "--help"], capture_output=True, text=True) + result = subprocess.run(["../../../bin/nodeos", "--help"], stdout=subprocess.PIPE, text=True) myStr = result.stdout myStr = myStr.rstrip("\n") From a9aea207bed67280a910540794a34c2d5c902683 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 10 Mar 2023 08:56:50 -0600 Subject: [PATCH 178/178] Switch to support older versions of python. Before text=True (3.7) there was universal_newlines=True. Ubuntu 18 build still running on older version of python. --- .../NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/performance_tests/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py b/tests/performance_tests/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py index 6968ec0140..792838dcc5 100755 --- a/tests/performance_tests/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py +++ b/tests/performance_tests/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py @@ -52,7 +52,7 @@ def main(): - result = subprocess.run(["../../../bin/nodeos", "--help"], stdout=subprocess.PIPE, text=True) + result = subprocess.run(["../../../bin/nodeos", "--help"], stdout=subprocess.PIPE, universal_newlines=True) myStr = result.stdout myStr = myStr.rstrip("\n")