diff --git a/.github/workflows/performance_harness_run.yaml b/.github/workflows/performance_harness_run.yaml
index 1584edf80c..7282bb1316 100644
--- a/.github/workflows/performance_harness_run.yaml
+++ b/.github/workflows/performance_harness_run.yaml
@@ -32,7 +32,7 @@ jobs:
- name: Setup Input Params
id: overrides
run: |
- echo test-params=testBpOpMode >> $GITHUB_OUTPUT
+ echo test-params=findMax testBpOpMode >> $GITHUB_OUTPUT
if [[ "${{inputs.override-test-params}}" != "" ]]; then
echo test-params=${{inputs.override-test-params}} >> $GITHUB_OUTPUT
@@ -103,18 +103,18 @@ jobs:
run: |
zstdcat build.tar.zst | tar x
cd build
- ./tests/performance_tests/performance_test.py ${{needs.v.outputs.test-params}}
+ ./tests/PerformanceHarnessScenarioRunner.py ${{needs.v.outputs.test-params}}
- name: Prepare results
id: prep-results
run: |
- tar -pc build/performance_test | zstd --long -T0 -9 > performance_test_logs.tar.zst
+ tar -pc build/PerformanceHarnessScenarioRunnerLogs | zstd --long -T0 -9 > PerformanceHarnessScenarioRunnerLogs.tar.zst
- name: Upload results
uses: AntelopeIO/upload-artifact-large-chunks-action@v1
with:
name: performance-test-results
- path: performance_test_logs.tar.zst
+ path: PerformanceHarnessScenarioRunnerLogs.tar.zst
- name: Upload report
uses: actions/upload-artifact@v3
with:
name: performance-test-report
- path: ./build/performance_test/**/report.json
+ path: ./build/PerformanceHarnessScenarioRunnerLogs/**/report.json
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 700abde685..71147aaa34 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -70,6 +70,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/auto_bp_peering_test.py ${CMAKE_CURRE
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/auto_bp_peering_test_shape.json ${CMAKE_CURRENT_BINARY_DIR}/auto_bp_peering_test_shape.json COPYONLY)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/gelf_test.py ${CMAKE_CURRENT_BINARY_DIR}/gelf_test.py COPYONLY)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/split_blocklog_replay_test.py ${CMAKE_CURRENT_BINARY_DIR}/split_blocklog_replay_test.py COPYONLY)
+configure_file(${CMAKE_CURRENT_SOURCE_DIR}/PerformanceHarnessScenarioRunner.py ${CMAKE_CURRENT_BINARY_DIR}/PerformanceHarnessScenarioRunner.py COPYONLY)
if(DEFINED ENV{GITHUB_ACTIONS})
set(UNSHARE "--unshared")
@@ -102,7 +103,7 @@ endif()
add_subdirectory( TestHarness )
add_subdirectory( trx_generator )
-add_subdirectory( performance_tests )
+add_subdirectory( PerformanceHarness )
find_package(Threads)
add_executable(ship_client ship_client.cpp)
@@ -271,6 +272,29 @@ set_property(TEST auto_bp_peering_test PROPERTY LABELS long_running_tests)
add_test(NAME gelf_test COMMAND tests/gelf_test.py ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
set_property(TEST gelf_test PROPERTY LABELS nonparallelizable_tests)
+add_test(NAME performance_test_bp COMMAND tests/PerformanceHarnessScenarioRunner.py findMax testBpOpMode --max-tps-to-test 50 --test-iteration-min-step 10 --test-iteration-duration-sec 10 --final-iterations-duration-sec 10 --calc-chain-threads lmax overrideBasicTestConfig -v --tps-limit-per-generator 25 --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
+add_test(NAME performance_test_api COMMAND tests/PerformanceHarnessScenarioRunner.py findMax testApiOpMode --max-tps-to-test 50 --test-iteration-min-step 10 --test-iteration-duration-sec 10 --final-iterations-duration-sec 10 --calc-chain-threads lmax overrideBasicTestConfig -v --tps-limit-per-generator 25 --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
+add_test(NAME performance_test_read_only_trxs COMMAND tests/PerformanceHarnessScenarioRunner.py findMax testApiOpMode --max-tps-to-test 50 --test-iteration-min-step 10 --test-iteration-duration-sec 10 --final-iterations-duration-sec 10 overrideBasicTestConfig -v --tps-limit-per-generator 25 --api-nodes-read-only-threads 2 --account-name "payloadless" --abi-file payloadless.abi --wasm-file payloadless.wasm --contract-dir unittests/test-contracts/payloadless --user-trx-data-file tests/PerformanceHarness/readOnlyTrxData.json --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
+add_test(NAME performance_test_cpu_trx_spec COMMAND tests/PerformanceHarnessScenarioRunner.py findMax testBpOpMode --max-tps-to-test 50 --test-iteration-min-step 10 --test-iteration-duration-sec 10 --final-iterations-duration-sec 10 overrideBasicTestConfig -v --tps-limit-per-generator 25 --chain-state-db-size-mb 200 --account-name "c" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/PerformanceHarness/cpuTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
+add_test(NAME performance_test_basic_p2p COMMAND tests/PerformanceHarnessScenarioRunner.py singleTest -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
+add_test(NAME performance_test_basic_http COMMAND tests/PerformanceHarnessScenarioRunner.py singleTest -v --endpoint-mode http --producer-nodes 1 --validation-nodes 1 --api-nodes 1 --target-tps 10 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
+add_test(NAME performance_test_basic_transfer_trx_spec COMMAND tests/PerformanceHarnessScenarioRunner.py singleTest -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --user-trx-data-file tests/PerformanceHarness/userTrxDataTransfer.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
+add_test(NAME performance_test_basic_new_acct_trx_spec COMMAND tests/PerformanceHarnessScenarioRunner.py singleTest -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --user-trx-data-file tests/PerformanceHarness/userTrxDataNewAccount.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
+add_test(NAME performance_test_basic_cpu_trx_spec COMMAND tests/PerformanceHarnessScenarioRunner.py singleTest -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --account-name "c" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/PerformanceHarness/cpuTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
+add_test(NAME performance_test_basic_ram_trx_spec COMMAND tests/PerformanceHarnessScenarioRunner.py singleTest -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --account-name "r" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/PerformanceHarness/ramTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
+add_test(NAME performance_test_basic_read_only_trxs COMMAND tests/PerformanceHarnessScenarioRunner.py singleTest -v --endpoint-mode http --producer-nodes 1 --validation-nodes 1 --api-nodes 1 --api-nodes-read-only-threads 2 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --account-name "payloadless" --abi-file payloadless.abi --wasm-file payloadless.wasm --contract-dir unittests/test-contracts/payloadless --user-trx-data-file tests/PerformanceHarness/readOnlyTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
+set_property(TEST performance_test_bp PROPERTY LABELS long_running_tests)
+set_property(TEST performance_test_api PROPERTY LABELS long_running_tests)
+set_property(TEST performance_test_read_only_trxs PROPERTY LABELS long_running_tests)
+set_property(TEST performance_test_cpu_trx_spec PROPERTY LABELS long_running_tests)
+set_property(TEST performance_test_basic_p2p PROPERTY LABELS nonparallelizable_tests)
+set_property(TEST performance_test_basic_http PROPERTY LABELS nonparallelizable_tests)
+set_property(TEST performance_test_basic_transfer_trx_spec PROPERTY LABELS nonparallelizable_tests)
+set_property(TEST performance_test_basic_new_acct_trx_spec PROPERTY LABELS nonparallelizable_tests)
+set_property(TEST performance_test_basic_cpu_trx_spec PROPERTY LABELS nonparallelizable_tests)
+set_property(TEST performance_test_basic_ram_trx_spec PROPERTY LABELS nonparallelizable_tests)
+set_property(TEST performance_test_basic_read_only_trxs PROPERTY LABELS nonparallelizable_tests)
+
if(ENABLE_COVERAGE_TESTING)
set(Coverage_NAME ${PROJECT_NAME}_coverage)
diff --git a/tests/PerformanceHarness/CMakeLists.txt b/tests/PerformanceHarness/CMakeLists.txt
new file mode 100644
index 0000000000..f5a7711e3f
--- /dev/null
+++ b/tests/PerformanceHarness/CMakeLists.txt
@@ -0,0 +1,11 @@
+configure_file(performance_test_basic.py . COPYONLY)
+configure_file(performance_test.py . COPYONLY)
+configure_file(log_reader.py . COPYONLY)
+configure_file(genesis.json . COPYONLY)
+configure_file(cpuTrxData.json . COPYONLY)
+configure_file(ramTrxData.json . COPYONLY)
+configure_file(readOnlyTrxData.json . COPYONLY)
+configure_file(userTrxDataTransfer.json . COPYONLY)
+configure_file(userTrxDataNewAccount.json . COPYONLY)
+
+add_subdirectory( NodeosPluginArgs )
diff --git a/tests/performance_tests/NodeosPluginArgs/BasePluginArgs.py b/tests/PerformanceHarness/NodeosPluginArgs/BasePluginArgs.py
similarity index 100%
rename from tests/performance_tests/NodeosPluginArgs/BasePluginArgs.py
rename to tests/PerformanceHarness/NodeosPluginArgs/BasePluginArgs.py
diff --git a/tests/performance_tests/NodeosPluginArgs/CMakeLists.txt b/tests/PerformanceHarness/NodeosPluginArgs/CMakeLists.txt
similarity index 100%
rename from tests/performance_tests/NodeosPluginArgs/CMakeLists.txt
rename to tests/PerformanceHarness/NodeosPluginArgs/CMakeLists.txt
diff --git a/tests/performance_tests/NodeosPluginArgs/__init__.py b/tests/PerformanceHarness/NodeosPluginArgs/__init__.py
similarity index 100%
rename from tests/performance_tests/NodeosPluginArgs/__init__.py
rename to tests/PerformanceHarness/NodeosPluginArgs/__init__.py
diff --git a/tests/performance_tests/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py b/tests/PerformanceHarness/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py
similarity index 100%
rename from tests/performance_tests/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py
rename to tests/PerformanceHarness/NodeosPluginArgs/generate_nodeos_plugin_args_class_files.py
diff --git a/tests/performance_tests/README.md b/tests/PerformanceHarness/README.md
similarity index 86%
rename from tests/performance_tests/README.md
rename to tests/PerformanceHarness/README.md
index 68de0e5ba9..4b267fd1ca 100644
--- a/tests/performance_tests/README.md
+++ b/tests/PerformanceHarness/README.md
@@ -1,8 +1,12 @@
-# Performance Harness Tests
+# Performance Harness
-The Performance Harness is configured and run through the main `performance_test.py` script. The script's main goal is to measure current peak performance metrics through iteratively tuning and running basic performance tests. The current basic test works to determine the maximum throughput of Token Transfers the system can sustain. It does this by conducting a binary search of possible Token Transfers Per Second (TPS) configurations, testing each configuration in a short duration test and scoring its result. The search algorithm iteratively configures and runs `performance_test_basic.py` tests and analyzes the output to determine a success metric used to continue the search. When the search completes, a max TPS throughput value is reported (along with other performance metrics from that run). The script then proceeds to conduct an additional search with longer duration test runs within a narrowed TPS configuration range to determine the sustainable max TPS. Finally it produces a report on the entire performance run, summarizing each individual test scenario, results, and full report details on the tests when maximum TPS was achieved ([Performance Test Report](#performance-test-report))
+The Performance Harness is a module which provides the framework and utilities to run performance load tests on node infrastructure.
-The `performance_test_basic.py` support script performs a single basic performance test that targets a configurable TPS target and, if successful, reports statistics on performance metrics measured during the test. It configures and launches a blockchain test environment, creates wallets and accounts for testing, and configures and launches transaction generators for creating specific transaction load in the ecosystem. Finally it analyzes the performance of the system under the configuration through log analysis and chain queries and produces a [Performance Test Basic Report](#performance-test-basic-report).
+`PerformanceHarnessScenarioRunner.py` is currently the main entry point and provides the utility to configure and run such tests. It also serves as an example of how one can import the `PerformanceHarness` module to configure and run performance tests. The `PerformanceHarnessScenarioRunner` currently provides two options for running performance tests. The first, `findMax`, uses the `PerformanceTest` class to run a suite of `PerformanceTestBasic` test runs to zero in on a max performance (see the section on `PerformanceTest` below). The second, `singleTest`, allows a user to run a single `PerformanceTestBasic` and see the results of a single configuration (see the `PerformanceTestBasic` section below).
+
+The `PerformanceTest`'s main goal is to measure current peak performance metrics through iteratively tuning and running basic performance tests. The current `PerformanceTest` scenario works to determine the maximum throughput of Token Transfers (or other transaction types if configured) the system can sustain. It does this by conducting a binary search of possible Token Transfers Per Second (TPS) configurations, testing each configuration in a short duration test and scoring its result. The search algorithm iteratively configures and runs `PerformanceTestBasic` test scenarios and analyzes the output to determine a success metric used to continue the search. When the search completes, a max TPS throughput value is reported (along with other performance metrics from that run). The `PerformanceTest` then proceeds to conduct an additional search with longer duration test runs within a narrowed TPS configuration range to determine the sustainable max TPS. Finally it produces a report on the entire performance run, summarizing each individual test scenario, results, and full report details on the tests when maximum TPS was achieved ([Performance Test Report](#performance-test-report))
+
+The `PerformanceTestBasic` test performs a single basic performance test that targets a configurable TPS target and, if successful, reports statistics on performance metrics measured during the test. It configures and launches a blockchain test environment, creates wallets and accounts for testing, and configures and launches transaction generators for creating specific transaction load in the ecosystem. Finally it analyzes the performance of the system under the configuration through log analysis and chain queries and produces a [Performance Test Basic Report](#performance-test-basic-report).
The `launch_generators.py` support script provides a means to easily calculate and spawn the number of transaction generator instances to generate a given target TPS, distributing generation load between the instances in a fair manner such that the aggregate load meets the requested test load.
@@ -19,28 +23,28 @@ Please refer to [Leap: Build and Install from Source](https://github.com/Antelop
2. Run Performance Tests
1. Full Performance Harness Test Run (Standard):
``` bash
- ./build/tests/performance_tests/performance_test.py testBpOpMode
+ ./build/tests/PerformanceHarnessScenarioRunner.py findMax testBpOpMode
```
2. Single Performance Test Basic Run (Manually run one-off test):
```bash
- ./build/tests/performance_tests/performance_test_basic.py
+ ./build/tests/PerformanceHarnessScenarioRunner.py singleTest
```
3. Collect Results - By default the Performance Harness will capture and save logs. To delete logs, use `--del-perf-logs`. Additionally, final reports will be collected by default. To omit final reports, use `--del-report` and/or `--del-test-report`.
1. Navigate to performance test logs directory
```bash
- cd ./build/performance_test/
+ cd ./build/PerformanceHarnessScenarioRunnerLogs/
```
- 2. Log Directory Structure is hierarchical with each run of the `performance_test.py` reporting into a timestamped directory where it includes the full performance report as well as a directory containing output from each test type run (here, `performance_test_basic.py`) and each individual test run outputs into a timestamped directory that may contain block data logs and transaction generator logs as well as the test's basic report. An example directory structure follows:
+ 2. Log Directory Structure is hierarchical with each run of the `PerformanceHarnessScenarioRunner` reporting into a timestamped directory where it includes the full performance report as well as a directory containing output from each test type run (here, `PerformanceTestBasic`) and each individual test run outputs into a timestamped directory within `testRunLogs` that may contain block data logs and transaction generator logs as well as the test's basic report. An example directory structure follows:
Expand Example Directory Structure
``` bash
- performance_test/
+ PerformanceHarnessScenarioRunnerLogs/
└── 2023-04-05_14-35-59
├── pluginThreadOptRunLogs
│ ├── chainThreadResults.txt
│ ├── netThreadResults.txt
- │ ├── performance_test
+ │ ├── PerformanceHarnessScenarioRunnerLogs
│ │ ├── 2023-04-05_14-35-59-50000
│ │ │ ├── blockDataLogs
│ │ │ │ ├── blockData.txt
@@ -104,7 +108,7 @@ Please refer to [Leap: Build and Install from Source](https://github.com/Antelop
│ │ │ │ .
│ │ │ │ └── trx_data_output_9498.txt
│ │ │ └── var
- │ │ │ └── performance_test8480
+ │ │ │ └── PerformanceHarnessScenarioRunner8480
│ │ │ ├── node_00
│ │ │ │ ├── blocks
│ │ │ │ │ ├── blocks.index
@@ -159,7 +163,7 @@ Please refer to [Leap: Build and Install from Source](https://github.com/Antelop
│ └── producerThreadResults.txt
├── report.json
└── testRunLogs
- └── performance_test
+ └── PerformanceHarnessScenarioRunnerLogs
├── 2023-04-05_16-14-31-50000
│ ├── blockDataLogs
│ │ ├── blockData.txt
@@ -224,7 +228,7 @@ Please refer to [Leap: Build and Install from Source](https://github.com/Antelop
│ │ .
│ │ └── trx_data_output_20211.txt
│ └── var
- │ └── performance_test8480
+ │ └── PerformanceHarnessScenarioRunner8480
│ ├── node_00
│ │ ├── blocks
│ │ │ ├── blocks.index
@@ -281,15 +285,45 @@ Please refer to [Leap: Build and Install from Source](https://github.com/Antelop
# Configuring Performance Harness Tests
+## Performance Harness Scenario Runner
+
+The Performance Harness Scenario Runner is the main script that configures and runs `PerformanceTest` runs or single `PerformanceTestBasic` runs.
+
+
+ Usage
+
+```
+usage: PerformanceHarnessScenarioRunner.py [-h] {singleTest,findMax} ...
+```
+
+
+
+
+ Expand Scenario Type Sub-Command List
+
+```
+optional arguments:
+ -h, --help show this help message and exit
+
+Scenario Types:
+ Each Scenario Type sets up either a Performance Test Basic or Performance Test testing scenario and allows further configuration of the scenario.
+
+ {singleTest,findMax} Currently supported scenario type sub-commands.
+ singleTest Run a single Performance Test Basic test scenario.
+ findMax Runs a Performance Test scenario.
+```
+
+
+
## Performance Test
-The Performance Harness main script `performance_test.py` can be configured using the following command line arguments:
+The Performance Harness main test class `PerformanceTest` (residing in `performance_test.py`) can be configured through the `findMax` sub-command to `PerformanceHarnessScenarioRunner` using the following command line arguments:
Usage
```
-usage: performance_test.py [-h] {testBpOpMode,testApiOpMode} ...
+usage: PerformanceHarnessScenarioRunner.py findMax [-h] {testBpOpMode,testApiOpMode} ...
```
@@ -336,18 +370,18 @@ Operational Modes:
Usage
```
-usage: performance_test.py testBpOpMode [--skip-tps-test]
- [--calc-producer-threads {none,lmax,full}]
- [--calc-chain-threads {none,lmax,full}]
- [--calc-net-threads {none,lmax,full}]
- [--del-test-report]
- [--max-tps-to-test MAX_TPS_TO_TEST]
- [--min-tps-to-test MIN_TPS_TO_TEST]
- [--test-iteration-duration-sec TEST_ITERATION_DURATION_SEC]
- [--test-iteration-min-step TEST_ITERATION_MIN_STEP]
- [--final-iterations-duration-sec FINAL_ITERATIONS_DURATION_SEC]
- [-h]
- {overrideBasicTestConfig} ...
+usage: PerformanceHarnessScenarioRunner.py findMax testBpOpMode [--skip-tps-test]
+ [--calc-producer-threads {none,lmax,full}]
+ [--calc-chain-threads {none,lmax,full}]
+ [--calc-net-threads {none,lmax,full}]
+ [--del-test-report]
+ [--max-tps-to-test MAX_TPS_TO_TEST]
+ [--min-tps-to-test MIN_TPS_TO_TEST]
+ [--test-iteration-duration-sec TEST_ITERATION_DURATION_SEC]
+ [--test-iteration-min-step TEST_ITERATION_MIN_STEP]
+ [--final-iterations-duration-sec FINAL_ITERATIONS_DURATION_SEC]
+ [-h]
+ {overrideBasicTestConfig} ...
```
@@ -457,7 +491,7 @@ Advanced Configuration Options:
Usage
```
-usage: performance_test.py testBpOpMode overrideBasicTestConfig
+usage: PerformanceHarnessScenarioRunner.py findMax testBpOpMode overrideBasicTestConfig
[-h] [-d D] [--dump-error-details] [-v] [--leave-running] [--unshared]
[--endpoint-mode {p2p,http}]
[--producer-nodes PRODUCER_NODES] [--validation-nodes VALIDATION_NODES] [--api-nodes API_NODES]
@@ -592,19 +626,20 @@ Performance Test Basic Base:
-# Support Scripts
+# Support Classes and Scripts
-The following scripts are typically used by the Performance Harness main script `performance_test.py` to perform specific tasks as delegated and configured by the main script. However, there may be applications in certain use cases where running a single one-off test or transaction generator is desired. In those situations, the following argument details might be useful to understanding how to run these utilities in stand-alone mode. The argument breakdown may also be useful in understanding how the Performance Harness main script's arguments are being passed through to configure lower-level entities.
+The following classes and scripts are typically used by the Performance Harness main test class `PerformanceTest` to perform specific tasks as delegated and configured by the main scenario. However, there may be applications in certain use cases where running a single one-off test or transaction generator is desired. In those situations, the following argument details might be useful in understanding how to run these utilities in stand-alone mode. The argument breakdown may also be useful in understanding how the Performance Harness arguments are being passed through to configure lower-level entities.
## Performance Test Basic
-`performance_test_basic.py` can be configured using the following command line arguments:
+`PerformanceTestBasic` (file: `performance_test_basic.py`) can be configured using the following command line arguments:
Usage
```
- usage: performance_test_basic.py [-h] [-d D]
+ usage: PerformanceHarnessScenarioRunner.py singleTest
+ [-h] [-d D]
[--dump-error-details] [-v] [--leave-running]
[--unshared]
[--endpoint-mode {p2p,http}]
@@ -679,7 +714,7 @@ Performance Test Basic Base:
API nodes read only threads count for use with read-only transactions (default: 0)
--tps-limit-per-generator TPS_LIMIT_PER_GENERATOR
Maximum amount of transactions per second a single generator can have. (default: 4000)
- --genesis GENESIS Path to genesis.json (default: tests/performance_tests/genesis.json)
+ --genesis GENESIS Path to genesis.json (default: tests/PerformanceHarness/genesis.json)
--num-blocks-to-prune NUM_BLOCKS_TO_PRUNE
The number of potentially non-empty blocks, in addition to leading and trailing size 0 blocks,
to prune from the beginning and end of the range of blocks of interest for evaluation. (default: 2)
@@ -829,12 +864,12 @@ Transaction Generator command line options.:
## Performance Test Report
-The Performance Harness generates a report to summarize results of test scenarios as well as overarching results of the performance harness run. By default the report described below will be written to the top level timestamped directory for the performance run with the file name `report.json`. To omit final report, use `--del-report`.
+The Performance Harness Scenario Runner, through the `PerformanceTest` and `PerformanceTestBasic` classes in the `PerformanceHarness` module, generates a report to summarize results of test scenarios as well as overarching results of the performance harness run. By default the report described below will be written to the top level timestamped directory for the performance run with the file name `report.json`. To omit final report, use `--del-report`.
Command used to run test and generate report:
``` bash
-./build/tests/performance_tests/performance_test.py testBpOpMode --test-iteration-duration-sec 10 --final-iterations-duration-sec 30 --calc-producer-threads lmax --calc-chain-threads lmax --calc-net-threads lmax
+./tests/PerformanceHarnessScenarioRunner.py findMax testBpOpMode --test-iteration-duration-sec 10 --final-iterations-duration-sec 30 --calc-producer-threads lmax --calc-chain-threads lmax --calc-net-threads lmax
```
### Report Breakdown
@@ -855,11 +890,11 @@ Next, a high level summary of the search scenario target and results is included
"19001": "FAIL",
"16001": "FAIL",
"14501": "FAIL",
- "13501": "FAIL",
- "13001": "PASS"
+ "13501": "PASS",
+ "14001": "PASS"
},
"LongRunningSearchScenariosSummary": {
- "13001": "PASS"
+ "14001": "PASS"
},
```
@@ -869,26 +904,26 @@ Next, a summary of the search scenario conducted and respective results is inclu
Expand Search Scenario Summary Example
``` json
- "2": {
+ "0": {
"success": true,
- "searchTarget": 12501,
+ "searchTarget": 14001,
"searchFloor": 1,
- "searchCeiling": 24501,
+ "searchCeiling": 14001,
"basicTestResult": {
- "testStart": "2023-06-05T19:13:42.528121",
- "testEnd": "2023-06-05T19:15:00.441933",
- "testDuration": "0:01:17.913812",
+ "testStart": "2023-08-18T17:49:42.016053",
+ "testEnd": "2023-08-18T17:50:56.550087",
+ "testDuration": "0:01:14.534034",
"testPassed": true,
"testRunSuccessful": true,
"testRunCompleted": true,
"tpsExpectMet": true,
"trxExpectMet": true,
- "targetTPS": 12501,
- "resultAvgTps": 12523.6875,
- "expectedTxns": 125010,
- "resultTxns": 125010,
+ "targetTPS": 14001,
+ "resultAvgTps": 14060.375,
+ "expectedTxns": 140010,
+ "resultTxns": 140010,
"testAnalysisBlockCnt": 17,
- "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-13-42-12501"
+ "logsDir": "PerformanceHarnessScenarioRunnerLogs/2023-08-18_16-16-57/testRunLogs/PerformanceHarnessScenarioRunnerLogs/2023-08-18_17-49-42-14001"
}
}
```
@@ -924,15 +959,15 @@ Finally, the full detail test report for each of the determined max TPS throughp
``` json
{
- "perfTestsBegin": "2023-06-05T17:59:49.175441",
- "perfTestsFinish": "2023-06-05T19:23:03.723738",
- "perfTestsDuration": "1:23:14.548297",
+ "perfTestsBegin": "2023-08-18T16:16:57.515935",
+ "perfTestsFinish": "2023-08-18T17:50:56.573105",
+ "perfTestsDuration": "1:33:59.057170",
"operationalMode": "Block Producer Operational Mode",
- "InitialMaxTpsAchieved": 13001,
- "LongRunningMaxTpsAchieved": 13001,
- "tpsTestStart": "2023-06-05T19:10:32.123231",
- "tpsTestFinish": "2023-06-05T19:23:03.723722",
- "tpsTestDuration": "0:12:31.600491",
+ "InitialMaxTpsAchieved": 14001,
+ "LongRunningMaxTpsAchieved": 14001,
+ "tpsTestStart": "2023-08-18T17:39:08.002919",
+ "tpsTestFinish": "2023-08-18T17:50:56.573095",
+ "tpsTestDuration": "0:11:48.570176",
"InitialSearchScenariosSummary": {
"50000": "FAIL",
"25001": "FAIL",
@@ -940,11 +975,11 @@ Finally, the full detail test report for each of the determined max TPS throughp
"19001": "FAIL",
"16001": "FAIL",
"14501": "FAIL",
- "13501": "FAIL",
- "13001": "PASS"
+ "13501": "PASS",
+ "14001": "PASS"
},
"LongRunningSearchScenariosSummary": {
- "13001": "PASS"
+ "14001": "PASS"
},
"InitialSearchResults": {
"0": {
@@ -953,20 +988,20 @@ Finally, the full detail test report for each of the determined max TPS throughp
"searchFloor": 1,
"searchCeiling": 50000,
"basicTestResult": {
- "testStart": "2023-06-05T19:10:32.123282",
- "testEnd": "2023-06-05T19:12:12.746349",
- "testDuration": "0:01:40.623067",
+ "testStart": "2023-08-18T17:39:08.002959",
+ "testEnd": "2023-08-18T17:40:45.539974",
+ "testDuration": "0:01:37.537015",
"testPassed": false,
"testRunSuccessful": false,
"testRunCompleted": true,
"tpsExpectMet": false,
"trxExpectMet": false,
"targetTPS": 50000,
- "resultAvgTps": 14015.564102564103,
+ "resultAvgTps": 13264.95,
"expectedTxns": 500000,
- "resultTxns": 309515,
- "testAnalysisBlockCnt": 40,
- "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-10-32-50000"
+ "resultTxns": 295339,
+ "testAnalysisBlockCnt": 41,
+ "logsDir": "PerformanceHarnessScenarioRunnerLogs/2023-08-18_16-16-57/testRunLogs/PerformanceHarnessScenarioRunnerLogs/2023-08-18_17-39-08-50000"
}
},
"1": {
@@ -975,20 +1010,20 @@ Finally, the full detail test report for each of the determined max TPS throughp
"searchFloor": 1,
"searchCeiling": 49500,
"basicTestResult": {
- "testStart": "2023-06-05T19:12:12.749120",
- "testEnd": "2023-06-05T19:13:42.524984",
- "testDuration": "0:01:29.775864",
+ "testStart": "2023-08-18T17:40:45.541240",
+ "testEnd": "2023-08-18T17:42:10.566883",
+ "testDuration": "0:01:25.025643",
"testPassed": false,
"testRunSuccessful": false,
"testRunCompleted": true,
"tpsExpectMet": false,
"trxExpectMet": false,
"targetTPS": 25001,
- "resultAvgTps": 13971.5,
+ "resultAvgTps": 13415.515151515152,
"expectedTxns": 250010,
- "resultTxns": 249981,
- "testAnalysisBlockCnt": 33,
- "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-12-12-25001"
+ "resultTxns": 249933,
+ "testAnalysisBlockCnt": 34,
+ "logsDir": "PerformanceHarnessScenarioRunnerLogs/2023-08-18_16-16-57/testRunLogs/PerformanceHarnessScenarioRunnerLogs/2023-08-18_17-40-45-25001"
}
},
"2": {
@@ -997,20 +1032,20 @@ Finally, the full detail test report for each of the determined max TPS throughp
"searchFloor": 1,
"searchCeiling": 24501,
"basicTestResult": {
- "testStart": "2023-06-05T19:13:42.528121",
- "testEnd": "2023-06-05T19:15:00.441933",
- "testDuration": "0:01:17.913812",
+ "testStart": "2023-08-18T17:42:10.568046",
+ "testEnd": "2023-08-18T17:43:23.733271",
+ "testDuration": "0:01:13.165225",
"testPassed": true,
"testRunSuccessful": true,
"testRunCompleted": true,
"tpsExpectMet": true,
"trxExpectMet": true,
"targetTPS": 12501,
- "resultAvgTps": 12523.6875,
+ "resultAvgTps": 12509.9375,
"expectedTxns": 125010,
"resultTxns": 125010,
"testAnalysisBlockCnt": 17,
- "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-13-42-12501"
+ "logsDir": "PerformanceHarnessScenarioRunnerLogs/2023-08-18_16-16-57/testRunLogs/PerformanceHarnessScenarioRunnerLogs/2023-08-18_17-42-10-12501"
}
},
"3": {
@@ -1019,20 +1054,20 @@ Finally, the full detail test report for each of the determined max TPS throughp
"searchFloor": 13001,
"searchCeiling": 24501,
"basicTestResult": {
- "testStart": "2023-06-05T19:15:00.444109",
- "testEnd": "2023-06-05T19:16:25.749654",
- "testDuration": "0:01:25.305545",
+ "testStart": "2023-08-18T17:43:23.734927",
+ "testEnd": "2023-08-18T17:44:44.562268",
+ "testDuration": "0:01:20.827341",
"testPassed": false,
- "testRunSuccessful": false,
+ "testRunSuccessful": true,
"testRunCompleted": true,
"tpsExpectMet": false,
- "trxExpectMet": false,
+ "trxExpectMet": true,
"targetTPS": 19001,
- "resultAvgTps": 14858.095238095239,
+ "resultAvgTps": 14669.863636363636,
"expectedTxns": 190010,
- "resultTxns": 189891,
- "testAnalysisBlockCnt": 22,
- "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-15-00-19001"
+ "resultTxns": 190010,
+ "testAnalysisBlockCnt": 23,
+ "logsDir": "PerformanceHarnessScenarioRunnerLogs/2023-08-18_16-16-57/testRunLogs/PerformanceHarnessScenarioRunnerLogs/2023-08-18_17-43-23-19001"
}
},
"4": {
@@ -1041,20 +1076,20 @@ Finally, the full detail test report for each of the determined max TPS throughp
"searchFloor": 13001,
"searchCeiling": 18501,
"basicTestResult": {
- "testStart": "2023-06-05T19:16:25.751860",
- "testEnd": "2023-06-05T19:17:48.336896",
- "testDuration": "0:01:22.585036",
+ "testStart": "2023-08-18T17:44:44.563387",
+ "testEnd": "2023-08-18T17:46:01.838736",
+ "testDuration": "0:01:17.275349",
"testPassed": false,
- "testRunSuccessful": false,
+ "testRunSuccessful": true,
"testRunCompleted": true,
"tpsExpectMet": false,
- "trxExpectMet": false,
+ "trxExpectMet": true,
"targetTPS": 16001,
- "resultAvgTps": 14846.0,
+ "resultAvgTps": 14538.444444444445,
"expectedTxns": 160010,
- "resultTxns": 159988,
+ "resultTxns": 160010,
"testAnalysisBlockCnt": 19,
- "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-16-25-16001"
+ "logsDir": "PerformanceHarnessScenarioRunnerLogs/2023-08-18_16-16-57/testRunLogs/PerformanceHarnessScenarioRunnerLogs/2023-08-18_17-44-44-16001"
}
},
"5": {
@@ -1063,64 +1098,64 @@ Finally, the full detail test report for each of the determined max TPS throughp
"searchFloor": 13001,
"searchCeiling": 15501,
"basicTestResult": {
- "testStart": "2023-06-05T19:17:48.339990",
- "testEnd": "2023-06-05T19:19:07.843311",
- "testDuration": "0:01:19.503321",
+ "testStart": "2023-08-18T17:46:01.839865",
+ "testEnd": "2023-08-18T17:47:15.595123",
+ "testDuration": "0:01:13.755258",
"testPassed": false,
"testRunSuccessful": false,
"testRunCompleted": true,
- "tpsExpectMet": false,
+ "tpsExpectMet": true,
"trxExpectMet": false,
"targetTPS": 14501,
- "resultAvgTps": 13829.588235294117,
+ "resultAvgTps": 14433.25,
"expectedTxns": 145010,
- "resultTxns": 144964,
- "testAnalysisBlockCnt": 18,
- "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-17-48-14501"
+ "resultTxns": 144898,
+ "testAnalysisBlockCnt": 17,
+ "logsDir": "PerformanceHarnessScenarioRunnerLogs/2023-08-18_16-16-57/testRunLogs/PerformanceHarnessScenarioRunnerLogs/2023-08-18_17-46-01-14501"
}
},
"6": {
- "success": false,
+ "success": true,
"searchTarget": 13501,
"searchFloor": 13001,
"searchCeiling": 14001,
"basicTestResult": {
- "testStart": "2023-06-05T19:19:07.845657",
- "testEnd": "2023-06-05T19:20:27.815030",
- "testDuration": "0:01:19.969373",
- "testPassed": false,
- "testRunSuccessful": false,
+ "testStart": "2023-08-18T17:47:15.596266",
+ "testEnd": "2023-08-18T17:48:29.481603",
+ "testDuration": "0:01:13.885337",
+ "testPassed": true,
+ "testRunSuccessful": true,
"testRunCompleted": true,
"tpsExpectMet": true,
- "trxExpectMet": false,
+ "trxExpectMet": true,
"targetTPS": 13501,
- "resultAvgTps": 13470.375,
+ "resultAvgTps": 13542.625,
"expectedTxns": 135010,
- "resultTxns": 135000,
+ "resultTxns": 135010,
"testAnalysisBlockCnt": 17,
- "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-19-07-13501"
+ "logsDir": "PerformanceHarnessScenarioRunnerLogs/2023-08-18_16-16-57/testRunLogs/PerformanceHarnessScenarioRunnerLogs/2023-08-18_17-47-15-13501"
}
},
"7": {
"success": true,
- "searchTarget": 13001,
- "searchFloor": 13001,
- "searchCeiling": 13001,
+ "searchTarget": 14001,
+ "searchFloor": 14001,
+ "searchCeiling": 14001,
"basicTestResult": {
- "testStart": "2023-06-05T19:20:27.817483",
- "testEnd": "2023-06-05T19:21:44.846130",
- "testDuration": "0:01:17.028647",
+ "testStart": "2023-08-18T17:48:29.482846",
+ "testEnd": "2023-08-18T17:49:41.993743",
+ "testDuration": "0:01:12.510897",
"testPassed": true,
"testRunSuccessful": true,
"testRunCompleted": true,
"tpsExpectMet": true,
"trxExpectMet": true,
- "targetTPS": 13001,
- "resultAvgTps": 13032.5625,
- "expectedTxns": 130010,
- "resultTxns": 130010,
+ "targetTPS": 14001,
+ "resultAvgTps": 14035.8125,
+ "expectedTxns": 140010,
+ "resultTxns": 140010,
"testAnalysisBlockCnt": 17,
- "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-20-27-13001"
+ "logsDir": "PerformanceHarnessScenarioRunnerLogs/2023-08-18_16-16-57/testRunLogs/PerformanceHarnessScenarioRunnerLogs/2023-08-18_17-48-29-14001"
}
}
},
@@ -1143,24 +1178,24 @@ Finally, the full detail test report for each of the determined max TPS throughp
"LongRunningSearchResults": {
"0": {
"success": true,
- "searchTarget": 13001,
+ "searchTarget": 14001,
"searchFloor": 1,
- "searchCeiling": 13001,
+ "searchCeiling": 14001,
"basicTestResult": {
- "testStart": "2023-06-05T19:21:44.879637",
- "testEnd": "2023-06-05T19:23:03.697671",
- "testDuration": "0:01:18.818034",
+ "testStart": "2023-08-18T17:49:42.016053",
+ "testEnd": "2023-08-18T17:50:56.550087",
+ "testDuration": "0:01:14.534034",
"testPassed": true,
"testRunSuccessful": true,
"testRunCompleted": true,
"tpsExpectMet": true,
"trxExpectMet": true,
- "targetTPS": 13001,
- "resultAvgTps": 13027.0,
- "expectedTxns": 130010,
- "resultTxns": 130010,
+ "targetTPS": 14001,
+ "resultAvgTps": 14060.375,
+ "expectedTxns": 140010,
+ "resultTxns": 140010,
"testAnalysisBlockCnt": 17,
- "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-21-44-13001"
+ "logsDir": "PerformanceHarnessScenarioRunnerLogs/2023-08-18_16-16-57/testRunLogs/PerformanceHarnessScenarioRunnerLogs/2023-08-18_17-49-42-14001"
}
}
},
@@ -1181,35 +1216,36 @@ Finally, the full detail test report for each of the determined max TPS throughp
},
"ProducerThreadAnalysis": {
- "recommendedThreadCount": 2,
+ "recommendedThreadCount": 3,
"threadToMaxTpsDict": {
"2": 12001,
- "3": 12001
+ "3": 16001,
+ "4": 14001
},
- "analysisStart": "2023-06-05T17:59:49.197967",
- "analysisFinish": "2023-06-05T18:18:33.449126"
+ "analysisStart": "2023-08-18T16:16:57.535103",
+ "analysisFinish": "2023-08-18T16:46:36.202669"
},
"ChainThreadAnalysis": {
- "recommendedThreadCount": 3,
+ "recommendedThreadCount": 2,
"threadToMaxTpsDict": {
- "2": 4001,
- "3": 13001,
- "4": 5501
+ "2": 14001,
+ "3": 13001
},
- "analysisStart": "2023-06-05T18:18:33.449689",
- "analysisFinish": "2023-06-05T18:48:02.262053"
+ "analysisStart": "2023-08-18T16:46:36.203279",
+ "analysisFinish": "2023-08-18T17:07:30.813917"
},
"NetThreadAnalysis": {
- "recommendedThreadCount": 4,
+ "recommendedThreadCount": 5,
"threadToMaxTpsDict": {
- "4": 14501,
- "5": 13501
+ "4": 12501,
+ "5": 13001,
+ "6": 11001
},
- "analysisStart": "2023-06-05T18:48:02.262594",
- "analysisFinish": "2023-06-05T19:10:32.123003"
+ "analysisStart": "2023-08-18T17:07:30.814441",
+ "analysisFinish": "2023-08-18T17:39:08.002767"
},
"args": {
- "rawCmdLine ": "./tests/performance_tests/performance_test.py testBpOpMode --test-iteration-duration-sec 10 --final-iterations-duration-sec 30 --calc-producer-threads lmax --calc-chain-threads lmax --calc-net-threads lmax",
+ "rawCmdLine ": "tests/PerformanceHarnessScenarioRunner.py findMax testBpOpMode --test-iteration-duration-sec 10 --final-iterations-duration-sec 30 --calc-producer-threads lmax --calc-chain-threads lmax --calc-net-threads lmax",
"dumpErrorDetails": false,
"delay": 1,
"nodesFile": null,
@@ -1323,7 +1359,7 @@ Finally, the full detail test report for each of the determined max TPS throughp
"_eosVmOcCompileThreadsNodeosDefault": 1,
"_eosVmOcCompileThreadsNodeosArg": "--eos-vm-oc-compile-threads",
"eosVmOcEnable": null,
- "_eosVmOcEnableNodeosDefault": false,
+ "_eosVmOcEnableNodeosDefault": "auto",
"_eosVmOcEnableNodeosArg": "--eos-vm-oc-enable",
"enableAccountQueries": null,
"_enableAccountQueriesNodeosDefault": 0,
@@ -1349,6 +1385,9 @@ Finally, the full detail test report for each of the determined max TPS throughp
"transactionFinalityStatusFailureDurationSec": null,
"_transactionFinalityStatusFailureDurationSecNodeosDefault": 180,
"_transactionFinalityStatusFailureDurationSecNodeosArg": "--transaction-finality-status-failure-duration-sec",
+ "disableReplayOpts": null,
+ "_disableReplayOptsNodeosDefault": false,
+ "_disableReplayOptsNodeosArg": "--disable-replay-opts",
"integrityHashOnStart": null,
"_integrityHashOnStartNodeosDefault": false,
"_integrityHashOnStartNodeosArg": "--integrity-hash-on-start",
@@ -1379,9 +1418,6 @@ Finally, the full detail test report for each of the determined max TPS throughp
"forceAllChecks": null,
"_forceAllChecksNodeosDefault": false,
"_forceAllChecksNodeosArg": "--force-all-checks",
- "disableReplayOpts": null,
- "_disableReplayOptsNodeosDefault": false,
- "_disableReplayOptsNodeosArg": "--disable-replay-opts",
"replayBlockchain": null,
"_replayBlockchainNodeosDefault": false,
"_replayBlockchainNodeosArg": "--replay-blockchain",
@@ -1694,7 +1730,7 @@ Finally, the full detail test report for each of the determined max TPS throughp
"abiFile": "eosio.system.abi",
"account": "Name: eosio"
},
- "genesisPath": "tests/performance_tests/genesis.json",
+ "genesisPath": "tests/PerformanceHarness/genesis.json",
"maximumP2pPerHost": 5000,
"maximumClients": 0,
"keepLogs": true,
@@ -1703,10 +1739,9 @@ Finally, the full detail test report for each of the determined max TPS throughp
"bios": "off"
},
"prodsEnableTraceApi": false,
- "nodeosVers": "v4",
+ "nodeosVers": "v4.1.0-dev",
"specificExtraNodeosArgs": {
- "1": "--plugin eosio::trace_api_plugin ",
- "2": "--plugin eosio::chain_api_plugin --plugin eosio::net_api_plugin --read-only-threads 0 "
+ "1": "--plugin eosio::trace_api_plugin "
},
"_totalNodes": 2,
"_pNodes": 1,
@@ -1740,11 +1775,11 @@ Finally, the full detail test report for each of the determined max TPS throughp
"userTrxDataFile": null,
"endpointMode": "p2p",
"opModeCmd": "testBpOpMode",
- "logDirBase": "performance_test",
- "logDirTimestamp": "2023-06-05_17-59-49",
- "logDirPath": "performance_test/2023-06-05_17-59-49",
- "ptbLogsDirPath": "performance_test/2023-06-05_17-59-49/testRunLogs",
- "pluginThreadOptLogsDirPath": "performance_test/2023-06-05_17-59-49/pluginThreadOptRunLogs"
+ "logDirBase": "PerformanceHarnessScenarioRunnerLogs",
+ "logDirTimestamp": "2023-08-18_16-16-57",
+ "logDirPath": "PerformanceHarnessScenarioRunnerLogs/2023-08-18_16-16-57",
+ "ptbLogsDirPath": "PerformanceHarnessScenarioRunnerLogs/2023-08-18_16-16-57/testRunLogs",
+ "pluginThreadOptLogsDirPath": "PerformanceHarnessScenarioRunnerLogs/2023-08-18_16-16-57/pluginThreadOptRunLogs"
},
"env": {
"system": "Linux",
@@ -1752,7 +1787,7 @@ Finally, the full detail test report for each of the determined max TPS throughp
"release": "5.15.90.1-microsoft-standard-WSL2",
"logical_cpu_count": 16
},
- "nodeosVersion": "v4"
+ "nodeosVersion": "v4.1.0-dev"
}
```
@@ -1760,7 +1795,7 @@ Finally, the full detail test report for each of the determined max TPS throughp
## Performance Test Basic Report
-The Performance Test Basic generates, by default, a report that details results of the test, statistics around metrics of interest, as well as diagnostic information about the test run. If `performance_test.py` is run with `--del-test-report`, or `performance_test_basic.py` is run with `--del-report`, the report described below will not be written. Otherwise the report will be written to the timestamped directory within the `performance_test_basic` log directory for the test run with the file name `data.json`.
+The Performance Test Basic generates, by default, a report that details results of the test, statistics around metrics of interest, as well as diagnostic information about the test run. If `PerformanceHarnessScenarioRunner.py findMax` is run with `--del-test-report`, or `PerformanceHarnessScenarioRunner.py singleTest` is run with `--del-report`, the report described below will not be written. Otherwise the report will be written to the timestamped directory within the `PerformanceHarnessScenarioRunnerLogs` log directory for the test run with the file name `data.json`.
Expand for full sample report
@@ -1770,92 +1805,92 @@ The Performance Test Basic generates, by default, a report that details results
"targetApiEndpointType": "p2p",
"targetApiEndpoint": "NA for P2P",
"Result": {
- "testStart": "2023-06-05T19:21:44.879637",
- "testEnd": "2023-06-05T19:23:03.697671",
- "testDuration": "0:01:18.818034",
+ "testStart": "2023-08-18T17:49:42.016053",
+ "testEnd": "2023-08-18T17:50:56.550087",
+ "testDuration": "0:01:14.534034",
"testPassed": true,
"testRunSuccessful": true,
"testRunCompleted": true,
"tpsExpectMet": true,
"trxExpectMet": true,
- "targetTPS": 13001,
- "resultAvgTps": 13027.0,
- "expectedTxns": 130010,
- "resultTxns": 130010,
+ "targetTPS": 14001,
+ "resultAvgTps": 14060.375,
+ "expectedTxns": 140010,
+ "resultTxns": 140010,
"testAnalysisBlockCnt": 17,
- "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-21-44-13001"
+ "logsDir": "PerformanceHarnessScenarioRunnerLogs/2023-08-18_16-16-57/testRunLogs/PerformanceHarnessScenarioRunnerLogs/2023-08-18_17-49-42-14001"
},
"Analysis": {
"BlockSize": {
- "min": 153503,
- "max": 169275,
- "avg": 162269.76470588235,
- "sigma": 3152.279353278714,
+ "min": 164525,
+ "max": 181650,
+ "avg": 175497.23529411765,
+ "sigma": 4638.469493136106,
"emptyBlocks": 0,
"numBlocks": 17
},
"BlocksGuide": {
- "firstBlockNum": 110,
- "lastBlockNum": 140,
- "totalBlocks": 31,
- "testStartBlockNum": 110,
- "testEndBlockNum": 140,
+ "firstBlockNum": 99,
+ "lastBlockNum": 130,
+ "totalBlocks": 32,
+ "testStartBlockNum": 99,
+ "testEndBlockNum": 130,
"setupBlocksCnt": 0,
"tearDownBlocksCnt": 0,
"leadingEmptyBlocksCnt": 1,
- "trailingEmptyBlocksCnt": 9,
+ "trailingEmptyBlocksCnt": 10,
"configAddlDropCnt": 2,
"testAnalysisBlockCnt": 17
},
"TPS": {
- "min": 12775,
- "max": 13285,
- "avg": 13027.0,
- "sigma": 92.70854868888844,
+ "min": 13598,
+ "max": 14344,
+ "avg": 14060.375,
+ "sigma": 202.9254404331798,
"emptyBlocks": 0,
"numBlocks": 17,
- "configTps": 13001,
+ "configTps": 14001,
"configTestDuration": 10,
"tpsPerGenerator": [
- 3250,
- 3250,
- 3250,
- 3251
+ 3500,
+ 3500,
+ 3500,
+ 3501
],
"generatorCount": 4
},
"TrxCPU": {
"min": 8.0,
- "max": 1180.0,
- "avg": 25.89257749403892,
- "sigma": 12.604252354938811,
- "samples": 130010
+ "max": 767.0,
+ "avg": 24.468759374330403,
+ "sigma": 11.149625462006687,
+ "samples": 140010
},
"TrxLatency": {
"min": 0.0009999275207519531,
- "max": 0.5399999618530273,
- "avg": 0.2522121298066488,
- "sigma": 0.14457374598663084,
- "samples": 130010,
+ "max": 0.5320000648498535,
+ "avg": 0.25838474393291105,
+ "sigma": 0.14487074243481057,
+ "samples": 140010,
"units": "seconds"
},
"TrxNet": {
"min": 24.0,
"max": 25.0,
- "avg": 24.846196446427196,
- "sigma": 0.3607603366241642,
- "samples": 130010
+ "avg": 24.85718162988358,
+ "sigma": 0.3498875294629824,
+ "samples": 140010
},
"TrxAckResponseTime": {
"min": -1.0,
"max": -1.0,
"avg": -1.0,
"sigma": 0.0,
- "samples": 130010,
+ "samples": 140010,
"measurementApplicable": "NOT APPLICABLE",
"units": "microseconds"
},
- "ExpectedTransactions": 130010,
+ "ExpectedTransactions": 140010,
"DroppedTransactions": 0,
"ProductionWindowsTotal": 2,
"ProductionWindowsAverageSize": 12.0,
@@ -1878,7 +1913,7 @@ The Performance Test Basic generates, by default, a report that details results
}
},
"args": {
- "rawCmdLine ": "./tests/performance_tests/performance_test.py testBpOpMode --test-iteration-duration-sec 10 --final-iterations-duration-sec 30 --calc-producer-threads lmax --calc-chain-threads lmax --calc-net-threads lmax",
+ "rawCmdLine ": "tests/PerformanceHarnessScenarioRunner.py findMax testBpOpMode --test-iteration-duration-sec 10 --final-iterations-duration-sec 30 --calc-producer-threads lmax --calc-chain-threads lmax --calc-net-threads lmax",
"dumpErrorDetails": false,
"delay": 1,
"nodesFile": null,
@@ -1992,7 +2027,7 @@ The Performance Test Basic generates, by default, a report that details results
"_eosVmOcCompileThreadsNodeosDefault": 1,
"_eosVmOcCompileThreadsNodeosArg": "--eos-vm-oc-compile-threads",
"eosVmOcEnable": null,
- "_eosVmOcEnableNodeosDefault": false,
+ "_eosVmOcEnableNodeosDefault": "auto",
"_eosVmOcEnableNodeosArg": "--eos-vm-oc-enable",
"enableAccountQueries": null,
"_enableAccountQueriesNodeosDefault": 0,
@@ -2018,6 +2053,9 @@ The Performance Test Basic generates, by default, a report that details results
"transactionFinalityStatusFailureDurationSec": null,
"_transactionFinalityStatusFailureDurationSecNodeosDefault": 180,
"_transactionFinalityStatusFailureDurationSecNodeosArg": "--transaction-finality-status-failure-duration-sec",
+ "disableReplayOpts": null,
+ "_disableReplayOptsNodeosDefault": false,
+ "_disableReplayOptsNodeosArg": "--disable-replay-opts",
"integrityHashOnStart": null,
"_integrityHashOnStartNodeosDefault": false,
"_integrityHashOnStartNodeosArg": "--integrity-hash-on-start",
@@ -2048,9 +2086,6 @@ The Performance Test Basic generates, by default, a report that details results
"forceAllChecks": null,
"_forceAllChecksNodeosDefault": false,
"_forceAllChecksNodeosArg": "--force-all-checks",
- "disableReplayOpts": null,
- "_disableReplayOptsNodeosDefault": false,
- "_disableReplayOptsNodeosArg": "--disable-replay-opts",
"replayBlockchain": null,
"_replayBlockchainNodeosDefault": false,
"_replayBlockchainNodeosArg": "--replay-blockchain",
@@ -2363,7 +2398,7 @@ The Performance Test Basic generates, by default, a report that details results
"abiFile": "eosio.system.abi",
"account": "Name: eosio"
},
- "genesisPath": "tests/performance_tests/genesis.json",
+ "genesisPath": "tests/PerformanceHarness/genesis.json",
"maximumP2pPerHost": 5000,
"maximumClients": 0,
"keepLogs": true,
@@ -2372,10 +2407,9 @@ The Performance Test Basic generates, by default, a report that details results
"bios": "off"
},
"prodsEnableTraceApi": false,
- "nodeosVers": "v4",
+ "nodeosVers": "v4.1.0-dev",
"specificExtraNodeosArgs": {
- "1": "--plugin eosio::trace_api_plugin ",
- "2": "--plugin eosio::chain_api_plugin --plugin eosio::net_api_plugin --read-only-threads 0 "
+ "1": "--plugin eosio::trace_api_plugin "
},
"_totalNodes": 2,
"_pNodes": 1,
@@ -2390,23 +2424,23 @@ The Performance Test Basic generates, by default, a report that details results
],
"nonProdsEosVmOcEnable": false,
"apiNodesReadOnlyThreadCount": 0,
- "targetTps": 13001,
+ "targetTps": 14001,
"testTrxGenDurationSec": 10,
"tpsLimitPerGenerator": 4000,
"numAddlBlocksToPrune": 2,
- "logDirRoot": "performance_test/2023-06-05_17-59-49/testRunLogs",
+ "logDirRoot": "PerformanceHarnessScenarioRunnerLogs/2023-08-18_16-16-57/testRunLogs",
"delReport": false,
"quiet": false,
"delPerfLogs": false,
- "expectedTransactionsSent": 130010,
+ "expectedTransactionsSent": 140010,
"printMissingTransactions": false,
"userTrxDataFile": null,
"endpointMode": "p2p",
"apiEndpoint": null,
- "logDirBase": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test",
- "logDirTimestamp": "2023-06-05_19-21-44",
- "logDirTimestampedOptSuffix": "-13001",
- "logDirPath": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-21-44-13001",
+ "logDirBase": "PerformanceHarnessScenarioRunnerLogs/2023-08-18_16-16-57/testRunLogs/PerformanceHarnessScenarioRunnerLogs",
+ "logDirTimestamp": "2023-08-18_17-49-42",
+ "logDirTimestampedOptSuffix": "-14001",
+ "logDirPath": "PerformanceHarnessScenarioRunnerLogs/2023-08-18_16-16-57/testRunLogs/PerformanceHarnessScenarioRunnerLogs/2023-08-18_17-49-42-14001",
"userTrxData": "NOT CONFIGURED"
},
"env": {
@@ -2415,7 +2449,7 @@ The Performance Test Basic generates, by default, a report that details results
"release": "5.15.90.1-microsoft-standard-WSL2",
"logical_cpu_count": 16
},
- "nodeosVersion": "v4"
+ "nodeosVersion": "v4.1.0-dev"
}
```
diff --git a/tests/PerformanceHarness/__init__.py b/tests/PerformanceHarness/__init__.py
new file mode 100644
index 0000000000..fae8007e78
--- /dev/null
+++ b/tests/PerformanceHarness/__init__.py
@@ -0,0 +1,6 @@
+__all__ = ["log_reader", "performance_test_basic", "performance_test", "NodeosPluginArgs"]
+
+from .log_reader import blockData, trxData, chainData, scrapeTrxGenTrxSentDataLogs, JsonReportHandler, analyzeLogResults, TpsTestConfig, ArtifactPaths, LogAnalysis
+from .NodeosPluginArgs import BasePluginArgs, ChainPluginArgs, HttpPluginArgs, NetPluginArgs, ProducerPluginArgs, ResourceMonitorPluginArgs, SignatureProviderPluginArgs, StateHistoryPluginArgs, TraceApiPluginArgs
+from .performance_test_basic import PerformanceTestBasic, PtbArgumentsHandler
+from .performance_test import PerformanceTest, PerfTestArgumentsHandler
\ No newline at end of file
diff --git a/tests/performance_tests/cpuTrxData.json b/tests/PerformanceHarness/cpuTrxData.json
similarity index 100%
rename from tests/performance_tests/cpuTrxData.json
rename to tests/PerformanceHarness/cpuTrxData.json
diff --git a/tests/performance_tests/genesis.json b/tests/PerformanceHarness/genesis.json
similarity index 100%
rename from tests/performance_tests/genesis.json
rename to tests/PerformanceHarness/genesis.json
diff --git a/tests/performance_tests/log_reader.py b/tests/PerformanceHarness/log_reader.py
similarity index 100%
rename from tests/performance_tests/log_reader.py
rename to tests/PerformanceHarness/log_reader.py
diff --git a/tests/performance_tests/performance_test.py b/tests/PerformanceHarness/performance_test.py
similarity index 92%
rename from tests/performance_tests/performance_test.py
rename to tests/PerformanceHarness/performance_test.py
index 537f1926a2..797654fc3a 100755
--- a/tests/performance_tests/performance_test.py
+++ b/tests/PerformanceHarness/performance_test.py
@@ -10,14 +10,13 @@
from pathlib import Path, PurePath
sys.path.append(str(PurePath(PurePath(Path(__file__).absolute()).parent).parent))
-from NodeosPluginArgs import ChainPluginArgs, HttpPluginArgs, NetPluginArgs, ProducerPluginArgs, ResourceMonitorPluginArgs
from TestHarness import TestHelper, Utils, Account
-from performance_test_basic import PerformanceTestBasic, PtbArgumentsHandler
+from .performance_test_basic import PerformanceTestBasic, PtbArgumentsHandler
from platform import release, system
from dataclasses import dataclass, asdict, field
from datetime import datetime
from enum import Enum
-from log_reader import JsonReportHandler
+from .log_reader import JsonReportHandler
class PerformanceTest:
@@ -76,7 +75,7 @@ class PerfTestSearchResults:
@dataclass
class LoggingConfig:
- logDirBase: Path = Path(".")/PurePath(PurePath(__file__).name).stem
+ logDirBase: Path = Path(".")/os.path.basename(sys.argv[0]).rsplit('.',maxsplit=1)[0]
logDirTimestamp: str = f"{datetime.utcnow().strftime('%Y-%m-%d_%H-%M-%S')}"
logDirPath: Path = field(default_factory=Path, init=False)
ptbLogsDirPath: Path = field(default_factory=Path, init=False)
@@ -95,7 +94,7 @@ def __init__(self, testHelperConfig: PerformanceTestBasic.TestHelperConfig=Perfo
self.testsStart = datetime.utcnow()
- self.loggingConfig = PerformanceTest.LoggingConfig(logDirBase=Path(self.ptConfig.logDirRoot)/PurePath(PurePath(__file__).name).stem,
+ self.loggingConfig = PerformanceTest.LoggingConfig(logDirBase=Path(self.ptConfig.logDirRoot)/f"{os.path.basename(sys.argv[0]).rsplit('.',maxsplit=1)[0]}Logs",
logDirTimestamp=f"{self.testsStart.strftime('%Y-%m-%d_%H-%M-%S')}")
def performPtbBinarySearch(self, clusterConfig: PerformanceTestBasic.ClusterConfig, logDirRoot: Path, delReport: bool, quiet: bool, delPerfLogs: bool) -> TpsTestResult.PerfTestSearchResults:
@@ -115,7 +114,7 @@ def performPtbBinarySearch(self, clusterConfig: PerformanceTestBasic.ClusterConf
numAddlBlocksToPrune=self.ptConfig.numAddlBlocksToPrune, logDirRoot=logDirRoot, delReport=delReport,
quiet=quiet, userTrxDataFile=self.ptConfig.userTrxDataFile, endpointMode=self.ptConfig.endpointMode)
- myTest = PerformanceTestBasic(testHelperConfig=self.testHelperConfig, clusterConfig=clusterConfig, ptbConfig=ptbConfig, testNamePath="performance_test")
+ myTest = PerformanceTestBasic(testHelperConfig=self.testHelperConfig, clusterConfig=clusterConfig, ptbConfig=ptbConfig, testNamePath=os.path.basename(sys.argv[0]).rsplit('.',maxsplit=1)[0])
myTest.runTest()
if myTest.testResult.testPassed:
maxTpsAchieved = binSearchTarget
@@ -157,7 +156,7 @@ def performPtbReverseLinearSearch(self, tpsInitial: int) -> TpsTestResult.PerfTe
numAddlBlocksToPrune=self.ptConfig.numAddlBlocksToPrune, logDirRoot=self.loggingConfig.ptbLogsDirPath, delReport=self.ptConfig.delReport,
quiet=self.ptConfig.quiet, delPerfLogs=self.ptConfig.delPerfLogs, userTrxDataFile=self.ptConfig.userTrxDataFile, endpointMode=self.ptConfig.endpointMode)
- myTest = PerformanceTestBasic(testHelperConfig=self.testHelperConfig, clusterConfig=self.clusterConfig, ptbConfig=ptbConfig, testNamePath="performance_test")
+ myTest = PerformanceTestBasic(testHelperConfig=self.testHelperConfig, clusterConfig=self.clusterConfig, ptbConfig=ptbConfig, testNamePath=os.path.basename(sys.argv[0]).rsplit('.',maxsplit=1)[0])
myTest.runTest()
if myTest.testResult.testPassed:
maxTpsAchieved = searchTarget
@@ -521,40 +520,3 @@ def parseArgs():
ptParser=PerfTestArgumentsHandler.createArgumentParser()
args=ptParser.parse_args()
return args
-
-def main():
- args = PerfTestArgumentsHandler.parseArgs()
- Utils.Debug = args.v
-
- testHelperConfig = PerformanceTestBasic.setupTestHelperConfig(args)
- testClusterConfig = PerformanceTestBasic.setupClusterConfig(args)
-
- ptConfig = PerformanceTest.PtConfig(testDurationSec=args.test_iteration_duration_sec,
- finalDurationSec=args.final_iterations_duration_sec,
- delPerfLogs=args.del_perf_logs,
- maxTpsToTest=args.max_tps_to_test,
- minTpsToTest=args.min_tps_to_test,
- testIterationMinStep=args.test_iteration_min_step,
- tpsLimitPerGenerator=args.tps_limit_per_generator,
- delReport=args.del_report,
- delTestReport=args.del_test_report,
- numAddlBlocksToPrune=args.num_blocks_to_prune,
- quiet=args.quiet,
- logDirRoot=Path("."),
- skipTpsTests=args.skip_tps_test,
- calcProducerThreads=args.calc_producer_threads,
- calcChainThreads=args.calc_chain_threads,
- calcNetThreads=args.calc_net_threads,
- userTrxDataFile=Path(args.user_trx_data_file) if args.user_trx_data_file is not None else None,
- endpointMode=args.endpoint_mode,
- opModeCmd=args.op_mode_sub_cmd)
-
- myTest = PerformanceTest(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptConfig=ptConfig)
-
- perfRunSuccessful = myTest.runTest()
-
- exitCode = 0 if perfRunSuccessful else 1
- exit(exitCode)
-
-if __name__ == '__main__':
- main()
diff --git a/tests/performance_tests/performance_test_basic.py b/tests/PerformanceHarness/performance_test_basic.py
similarity index 92%
rename from tests/performance_tests/performance_test_basic.py
rename to tests/PerformanceHarness/performance_test_basic.py
index bdd2163b86..5189dc2530 100755
--- a/tests/performance_tests/performance_test_basic.py
+++ b/tests/PerformanceHarness/performance_test_basic.py
@@ -8,13 +8,13 @@
import shutil
import signal
import json
-import log_reader
import traceback
from pathlib import Path, PurePath
sys.path.append(str(PurePath(PurePath(Path(__file__).absolute()).parent).parent))
-from NodeosPluginArgs import ChainPluginArgs, HttpPluginArgs, NetPluginArgs, ProducerPluginArgs, ResourceMonitorPluginArgs, SignatureProviderPluginArgs, StateHistoryPluginArgs, TraceApiPluginArgs
+from .log_reader import blockData, trxData, chainData, scrapeTrxGenTrxSentDataLogs, JsonReportHandler, analyzeLogResults, TpsTestConfig, ArtifactPaths, LogAnalysis
+from .NodeosPluginArgs import ChainPluginArgs, HttpPluginArgs, NetPluginArgs, ProducerPluginArgs, ResourceMonitorPluginArgs, SignatureProviderPluginArgs, StateHistoryPluginArgs, TraceApiPluginArgs
from TestHarness import Account, Cluster, TestHelper, Utils, WalletMgr, TransactionGeneratorsLauncher, TpsTrxGensConfig
from TestHarness.TestHelper import AppArgs
from dataclasses import dataclass, asdict, field
@@ -97,7 +97,7 @@ class SpecifiedContract:
dontKill: bool = False # leave_running
extraNodeosArgs: ExtraNodeosArgs = field(default_factory=ExtraNodeosArgs)
specifiedContract: SpecifiedContract = field(default_factory=SpecifiedContract)
- genesisPath: Path = Path("tests")/"performance_tests"/"genesis.json"
+ genesisPath: Path = Path("tests")/"PerformanceHarness"/"genesis.json"
maximumP2pPerHost: int = 5000
maximumClients: int = 0
keepLogs: bool = True
@@ -152,12 +152,12 @@ def configureApiNodes():
assert "v1" not in self.nodeosVers and "v0" not in self.nodeosVers, f"nodeos version {Utils.getNodeosVersion()} is unsupported by performance test"
if "v2" in self.nodeosVers:
self.writeTrx = lambda trxDataFile, blockNum, trx: [trxDataFile.write(f"{trx['trx']['id']},{blockNum},{trx['cpu_usage_us']},{trx['net_usage_words']}\n")]
- self.createBlockData = lambda block, blockTransactionTotal, blockNetTotal, blockCpuTotal: log_reader.blockData(blockId=block["payload"]["id"], blockNum=block['payload']['block_num'], transactions=blockTransactionTotal, net=blockNetTotal, cpu=blockCpuTotal, producer=block["payload"]["producer"], status=block["payload"]["confirmed"], _timestamp=block["payload"]["timestamp"])
- self.updateTrxDict = lambda blockNum, transaction, trxDict: trxDict.update(dict([(transaction['trx']['id'], log_reader.trxData(blockNum, transaction['cpu_usage_us'], transaction['net_usage_words']))]))
+ self.createBlockData = lambda block, blockTransactionTotal, blockNetTotal, blockCpuTotal: blockData(blockId=block["payload"]["id"], blockNum=block['payload']['block_num'], transactions=blockTransactionTotal, net=blockNetTotal, cpu=blockCpuTotal, producer=block["payload"]["producer"], status=block["payload"]["confirmed"], _timestamp=block["payload"]["timestamp"])
+ self.updateTrxDict = lambda blockNum, transaction, trxDict: trxDict.update(dict([(transaction['trx']['id'], trxData(blockNum, transaction['cpu_usage_us'], transaction['net_usage_words']))]))
else:
self.writeTrx = lambda trxDataFile, blockNum, trx:[ trxDataFile.write(f"{trx['id']},{trx['block_num']},{trx['block_time']},{trx['cpu_usage_us']},{trx['net_usage_words']},{trx['actions']}\n") ]
- self.createBlockData = lambda block, blockTransactionTotal, blockNetTotal, blockCpuTotal: log_reader.blockData(blockId=block["payload"]["id"], blockNum=block['payload']['number'], transactions=blockTransactionTotal, net=blockNetTotal, cpu=blockCpuTotal, producer=block["payload"]["producer"], status=block["payload"]["status"], _timestamp=block["payload"]["timestamp"])
- self.updateTrxDict = lambda blockNum, transaction, trxDict: trxDict.update(dict([(transaction["id"], log_reader.trxData(blockNum=transaction["block_num"], cpuUsageUs=transaction["cpu_usage_us"], netUsageUs=transaction["net_usage_words"], blockTime=transaction["block_time"]))]))
+ self.createBlockData = lambda block, blockTransactionTotal, blockNetTotal, blockCpuTotal: blockData(blockId=block["payload"]["id"], blockNum=block['payload']['number'], transactions=blockTransactionTotal, net=blockNetTotal, cpu=blockCpuTotal, producer=block["payload"]["producer"], status=block["payload"]["status"], _timestamp=block["payload"]["timestamp"])
+ self.updateTrxDict = lambda blockNum, transaction, trxDict: trxDict.update(dict([(transaction["id"], trxData(blockNum=transaction["block_num"], cpuUsageUs=transaction["cpu_usage_us"], netUsageUs=transaction["net_usage_words"], blockTime=transaction["block_time"]))]))
@dataclass
class PtbConfig:
targetTps: int=8000
@@ -208,7 +208,7 @@ def __init__(self, testHelperConfig: TestHelperConfig=TestHelperConfig(), cluste
self.testStart = datetime.utcnow()
self.testEnd = self.testStart
self.testNamePath = testNamePath
- self.loggingConfig = PerformanceTestBasic.LoggingConfig(logDirBase=Path(self.ptbConfig.logDirRoot)/self.testNamePath,
+ self.loggingConfig = PerformanceTestBasic.LoggingConfig(logDirBase=Path(self.ptbConfig.logDirRoot)/f"{self.testNamePath}Logs",
logDirTimestamp=f"{self.testStart.strftime('%Y-%m-%d_%H-%M-%S')}",
logDirTimestampedOptSuffix = f"-{self.ptbConfig.targetTps}")
@@ -411,7 +411,7 @@ def configureConnections():
info = self.producerNode.getInfo()
chainId = info['chain_id']
lib_id = info['last_irreversible_block_id']
- self.data = log_reader.chainData()
+ self.data = chainData()
self.data.numNodes = self.clusterConfig._totalNodes
abiFile=None
@@ -471,7 +471,7 @@ def configureConnections():
# Get stats after transaction generation stops
trxSent = {}
- log_reader.scrapeTrxGenTrxSentDataLogs(trxSent, self.trxGenLogDirPath, self.ptbConfig.quiet)
+ scrapeTrxGenTrxSentDataLogs(trxSent, self.trxGenLogDirPath, self.ptbConfig.quiet)
if len(trxSent) != self.ptbConfig.expectedTransactionsSent:
print(f"ERROR: Transactions generated: {len(trxSent)} does not match the expected number of transactions: {self.ptbConfig.expectedTransactionsSent}")
blocksToWait = 2 * self.ptbConfig.testTrxGenDurationSec + 10
@@ -496,7 +496,7 @@ def captureLowLevelArtifacts(self):
except Exception as e:
print(f"Failed to move '{self.cluster.nodeosLogPath}' to '{self.varLogsDirPath}': {type(e)}: {e}")
- def createReport(self, logAnalysis: log_reader.LogAnalysis, tpsTestConfig: log_reader.TpsTestConfig, argsDict: dict, testResult: PerfTestBasicResult) -> dict:
+ def createReport(self, logAnalysis: LogAnalysis, tpsTestConfig: TpsTestConfig, argsDict: dict, testResult: PerfTestBasicResult) -> dict:
report = {}
report['targetApiEndpointType'] = self.ptbConfig.endpointMode
report['targetApiEndpoint'] = self.ptbConfig.apiEndpoint if self.ptbConfig.apiEndpoint is not None else "NA for P2P"
@@ -539,12 +539,12 @@ def createReport(self, logAnalysis: log_reader.LogAnalysis, tpsTestConfig: log_r
def analyzeResultsAndReport(self, testResult: PtbTpsTestResult):
args = self.prepArgs()
- artifactsLocate = log_reader.ArtifactPaths(nodeosLogDir=self.nodeosLogDir, nodeosLogPath=self.nodeosLogPath, trxGenLogDirPath=self.trxGenLogDirPath, blockTrxDataPath=self.blockTrxDataPath,
+ artifactsLocate = ArtifactPaths(nodeosLogDir=self.nodeosLogDir, nodeosLogPath=self.nodeosLogPath, trxGenLogDirPath=self.trxGenLogDirPath, blockTrxDataPath=self.blockTrxDataPath,
blockDataPath=self.blockDataPath, transactionMetricsDataPath=self.transactionMetricsDataPath)
- tpsTestConfig = log_reader.TpsTestConfig(targetTps=self.ptbConfig.targetTps, testDurationSec=self.ptbConfig.testTrxGenDurationSec, tpsLimitPerGenerator=self.ptbConfig.tpsLimitPerGenerator,
+ tpsTestConfig = TpsTestConfig(targetTps=self.ptbConfig.targetTps, testDurationSec=self.ptbConfig.testTrxGenDurationSec, tpsLimitPerGenerator=self.ptbConfig.tpsLimitPerGenerator,
numBlocksToPrune=self.ptbConfig.numAddlBlocksToPrune, numTrxGensUsed=testResult.numGeneratorsUsed, targetTpsPerGenList=testResult.targetTpsPerGenList,
quiet=self.ptbConfig.quiet, printMissingTransactions=self.ptbConfig.printMissingTransactions)
- self.logAnalysis = log_reader.analyzeLogResults(data=self.data, tpsTestConfig=tpsTestConfig, artifacts=artifactsLocate)
+ self.logAnalysis = analyzeLogResults(data=self.data, tpsTestConfig=tpsTestConfig, artifacts=artifactsLocate)
self.testEnd = datetime.utcnow()
self.testResult = PerformanceTestBasic.PerfTestBasicResult(targetTPS=self.ptbConfig.targetTps, resultAvgTps=self.logAnalysis.tpsStats.avg, expectedTxns=self.ptbConfig.expectedTransactionsSent,
@@ -568,7 +568,7 @@ def analyzeResultsAndReport(self, testResult: PtbTpsTestResult):
jsonReport = None
if not self.ptbConfig.quiet or not self.ptbConfig.delReport:
- jsonReport = log_reader.JsonReportHandler.reportAsJSON(self.report)
+ jsonReport = JsonReportHandler.reportAsJSON(self.report)
if not self.ptbConfig.quiet:
print(self.data)
@@ -576,7 +576,7 @@ def analyzeResultsAndReport(self, testResult: PtbTpsTestResult):
print(f"Report:\n{jsonReport}")
if not self.ptbConfig.delReport:
- log_reader.JsonReportHandler.exportReportAsJSON(jsonReport, self.reportPath)
+ JsonReportHandler.exportReportAsJSON(jsonReport, self.reportPath)
def preTestSpinup(self):
self.testDirsCleanup()
@@ -677,7 +677,7 @@ def _createBaseArgumentParser(defEndpointApiDef: str, defProdNodeCnt: int, defVa
ptbBaseParserGroup.add_argument("--api-nodes", type=int, help=argparse.SUPPRESS if suppressHelp else "API nodes count", default=defApiNodeCnt)
ptbBaseParserGroup.add_argument("--api-nodes-read-only-threads", type=int, help=argparse.SUPPRESS if suppressHelp else "API nodes read only threads count for use with read-only transactions", default=0)
ptbBaseParserGroup.add_argument("--tps-limit-per-generator", type=int, help=argparse.SUPPRESS if suppressHelp else "Maximum amount of transactions per second a single generator can have.", default=4000)
- ptbBaseParserGroup.add_argument("--genesis", type=str, help=argparse.SUPPRESS if suppressHelp else "Path to genesis.json", default="tests/performance_tests/genesis.json")
+ ptbBaseParserGroup.add_argument("--genesis", type=str, help=argparse.SUPPRESS if suppressHelp else "Path to genesis.json", default="tests/PerformanceHarness/genesis.json")
ptbBaseParserGroup.add_argument("--num-blocks-to-prune", type=int, help=argparse.SUPPRESS if suppressHelp else ("The number of potentially non-empty blocks, in addition to leading and trailing size 0 blocks, "
"to prune from the beginning and end of the range of blocks of interest for evaluation."), default=2)
ptbBaseParserGroup.add_argument("--signature-cpu-billable-pct", type=int, help=argparse.SUPPRESS if suppressHelp else "Percentage of actual signature recovery cpu to bill. Whole number percentages, e.g. 50 for 50%%", default=0)
@@ -754,36 +754,3 @@ def parseArgs():
ptbParser=PtbArgumentsHandler.createArgumentParser()
args=ptbParser.parse_args()
return args
-
-def main():
-
- args = PtbArgumentsHandler.parseArgs()
- Utils.Debug = args.v
-
- testHelperConfig = PerformanceTestBasic.setupTestHelperConfig(args)
- testClusterConfig = PerformanceTestBasic.setupClusterConfig(args)
-
- if args.contracts_console and testClusterConfig.loggingLevel != "debug" and testClusterConfig.loggingLevel != "all":
- print("Enabling contracts-console will not print anything unless debug level is 'debug' or higher."
- f" Current debug level is: {testClusterConfig.loggingLevel}")
-
- ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=args.target_tps,
- testTrxGenDurationSec=args.test_duration_sec,
- tpsLimitPerGenerator=args.tps_limit_per_generator,
- numAddlBlocksToPrune=args.num_blocks_to_prune,
- logDirRoot=".",
- delReport=args.del_report, quiet=args.quiet,
- delPerfLogs=args.del_perf_logs,
- printMissingTransactions=args.print_missing_transactions,
- userTrxDataFile=Path(args.user_trx_data_file) if args.user_trx_data_file is not None else None,
- endpointMode=args.endpoint_mode)
-
- myTest = PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptbConfig=ptbConfig)
-
- testSuccessful = myTest.runTest()
-
- exitCode = 0 if testSuccessful else 1
- exit(exitCode)
-
-if __name__ == '__main__':
- main()
diff --git a/tests/performance_tests/ph_op_modes.md b/tests/PerformanceHarness/ph_op_modes.md
similarity index 100%
rename from tests/performance_tests/ph_op_modes.md
rename to tests/PerformanceHarness/ph_op_modes.md
diff --git a/tests/performance_tests/ramTrxData.json b/tests/PerformanceHarness/ramTrxData.json
similarity index 100%
rename from tests/performance_tests/ramTrxData.json
rename to tests/PerformanceHarness/ramTrxData.json
diff --git a/tests/performance_tests/readOnlyTrxData.json b/tests/PerformanceHarness/readOnlyTrxData.json
similarity index 100%
rename from tests/performance_tests/readOnlyTrxData.json
rename to tests/PerformanceHarness/readOnlyTrxData.json
diff --git a/tests/performance_tests/userTrxDataNewAccount.json b/tests/PerformanceHarness/userTrxDataNewAccount.json
similarity index 100%
rename from tests/performance_tests/userTrxDataNewAccount.json
rename to tests/PerformanceHarness/userTrxDataNewAccount.json
diff --git a/tests/performance_tests/userTrxDataTransfer.json b/tests/PerformanceHarness/userTrxDataTransfer.json
similarity index 100%
rename from tests/performance_tests/userTrxDataTransfer.json
rename to tests/PerformanceHarness/userTrxDataTransfer.json
diff --git a/tests/PerformanceHarnessScenarioRunner.py b/tests/PerformanceHarnessScenarioRunner.py
new file mode 100755
index 0000000000..ac0a3d54d3
--- /dev/null
+++ b/tests/PerformanceHarnessScenarioRunner.py
@@ -0,0 +1,101 @@
+#!/usr/bin/env python3
+
+import argparse
+import sys
+
+from pathlib import Path, PurePath
+sys.path.append(str(PurePath(PurePath(Path(__file__).absolute()).parent).parent))
+
+from PerformanceHarness import performance_test_basic, performance_test
+from TestHarness import Utils
+
+class ScenarioArgumentsHandler(object):
+
+ @staticmethod
+ def createArgumentParser():
+
+ scenarioParser = argparse.ArgumentParser(add_help=True, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+
+ ptbParser=performance_test_basic.PtbArgumentsHandler.createArgumentParser()
+ ptParser=performance_test.PerfTestArgumentsHandler.createArgumentParser()
+
+ #Let top level performance harness parser know there will be sub-commands, and that a scenario type sub-command is required
+ scenarioTypeDesc=("Each Scenario Type sets up either a Performance Test Basic or a Performance Test scenario and allows further configuration of the scenario.")
+ scenarioParserSubparsers = scenarioParser.add_subparsers(title="Scenario Types",
+ description=scenarioTypeDesc,
+ dest="scenario_type_sub_cmd",
+ required=True, help="Currently supported scenario type sub-commands.")
+
+
+ #Create the Single Test Scenario Type Sub-Command and Parsers
+ scenarioParserSubparsers.add_parser(name="singleTest", parents=[ptbParser], add_help=False, help="Run a single Performance Test Basic test scenario.")
+
+ #Create the Find Max Test Scenario Type Sub-Command and Parsers
+ scenarioParserSubparsers.add_parser(name="findMax", parents=[ptParser], add_help=False, help="Runs a Performance Test scenario that iteratively runs performance test basic test scenarios to determine a max tps.")
+
+ return scenarioParser
+
+ @staticmethod
+ def parseArgs():
+ scenarioParser=ScenarioArgumentsHandler.createArgumentParser()
+ args=scenarioParser.parse_args()
+ return args
+
+def main():
+
+ args = ScenarioArgumentsHandler.parseArgs()
+ Utils.Debug = args.v
+
+ testHelperConfig = performance_test_basic.PerformanceTestBasic.setupTestHelperConfig(args)
+ testClusterConfig = performance_test_basic.PerformanceTestBasic.setupClusterConfig(args)
+
+ if args.contracts_console and testClusterConfig.loggingLevel != "debug" and testClusterConfig.loggingLevel != "all":
+ print("Enabling contracts-console will not print anything unless debug level is 'debug' or higher."
+ f" Current debug level is: {testClusterConfig.loggingLevel}")
+
+ if args.scenario_type_sub_cmd == "singleTest":
+ ptbConfig = performance_test_basic.PerformanceTestBasic.PtbConfig(targetTps=args.target_tps,
+ testTrxGenDurationSec=args.test_duration_sec,
+ tpsLimitPerGenerator=args.tps_limit_per_generator,
+ numAddlBlocksToPrune=args.num_blocks_to_prune,
+ logDirRoot=".",
+ delReport=args.del_report, quiet=args.quiet,
+ delPerfLogs=args.del_perf_logs,
+ printMissingTransactions=args.print_missing_transactions,
+ userTrxDataFile=Path(args.user_trx_data_file) if args.user_trx_data_file is not None else None,
+ endpointMode=args.endpoint_mode)
+ Utils.Print(f"testNamePath: {PurePath(PurePath(__file__).name).stem}")
+ myTest = performance_test_basic.PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptbConfig=ptbConfig, testNamePath=f"{PurePath(PurePath(__file__).name).stem}")
+ elif args.scenario_type_sub_cmd == "findMax":
+ ptConfig = performance_test.PerformanceTest.PtConfig(testDurationSec=args.test_iteration_duration_sec,
+ finalDurationSec=args.final_iterations_duration_sec,
+ delPerfLogs=args.del_perf_logs,
+ maxTpsToTest=args.max_tps_to_test,
+ minTpsToTest=args.min_tps_to_test,
+ testIterationMinStep=args.test_iteration_min_step,
+ tpsLimitPerGenerator=args.tps_limit_per_generator,
+ delReport=args.del_report,
+ delTestReport=args.del_test_report,
+ numAddlBlocksToPrune=args.num_blocks_to_prune,
+ quiet=args.quiet,
+ logDirRoot=Path("."),
+ skipTpsTests=args.skip_tps_test,
+ calcProducerThreads=args.calc_producer_threads,
+ calcChainThreads=args.calc_chain_threads,
+ calcNetThreads=args.calc_net_threads,
+ userTrxDataFile=Path(args.user_trx_data_file) if args.user_trx_data_file is not None else None,
+ endpointMode=args.endpoint_mode,
+ opModeCmd=args.op_mode_sub_cmd)
+
+ myTest = performance_test.PerformanceTest(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptConfig=ptConfig)
+ else:
+ Utils.Print(f"Unknown Scenario Type: {args.scenario_type_sub_cmd}")
+ exit(-1)
+
+ testSuccessful = myTest.runTest()
+
+ exitCode = 0 if testSuccessful else 1
+ exit(exitCode)
+
+if __name__ == '__main__':
+ main()
\ No newline at end of file
diff --git a/tests/performance_tests/CMakeLists.txt b/tests/performance_tests/CMakeLists.txt
deleted file mode 100644
index c770cb2dfc..0000000000
--- a/tests/performance_tests/CMakeLists.txt
+++ /dev/null
@@ -1,40 +0,0 @@
-configure_file(performance_test_basic.py . COPYONLY)
-configure_file(performance_test.py . COPYONLY)
-configure_file(log_reader.py . COPYONLY)
-configure_file(genesis.json . COPYONLY)
-configure_file(cpuTrxData.json . COPYONLY)
-configure_file(ramTrxData.json . COPYONLY)
-configure_file(readOnlyTrxData.json . COPYONLY)
-configure_file(userTrxDataTransfer.json . COPYONLY)
-configure_file(userTrxDataNewAccount.json . COPYONLY)
-
-if(DEFINED ENV{GITHUB_ACTIONS})
- set(UNSHARE "--unshared")
-else()
- set(UNSHARE "")
-endif()
-
-add_test(NAME performance_test_bp COMMAND tests/performance_tests/performance_test.py testBpOpMode --max-tps-to-test 50 --test-iteration-min-step 10 --test-iteration-duration-sec 10 --final-iterations-duration-sec 10 --calc-chain-threads lmax overrideBasicTestConfig -v --tps-limit-per-generator 25 --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
-add_test(NAME performance_test_api COMMAND tests/performance_tests/performance_test.py testApiOpMode --max-tps-to-test 50 --test-iteration-min-step 10 --test-iteration-duration-sec 10 --final-iterations-duration-sec 10 --calc-chain-threads lmax overrideBasicTestConfig -v --tps-limit-per-generator 25 --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
-add_test(NAME performance_test_read_only_trxs COMMAND tests/performance_tests/performance_test.py testApiOpMode --max-tps-to-test 50 --test-iteration-min-step 10 --test-iteration-duration-sec 10 --final-iterations-duration-sec 10 overrideBasicTestConfig -v --tps-limit-per-generator 25 --api-nodes-read-only-threads 2 --account-name "payloadless" --abi-file payloadless.abi --wasm-file payloadless.wasm --contract-dir unittests/test-contracts/payloadless --user-trx-data-file tests/performance_tests/readOnlyTrxData.json --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
-add_test(NAME performance_test_cpu_trx_spec COMMAND tests/performance_tests/performance_test.py testBpOpMode --max-tps-to-test 50 --test-iteration-min-step 10 --test-iteration-duration-sec 10 --final-iterations-duration-sec 10 overrideBasicTestConfig -v --tps-limit-per-generator 25 --chain-state-db-size-mb 200 --account-name "c" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/performance_tests/cpuTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
-add_test(NAME performance_test_basic_p2p COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
-add_test(NAME performance_test_basic_http COMMAND tests/performance_tests/performance_test_basic.py -v --endpoint-mode http --producer-nodes 1 --validation-nodes 1 --api-nodes 1 --target-tps 10 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
-add_test(NAME performance_test_basic_transfer_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --user-trx-data-file tests/performance_tests/userTrxDataTransfer.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
-add_test(NAME performance_test_basic_new_acct_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --user-trx-data-file tests/performance_tests/userTrxDataNewAccount.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
-add_test(NAME performance_test_basic_cpu_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --account-name "c" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/performance_tests/cpuTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
-add_test(NAME performance_test_basic_ram_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --account-name "r" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/performance_tests/ramTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
-add_test(NAME performance_test_basic_read_only_trxs COMMAND tests/performance_tests/performance_test_basic.py -v --endpoint-mode http --producer-nodes 1 --validation-nodes 1 --api-nodes 1 --api-nodes-read-only-threads 2 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --account-name "payloadless" --abi-file payloadless.abi --wasm-file payloadless.wasm --contract-dir unittests/test-contracts/payloadless --user-trx-data-file tests/performance_tests/readOnlyTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
-set_property(TEST performance_test_bp PROPERTY LABELS long_running_tests)
-set_property(TEST performance_test_api PROPERTY LABELS long_running_tests)
-set_property(TEST performance_test_read_only_trxs PROPERTY LABELS long_running_tests)
-set_property(TEST performance_test_cpu_trx_spec PROPERTY LABELS long_running_tests)
-set_property(TEST performance_test_basic_p2p PROPERTY LABELS nonparallelizable_tests)
-set_property(TEST performance_test_basic_http PROPERTY LABELS nonparallelizable_tests)
-set_property(TEST performance_test_basic_transfer_trx_spec PROPERTY LABELS nonparallelizable_tests)
-set_property(TEST performance_test_basic_new_acct_trx_spec PROPERTY LABELS nonparallelizable_tests)
-set_property(TEST performance_test_basic_cpu_trx_spec PROPERTY LABELS nonparallelizable_tests)
-set_property(TEST performance_test_basic_ram_trx_spec PROPERTY LABELS nonparallelizable_tests)
-set_property(TEST performance_test_basic_read_only_trxs PROPERTY LABELS nonparallelizable_tests)
-
-add_subdirectory( NodeosPluginArgs )