diff --git a/.github/workflows/ARTIFACT_SUMMARY_TEMPLATE.md b/.github/workflows/ARTIFACT_SUMMARY_TEMPLATE.md
index e3060d17ab39a..69d366dc3d59a 100644
--- a/.github/workflows/ARTIFACT_SUMMARY_TEMPLATE.md
+++ b/.github/workflows/ARTIFACT_SUMMARY_TEMPLATE.md
@@ -7,7 +7,7 @@ export INSTALL_DIR_URL="${INSTALL_DIR_GCS_ARTIFACT}"
 export E2E_TEST_ARTIFACTS_DIR_URL="${E2E_TEST_ARTIFACTS_GCS_ARTIFACT_DIR}"
 # Directory of benchmark tool binaries:
 export BENCHMARK_TOOLS_DIR_URL="${BENCHMARK_TOOLS_GCS_ARTIFACT_DIR}"
-# Directory of execution benchmark results and traces:
+# Directory of execution benchmark results:
 export EXECUTION_BENCHMARK_RESULTS_DIR_URL="${EXECUTION_BENCHMARK_RESULTS_GCS_ARTIFACT_DIR}"
 # Compilation benchmark results:
 export COMPILATION_BENCHMARK_RESULTS_URL="${COMPILATION_BENCHMARK_RESULTS_GCS_ARTIFACT}"
@@ -37,7 +37,7 @@ gcloud storage cp -r "${E2E_TEST_ARTIFACTS_GCS_ARTIFACT_DIR}/*" /tmp/iree_e2e_te
 ```
 
 ```sh
-# Download execution benchmark results and traces
+# Download execution benchmark results
 mkdir /tmp/iree_benchmark_results
 gcloud storage cp -r "${EXECUTION_BENCHMARK_RESULTS_GCS_ARTIFACT_DIR}/*" /tmp/iree_benchmark_results
 ```
diff --git a/.github/workflows/benchmark_execution.yml b/.github/workflows/benchmark_execution.yml
index 6ad6595481a15..103ad3b43f85f 100644
--- a/.github/workflows/benchmark_execution.yml
+++ b/.github/workflows/benchmark_execution.yml
@@ -163,9 +163,7 @@ jobs:
          BENCHMARK_TOOLS_DIR: ${{ env.PLATFORM_ARCH }}-benchmark-tools-dir
        run: |
          tar -xf ${BENCHMARK_TOOLS_ARCHIVE}
-          echo "normal-benchmark-tools-dir=${BENCHMARK_TOOLS_DIR}/build/tools" >> "${GITHUB_OUTPUT}"
-          echo "traced-benchmark-tools-dir=${BENCHMARK_TOOLS_DIR}/build-traced/tools" >> "${GITHUB_OUTPUT}"
-          echo "tracy-capture-tool=${BENCHMARK_TOOLS_DIR}/build-traced/tracy-capture" >> "${GITHUB_OUTPUT}"
+          echo "benchmark-tools-dir=${BENCHMARK_TOOLS_DIR}/build/tools" >> "${GITHUB_OUTPUT}"
      - name: "Determine Shard Suffix"
        id: sharding
        run: |
@@ -178,13 +176,10 @@ jobs:
        env:
          IREE_EXECUTION_BENCHMARK_CONFIG: ${{ steps.download-assets.outputs.benchmark-config }}
          IREE_DOCKER_WRAPPER: ./build_tools/github_actions/docker_run.sh
-          IREE_NORMAL_BENCHMARK_TOOLS_DIR: ${{ steps.unpack-tools.outputs.normal-benchmark-tools-dir }}
-          IREE_TRACED_BENCHMARK_TOOLS_DIR: ${{ steps.unpack-tools.outputs.traced-benchmark-tools-dir }}
-          IREE_TRACY_CAPTURE_TOOL: ${{ steps.unpack-tools.outputs.tracy-capture-tool }}
+          IREE_BENCHMARK_TOOLS_DIR: ${{ steps.unpack-tools.outputs.benchmark-tools-dir }}
          IREE_TARGET_DEVICE_NAME: ${{ env.DEVICE_NAME }}
          IREE_SHARD_INDEX: ${{ matrix.benchmark.shard.index }}
          IREE_BENCHMARK_RESULTS: ${{ env.BENCHMARK_RESULTS_DIR }}/benchmark-results-${{ matrix.benchmark.device_name }}${{ steps.sharding.outputs.suffix }}.json
-          IREE_BENCHMARK_TRACES: ${{ env.BENCHMARK_RESULTS_DIR }}/benchmark-traces-${{ matrix.benchmark.device_name }}${{ steps.sharding.outputs.suffix }}.tar.gz
        run: |
          mkdir -p ${BENCHMARK_RESULTS_DIR}
          export IREE_E2E_TEST_ARTIFACTS_DIR="${E2E_TEST_ARTIFACTS_GCS_ARTIFACT_DIR/gs:\/\//https://storage.googleapis.com/}"
diff --git a/.github/workflows/build_benchmark_tools.yml b/.github/workflows/build_benchmark_tools.yml
index 5044eba98fd63..22c0224d6953e 100644
--- a/.github/workflows/build_benchmark_tools.yml
+++ b/.github/workflows/build_benchmark_tools.yml
@@ -69,17 +69,14 @@ jobs:
            docker_image: "gcr.io/iree-oss/base-bleeding-edge@sha256:c5f28883e6c570c20128fb37d7af3a00a25df3ce4e2b3a24c3a8dcd183182a27"
            # Builds tools on the host and assumes the builder is Linux x86_64.
            build_script: "./build_tools/cmake/build_runtime.sh"
-            tracy_capture: "gs://iree-shared-files/tracy-capture-linux-x86_64-af73dba7"
          - platform: "linux"
            arch: "riscv_64"
            docker_image: "gcr.io/iree-oss/riscv@sha256:62e87bad3405d691ddba6f9be0ef44eeb60461a467c8d86f0842c81a1f97da79"
            build_script: "./build_tools/cmake/build_riscv.sh"
-            tracy_capture: "gs://iree-shared-files/tracy-capture-linux-x86_64-af73dba7"
          - platform: "android"
            arch: "armv8.2-a"
            docker_image: "gcr.io/iree-oss/android@sha256:66b92a1c920588a73d03316f26025407ea754bab93e8f9bfe40dbf6ed5fe6c7e"
            build_script: "./build_tools/cmake/build_android.sh"
-            tracy_capture: "gs://iree-shared-files/tracy-capture-linux-arm_64-af73dba7"
    env:
      PLATFORM: ${{ matrix.target.platform }}
      ARCH: ${{ matrix.target.arch }}
@@ -116,15 +113,6 @@ jobs:
            --env "BUILD_PRESET=benchmark-with-tracing" \
            --env "IREE_HOST_BIN_DIR=${INSTALL_DIR}/bin" \
            "${DOCKER_IMAGE}" "${BUILD_SCRIPT}" "${BUILD_TOOLS_DIR}/build-traced"
-      - name: "Downloading pre-built tracy capture tool"
-        id: download-tracy-capture
-        env:
-          TRACY_CAPTURE_GCS_ARTIFACT: ${{ matrix.target.tracy_capture }}
-          TRACY_CAPTURE: ${{ env.BUILD_TOOLS_DIR }}/build-traced/tracy-capture
-        run: |
-          gcloud storage cp "${TRACY_CAPTURE_GCS_ARTIFACT}" "${TRACY_CAPTURE}"
-          chmod +x "${TRACY_CAPTURE}"
-          echo "tracy-capture=${TRACY_CAPTURE}" >> "${GITHUB_OUTPUT}"
      - name: "Creating the benchmark tools archive"
        # Here we pack a tracy-capture binary (~7MB) into each benchmark tools
        # archive. This could be wasteful because multiple benchmark tools
@@ -141,13 +129,11 @@ jobs:
        # into each benchmark tools archive.
        id: archive
        env:
-          TRACY_CAPTURE: ${{ steps.download-tracy-capture.outputs.tracy-capture }}
          BENCHMARK_TOOLS_ARCHIVE: ${{ env.PLATFORM }}-${{ env.ARCH }}-benchmark-tools.tar
        run: |
          tar -cf "${BENCHMARK_TOOLS_ARCHIVE}" \
            "${BUILD_TOOLS_DIR}"/*/tools/iree-benchmark-module \
-            "${BUILD_TOOLS_DIR}"/*/tools/build_config.txt \
-            "${TRACY_CAPTURE}"
+            "${BUILD_TOOLS_DIR}"/*/tools/build_config.txt
          echo "benchmark-tools-archive=${BENCHMARK_TOOLS_ARCHIVE}" >> "${GITHUB_OUTPUT}"
      - name: "Uploading the benchmark tools archive"
        id: upload
diff --git a/build_tools/benchmarks/common/benchmark_config.py b/build_tools/benchmarks/common/benchmark_config.py
index fcf44dec4b259..c90dd0718a641 100644
--- a/build_tools/benchmarks/common/benchmark_config.py
+++ b/build_tools/benchmarks/common/benchmark_config.py
@@ -14,24 +14,6 @@
 from common import benchmark_definition
 
 BENCHMARK_RESULTS_REL_PATH = "benchmark-results"
-CAPTURES_REL_PATH = "captures"
-
-
-@dataclass
-class TraceCaptureConfig:
-    """Represents the settings for capturing traces during benchamrking.
-
-    traced_benchmark_tool_dir: the path to the tracing-enabled benchmark tool
-      directory.
-    trace_capture_tool: the path to the tool for collecting captured traces.
-    capture_tarball: the path of capture tar archive.
-    capture_tmp_dir: the temporary directory to store captured traces.
-    """
-
-    traced_benchmark_tool_dir: pathlib.Path
-    trace_capture_tool: pathlib.Path
-    capture_tarball: pathlib.Path
-    capture_tmp_dir: pathlib.Path
 
 
 @dataclass
@@ -43,10 +25,7 @@ class BenchmarkConfig:
      benchmark suites.
    benchmark_results_dir: the directory to store benchmark results files.
    git_commit_hash: the git commit hash.
-    normal_benchmark_tool_dir: the path to the non-traced benchmark tool
-      directory.
-    trace_capture_config: the config for capturing traces. Set if and only if
-      the traces need to be captured.
+    benchmark_tool_dir: the path to the benchmark tool directory.
    driver_filter: filter benchmarks to those whose driver matches this regex
      (or all if this is None).
    model_name_filter: filter benchmarks to those whose model name matches this
@@ -68,8 +47,7 @@ class BenchmarkConfig:
    benchmark_results_dir: pathlib.Path
    git_commit_hash: str
 
-    normal_benchmark_tool_dir: Optional[pathlib.Path] = None
-    trace_capture_config: Optional[TraceCaptureConfig] = None
+    benchmark_tool_dir: Optional[pathlib.Path] = None
    driver_filter: Optional[str] = None
    model_name_filter: Optional[str] = None
 
@@ -93,31 +71,11 @@ def build_from_args(args: Namespace, git_commit_hash: str):
        def real_path_or_none(path: Optional[pathlib.Path]) -> Optional[pathlib.Path]:
            return path.resolve() if path else None
 
-        if not args.normal_benchmark_tool_dir and not args.traced_benchmark_tool_dir:
-            raise ValueError(
-                "At least one of --normal_benchmark_tool_dir or --traced_benchmark_tool_dir should be specified."
-            )
-        if not (
-            (args.traced_benchmark_tool_dir is None)
-            == (args.trace_capture_tool is None)
-            == (args.capture_tarball is None)
-        ):
-            raise ValueError(
-                "The following 3 flags should be simultaneously all specified or all unspecified: --traced_benchmark_tool_dir, --trace_capture_tool, --capture_tarball"
-            )
+        if not args.benchmark_tool_dir:
+            raise ValueError("--benchmark_tool_dir should be specified.")
 
        per_commit_tmp_dir: pathlib.Path = (args.tmp_dir / git_commit_hash).resolve()
 
-        if args.traced_benchmark_tool_dir is None:
-            trace_capture_config = None
-        else:
-            trace_capture_config = TraceCaptureConfig(
-                traced_benchmark_tool_dir=args.traced_benchmark_tool_dir.resolve(),
-                trace_capture_tool=args.trace_capture_tool.resolve(),
-                capture_tarball=args.capture_tarball.resolve(),
-                capture_tmp_dir=per_commit_tmp_dir / CAPTURES_REL_PATH,
-            )
-
        root_benchmark_dir = args.e2e_test_artifacts_dir
        # Convert the local path into Path object.
        if re.match("^[^:]+://", str(root_benchmark_dir)):
@@ -134,8 +92,7 @@ def real_path_or_none(path: Optional[pathlib.Path]) -> Optional[pathlib.Path]:
            root_benchmark_dir=root_benchmark_dir,
            benchmark_results_dir=per_commit_tmp_dir / BENCHMARK_RESULTS_REL_PATH,
            git_commit_hash=git_commit_hash,
-            normal_benchmark_tool_dir=real_path_or_none(args.normal_benchmark_tool_dir),
-            trace_capture_config=trace_capture_config,
+            benchmark_tool_dir=real_path_or_none(args.benchmark_tool_dir),
            driver_filter=args.driver_filter_regex,
            model_name_filter=args.model_name_regex,
            mode_filter=args.mode_regex,
diff --git a/build_tools/benchmarks/common/benchmark_config_test.py b/build_tools/benchmarks/common/benchmark_config_test.py
index e90454d8bd31c..ab76800e0895a 100644
--- a/build_tools/benchmarks/common/benchmark_config_test.py
+++ b/build_tools/benchmarks/common/benchmark_config_test.py
@@ -22,11 +22,6 @@ def setUp(self):
        self.e2e_test_artifacts_dir.mkdir()
        self.normal_tool_dir = self.build_dir / "normal_tool"
        self.normal_tool_dir.mkdir()
-        self.traced_tool_dir = self.build_dir / "traced_tool"
-        self.traced_tool_dir.mkdir()
-        self.trace_capture_tool = self.build_dir / "tracy_capture"
-        # Create capture tool with executable file mode.
-        self.trace_capture_tool.touch(mode=0o755)
        self.execution_config = self.build_dir / "execution_config.json"
        self.execution_config.touch()
 
@@ -38,10 +33,7 @@ def test_build_from_args(self):
        args = common_arguments.Parser().parse_args(
            [
                f"--tmp_dir={self.tmp_dir}",
-                f"--normal_benchmark_tool_dir={self.normal_tool_dir}",
-                f"--traced_benchmark_tool_dir={self.traced_tool_dir}",
-                f"--trace_capture_tool={self.trace_capture_tool}",
-                f"--capture_tarball=capture.tar",
+                f"--benchmark_tool_dir={self.normal_tool_dir}",
                f"--driver_filter_regex=a",
                f"--model_name_regex=b",
                f"--mode_regex=c",
@@ -60,12 +52,6 @@ def test_build_from_args(self):
        )
 
        per_commit_tmp_dir = self.tmp_dir / "abcd"
-        expected_trace_capture_config = benchmark_config.TraceCaptureConfig(
-            traced_benchmark_tool_dir=self.traced_tool_dir,
-            trace_capture_tool=pathlib.Path(self.trace_capture_tool).resolve(),
-            capture_tarball=pathlib.Path("capture.tar").resolve(),
-            capture_tmp_dir=per_commit_tmp_dir / "captures",
-        )
        expected_config = benchmark_config.BenchmarkConfig(
            tmp_dir=per_commit_tmp_dir,
            root_benchmark_dir=benchmark_definition.ResourceLocation.build_local_path(
@@ -73,8 +59,7 @@ def test_build_from_args(self):
            ),
            benchmark_results_dir=per_commit_tmp_dir / "benchmark-results",
            git_commit_hash="abcd",
-            normal_benchmark_tool_dir=self.normal_tool_dir,
-            trace_capture_config=expected_trace_capture_config,
+            benchmark_tool_dir=self.normal_tool_dir,
            driver_filter="a",
            model_name_filter="b",
            mode_filter="c",
@@ -89,7 +74,7 @@ def test_build_from_args_benchmark_only(self):
        args = common_arguments.Parser().parse_args(
            [
                f"--tmp_dir={self.tmp_dir}",
-                f"--normal_benchmark_tool_dir={self.normal_tool_dir}",
+                f"--benchmark_tool_dir={self.normal_tool_dir}",
                f"--e2e_test_artifacts_dir={self.e2e_test_artifacts_dir}",
                f"--execution_benchmark_config={self.execution_config}",
                "--target_device=test",
@@ -100,13 +85,11 @@ def test_build_from_args_benchmark_only(self):
            args=args, git_commit_hash="abcd"
        )
 
-        self.assertIsNone(config.trace_capture_config)
-
    def test_build_from_args_with_test_artifacts_dir_url(self):
        args = common_arguments.Parser().parse_args(
            [
                f"--tmp_dir={self.tmp_dir}",
-                f"--normal_benchmark_tool_dir={self.normal_tool_dir}",
+                f"--benchmark_tool_dir={self.normal_tool_dir}",
                f"--e2e_test_artifacts_dir=https://example.com/testdata",
                f"--execution_benchmark_config={self.execution_config}",
                "--target_device=test",
@@ -121,25 +104,6 @@ def test_build_from_args_with_test_artifacts_dir_url(self):
            config.root_benchmark_dir.get_url(), "https://example.com/testdata"
        )
 
-    def test_build_from_args_invalid_capture_args(self):
-        args = common_arguments.Parser().parse_args(
-            [
-                f"--tmp_dir={self.tmp_dir}",
-                f"--normal_benchmark_tool_dir={self.normal_tool_dir}",
-                f"--traced_benchmark_tool_dir={self.traced_tool_dir}",
-                f"--e2e_test_artifacts_dir={self.e2e_test_artifacts_dir}",
-                f"--execution_benchmark_config={self.execution_config}",
-                "--target_device=test",
-            ]
-        )
-
-        self.assertRaises(
-            ValueError,
-            lambda: benchmark_config.BenchmarkConfig.build_from_args(
-                args=args, git_commit_hash="abcd"
-            ),
-        )
-
 
 if __name__ == "__main__":
    unittest.main()
diff --git a/build_tools/benchmarks/common/benchmark_definition.py b/build_tools/benchmarks/common/benchmark_definition.py
index 73b6b03518b32..484f0a5fb4120 100644
--- a/build_tools/benchmarks/common/benchmark_definition.py
+++ b/build_tools/benchmarks/common/benchmark_definition.py
@@ -187,7 +187,6 @@ def get_iree_benchmark_module_arguments(
    driver_info: DriverInfo,
    benchmark_min_time: Optional[float] = None,
    dump_results: bool = True,
-    capture_mode: bool = False,
 ):
    """Returns the common arguments to run iree-benchmark-module."""
 
@@ -195,10 +194,6 @@ def get_iree_benchmark_module_arguments(
        # VMVX is very unoptimized for now and can take a long time to run.
        # Decrease the repetition for it until it's reasonably fast.
        repetitions = 3
-    elif capture_mode:
-        # Capture mode is slower and we just need enough repetition to collect
-        # trace after the warmup phase.
-        repetitions = 4
    else:
        repetitions = 10
 
diff --git a/build_tools/benchmarks/common/benchmark_driver.py b/build_tools/benchmarks/common/benchmark_driver.py
index 49b7fd6790251..e2914833f680f 100644
--- a/build_tools/benchmarks/common/benchmark_driver.py
+++ b/build_tools/benchmarks/common/benchmark_driver.py
@@ -36,7 +36,6 @@ def __init__(
        self.benchmark_grace_time = benchmark_grace_time
        self.verbose = verbose
        self.finished_benchmarks: List[Tuple[BenchmarkInfo, pathlib.Path]] = []
-        self.finished_captures: List[pathlib.Path] = []
        self.benchmark_errors = []
        self._seen_benchmark_names: Set[str] = set()
 
@@ -44,7 +43,6 @@ def run_benchmark_case(
        self,
        benchmark_case: BenchmarkCase,
        benchmark_results_filename: Optional[pathlib.Path],
-        capture_filename: Optional[pathlib.Path],
    ) -> None:
        """Runs the benchmark case and serializes the results.
 
@@ -52,8 +50,6 @@ def run_benchmark_case(
          benchmark_case: the benchmark_case.
          benchmark_results_filename: the path to store the serialized
            BenchmarkMetrics. Benchmarking is required if set.
-          capture_filename: the path to store captured trace. Trace capturing is
-            required if set.
 
        Raises:
          Exception during benchmarking.
@@ -66,14 +62,10 @@ def run(self) -> None:
        It performs the following steps:
        1. Enumerate and filter benchmark cases.
        2. Call 'run_benchmark_case' for each benchmark case.
-        3. Collect the benchmark results and captures.
+        3. Collect the benchmark results.
        """
 
        self.config.benchmark_results_dir.mkdir(parents=True, exist_ok=True)
-        if self.config.trace_capture_config is not None:
-            self.config.trace_capture_config.capture_tmp_dir.mkdir(
-                parents=True, exist_ok=True
-            )
 
        cpu_target_arch = self.device_info.get_cpu_arch()
        gpu_target_arch = self.device_info.get_gpu_arch()
@@ -128,7 +120,7 @@ def run(self) -> None:
            )
            self._seen_benchmark_names.add(benchmark_name)
 
-            results_path, capture_path = self.__get_output_paths(benchmark_name)
+            results_path = self.__get_output_paths(benchmark_name)
            # If we continue from the previous results, check and skip if the result
            # files exist.
            if self.config.continue_from_previous:
@@ -136,24 +128,18 @@ def run(self) -> None:
                    self.finished_benchmarks.append((benchmark_info, results_path))
                    results_path = None
 
-                if capture_path is not None and capture_path.exists():
-                    self.finished_captures.append(capture_path)
-                    capture_path = None
-
-            # Skip if no need to benchmark and capture.
-            if results_path is None and capture_path is None:
+            # Skip if no need to benchmark.
+            if results_path is None:
                continue
 
            print(f"--> Benchmark started: {benchmark_name} <--")
            try:
-                self.run_benchmark_case(benchmark_case, results_path, capture_path)
+                self.run_benchmark_case(benchmark_case, results_path)
            except Exception as e:
                # Delete unfinished results if they exist.
                if results_path is not None:
                    results_path.unlink(missing_ok=True)
-                if capture_path is not None:
-                    capture_path.unlink(missing_ok=True)
 
                if not self.config.keep_going:
                    raise e
@@ -169,8 +155,6 @@ def run(self) -> None:
 
            if results_path:
                self.finished_benchmarks.append((benchmark_info, results_path))
-            if capture_path:
-                self.finished_captures.append(capture_path)
 
    def get_benchmark_results(self) -> BenchmarkResults:
        """Returns the finished benchmark results."""
@@ -197,33 +181,20 @@ def get_benchmark_result_filenames(self) -> Sequence[pathlib.Path]:
        """Returns the json file paths of finished benchmarks."""
        return [path for info, path in self.finished_benchmarks]
 
-    def get_capture_filenames(self) -> Sequence[pathlib.Path]:
-        """Returns the tracy file paths of finished captures."""
-        return self.finished_captures
-
    def get_benchmark_errors(self):
        """Returns the exceptions captured during benchmarking."""
        return self.benchmark_errors
 
    def __get_output_paths(self, benchmark_name: str):
-        """Get output paths for the results and capture. The path of results/capture
-        is None if the benchmark/capture doesn't need to be run.
-        """
+        """Get output path for the results. The path is None if the benchmark doesn't need to be run."""
 
        benchmark_results_filename = None
-        if self.config.normal_benchmark_tool_dir:
+        if self.config.benchmark_tool_dir:
            benchmark_results_filename = (
                self.config.benchmark_results_dir / f"{benchmark_name}.json"
            )
 
-        capture_filename = None
-        if self.config.trace_capture_config:
-            capture_filename = (
-                self.config.trace_capture_config.capture_tmp_dir
-                / f"{benchmark_name}.tracy"
-            )
-
-        return (benchmark_results_filename, capture_filename)
+        return benchmark_results_filename
 
    def __get_benchmark_info_from_case(
        self, benchmark_case: BenchmarkCase
@@ -248,12 +219,7 @@ def __get_benchmark_info_from_case(
    def __get_available_drivers_and_loaders(
        self,
    ) -> Tuple[Sequence[str], Sequence[str]]:
-        any_tool_dir = (
-            self.config.normal_benchmark_tool_dir
-            if self.config.normal_benchmark_tool_dir
-            else self.config.trace_capture_config.traced_benchmark_tool_dir
-        )
-        config_txt_file_path = any_tool_dir / "build_config.txt"
+        config_txt_file_path = self.config.benchmark_tool_dir / "build_config.txt"
        config_txt_file_lines = config_txt_file_path.read_text().splitlines()
 
        available_drivers = []
diff --git a/build_tools/benchmarks/common/benchmark_driver_test.py b/build_tools/benchmarks/common/benchmark_driver_test.py
index 333b772d8b4e0..d25fdf0a3d0b1 100644
--- a/build_tools/benchmarks/common/benchmark_driver_test.py
+++ b/build_tools/benchmarks/common/benchmark_driver_test.py
@@ -38,7 +38,6 @@ def run_benchmark_case(
        self,
        benchmark_case: BenchmarkCase,
        benchmark_results_filename: Optional[pathlib.Path],
-        capture_filename: Optional[pathlib.Path],
    ) -> None:
        if self.raise_exception_on_case == benchmark_case:
            raise Exception("fake exception")
@@ -56,8 +55,6 @@ def run_benchmark_case(
            benchmark_results_filename.write_text(
                json.dumps(fake_benchmark_metrics.to_json_object())
            )
-        if capture_filename:
-            capture_filename.write_text("{}")
 
 
 class BenchmarkDriverTest(unittest.TestCase):
@@ -75,9 +72,7 @@ def setUp(self):
        self.benchmark_results_dir = (
            self.tmp_dir / benchmark_config.BENCHMARK_RESULTS_REL_PATH
        )
-        self.captures_dir = self.tmp_dir / benchmark_config.CAPTURES_REL_PATH
        self.benchmark_results_dir.mkdir()
-        self.captures_dir.mkdir()
 
        self.config = benchmark_config.BenchmarkConfig(
            tmp_dir=self.tmp_dir,
            root_benchmark_dir=benchmark_definition.ResourceLocation.build_local_path(
@@ -86,13 +81,7 @@ def setUp(self):
            ),
            benchmark_results_dir=self.benchmark_results_dir,
            git_commit_hash="abcd",
-            normal_benchmark_tool_dir=self.tmp_dir,
-            trace_capture_config=benchmark_config.TraceCaptureConfig(
-                traced_benchmark_tool_dir=self.tmp_dir,
-                trace_capture_tool=self.tmp_dir / "capture_tool",
-                capture_tarball=self.tmp_dir / "captures.tar",
-                capture_tmp_dir=self.captures_dir,
-            ),
+            benchmark_tool_dir=self.tmp_dir,
            use_compatible_filter=True,
        )
 
@@ -251,13 +240,6 @@ def test_run(self):
                self.benchmark_results_dir / f"{self.case2.run_config}.json",
            ],
        )
-        self.assertEqual(
-            driver.get_capture_filenames(),
-            [
-                self.captures_dir / f"{self.case1.run_config}.tracy",
-                self.captures_dir / f"{self.case2.run_config}.tracy",
-            ],
-        )
        self.assertEqual(driver.get_benchmark_errors(), [])
 
    def test_run_disable_compatible_filter(self):
@@ -270,17 +252,6 @@ def test_run_disable_compatible_filter(self):
 
        self.assertEqual(len(driver.get_benchmark_results().benchmarks), 3)
 
-    def test_run_with_no_capture(self):
-        self.config.trace_capture_config = None
-        driver = FakeBenchmarkDriver(
-            self.device_info, self.config, self.benchmark_suite
-        )
-
-        driver.run()
-
-        self.assertEqual(len(driver.get_benchmark_result_filenames()), 2)
-        self.assertEqual(driver.get_capture_filenames(), [])
-
    def test_run_with_exception_and_keep_going(self):
        self.config.keep_going = True
        driver = FakeBenchmarkDriver(
@@ -295,13 +266,11 @@ def test_run_with_exception_and_keep_going(self):
        self.assertEqual(len(driver.get_benchmark_errors()), 1)
        self.assertEqual(len(driver.get_benchmark_result_filenames()), 1)
 
-    def test_run_with_previous_benchmarks_and_captures(self):
+    def test_run_with_previous_benchmarks(self):
        benchmark_filename = (
            self.benchmark_results_dir / f"{self.case1.run_config}.json"
        )
        benchmark_filename.touch()
-        capture_filename = self.captures_dir / f"{self.case1.run_config}.tracy"
-        capture_filename.touch()
        config = dataclasses.replace(self.config, continue_from_previous=True)
        driver = FakeBenchmarkDriver(
            device_info=self.device_info,
@@ -313,7 +282,6 @@ def test_run_with_previous_benchmarks_and_captures(self):
 
        self.assertEqual(len(driver.run_benchmark_cases), 1)
        self.assertEqual(len(driver.get_benchmark_result_filenames()), 2)
-        self.assertEqual(len(driver.get_capture_filenames()), 2)
 
 
 if __name__ == "__main__":
diff --git a/build_tools/benchmarks/common/common_arguments.py b/build_tools/benchmarks/common/common_arguments.py
index d0839458c1c55..5fd3892189328 100644
--- a/build_tools/benchmarks/common/common_arguments.py
+++ b/build_tools/benchmarks/common/common_arguments.py
@@ -51,26 +51,12 @@ def __init__(self, *args, **kwargs):
 
        )
        self.add_argument(
-            "--normal_benchmark_tool_dir",
+            "--benchmark_tool_dir",
            "--normal-benchmark-tool-dir",
            type=_check_dir_path,
            default=None,
            help="Path to the normal (non-tracing) iree tool directory",
        )
-        self.add_argument(
-            "--traced_benchmark_tool_dir",
-            "--traced-benchmark-tool-dir",
-            type=_check_dir_path,
-            default=None,
-            help="Path to the tracing-enabled iree tool directory",
-        )
-        self.add_argument(
-            "--trace_capture_tool",
-            "--trace-capture-tool",
-            type=_check_exe_path,
-            default=None,
-            help="Path to the tool for collecting captured traces",
-        )
        self.add_argument(
            "--driver-filter-regex",
            "--driver_filter_regex",
@@ -99,13 +85,6 @@ def __init__(self, *args, **kwargs):
            type=pathlib.Path,
            help="Path to the output file",
        )
-        self.add_argument(
-            "--capture_tarball",
-            "--capture-tarball",
-            default=None,
-            type=pathlib.Path,
-            help="Path to the tarball for captures",
-        )
        self.add_argument(
            "--no-clean",
            action="store_true",
@@ -149,7 +128,7 @@ def __init__(self, *args, **kwargs):
            "--continue_from_previous",
            "--continue-from-previous",
            action="store_true",
-            help="Previous benchmark and capture results will be used and not "
+            help="Previous benchmark results will be used and not "
            "rerun if they are found in the benchmark results directory.",
        )
        self.add_argument(
diff --git a/build_tools/benchmarks/common/common_arguments_test.py b/build_tools/benchmarks/common/common_arguments_test.py
index 714a8b905cd80..5d1467bd74a27 100644
--- a/build_tools/benchmarks/common/common_arguments_test.py
+++ b/build_tools/benchmarks/common/common_arguments_test.py
@@ -20,11 +20,6 @@ def setUp(self):
        self.e2e_test_artifacts_dir.mkdir()
        self.normal_tool_dir = self.build_dir / "normal_tool"
        self.normal_tool_dir.mkdir()
-        self.traced_tool_dir = self.build_dir / "traced_tool"
-        self.traced_tool_dir.mkdir()
-        self.trace_capture_tool = self.build_dir / "tracy_capture"
-        # Create capture tool with executable file mode.
-        self.trace_capture_tool.touch(mode=0o755)
        self.execution_config = self.build_dir / "execution_config.json"
        self.execution_config.touch()
 
@@ -34,45 +29,19 @@ def tearDown(self):
    def test_parser(self):
        common.common_arguments.Parser().parse_args(
            [
-                f"--normal_benchmark_tool_dir={self.normal_tool_dir}",
-                f"--traced_benchmark_tool_dir={self.traced_tool_dir}",
-                f"--trace_capture_tool={self.trace_capture_tool}",
+                f"--benchmark_tool_dir={self.normal_tool_dir}",
                f"--e2e_test_artifacts_dir={self.e2e_test_artifacts_dir}",
                f"--execution_benchmark_config={self.execution_config}",
                "--target_device=test",
            ]
        )
 
-    def test_parser_check_normal_benchmark_tool(self):
+    def test_parser_check_benchmark_tool(self):
        arg_parser = common.common_arguments.Parser()
        with self.assertRaises(SystemExit):
            arg_parser.parse_args(
                [
-                    "--normal_benchmark_tool_dir=nonexistent",
-                    f"--e2e_test_artifacts_dir={self.e2e_test_artifacts_dir}",
-                    f"--execution_benchmark_config={self.execution_config}",
-                    "--target_device=test",
-                ]
-            )
-
-    def test_parser_check_traced_benchmark_tool(self):
-        arg_parser = common.common_arguments.Parser()
-        with self.assertRaises(SystemExit):
-            arg_parser.parse_args(
-                [
-                    "--traced_benchmark_tool_dir=nonexistent",
-                    f"--e2e_test_artifacts_dir={self.e2e_test_artifacts_dir}",
-                    f"--execution_benchmark_config={self.execution_config}",
-                    "--target_device=test",
-                ]
-            )
-
-    def test_parser_check_trace_capture_tool(self):
-        arg_parser = common.common_arguments.Parser()
-        with self.assertRaises(SystemExit):
-            arg_parser.parse_args(
-                [
-                    "--trace_capture_tool=nonexistent",
+                    "--benchmark_tool_dir=nonexistent",
                    f"--e2e_test_artifacts_dir={self.e2e_test_artifacts_dir}",
                    f"--execution_benchmark_config={self.execution_config}",
                    "--target_device=test",
diff --git a/build_tools/benchmarks/run_benchmarks.sh b/build_tools/benchmarks/run_benchmarks.sh
index 582e49349b895..f5e27ec9e48a3 100755
--- a/build_tools/benchmarks/run_benchmarks.sh
+++ b/build_tools/benchmarks/run_benchmarks.sh
@@ -11,7 +11,7 @@
 # build_tools/docker/docker_run.sh if IREE_DOCKER_WRAPPER is not specified. See
 # the script to learn about the required setup.
 #
-# IREE_NORMAL_BENCHMARK_TOOLS_DIR needs to point to a directory contains IREE
+# IREE_BENCHMARK_TOOLS_DIR needs to point to a directory that contains IREE
 # benchmark tools. See benchmarks/README.md for more information.
 #
 # Command line arguments:
@@ -20,20 +20,16 @@
 # 3. The target device name
 # 4. The shard index
 # 5. The path to write benchmark results
-# 6. The path to write benchmark traces
 
 set -euo pipefail
 
 DOCKER_WRAPPER="${IREE_DOCKER_WRAPPER:-./build_tools/docker/docker_run.sh}"
-NORMAL_BENCHMARK_TOOLS_DIR="${IREE_NORMAL_BENCHMARK_TOOLS_DIR}"
-TRACED_BENCHMARK_TOOLS_DIR="${IREE_TRACED_BENCHMARK_TOOLS_DIR}"
-TRACY_CAPTURE_TOOL="${IREE_TRACY_CAPTURE_TOOL}"
+BENCHMARK_TOOLS_DIR="${IREE_BENCHMARK_TOOLS_DIR}"
 E2E_TEST_ARTIFACTS_DIR="${1:-${IREE_E2E_TEST_ARTIFACTS_DIR}}"
 EXECUTION_BENCHMARK_CONFIG="${2:-${IREE_EXECUTION_BENCHMARK_CONFIG}}"
 TARGET_DEVICE_NAME="${3:-${IREE_TARGET_DEVICE_NAME}}"
 SHARD_INDEX="${4:-${IREE_SHARD_INDEX}}"
 BENCHMARK_RESULTS="${5:-${IREE_BENCHMARK_RESULTS}}"
-BENCHMARK_TRACES="${6:-${IREE_BENCHMARK_TRACES}}"
 
 if [[ "${TARGET_DEVICE_NAME}" == "a2-highgpu-1g" ]]; then
   ${DOCKER_WRAPPER} \
@@ -41,25 +37,18 @@ if [[ "${TARGET_DEVICE_NAME}" == "a2-highgpu-1g" ]]; then
    --env NVIDIA_DRIVER_CAPABILITIES=all \
    gcr.io/iree-oss/nvidia-bleeding-edge@sha256:81b3b5485f962c978bb7e5b2a6ded44ae4ef432048cafffe2b74fcf6dbe1bbca \
    ./build_tools/benchmarks/run_benchmarks_on_linux.py \
-    --normal_benchmark_tool_dir="${NORMAL_BENCHMARK_TOOLS_DIR}" \
+    --benchmark_tool_dir="${BENCHMARK_TOOLS_DIR}" \
    --e2e_test_artifacts_dir="${E2E_TEST_ARTIFACTS_DIR}" \
    --execution_benchmark_config="${EXECUTION_BENCHMARK_CONFIG}" \
    --target_device_name="${TARGET_DEVICE_NAME}" \
    --shard_index="${SHARD_INDEX}" \
    --output="${BENCHMARK_RESULTS}" \
    --verbose
-    # TODO(#16157): Renable tracy capture after fixing unresponsiveness.
-    # --traced_benchmark_tool_dir="${TRACED_BENCHMARK_TOOLS_DIR}" \
-    # --trace_capture_tool="${TRACY_CAPTURE_TOOL}" \
-    # --capture_tarball="${BENCHMARK_TRACES}" \
 elif [[ "${TARGET_DEVICE_NAME}" == "c2-standard-60" ]]; then
   ${DOCKER_WRAPPER} \
    gcr.io/iree-oss/base-bleeding-edge@sha256:c5f28883e6c570c20128fb37d7af3a00a25df3ce4e2b3a24c3a8dcd183182a27 \
    ./build_tools/benchmarks/run_benchmarks_on_linux.py \
-    --normal_benchmark_tool_dir="${NORMAL_BENCHMARK_TOOLS_DIR}" \
-    --traced_benchmark_tool_dir="${TRACED_BENCHMARK_TOOLS_DIR}" \
-    --trace_capture_tool="${TRACY_CAPTURE_TOOL}" \
-    --capture_tarball="${BENCHMARK_TRACES}" \
+    --benchmark_tool_dir="${BENCHMARK_TOOLS_DIR}" \
    --e2e_test_artifacts_dir="${E2E_TEST_ARTIFACTS_DIR}" \
    --execution_benchmark_config="${EXECUTION_BENCHMARK_CONFIG}" \
    --target_device_name="${TARGET_DEVICE_NAME}" \
@@ -70,10 +59,7 @@ elif [[ "${TARGET_DEVICE_NAME}" == "c2-standard-60" ]]; then
    --verbose
 elif [[ "${TARGET_DEVICE_NAME}" =~ ^(pixel-4|pixel-6-pro|moto-edge-x30)$ ]]; then
   ./build_tools/benchmarks/run_benchmarks_on_android.py \
-    --normal_benchmark_tool_dir="${NORMAL_BENCHMARK_TOOLS_DIR}" \
-    --traced_benchmark_tool_dir="${TRACED_BENCHMARK_TOOLS_DIR}" \
-    --trace_capture_tool="${TRACY_CAPTURE_TOOL}" \
-    --capture_tarball="${BENCHMARK_TRACES}" \
+    --benchmark_tool_dir="${BENCHMARK_TOOLS_DIR}" \
    --e2e_test_artifacts_dir="${E2E_TEST_ARTIFACTS_DIR}" \
    --execution_benchmark_config="${EXECUTION_BENCHMARK_CONFIG}" \
    --target_device_name="${TARGET_DEVICE_NAME}" \
diff --git a/build_tools/benchmarks/run_benchmarks_on_android.py b/build_tools/benchmarks/run_benchmarks_on_android.py
index 8d393cb002374..5b1277db4eaa8 100755
--- a/build_tools/benchmarks/run_benchmarks_on_android.py
+++ b/build_tools/benchmarks/run_benchmarks_on_android.py
@@ -7,26 +7,17 @@
 """Runs all matched benchmark suites on an Android device.
 
 This script probes the Android phone via `adb` and uses the device information
-to filter and run suitable benchmarks and optionally captures Tracy traces on
-the Android phone.
+to filter and run suitable benchmarks on the Android device.
 
 It expects that `adb` is installed, and there is iree tools cross-compiled
-towards Android. If to capture traces, another set of tracing-enabled iree
-tools and the Tracy `capture` tool should be cross-compiled towards Android.
+towards Android.
 
-Example usages:
+Example usage:
 
-  # Without trace generation
   python3 run_benchmarks.py \
-    --normal_benchmark_tool_dir=/path/to/normal/android/target/tools/dir \
+    --benchmark_tool_dir=/path/to/normal/android/target/tools/dir \
    /path/to/host/build/dir
 
-  # With trace generation
-  python3 run_benchmarks.py \
-    --normal_benchmark_tool_dir=/path/to/normal/android/target/tools/dir \
-    --traced_benchmark_tool_dir=/path/to/tracy/android/target/tools/dir \
-    --trace_capture_tool=/path/to/host/build/tracy/capture \
-    /path/to/host/build/dir
 """
 
 import sys
@@ -73,7 +64,6 @@
 ANDROID_TMPDIR = pathlib.PurePosixPath("/data/local/tmp/iree-benchmarks")
 ADB_SERVER_ADDR = ("localhost", 5037)
 ANDROID_NORMAL_TOOL_DIR = ANDROID_TMPDIR / "normal-tools"
-ANDROID_TRACED_TOOL_DIR = ANDROID_TMPDIR / "traced-tools"
 ANDROID_TRACY_PORT = 8086
 
 
@@ -279,7 +269,6 @@ def run_benchmark_case(
        self,
        benchmark_case: BenchmarkCase,
        benchmark_results_filename: Optional[pathlib.Path],
-        capture_filename: Optional[pathlib.Path],
    ) -> None:
        module_rel_dir = iree_artifacts.get_module_dir_path(
            benchmark_case.run_config.module_generation_config
@@ -329,11 +318,11 @@ def run_benchmark_case(
        run_args.append(f"--module={module_device_path}")
 
        if benchmark_results_filename is not None:
-            if self.config.normal_benchmark_tool_dir is None:
-                raise ValueError("normal_benchmark_tool_dir can't be None.")
+            if self.config.benchmark_tool_dir is None:
+                raise ValueError("benchmark_tool_dir can't be None.")
            if expected_outputs_dir:
                self.__run_verify(
-                    host_tool_dir=self.config.normal_benchmark_tool_dir,
+                    host_tool_dir=self.config.benchmark_tool_dir,
                    run_args=run_args,
                    expected_outputs_dir=expected_outputs_dir,
                    verify_params=benchmark_case.verify_params,
@@ -341,27 +330,13 @@ def run_benchmark_case(
                )
 
            self.__run_benchmark(
-                host_tool_dir=self.config.normal_benchmark_tool_dir,
+                host_tool_dir=self.config.benchmark_tool_dir,
                benchmark_case=benchmark_case,
                run_args=run_args,
                results_filename=benchmark_results_filename,
                taskset=taskset,
            )
 
-        if capture_filename is not None:
-            capture_config = self.config.trace_capture_config
-            if capture_config is None:
-                raise ValueError("Trace capture config can't be None.")
-
-            self.__run_capture(
-                host_tool_dir=capture_config.traced_benchmark_tool_dir,
-                trace_capture_tool=capture_config.trace_capture_tool,
-                benchmark_case=benchmark_case,
-                run_args=run_args,
-                capture_filename=capture_filename,
-                taskset=taskset,
-            )
-
    def __run_verify(
        self,
        host_tool_dir: pathlib.Path,
        run_args: Sequence[str],
@@ -410,52 +385,6 @@ def __run_benchmark(
        print(benchmark_metrics)
        results_filename.write_text(json.dumps(benchmark_metrics.to_json_object()))
 
-    def __run_capture(
-        self,
-        host_tool_dir: pathlib.Path,
-        trace_capture_tool: pathlib.Path,
-        benchmark_case: BenchmarkCase,
-        run_args: Sequence[str],
-        capture_filename: pathlib.Path,
-        taskset: str,
-    ):
-        tool_name = benchmark_case.benchmark_tool_name
-        device_tool = self.__check_and_push_file(
-            host_tool_dir / tool_name, ANDROID_TRACED_TOOL_DIR
-        )
-        run_cmd = [
-            "TRACY_NO_EXIT=1",
-            f"IREE_PRESERVE_DYLIB_TEMP_FILES={ANDROID_TMPDIR}",
-            "taskset",
-            taskset,
-            device_tool,
-        ]
-        run_cmd += run_args
-        if tool_name == "iree-benchmark-module":
-            run_cmd += get_iree_benchmark_module_arguments(
-                driver_info=benchmark_case.driver_info,
-                benchmark_min_time=self.config.benchmark_min_time,
-                dump_results=False,
-                capture_mode=True,
-            )
-
-        # Just launch the traced benchmark tool with TRACY_NO_EXIT=1 without
-        # waiting for the adb command to complete as that won't happen.
-        process = adb_start_cmd(run_cmd, verbose=self.verbose)
-
-        wait_for_iree_benchmark_module_start(process, self.verbose)
-
-        # Now it's okay to collect the trace via the capture tool. This will
-        # send the signal to let the previously waiting benchmark tool to
-        # complete.
-        capture_cmd = [trace_capture_tool, "-f", "-o", capture_filename]
-        # If verbose, just let the subprocess print its output. The subprocess
-        # may need to detect if the output is a TTY to decide whether to log
-        # verbose progress info and use ANSI colors, so it's better to use
-        # stdout redirection than to capture the output in a string.
-        stdout_redirect = None if self.verbose else subprocess.DEVNULL
-        execute_cmd(capture_cmd, verbose=self.verbose, stdout=stdout_redirect)
-
    def __deduce_taskset_from_run_config(
        self, run_config: iree_definitions.E2EModelRunConfig
    ) -> str:
@@ -600,10 +529,6 @@ def main(args):
    # Also clear temporary directory on the host device.
    atexit.register(shutil.rmtree, args.tmp_dir)
 
-    trace_capture_config = benchmark_config.trace_capture_config
-    if trace_capture_config:
-        add_port_forwarding(port=ANDROID_TRACY_PORT, verbose=args.verbose)
-
    benchmark_driver.run()
 
    benchmark_results = benchmark_driver.get_benchmark_results()
@@ -615,12 +540,6 @@ def main(args):
        print(benchmark_results.commit)
        print(benchmark_results.benchmarks)
 
-    if trace_capture_config:
-        # Put all captures in a tarball and remove the original files.
-        with tarfile.open(trace_capture_config.capture_tarball, "w:gz") as tar:
-            for capture_filename in benchmark_driver.get_capture_filenames():
-                tar.add(capture_filename)
-
    benchmark_errors = benchmark_driver.get_benchmark_errors()
    if benchmark_errors:
        print("Benchmarking completed with errors", file=sys.stderr)
diff --git a/build_tools/benchmarks/run_benchmarks_on_linux.py b/build_tools/benchmarks/run_benchmarks_on_linux.py
index 618dbdb164d87..0adc89a18f642 100755
--- a/build_tools/benchmarks/run_benchmarks_on_linux.py
+++ b/build_tools/benchmarks/run_benchmarks_on_linux.py
@@ -50,7 +50,6 @@ def run_benchmark_case(
        self,
        benchmark_case: BenchmarkCase,
        benchmark_results_filename: Optional[pathlib.Path],
-        capture_filename: Optional[pathlib.Path],
    ) -> None:
        module_dir = benchmark_case.module_dir
        local_module_dir = module_dir.get_local_path()
@@ -91,14 +90,14 @@ def run_benchmark_case(
            external_params.append(param_arg)
 
        if benchmark_results_filename:
-            if self.config.normal_benchmark_tool_dir is None:
-                raise ValueError("normal_benchmark_tool_dir can't be None.")
+            if self.config.benchmark_tool_dir is None:
+                raise ValueError("benchmark_tool_dir can't be None.")
 
            if self.config.verify and expected_output_dir:
                if not inputs_dir:
                    raise ValueError(f"Input data is missing for {benchmark_case}.")
                self.__run_verify(
-                    tool_dir=self.config.normal_benchmark_tool_dir,
+                    tool_dir=self.config.benchmark_tool_dir,
                    benchmark_case=benchmark_case,
                    module_path=module_path,
                    inputs_dir=inputs_dir,
@@ -107,21 +106,13 @@ def run_benchmark_case(
                )
 
            self.__run_benchmark(
-                tool_dir=self.config.normal_benchmark_tool_dir,
+                tool_dir=self.config.benchmark_tool_dir,
                benchmark_case=benchmark_case,
                module_path=module_path,
                results_filename=benchmark_results_filename,
                external_params=external_params,
            )
 
-        if capture_filename:
-            self.__run_capture(
-                benchmark_case=benchmark_case,
-                module_path=module_path,
-                capture_filename=capture_filename,
-                external_params=external_params,
-            )
-
    def __build_tool_cmds(
        self,
        benchmark_case: BenchmarkCase,
@@ -227,45 +218,6 @@ def __run_benchmark(
        print(benchmark_metrics)
        results_filename.write_text(json.dumps(benchmark_metrics.to_json_object()))
 
-    def __run_capture(
-        self,
-        benchmark_case: BenchmarkCase,
-        module_path: pathlib.Path,
-        capture_filename: pathlib.Path,
-        external_params: Sequence[str] = (),
-    ):
-        capture_config = self.config.trace_capture_config
-        if capture_config is None:
-            raise ValueError("capture_config can't be None.")
-
-        tool_name = benchmark_case.benchmark_tool_name
-        cmd = self.__build_tool_cmds(
-            benchmark_case=benchmark_case,
-            tool_path=capture_config.traced_benchmark_tool_dir / tool_name,
-            module_path=module_path,
-            external_params=external_params,
-        )
-
-        if tool_name == "iree-benchmark-module":
-            cmd.extend(
-                get_iree_benchmark_module_arguments(
-                    driver_info=benchmark_case.driver_info,
-                    benchmark_min_time=self.config.benchmark_min_time,
-                    dump_results=False,
-                    capture_mode=True,
-                )
-            )
-
-        process = subprocess.Popen(
-            cmd, env={"TRACY_NO_EXIT": "1"}, stdout=subprocess.PIPE, text=True
-        )
-
-        wait_for_iree_benchmark_module_start(process, self.verbose)
-
-        capture_cmd = [capture_config.trace_capture_tool, "-f", "-o", capture_filename]
-        stdout_redirect = None if self.verbose else subprocess.DEVNULL
-        execute_cmd(capture_cmd, verbose=self.verbose, stdout=stdout_redirect)
-
 
 def main(args):
    device_info = get_linux_device_info(
@@ -313,13 +265,6 @@ def main(args):
        print(benchmark_results.commit)
        print(benchmark_results.benchmarks)
 
-    trace_capture_config = benchmark_config.trace_capture_config
-    if trace_capture_config:
-        # Put all captures in a tarball and remove the original files.
-        with tarfile.open(trace_capture_config.capture_tarball, "w:gz") as tar:
-            for capture_filename in benchmark_driver.get_capture_filenames():
-                tar.add(capture_filename)
-
    benchmark_errors = benchmark_driver.get_benchmark_errors()
    if benchmark_errors:
        print("Benchmarking completed with errors", file=sys.stderr)
diff --git a/docs/website/docs/developers/performance/benchmark-suites.md b/docs/website/docs/developers/performance/benchmark-suites.md
index 1bfe0225902a4..de3501f78c980 100644
--- a/docs/website/docs/developers/performance/benchmark-suites.md
+++ b/docs/website/docs/developers/performance/benchmark-suites.md
@@ -110,17 +110,13 @@ Run benchmarks (currently only support running on a Linux host):
 
 ```sh
 build_tools/benchmarks/run_benchmarks_on_linux.py \
-  --normal_benchmark_tool_dir="${IREE_BUILD_DIR?}/tools" \
+  --benchmark_tool_dir="${IREE_BUILD_DIR?}/tools" \
  --e2e_test_artifacts_dir="${E2E_TEST_ARTIFACTS_DIR?}" \
  --execution_benchmark_config="${E2E_TEST_ARTIFACTS_DIR?}/exec_config.json" \
  --target_device_name="" \
  --output="${E2E_TEST_ARTIFACTS_DIR?}/benchmark_results.json" \
  --verbose \
  --cpu_uarch=""
-# Traces can be collected by adding:
-# --traced_benchmark_tool_dir="${IREE_TRACED_BUILD_DIR?}/tools" \
-# --trace_capture_tool=/path/to/iree-tracy-capture \
-# --capture_tarball=captured_tracy_files.tar.gz
 ```
 
 Note that:
@@ -133,14 +129,12 @@ Note that:
  [build_tools/python/e2e_test_framework/device_specs](https://github.com/iree-org/iree/tree/main/build_tools/python/e2e_test_framework/device_specs).
 - To run x86_64 benchmarks, right now `--cpu_uarch` needs to be provided and
  only `CascadeLake` is available currently.
-- To build traced benchmark tools, see
-  [Profiling with Tracy](profiling-with-tracy.md).
 
 Filters can be used to select the benchmarks:
 
 ```sh
 build_tools/benchmarks/run_benchmarks_on_linux.py \
-  --normal_benchmark_tool_dir="${IREE_BUILD_DIR?}/tools" \
+  --benchmark_tool_dir="${IREE_BUILD_DIR?}/tools" \
  --e2e_test_artifacts_dir="${E2E_TEST_ARTIFACTS_DIR?}" \
  --execution_benchmark_config="${E2E_TEST_ARTIFACTS_DIR?}/exec_config.json" \
  --target_device_name="c2-standard-60" \
@@ -303,7 +297,7 @@ gcloud storage cp \
  "${E2E_TEST_ARTIFACTS_DIR?}/comp_config.json"
 ```
 
-Benchmark raw results and traces can be downloaded at:
+Benchmark raw results can be downloaded at:
 
 ```sh
 # Execution benchmark raw results
@@ -312,9 +306,6 @@ gcloud storage cp "${EXECUTION_BENCHMARK_RESULTS_DIR_URL?}/benchmark-results-*.j
 
 # Optional: Merge raw results into a single file
 build_tools/benchmarks/benchmark_helper.py merge-results benchmark-results-*.json > benchmark_results.json
 
-# Execution benchmark traces
-gcloud storage cp "${EXECUTION_BENCHMARK_RESULTS_DIR_URL?}/benchmark-traces-*.tar.gz" .
-
 # Compilation benchmark results
 gcloud storage cp "${COMPILATION_BENCHMARK_RESULTS_URL?}" .
 ```