name: Performance Benchmark

on:
  workflow_dispatch: # run on request (no need for PR)
    inputs:
      model-category:
        type: choice
        description: Model category to run benchmark
        options:
          - speed
          - balance
          - accuracy
          - default # speed, balance, accuracy models only
          - all # default + other models
        default: all
      data-group:
        type: choice
        description: Data group to run benchmark
        options:
          - small
          - medium
          - large
          - all
        default: all
      num-repeat:
        description: Overrides the default per-data-group number of repeats
        default: 0
      num-epoch:
        description: Overrides the default per-model number of epochs
        default: 0
      eval-upto:
        type: choice
        description: The last operation to evaluate. 'optimize' means all.
        options:
          - train
          - export
          - optimize
        default: optimize
      pytest-args:
        type: string
        description: |
          Additional perf-benchmark pytest arguments.
          "-k detection" -> detection task only
          "--dry-run" -> print commands without execution.
      data-root:
        type: string
        description: Root directory containing validation data on the CI server.
        default: "/home/validation/data/v2/"
      otx-ref:
        type: string
        description: |
          Target OTX ref (tag / branch name / commit hash) on the main repo to test. Defaults to the current branch.
          `pip install otx[full]@https://github.com/openvinotoolkit/training_extensions.git@{otx_ref}` will be executed before the run,
          and reverted after the run. Works only for v2.x, assuming CLI compatibility.
        default: __CURRENT_BRANCH_COMMIT__
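  # Illustrative manual trigger via the GitHub CLI (a sketch; assumes `gh` is installed
  # and authenticated, and addresses the workflow by its display name):
  #   gh workflow run "Performance Benchmark" -f model-category=speed -f data-group=small -f pytest-args="-k detection"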
  workflow_call:
    inputs:
      model-category:
        type: string
        description: Model category to run benchmark [speed, balance, accuracy, default, all]
        default: default
      data-group:
        type: string
        description: Data group to run benchmark [small, medium, large, all]
        default: all
      num-repeat:
        type: number
        description: Overrides the default per-data-group number of repeats
        default: 0
      num-epoch:
        type: number
        description: Overrides the default per-model number of epochs
        default: 0
      eval-upto:
        type: string
        description: The last operation to evaluate. 'optimize' means all. [train, export, optimize]
        default: optimize
      pytest-args:
        type: string
        description: |
          Additional perf-benchmark pytest arguments.
          "-k detection" -> detection task only
          "--dry-run" -> print commands without execution.
      data-root:
        type: string
        description: Root directory containing validation data on the CI server.
        default: "/home/validation/data/v2/"
      otx-ref:
        type: string
        description: |
          Target OTX ref (tag / branch name / commit hash) on the main repo to test. Defaults to the current branch.
          `pip install otx[full]@https://github.com/openvinotoolkit/training_extensions.git@{otx_ref}` will be executed before the run,
          and reverted after the run. Works only for v2.x, assuming CLI compatibility.
        default: __CURRENT_BRANCH_COMMIT__
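  # Illustrative caller for the workflow_call trigger (a sketch; the workflow file path
  # below is an assumption, not taken from this file):
  #   jobs:
  #     perf-benchmark:
  #       uses: ./.github/workflows/perf_benchmark.yaml
  #       with:
  #         model-category: speed
  #         data-group: small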
# Declare default permissions as read only.
permissions: read-all

jobs:
  Perf-Benchmark-Summary:
    if: ${{ always() }}
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
      - name: Download benchmark results
        # Checkout must come first: actions/checkout cleans the workspace and would
        # otherwise delete the downloaded artifacts.
        uses: actions/download-artifact@v4
        with:
          run-id: 8699060888
          github-token: ${{ secrets.GITHUB_TOKEN }}
          path: tests/perf/history/latest
      - name: Install Python
        uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
        with:
          python-version: "3.10"
      - name: Install dependencies
        run: python -m pip install --upgrade pip pandas matplotlib nbconvert ipython ipykernel openpyxl
      - name: Summarize benchmark results
        run: |
          python tests/perf/history/summary.py tests/perf/history ./perf-benchmark-summary --pattern "*raw*.csv" --normalize
          jupyter nbconvert --execute --to html --no-input tests/perf/history/summary.ipynb --output-dir ./perf-benchmark-summary --output perf-benchmark-summary
      - name: Upload benchmark summary
        uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
        with:
          name: perf-benchmark-summary
          path: perf-benchmark-summary
        # Use always() to always run this step to publish test results when there are test failures
        if: ${{ always() }}
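      # Illustrative retrieval of the published summary artifact (requires the GitHub CLI;
      # <run-id> is a placeholder for an actual workflow run id):
      #   gh run download <run-id> --name perf-benchmark-summary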