Trigger performance test after successful CI run. (#818)
jefchien authored Dec 23, 2021
1 parent 76d949a commit c926da5
Showing 4 changed files with 71 additions and 135 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/CI.yml
@@ -1048,12 +1048,12 @@ jobs:
             version:
               - 'VERSION'
-      - name: Trigger soaking
+      - name: Trigger performance test
         if: steps.filter.outputs.version == 'true'
         uses: peter-evans/repository-dispatch@v1.1.1
         with:
           token: "${{ secrets.REPO_WRITE_ACCESS_TOKEN }}"
-          event-type: bump-version
+          event-type: trigger-perf
           client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}"}'

   clean:
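Note on the mechanism: peter-evans/repository-dispatch fires a repository_dispatch event through GitHub's REST API, and any workflow in the repository whose repository_dispatch trigger lists the matching type (here trigger-perf) starts a run with the payload available as github.event.client_payload. A minimal sketch of the equivalent API call, assuming a personal access token with repo scope (function and variable names are illustrative, not part of this commit):

import json
import urllib.request

def send_repository_dispatch(owner, repo, token, event_type, client_payload):
    # POST /repos/{owner}/{repo}/dispatches fires a repository_dispatch event;
    # GitHub responds 204 No Content on success.
    request = urllib.request.Request(
        url=f"https://api.github.com/repos/{owner}/{repo}/dispatches",
        data=json.dumps({
            "event_type": event_type,
            "client_payload": client_payload,
        }).encode("utf-8"),
        headers={
            "Accept": "application/vnd.github.v3+json",
            "Authorization": f"token {token}",
        },
        method="POST",
    )
    with urllib.request.urlopen(request) as response:
        return response.status  # expect 204

The step above is roughly equivalent to calling send_repository_dispatch("aws-observability", "aws-otel-collector", token, "trigger-perf", {"ref": github_ref, "sha": github_sha}).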
61 changes: 31 additions & 30 deletions .github/workflows/perf.yml
@@ -13,12 +13,9 @@

 name: 'Performance test'
 on:
-  schedule:
-    - cron: '0 9 * * 0' # Sunday at 9 am UTC: pst 1 am.
-
-  # we can manually trigger this workflow by using dispatch for debuging
+  # we can manually trigger this workflow by using dispatch for debugging
   repository_dispatch:
-    types: [manual-perf]
+    types: [trigger-perf]
   workflow_dispatch:
     inputs:
       sha:
@@ -27,7 +24,11 @@ on:

 env:
   TF_VAR_aws_access_key_id: ${{ secrets.INTEG_TEST_AWS_KEY_ID }}
-  TF_VAR_aws_secret_access_key: ${{ secrets.INTEG_TEST_AWS_KEY_SECRET }}
+  TF_VAR_aws_secret_access_key: ${{ secrets.INTEG_TEST_AWS_KEY_SECRET }}
+  GH_PAGES_BRANCH: gh-pages
+  MAX_BENCHMARKS_TO_KEEP: 100
+  COMMIT_USER: Github Actions
+  COMMIT_EMAIL: actions@github.com

 jobs:
   get-testing-version:
@@ -148,28 +149,28 @@ jobs:
       with:
         path: artifacts

-      - name: Produce performance model table
-        run: python e2etest/get-performance-model-table.py
-
-      - name: Create a new branch
-        uses: peterjgrainger/action-create-branch@v2.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          branch: "perf-test-${{ github.run_id }}"
-
-      - name: Commit to a branch
-        uses: stefanzweifel/git-auto-commit-action@v4
-        with:
-          commit_message: "Update benchmarking"
-          branch: "perf-test-${{ github.run_id }}"
-          file_pattern: docs/performance_model.md
-
-      - name: pull-request
-        uses: repo-sync/pull-request@v2
+      - name: Produce performance report
+        run: python e2etest/get-performance-model-table.py -v ${{ needs.get-testing-version.outputs.testing_version }}
+
+      # Uses github-action-benchmark to update historic benchmark data
+      # Temporarily using forked action in order to pass in commit SHA
+      - name: Store benchmark result
+        uses: jefchien/github-action-benchmark@v1.11.12-alpha2
         with:
-          source_branch: "perf-test-${{ github.run_id }}"
-          destination_branch: "main"
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-          pr_title: "Update Performance Model"
-          pr_body: "Generated by performance test workflow [#${{ github.run_number }}](https://github.com/aws-observability/aws-otel-collector/actions/runs/${{ github.run_id }}) using https://github.com/aws-observability/aws-otel-collector/commit/${{ needs.get-testing-version.outputs.commit_id }}."
+          tool: "customSmallerIsBetter"
+          output-file-path: performance-data.json
+          benchmark-data-dir-path: benchmark/trend
+          max-items-in-chart: ${{ env.MAX_BENCHMARKS_TO_KEEP }}
+          gh-pages-branch: ${{ env.GH_PAGES_BRANCH }}
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          commit-sha: ${{ github.event.inputs.sha }}
+          auto-push: false
+
+      - name: Commit to gh-pages branch
+        run: |
+          git switch ${{ env.GH_PAGES_BRANCH }}
+          rsync -avv performance-report.md benchmark/report.md
+          rsync -avv performance-data.json benchmark/data/
+          git add benchmark/data/* benchmark/report.md
+          git -c user.name="${{ env.COMMIT_USER }}" -c user.email="${{ env.COMMIT_EMAIL }}" commit --amend --reset-author -m "Update benchmarking for ${{ github.event.inputs.sha }}"
+          git push origin ${{ env.GH_PAGES_BRANCH }}:${{ env.GH_PAGES_BRANCH }}
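How the last two steps compose: with auto-push: false, the benchmark action commits the updated trend data to the local gh-pages checkout but does not push, so the follow-up step can amend that same commit with the rendered report and raw data and push everything once. The customSmallerIsBetter tool reads performance-data.json as a JSON array of name/value/unit/extra entries, matching what get_benchmark_entry in this commit emits. A sketch of the expected shape (testcase name and numbers invented for illustration):

[
    {
        "name": "datapoint_exporter",
        "value": 7.4,
        "unit": "%",
        "extra": "metric (TPS: 100) - Average CPU Usage"
    },
    {
        "name": "datapoint_exporter",
        "value": 68.2,
        "unit": "MB",
        "extra": "metric (TPS: 100) - Average Memory Usage"
    }
]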
92 changes: 0 additions & 92 deletions docs/performance_model.md

This file was deleted.

49 changes: 38 additions & 11 deletions e2etest/get-performance-model-table.py
@@ -1,9 +1,9 @@
-import os
-import json
-import sys
-from pathlib import Path
+import argparse
+import jinja2
+import json
+import os
+import string
+import sys

# Schema: performance_models[data_mode][tps] = [model]
performance_models = {}
@@ -36,7 +36,7 @@ def add_performance_model(model):
performance_models[data_mode][data_rate].append(model)


-def flatten_performance_models(models):
+def flatten_performance_models():
"""
Flattens performance model into list of grouped models where each group
corresponds to a table in the report.
@@ -58,13 +58,36 @@ def flatten_performance_models(models):
x["data_mode"], x["data_rate"]))
return models_list

+def get_benchmark_entry(model, data_mode, data_rate, value_field, unit, subgroup):
+    benchmark_entry = {}
+    benchmark_entry["name"] = model["testcase"]
+    benchmark_entry["value"] = model[value_field]
+    benchmark_entry["unit"] = unit
+    benchmark_entry["extra"] = f"{data_mode} (TPS: {data_rate}) - {subgroup}"
+    return benchmark_entry
+
+def get_benchmark_data():
+    """
+    Splits models by testcase and groups by data mode, data rate, and field type.
+    """
+    benchmark_data = []
+
+    for data_mode, data_rates in performance_models.items():
+        for data_rate, models in data_rates.items():
+            for model in models:
+                benchmark_data.append(get_benchmark_entry(model, data_mode, data_rate, "avgCpu", "%", "Average CPU Usage"))
+                benchmark_data.append(get_benchmark_entry(model, data_mode, data_rate, "avgMem", "MB", "Average Memory Usage"))
+
+    return benchmark_data

 if __name__ == "__main__":
-    aoc_version = Path('VERSION').read_text()
+    parser = argparse.ArgumentParser("Generate performance-report.md and performance-data.json from artifacts")
+    parser.add_argument('-v', '--version', help="version to tag the report with", required=True)
+    args = parser.parse_args()
+    aoc_version = args.version

-    from jinja2 import Environment, PackageLoader, select_autoescape
     templateLoader = jinja2.FileSystemLoader(searchpath="e2etest/templates/")
-    env = Environment(autoescape=select_autoescape(['html', 'xml', 'tpl', 'yaml', 'yml']), loader=templateLoader)
+    env = jinja2.Environment(autoescape=jinja2.select_autoescape(['html', 'xml', 'tpl', 'yaml', 'yml']), loader=templateLoader)

# get performance models from artifacts
artifacts_path = "artifacts/"
@@ -79,7 +102,7 @@ def flatten_performance_models(models):
             testing_ami = model["testingAmi"]
             add_performance_model(model)

-    models_list = flatten_performance_models(performance_models)
+    models_list = flatten_performance_models()

# render performance models into markdown
template = env.get_template('performance_model.tpl')
@@ -92,6 +115,10 @@ def flatten_performance_models(models):
     })
     print(rendered_result)

-    # write rendered result to docs/performance_model.md
-    with open("docs/performance_model.md", "w") as f:
+    # write rendered result to report.md
+    with open("performance-report.md", "w+") as f:
         f.write(rendered_result)
+
+    # write benchmark-data.json
+    with open("performance-data.json", "w+") as f:
+        json.dump(get_benchmark_data(), f, indent=4)
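To make the new emit path concrete, a hypothetical smoke run of the functions above (testcase name and numbers invented; in CI the models come from the downloaded artifact JSON files):

performance_models["metric"] = {
    "100": [{"testcase": "datapoint_exporter", "avgCpu": 7.4, "avgMem": 68.2}],
}
print(json.dumps(get_benchmark_data(), indent=4))
# Emits two entries for the one model: "Average CPU Usage" in % and
# "Average Memory Usage" in MB, each tagged extra="metric (TPS: 100) - ...".

The workflow then invokes the script as python e2etest/get-performance-model-table.py -v <version>, which now writes performance-report.md and performance-data.json into the working directory instead of committing docs/performance_model.md.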
