108 Do continuous benchmarks (#647)
* Benchmark class to control iterations and times (see the standalone usage sketch after this list).

* Add benchmarks for all the methods of the CUDS API referenced in the CUDS API tutorial.

* Add wrapper functions to make the benchmarks compatible with pytest-benchmark.

* Add `pytest.ini` file, needed for the benchmarks.

* Simplify pytest-benchmark wrapper functions using a template.

* Move pytest-benchmark wrapper functions template to the benchmark class.

* Change default size of CUDS API benchmarks to 500.

* Add benchmarks workflow.

* Update benchmark token name.

* Enable benchmarks on the PR branch (for testing).

* Workflow test.

* Update docker image.

* Remove PR branch from `benchmarks.yml`.
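A minimal standalone sketch of how the new Benchmark base class (added in tests/benchmark.py below) can be driven outside of pytest-benchmark. The ListAppendBenchmark subclass is purely illustrative and not part of this commit; the benchmarks actually added here target the CUDS API. It assumes tests/benchmark.py is importable as `benchmark`.

# Illustrative only: a toy subclass of the Benchmark base class introduced
# in tests/benchmark.py by this commit (assumed importable as `benchmark`).
from benchmark import Benchmark


class ListAppendBenchmark(Benchmark):
    """Hypothetical benchmark: time appending integers to a list."""

    def _benchmark_set_up(self):
        self._data = []

    def _benchmark_iterate(self, iteration: int = None):
        # Only the time spent inside this method is measured.
        self._data.append(iteration)

    def _benchmark_tear_down(self):
        del self._data


if __name__ == '__main__':
    bench = ListAppendBenchmark(size=500)
    bench.run()  # set_up -> 500 timed iterations -> tear_down
    print(f'{bench.iterations} iterations took {bench.duration:.6f} s '
          'of process time.')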
kysrpex authored Jun 16, 2021
1 parent dd7f586 commit 55ac8a2
Showing 5 changed files with 616 additions and 117 deletions.
41 changes: 41 additions & 0 deletions .github/workflows/benchmarks.yml
@@ -0,0 +1,41 @@
name: Benchmark

on:
  push:
    branches:
      - dev
      - master

jobs:
  benchmark:
    name: Performance check
    runs-on: self-hosted
    steps:
      - uses: actions/checkout@v2
      # - uses: actions/setup-python@v1  # (not needed for self-hosted runner)
      #   with:
      #     python-version: '3.8'
      # run: pip install pytest pytest-benchmark  (already in docker image)
      - name: Run benchmark
        run: |
          pip install -e .
          pytest tests --benchmark-only --benchmark-timer='time.process_time' --benchmark-json output.json
      - name: Download previous benchmark data
        uses: actions/cache@v1
        with:
          path: ./cache
          key: ${{ runner.os }}-benchmark
      - name: Store benchmark result
        uses: rhysd/github-action-benchmark@v1
        with:
          # The benchmark tool that produced output.json
          tool: 'pytest'
          output-file-path: output.json
          # Personal access token to deploy the GitHub Pages branch
          github-token: ${{ secrets.BENCHMARKS_TOKEN }}
          auto-push: true
          # Workflow will fail when an alert happens
          alert-threshold: '150%'
          comment-on-alert: true
          alert-comment-cc-users: '@yoavnash,@pablo-de-andres,@kysrpex'
          fail-on-alert: true
146 changes: 146 additions & 0 deletions tests/benchmark.py
@@ -0,0 +1,146 @@
"""Contains an abstract class that serves as a base for defining benchmarks."""
import time
from typing import Union
from abc import ABC, abstractmethod


class Benchmark(ABC):
    """Abstract class that serves as a base for defining benchmarks."""

    def __init__(self, size: int = 500, *args, **kwargs):
        """Set up the internal attributes of the benchmark.

        Args:
            size (int): the number of iterations to be performed by the
                benchmark for it to be considered as finished.
        """
        super().__init__(*args, **kwargs)
        self._size = size
        self._iter_times = [None] * size
        self._finished = False

    @property
    def started(self) -> bool:
        """Whether the benchmark was iterated at least once."""
        return self.iterations > 0

    @property
    def finished(self) -> bool:
        """Whether the benchmark finished all its programmed iterations."""
        return self._finished or self.iterations >= self.size

    @property
    def executed(self) -> bool:
        """True if the benchmark is both started and finished."""
        return self.started and self.finished

    @property
    def duration(self) -> float:
        """The process time of the benchmark.

        The process time is calculated using the time module from the Python
        Standard Library. Check its definition in the library's docs:
        https://docs.python.org/dev/library/time.html#time.process_time .
        """
        return sum(float(x) for x in self._iter_times if x is not None)

    @property
    def iterations(self) -> int:
        """The number of iterations already executed."""
        return len(tuple(None for x in self._iter_times if x is not None))

    @property
    def iteration(self) -> Union[int, None]:
        """The current (latest executed) iteration.

        Returns:
            Union[int, None]: either the current iteration or None if no
                iterations were run yet.
        """
        if self.iterations > 0:
            return self.iterations - 1
        else:
            return None

    @property
    def size(self) -> int:
        """The number of iterations programmed on initialization.

        When the number of executed iterations reaches the value of this
        parameter, the benchmark is finished.
        """
        return self._size

    def set_up(self):
        """Set up the benchmark. The time spent in the setup is not counted."""
        if not self.started and not self.finished:
            self._benchmark_set_up()
        elif self.started and not self.finished:
            raise RuntimeError('This benchmark has already started.')
        else:  # Both are true.
            raise StopIteration('This benchmark is finished.')

    @abstractmethod
    def _benchmark_set_up(self):
        """Implementation of the setup for a specific benchmark."""
        pass

    def tear_down(self):
        """Clean up after the benchmark. The time spent is not counted."""
        self._benchmark_tear_down()

    @abstractmethod
    def _benchmark_tear_down(self):
        """Implementation of the teardown for a specific benchmark."""
        pass

    def iterate(self):
        """Perform one iteration of the benchmark.

        Raises:
            StopIteration: when all the iterations of the benchmark were
                already executed.
        """
        if self.finished:
            raise StopIteration('This benchmark is finished.')
        iteration = self.iterations
        start = time.process_time()
        self._benchmark_iterate(iteration=iteration)
        end = time.process_time()
        self._iter_times[iteration] = end - start

    @abstractmethod
    def _benchmark_iterate(self, iteration: int = None):
        """Implementation of a benchmark iteration for a specific benchmark.

        The time taken to execute any code inside this method is registered.

        Args:
            iteration (int): the iteration number to be performed.
        """

    def run(self):
        """Run a benchmark from start to finish.

        This method only works on a benchmark that has not been started
        yet. It runs all of its programmed iterations.
        """
        self.set_up()
        for i in range(self.size):
            self.iterate()
        self.tear_down()

    @classmethod
    def iterate_pytest_benchmark(cls, benchmark, size: int = 500,
                                 *args, **kwargs):
        """Template wrapper function for pytest-benchmark.

        Can be overridden on a benchmark basis if desired.
        """
        # Default to one call per round and `size` rounds, so that exactly
        # `size` iterations are executed (the benchmark's full programmed run).
        kwargs['iterations'] = kwargs.get('iterations', 1)
        kwargs['rounds'] = kwargs.get('rounds', size)
        kwargs['warmup_rounds'] = kwargs.get('warmup_rounds', 0)
        benchmark_instance = cls(size=size)
        benchmark_instance.set_up()
        benchmark.pedantic(benchmark_instance.iterate, *args, **kwargs)
        benchmark_instance.tear_down()
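To connect the class above with pytest-benchmark, a concrete subclass implements the three abstract hooks and a test function hands pytest-benchmark's `benchmark` fixture to the `iterate_pytest_benchmark` template. The module, class, and test names below are illustrative and not part of this commit; only the `Benchmark` base class and the fixture usage follow the code above.

# Hypothetical tests/test_example_benchmark.py: wiring a concrete benchmark
# into pytest-benchmark through the template wrapper defined above.
from benchmark import Benchmark


class DictInsertBenchmark(Benchmark):
    """Hypothetical benchmark: time inserting keys into a dict."""

    def _benchmark_set_up(self):
        self._data = {}

    def _benchmark_iterate(self, iteration: int = None):
        self._data[iteration] = iteration

    def _benchmark_tear_down(self):
        self._data.clear()


def test_dict_insert(benchmark):
    # `benchmark` is the fixture provided by pytest-benchmark; with the
    # template's defaults this runs one call per round for 500 rounds,
    # i.e. exactly the benchmark's programmed 500 iterations.
    DictInsertBenchmark.iterate_pytest_benchmark(benchmark, size=500)

Invoked with `pytest tests --benchmark-only`, as in the workflow above, only tests that use the `benchmark` fixture are run.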