Commit 878b7a3
Introduced on_demand_benchmark decorator - see SciTools/iris#4621.
trexfeathers committed Nov 21, 2022
1 parent: af1fd52
Showing 3 changed files with 93 additions and 22 deletions.
benchmarks/benchmarks/__init__.py: 40 changes (33 additions, 7 deletions)
@@ -1,6 +1,9 @@
 """Benchmark tests for iris-esmf-regrid"""
 
 
+from os import environ
+
+
 def disable_repeat_between_setup(benchmark_object):
     """
     Decorator for benchmarks where object persistence would be inappropriate.
@@ -30,15 +33,38 @@ def disable_repeat_between_setup(benchmark_object):
 def skip_benchmark(benchmark_object):
     """
     Decorator for benchmarks skipping benchmarks.
+
+    Simply doesn't return the object.
+
+    Warnings
+    --------
+    ASV's architecture means decorated classes cannot be sub-classed. Code for
+    inheritance should be in a mixin class that doesn't include any methods
+    which ASV will recognise as benchmarks
+    (e.g. ``def time_something(self):`` ).
+
     """
+    pass
 
-    def setup_cache(self):
-        pass
 
-    def setup(*args):
-        raise NotImplementedError
+def on_demand_benchmark(benchmark_object):
+    """
+    Decorator. Disables these benchmark(s) unless the ON_DEMAND_BENCHMARKS env var is set.
 
-    benchmark_object.setup_cache = setup_cache
-    benchmark_object.setup = setup
+    For benchmarks that, for whatever reason, should not be run by default.
+    E.g.:
+    * Require a local file
+    * Used for scalability analysis instead of commit monitoring.
+
+    Can be applied to benchmark classes/methods/functions.
+
+    Warnings
+    --------
+    ASV's architecture means decorated classes cannot be sub-classed. Code for
+    inheritance should be in a mixin class that doesn't include any methods
+    which ASV will recognise as benchmarks
+    (e.g. ``def time_something(self):`` ).
 
-    return benchmark_object
+    """
+    if "ON_DEMAND_BENCHMARKS" in environ:
+        return benchmark_object
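
Editor's note: for orientation, a minimal sketch (not part of the commit) of how the new decorator behaves. The class name, method name, and import path below are illustrative assumptions; the import works only if the repository root is on sys.path so the benchmarks package is importable.

import os

from benchmarks.benchmarks import on_demand_benchmark  # assumed import path

os.environ.pop("ON_DEMAND_BENCHMARKS", None)  # simulate a 'standard' run


@on_demand_benchmark
class TimeExample:  # hypothetical benchmark class, not from the commit
    def time_something(self):
        pass


# With the variable unset, the decorator implicitly returns None, so the name
# is bound to None and ASV discovers no benchmark here. Setting
# ON_DEMAND_BENCHMARKS before the class definition leaves the class unchanged.
print(TimeExample)  # -> None
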
benchmarks/benchmarks/long/esmf_regridder.py: 73 changes (58 additions, 15 deletions)
@@ -15,11 +15,11 @@
 )
 from esmf_regrid.schemes import ESMFAreaWeightedRegridder
 
-from .. import skip_benchmark
+from .. import on_demand_benchmark, skip_benchmark
 from ..generate_data import _grid_cube, _gridlike_mesh_cube
 
 
-class PrepareScalabilityGridToGrid:
+class PrepareScalabilityMixin:
     timeout = 180
     params = [50, 100, 200, 400, 600, 800]
     param_names = ["grid width"]
@@ -42,11 +42,18 @@ def setup(self, n):
         self.src = self.src_cube(n)
         self.tgt = self.tgt_cube(n)
 
-    def time_prepare(self, n):
+    def _time_prepare(self, n):
         _ = self.regridder(self.src, self.tgt)
 
 
-class PrepareScalabilityMeshToGrid(PrepareScalabilityGridToGrid):
+@on_demand_benchmark
+class PrepareScalabilityGridToGrid(PrepareScalabilityMixin):
+    def time_prepare(self, n):
+        super()._time_prepare(n)
+
+
+@on_demand_benchmark
+class PrepareScalabilityMeshToGrid(PrepareScalabilityMixin):
     regridder = MeshToGridESMFRegridder
 
     def src_cube(self, n):
@@ -83,10 +90,11 @@ def time_save(self, _, n):
         save_regridder(self.rg, self.destination_file)
 
     def time_prepare(self, _, n):
-        super().time_prepare(n)
+        super()._time_prepare(n)
 
 
-class PrepareScalabilityGridToMesh(PrepareScalabilityGridToGrid):
+@on_demand_benchmark
+class PrepareScalabilityGridToMesh(PrepareScalabilityMixin):
     regridder = GridToMeshESMFRegridder
 
     def tgt_cube(self, n):
@@ -123,10 +131,10 @@ def time_save(self, _, n):
         save_regridder(self.rg, self.destination_file)
 
     def time_prepare(self, _, n):
-        super().time_prepare(n)
+        super()._time_prepare(n)
 
 
-class PerformScalabilityGridToGrid:
+class PerformScalabilityMixin:
     params = [100, 200, 400, 600, 800, 1000]
     param_names = ["height"]
     grid_size = 400
@@ -185,21 +193,31 @@ def setup(self, cache, height):
         cube = self.add_src_metadata(cube)
         self.result = regridder(cube)
 
-    def time_perform(self, cache, height):
+    def _time_perform(self, cache, height):
         assert not self.src.has_lazy_data()
         rg, _ = cache
         _ = rg(self.src)
 
-    def time_lazy_perform(self, cache, height):
+    def _time_lazy_perform(self, cache, height):
         # Don't touch result.data - permanent realisation plays badly with
         # ASV's re-run strategy.
         assert self.result.has_lazy_data()
         self.result.core_data().compute()
 
 
-class PerformScalabilityMeshToGrid(PerformScalabilityGridToGrid):
+@on_demand_benchmark
+class PerformScalabilityGridToGrid(PerformScalabilityMixin):
+    def time_perform(self, cache, height):
+        super()._time_perform(cache, height)
+
+    def time_lazy_perform(self, cache, height):
+        super()._time_lazy_perform(cache, height)
+
+
+@on_demand_benchmark
+class PerformScalabilityMeshToGrid(PerformScalabilityMixin):
     regridder = MeshToGridESMFRegridder
-    chunk_size = [PerformScalabilityGridToGrid.grid_size ^ 2, 10]
+    chunk_size = [PerformScalabilityMixin.grid_size ^ 2, 10]
     file_name = "chunked_cube_1d.nc"
 
     def setup_cache(self):
@@ -223,8 +241,15 @@ def add_src_metadata(self, cube):
         cube.add_aux_coord(mesh_coord_y, 0)
         return cube
 
+    def time_perform(self, cache, height):
+        super()._time_perform(cache, height)
+
+    def time_lazy_perform(self, cache, height):
+        super()._time_lazy_perform(cache, height)
+
 
-class PerformScalabilityGridToMesh(PerformScalabilityGridToGrid):
+@on_demand_benchmark
+class PerformScalabilityGridToMesh(PerformScalabilityMixin):
     regridder = GridToMeshESMFRegridder
 
     def setup_cache(self):
@@ -242,11 +267,17 @@ def tgt_cube(self):
         tgt.add_aux_coord(mesh_coord_y, 0)
         return tgt
 
+    def time_perform(self, cache, height):
+        super()._time_perform(cache, height)
+
+    def time_lazy_perform(self, cache, height):
+        super()._time_lazy_perform(cache, height)
+
 
 # These benchmarks are unusually long and resource intensive, so they are skipped.
 # They can be run by manually removing the skip.
 @skip_benchmark
-class PerformScalability1kGridToGrid(PerformScalabilityGridToGrid):
+class PerformScalability1kGridToGrid(PerformScalabilityMixin):
     timeout = 600
     grid_size = 1100
     chunk_size = [grid_size, grid_size, 10]
@@ -259,11 +290,17 @@ class PerformScalability1kGridToGrid(PerformScalabilityGridToGrid):
     def setup_cache(self):
        return super().setup_cache()
 
+    def time_perform(self, cache, height):
+        super()._time_perform(cache, height)
+
+    def time_lazy_perform(self, cache, height):
+        super()._time_lazy_perform(cache, height)
+
 
 # These benchmarks are unusually long and resource intensive, so they are skipped.
 # They can be run by manually removing the skip.
 @skip_benchmark
-class PerformScalability2kGridToGrid(PerformScalabilityGridToGrid):
+class PerformScalability2kGridToGrid(PerformScalabilityMixin):
     timeout = 600
     grid_size = 2200
     chunk_size = [grid_size, grid_size, 10]
@@ -275,3 +312,9 @@ class PerformScalability2kGridToGrid(PerformScalabilityGridToGrid):
 
     def setup_cache(self):
         return super().setup_cache()
+
+    def time_perform(self, cache, height):
+        super()._time_perform(cache, height)
+
+    def time_lazy_perform(self, cache, height):
+        super()._time_lazy_perform(cache, height)
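
Editor's note: the refactor above follows the mixin pattern described in the decorator docstrings. Shared setup and timing logic sits on undecorated mixin classes whose helpers carry a leading underscore (so ASV does not recognise them as benchmarks), and each decorated concrete class only exposes thin time_* wrappers. A minimal, generic sketch of the pattern; the names are illustrative, not from the commit, and the import path is assumed.

from benchmarks.benchmarks import on_demand_benchmark  # assumed import path


class _OperationMixin:
    # Safe to inherit from: contains no names ASV recognises as benchmarks.
    def setup(self):
        self.payload = list(range(1000))

    def _time_operation(self):
        sum(self.payload)


@on_demand_benchmark
class TimeOperation(_OperationMixin):  # hypothetical concrete benchmark
    # Only this thin wrapper is visible to ASV. Inheriting from the mixin,
    # never from a decorated class, avoids the sub-classing problem flagged
    # in the decorator's Warnings section.
    def time_operation(self):
        self._time_operation()
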
noxfile.py: 2 changes (2 additions, 0 deletions)
@@ -475,6 +475,8 @@ def benchmarks(
     )
     publish_subdir.mkdir()
 
+    # Activate on demand benchmarks (C/SPerf are deactivated for 'standard' runs).
+    session.env["ON_DEMAND_BENCHMARKS"] = "True"
     commit_range = "upstream/main^!"
 
     asv_command = [
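
Editor's note: outside of nox, the same switch can be flipped when invoking ASV directly. A rough sketch under assumptions: the asv configuration is assumed to live in the benchmarks/ directory, and the commit range mirrors the one the noxfile uses.

import os
import subprocess

env = dict(os.environ, ON_DEMAND_BENCHMARKS="True")  # activate on-demand benchmarks
subprocess.run(
    ["asv", "run", "upstream/main^!"],  # same commit range as the noxfile
    cwd="benchmarks",  # assumed location of the asv configuration
    env=env,
    check=True,
)
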
