feat: Benchmarks for basic functionality and IPA improvements #4004

Merged · 6 commits · Jan 13, 2024
3 changes: 2 additions & 1 deletion barretenberg/cpp/src/barretenberg/benchmark/CMakeLists.txt
@@ -1,9 +1,10 @@
-add_subdirectory(ipa_bench)
 add_subdirectory(decrypt_bench)
+add_subdirectory(ipa_bench)
 add_subdirectory(pippenger_bench)
 add_subdirectory(plonk_bench)
 add_subdirectory(ultra_bench)
 add_subdirectory(goblin_bench)
+add_subdirectory(basics_bench)
 add_subdirectory(relations_bench)
 add_subdirectory(widgets_bench)
 add_subdirectory(protogalaxy_bench)
18 changes: 18 additions & 0 deletions barretenberg/cpp/src/barretenberg/benchmark/basics_bench/CMakeLists.txt
@@ -0,0 +1,18 @@
# Each source represents a separate benchmark suite
set(BENCHMARK_SOURCES
    basics.bench.cpp
)

# Required libraries for benchmark suites
set(LINKED_LIBRARIES
    benchmark::benchmark
    ecc
)

# Add executable and custom target for each suite, e.g. ultra_honk_bench
foreach(BENCHMARK_SOURCE ${BENCHMARK_SOURCES})
    get_filename_component(BENCHMARK_NAME ${BENCHMARK_SOURCE} NAME_WE) # extract name without extension
    add_executable(${BENCHMARK_NAME}_bench ${BENCHMARK_SOURCE})
    target_link_libraries(${BENCHMARK_NAME}_bench ${LINKED_LIBRARIES})
    add_custom_target(run_${BENCHMARK_NAME} COMMAND ${BENCHMARK_NAME} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
endforeach()
88 changes: 88 additions & 0 deletions barretenberg/cpp/src/barretenberg/benchmark/basics_bench/analyse_all_benchmarks.py
@@ -0,0 +1,88 @@
#!/usr/bin/python3
"""
Tool for analysing several benchmarks from basics_bench to calculate operation timings
For example, in src directory:
python3 ../src/barretenberg/benchmark/basics_bench/analyse_all_benchmarks.py -f bin/basics_bench
"""
import argparse
import subprocess
import tempfile
from single_benchmark_analysis import evaluate_benchmark_from_file
import os

# Some of the benchmarks use other operations to randomise the procedure, so we need to subtract the results
filter_rules={
    "sequential_copy":"cycle_waste",
    "cycle_waste":None,
    "parallel_for_field_element_addition":None,
    "ff_addition":"cycle_waste",
    "ff_multiplication":"cycle_waste",
    "ff_sqr":"cycle_waste",
    "ff_invert":"ff_addition",
    "ff_to_montgomery":"cycle_waste",
    "ff_from_montgomery":"cycle_waste",
    "ff_reduce":"ff_addition",
    "projective_point_addition":"cycle_waste",
    "projective_point_accidental_doubling":"cycle_waste",
    "projective_point_doubling":"cycle_waste",
    "scalar_multiplication":"ff_addition",
}
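# Illustrative example of the subtraction rule above (numbers are hypothetical):
# if ff_addition measures 4.0 ns per iteration and its baseline cycle_waste measures
# 1.0 ns, the filtered ff_addition result printed by filter_benchmarks() is 3.0 ns.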
def get_benchmarks(filename):
"""
Get a list of benchmarks from the binary
"""
    result=subprocess.run([filename,"--benchmark_list_tests"],capture_output=True)
    result.check_returncode()
    output_lines=result.stdout.splitlines()
    benchmark_names=set([x.decode().split('/')[0] for x in output_lines])
    return sorted(list(benchmark_names))

def run_benchmarks(filename,bnames):
"""
Run benchmarks for each type and collect results
"""
benchmark_results=dict()
for bname in bnames:
output_file=tempfile.mktemp()
result=subprocess.run([filename,f"--benchmark_filter={bname}.*",f"--benchmark_out={output_file}","--benchmark_out_format=csv"])
result.check_returncode()
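        # evaluate_benchmark_from_file parses the CSV written via --benchmark_out and
        # presumably returns a per-iteration time in microseconds; the *1000 below
        # converts it to the nanoseconds reported in the print statement (assumption,
        # not verified against single_benchmark_analysis.py).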
        benchmark_result=evaluate_benchmark_from_file(output_file)*1000
        benchmark_results[bname]=benchmark_result
        print (f"Benchmark {bname} unfiltered: {benchmark_result} ns")
        os.remove(output_file)

    return benchmark_results

def filter_benchmarks(benchmark_results):
"""
Apply filtering rules and print the benchmarks
"""
    global filter_rules
    print ("Filtered benchmark results:")
    max_len=0
    for bname in sorted(benchmark_results.keys()):
        if len(bname)>max_len:
            max_len=len(bname)
    for bname in sorted(benchmark_results.keys()):
        if bname not in filter_rules.keys() or filter_rules[bname]==None:
            print(f"\t{bname}:{' '*(max_len-len(bname))}\t{benchmark_results[bname]:.1f}")
        else:
            print(f"\t{bname}:{' '*(max_len-len(bname))}\t{benchmark_results[bname]-benchmark_results[filter_rules[bname]]:.1f}")

if __name__=="__main__":
    parser=argparse.ArgumentParser(description='Run all the individual benchmarks',epilog='This expects a single file with a single type of benchmark <name>/i')
    parser.add_argument("-f","--file",dest="filename",required=True,help="run benchmark FILE", metavar="FILE")
    args=parser.parse_args()
    filename=args.filename
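    # argparse already enforces -f/--file via required=True, so the None check below is
    # only a defensive fallback.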
    if filename==None:
        parser.print_help()
        exit()
    benchmark_names=get_benchmarks(filename)
    print("Will run the following benchmarks:")
    for bname in benchmark_names:
        print(f'\t{bname}')
    unfiltered_results=run_benchmarks(filename,benchmark_names)
    filter_benchmarks(unfiltered_results)


