From 8ebd6c80c0475e1d203671a5a3807961e0eb44cf Mon Sep 17 00:00:00 2001
From: "(Eliseo) Nathaniel Ruiz Nowell"
Date: Tue, 22 Dec 2020 17:06:41 -0800
Subject: [PATCH] Add throughput performance tests for OTLP exporter (#1491)

---
 .github/workflows/test.yml                   |  9 +--
 .../test_benchmark_trace_exporter.py         | 80 +++++++++++++++++++
 2 files changed, 84 insertions(+), 5 deletions(-)
 create mode 100644 exporter/opentelemetry-exporter-otlp/tests/performance/benchmarks/test_benchmark_trace_exporter.py

diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index c534fd9caf3..d702613f32f 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -63,15 +63,14 @@ jobs:
       - name: run tox
         run: tox -f ${{ matrix.python-version }}-${{ matrix.package }} -- --benchmark-json=${{ env.RUN_MATRIX_COMBINATION }}-benchmark.json
       - name: Find and merge benchmarks
-        # TODO: Add at least one benchmark to every package type to remove this
-        if: matrix.package == 'core'
+        id: find_and_merge_benchmarks
         run: >-
           jq -s '.[0].benchmarks = ([.[].benchmarks] | add)
           | if .[0].benchmarks == null then null else .[0] end'
-          opentelemetry-*/tests/*${{ matrix.package }}*-benchmark.json > output.json
+          $(find . -name '*${{ matrix.package }}*-benchmark.json') > output.json
+          && echo "::set-output name=json_plaintext::$(cat output.json)"
       - name: Report on benchmark results
-        # TODO: Add at least one benchmark to every package type to remove this
-        if: matrix.package == 'core'
+        if: steps.find_and_merge_benchmarks.outputs.json_plaintext != 'null'
         uses: rhysd/github-action-benchmark@v1
         with:
           name: OpenTelemetry Python Benchmarks - Python ${{ env[matrix.python-version ]}} - ${{ matrix.package }}
diff --git a/exporter/opentelemetry-exporter-otlp/tests/performance/benchmarks/test_benchmark_trace_exporter.py b/exporter/opentelemetry-exporter-otlp/tests/performance/benchmarks/test_benchmark_trace_exporter.py
new file mode 100644
index 00000000000..00c22f0359b
--- /dev/null
+++ b/exporter/opentelemetry-exporter-otlp/tests/performance/benchmarks/test_benchmark_trace_exporter.py
@@ -0,0 +1,80 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from unittest.mock import patch
+
+from opentelemetry.exporter.otlp.trace_exporter import OTLPSpanExporter
+from opentelemetry.sdk.trace import TracerProvider, sampling
+from opentelemetry.sdk.trace.export import (
+    BatchExportSpanProcessor,
+    SimpleExportSpanProcessor,
+)
+
+
+def get_tracer_with_processor(span_processor_class):
+    span_processor = span_processor_class(OTLPSpanExporter())
+    tracer = TracerProvider(
+        active_span_processor=span_processor, sampler=sampling.DEFAULT_ON,
+    ).get_tracer("pipeline_benchmark_tracer")
+    return tracer
+
+
+class MockTraceServiceStub(object):
+    def __init__(self, channel):
+        self.Export = lambda *args, **kwargs: None
+
+
+@patch(
+    "opentelemetry.exporter.otlp.trace_exporter.OTLPSpanExporter._stub",
+    new=MockTraceServiceStub,
+)
+def test_simple_span_processor(benchmark):
+    tracer = get_tracer_with_processor(SimpleExportSpanProcessor)
+
+    def create_spans_to_be_exported():
+        span = tracer.start_span("benchmarkedSpan",)
+        for i in range(10):
+            span.set_attribute(
+                "benchmarkAttribute_{}".format(i),
+                "benchmarkAttrValue_{}".format(i),
+            )
+        span.end()
+
+    benchmark(create_spans_to_be_exported)
+
+
+@patch(
+    "opentelemetry.exporter.otlp.trace_exporter.OTLPSpanExporter._stub",
+    new=MockTraceServiceStub,
+)
+def test_batch_span_processor(benchmark):
+    """Runs benchmark tests using BatchExportSpanProcessor.
+
+    One particular call by pytest-benchmark will be much more expensive since
+    the batch export thread will activate and consume a lot of CPU to process
+    all the spans. For this reason, focus on the average measurement. Do not
+    focus on the min/max measurements, which will be misleading.
+    """
+    tracer = get_tracer_with_processor(BatchExportSpanProcessor)
+
+    def create_spans_to_be_exported():
+        span = tracer.start_span("benchmarkedSpan",)
+        for i in range(10):
+            span.set_attribute(
+                "benchmarkAttribute_{}".format(i),
+                "benchmarkAttrValue_{}".format(i),
+            )
+        span.end()
+
+    benchmark(create_spans_to_be_exported)
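
Note on the new tests: they rely on pytest-benchmark, whose benchmark fixture receives a callable and times it over many rounds; the --benchmark-json flag already passed through tox in the workflow writes the stats that the merge step consumes. Patching OTLPSpanExporter._stub with MockTraceServiceStub swaps out the gRPC stub so Export becomes a no-op and only the in-process pipeline is measured. Below is a stripped-down sketch of the same fixture pattern with a no-op workload; it is illustrative only and not part of the patch, and the test and variable names are hypothetical.

# Minimal pytest-benchmark sketch (illustrative, not part of the patch).
# Requires pytest and pytest-benchmark:
#
#     pytest test_sketch.py --benchmark-json=output.json


def test_noop_workload(benchmark):
    def workload():
        # Hypothetical stand-in for create_spans_to_be_exported():
        # ten attribute writes, no exporter or gRPC involved.
        attributes = {}
        for i in range(10):
            attributes["benchmarkAttribute_{}".format(i)] = i
        return attributes

    # pytest-benchmark calls the workload repeatedly across rounds and
    # records min/max/mean/stddev, which --benchmark-json serializes.
    benchmark(workload)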
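
Note on the workflow change: the jq filter slurps (-s) every per-package benchmark JSON file located by find, concatenates their "benchmarks" arrays into the first report's envelope, and emits null when no file contributed any benchmarks; the json_plaintext step output then lets the report step's if: condition skip reporting in the null case. A rough Python equivalent of that merge, offered only as a sketch under the assumption that each input is a pytest-benchmark JSON report (the file names below are hypothetical):

# Sketch of the workflow's jq merge step in Python; the real CI uses jq.
import json
from pathlib import Path


def merge_benchmark_reports(paths):
    reports = [json.loads(Path(path).read_text()) for path in paths]
    # jq: .[0].benchmarks = ([.[].benchmarks] | add)
    merged = reports[0]
    merged["benchmarks"] = [
        entry
        for report in reports
        for entry in (report.get("benchmarks") or [])
    ]
    # jq: if .[0].benchmarks == null then null else .[0] end
    return merged if merged["benchmarks"] else None


if __name__ == "__main__":
    # Hypothetical input files produced by --benchmark-json runs.
    merged = merge_benchmark_reports(
        ["core-benchmark.json", "exporter-benchmark.json"]
    )
    print(json.dumps(merged))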