Skip to content

Commit

Permalink
Add throughput performance tests for OTLP exporter (#1491)
Browse files Browse the repository at this point in the history
  • Loading branch information
NathanielRN authored Dec 23, 2020
1 parent 4195360 commit 8ebd6c8
Show file tree
Hide file tree
Showing 2 changed files with 84 additions and 5 deletions.
9 changes: 4 additions & 5 deletions .github/workflows/test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -63,15 +63,14 @@ jobs:
- name: run tox
run: tox -f ${{ matrix.python-version }}-${{ matrix.package }} -- --benchmark-json=${{ env.RUN_MATRIX_COMBINATION }}-benchmark.json
- name: Find and merge benchmarks
# TODO: Add at least one benchmark to every package type to remove this
if: matrix.package == 'core'
id: find_and_merge_benchmarks
run: >-
jq -s '.[0].benchmarks = ([.[].benchmarks] | add)
| if .[0].benchmarks == null then null else .[0] end'
opentelemetry-*/tests/*${{ matrix.package }}*-benchmark.json > output.json
$(find . -name '*${{ matrix.package }}*-benchmark.json') > output.json
&& echo "::set-output name=json_plaintext::$(cat output.json)"
- name: Report on benchmark results
# TODO: Add at least one benchmark to every package type to remove this
if: matrix.package == 'core'
if: steps.find_and_merge_benchmarks.outputs.json_plaintext != 'null'
uses: rhysd/github-action-benchmark@v1
with:
name: OpenTelemetry Python Benchmarks - Python ${{ env[matrix.python-version ]}} - ${{ matrix.package }}
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,80 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from unittest.mock import patch

from opentelemetry.exporter.otlp.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk.trace import TracerProvider, sampling
from opentelemetry.sdk.trace.export import (
BatchExportSpanProcessor,
SimpleExportSpanProcessor,
)


def get_tracer_with_processor(span_processor_class):
    """Return a tracer backed by the given span-processor class.

    The processor class is instantiated around a fresh ``OTLPSpanExporter``
    and installed as the active processor of a new ``TracerProvider`` using
    the always-on sampler, so every benchmarked span is recorded/exported.
    """
    exporter = OTLPSpanExporter()
    provider = TracerProvider(
        active_span_processor=span_processor_class(exporter),
        sampler=sampling.DEFAULT_ON,
    )
    return provider.get_tracer("pipeline_benchmark_tracer")


class MockTraceServiceStub(object):
    """Stand-in for the generated gRPC TraceService stub.

    The OTLP exporter invokes ``self._stub.Export(...)``; this replacement
    swallows that call so benchmarks measure SDK overhead without any
    network traffic.
    """

    def __init__(self, channel):
        # The channel argument is accepted for signature compatibility
        # with the real stub but deliberately ignored.
        def _noop_export(*args, **kwargs):
            return None

        self.Export = _noop_export


@patch(
    "opentelemetry.exporter.otlp.trace_exporter.OTLPSpanExporter._stub",
    new=MockTraceServiceStub,
)
def test_simple_span_processor(benchmark):
    """Benchmark span throughput with SimpleExportSpanProcessor.

    The exporter's gRPC stub is patched with a no-op mock, so the timing
    reflects SDK + synchronous export overhead only, not network cost.
    """
    tracer = get_tracer_with_processor(SimpleExportSpanProcessor)

    def create_spans_to_be_exported():
        # One span with ten attributes per benchmark iteration.
        span = tracer.start_span("benchmarkedSpan")
        for index in range(10):
            span.set_attribute(
                "benchmarkAttribute_{}".format(index),
                "benchmarkAttrValue_{}".format(index),
            )
        span.end()

    benchmark(create_spans_to_be_exported)


@patch(
    "opentelemetry.exporter.otlp.trace_exporter.OTLPSpanExporter._stub",
    new=MockTraceServiceStub,
)
def test_batch_span_processor(benchmark):
    """Runs benchmark tests using BatchExportSpanProcessor.
    One particular call by pytest-benchmark will be much more expensive since
    the batch export thread will activate and consume a lot of CPU to process
    all the spans. For this reason, focus on the average measurement. Do not
    focus on the min/max measurements which will be misleading.
    """
    tracer = get_tracer_with_processor(BatchExportSpanProcessor)

    def create_spans_to_be_exported():
        # One span with ten attributes per benchmark iteration; export
        # happens asynchronously on the batch processor's worker thread.
        span = tracer.start_span("benchmarkedSpan")
        for index in range(10):
            span.set_attribute(
                "benchmarkAttribute_{}".format(index),
                "benchmarkAttrValue_{}".format(index),
            )
        span.end()

    benchmark(create_spans_to_be_exported)

0 comments on commit 8ebd6c8

Please sign in to comment.