diff --git a/tests/README.md b/tests/README.md index b7203a9cf2..9571b05878 100644 --- a/tests/README.md +++ b/tests/README.md @@ -5,8 +5,7 @@ provides a set of integration tests and associated utilities. The general testi is: 1. Building the Collector (`make otelcol` or `make all`) -1. Defining your expected [resource metric content](./testutils/README.md#resource-metrics) as a yaml file -([see example](testutils/telemetry/testdata/metrics/resource-metrics.yaml)) +1. Defining [your expected golden file content as a yaml file](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/golden) 1. Spin up your target resources as [docker containers](./testutils/README.md#test-containers). 1. Stand up an in-memory [OTLP metrics receiver and sink](./testutils/README.md#otlp-metrics-receiver-sink) capable of detecting if/when desired data are received. 1. Spin up your Collector [as a subprocess](./testutils/README.md#collector-process) or [as a container](./testutils/README.md#collector-container) configured to report to this OTLP receiver. @@ -17,51 +16,3 @@ is more useful overall. **NOTE** At this time, integration tests generally target collector containers (`SPLUNK_OTEL_COLLECTOR_IMAGE` env var), and test coverage for the subprocess is best effort only, unless the test cases explicitly maintain one. The collector process targets are generally for test development without requiring frequent rebuilds of a local docker image. 
- -```go -package example_test - -import ( - "context" - "path/filepath" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/signalfx/splunk-otel-collector/tests/testutils" - "github.com/signalfx/splunk-otel-collector/tests/testutils/telemetry" -) - -func TestMyExampleComponent(t *testing.T) { - expectedResourceMetrics, err := telemetry.LoadResourceMetrics( - filepath.Join(".", "testdata", "metrics", "my_resource_metrics.yaml"), - ) - require.NoError(t, err) - require.NotNil(t, expectedResourceMetrics) - - // combination OTLP Receiver, consumertests.MetricsSink, and consumertests.LogsSink - otlp, err := testutils.NewOTLPReceiverSink().WithEndpoint("localhost:23456").Build() - require.NoError(t, err) - require.NoError(t, otlp.Start()) - - defer func() { - require.NoError(t, otlp.Shutdown()) - }() - - myContainer := testutils.NewContainer().WithImage("someTarget").Build() - err = myContainer.Start(context.Background()) - require.NoError(t, err) - - // running collector subprocess that uses the provided config set to export OTLP to our test receiver - myCollector, err := testutils.NewCollectorProcess().WithConfigPath(filepath.Join(".", "testdata", "config.yaml")).Build() - require.NoError(t, err) - err = myCollector.Start() - require.NoError(t, err) - defer func() { - require.NoError(t, myCollector.Shutdown() ) - }() - - require.NoError(t, otlp.AssertAllMetricsReceived(t, *expectedResourceMetrics, 30*time.Second)) -} -``` diff --git a/tests/general/container_test.go b/tests/general/container_test.go index b43836effb..18806e370a 100644 --- a/tests/general/container_test.go +++ b/tests/general/container_test.go @@ -111,7 +111,6 @@ func TestSpecifiedContainerConfigDefaultsToCmdLineArgIfEnvVarConflict(t *testing return false } receivedOTLPMetrics := tc.OTLPReceiverSink.AllMetrics() - tc.OTLPReceiverSink.Reset() for _, rom := range receivedOTLPMetrics { for i := 0; i < rom.ResourceMetrics().Len(); i++ { @@ -189,7 +188,6 @@ service: return false } 
receivedOTLPMetrics := tc.OTLPReceiverSink.AllMetrics() - tc.OTLPReceiverSink.Reset() for _, rom := range receivedOTLPMetrics { for i := 0; i < rom.ResourceMetrics().Len(); i++ { diff --git a/tests/general/discoverymode/testdata/resource_metrics/k8s-observer-smart-agent-redis.yaml b/tests/general/discoverymode/testdata/resource_metrics/k8s-observer-smart-agent-redis.yaml deleted file mode 100644 index de19286be7..0000000000 --- a/tests/general/discoverymode/testdata/resource_metrics/k8s-observer-smart-agent-redis.yaml +++ /dev/null @@ -1,17 +0,0 @@ -resource_metrics: - - attributes: - k8s.namespace.name: test-namespace - k8s.pod.name: target.redis - k8s.pod.uid: - one.key: one.value - two.key: two.value - scope_metrics: - - metrics: - - name: gauge.connected_clients - type: IntGauge - attributes: - dsname: value - plugin: redis_info - plugin_instance: - system.type: redis - three.key: three.value.from.cmdline.property diff --git a/tests/receivers/smartagent/collectd-kafka/testdata/resource_metrics/all_broker.yaml b/tests/receivers/smartagent/collectd-kafka/testdata/resource_metrics/all_broker.yaml deleted file mode 100644 index 761cb3a72f..0000000000 --- a/tests/receivers/smartagent/collectd-kafka/testdata/resource_metrics/all_broker.yaml +++ /dev/null @@ -1,66 +0,0 @@ -resource_metrics: - - scope_metrics: - - metrics: - - name: counter.kafka-bytes-in - type: IntMonotonicCumulativeSum - - name: counter.kafka-bytes-out - type: IntMonotonicCumulativeSum - - name: counter.kafka-isr-expands - type: IntMonotonicCumulativeSum - - name: counter.kafka-isr-shrinks - type: IntMonotonicCumulativeSum - - name: counter.kafka-leader-election-rate - type: IntMonotonicCumulativeSum - - name: counter.kafka-messages-in - type: IntMonotonicCumulativeSum - - name: counter.kafka-unclean-elections-rate - type: IntMonotonicCumulativeSum - - name: counter.kafka.fetch-consumer.total-time.count - type: IntMonotonicCumulativeSum - - name: counter.kafka.fetch-follower.total-time.count - type: 
IntMonotonicCumulativeSum - - name: counter.kafka.produce.total-time.count - type: IntMonotonicCumulativeSum - - name: gauge.jvm.threads.count - type: IntGauge - - name: gauge.kafka-active-controllers - type: IntGauge - - name: gauge.kafka-max-lag - type: IntGauge - - name: gauge.kafka-offline-partitions-count - type: IntGauge - - name: gauge.kafka-request-queue - type: IntGauge - - name: gauge.kafka-underreplicated-partitions - type: IntGauge - - name: gauge.kafka.fetch-consumer.total-time.99th - type: IntGauge - - name: gauge.kafka.fetch-consumer.total-time.median - type: IntGauge - - name: gauge.kafka.fetch-follower.total-time.99th - type: IntGauge - - name: gauge.kafka.fetch-follower.total-time.median - type: IntGauge - - name: gauge.kafka.produce.total-time.99th - type: IntGauge - - name: counter.kafka.logs.flush-time.count - type: IntMonotonicCumulativeSum - - name: gauge.kafka.logs.flush-time.median - type: DoubleGauge - - name: gauge.kafka.logs.flush-time.99th - type: DoubleGauge - - name: gauge.kafka.produce.total-time.median - - name: gauge.loaded_classes - type: IntGauge - - name: invocations - type: IntMonotonicCumulativeSum - - name: jmx_memory.committed - type: IntGauge - - name: jmx_memory.init - type: IntGauge - - name: jmx_memory.max - type: IntGauge - - name: jmx_memory.used - type: IntGauge - - name: total_time_in_ms.collection_time - type: IntMonotonicCumulativeSum diff --git a/tests/receivers/smartagent/collectd-kafka/testdata/resource_metrics/all_consumer.yaml b/tests/receivers/smartagent/collectd-kafka/testdata/resource_metrics/all_consumer.yaml deleted file mode 100644 index a053c40b7f..0000000000 --- a/tests/receivers/smartagent/collectd-kafka/testdata/resource_metrics/all_consumer.yaml +++ /dev/null @@ -1,29 +0,0 @@ -resource_metrics: - - scope_metrics: - - metrics: - - name: gauge.jvm.threads.count - type: IntGauge - - name: gauge.kafka.consumer.bytes-consumed-rate - type: DoubleGauge - - name: gauge.kafka.consumer.fetch-rate - type: 
DoubleGauge - - name: gauge.kafka.consumer.fetch-size-avg - type: DoubleGauge - - name: gauge.kafka.consumer.records-consumed-rate - type: DoubleGauge - - name: gauge.kafka.consumer.records-lag-max - type: IntGauge - - name: gauge.loaded_classes - type: IntGauge - - name: invocations - type: IntMonotonicCumulativeSum - - name: jmx_memory.committed - type: IntGauge - - name: jmx_memory.init - type: IntGauge - - name: jmx_memory.max - type: IntGauge - - name: jmx_memory.used - type: IntGauge - - name: total_time_in_ms.collection_time - type: IntMonotonicCumulativeSum diff --git a/tests/receivers/smartagent/collectd-kafka/testdata/resource_metrics/all_producer.yaml b/tests/receivers/smartagent/collectd-kafka/testdata/resource_metrics/all_producer.yaml deleted file mode 100644 index 58ea294da0..0000000000 --- a/tests/receivers/smartagent/collectd-kafka/testdata/resource_metrics/all_producer.yaml +++ /dev/null @@ -1,39 +0,0 @@ -resource_metrics: - - scope_metrics: - - metrics: - - name: gauge.jvm.threads.count - type: IntGauge - - name: gauge.kafka.producer.byte-rate - type: DoubleGauge - - name: gauge.kafka.producer.compression-rate - type: IntGauge - - name: gauge.kafka.producer.io-wait-time-ns-avg - type: DoubleGauge - - name: gauge.kafka.producer.outgoing-byte-rate - type: DoubleGauge - - name: gauge.kafka.producer.record-error-rate - type: IntGauge - - name: gauge.kafka.producer.record-retry-rate - type: IntGauge - - name: gauge.kafka.producer.record-send-rate - type: DoubleGauge - - name: gauge.kafka.producer.request-latency-avg - type: DoubleGauge - - name: gauge.kafka.producer.request-rate - type: DoubleGauge - - name: gauge.kafka.producer.response-rate - type: DoubleGauge - - name: gauge.loaded_classes - type: IntGauge - - name: invocations - type: IntMonotonicCumulativeSum - - name: jmx_memory.committed - type: IntGauge - - name: jmx_memory.init - type: IntGauge - - name: jmx_memory.max - type: IntGauge - - name: jmx_memory.used - type: IntGauge - - name: 
total_time_in_ms.collection_time - type: IntMonotonicCumulativeSum diff --git a/tests/receivers/smartagent/jmx/testdata/resource_metrics/all.yaml b/tests/receivers/smartagent/jmx/testdata/resource_metrics/all.yaml deleted file mode 100644 index e34f0f458d..0000000000 --- a/tests/receivers/smartagent/jmx/testdata/resource_metrics/all.yaml +++ /dev/null @@ -1,11 +0,0 @@ -resource_metrics: - - scope_metrics: - - metrics: - - name: cassandra.status - type: DoubleGauge - - name: cassandra.state - type: DoubleGauge - - name: cassandra.load - type: DoubleGauge - - name: cassandra.ownership - type: DoubleGauge diff --git a/tests/testutils/README.md b/tests/testutils/README.md index 9f616b7cd6..8067d36a19 100644 --- a/tests/testutils/README.md +++ b/tests/testutils/README.md @@ -3,62 +3,6 @@ The `testutils` package provides an internal test format for Collector data, and helpers to help assert its integrity from arbitrary components. -### Resource Metrics - -`ResourceMetrics` are at the core of the internal metric data format for these tests and are intended to be defined -in yaml files or by converting from obtained `pdata.Metrics` items. They provide a strict `Equals()` helper method as -well as `RelaxedEquals()` to help in verifying desired field and values, ignoring those not specified. 
- -```yaml -resource_metrics: - - attributes: - a_resource_attribute: a_value - another_resource_attribute: another_value - scope_metrics: - - instrumentation_scope: - name: a library - version: some version - - metrics: - - name: my.int.gauge - type: IntGauge - description: my.int.gauge description - unit: ms - attributes: - my_attribute: my_attribute_value - my_other_attribute: my_other_attribute_value - value: 123 - - name: my.double.sum - type: DoubleNonmonotonicDeltaSum - attributes: {} # enforced empty attribute map in RelaxedEquals() (only point with no attributes matches) - value: -123.456 - - scope_metrics: - - instrumentation_scope: - name: an instrumentation library from a different resource without attributes - metrics: - - name: my.double.gauge - type: DoubleGauge - # missing attributes field, so values are not compared in RelaxedEquals() (any attribute values are accepted) - value: 456.789 - - name: my.double.gauge - type: DoubleGauge - attributes: - another: attribute - value: 567.890 - - instrumentation_scope: - name: another instrumentation library - version: this_library_version - metrics: - - name: another_int_gauge - type: IntGauge - value: 456 -``` - -Using `telemetry.LoadResourceMetrics("my_yaml.path")` you can create an equivalent `ResourceMetrics` instance to what your yaml file specifies. -Using `telemetry.PDataToResourceMetrics(myReceivedPDataMetrics)` you can use the assertion helpers to determine if your expected -`ResourceMetrics` are the same as those received in your test case. `telemetry.FlattenResourceMetrics()` is a good way to "normalize" -metrics received over time to ensure that only unique datapoints are represented, and that all unique Resources and -Instrumentation Libraries have a single item. 
- ### Test Containers The Testcontainers project is a popular testing resource for easy container creation and usage for a number of languages @@ -178,9 +122,19 @@ func MyTest(t *testing.T) { } // will implicitly create a Testcase with OTLPReceiverSink listening at $OTLP_ENDPOINT, - // ./testdata/resource_metrics/my_resource_metrics.yaml ResourceMetrics instance, CollectorProcess with + // ./testdata/expected.yaml golden file, CollectorProcess with // ./testdata/my_collector_config.yaml config, and build and start all specified containers before calling - // OTLPReceiverSink.AssertAllMetricsReceived() with a 30s wait duration. - testutils.AssertAllMetricsReceived(t, "my_resource_metrics.yaml", "my_collector_config.yaml", containers, nil) + testutils.RunMetricsCollectionTest(t, "my_collector_config.yaml", "expected.yaml", + testutils.WithCompareMetricsOptions( + pmetrictest.IgnoreScopeVersion(), + pmetrictest.IgnoreMetricDataPointsOrder(), + pmetrictest.IgnoreResourceMetricsOrder(), + pmetrictest.IgnoreScopeMetricsOrder(), + pmetrictest.IgnoreMetricsOrder(), + pmetrictest.IgnoreMetricValues(), + pmetrictest.IgnoreTimestamp(), + pmetrictest.IgnoreStartTimestamp(), + ), + ) } ``` diff --git a/tests/testutils/testcase.go b/tests/testutils/testcase.go index 91e0a740c3..27c8245e27 100644 --- a/tests/testutils/testcase.go +++ b/tests/testutils/testcase.go @@ -28,8 +28,6 @@ import ( "runtime" "strings" "testing" - - "github.com/signalfx/splunk-otel-collector/tests/testutils/telemetry" ) type CollectorBuilder func(Collector) Collector @@ -75,16 +73,6 @@ func (t *Testcase) setOTLPEndpoint() { t.OTLPEndpointForCollector = t.OTLPEndpoint } -// Loads and validates a ResourceMetrics instance, assuming it's located in ./testdata/resource_metrics -func (t *Testcase) ResourceMetrics(filename string) *telemetry.ResourceMetrics { - expectedResourceMetrics, err := telemetry.LoadResourceMetrics( - path.Join(".", "testdata", "resource_metrics", filename), - ) - require.NoError(t, err) - 
require.NotNil(t, expectedResourceMetrics) - return expectedResourceMetrics -} - // Builds and starts all provided Container builder instances, returning them and a validating stop function. func (t *Testcase) Containers(builders ...Container) (containers []*Container, stop func()) { for _, builder := range builders {