diff --git a/CHANGELOG.md b/CHANGELOG.md
index e61f0af8f6b..552756be8d7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -19,6 +19,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
 - Change the span name to be `GET /path` so it complies with the OTel HTTP semantic conventions in `go.opentelemetry.io/contrib/instrumentation/github.com/labstack/echo/otelecho`. (#6365)
 - Record errors instead of setting the `gin.errors` attribute in `go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin`. (#6346)
+- The `go.opentelemetry.io/contrib/config` module now supports multiple schema versions in subdirectories (e.g. `go.opentelemetry.io/contrib/config/v0.3.0`) for easier migration. (#6412)

 ### Fixed

diff --git a/Makefile b/Makefile
index 8b9f9ead06d..73d2e165b44 100644
--- a/Makefile
+++ b/Makefile
@@ -325,11 +325,12 @@ OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_VERSION=v0.3.0
 genjsonschema-cleanup:
 	rm -Rf ${OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_SRC_DIR}

-GENERATED_CONFIG=./config/generated_config.go
+GENERATED_CONFIG=./config/${OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_VERSION}/generated_config.go

 # Generate structs for configuration from opentelemetry-configuration schema
 genjsonschema: genjsonschema-cleanup $(GOJSONSCHEMA)
 	mkdir -p ${OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_SRC_DIR}
+	mkdir -p ./config/${OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_VERSION}
 	curl -sSL https://api.github.com/repos/open-telemetry/opentelemetry-configuration/tarball/${OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_VERSION} | tar xz --strip 1 -C ${OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_SRC_DIR}
 	$(GOJSONSCHEMA) \
 		--capitalization ID \
diff --git a/config/doc.go b/config/doc.go
index 293b43abb25..ffda72986b6 100644
--- a/config/doc.go
+++ b/config/doc.go
@@ -1,7 +1,10 @@
 // Copyright The OpenTelemetry Authors
 // SPDX-License-Identifier: Apache-2.0

-// Package config can be used to parse a configuration file
-// that follows the JSON Schema defined by the OpenTelemetry
-// Configuration schema.
+// Package config can be used to parse a configuration file that follows
+// the JSON Schema defined by the OpenTelemetry Configuration schema. Different
+// versions of the schema are supported by code in the subdirectory that
+// matches the version number of the schema. For example, the import
+// go.opentelemetry.io/contrib/config/v0.3.0 includes code that supports the
+// v0.3.0 release of the configuration schema.
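As a quick usage sketch of the API this change introduces, a consumer would import one of the new versioned subdirectories and wire the pieces together roughly as below. This snippet is illustrative, not part of the patch; the `config.yaml` path, the `main` package, and the fatal error handling are assumptions.

```go
package main

import (
	"context"
	"log"
	"os"

	config "go.opentelemetry.io/contrib/config/v0.2.0"
)

func main() {
	// Read and parse a declarative configuration file (path is illustrative).
	b, err := os.ReadFile("config.yaml")
	if err != nil {
		log.Fatal(err)
	}
	cfg, err := config.ParseYAML(b)
	if err != nil {
		log.Fatal(err)
	}

	// Build SDK providers from the parsed configuration model.
	sdk, err := config.NewSDK(
		config.WithContext(context.Background()),
		config.WithOpenTelemetryConfiguration(*cfg),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = sdk.Shutdown(context.Background()) }()

	// Use or globally register the configured providers.
	_ = sdk.TracerProvider()
	_ = sdk.MeterProvider()
	_ = sdk.LoggerProvider()
}
```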
package config // import "go.opentelemetry.io/contrib/config" diff --git a/config/v0.2.0/config.go b/config/v0.2.0/config.go new file mode 100644 index 00000000000..de1bd8a9b0c --- /dev/null +++ b/config/v0.2.0/config.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package config // import "go.opentelemetry.io/contrib/config/v0.2.0" + +import ( + "context" + "errors" + + "gopkg.in/yaml.v3" + + "go.opentelemetry.io/otel/log" + nooplog "go.opentelemetry.io/otel/log/noop" + "go.opentelemetry.io/otel/metric" + noopmetric "go.opentelemetry.io/otel/metric/noop" + "go.opentelemetry.io/otel/trace" + nooptrace "go.opentelemetry.io/otel/trace/noop" +) + +const ( + protocolProtobufHTTP = "http/protobuf" + protocolProtobufGRPC = "grpc/protobuf" + + compressionGzip = "gzip" + compressionNone = "none" +) + +type configOptions struct { + ctx context.Context + opentelemetryConfig OpenTelemetryConfiguration +} + +type shutdownFunc func(context.Context) error + +func noopShutdown(context.Context) error { + return nil +} + +// SDK is a struct that contains all the providers +// configured via the configuration model. +type SDK struct { + meterProvider metric.MeterProvider + tracerProvider trace.TracerProvider + loggerProvider log.LoggerProvider + shutdown shutdownFunc +} + +// TracerProvider returns a configured trace.TracerProvider. +func (s *SDK) TracerProvider() trace.TracerProvider { + return s.tracerProvider +} + +// MeterProvider returns a configured metric.MeterProvider. +func (s *SDK) MeterProvider() metric.MeterProvider { + return s.meterProvider +} + +// LoggerProvider returns a configured log.LoggerProvider. +func (s *SDK) LoggerProvider() log.LoggerProvider { + return s.loggerProvider +} + +// Shutdown calls shutdown on all configured providers. +func (s *SDK) Shutdown(ctx context.Context) error { + return s.shutdown(ctx) +} + +var noopSDK = SDK{ + loggerProvider: nooplog.LoggerProvider{}, + meterProvider: noopmetric.MeterProvider{}, + tracerProvider: nooptrace.TracerProvider{}, + shutdown: func(ctx context.Context) error { return nil }, +} + +// NewSDK creates SDK providers based on the configuration model. +func NewSDK(opts ...ConfigurationOption) (SDK, error) { + o := configOptions{} + for _, opt := range opts { + o = opt.apply(o) + } + if o.opentelemetryConfig.Disabled != nil && *o.opentelemetryConfig.Disabled { + return noopSDK, nil + } + + r, err := newResource(o.opentelemetryConfig.Resource) + if err != nil { + return noopSDK, err + } + + mp, mpShutdown, err := meterProvider(o, r) + if err != nil { + return noopSDK, err + } + + tp, tpShutdown, err := tracerProvider(o, r) + if err != nil { + return noopSDK, err + } + + lp, lpShutdown, err := loggerProvider(o, r) + if err != nil { + return noopSDK, err + } + + return SDK{ + meterProvider: mp, + tracerProvider: tp, + loggerProvider: lp, + shutdown: func(ctx context.Context) error { + return errors.Join(mpShutdown(ctx), tpShutdown(ctx), lpShutdown(ctx)) + }, + }, nil +} + +// ConfigurationOption configures options for providers. +type ConfigurationOption interface { + apply(configOptions) configOptions +} + +type configurationOptionFunc func(configOptions) configOptions + +func (fn configurationOptionFunc) apply(cfg configOptions) configOptions { + return fn(cfg) +} + +// WithContext sets the context.Context for the SDK. 
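// Like the other ConfigurationOption helpers in this file, it follows the
// functional-options pattern: NewSDK applies each option in order to an
// internal configOptions value before constructing the providers.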
+func WithContext(ctx context.Context) ConfigurationOption { + return configurationOptionFunc(func(c configOptions) configOptions { + c.ctx = ctx + return c + }) +} + +// WithOpenTelemetryConfiguration sets the OpenTelemetryConfiguration used +// to produce the SDK. +func WithOpenTelemetryConfiguration(cfg OpenTelemetryConfiguration) ConfigurationOption { + return configurationOptionFunc(func(c configOptions) configOptions { + c.opentelemetryConfig = cfg + return c + }) +} + +// ParseYAML parses a YAML configuration file into an OpenTelemetryConfiguration. +func ParseYAML(file []byte) (*OpenTelemetryConfiguration, error) { + var cfg OpenTelemetryConfiguration + err := yaml.Unmarshal(file, &cfg) + if err != nil { + return nil, err + } + + return &cfg, nil +} diff --git a/config/v0.2.0/config_test.go b/config/v0.2.0/config_test.go new file mode 100644 index 00000000000..653c6e978fd --- /dev/null +++ b/config/v0.2.0/config_test.go @@ -0,0 +1,383 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package config + +import ( + "context" + "encoding/json" + "errors" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + lognoop "go.opentelemetry.io/otel/log/noop" + metricnoop "go.opentelemetry.io/otel/metric/noop" + sdklog "go.opentelemetry.io/otel/sdk/log" + sdkmetric "go.opentelemetry.io/otel/sdk/metric" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + tracenoop "go.opentelemetry.io/otel/trace/noop" +) + +func TestNewSDK(t *testing.T) { + tests := []struct { + name string + cfg []ConfigurationOption + wantTracerProvider any + wantMeterProvider any + wantLoggerProvider any + wantErr error + wantShutdownErr error + }{ + { + name: "no-configuration", + wantTracerProvider: tracenoop.NewTracerProvider(), + wantMeterProvider: metricnoop.NewMeterProvider(), + wantLoggerProvider: lognoop.NewLoggerProvider(), + }, + { + name: "with-configuration", + cfg: []ConfigurationOption{ + WithContext(context.Background()), + WithOpenTelemetryConfiguration(OpenTelemetryConfiguration{ + TracerProvider: &TracerProvider{}, + MeterProvider: &MeterProvider{}, + LoggerProvider: &LoggerProvider{}, + }), + }, + wantTracerProvider: &sdktrace.TracerProvider{}, + wantMeterProvider: &sdkmetric.MeterProvider{}, + wantLoggerProvider: &sdklog.LoggerProvider{}, + }, + { + name: "with-sdk-disabled", + cfg: []ConfigurationOption{ + WithContext(context.Background()), + WithOpenTelemetryConfiguration(OpenTelemetryConfiguration{ + Disabled: ptr(true), + TracerProvider: &TracerProvider{}, + MeterProvider: &MeterProvider{}, + LoggerProvider: &LoggerProvider{}, + }), + }, + wantTracerProvider: tracenoop.NewTracerProvider(), + wantMeterProvider: metricnoop.NewMeterProvider(), + wantLoggerProvider: lognoop.NewLoggerProvider(), + }, + } + for _, tt := range tests { + sdk, err := NewSDK(tt.cfg...) 
+ require.Equal(t, tt.wantErr, err) + assert.IsType(t, tt.wantTracerProvider, sdk.TracerProvider()) + assert.IsType(t, tt.wantMeterProvider, sdk.MeterProvider()) + assert.IsType(t, tt.wantLoggerProvider, sdk.LoggerProvider()) + require.Equal(t, tt.wantShutdownErr, sdk.Shutdown(context.Background())) + } +} + +var v02OpenTelemetryConfig = OpenTelemetryConfiguration{ + Disabled: ptr(false), + FileFormat: "0.2", + AttributeLimits: &AttributeLimits{ + AttributeCountLimit: ptr(128), + AttributeValueLengthLimit: ptr(4096), + }, + LoggerProvider: &LoggerProvider{ + Limits: &LogRecordLimits{ + AttributeCountLimit: ptr(128), + AttributeValueLengthLimit: ptr(4096), + }, + Processors: []LogRecordProcessor{ + { + Batch: &BatchLogRecordProcessor{ + ExportTimeout: ptr(30000), + Exporter: LogRecordExporter{ + OTLP: &OTLP{ + Certificate: ptr("/app/cert.pem"), + ClientCertificate: ptr("/app/cert.pem"), + ClientKey: ptr("/app/cert.pem"), + Compression: ptr("gzip"), + Endpoint: "http://localhost:4318", + Headers: Headers{ + "api-key": "1234", + }, + Insecure: ptr(false), + Protocol: "http/protobuf", + Timeout: ptr(10000), + }, + }, + MaxExportBatchSize: ptr(512), + MaxQueueSize: ptr(2048), + ScheduleDelay: ptr(5000), + }, + }, + { + Simple: &SimpleLogRecordProcessor{ + Exporter: LogRecordExporter{ + Console: Console{}, + }, + }, + }, + }, + }, + MeterProvider: &MeterProvider{ + Readers: []MetricReader{ + { + Pull: &PullMetricReader{ + Exporter: MetricExporter{ + Prometheus: &Prometheus{ + Host: ptr("localhost"), + Port: ptr(9464), + WithResourceConstantLabels: &IncludeExclude{ + Excluded: []string{"service.attr1"}, + Included: []string{"service*"}, + }, + WithoutScopeInfo: ptr(false), + WithoutTypeSuffix: ptr(false), + WithoutUnits: ptr(false), + }, + }, + }, + }, + { + Periodic: &PeriodicMetricReader{ + Exporter: MetricExporter{ + OTLP: &OTLPMetric{ + Certificate: ptr("/app/cert.pem"), + ClientCertificate: ptr("/app/cert.pem"), + ClientKey: ptr("/app/cert.pem"), + Compression: ptr("gzip"), + DefaultHistogramAggregation: ptr(OTLPMetricDefaultHistogramAggregationBase2ExponentialBucketHistogram), + Endpoint: "http://localhost:4318", + Headers: Headers{ + "api-key": "1234", + }, + Insecure: ptr(false), + Protocol: "http/protobuf", + TemporalityPreference: ptr("delta"), + Timeout: ptr(10000), + }, + }, + Interval: ptr(5000), + Timeout: ptr(30000), + }, + }, + { + Periodic: &PeriodicMetricReader{ + Exporter: MetricExporter{ + Console: Console{}, + }, + }, + }, + }, + Views: []View{ + { + Selector: &ViewSelector{ + InstrumentName: ptr("my-instrument"), + InstrumentType: ptr(ViewSelectorInstrumentTypeHistogram), + MeterName: ptr("my-meter"), + MeterSchemaUrl: ptr("https://opentelemetry.io/schemas/1.16.0"), + MeterVersion: ptr("1.0.0"), + Unit: ptr("ms"), + }, + Stream: &ViewStream{ + Aggregation: &ViewStreamAggregation{ + ExplicitBucketHistogram: &ViewStreamAggregationExplicitBucketHistogram{ + Boundaries: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000}, + RecordMinMax: ptr(true), + }, + }, + AttributeKeys: []string{"key1", "key2"}, + Description: ptr("new_description"), + Name: ptr("new_instrument_name"), + }, + }, + }, + }, + Propagator: &Propagator{ + Composite: []string{"tracecontext", "baggage", "b3", "b3multi", "jaeger", "xray", "ottrace"}, + }, + Resource: &Resource{ + Attributes: Attributes{ + "service.name": "unknown_service", + }, + Detectors: &Detectors{ + Attributes: &DetectorsAttributes{ + Excluded: []string{"process.command_args"}, + Included: []string{"process.*"}, 
+ }, + }, + SchemaUrl: ptr("https://opentelemetry.io/schemas/1.16.0"), + }, + TracerProvider: &TracerProvider{ + Limits: &SpanLimits{ + AttributeCountLimit: ptr(128), + AttributeValueLengthLimit: ptr(4096), + EventCountLimit: ptr(128), + EventAttributeCountLimit: ptr(128), + LinkCountLimit: ptr(128), + LinkAttributeCountLimit: ptr(128), + }, + Processors: []SpanProcessor{ + { + Batch: &BatchSpanProcessor{ + ExportTimeout: ptr(30000), + Exporter: SpanExporter{ + OTLP: &OTLP{ + Certificate: ptr("/app/cert.pem"), + ClientCertificate: ptr("/app/cert.pem"), + ClientKey: ptr("/app/cert.pem"), + Compression: ptr("gzip"), + Endpoint: "http://localhost:4318", + Headers: Headers{ + "api-key": "1234", + }, + Insecure: ptr(false), + Protocol: "http/protobuf", + Timeout: ptr(10000), + }, + }, + MaxExportBatchSize: ptr(512), + MaxQueueSize: ptr(2048), + ScheduleDelay: ptr(5000), + }, + }, + { + Batch: &BatchSpanProcessor{ + Exporter: SpanExporter{ + Zipkin: &Zipkin{ + Endpoint: "http://localhost:9411/api/v2/spans", + Timeout: ptr(10000), + }, + }, + }, + }, + { + Simple: &SimpleSpanProcessor{ + Exporter: SpanExporter{ + Console: Console{}, + }, + }, + }, + }, + Sampler: &Sampler{ + ParentBased: &SamplerParentBased{ + LocalParentNotSampled: &Sampler{ + AlwaysOff: SamplerAlwaysOff{}, + }, + LocalParentSampled: &Sampler{ + AlwaysOn: SamplerAlwaysOn{}, + }, + RemoteParentNotSampled: &Sampler{ + AlwaysOff: SamplerAlwaysOff{}, + }, + RemoteParentSampled: &Sampler{ + AlwaysOn: SamplerAlwaysOn{}, + }, + Root: &Sampler{ + TraceIDRatioBased: &SamplerTraceIDRatioBased{ + Ratio: ptr(0.0001), + }, + }, + }, + }, + }, +} + +func TestParseYAML(t *testing.T) { + tests := []struct { + name string + input string + wantErr error + wantType interface{} + }{ + { + name: "valid YAML config", + input: `valid_empty.yaml`, + wantErr: nil, + wantType: &OpenTelemetryConfiguration{ + Disabled: ptr(false), + FileFormat: "0.1", + }, + }, + { + name: "invalid config", + input: "invalid_bool.yaml", + wantErr: errors.New(`yaml: unmarshal errors: + line 2: cannot unmarshal !!str ` + "`notabool`" + ` into bool`), + }, + { + name: "valid v0.2 config", + input: "v0.2.yaml", + wantType: &v02OpenTelemetryConfig, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + b, err := os.ReadFile(filepath.Join("..", "testdata", tt.input)) + require.NoError(t, err) + + got, err := ParseYAML(b) + if tt.wantErr != nil { + require.Equal(t, tt.wantErr.Error(), err.Error()) + } else { + require.NoError(t, err) + assert.Equal(t, tt.wantType, got) + } + }) + } +} + +func TestSerializeJSON(t *testing.T) { + tests := []struct { + name string + input string + wantErr error + wantType interface{} + }{ + { + name: "valid JSON config", + input: `valid_empty.json`, + wantErr: nil, + wantType: OpenTelemetryConfiguration{ + Disabled: ptr(false), + FileFormat: "0.1", + }, + }, + { + name: "invalid config", + input: "invalid_bool.json", + wantErr: errors.New(`json: cannot unmarshal string into Go struct field Plain.disabled of type bool`), + }, + { + name: "valid v0.2 config", + input: "v0.2.json", + wantType: v02OpenTelemetryConfig, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + b, err := os.ReadFile(filepath.Join("..", "testdata", tt.input)) + require.NoError(t, err) + + var got OpenTelemetryConfiguration + err = json.Unmarshal(b, &got) + + if tt.wantErr != nil { + require.Equal(t, tt.wantErr.Error(), err.Error()) + } else { + require.NoError(t, err) + assert.Equal(t, tt.wantType, got) + } + }) + } +} + +func 
ptr[T any](v T) *T { + return &v +} diff --git a/config/v0.2.0/generated_config.go b/config/v0.2.0/generated_config.go new file mode 100644 index 00000000000..2315641db64 --- /dev/null +++ b/config/v0.2.0/generated_config.go @@ -0,0 +1,780 @@ +// Code generated by github.com/atombender/go-jsonschema, DO NOT EDIT. + +package config + +import "encoding/json" +import "fmt" +import "reflect" + +type AttributeLimits struct { + // AttributeCountLimit corresponds to the JSON schema field + // "attribute_count_limit". + AttributeCountLimit *int `json:"attribute_count_limit,omitempty" yaml:"attribute_count_limit,omitempty" mapstructure:"attribute_count_limit,omitempty"` + + // AttributeValueLengthLimit corresponds to the JSON schema field + // "attribute_value_length_limit". + AttributeValueLengthLimit *int `json:"attribute_value_length_limit,omitempty" yaml:"attribute_value_length_limit,omitempty" mapstructure:"attribute_value_length_limit,omitempty"` + + AdditionalProperties interface{} +} + +type Attributes map[string]interface{} + +type BatchLogRecordProcessor struct { + // ExportTimeout corresponds to the JSON schema field "export_timeout". + ExportTimeout *int `json:"export_timeout,omitempty" yaml:"export_timeout,omitempty" mapstructure:"export_timeout,omitempty"` + + // Exporter corresponds to the JSON schema field "exporter". + Exporter LogRecordExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` + + // MaxExportBatchSize corresponds to the JSON schema field + // "max_export_batch_size". + MaxExportBatchSize *int `json:"max_export_batch_size,omitempty" yaml:"max_export_batch_size,omitempty" mapstructure:"max_export_batch_size,omitempty"` + + // MaxQueueSize corresponds to the JSON schema field "max_queue_size". + MaxQueueSize *int `json:"max_queue_size,omitempty" yaml:"max_queue_size,omitempty" mapstructure:"max_queue_size,omitempty"` + + // ScheduleDelay corresponds to the JSON schema field "schedule_delay". + ScheduleDelay *int `json:"schedule_delay,omitempty" yaml:"schedule_delay,omitempty" mapstructure:"schedule_delay,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *BatchLogRecordProcessor) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if _, ok := raw["exporter"]; raw != nil && !ok { + return fmt.Errorf("field exporter in BatchLogRecordProcessor: required") + } + type Plain BatchLogRecordProcessor + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = BatchLogRecordProcessor(plain) + return nil +} + +type BatchSpanProcessor struct { + // ExportTimeout corresponds to the JSON schema field "export_timeout". + ExportTimeout *int `json:"export_timeout,omitempty" yaml:"export_timeout,omitempty" mapstructure:"export_timeout,omitempty"` + + // Exporter corresponds to the JSON schema field "exporter". + Exporter SpanExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` + + // MaxExportBatchSize corresponds to the JSON schema field + // "max_export_batch_size". + MaxExportBatchSize *int `json:"max_export_batch_size,omitempty" yaml:"max_export_batch_size,omitempty" mapstructure:"max_export_batch_size,omitempty"` + + // MaxQueueSize corresponds to the JSON schema field "max_queue_size". + MaxQueueSize *int `json:"max_queue_size,omitempty" yaml:"max_queue_size,omitempty" mapstructure:"max_queue_size,omitempty"` + + // ScheduleDelay corresponds to the JSON schema field "schedule_delay". 
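// As with the other duration-valued fields in this schema, the value is
// an integer number of milliseconds (see the time.Millisecond conversions
// applied when the model is turned into SDK options, e.g. in log.go).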
+ ScheduleDelay *int `json:"schedule_delay,omitempty" yaml:"schedule_delay,omitempty" mapstructure:"schedule_delay,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *BatchSpanProcessor) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if _, ok := raw["exporter"]; raw != nil && !ok { + return fmt.Errorf("field exporter in BatchSpanProcessor: required") + } + type Plain BatchSpanProcessor + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = BatchSpanProcessor(plain) + return nil +} + +type Common map[string]interface{} + +type Console map[string]interface{} + +type Detectors struct { + // Attributes corresponds to the JSON schema field "attributes". + Attributes *DetectorsAttributes `json:"attributes,omitempty" yaml:"attributes,omitempty" mapstructure:"attributes,omitempty"` +} + +type DetectorsAttributes struct { + // Excluded corresponds to the JSON schema field "excluded". + Excluded []string `json:"excluded,omitempty" yaml:"excluded,omitempty" mapstructure:"excluded,omitempty"` + + // Included corresponds to the JSON schema field "included". + Included []string `json:"included,omitempty" yaml:"included,omitempty" mapstructure:"included,omitempty"` +} + +type Headers map[string]string + +type IncludeExclude struct { + // Excluded corresponds to the JSON schema field "excluded". + Excluded []string `json:"excluded,omitempty" yaml:"excluded,omitempty" mapstructure:"excluded,omitempty"` + + // Included corresponds to the JSON schema field "included". + Included []string `json:"included,omitempty" yaml:"included,omitempty" mapstructure:"included,omitempty"` +} + +type LogRecordExporter struct { + // Console corresponds to the JSON schema field "console". + Console Console `json:"console,omitempty" yaml:"console,omitempty" mapstructure:"console,omitempty"` + + // OTLP corresponds to the JSON schema field "otlp". + OTLP *OTLP `json:"otlp,omitempty" yaml:"otlp,omitempty" mapstructure:"otlp,omitempty"` + + AdditionalProperties interface{} +} + +type LogRecordLimits struct { + // AttributeCountLimit corresponds to the JSON schema field + // "attribute_count_limit". + AttributeCountLimit *int `json:"attribute_count_limit,omitempty" yaml:"attribute_count_limit,omitempty" mapstructure:"attribute_count_limit,omitempty"` + + // AttributeValueLengthLimit corresponds to the JSON schema field + // "attribute_value_length_limit". + AttributeValueLengthLimit *int `json:"attribute_value_length_limit,omitempty" yaml:"attribute_value_length_limit,omitempty" mapstructure:"attribute_value_length_limit,omitempty"` +} + +type LogRecordProcessor struct { + // Batch corresponds to the JSON schema field "batch". + Batch *BatchLogRecordProcessor `json:"batch,omitempty" yaml:"batch,omitempty" mapstructure:"batch,omitempty"` + + // Simple corresponds to the JSON schema field "simple". + Simple *SimpleLogRecordProcessor `json:"simple,omitempty" yaml:"simple,omitempty" mapstructure:"simple,omitempty"` + + AdditionalProperties interface{} +} + +type LoggerProvider struct { + // Limits corresponds to the JSON schema field "limits". + Limits *LogRecordLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` + + // Processors corresponds to the JSON schema field "processors". 
+ Processors []LogRecordProcessor `json:"processors,omitempty" yaml:"processors,omitempty" mapstructure:"processors,omitempty"` +} + +type MeterProvider struct { + // Readers corresponds to the JSON schema field "readers". + Readers []MetricReader `json:"readers,omitempty" yaml:"readers,omitempty" mapstructure:"readers,omitempty"` + + // Views corresponds to the JSON schema field "views". + Views []View `json:"views,omitempty" yaml:"views,omitempty" mapstructure:"views,omitempty"` +} + +type MetricExporter struct { + // Console corresponds to the JSON schema field "console". + Console Console `json:"console,omitempty" yaml:"console,omitempty" mapstructure:"console,omitempty"` + + // OTLP corresponds to the JSON schema field "otlp". + OTLP *OTLPMetric `json:"otlp,omitempty" yaml:"otlp,omitempty" mapstructure:"otlp,omitempty"` + + // Prometheus corresponds to the JSON schema field "prometheus". + Prometheus *Prometheus `json:"prometheus,omitempty" yaml:"prometheus,omitempty" mapstructure:"prometheus,omitempty"` + + AdditionalProperties interface{} +} + +type MetricReader struct { + // Periodic corresponds to the JSON schema field "periodic". + Periodic *PeriodicMetricReader `json:"periodic,omitempty" yaml:"periodic,omitempty" mapstructure:"periodic,omitempty"` + + // Pull corresponds to the JSON schema field "pull". + Pull *PullMetricReader `json:"pull,omitempty" yaml:"pull,omitempty" mapstructure:"pull,omitempty"` +} + +type OTLP struct { + // Certificate corresponds to the JSON schema field "certificate". + Certificate *string `json:"certificate,omitempty" yaml:"certificate,omitempty" mapstructure:"certificate,omitempty"` + + // ClientCertificate corresponds to the JSON schema field "client_certificate". + ClientCertificate *string `json:"client_certificate,omitempty" yaml:"client_certificate,omitempty" mapstructure:"client_certificate,omitempty"` + + // ClientKey corresponds to the JSON schema field "client_key". + ClientKey *string `json:"client_key,omitempty" yaml:"client_key,omitempty" mapstructure:"client_key,omitempty"` + + // Compression corresponds to the JSON schema field "compression". + Compression *string `json:"compression,omitempty" yaml:"compression,omitempty" mapstructure:"compression,omitempty"` + + // Endpoint corresponds to the JSON schema field "endpoint". + Endpoint string `json:"endpoint" yaml:"endpoint" mapstructure:"endpoint"` + + // Headers corresponds to the JSON schema field "headers". + Headers Headers `json:"headers,omitempty" yaml:"headers,omitempty" mapstructure:"headers,omitempty"` + + // Insecure corresponds to the JSON schema field "insecure". + Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` + + // Protocol corresponds to the JSON schema field "protocol". + Protocol string `json:"protocol" yaml:"protocol" mapstructure:"protocol"` + + // Timeout corresponds to the JSON schema field "timeout". + Timeout *int `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"` +} + +type OTLPMetric struct { + // Certificate corresponds to the JSON schema field "certificate". + Certificate *string `json:"certificate,omitempty" yaml:"certificate,omitempty" mapstructure:"certificate,omitempty"` + + // ClientCertificate corresponds to the JSON schema field "client_certificate". + ClientCertificate *string `json:"client_certificate,omitempty" yaml:"client_certificate,omitempty" mapstructure:"client_certificate,omitempty"` + + // ClientKey corresponds to the JSON schema field "client_key". 
+ ClientKey *string `json:"client_key,omitempty" yaml:"client_key,omitempty" mapstructure:"client_key,omitempty"` + + // Compression corresponds to the JSON schema field "compression". + Compression *string `json:"compression,omitempty" yaml:"compression,omitempty" mapstructure:"compression,omitempty"` + + // DefaultHistogramAggregation corresponds to the JSON schema field + // "default_histogram_aggregation". + DefaultHistogramAggregation *OTLPMetricDefaultHistogramAggregation `json:"default_histogram_aggregation,omitempty" yaml:"default_histogram_aggregation,omitempty" mapstructure:"default_histogram_aggregation,omitempty"` + + // Endpoint corresponds to the JSON schema field "endpoint". + Endpoint string `json:"endpoint" yaml:"endpoint" mapstructure:"endpoint"` + + // Headers corresponds to the JSON schema field "headers". + Headers Headers `json:"headers,omitempty" yaml:"headers,omitempty" mapstructure:"headers,omitempty"` + + // Insecure corresponds to the JSON schema field "insecure". + Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` + + // Protocol corresponds to the JSON schema field "protocol". + Protocol string `json:"protocol" yaml:"protocol" mapstructure:"protocol"` + + // TemporalityPreference corresponds to the JSON schema field + // "temporality_preference". + TemporalityPreference *string `json:"temporality_preference,omitempty" yaml:"temporality_preference,omitempty" mapstructure:"temporality_preference,omitempty"` + + // Timeout corresponds to the JSON schema field "timeout". + Timeout *int `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"` +} + +type OTLPMetricDefaultHistogramAggregation string + +const OTLPMetricDefaultHistogramAggregationBase2ExponentialBucketHistogram OTLPMetricDefaultHistogramAggregation = "base2_exponential_bucket_histogram" +const OTLPMetricDefaultHistogramAggregationExplicitBucketHistogram OTLPMetricDefaultHistogramAggregation = "explicit_bucket_histogram" + +var enumValues_OTLPMetricDefaultHistogramAggregation = []interface{}{ + "explicit_bucket_histogram", + "base2_exponential_bucket_histogram", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *OTLPMetricDefaultHistogramAggregation) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_OTLPMetricDefaultHistogramAggregation { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_OTLPMetricDefaultHistogramAggregation, v) + } + *j = OTLPMetricDefaultHistogramAggregation(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *OTLPMetric) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if _, ok := raw["endpoint"]; raw != nil && !ok { + return fmt.Errorf("field endpoint in OTLPMetric: required") + } + if _, ok := raw["protocol"]; raw != nil && !ok { + return fmt.Errorf("field protocol in OTLPMetric: required") + } + type Plain OTLPMetric + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = OTLPMetric(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
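// Like the other generated validators in this file, it first unmarshals
// into a raw map to confirm the required keys ("endpoint", "protocol") are
// present, then decodes into the method-less Plain alias so that decoding
// does not recurse back into this UnmarshalJSON.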
+func (j *OTLP) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if _, ok := raw["endpoint"]; raw != nil && !ok { + return fmt.Errorf("field endpoint in OTLP: required") + } + if _, ok := raw["protocol"]; raw != nil && !ok { + return fmt.Errorf("field protocol in OTLP: required") + } + type Plain OTLP + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = OTLP(plain) + return nil +} + +type OpenTelemetryConfiguration struct { + // AttributeLimits corresponds to the JSON schema field "attribute_limits". + AttributeLimits *AttributeLimits `json:"attribute_limits,omitempty" yaml:"attribute_limits,omitempty" mapstructure:"attribute_limits,omitempty"` + + // Disabled corresponds to the JSON schema field "disabled". + Disabled *bool `json:"disabled,omitempty" yaml:"disabled,omitempty" mapstructure:"disabled,omitempty"` + + // FileFormat corresponds to the JSON schema field "file_format". + FileFormat string `json:"file_format" yaml:"file_format" mapstructure:"file_format"` + + // LoggerProvider corresponds to the JSON schema field "logger_provider". + LoggerProvider *LoggerProvider `json:"logger_provider,omitempty" yaml:"logger_provider,omitempty" mapstructure:"logger_provider,omitempty"` + + // MeterProvider corresponds to the JSON schema field "meter_provider". + MeterProvider *MeterProvider `json:"meter_provider,omitempty" yaml:"meter_provider,omitempty" mapstructure:"meter_provider,omitempty"` + + // Propagator corresponds to the JSON schema field "propagator". + Propagator *Propagator `json:"propagator,omitempty" yaml:"propagator,omitempty" mapstructure:"propagator,omitempty"` + + // Resource corresponds to the JSON schema field "resource". + Resource *Resource `json:"resource,omitempty" yaml:"resource,omitempty" mapstructure:"resource,omitempty"` + + // TracerProvider corresponds to the JSON schema field "tracer_provider". + TracerProvider *TracerProvider `json:"tracer_provider,omitempty" yaml:"tracer_provider,omitempty" mapstructure:"tracer_provider,omitempty"` + + AdditionalProperties interface{} +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *OpenTelemetryConfiguration) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if _, ok := raw["file_format"]; raw != nil && !ok { + return fmt.Errorf("field file_format in OpenTelemetryConfiguration: required") + } + type Plain OpenTelemetryConfiguration + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = OpenTelemetryConfiguration(plain) + return nil +} + +type PeriodicMetricReader struct { + // Exporter corresponds to the JSON schema field "exporter". + Exporter MetricExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` + + // Interval corresponds to the JSON schema field "interval". + Interval *int `json:"interval,omitempty" yaml:"interval,omitempty" mapstructure:"interval,omitempty"` + + // Timeout corresponds to the JSON schema field "timeout". + Timeout *int `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *PeriodicMetricReader) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if _, ok := raw["exporter"]; raw != nil && !ok { + return fmt.Errorf("field exporter in PeriodicMetricReader: required") + } + type Plain PeriodicMetricReader + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = PeriodicMetricReader(plain) + return nil +} + +type Prometheus struct { + // Host corresponds to the JSON schema field "host". + Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` + + // Port corresponds to the JSON schema field "port". + Port *int `json:"port,omitempty" yaml:"port,omitempty" mapstructure:"port,omitempty"` + + // WithResourceConstantLabels corresponds to the JSON schema field + // "with_resource_constant_labels". + WithResourceConstantLabels *IncludeExclude `json:"with_resource_constant_labels,omitempty" yaml:"with_resource_constant_labels,omitempty" mapstructure:"with_resource_constant_labels,omitempty"` + + // WithoutScopeInfo corresponds to the JSON schema field "without_scope_info". + WithoutScopeInfo *bool `json:"without_scope_info,omitempty" yaml:"without_scope_info,omitempty" mapstructure:"without_scope_info,omitempty"` + + // WithoutTypeSuffix corresponds to the JSON schema field "without_type_suffix". + WithoutTypeSuffix *bool `json:"without_type_suffix,omitempty" yaml:"without_type_suffix,omitempty" mapstructure:"without_type_suffix,omitempty"` + + // WithoutUnits corresponds to the JSON schema field "without_units". + WithoutUnits *bool `json:"without_units,omitempty" yaml:"without_units,omitempty" mapstructure:"without_units,omitempty"` +} + +type Propagator struct { + // Composite corresponds to the JSON schema field "composite". + Composite []string `json:"composite,omitempty" yaml:"composite,omitempty" mapstructure:"composite,omitempty"` + + AdditionalProperties interface{} +} + +type PullMetricReader struct { + // Exporter corresponds to the JSON schema field "exporter". + Exporter MetricExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *PullMetricReader) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if _, ok := raw["exporter"]; raw != nil && !ok { + return fmt.Errorf("field exporter in PullMetricReader: required") + } + type Plain PullMetricReader + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = PullMetricReader(plain) + return nil +} + +type Resource struct { + // Attributes corresponds to the JSON schema field "attributes". + Attributes Attributes `json:"attributes,omitempty" yaml:"attributes,omitempty" mapstructure:"attributes,omitempty"` + + // Detectors corresponds to the JSON schema field "detectors". + Detectors *Detectors `json:"detectors,omitempty" yaml:"detectors,omitempty" mapstructure:"detectors,omitempty"` + + // SchemaUrl corresponds to the JSON schema field "schema_url". + SchemaUrl *string `json:"schema_url,omitempty" yaml:"schema_url,omitempty" mapstructure:"schema_url,omitempty"` +} + +type Sampler struct { + // AlwaysOff corresponds to the JSON schema field "always_off". + AlwaysOff SamplerAlwaysOff `json:"always_off,omitempty" yaml:"always_off,omitempty" mapstructure:"always_off,omitempty"` + + // AlwaysOn corresponds to the JSON schema field "always_on". 
+ AlwaysOn SamplerAlwaysOn `json:"always_on,omitempty" yaml:"always_on,omitempty" mapstructure:"always_on,omitempty"` + + // JaegerRemote corresponds to the JSON schema field "jaeger_remote". + JaegerRemote *SamplerJaegerRemote `json:"jaeger_remote,omitempty" yaml:"jaeger_remote,omitempty" mapstructure:"jaeger_remote,omitempty"` + + // ParentBased corresponds to the JSON schema field "parent_based". + ParentBased *SamplerParentBased `json:"parent_based,omitempty" yaml:"parent_based,omitempty" mapstructure:"parent_based,omitempty"` + + // TraceIDRatioBased corresponds to the JSON schema field "trace_id_ratio_based". + TraceIDRatioBased *SamplerTraceIDRatioBased `json:"trace_id_ratio_based,omitempty" yaml:"trace_id_ratio_based,omitempty" mapstructure:"trace_id_ratio_based,omitempty"` + + AdditionalProperties interface{} +} + +type SamplerAlwaysOff map[string]interface{} + +type SamplerAlwaysOn map[string]interface{} + +type SamplerJaegerRemote struct { + // Endpoint corresponds to the JSON schema field "endpoint". + Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` + + // InitialSampler corresponds to the JSON schema field "initial_sampler". + InitialSampler *Sampler `json:"initial_sampler,omitempty" yaml:"initial_sampler,omitempty" mapstructure:"initial_sampler,omitempty"` + + // Interval corresponds to the JSON schema field "interval". + Interval *int `json:"interval,omitempty" yaml:"interval,omitempty" mapstructure:"interval,omitempty"` +} + +type SamplerParentBased struct { + // LocalParentNotSampled corresponds to the JSON schema field + // "local_parent_not_sampled". + LocalParentNotSampled *Sampler `json:"local_parent_not_sampled,omitempty" yaml:"local_parent_not_sampled,omitempty" mapstructure:"local_parent_not_sampled,omitempty"` + + // LocalParentSampled corresponds to the JSON schema field "local_parent_sampled". + LocalParentSampled *Sampler `json:"local_parent_sampled,omitempty" yaml:"local_parent_sampled,omitempty" mapstructure:"local_parent_sampled,omitempty"` + + // RemoteParentNotSampled corresponds to the JSON schema field + // "remote_parent_not_sampled". + RemoteParentNotSampled *Sampler `json:"remote_parent_not_sampled,omitempty" yaml:"remote_parent_not_sampled,omitempty" mapstructure:"remote_parent_not_sampled,omitempty"` + + // RemoteParentSampled corresponds to the JSON schema field + // "remote_parent_sampled". + RemoteParentSampled *Sampler `json:"remote_parent_sampled,omitempty" yaml:"remote_parent_sampled,omitempty" mapstructure:"remote_parent_sampled,omitempty"` + + // Root corresponds to the JSON schema field "root". + Root *Sampler `json:"root,omitempty" yaml:"root,omitempty" mapstructure:"root,omitempty"` +} + +type SamplerTraceIDRatioBased struct { + // Ratio corresponds to the JSON schema field "ratio". + Ratio *float64 `json:"ratio,omitempty" yaml:"ratio,omitempty" mapstructure:"ratio,omitempty"` +} + +type SimpleLogRecordProcessor struct { + // Exporter corresponds to the JSON schema field "exporter". + Exporter LogRecordExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SimpleLogRecordProcessor) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if _, ok := raw["exporter"]; raw != nil && !ok { + return fmt.Errorf("field exporter in SimpleLogRecordProcessor: required") + } + type Plain SimpleLogRecordProcessor + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SimpleLogRecordProcessor(plain) + return nil +} + +type SimpleSpanProcessor struct { + // Exporter corresponds to the JSON schema field "exporter". + Exporter SpanExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SimpleSpanProcessor) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if _, ok := raw["exporter"]; raw != nil && !ok { + return fmt.Errorf("field exporter in SimpleSpanProcessor: required") + } + type Plain SimpleSpanProcessor + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SimpleSpanProcessor(plain) + return nil +} + +type SpanExporter struct { + // Console corresponds to the JSON schema field "console". + Console Console `json:"console,omitempty" yaml:"console,omitempty" mapstructure:"console,omitempty"` + + // OTLP corresponds to the JSON schema field "otlp". + OTLP *OTLP `json:"otlp,omitempty" yaml:"otlp,omitempty" mapstructure:"otlp,omitempty"` + + // Zipkin corresponds to the JSON schema field "zipkin". + Zipkin *Zipkin `json:"zipkin,omitempty" yaml:"zipkin,omitempty" mapstructure:"zipkin,omitempty"` + + AdditionalProperties interface{} +} + +type SpanLimits struct { + // AttributeCountLimit corresponds to the JSON schema field + // "attribute_count_limit". + AttributeCountLimit *int `json:"attribute_count_limit,omitempty" yaml:"attribute_count_limit,omitempty" mapstructure:"attribute_count_limit,omitempty"` + + // AttributeValueLengthLimit corresponds to the JSON schema field + // "attribute_value_length_limit". + AttributeValueLengthLimit *int `json:"attribute_value_length_limit,omitempty" yaml:"attribute_value_length_limit,omitempty" mapstructure:"attribute_value_length_limit,omitempty"` + + // EventAttributeCountLimit corresponds to the JSON schema field + // "event_attribute_count_limit". + EventAttributeCountLimit *int `json:"event_attribute_count_limit,omitempty" yaml:"event_attribute_count_limit,omitempty" mapstructure:"event_attribute_count_limit,omitempty"` + + // EventCountLimit corresponds to the JSON schema field "event_count_limit". + EventCountLimit *int `json:"event_count_limit,omitempty" yaml:"event_count_limit,omitempty" mapstructure:"event_count_limit,omitempty"` + + // LinkAttributeCountLimit corresponds to the JSON schema field + // "link_attribute_count_limit". + LinkAttributeCountLimit *int `json:"link_attribute_count_limit,omitempty" yaml:"link_attribute_count_limit,omitempty" mapstructure:"link_attribute_count_limit,omitempty"` + + // LinkCountLimit corresponds to the JSON schema field "link_count_limit". + LinkCountLimit *int `json:"link_count_limit,omitempty" yaml:"link_count_limit,omitempty" mapstructure:"link_count_limit,omitempty"` +} + +type SpanProcessor struct { + // Batch corresponds to the JSON schema field "batch". + Batch *BatchSpanProcessor `json:"batch,omitempty" yaml:"batch,omitempty" mapstructure:"batch,omitempty"` + + // Simple corresponds to the JSON schema field "simple". 
+ Simple *SimpleSpanProcessor `json:"simple,omitempty" yaml:"simple,omitempty" mapstructure:"simple,omitempty"` + + AdditionalProperties interface{} +} + +type TracerProvider struct { + // Limits corresponds to the JSON schema field "limits". + Limits *SpanLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` + + // Processors corresponds to the JSON schema field "processors". + Processors []SpanProcessor `json:"processors,omitempty" yaml:"processors,omitempty" mapstructure:"processors,omitempty"` + + // Sampler corresponds to the JSON schema field "sampler". + Sampler *Sampler `json:"sampler,omitempty" yaml:"sampler,omitempty" mapstructure:"sampler,omitempty"` +} + +type View struct { + // Selector corresponds to the JSON schema field "selector". + Selector *ViewSelector `json:"selector,omitempty" yaml:"selector,omitempty" mapstructure:"selector,omitempty"` + + // Stream corresponds to the JSON schema field "stream". + Stream *ViewStream `json:"stream,omitempty" yaml:"stream,omitempty" mapstructure:"stream,omitempty"` +} + +type ViewSelector struct { + // InstrumentName corresponds to the JSON schema field "instrument_name". + InstrumentName *string `json:"instrument_name,omitempty" yaml:"instrument_name,omitempty" mapstructure:"instrument_name,omitempty"` + + // InstrumentType corresponds to the JSON schema field "instrument_type". + InstrumentType *ViewSelectorInstrumentType `json:"instrument_type,omitempty" yaml:"instrument_type,omitempty" mapstructure:"instrument_type,omitempty"` + + // MeterName corresponds to the JSON schema field "meter_name". + MeterName *string `json:"meter_name,omitempty" yaml:"meter_name,omitempty" mapstructure:"meter_name,omitempty"` + + // MeterSchemaUrl corresponds to the JSON schema field "meter_schema_url". + MeterSchemaUrl *string `json:"meter_schema_url,omitempty" yaml:"meter_schema_url,omitempty" mapstructure:"meter_schema_url,omitempty"` + + // MeterVersion corresponds to the JSON schema field "meter_version". + MeterVersion *string `json:"meter_version,omitempty" yaml:"meter_version,omitempty" mapstructure:"meter_version,omitempty"` + + // Unit corresponds to the JSON schema field "unit". + Unit *string `json:"unit,omitempty" yaml:"unit,omitempty" mapstructure:"unit,omitempty"` +} + +type ViewSelectorInstrumentType string + +const ViewSelectorInstrumentTypeCounter ViewSelectorInstrumentType = "counter" +const ViewSelectorInstrumentTypeHistogram ViewSelectorInstrumentType = "histogram" +const ViewSelectorInstrumentTypeObservableCounter ViewSelectorInstrumentType = "observable_counter" +const ViewSelectorInstrumentTypeObservableGauge ViewSelectorInstrumentType = "observable_gauge" +const ViewSelectorInstrumentTypeObservableUpDownCounter ViewSelectorInstrumentType = "observable_up_down_counter" +const ViewSelectorInstrumentTypeUpDownCounter ViewSelectorInstrumentType = "up_down_counter" + +var enumValues_ViewSelectorInstrumentType = []interface{}{ + "counter", + "histogram", + "observable_counter", + "observable_gauge", + "observable_up_down_counter", + "up_down_counter", +} + +// UnmarshalJSON implements json.Unmarshaler. 
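// For enum-valued types the generated code additionally checks the decoded
// string against the corresponding enumValues_ slice, rejecting any value
// outside the set allowed by the schema.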
+func (j *ViewSelectorInstrumentType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_ViewSelectorInstrumentType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_ViewSelectorInstrumentType, v) + } + *j = ViewSelectorInstrumentType(v) + return nil +} + +type ViewStream struct { + // Aggregation corresponds to the JSON schema field "aggregation". + Aggregation *ViewStreamAggregation `json:"aggregation,omitempty" yaml:"aggregation,omitempty" mapstructure:"aggregation,omitempty"` + + // AttributeKeys corresponds to the JSON schema field "attribute_keys". + AttributeKeys []string `json:"attribute_keys,omitempty" yaml:"attribute_keys,omitempty" mapstructure:"attribute_keys,omitempty"` + + // Description corresponds to the JSON schema field "description". + Description *string `json:"description,omitempty" yaml:"description,omitempty" mapstructure:"description,omitempty"` + + // Name corresponds to the JSON schema field "name". + Name *string `json:"name,omitempty" yaml:"name,omitempty" mapstructure:"name,omitempty"` +} + +type ViewStreamAggregation struct { + // Base2ExponentialBucketHistogram corresponds to the JSON schema field + // "base2_exponential_bucket_histogram". + Base2ExponentialBucketHistogram *ViewStreamAggregationBase2ExponentialBucketHistogram `json:"base2_exponential_bucket_histogram,omitempty" yaml:"base2_exponential_bucket_histogram,omitempty" mapstructure:"base2_exponential_bucket_histogram,omitempty"` + + // Default corresponds to the JSON schema field "default". + Default ViewStreamAggregationDefault `json:"default,omitempty" yaml:"default,omitempty" mapstructure:"default,omitempty"` + + // Drop corresponds to the JSON schema field "drop". + Drop ViewStreamAggregationDrop `json:"drop,omitempty" yaml:"drop,omitempty" mapstructure:"drop,omitempty"` + + // ExplicitBucketHistogram corresponds to the JSON schema field + // "explicit_bucket_histogram". + ExplicitBucketHistogram *ViewStreamAggregationExplicitBucketHistogram `json:"explicit_bucket_histogram,omitempty" yaml:"explicit_bucket_histogram,omitempty" mapstructure:"explicit_bucket_histogram,omitempty"` + + // LastValue corresponds to the JSON schema field "last_value". + LastValue ViewStreamAggregationLastValue `json:"last_value,omitempty" yaml:"last_value,omitempty" mapstructure:"last_value,omitempty"` + + // Sum corresponds to the JSON schema field "sum". + Sum ViewStreamAggregationSum `json:"sum,omitempty" yaml:"sum,omitempty" mapstructure:"sum,omitempty"` +} + +type ViewStreamAggregationBase2ExponentialBucketHistogram struct { + // MaxScale corresponds to the JSON schema field "max_scale". + MaxScale *int `json:"max_scale,omitempty" yaml:"max_scale,omitempty" mapstructure:"max_scale,omitempty"` + + // MaxSize corresponds to the JSON schema field "max_size". + MaxSize *int `json:"max_size,omitempty" yaml:"max_size,omitempty" mapstructure:"max_size,omitempty"` + + // RecordMinMax corresponds to the JSON schema field "record_min_max". + RecordMinMax *bool `json:"record_min_max,omitempty" yaml:"record_min_max,omitempty" mapstructure:"record_min_max,omitempty"` +} + +type ViewStreamAggregationDefault map[string]interface{} + +type ViewStreamAggregationDrop map[string]interface{} + +type ViewStreamAggregationExplicitBucketHistogram struct { + // Boundaries corresponds to the JSON schema field "boundaries". 
+ Boundaries []float64 `json:"boundaries,omitempty" yaml:"boundaries,omitempty" mapstructure:"boundaries,omitempty"` + + // RecordMinMax corresponds to the JSON schema field "record_min_max". + RecordMinMax *bool `json:"record_min_max,omitempty" yaml:"record_min_max,omitempty" mapstructure:"record_min_max,omitempty"` +} + +type ViewStreamAggregationLastValue map[string]interface{} + +type ViewStreamAggregationSum map[string]interface{} + +type Zipkin struct { + // Endpoint corresponds to the JSON schema field "endpoint". + Endpoint string `json:"endpoint" yaml:"endpoint" mapstructure:"endpoint"` + + // Timeout corresponds to the JSON schema field "timeout". + Timeout *int `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *Zipkin) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if _, ok := raw["endpoint"]; raw != nil && !ok { + return fmt.Errorf("field endpoint in Zipkin: required") + } + type Plain Zipkin + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = Zipkin(plain) + return nil +} diff --git a/config/v0.2.0/log.go b/config/v0.2.0/log.go new file mode 100644 index 00000000000..bd302cfa75a --- /dev/null +++ b/config/v0.2.0/log.go @@ -0,0 +1,155 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package config // import "go.opentelemetry.io/contrib/config/v0.2.0" + +import ( + "context" + "errors" + "fmt" + "net/url" + "time" + + "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp" + "go.opentelemetry.io/otel/exporters/stdout/stdoutlog" + "go.opentelemetry.io/otel/log" + "go.opentelemetry.io/otel/log/noop" + sdklog "go.opentelemetry.io/otel/sdk/log" + "go.opentelemetry.io/otel/sdk/resource" +) + +func loggerProvider(cfg configOptions, res *resource.Resource) (log.LoggerProvider, shutdownFunc, error) { + if cfg.opentelemetryConfig.LoggerProvider == nil { + return noop.NewLoggerProvider(), noopShutdown, nil + } + opts := []sdklog.LoggerProviderOption{ + sdklog.WithResource(res), + } + var errs []error + for _, processor := range cfg.opentelemetryConfig.LoggerProvider.Processors { + sp, err := logProcessor(cfg.ctx, processor) + if err == nil { + opts = append(opts, sdklog.WithProcessor(sp)) + } else { + errs = append(errs, err) + } + } + + if len(errs) > 0 { + return noop.NewLoggerProvider(), noopShutdown, errors.Join(errs...) + } + + lp := sdklog.NewLoggerProvider(opts...) 
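// The provider's own Shutdown method doubles as the shutdownFunc that
// SDK.Shutdown later invokes.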
+ return lp, lp.Shutdown, nil +} + +func logProcessor(ctx context.Context, processor LogRecordProcessor) (sdklog.Processor, error) { + if processor.Batch != nil && processor.Simple != nil { + return nil, errors.New("must not specify multiple log processor type") + } + if processor.Batch != nil { + exp, err := logExporter(ctx, processor.Batch.Exporter) + if err != nil { + return nil, err + } + return batchLogProcessor(processor.Batch, exp) + } + if processor.Simple != nil { + exp, err := logExporter(ctx, processor.Simple.Exporter) + if err != nil { + return nil, err + } + return sdklog.NewSimpleProcessor(exp), nil + } + return nil, errors.New("unsupported log processor type, must be one of simple or batch") +} + +func logExporter(ctx context.Context, exporter LogRecordExporter) (sdklog.Exporter, error) { + if exporter.Console != nil && exporter.OTLP != nil { + return nil, errors.New("must not specify multiple exporters") + } + + if exporter.Console != nil { + return stdoutlog.New( + stdoutlog.WithPrettyPrint(), + ) + } + + if exporter.OTLP != nil { + switch exporter.OTLP.Protocol { + case protocolProtobufHTTP: + return otlpHTTPLogExporter(ctx, exporter.OTLP) + default: + return nil, fmt.Errorf("unsupported protocol %q", exporter.OTLP.Protocol) + } + } + return nil, errors.New("no valid log exporter") +} + +func batchLogProcessor(blp *BatchLogRecordProcessor, exp sdklog.Exporter) (*sdklog.BatchProcessor, error) { + var opts []sdklog.BatchProcessorOption + if blp.ExportTimeout != nil { + if *blp.ExportTimeout < 0 { + return nil, fmt.Errorf("invalid export timeout %d", *blp.ExportTimeout) + } + opts = append(opts, sdklog.WithExportTimeout(time.Millisecond*time.Duration(*blp.ExportTimeout))) + } + if blp.MaxExportBatchSize != nil { + if *blp.MaxExportBatchSize < 0 { + return nil, fmt.Errorf("invalid batch size %d", *blp.MaxExportBatchSize) + } + opts = append(opts, sdklog.WithExportMaxBatchSize(*blp.MaxExportBatchSize)) + } + if blp.MaxQueueSize != nil { + if *blp.MaxQueueSize < 0 { + return nil, fmt.Errorf("invalid queue size %d", *blp.MaxQueueSize) + } + opts = append(opts, sdklog.WithMaxQueueSize(*blp.MaxQueueSize)) + } + + if blp.ScheduleDelay != nil { + if *blp.ScheduleDelay < 0 { + return nil, fmt.Errorf("invalid schedule delay %d", *blp.ScheduleDelay) + } + opts = append(opts, sdklog.WithExportInterval(time.Millisecond*time.Duration(*blp.ScheduleDelay))) + } + + return sdklog.NewBatchProcessor(exp, opts...), nil +} + +func otlpHTTPLogExporter(ctx context.Context, otlpConfig *OTLP) (sdklog.Exporter, error) { + var opts []otlploghttp.Option + + if len(otlpConfig.Endpoint) > 0 { + u, err := url.ParseRequestURI(otlpConfig.Endpoint) + if err != nil { + return nil, err + } + opts = append(opts, otlploghttp.WithEndpoint(u.Host)) + + if u.Scheme == "http" { + opts = append(opts, otlploghttp.WithInsecure()) + } + if len(u.Path) > 0 { + opts = append(opts, otlploghttp.WithURLPath(u.Path)) + } + } + if otlpConfig.Compression != nil { + switch *otlpConfig.Compression { + case compressionGzip: + opts = append(opts, otlploghttp.WithCompression(otlploghttp.GzipCompression)) + case compressionNone: + opts = append(opts, otlploghttp.WithCompression(otlploghttp.NoCompression)) + default: + return nil, fmt.Errorf("unsupported compression %q", *otlpConfig.Compression) + } + } + if otlpConfig.Timeout != nil && *otlpConfig.Timeout > 0 { + opts = append(opts, otlploghttp.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout))) + } + if len(otlpConfig.Headers) > 0 { + opts = append(opts, 
otlploghttp.WithHeaders(otlpConfig.Headers)) + } + + return otlploghttp.New(ctx, opts...) +} diff --git a/config/v0.2.0/log_test.go b/config/v0.2.0/log_test.go new file mode 100644 index 00000000000..24effa7d283 --- /dev/null +++ b/config/v0.2.0/log_test.go @@ -0,0 +1,412 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package config + +import ( + "context" + "errors" + "net/url" + "reflect" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp" + "go.opentelemetry.io/otel/exporters/stdout/stdoutlog" + "go.opentelemetry.io/otel/log" + "go.opentelemetry.io/otel/log/noop" + sdklog "go.opentelemetry.io/otel/sdk/log" + "go.opentelemetry.io/otel/sdk/resource" +) + +func TestLoggerProvider(t *testing.T) { + tests := []struct { + name string + cfg configOptions + wantProvider log.LoggerProvider + wantErr error + }{ + { + name: "no-logger-provider-configured", + wantProvider: noop.NewLoggerProvider(), + }, + { + name: "error-in-config", + cfg: configOptions{ + opentelemetryConfig: OpenTelemetryConfiguration{ + LoggerProvider: &LoggerProvider{ + Processors: []LogRecordProcessor{ + { + Simple: &SimpleLogRecordProcessor{}, + Batch: &BatchLogRecordProcessor{}, + }, + }, + }, + }, + }, + wantProvider: noop.NewLoggerProvider(), + wantErr: errors.Join(errors.New("must not specify multiple log processor type")), + }, + } + for _, tt := range tests { + mp, shutdown, err := loggerProvider(tt.cfg, resource.Default()) + require.Equal(t, tt.wantProvider, mp) + assert.Equal(t, tt.wantErr, err) + require.NoError(t, shutdown(context.Background())) + } +} + +func TestLogProcessor(t *testing.T) { + ctx := context.Background() + + otlpHTTPExporter, err := otlploghttp.New(ctx) + require.NoError(t, err) + + consoleExporter, err := stdoutlog.New( + stdoutlog.WithPrettyPrint(), + ) + require.NoError(t, err) + + testCases := []struct { + name string + processor LogRecordProcessor + args any + wantErr error + wantProcessor sdklog.Processor + }{ + { + name: "no processor", + wantErr: errors.New("unsupported log processor type, must be one of simple or batch"), + }, + { + name: "multiple processor types", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + Exporter: LogRecordExporter{}, + }, + Simple: &SimpleLogRecordProcessor{}, + }, + wantErr: errors.New("must not specify multiple log processor type"), + }, + { + name: "batch processor invalid batch size otlphttp exporter", + + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + MaxExportBatchSize: ptr(-1), + Exporter: LogRecordExporter{ + OTLP: &OTLP{ + Protocol: "http/protobuf", + }, + }, + }, + }, + wantErr: errors.New("invalid batch size -1"), + }, + { + name: "batch processor invalid export timeout otlphttp exporter", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + ExportTimeout: ptr(-2), + Exporter: LogRecordExporter{ + OTLP: &OTLP{ + Protocol: "http/protobuf", + }, + }, + }, + }, + wantErr: errors.New("invalid export timeout -2"), + }, + { + name: "batch processor invalid queue size otlphttp exporter", + + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + MaxQueueSize: ptr(-3), + Exporter: LogRecordExporter{ + OTLP: &OTLP{ + Protocol: "http/protobuf", + }, + }, + }, + }, + wantErr: errors.New("invalid queue size -3"), + }, + { + name: "batch processor invalid schedule delay console exporter", + processor: LogRecordProcessor{ + Batch: 
&BatchLogRecordProcessor{ + ScheduleDelay: ptr(-4), + Exporter: LogRecordExporter{ + OTLP: &OTLP{ + Protocol: "http/protobuf", + }, + }, + }, + }, + wantErr: errors.New("invalid schedule delay -4"), + }, + { + name: "batch processor invalid exporter", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + Exporter: LogRecordExporter{}, + }, + }, + wantErr: errors.New("no valid log exporter"), + }, + { + name: "batch/console", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + MaxExportBatchSize: ptr(0), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(0), + ScheduleDelay: ptr(0), + Exporter: LogRecordExporter{ + Console: map[string]any{}, + }, + }, + }, + wantProcessor: sdklog.NewBatchProcessor(consoleExporter), + }, + { + name: "batch/otlp-http-exporter", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + MaxExportBatchSize: ptr(0), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(0), + ScheduleDelay: ptr(0), + Exporter: LogRecordExporter{ + OTLP: &OTLP{ + Protocol: "http/protobuf", + Endpoint: "http://localhost:4318", + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: map[string]string{ + "test": "test1", + }, + }, + }, + }, + }, + wantProcessor: sdklog.NewBatchProcessor(otlpHTTPExporter), + }, + { + name: "batch/otlp-http-exporter-with-path", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + MaxExportBatchSize: ptr(0), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(0), + ScheduleDelay: ptr(0), + Exporter: LogRecordExporter{ + OTLP: &OTLP{ + Protocol: "http/protobuf", + Endpoint: "http://localhost:4318/path/123", + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: map[string]string{ + "test": "test1", + }, + }, + }, + }, + }, + wantProcessor: sdklog.NewBatchProcessor(otlpHTTPExporter), + }, + { + name: "batch/otlp-http-exporter-no-endpoint", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + MaxExportBatchSize: ptr(0), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(0), + ScheduleDelay: ptr(0), + Exporter: LogRecordExporter{ + OTLP: &OTLP{ + Protocol: "http/protobuf", + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: map[string]string{ + "test": "test1", + }, + }, + }, + }, + }, + wantProcessor: sdklog.NewBatchProcessor(otlpHTTPExporter), + }, + { + name: "batch/otlp-http-exporter-no-scheme", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + MaxExportBatchSize: ptr(0), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(0), + ScheduleDelay: ptr(0), + Exporter: LogRecordExporter{ + OTLP: &OTLP{ + Protocol: "http/protobuf", + Endpoint: "localhost:4318", + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: map[string]string{ + "test": "test1", + }, + }, + }, + }, + }, + wantProcessor: sdklog.NewBatchProcessor(otlpHTTPExporter), + }, + { + name: "batch/otlp-http-invalid-protocol", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + MaxExportBatchSize: ptr(0), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(0), + ScheduleDelay: ptr(0), + Exporter: LogRecordExporter{ + OTLP: &OTLP{ + Protocol: "invalid", + Endpoint: "https://10.0.0.0:443", + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: map[string]string{ + "test": "test1", + }, + }, + }, + }, + }, + wantErr: errors.New("unsupported protocol \"invalid\""), + }, + { + name: "batch/otlp-http-invalid-endpoint", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + MaxExportBatchSize: ptr(0), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(0), + ScheduleDelay: ptr(0), + Exporter: 
LogRecordExporter{ + OTLP: &OTLP{ + Protocol: "http/protobuf", + Endpoint: " ", + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: map[string]string{ + "test": "test1", + }, + }, + }, + }, + }, + wantErr: &url.Error{Op: "parse", URL: " ", Err: errors.New("invalid URI for request")}, + }, + { + name: "batch/otlp-http-none-compression", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + MaxExportBatchSize: ptr(0), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(0), + ScheduleDelay: ptr(0), + Exporter: LogRecordExporter{ + OTLP: &OTLP{ + Protocol: "http/protobuf", + Endpoint: "localhost:4318", + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: map[string]string{ + "test": "test1", + }, + }, + }, + }, + }, + wantProcessor: sdklog.NewBatchProcessor(otlpHTTPExporter), + }, + { + name: "batch/otlp-http-invalid-compression", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + MaxExportBatchSize: ptr(0), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(0), + ScheduleDelay: ptr(0), + Exporter: LogRecordExporter{ + OTLP: &OTLP{ + Protocol: "http/protobuf", + Endpoint: "localhost:4318", + Compression: ptr("invalid"), + Timeout: ptr(1000), + Headers: map[string]string{ + "test": "test1", + }, + }, + }, + }, + }, + wantErr: errors.New("unsupported compression \"invalid\""), + }, + { + name: "simple/no-exporter", + processor: LogRecordProcessor{ + Simple: &SimpleLogRecordProcessor{ + Exporter: LogRecordExporter{}, + }, + }, + wantErr: errors.New("no valid log exporter"), + }, + { + name: "simple/console", + processor: LogRecordProcessor{ + Simple: &SimpleLogRecordProcessor{ + Exporter: LogRecordExporter{ + Console: map[string]any{}, + }, + }, + }, + wantProcessor: sdklog.NewSimpleProcessor(consoleExporter), + }, + { + name: "simple/otlp-exporter", + processor: LogRecordProcessor{ + Simple: &SimpleLogRecordProcessor{ + Exporter: LogRecordExporter{ + OTLP: &OTLP{ + Protocol: "http/protobuf", + Endpoint: "localhost:4318", + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: map[string]string{ + "test": "test1", + }, + }, + }, + }, + }, + wantProcessor: sdklog.NewSimpleProcessor(otlpHTTPExporter), + }, + } + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + got, err := logProcessor(context.Background(), tt.processor) + require.Equal(t, tt.wantErr, err) + if tt.wantProcessor == nil { + require.Nil(t, got) + } else { + require.Equal(t, reflect.TypeOf(tt.wantProcessor), reflect.TypeOf(got)) + wantExporterType := reflect.Indirect(reflect.ValueOf(tt.wantProcessor)).FieldByName("exporter").Elem().Type() + gotExporterType := reflect.Indirect(reflect.ValueOf(got)).FieldByName("exporter").Elem().Type() + require.Equal(t, wantExporterType.String(), gotExporterType.String()) + } + }) + } +} diff --git a/config/v0.2.0/metric.go b/config/v0.2.0/metric.go new file mode 100644 index 00000000000..458da85f909 --- /dev/null +++ b/config/v0.2.0/metric.go @@ -0,0 +1,496 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package config // import "go.opentelemetry.io/contrib/config/v0.2.0" + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "math" + "net" + "net/http" + "net/url" + "os" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" + 
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" + otelprom "go.opentelemetry.io/otel/exporters/prometheus" + "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" + "go.opentelemetry.io/otel/sdk/instrumentation" + sdkmetric "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + "go.opentelemetry.io/otel/sdk/resource" +) + +var zeroScope instrumentation.Scope + +const instrumentKindUndefined = sdkmetric.InstrumentKind(0) + +func meterProvider(cfg configOptions, res *resource.Resource) (metric.MeterProvider, shutdownFunc, error) { + if cfg.opentelemetryConfig.MeterProvider == nil { + return noop.NewMeterProvider(), noopShutdown, nil + } + opts := []sdkmetric.Option{ + sdkmetric.WithResource(res), + } + + var errs []error + for _, reader := range cfg.opentelemetryConfig.MeterProvider.Readers { + r, err := metricReader(cfg.ctx, reader) + if err == nil { + opts = append(opts, sdkmetric.WithReader(r)) + } else { + errs = append(errs, err) + } + } + for _, vw := range cfg.opentelemetryConfig.MeterProvider.Views { + v, err := view(vw) + if err == nil { + opts = append(opts, sdkmetric.WithView(v)) + } else { + errs = append(errs, err) + } + } + + if len(errs) > 0 { + return noop.NewMeterProvider(), noopShutdown, errors.Join(errs...) + } + + mp := sdkmetric.NewMeterProvider(opts...) + return mp, mp.Shutdown, nil +} + +func metricReader(ctx context.Context, r MetricReader) (sdkmetric.Reader, error) { + if r.Periodic != nil && r.Pull != nil { + return nil, errors.New("must not specify multiple metric reader type") + } + + if r.Periodic != nil { + var opts []sdkmetric.PeriodicReaderOption + if r.Periodic.Interval != nil { + opts = append(opts, sdkmetric.WithInterval(time.Duration(*r.Periodic.Interval)*time.Millisecond)) + } + + if r.Periodic.Timeout != nil { + opts = append(opts, sdkmetric.WithTimeout(time.Duration(*r.Periodic.Timeout)*time.Millisecond)) + } + return periodicExporter(ctx, r.Periodic.Exporter, opts...) 
+ } + + if r.Pull != nil { + return pullReader(ctx, r.Pull.Exporter) + } + return nil, errors.New("no valid metric reader") +} + +func pullReader(ctx context.Context, exporter MetricExporter) (sdkmetric.Reader, error) { + if exporter.Prometheus != nil { + return prometheusReader(ctx, exporter.Prometheus) + } + return nil, errors.New("no valid metric exporter") +} + +func periodicExporter(ctx context.Context, exporter MetricExporter, opts ...sdkmetric.PeriodicReaderOption) (sdkmetric.Reader, error) { + if exporter.Console != nil && exporter.OTLP != nil { + return nil, errors.New("must not specify multiple exporters") + } + if exporter.Console != nil { + enc := json.NewEncoder(os.Stdout) + enc.SetIndent("", " ") + + exp, err := stdoutmetric.New( + stdoutmetric.WithEncoder(enc), + ) + if err != nil { + return nil, err + } + return sdkmetric.NewPeriodicReader(exp, opts...), nil + } + if exporter.OTLP != nil { + var err error + var exp sdkmetric.Exporter + switch exporter.OTLP.Protocol { + case protocolProtobufHTTP: + exp, err = otlpHTTPMetricExporter(ctx, exporter.OTLP) + case protocolProtobufGRPC: + exp, err = otlpGRPCMetricExporter(ctx, exporter.OTLP) + default: + return nil, fmt.Errorf("unsupported protocol %q", exporter.OTLP.Protocol) + } + if err != nil { + return nil, err + } + return sdkmetric.NewPeriodicReader(exp, opts...), nil + } + return nil, errors.New("no valid metric exporter") +} + +func otlpHTTPMetricExporter(ctx context.Context, otlpConfig *OTLPMetric) (sdkmetric.Exporter, error) { + opts := []otlpmetrichttp.Option{} + + if len(otlpConfig.Endpoint) > 0 { + u, err := url.ParseRequestURI(otlpConfig.Endpoint) + if err != nil { + return nil, err + } + opts = append(opts, otlpmetrichttp.WithEndpoint(u.Host)) + + if u.Scheme == "http" { + opts = append(opts, otlpmetrichttp.WithInsecure()) + } + if len(u.Path) > 0 { + opts = append(opts, otlpmetrichttp.WithURLPath(u.Path)) + } + } + if otlpConfig.Compression != nil { + switch *otlpConfig.Compression { + case compressionGzip: + opts = append(opts, otlpmetrichttp.WithCompression(otlpmetrichttp.GzipCompression)) + case compressionNone: + opts = append(opts, otlpmetrichttp.WithCompression(otlpmetrichttp.NoCompression)) + default: + return nil, fmt.Errorf("unsupported compression %q", *otlpConfig.Compression) + } + } + if otlpConfig.Timeout != nil { + opts = append(opts, otlpmetrichttp.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout))) + } + if len(otlpConfig.Headers) > 0 { + opts = append(opts, otlpmetrichttp.WithHeaders(otlpConfig.Headers)) + } + if otlpConfig.TemporalityPreference != nil { + switch *otlpConfig.TemporalityPreference { + case "delta": + opts = append(opts, otlpmetrichttp.WithTemporalitySelector(deltaTemporality)) + case "cumulative": + opts = append(opts, otlpmetrichttp.WithTemporalitySelector(cumulativeTemporality)) + case "lowmemory": + opts = append(opts, otlpmetrichttp.WithTemporalitySelector(lowMemory)) + default: + return nil, fmt.Errorf("unsupported temporality preference %q", *otlpConfig.TemporalityPreference) + } + } + + return otlpmetrichttp.New(ctx, opts...) +} + +func otlpGRPCMetricExporter(ctx context.Context, otlpConfig *OTLPMetric) (sdkmetric.Exporter, error) { + var opts []otlpmetricgrpc.Option + + if len(otlpConfig.Endpoint) > 0 { + u, err := url.ParseRequestURI(otlpConfig.Endpoint) + if err != nil { + return nil, err + } + // ParseRequestURI leaves the Host field empty when no + // scheme is specified (i.e. localhost:4317). 
This check is + // here to support the case where a user may not specify a + // scheme. The code does its best effort here by using + // otlpConfig.Endpoint as-is in that case. + if u.Host != "" { + opts = append(opts, otlpmetricgrpc.WithEndpoint(u.Host)) + } else { + opts = append(opts, otlpmetricgrpc.WithEndpoint(otlpConfig.Endpoint)) + } + if u.Scheme == "http" { + opts = append(opts, otlpmetricgrpc.WithInsecure()) + } + }
+
+ if otlpConfig.Compression != nil { + switch *otlpConfig.Compression { + case compressionGzip: + opts = append(opts, otlpmetricgrpc.WithCompressor(*otlpConfig.Compression)) + case compressionNone: + // none requires no options + default: + return nil, fmt.Errorf("unsupported compression %q", *otlpConfig.Compression) + } + }
+ if otlpConfig.Timeout != nil && *otlpConfig.Timeout > 0 { + opts = append(opts, otlpmetricgrpc.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout))) + }
+ if len(otlpConfig.Headers) > 0 { + opts = append(opts, otlpmetricgrpc.WithHeaders(otlpConfig.Headers)) + }
+ if otlpConfig.TemporalityPreference != nil { + switch *otlpConfig.TemporalityPreference { + case "delta": + opts = append(opts, otlpmetricgrpc.WithTemporalitySelector(deltaTemporality)) + case "cumulative": + opts = append(opts, otlpmetricgrpc.WithTemporalitySelector(cumulativeTemporality)) + case "lowmemory": + opts = append(opts, otlpmetricgrpc.WithTemporalitySelector(lowMemory)) + default: + return nil, fmt.Errorf("unsupported temporality preference %q", *otlpConfig.TemporalityPreference) + } + }
+
+ return otlpmetricgrpc.New(ctx, opts...) +}
+
+func cumulativeTemporality(sdkmetric.InstrumentKind) metricdata.Temporality { + return metricdata.CumulativeTemporality +}
+
+func deltaTemporality(ik sdkmetric.InstrumentKind) metricdata.Temporality { + switch ik { + case sdkmetric.InstrumentKindCounter, sdkmetric.InstrumentKindHistogram, sdkmetric.InstrumentKindObservableCounter: + return metricdata.DeltaTemporality + default: + return metricdata.CumulativeTemporality + } +}
+
+func lowMemory(ik sdkmetric.InstrumentKind) metricdata.Temporality { + switch ik { + case sdkmetric.InstrumentKindCounter, sdkmetric.InstrumentKindHistogram: + return metricdata.DeltaTemporality + default: + return metricdata.CumulativeTemporality + } +}
+
+func prometheusReader(ctx context.Context, prometheusConfig *Prometheus) (sdkmetric.Reader, error) { + var opts []otelprom.Option + if prometheusConfig.Host == nil { + return nil, errors.New("host must be specified") + } + if prometheusConfig.Port == nil { + return nil, errors.New("port must be specified") + }
+ if prometheusConfig.WithoutScopeInfo != nil && *prometheusConfig.WithoutScopeInfo { + opts = append(opts, otelprom.WithoutScopeInfo()) + }
+ if prometheusConfig.WithoutTypeSuffix != nil && *prometheusConfig.WithoutTypeSuffix { + opts = append(opts, otelprom.WithoutCounterSuffixes()) + }
+ if prometheusConfig.WithoutUnits != nil && *prometheusConfig.WithoutUnits { + opts = append(opts, otelprom.WithoutUnits()) + }
+ if prometheusConfig.WithResourceConstantLabels != nil { + if prometheusConfig.WithResourceConstantLabels.Included != nil { + var keys []attribute.Key + for _, val := range prometheusConfig.WithResourceConstantLabels.Included { + keys = append(keys, attribute.Key(val)) + } + opts = append(opts, otelprom.WithResourceAsConstantLabels(attribute.NewAllowKeysFilter(keys...))) + }
+ if prometheusConfig.WithResourceConstantLabels.Excluded != nil { + var keys []attribute.Key + for _, val := range prometheusConfig.WithResourceConstantLabels.Excluded { + keys = append(keys, attribute.Key(val)) + } + opts = append(opts, otelprom.WithResourceAsConstantLabels(attribute.NewDenyKeysFilter(keys...))) + } + }
+
+ reg := prometheus.NewRegistry() + opts = append(opts, otelprom.WithRegisterer(reg))
+
+ mux := http.NewServeMux() + mux.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg})) + server := http.Server{ + // Timeouts are necessary to make a server resilient to attacks, but ListenAndServe doesn't set any. + // We use values from this example: https://blog.cloudflare.com/exposing-go-on-the-internet/#:~:text=There%20are%20three%20main%20timeouts + ReadTimeout: 5 * time.Second, + WriteTimeout: 10 * time.Second, + IdleTimeout: 120 * time.Second, + Handler: mux, + }
+ addr := fmt.Sprintf("%s:%d", *prometheusConfig.Host, *prometheusConfig.Port)
+
+ reader, err := otelprom.New(opts...) + if err != nil { + return nil, fmt.Errorf("error creating otel prometheus exporter: %w", err) + }
+ lis, err := net.Listen("tcp", addr) + if err != nil { + return nil, errors.Join( + fmt.Errorf("binding address %s for Prometheus exporter: %w", addr, err), + reader.Shutdown(ctx), + ) + }
+
+ go func() { + // ErrServerClosed is returned on graceful shutdown, so only unexpected errors are reported. + if err := server.Serve(lis); err != nil && !errors.Is(err, http.ErrServerClosed) { + otel.Handle(fmt.Errorf("the Prometheus HTTP server exited unexpectedly: %w", err)) + } + }()
+
+ return readerWithServer{reader, &server}, nil +}
+
+type readerWithServer struct { + sdkmetric.Reader + server *http.Server +}
+
+func (rws readerWithServer) Shutdown(ctx context.Context) error { + return errors.Join( + rws.Reader.Shutdown(ctx), + rws.server.Shutdown(ctx), + ) +}
+
+func view(v View) (sdkmetric.View, error) { + if v.Selector == nil { + return nil, errors.New("view: no selector provided") + }
+
+ inst, err := instrument(*v.Selector) + if err != nil { + return nil, err + }
+
+ return sdkmetric.NewView(inst, stream(v.Stream)), nil +}
+
+func instrument(vs ViewSelector) (sdkmetric.Instrument, error) { + kind, err := instrumentKind(vs.InstrumentType) + if err != nil { + return sdkmetric.Instrument{}, fmt.Errorf("view_selector: %w", err) + }
+ inst := sdkmetric.Instrument{ + Name: strOrEmpty(vs.InstrumentName), + Unit: strOrEmpty(vs.Unit), + Kind: kind, + Scope: instrumentation.Scope{ + Name: strOrEmpty(vs.MeterName), + Version: strOrEmpty(vs.MeterVersion), + SchemaURL: strOrEmpty(vs.MeterSchemaUrl), + }, + }
+
+ if instrumentIsEmpty(inst) { + return sdkmetric.Instrument{}, errors.New("view_selector: empty selector not supported") + } + return inst, nil +}
+
+func stream(vs *ViewStream) sdkmetric.Stream { + if vs == nil { + return sdkmetric.Stream{} + }
+
+ return sdkmetric.Stream{ + Name: strOrEmpty(vs.Name), + Description: strOrEmpty(vs.Description), + Aggregation: aggregation(vs.Aggregation), + AttributeFilter: attributeFilter(vs.AttributeKeys), + } +}
+
+func attributeFilter(attributeKeys []string) attribute.Filter { + var attrKeys []attribute.Key + for _, attrStr := range attributeKeys { + attrKeys = append(attrKeys, attribute.Key(attrStr)) + } + return attribute.NewAllowKeysFilter(attrKeys...) +}
+
+func aggregation(aggr *ViewStreamAggregation) sdkmetric.Aggregation { + if aggr == nil { + return nil + }
+
+ if aggr.Base2ExponentialBucketHistogram != nil { + return sdkmetric.AggregationBase2ExponentialHistogram{ + MaxSize: int32OrZero(aggr.Base2ExponentialBucketHistogram.MaxSize), + MaxScale: int32OrZero(aggr.Base2ExponentialBucketHistogram.MaxScale), + // Need to negate because config has the positive action RecordMinMax.
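+ // boolOrFalse maps a nil RecordMinMax to false, so min/max recording + // stays disabled unless the configuration explicitly enables it.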
+ NoMinMax: !boolOrFalse(aggr.Base2ExponentialBucketHistogram.RecordMinMax), + } + } + if aggr.Default != nil { + // TODO: Understand what to set here. + return nil + } + if aggr.Drop != nil { + return sdkmetric.AggregationDrop{} + } + if aggr.ExplicitBucketHistogram != nil { + return sdkmetric.AggregationExplicitBucketHistogram{ + Boundaries: aggr.ExplicitBucketHistogram.Boundaries, + // Need to negate because config has the positive action RecordMinMax. + NoMinMax: !boolOrFalse(aggr.ExplicitBucketHistogram.RecordMinMax), + } + } + if aggr.LastValue != nil { + return sdkmetric.AggregationLastValue{} + } + if aggr.Sum != nil { + return sdkmetric.AggregationSum{} + } + return nil +} + +func instrumentKind(vsit *ViewSelectorInstrumentType) (sdkmetric.InstrumentKind, error) { + if vsit == nil { + // Equivalent to instrumentKindUndefined. + return instrumentKindUndefined, nil + } + + switch *vsit { + case ViewSelectorInstrumentTypeCounter: + return sdkmetric.InstrumentKindCounter, nil + case ViewSelectorInstrumentTypeUpDownCounter: + return sdkmetric.InstrumentKindUpDownCounter, nil + case ViewSelectorInstrumentTypeHistogram: + return sdkmetric.InstrumentKindHistogram, nil + case ViewSelectorInstrumentTypeObservableCounter: + return sdkmetric.InstrumentKindObservableCounter, nil + case ViewSelectorInstrumentTypeObservableUpDownCounter: + return sdkmetric.InstrumentKindObservableUpDownCounter, nil + case ViewSelectorInstrumentTypeObservableGauge: + return sdkmetric.InstrumentKindObservableGauge, nil + } + + return instrumentKindUndefined, errors.New("instrument_type: invalid value") +} + +func instrumentIsEmpty(i sdkmetric.Instrument) bool { + return i.Name == "" && + i.Description == "" && + i.Kind == instrumentKindUndefined && + i.Unit == "" && + i.Scope == zeroScope +} + +func boolOrFalse(pBool *bool) bool { + if pBool == nil { + return false + } + return *pBool +} + +func int32OrZero(pInt *int) int32 { + if pInt == nil { + return 0 + } + i := *pInt + if i > math.MaxInt32 { + return math.MaxInt32 + } + if i < math.MinInt32 { + return math.MinInt32 + } + return int32(i) // nolint: gosec // Overflow and underflow checked above. 
+} + +func strOrEmpty(pStr *string) string { + if pStr == nil { + return "" + } + return *pStr +} diff --git a/config/v0.2.0/metric_test.go b/config/v0.2.0/metric_test.go new file mode 100644 index 00000000000..79e706a30cc --- /dev/null +++ b/config/v0.2.0/metric_test.go @@ -0,0 +1,1111 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package config + +import ( + "context" + "errors" + "net/url" + "reflect" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" + otelprom "go.opentelemetry.io/otel/exporters/prometheus" + "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" + "go.opentelemetry.io/otel/sdk/instrumentation" + sdkmetric "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/resource" +) + +func TestMeterProvider(t *testing.T) { + tests := []struct { + name string + cfg configOptions + wantProvider metric.MeterProvider + wantErr error + }{ + { + name: "no-meter-provider-configured", + wantProvider: noop.NewMeterProvider(), + }, + { + name: "error-in-config", + cfg: configOptions{ + opentelemetryConfig: OpenTelemetryConfiguration{ + MeterProvider: &MeterProvider{ + Readers: []MetricReader{ + { + Periodic: &PeriodicMetricReader{}, + Pull: &PullMetricReader{}, + }, + }, + }, + }, + }, + wantProvider: noop.NewMeterProvider(), + wantErr: errors.Join(errors.New("must not specify multiple metric reader type")), + }, + { + name: "multiple-errors-in-config", + cfg: configOptions{ + opentelemetryConfig: OpenTelemetryConfiguration{ + MeterProvider: &MeterProvider{ + Readers: []MetricReader{ + { + Periodic: &PeriodicMetricReader{}, + Pull: &PullMetricReader{}, + }, + { + Periodic: &PeriodicMetricReader{ + Exporter: MetricExporter{ + Console: Console{}, + OTLP: &OTLPMetric{}, + }, + }, + }, + }, + }, + }, + }, + wantProvider: noop.NewMeterProvider(), + wantErr: errors.Join(errors.New("must not specify multiple metric reader type"), errors.New("must not specify multiple exporters")), + }, + } + for _, tt := range tests { + mp, shutdown, err := meterProvider(tt.cfg, resource.Default()) + require.Equal(t, tt.wantProvider, mp) + assert.Equal(t, tt.wantErr, err) + require.NoError(t, shutdown(context.Background())) + } +} + +func TestReader(t *testing.T) { + consoleExporter, err := stdoutmetric.New( + stdoutmetric.WithPrettyPrint(), + ) + require.NoError(t, err) + ctx := context.Background() + otlpGRPCExporter, err := otlpmetricgrpc.New(ctx) + require.NoError(t, err) + otlpHTTPExporter, err := otlpmetrichttp.New(ctx) + require.NoError(t, err) + promExporter, err := otelprom.New() + require.NoError(t, err) + testCases := []struct { + name string + reader MetricReader + args any + wantErr error + wantReader sdkmetric.Reader + }{ + { + name: "no reader", + wantErr: errors.New("no valid metric reader"), + }, + { + name: "pull/no-exporter", + reader: MetricReader{ + Pull: &PullMetricReader{}, + }, + wantErr: errors.New("no valid metric exporter"), + }, + { + name: "pull/prometheus-no-host", + reader: MetricReader{ + Pull: &PullMetricReader{ + Exporter: MetricExporter{ + Prometheus: &Prometheus{}, + }, + }, + }, + wantErr: errors.New("host must be specified"), + }, + { + name: "pull/prometheus-no-port", + reader: MetricReader{ + Pull: 
&PullMetricReader{ + Exporter: MetricExporter{ + Prometheus: &Prometheus{ + Host: ptr("localhost"), + }, + }, + }, + }, + wantErr: errors.New("port must be specified"), + }, + { + name: "pull/prometheus", + reader: MetricReader{ + Pull: &PullMetricReader{ + Exporter: MetricExporter{ + Prometheus: &Prometheus{ + Host: ptr("localhost"), + Port: ptr(0), + WithoutScopeInfo: ptr(true), + WithoutUnits: ptr(true), + WithoutTypeSuffix: ptr(true), + WithResourceConstantLabels: &IncludeExclude{ + Included: []string{"include"}, + Excluded: []string{"exclude"}, + }, + }, + }, + }, + }, + wantReader: readerWithServer{promExporter, nil}, + }, + { + name: "periodic/otlp-exporter-invalid-protocol", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: MetricExporter{ + OTLP: &OTLPMetric{ + Protocol: "http/invalid", + }, + }, + }, + }, + wantErr: errors.New("unsupported protocol \"http/invalid\""), + }, + { + name: "periodic/otlp-grpc-exporter", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: MetricExporter{ + OTLP: &OTLPMetric{ + Protocol: "grpc/protobuf", + Endpoint: "http://localhost:4318", + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: map[string]string{ + "test": "test1", + }, + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), + }, + { + name: "periodic/otlp-grpc-exporter-with-path", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: MetricExporter{ + OTLP: &OTLPMetric{ + Protocol: "grpc/protobuf", + Endpoint: "http://localhost:4318/path/123", + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: map[string]string{ + "test": "test1", + }, + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), + }, + { + name: "periodic/otlp-grpc-exporter-no-endpoint", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: MetricExporter{ + OTLP: &OTLPMetric{ + Protocol: "grpc/protobuf", + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: map[string]string{ + "test": "test1", + }, + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), + }, + { + name: "periodic/otlp-grpc-exporter-no-scheme", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: MetricExporter{ + OTLP: &OTLPMetric{ + Protocol: "grpc/protobuf", + Endpoint: "localhost:4318", + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: map[string]string{ + "test": "test1", + }, + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), + }, + { + name: "periodic/otlp-grpc-invalid-endpoint", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: MetricExporter{ + OTLP: &OTLPMetric{ + Protocol: "grpc/protobuf", + Endpoint: " ", + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: map[string]string{ + "test": "test1", + }, + }, + }, + }, + }, + wantErr: &url.Error{Op: "parse", URL: " ", Err: errors.New("invalid URI for request")}, + }, + { + name: "periodic/otlp-grpc-none-compression", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: MetricExporter{ + OTLP: &OTLPMetric{ + Protocol: "grpc/protobuf", + Endpoint: "localhost:4318", + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: map[string]string{ + "test": "test1", + }, + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), + }, + { + name: "periodic/otlp-grpc-delta-temporality", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: MetricExporter{ + OTLP: &OTLPMetric{ + Protocol: 
"grpc/protobuf", + Endpoint: "localhost:4318", + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: map[string]string{ + "test": "test1", + }, + TemporalityPreference: ptr("delta"), + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), + }, + { + name: "periodic/otlp-grpc-cumulative-temporality", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: MetricExporter{ + OTLP: &OTLPMetric{ + Protocol: "grpc/protobuf", + Endpoint: "localhost:4318", + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: map[string]string{ + "test": "test1", + }, + TemporalityPreference: ptr("cumulative"), + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), + }, + { + name: "periodic/otlp-grpc-lowmemory-temporality", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: MetricExporter{ + OTLP: &OTLPMetric{ + Protocol: "grpc/protobuf", + Endpoint: "localhost:4318", + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: map[string]string{ + "test": "test1", + }, + TemporalityPreference: ptr("lowmemory"), + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), + }, + { + name: "periodic/otlp-grpc-invalid-temporality", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: MetricExporter{ + OTLP: &OTLPMetric{ + Protocol: "grpc/protobuf", + Endpoint: "localhost:4318", + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: map[string]string{ + "test": "test1", + }, + TemporalityPreference: ptr("invalid"), + }, + }, + }, + }, + wantErr: errors.New("unsupported temporality preference \"invalid\""), + }, + { + name: "periodic/otlp-grpc-invalid-compression", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: MetricExporter{ + OTLP: &OTLPMetric{ + Protocol: "grpc/protobuf", + Endpoint: "localhost:4318", + Compression: ptr("invalid"), + Timeout: ptr(1000), + Headers: map[string]string{ + "test": "test1", + }, + }, + }, + }, + }, + wantErr: errors.New("unsupported compression \"invalid\""), + }, + { + name: "periodic/otlp-http-exporter", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: MetricExporter{ + OTLP: &OTLPMetric{ + Protocol: "http/protobuf", + Endpoint: "http://localhost:4318", + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: map[string]string{ + "test": "test1", + }, + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), + }, + { + name: "periodic/otlp-http-exporter-with-path", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: MetricExporter{ + OTLP: &OTLPMetric{ + Protocol: "http/protobuf", + Endpoint: "http://localhost:4318/path/123", + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: map[string]string{ + "test": "test1", + }, + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), + }, + { + name: "periodic/otlp-http-exporter-no-endpoint", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: MetricExporter{ + OTLP: &OTLPMetric{ + Protocol: "http/protobuf", + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: map[string]string{ + "test": "test1", + }, + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), + }, + { + name: "periodic/otlp-http-exporter-no-scheme", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: MetricExporter{ + OTLP: &OTLPMetric{ + Protocol: "http/protobuf", + Endpoint: "localhost:4318", + Compression: ptr("gzip"), + Timeout: ptr(1000), 
+ Headers: map[string]string{ + "test": "test1", + }, + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), + }, + { + name: "periodic/otlp-http-invalid-endpoint", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: MetricExporter{ + OTLP: &OTLPMetric{ + Protocol: "http/protobuf", + Endpoint: " ", + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: map[string]string{ + "test": "test1", + }, + }, + }, + }, + }, + wantErr: &url.Error{Op: "parse", URL: " ", Err: errors.New("invalid URI for request")}, + }, + { + name: "periodic/otlp-http-none-compression", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: MetricExporter{ + OTLP: &OTLPMetric{ + Protocol: "http/protobuf", + Endpoint: "localhost:4318", + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: map[string]string{ + "test": "test1", + }, + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), + }, + { + name: "periodic/otlp-http-cumulative-temporality", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: MetricExporter{ + OTLP: &OTLPMetric{ + Protocol: "http/protobuf", + Endpoint: "localhost:4318", + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: map[string]string{ + "test": "test1", + }, + TemporalityPreference: ptr("cumulative"), + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), + }, + { + name: "periodic/otlp-http-lowmemory-temporality", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: MetricExporter{ + OTLP: &OTLPMetric{ + Protocol: "http/protobuf", + Endpoint: "localhost:4318", + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: map[string]string{ + "test": "test1", + }, + TemporalityPreference: ptr("lowmemory"), + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), + }, + { + name: "periodic/otlp-http-delta-temporality", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: MetricExporter{ + OTLP: &OTLPMetric{ + Protocol: "http/protobuf", + Endpoint: "localhost:4318", + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: map[string]string{ + "test": "test1", + }, + TemporalityPreference: ptr("delta"), + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), + }, + { + name: "periodic/otlp-http-invalid-temporality", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: MetricExporter{ + OTLP: &OTLPMetric{ + Protocol: "http/protobuf", + Endpoint: "localhost:4318", + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: map[string]string{ + "test": "test1", + }, + TemporalityPreference: ptr("invalid"), + }, + }, + }, + }, + wantErr: errors.New("unsupported temporality preference \"invalid\""), + }, + { + name: "periodic/otlp-http-invalid-compression", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: MetricExporter{ + OTLP: &OTLPMetric{ + Protocol: "http/protobuf", + Endpoint: "localhost:4318", + Compression: ptr("invalid"), + Timeout: ptr(1000), + Headers: map[string]string{ + "test": "test1", + }, + }, + }, + }, + }, + wantErr: errors.New("unsupported compression \"invalid\""), + }, + { + name: "periodic/no-exporter", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: MetricExporter{}, + }, + }, + wantErr: errors.New("no valid metric exporter"), + }, + { + name: "periodic/console-exporter", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: MetricExporter{ + Console: Console{}, 
+ }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(consoleExporter), + },
+ { + name: "periodic/console-exporter-with-extra-options", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Interval: ptr(30_000), + Timeout: ptr(5_000), + Exporter: MetricExporter{ + Console: Console{}, + }, + }, + },
+ wantReader: sdkmetric.NewPeriodicReader( + consoleExporter, + sdkmetric.WithInterval(30_000*time.Millisecond), + sdkmetric.WithTimeout(5_000*time.Millisecond), + ), + },
+ }
+ for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + got, err := metricReader(context.Background(), tt.reader) + require.Equal(t, tt.wantErr, err) + if tt.wantReader == nil { + require.Nil(t, got) + } else { + require.Equal(t, reflect.TypeOf(tt.wantReader), reflect.TypeOf(got)) + var fieldName string + switch reflect.TypeOf(tt.wantReader).String() { + case "*metric.PeriodicReader": + fieldName = "exporter" + case "config.readerWithServer": + fieldName = "Reader" + default: + fieldName = "e" + }
+ wantExporterType := reflect.Indirect(reflect.ValueOf(tt.wantReader)).FieldByName(fieldName).Elem().Type() + gotExporterType := reflect.Indirect(reflect.ValueOf(got)).FieldByName(fieldName).Elem().Type() + require.Equal(t, wantExporterType.String(), gotExporterType.String()) + require.NoError(t, got.Shutdown(context.Background())) + } + }) + } +}
+
+func TestView(t *testing.T) { + testCases := []struct { + name string + view View + args any + wantErr string + matchInstrument *sdkmetric.Instrument + wantStream sdkmetric.Stream + wantResult bool + }{
+ { + name: "no selector", + wantErr: "view: no selector provided", + },
+ { + name: "selector/invalid_type", + view: View{ + Selector: &ViewSelector{ + InstrumentType: (*ViewSelectorInstrumentType)(ptr("invalid_type")), + }, + }, + wantErr: "view_selector: instrument_type: invalid value", + },
+ { + name: "selector/empty", + view: View{ + Selector: &ViewSelector{}, + }, + wantErr: "view_selector: empty selector not supported", + },
+ { + name: "all selectors match", + view: View{ + Selector: &ViewSelector{ + InstrumentName: ptr("test_name"), + InstrumentType: (*ViewSelectorInstrumentType)(ptr("counter")), + Unit: ptr("test_unit"), + MeterName: ptr("test_meter_name"), + MeterVersion: ptr("test_meter_version"), + MeterSchemaUrl: ptr("test_schema_url"), + }, + },
+ matchInstrument: &sdkmetric.Instrument{ + Name: "test_name", + Unit: "test_unit", + Kind: sdkmetric.InstrumentKindCounter, + Scope: instrumentation.Scope{ + Name: "test_meter_name", + Version: "test_meter_version", + SchemaURL: "test_schema_url", + }, + },
+ wantStream: sdkmetric.Stream{Name: "test_name", Unit: "test_unit"}, + wantResult: true, + },
+ { + name: "all selectors no match name", + view: View{ + Selector: &ViewSelector{ + InstrumentName: ptr("test_name"), + InstrumentType: (*ViewSelectorInstrumentType)(ptr("counter")), + Unit: ptr("test_unit"), + MeterName: ptr("test_meter_name"), + MeterVersion: ptr("test_meter_version"), + MeterSchemaUrl: ptr("test_schema_url"), + }, + },
+ matchInstrument: &sdkmetric.Instrument{ + Name: "not_match", + Unit: "test_unit", + Kind: sdkmetric.InstrumentKindCounter, + Scope: instrumentation.Scope{ + Name: "test_meter_name", + Version: "test_meter_version", + SchemaURL: "test_schema_url", + }, + },
+ wantStream: sdkmetric.Stream{}, + wantResult: false, + },
+ { + name: "all selectors no match unit", + view: View{ + Selector: &ViewSelector{ + InstrumentName: ptr("test_name"), + InstrumentType: (*ViewSelectorInstrumentType)(ptr("counter")), + Unit:
ptr("test_unit"), + MeterName: ptr("test_meter_name"), + MeterVersion: ptr("test_meter_version"), + MeterSchemaUrl: ptr("test_schema_url"), + }, + }, + matchInstrument: &sdkmetric.Instrument{ + Name: "test_name", + Unit: "not_match", + Kind: sdkmetric.InstrumentKindCounter, + Scope: instrumentation.Scope{ + Name: "test_meter_name", + Version: "test_meter_version", + SchemaURL: "test_schema_url", + }, + }, + wantStream: sdkmetric.Stream{}, + wantResult: false, + }, + { + name: "all selectors no match kind", + view: View{ + Selector: &ViewSelector{ + InstrumentName: ptr("test_name"), + InstrumentType: (*ViewSelectorInstrumentType)(ptr("histogram")), + Unit: ptr("test_unit"), + MeterName: ptr("test_meter_name"), + MeterVersion: ptr("test_meter_version"), + MeterSchemaUrl: ptr("test_schema_url"), + }, + }, + matchInstrument: &sdkmetric.Instrument{ + Name: "test_name", + Unit: "test_unit", + Kind: sdkmetric.InstrumentKindCounter, + Scope: instrumentation.Scope{ + Name: "test_meter_name", + Version: "test_meter_version", + SchemaURL: "test_schema_url", + }, + }, + wantStream: sdkmetric.Stream{}, + wantResult: false, + }, + { + name: "all selectors no match meter name", + view: View{ + Selector: &ViewSelector{ + InstrumentName: ptr("test_name"), + InstrumentType: (*ViewSelectorInstrumentType)(ptr("counter")), + Unit: ptr("test_unit"), + MeterName: ptr("test_meter_name"), + MeterVersion: ptr("test_meter_version"), + MeterSchemaUrl: ptr("test_schema_url"), + }, + }, + matchInstrument: &sdkmetric.Instrument{ + Name: "test_name", + Unit: "test_unit", + Kind: sdkmetric.InstrumentKindCounter, + Scope: instrumentation.Scope{ + Name: "not_match", + Version: "test_meter_version", + SchemaURL: "test_schema_url", + }, + }, + wantStream: sdkmetric.Stream{}, + wantResult: false, + }, + { + name: "all selectors no match meter version", + view: View{ + Selector: &ViewSelector{ + InstrumentName: ptr("test_name"), + InstrumentType: (*ViewSelectorInstrumentType)(ptr("counter")), + Unit: ptr("test_unit"), + MeterName: ptr("test_meter_name"), + MeterVersion: ptr("test_meter_version"), + MeterSchemaUrl: ptr("test_schema_url"), + }, + }, + matchInstrument: &sdkmetric.Instrument{ + Name: "test_name", + Unit: "test_unit", + Kind: sdkmetric.InstrumentKindCounter, + Scope: instrumentation.Scope{ + Name: "test_meter_name", + Version: "not_match", + SchemaURL: "test_schema_url", + }, + }, + wantStream: sdkmetric.Stream{}, + wantResult: false, + }, + { + name: "all selectors no match meter schema url", + view: View{ + Selector: &ViewSelector{ + InstrumentName: ptr("test_name"), + InstrumentType: (*ViewSelectorInstrumentType)(ptr("counter")), + Unit: ptr("test_unit"), + MeterName: ptr("test_meter_name"), + MeterVersion: ptr("test_meter_version"), + MeterSchemaUrl: ptr("test_schema_url"), + }, + }, + matchInstrument: &sdkmetric.Instrument{ + Name: "test_name", + Unit: "test_unit", + Kind: sdkmetric.InstrumentKindCounter, + Scope: instrumentation.Scope{ + Name: "test_meter_name", + Version: "test_meter_version", + SchemaURL: "not_match", + }, + }, + wantStream: sdkmetric.Stream{}, + wantResult: false, + }, + { + name: "with stream", + view: View{ + Selector: &ViewSelector{ + InstrumentName: ptr("test_name"), + Unit: ptr("test_unit"), + }, + Stream: &ViewStream{ + Name: ptr("new_name"), + Description: ptr("new_description"), + AttributeKeys: []string{"foo", "bar"}, + Aggregation: &ViewStreamAggregation{Sum: make(ViewStreamAggregationSum)}, + }, + }, + matchInstrument: &sdkmetric.Instrument{ + Name: "test_name", + Description: 
"test_description", + Unit: "test_unit", + }, + wantStream: sdkmetric.Stream{ + Name: "new_name", + Description: "new_description", + Unit: "test_unit", + Aggregation: sdkmetric.AggregationSum{}, + }, + wantResult: true, + }, + } + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + got, err := view(tt.view) + if tt.wantErr != "" { + require.EqualError(t, err, tt.wantErr) + require.Nil(t, got) + } else { + require.NoError(t, err) + gotStream, gotResult := got(*tt.matchInstrument) + // Remove filter, since it cannot be compared + gotStream.AttributeFilter = nil + require.Equal(t, tt.wantStream, gotStream) + require.Equal(t, tt.wantResult, gotResult) + } + }) + } +} + +func TestInstrumentType(t *testing.T) { + testCases := []struct { + name string + instType *ViewSelectorInstrumentType + wantErr error + wantKind sdkmetric.InstrumentKind + }{ + { + name: "nil", + wantKind: sdkmetric.InstrumentKind(0), + }, + { + name: "counter", + instType: (*ViewSelectorInstrumentType)(ptr("counter")), + wantKind: sdkmetric.InstrumentKindCounter, + }, + { + name: "up_down_counter", + instType: (*ViewSelectorInstrumentType)(ptr("up_down_counter")), + wantKind: sdkmetric.InstrumentKindUpDownCounter, + }, + { + name: "histogram", + instType: (*ViewSelectorInstrumentType)(ptr("histogram")), + wantKind: sdkmetric.InstrumentKindHistogram, + }, + { + name: "observable_counter", + instType: (*ViewSelectorInstrumentType)(ptr("observable_counter")), + wantKind: sdkmetric.InstrumentKindObservableCounter, + }, + { + name: "observable_up_down_counter", + instType: (*ViewSelectorInstrumentType)(ptr("observable_up_down_counter")), + wantKind: sdkmetric.InstrumentKindObservableUpDownCounter, + }, + { + name: "observable_gauge", + instType: (*ViewSelectorInstrumentType)(ptr("observable_gauge")), + wantKind: sdkmetric.InstrumentKindObservableGauge, + }, + { + name: "invalid", + instType: (*ViewSelectorInstrumentType)(ptr("invalid")), + wantErr: errors.New("instrument_type: invalid value"), + }, + } + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + got, err := instrumentKind(tt.instType) + if tt.wantErr != nil { + require.Equal(t, tt.wantErr, err) + require.Zero(t, got) + } else { + require.NoError(t, err) + require.Equal(t, tt.wantKind, got) + } + }) + } +} + +func TestAggregation(t *testing.T) { + testCases := []struct { + name string + aggregation *ViewStreamAggregation + wantAggregation sdkmetric.Aggregation + }{ + { + name: "nil", + wantAggregation: nil, + }, + { + name: "empty", + aggregation: &ViewStreamAggregation{}, + wantAggregation: nil, + }, + { + name: "Base2ExponentialBucketHistogram empty", + aggregation: &ViewStreamAggregation{ + Base2ExponentialBucketHistogram: &ViewStreamAggregationBase2ExponentialBucketHistogram{}, + }, + wantAggregation: sdkmetric.AggregationBase2ExponentialHistogram{ + MaxSize: 0, + MaxScale: 0, + NoMinMax: true, + }, + }, + { + name: "Base2ExponentialBucketHistogram", + aggregation: &ViewStreamAggregation{ + Base2ExponentialBucketHistogram: &ViewStreamAggregationBase2ExponentialBucketHistogram{ + MaxSize: ptr(2), + MaxScale: ptr(3), + RecordMinMax: ptr(true), + }, + }, + wantAggregation: sdkmetric.AggregationBase2ExponentialHistogram{ + MaxSize: 2, + MaxScale: 3, + NoMinMax: false, + }, + }, + { + name: "Default", + aggregation: &ViewStreamAggregation{ + Default: make(ViewStreamAggregationDefault), + }, + wantAggregation: nil, + }, + { + name: "Drop", + aggregation: &ViewStreamAggregation{ + Drop: make(ViewStreamAggregationDrop), + }, + 
wantAggregation: sdkmetric.AggregationDrop{}, + }, + { + name: "ExplicitBucketHistogram empty", + aggregation: &ViewStreamAggregation{ + ExplicitBucketHistogram: &ViewStreamAggregationExplicitBucketHistogram{}, + }, + wantAggregation: sdkmetric.AggregationExplicitBucketHistogram{ + Boundaries: nil, + NoMinMax: true, + }, + }, + { + name: "ExplicitBucketHistogram", + aggregation: &ViewStreamAggregation{ + ExplicitBucketHistogram: &ViewStreamAggregationExplicitBucketHistogram{ + Boundaries: []float64{1, 2, 3}, + RecordMinMax: ptr(true), + }, + }, + wantAggregation: sdkmetric.AggregationExplicitBucketHistogram{ + Boundaries: []float64{1, 2, 3}, + NoMinMax: false, + }, + }, + { + name: "LastValue", + aggregation: &ViewStreamAggregation{ + LastValue: make(ViewStreamAggregationLastValue), + }, + wantAggregation: sdkmetric.AggregationLastValue{}, + }, + { + name: "Sum", + aggregation: &ViewStreamAggregation{ + Sum: make(ViewStreamAggregationSum), + }, + wantAggregation: sdkmetric.AggregationSum{}, + }, + } + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + got := aggregation(tt.aggregation) + require.Equal(t, tt.wantAggregation, got) + }) + } +} + +func TestAttributeFilter(t *testing.T) { + testCases := []struct { + name string + attributeKeys []string + wantPass []string + wantFail []string + }{ + { + name: "empty", + attributeKeys: []string{}, + wantPass: nil, + wantFail: []string{"foo", "bar"}, + }, + { + name: "filter", + attributeKeys: []string{"foo"}, + wantPass: []string{"foo"}, + wantFail: []string{"bar"}, + }, + } + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + got := attributeFilter(tt.attributeKeys) + for _, pass := range tt.wantPass { + require.True(t, got(attribute.KeyValue{Key: attribute.Key(pass), Value: attribute.StringValue("")})) + } + for _, fail := range tt.wantFail { + require.False(t, got(attribute.KeyValue{Key: attribute.Key(fail), Value: attribute.StringValue("")})) + } + }) + } +} diff --git a/config/v0.2.0/resource.go b/config/v0.2.0/resource.go new file mode 100644 index 00000000000..7c24e109f72 --- /dev/null +++ b/config/v0.2.0/resource.go @@ -0,0 +1,63 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package config // import "go.opentelemetry.io/contrib/config/v0.2.0" + +import ( + "fmt" + "strconv" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/resource" +) + +func keyVal(k string, v any) attribute.KeyValue { + switch val := v.(type) { + case bool: + return attribute.Bool(k, val) + case int64: + return attribute.Int64(k, val) + case uint64: + return attribute.String(k, strconv.FormatUint(val, 10)) + case float64: + return attribute.Float64(k, val) + case int8: + return attribute.Int64(k, int64(val)) + case uint8: + return attribute.Int64(k, int64(val)) + case int16: + return attribute.Int64(k, int64(val)) + case uint16: + return attribute.Int64(k, int64(val)) + case int32: + return attribute.Int64(k, int64(val)) + case uint32: + return attribute.Int64(k, int64(val)) + case float32: + return attribute.Float64(k, float64(val)) + case int: + return attribute.Int(k, val) + case uint: + return attribute.String(k, strconv.FormatUint(uint64(val), 10)) + case string: + return attribute.String(k, val) + default: + return attribute.String(k, fmt.Sprint(v)) + } +} + +func newResource(res *Resource) (*resource.Resource, error) { + if res == nil || res.Attributes == nil { + return resource.Default(), nil + } + var attrs []attribute.KeyValue + + for k, v := range 
res.Attributes { + attrs = append(attrs, keyVal(k, v)) + }
+
+ // strOrEmpty guards against a nil SchemaUrl in the configuration. + return resource.Merge(resource.Default(), + resource.NewWithAttributes(strOrEmpty(res.SchemaUrl), + attrs..., + )) +}
diff --git a/config/v0.2.0/resource_test.go b/config/v0.2.0/resource_test.go new file mode 100644 index 00000000000..12c15a843e5 --- /dev/null +++ b/config/v0.2.0/resource_test.go @@ -0,0 +1,116 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 +
+package config +
+import ( + "fmt" + "testing" +
+ "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +
+ "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/resource" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) +
+type mockType struct{} +
+func TestNewResource(t *testing.T) { + res, err := resource.Merge(resource.Default(), + resource.NewWithAttributes(semconv.SchemaURL, + semconv.ServiceName("service-a"), + )) + other := mockType{} + require.NoError(t, err)
+ resWithAttrs, err := resource.Merge(resource.Default(), + resource.NewWithAttributes(semconv.SchemaURL, + semconv.ServiceName("service-a"), + attribute.Bool("attr-bool", true), + attribute.String("attr-uint64", fmt.Sprintf("%d", 164)), + attribute.Int64("attr-int64", int64(-164)), + attribute.Float64("attr-float64", float64(64.0)), + attribute.Int64("attr-int8", int64(-18)), + attribute.Int64("attr-uint8", int64(18)), + attribute.Int64("attr-int16", int64(-116)), + attribute.Int64("attr-uint16", int64(116)), + attribute.Int64("attr-int32", int64(-132)), + attribute.Int64("attr-uint32", int64(132)), + attribute.Float64("attr-float32", float64(32.0)), + attribute.Int64("attr-int", int64(-1)), + attribute.String("attr-uint", fmt.Sprintf("%d", 1)), + attribute.String("attr-string", "string-val"), + attribute.String("attr-default", fmt.Sprintf("%v", other)), + )) + require.NoError(t, err)
+ tests := []struct { + name string + config *Resource + wantResource *resource.Resource + wantErr error + }{ + { + name: "no-resource-configuration", + wantResource: resource.Default(), + },
+ { + name: "resource-no-attributes", + config: &Resource{}, + wantResource: resource.Default(), + },
+ { + name: "resource-with-attributes-invalid-schema", + config: &Resource{ + SchemaUrl: ptr("https://opentelemetry.io/invalid-schema"), + Attributes: Attributes{ + "service.name": "service-a", + }, + }, + wantResource: resource.NewSchemaless(res.Attributes()...), + wantErr: resource.ErrSchemaURLConflict, + },
+ { + name: "resource-with-attributes-and-schema", + config: &Resource{ + Attributes: Attributes{ + "service.name": "service-a", + }, + SchemaUrl: ptr(semconv.SchemaURL), + }, + wantResource: res, + },
+ { + name: "resource-with-additional-attributes-and-schema", + config: &Resource{ + Attributes: Attributes{ + "service.name": "service-a", + "attr-bool": true, + "attr-int64": int64(-164), + "attr-uint64": uint64(164), + "attr-float64": float64(64.0), + "attr-int8": int8(-18), + "attr-uint8": uint8(18), + "attr-int16": int16(-116), + "attr-uint16": uint16(116), + "attr-int32": int32(-132), + "attr-uint32": uint32(132), + "attr-float32": float32(32.0), + "attr-int": int(-1), + "attr-uint": uint(1), + "attr-string": "string-val", + "attr-default": other, + }, + SchemaUrl: ptr(semconv.SchemaURL), + }, + wantResource: resWithAttrs, + },
+ }
+ for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := newResource(tt.config) + assert.ErrorIs(t, err, tt.wantErr) + assert.Equal(t, tt.wantResource, got) + }) + } +} diff --git a/config/v0.2.0/trace.go
b/config/v0.2.0/trace.go new file mode 100644 index 00000000000..c639c174328 --- /dev/null +++ b/config/v0.2.0/trace.go @@ -0,0 +1,197 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package config // import "go.opentelemetry.io/contrib/config/v0.2.0" + +import ( + "context" + "errors" + "fmt" + "net/url" + "time" + + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" + "go.opentelemetry.io/otel/sdk/resource" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" +) + +func tracerProvider(cfg configOptions, res *resource.Resource) (trace.TracerProvider, shutdownFunc, error) { + if cfg.opentelemetryConfig.TracerProvider == nil { + return noop.NewTracerProvider(), noopShutdown, nil + } + opts := []sdktrace.TracerProviderOption{ + sdktrace.WithResource(res), + } + var errs []error + for _, processor := range cfg.opentelemetryConfig.TracerProvider.Processors { + sp, err := spanProcessor(cfg.ctx, processor) + if err == nil { + opts = append(opts, sdktrace.WithSpanProcessor(sp)) + } else { + errs = append(errs, err) + } + } + if len(errs) > 0 { + return noop.NewTracerProvider(), noopShutdown, errors.Join(errs...) + } + tp := sdktrace.NewTracerProvider(opts...) + return tp, tp.Shutdown, nil +} + +func spanExporter(ctx context.Context, exporter SpanExporter) (sdktrace.SpanExporter, error) { + if exporter.Console != nil && exporter.OTLP != nil { + return nil, errors.New("must not specify multiple exporters") + } + + if exporter.Console != nil { + return stdouttrace.New( + stdouttrace.WithPrettyPrint(), + ) + } + if exporter.OTLP != nil { + switch exporter.OTLP.Protocol { + case protocolProtobufHTTP: + return otlpHTTPSpanExporter(ctx, exporter.OTLP) + case protocolProtobufGRPC: + return otlpGRPCSpanExporter(ctx, exporter.OTLP) + default: + return nil, fmt.Errorf("unsupported protocol %q", exporter.OTLP.Protocol) + } + } + return nil, errors.New("no valid span exporter") +} + +func spanProcessor(ctx context.Context, processor SpanProcessor) (sdktrace.SpanProcessor, error) { + if processor.Batch != nil && processor.Simple != nil { + return nil, errors.New("must not specify multiple span processor type") + } + if processor.Batch != nil { + exp, err := spanExporter(ctx, processor.Batch.Exporter) + if err != nil { + return nil, err + } + return batchSpanProcessor(processor.Batch, exp) + } + if processor.Simple != nil { + exp, err := spanExporter(ctx, processor.Simple.Exporter) + if err != nil { + return nil, err + } + return sdktrace.NewSimpleSpanProcessor(exp), nil + } + return nil, errors.New("unsupported span processor type, must be one of simple or batch") +} + +func otlpGRPCSpanExporter(ctx context.Context, otlpConfig *OTLP) (sdktrace.SpanExporter, error) { + var opts []otlptracegrpc.Option + + if len(otlpConfig.Endpoint) > 0 { + u, err := url.ParseRequestURI(otlpConfig.Endpoint) + if err != nil { + return nil, err + } + // ParseRequestURI leaves the Host field empty when no + // scheme is specified (i.e. localhost:4317). This check is + // here to support the case where a user may not specify a + // scheme. The code does its best effort here by using + // otlpConfig.Endpoint as-is in that case. 
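+ // For example, "localhost:4317" parses with an empty Host ("localhost" is + // taken as the scheme), whereas "http://localhost:4317" parses with Host + // "localhost:4317".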
+
+func otlpGRPCSpanExporter(ctx context.Context, otlpConfig *OTLP) (sdktrace.SpanExporter, error) {
+	var opts []otlptracegrpc.Option
+
+	if len(otlpConfig.Endpoint) > 0 {
+		u, err := url.ParseRequestURI(otlpConfig.Endpoint)
+		if err != nil {
+			return nil, err
+		}
+		// ParseRequestURI leaves the Host field empty when no
+		// scheme is specified (e.g. localhost:4317). This check is
+		// here to support the case where a user may not specify a
+		// scheme. The code makes a best effort here by using
+		// otlpConfig.Endpoint as-is in that case.
+		if u.Host != "" {
+			opts = append(opts, otlptracegrpc.WithEndpoint(u.Host))
+		} else {
+			opts = append(opts, otlptracegrpc.WithEndpoint(otlpConfig.Endpoint))
+		}
+
+		if u.Scheme == "http" {
+			opts = append(opts, otlptracegrpc.WithInsecure())
+		}
+	}
+
+	if otlpConfig.Compression != nil {
+		switch *otlpConfig.Compression {
+		case compressionGzip:
+			opts = append(opts, otlptracegrpc.WithCompressor(*otlpConfig.Compression))
+		case compressionNone:
+			// none requires no options
+		default:
+			return nil, fmt.Errorf("unsupported compression %q", *otlpConfig.Compression)
+		}
+	}
+	if otlpConfig.Timeout != nil && *otlpConfig.Timeout > 0 {
+		opts = append(opts, otlptracegrpc.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout)))
+	}
+	if len(otlpConfig.Headers) > 0 {
+		opts = append(opts, otlptracegrpc.WithHeaders(otlpConfig.Headers))
+	}
+
+	return otlptracegrpc.New(ctx, opts...)
+}
+
+func otlpHTTPSpanExporter(ctx context.Context, otlpConfig *OTLP) (sdktrace.SpanExporter, error) {
+	var opts []otlptracehttp.Option
+
+	if len(otlpConfig.Endpoint) > 0 {
+		u, err := url.ParseRequestURI(otlpConfig.Endpoint)
+		if err != nil {
+			return nil, err
+		}
+		opts = append(opts, otlptracehttp.WithEndpoint(u.Host))
+
+		if u.Scheme == "http" {
+			opts = append(opts, otlptracehttp.WithInsecure())
+		}
+		if len(u.Path) > 0 {
+			opts = append(opts, otlptracehttp.WithURLPath(u.Path))
+		}
+	}
+	if otlpConfig.Compression != nil {
+		switch *otlpConfig.Compression {
+		case compressionGzip:
+			opts = append(opts, otlptracehttp.WithCompression(otlptracehttp.GzipCompression))
+		case compressionNone:
+			opts = append(opts, otlptracehttp.WithCompression(otlptracehttp.NoCompression))
+		default:
+			return nil, fmt.Errorf("unsupported compression %q", *otlpConfig.Compression)
+		}
+	}
+	if otlpConfig.Timeout != nil && *otlpConfig.Timeout > 0 {
+		opts = append(opts, otlptracehttp.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout)))
+	}
+	if len(otlpConfig.Headers) > 0 {
+		opts = append(opts, otlptracehttp.WithHeaders(otlpConfig.Headers))
+	}
+
+	return otlptracehttp.New(ctx, opts...)
+}
+
+func batchSpanProcessor(bsp *BatchSpanProcessor, exp sdktrace.SpanExporter) (sdktrace.SpanProcessor, error) {
+	var opts []sdktrace.BatchSpanProcessorOption
+	if bsp.ExportTimeout != nil {
+		if *bsp.ExportTimeout < 0 {
+			return nil, fmt.Errorf("invalid export timeout %d", *bsp.ExportTimeout)
+		}
+		opts = append(opts, sdktrace.WithExportTimeout(time.Millisecond*time.Duration(*bsp.ExportTimeout)))
+	}
+	if bsp.MaxExportBatchSize != nil {
+		if *bsp.MaxExportBatchSize < 0 {
+			return nil, fmt.Errorf("invalid batch size %d", *bsp.MaxExportBatchSize)
+		}
+		opts = append(opts, sdktrace.WithMaxExportBatchSize(*bsp.MaxExportBatchSize))
+	}
+	if bsp.MaxQueueSize != nil {
+		if *bsp.MaxQueueSize < 0 {
+			return nil, fmt.Errorf("invalid queue size %d", *bsp.MaxQueueSize)
+		}
+		opts = append(opts, sdktrace.WithMaxQueueSize(*bsp.MaxQueueSize))
+	}
+	if bsp.ScheduleDelay != nil {
+		if *bsp.ScheduleDelay < 0 {
+			return nil, fmt.Errorf("invalid schedule delay %d", *bsp.ScheduleDelay)
+		}
+		opts = append(opts, sdktrace.WithBatchTimeout(time.Millisecond*time.Duration(*bsp.ScheduleDelay)))
+	}
+	return sdktrace.NewBatchSpanProcessor(exp, opts...), nil
+}
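The ParseRequestURI comment above is the subtle part of the exporter wiring: a scheme-less endpoint is passed through unchanged, while an explicit http scheme extracts the host and disables TLS. A small same-package sketch of both behaviors (a hypothetical test file, not part of this patch; exporter construction does not require a running collector, as the tests below also rely on):

```go
// endpoint_sketch_test.go (hypothetical, same package as trace.go above).
package config

import (
	"context"
	"testing"
)

func TestEndpointSchemeSketch(t *testing.T) {
	ctx := context.Background()
	// No scheme: url.ParseRequestURI leaves Host empty, so the endpoint
	// string is used verbatim as the gRPC target.
	if _, err := otlpGRPCSpanExporter(ctx, &OTLP{
		Protocol: "grpc/protobuf",
		Endpoint: "localhost:4317",
	}); err != nil {
		t.Fatal(err)
	}
	// With an http scheme, the host part ("localhost:4317") is extracted
	// and TLS is disabled via otlptracegrpc.WithInsecure().
	if _, err := otlpGRPCSpanExporter(ctx, &OTLP{
		Protocol: "grpc/protobuf",
		Endpoint: "http://localhost:4317",
	}); err != nil {
		t.Fatal(err)
	}
}
```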
diff --git a/config/v0.2.0/trace_test.go b/config/v0.2.0/trace_test.go
new file mode 100644
index 00000000000..4f4a197770e
--- /dev/null
+++ b/config/v0.2.0/trace_test.go
@@ -0,0 +1,535 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package config
+
+import (
+	"context"
+	"errors"
+	"net/url"
+	"reflect"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
+	"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
+	"go.opentelemetry.io/otel/sdk/resource"
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/noop"
+)
+
+func TestTracerProvider(t *testing.T) {
+	tests := []struct {
+		name         string
+		cfg          configOptions
+		wantProvider trace.TracerProvider
+		wantErr      error
+	}{
+		{
+			name:         "no-tracer-provider-configured",
+			wantProvider: noop.NewTracerProvider(),
+		},
+		{
+			name: "error-in-config",
+			cfg: configOptions{
+				opentelemetryConfig: OpenTelemetryConfiguration{
+					TracerProvider: &TracerProvider{
+						Processors: []SpanProcessor{
+							{
+								Batch:  &BatchSpanProcessor{},
+								Simple: &SimpleSpanProcessor{},
+							},
+						},
+					},
+				},
+			},
+			wantProvider: noop.NewTracerProvider(),
+			wantErr:      errors.Join(errors.New("must not specify multiple span processor types")),
+		},
+		{
+			name: "multiple-errors-in-config",
+			cfg: configOptions{
+				opentelemetryConfig: OpenTelemetryConfiguration{
+					TracerProvider: &TracerProvider{
+						Processors: []SpanProcessor{
+							{
+								Batch:  &BatchSpanProcessor{},
+								Simple: &SimpleSpanProcessor{},
+							},
+							{
+								Simple: &SimpleSpanProcessor{
+									Exporter: SpanExporter{
+										Console: Console{},
+										OTLP:    &OTLP{},
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+			wantProvider: noop.NewTracerProvider(),
+			wantErr:      errors.Join(errors.New("must not specify multiple span processor types"), errors.New("must not specify multiple exporters")),
+		},
+	}
+	for _, tt := range tests {
+		tp, shutdown, err := tracerProvider(tt.cfg, resource.Default())
+		require.Equal(t, tt.wantProvider, tp)
+		assert.Equal(t, tt.wantErr, err)
+		require.NoError(t, shutdown(context.Background()))
+	}
+}
+
+func TestSpanProcessor(t *testing.T) {
+	consoleExporter, err := stdouttrace.New(
+		stdouttrace.WithPrettyPrint(),
+	)
+	require.NoError(t, err)
+	ctx := context.Background()
+	otlpGRPCExporter, err := otlptracegrpc.New(ctx)
+	require.NoError(t, err)
+	otlpHTTPExporter, err := otlptracehttp.New(ctx)
+	require.NoError(t, err)
+	testCases := []struct {
+		name          string
+		processor     SpanProcessor
+		args          any
+		wantErr       error
+		wantProcessor sdktrace.SpanProcessor
+	}{
+		{
+			name:    "no processor",
+			wantErr: errors.New("unsupported span processor type, must be one of simple or batch"),
+		},
+		{
+			name: "multiple processor types",
+			processor: SpanProcessor{
+				Batch: &BatchSpanProcessor{
+					Exporter: SpanExporter{},
+				},
+				Simple: &SimpleSpanProcessor{},
+			},
+			wantErr: errors.New("must not specify multiple span processor types"),
+		},
+		{
+			name: "batch processor invalid exporter",
+			processor: SpanProcessor{
+				Batch: &BatchSpanProcessor{
+					Exporter: SpanExporter{},
+				},
+			},
+			wantErr: errors.New("no valid span exporter"),
+		},
+		{
+			name: "batch processor invalid batch size console exporter",
+			processor: SpanProcessor{
+				Batch: &BatchSpanProcessor{
+					MaxExportBatchSize: ptr(-1),
+					Exporter: SpanExporter{
+						Console: Console{},
+					},
+				},
+			},
+			wantErr: errors.New("invalid batch size -1"),
+		},
+		{
+			name: "batch processor invalid export timeout console exporter",
+			processor: SpanProcessor{
+				Batch: &BatchSpanProcessor{
+					ExportTimeout: ptr(-2),
+					Exporter: SpanExporter{
+						Console: Console{},
+					},
+				},
+			},
+			wantErr: errors.New("invalid export timeout -2"),
+		},
+		{
+			name: "batch processor invalid queue size console exporter",
+			processor: SpanProcessor{
+				Batch: &BatchSpanProcessor{
+					MaxQueueSize: ptr(-3),
+					Exporter: SpanExporter{
+						Console: Console{},
+					},
+				},
+			},
+			wantErr: errors.New("invalid queue size -3"),
+		},
+		{
+			name: "batch processor invalid schedule delay console exporter",
+			processor: SpanProcessor{
+				Batch: &BatchSpanProcessor{
+					ScheduleDelay: ptr(-4),
+					Exporter: SpanExporter{
+						Console: Console{},
+					},
+				},
+			},
+			wantErr: errors.New("invalid schedule delay -4"),
+		},
+		{
+			name: "batch processor with multiple exporters",
+			processor: SpanProcessor{
+				Batch: &BatchSpanProcessor{
+					Exporter: SpanExporter{
+						Console: Console{},
+						OTLP:    &OTLP{},
+					},
+				},
+			},
+			wantErr: errors.New("must not specify multiple exporters"),
+		},
+		{
+			name: "batch processor console exporter",
+			processor: SpanProcessor{
+				Batch: &BatchSpanProcessor{
+					MaxExportBatchSize: ptr(0),
+					ExportTimeout:      ptr(0),
+					MaxQueueSize:       ptr(0),
+					ScheduleDelay:      ptr(0),
+					Exporter: SpanExporter{
+						Console: Console{},
+					},
+				},
+			},
+			wantProcessor: sdktrace.NewBatchSpanProcessor(consoleExporter),
+		},
+		{
+			name: "batch/otlp-exporter-invalid-protocol",
+			processor: SpanProcessor{
+				Batch: &BatchSpanProcessor{
+					MaxExportBatchSize: ptr(0),
+					ExportTimeout:      ptr(0),
+					MaxQueueSize:       ptr(0),
+					ScheduleDelay:      ptr(0),
+					Exporter: SpanExporter{
+						OTLP: &OTLP{
+							Protocol: "http/invalid",
+						},
+					},
+				},
+			},
+			wantErr: errors.New("unsupported protocol \"http/invalid\""),
+		},
+		{
+			name: "batch/otlp-grpc-exporter-no-endpoint",
+			processor: SpanProcessor{
+				Batch: &BatchSpanProcessor{
+					MaxExportBatchSize: ptr(0),
+					ExportTimeout:      ptr(0),
+					MaxQueueSize:       ptr(0),
+					ScheduleDelay:      ptr(0),
+					Exporter: SpanExporter{
+						OTLP: &OTLP{
+							Protocol:    "grpc/protobuf",
+							Compression: ptr("gzip"),
+							Timeout:     ptr(1000),
+							Headers: map[string]string{
+								"test": "test1",
+							},
+						},
+					},
+				},
+			},
+			wantProcessor: sdktrace.NewBatchSpanProcessor(otlpGRPCExporter),
+		},
+		{
+			name: "batch/otlp-grpc-exporter",
+			processor: SpanProcessor{
+				Batch: &BatchSpanProcessor{
+					MaxExportBatchSize: ptr(0),
+					ExportTimeout:      ptr(0),
+					MaxQueueSize:       ptr(0),
+					ScheduleDelay:      ptr(0),
+					Exporter: SpanExporter{
+						OTLP: &OTLP{
+							Protocol:    "grpc/protobuf",
+							Endpoint:    "http://localhost:4317",
+							Compression: ptr("gzip"),
+							Timeout:     ptr(1000),
+							Headers: map[string]string{
+								"test": "test1",
+							},
+						},
+					},
+				},
+			},
+			wantProcessor: sdktrace.NewBatchSpanProcessor(otlpGRPCExporter),
+		},
+		{
+			name: "batch/otlp-grpc-exporter-no-scheme",
+			processor: SpanProcessor{
+				Batch: &BatchSpanProcessor{
+					MaxExportBatchSize: ptr(0),
+					ExportTimeout:      ptr(0),
+					MaxQueueSize:       ptr(0),
+					ScheduleDelay:      ptr(0),
+					Exporter: SpanExporter{
+						OTLP: &OTLP{
+							Protocol:    "grpc/protobuf",
+							Endpoint:    "localhost:4317",
+							Compression: ptr("gzip"),
+							Timeout:     ptr(1000),
+							Headers: map[string]string{
+								"test": "test1",
+							},
+						},
+					},
+				},
+			},
+			wantProcessor: sdktrace.NewBatchSpanProcessor(otlpGRPCExporter),
+		},
+		{
+			name: "batch/otlp-grpc-invalid-endpoint",
+			processor: SpanProcessor{
+				Batch: &BatchSpanProcessor{
+					MaxExportBatchSize: ptr(0),
+					ExportTimeout:      ptr(0),
+					MaxQueueSize:       ptr(0),
+					ScheduleDelay:      ptr(0),
+					Exporter: SpanExporter{
+						OTLP: &OTLP{
+							Protocol:    "grpc/protobuf",
+							Endpoint:    " ",
+							Compression: ptr("gzip"),
+							Timeout:     ptr(1000),
+							Headers: map[string]string{
+								"test": "test1",
+							},
+						},
+					},
+				},
+			},
+			wantErr: &url.Error{Op: "parse", URL: " ", Err: errors.New("invalid URI for request")},
+		},
+		{
+			name: "batch/otlp-grpc-invalid-compression",
+			processor: SpanProcessor{
+				Batch: &BatchSpanProcessor{
+					MaxExportBatchSize: ptr(0),
+					ExportTimeout:      ptr(0),
+					MaxQueueSize:       ptr(0),
+					ScheduleDelay:      ptr(0),
+					Exporter: SpanExporter{
+						OTLP: &OTLP{
+							Protocol:    "grpc/protobuf",
+							Endpoint:    "localhost:4317",
+							Compression: ptr("invalid"),
+							Timeout:     ptr(1000),
+							Headers: map[string]string{
+								"test": "test1",
+							},
+						},
+					},
+				},
+			},
+			wantErr: errors.New("unsupported compression \"invalid\""),
+		},
+		{
+			name: "batch/otlp-http-exporter",
+			processor: SpanProcessor{
+				Batch: &BatchSpanProcessor{
+					MaxExportBatchSize: ptr(0),
+					ExportTimeout:      ptr(0),
+					MaxQueueSize:       ptr(0),
+					ScheduleDelay:      ptr(0),
+					Exporter: SpanExporter{
+						OTLP: &OTLP{
+							Protocol:    "http/protobuf",
+							Endpoint:    "http://localhost:4318",
+							Compression: ptr("gzip"),
+							Timeout:     ptr(1000),
+							Headers: map[string]string{
+								"test": "test1",
+							},
+						},
+					},
+				},
+			},
+			wantProcessor: sdktrace.NewBatchSpanProcessor(otlpHTTPExporter),
+		},
+		{
+			name: "batch/otlp-http-exporter-with-path",
+			processor: SpanProcessor{
+				Batch: &BatchSpanProcessor{
+					MaxExportBatchSize: ptr(0),
+					ExportTimeout:      ptr(0),
+					MaxQueueSize:       ptr(0),
+					ScheduleDelay:      ptr(0),
+					Exporter: SpanExporter{
+						OTLP: &OTLP{
+							Protocol:    "http/protobuf",
+							Endpoint:    "http://localhost:4318/path/123",
+							Compression: ptr("none"),
+							Timeout:     ptr(1000),
+							Headers: map[string]string{
+								"test": "test1",
+							},
+						},
+					},
+				},
+			},
+			wantProcessor: sdktrace.NewBatchSpanProcessor(otlpHTTPExporter),
+		},
+		{
+			name: "batch/otlp-http-exporter-no-endpoint",
+			processor: SpanProcessor{
+				Batch: &BatchSpanProcessor{
+					MaxExportBatchSize: ptr(0),
+					ExportTimeout:      ptr(0),
+					MaxQueueSize:       ptr(0),
+					ScheduleDelay:      ptr(0),
+					Exporter: SpanExporter{
+						OTLP: &OTLP{
+							Protocol:    "http/protobuf",
+							Compression: ptr("gzip"),
+							Timeout:     ptr(1000),
+							Headers: map[string]string{
+								"test": "test1",
+							},
+						},
+					},
+				},
+			},
+			wantProcessor: sdktrace.NewBatchSpanProcessor(otlpHTTPExporter),
+		},
+		{
+			name: "batch/otlp-http-exporter-no-scheme",
+			processor: SpanProcessor{
+				Batch: &BatchSpanProcessor{
+					MaxExportBatchSize: ptr(0),
+					ExportTimeout:      ptr(0),
+					MaxQueueSize:       ptr(0),
+					ScheduleDelay:      ptr(0),
+					Exporter: SpanExporter{
+						OTLP: &OTLP{
+							Protocol:    "http/protobuf",
+							Endpoint:    "localhost:4318",
+							Compression: ptr("gzip"),
+							Timeout:     ptr(1000),
+							Headers: map[string]string{
+								"test": "test1",
+							},
+						},
+					},
+				},
+			},
+			wantProcessor: sdktrace.NewBatchSpanProcessor(otlpHTTPExporter),
+		},
+		{
+			name: "batch/otlp-http-invalid-endpoint",
+			processor: SpanProcessor{
+				Batch: &BatchSpanProcessor{
+					MaxExportBatchSize: ptr(0),
+					ExportTimeout:      ptr(0),
+					MaxQueueSize:       ptr(0),
+					ScheduleDelay:      ptr(0),
+					Exporter: SpanExporter{
+						OTLP: &OTLP{
+							Protocol:    "http/protobuf",
+							Endpoint:    " ",
+							Compression: ptr("gzip"),
+							Timeout:     ptr(1000),
+							Headers: map[string]string{
+								"test": "test1",
+							},
+						},
+					},
+				},
+			},
+			wantErr: &url.Error{Op: "parse", URL: " ", Err: errors.New("invalid URI for request")},
+		},
+		{
+			name: "batch/otlp-http-none-compression",
+			processor: SpanProcessor{
+				Batch: &BatchSpanProcessor{
+					MaxExportBatchSize: ptr(0),
+					ExportTimeout:      ptr(0),
+					MaxQueueSize:       ptr(0),
+					ScheduleDelay:      ptr(0),
+					Exporter: SpanExporter{
+						OTLP: &OTLP{
+							Protocol:    "http/protobuf",
+							Endpoint:    "localhost:4318",
+							Compression: ptr("none"),
+							Timeout:     ptr(1000),
+							Headers: map[string]string{
+								"test": "test1",
+							},
+						},
+					},
+				},
+			},
+			wantProcessor: sdktrace.NewBatchSpanProcessor(otlpHTTPExporter),
+		},
+		{
+			name: "batch/otlp-http-invalid-compression",
+			processor: SpanProcessor{
+				Batch: &BatchSpanProcessor{
+					MaxExportBatchSize: ptr(0),
+					ExportTimeout:      ptr(0),
+					MaxQueueSize:       ptr(0),
+					ScheduleDelay:      ptr(0),
+					Exporter: SpanExporter{
+						OTLP: &OTLP{
+							Protocol:    "http/protobuf",
+							Endpoint:    "localhost:4318",
+							Compression: ptr("invalid"),
+							Timeout:     ptr(1000),
+							Headers: map[string]string{
+								"test": "test1",
+							},
+						},
+					},
+				},
+			},
+			wantErr: errors.New("unsupported compression \"invalid\""),
+		},
+		{
+			name: "simple/no-exporter",
+			processor: SpanProcessor{
+				Simple: &SimpleSpanProcessor{
+					Exporter: SpanExporter{},
+				},
+			},
+			wantErr: errors.New("no valid span exporter"),
+		},
+		{
+			name: "simple/console-exporter",
+			processor: SpanProcessor{
+				Simple: &SimpleSpanProcessor{
+					Exporter: SpanExporter{
+						Console: Console{},
+					},
+				},
+			},
+			wantProcessor: sdktrace.NewSimpleSpanProcessor(consoleExporter),
+		},
+	}
+	for _, tt := range testCases {
+		t.Run(tt.name, func(t *testing.T) {
+			got, err := spanProcessor(context.Background(), tt.processor)
+			require.Equal(t, tt.wantErr, err)
+			if tt.wantProcessor == nil {
+				require.Nil(t, got)
+			} else {
+				require.Equal(t, reflect.TypeOf(tt.wantProcessor), reflect.TypeOf(got))
+				var fieldName string
+				switch reflect.TypeOf(tt.wantProcessor).String() {
+				case "*trace.simpleSpanProcessor":
+					fieldName = "exporter"
+				default:
+					fieldName = "e"
+				}
+				wantExporterType := reflect.Indirect(reflect.ValueOf(tt.wantProcessor)).FieldByName(fieldName).Elem().Type()
+				gotExporterType := reflect.Indirect(reflect.ValueOf(got)).FieldByName(fieldName).Elem().Type()
+				require.Equal(t, wantExporterType.String(), gotExporterType.String())
+			}
+		})
+	}
+}
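The remaining hunks are mechanical renames from config/ into config/v0.3.0/. For downstream code the migration is a one-line import change, since the package name stays config; a minimal sketch (the package and variable names here are illustrative only):

```go
// Migration sketch for downstream code: only the import path changes; the
// package name remains "config", so existing call sites compile unchanged.
package example

import (
	// Before: config "go.opentelemetry.io/contrib/config"
	config "go.opentelemetry.io/contrib/config/v0.3.0" // after: pinned to schema v0.3.0
)

// Reference the package so the pinned import is used in this sketch.
var _ = config.ParseYAML
```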
"go.opentelemetry.io/contrib/config/v0.3.0" import ( "context" diff --git a/config/config_json.go b/config/v0.3.0/config_json.go similarity index 99% rename from config/config_json.go rename to config/v0.3.0/config_json.go index a42e1d28601..f350067280d 100644 --- a/config/config_json.go +++ b/config/v0.3.0/config_json.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package config // import "go.opentelemetry.io/contrib/config" +package config // import "go.opentelemetry.io/contrib/config/v0.3.0" import ( "encoding/json" diff --git a/config/config_test.go b/config/v0.3.0/config_test.go similarity index 99% rename from config/config_test.go rename to config/v0.3.0/config_test.go index cdc3ddcd45b..0f7822bd8bb 100644 --- a/config/config_test.go +++ b/config/v0.3.0/config_test.go @@ -424,7 +424,7 @@ func TestParseYAML(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - b, err := os.ReadFile(filepath.Join("testdata", tt.input)) + b, err := os.ReadFile(filepath.Join("..", "testdata", tt.input)) require.NoError(t, err) got, err := ParseYAML(b) @@ -473,7 +473,7 @@ func TestSerializeJSON(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - b, err := os.ReadFile(filepath.Join("testdata", tt.input)) + b, err := os.ReadFile(filepath.Join("..", "testdata", tt.input)) require.NoError(t, err) var got OpenTelemetryConfiguration diff --git a/config/config_yaml.go b/config/v0.3.0/config_yaml.go similarity index 93% rename from config/config_yaml.go rename to config/v0.3.0/config_yaml.go index 88234178880..94af68e6f98 100644 --- a/config/config_yaml.go +++ b/config/v0.3.0/config_yaml.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package config // import "go.opentelemetry.io/contrib/config" +package config // import "go.opentelemetry.io/contrib/config/v0.3.0" import ( "fmt" diff --git a/config/generated_config.go b/config/v0.3.0/generated_config.go similarity index 100% rename from config/generated_config.go rename to config/v0.3.0/generated_config.go diff --git a/config/log.go b/config/v0.3.0/log.go similarity index 98% rename from config/log.go rename to config/v0.3.0/log.go index 81a769238cd..40ad54dbfd1 100644 --- a/config/log.go +++ b/config/v0.3.0/log.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package config // import "go.opentelemetry.io/contrib/config" +package config // import "go.opentelemetry.io/contrib/config/v0.3.0" import ( "context" diff --git a/config/log_test.go b/config/v0.3.0/log_test.go similarity index 99% rename from config/log_test.go rename to config/v0.3.0/log_test.go index 3e1efe11b3a..f04d521eb50 100644 --- a/config/log_test.go +++ b/config/v0.3.0/log_test.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package config // import "go.opentelemetry.io/contrib/config" +package config import ( "context" diff --git a/config/metric.go b/config/v0.3.0/metric.go similarity index 99% rename from config/metric.go rename to config/v0.3.0/metric.go index de5629a7edb..c551a5b91fe 100644 --- a/config/metric.go +++ b/config/v0.3.0/metric.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package config // import "go.opentelemetry.io/contrib/config" +package config // import "go.opentelemetry.io/contrib/config/v0.3.0" import ( "context" diff --git a/config/metric_test.go 
diff --git a/config/metric_test.go b/config/v0.3.0/metric_test.go
similarity index 99%
rename from config/metric_test.go
rename to config/v0.3.0/metric_test.go
index 07defc008bb..b6c6d095fa8 100644
--- a/config/metric_test.go
+++ b/config/v0.3.0/metric_test.go
@@ -150,7 +150,7 @@ func TestReader(t *testing.T) {
 				Exporter: PullMetricExporter{
 					Prometheus: &Prometheus{
 						Host:              ptr("localhost"),
-						Port:              ptr(8888),
+						Port:              ptr(0),
 						WithoutScopeInfo:  ptr(true),
 						WithoutUnits:      ptr(true),
 						WithoutTypeSuffix: ptr(true),
diff --git a/config/resource.go b/config/v0.3.0/resource.go
similarity index 95%
rename from config/resource.go
rename to config/v0.3.0/resource.go
index d2b5ed3ac9a..4983374aa6a 100644
--- a/config/resource.go
+++ b/config/v0.3.0/resource.go
@@ -1,7 +1,7 @@
 // Copyright The OpenTelemetry Authors
 // SPDX-License-Identifier: Apache-2.0
 
-package config // import "go.opentelemetry.io/contrib/config"
+package config // import "go.opentelemetry.io/contrib/config/v0.3.0"
 
 import (
 	"fmt"
diff --git a/config/resource_test.go b/config/v0.3.0/resource_test.go
similarity index 97%
rename from config/resource_test.go
rename to config/v0.3.0/resource_test.go
index 5a9f756f2b8..3a80e7c280a 100644
--- a/config/resource_test.go
+++ b/config/v0.3.0/resource_test.go
@@ -1,7 +1,7 @@
 // Copyright The OpenTelemetry Authors
 // SPDX-License-Identifier: Apache-2.0
 
-package config // import "go.opentelemetry.io/contrib/config"
+package config
 
 import (
 	"fmt"
diff --git a/config/trace.go b/config/v0.3.0/trace.go
similarity index 98%
rename from config/trace.go
rename to config/v0.3.0/trace.go
index 1b08b2c56ae..f1e6552cde3 100644
--- a/config/trace.go
+++ b/config/v0.3.0/trace.go
@@ -1,7 +1,7 @@
 // Copyright The OpenTelemetry Authors
 // SPDX-License-Identifier: Apache-2.0
 
-package config // import "go.opentelemetry.io/contrib/config"
+package config // import "go.opentelemetry.io/contrib/config/v0.3.0"
 
 import (
 	"context"
diff --git a/config/trace_test.go b/config/v0.3.0/trace_test.go
similarity index 100%
rename from config/trace_test.go
rename to config/v0.3.0/trace_test.go
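Putting the pieces together, a minimal end-to-end sketch of consuming the versioned package; WithOpenTelemetryConfiguration is assumed from the package API (it does not appear in the hunks above), and the config file path is hypothetical:

```go
package main

import (
	"context"
	"log"
	"os"

	config "go.opentelemetry.io/contrib/config/v0.3.0"
)

func main() {
	// Load a configuration file that follows the v0.3.0 schema.
	b, err := os.ReadFile("otel-config.yaml") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	cfg, err := config.ParseYAML(b)
	if err != nil {
		log.Fatal(err)
	}

	// Build the SDK providers from the parsed configuration model.
	sdk, err := config.NewSDK(
		config.WithContext(context.Background()),
		config.WithOpenTelemetryConfiguration(*cfg), // assumed option, not shown in this diff
	)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = sdk.Shutdown(context.Background()) }()

	// Use the configured tracer provider as usual.
	tracer := sdk.TracerProvider().Tracer("example")
	_, span := tracer.Start(context.Background(), "startup")
	span.End()
}
```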