diff --git a/README.md b/README.md index 10ba442df2..b6fd74c4d0 100644 --- a/README.md +++ b/README.md @@ -48,7 +48,7 @@ This table represents the supported components of the ADOT Collector. The highli | | [groupbytraceprocessor](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/groupbytraceprocessor) | [logzioexporter](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/logzioexporter#logzio-exporter) | | | | [tailsamplingprocessor](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/tailsamplingprocessor) | [kafka](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/kafkaexporter) | | | | [k8sattributesprocessor](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/k8sattributesprocessor) | [loadbalancingexporter](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/loadbalancingexporter) | | -| | | [awscloudwatchlogsexporter](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/awscloudwatchlogsexporter) | | +| | [transformprocessor](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/transformprocessor) | [awscloudwatchlogsexporter](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/awscloudwatchlogsexporter) | | Besides the components that interact with telemetry signals directly from the previous table, there is also support to the following confmap providers: diff --git a/go.mod b/go.mod index 1c1734e1f2..6007bf20f8 100644 --- a/go.mod +++ b/go.mod @@ -36,6 +36,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.90.1 github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.90.1 github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.90.1 + 
github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.90.1 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.90.1 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver v0.90.1 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.90.1 diff --git a/go.sum b/go.sum index f665994f24..49e0aca129 100644 --- a/go.sum +++ b/go.sum @@ -858,6 +858,8 @@ github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocesso github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.90.1/go.mod h1:BSLMtC09e0i7UoOve3Ds0AAWSuawx9I+dRyj9Jk7wHI= github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.90.1 h1:49+SN78f/NAb92qL5Z9lBCEqXhqiRVU6YrzD1hREXmo= github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.90.1/go.mod h1:tJX7GDrWEDhnfvHPkCKZAU+xQ2SeQJJsdmR73F6i/UM= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.90.1 h1:+KymcJ8sjSl5IVeSWf6yndXJ8pr3BRlYw8AjkBQcD7Q= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.90.1/go.mod h1:HkVWsD4zuHzUiaTB+6/mSko6HTUPmIK4C5xv2tDYdV4= github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.90.1 h1:burL0Emdzdd199FyAVoBH/DP9Tuo3/zUsXmOVLMk/Qk= github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.90.1/go.mod h1:IVdbzRl7tC4fDVc+X+/GcW5SvIdSRVG44qVoXW3leJY= github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver v0.90.1 h1:rZyAwF+X4/LJW3Thbe5JoGfG1W9Ah4d8mAv0FwdJcqI= diff --git a/pkg/defaultcomponents/defaults.go b/pkg/defaultcomponents/defaults.go index 2623a270e8..51c24291f5 100644 --- a/pkg/defaultcomponents/defaults.go +++ b/pkg/defaultcomponents/defaults.go @@ -48,6 
+48,7 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver" @@ -128,6 +129,7 @@ func Components() (otelcol.Factories, error) { batchprocessor.NewFactory(), memorylimiterprocessor.NewFactory(), k8sattributesprocessor.NewFactory(), + transformprocessor.NewFactory(), } processors, err := processor.MakeFactoryMap(processorList...) diff --git a/pkg/defaultcomponents/defaults_test.go b/pkg/defaultcomponents/defaults_test.go index 7f8342c388..abcdbeeda5 100644 --- a/pkg/defaultcomponents/defaults_test.go +++ b/pkg/defaultcomponents/defaults_test.go @@ -25,7 +25,7 @@ const ( exportersCount = 16 receiversCount = 10 extensionsCount = 8 - processorCount = 15 + processorCount = 16 ) // Assert that the components behind feature gate are not in the default @@ -105,5 +105,6 @@ func TestComponents(t *testing.T) { assert.NotNil(t, processors["groupbytrace"]) assert.NotNil(t, processors["tail_sampling"]) assert.NotNil(t, processors["k8sattributes"]) + assert.NotNil(t, processors["transform"]) } diff --git a/testbed/go.mod b/testbed/go.mod index 348dbd2237..2a814a914f 100644 --- a/testbed/go.mod +++ b/testbed/go.mod @@ -259,6 +259,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.90.1 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.90.1 // indirect 
github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.90.1 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.90.1 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.90.1 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver v0.90.1 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.90.1 // indirect diff --git a/testbed/go.sum b/testbed/go.sum index 209e57869c..854563b38e 100644 --- a/testbed/go.sum +++ b/testbed/go.sum @@ -874,6 +874,8 @@ github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocesso github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.90.1/go.mod h1:BSLMtC09e0i7UoOve3Ds0AAWSuawx9I+dRyj9Jk7wHI= github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.90.1 h1:49+SN78f/NAb92qL5Z9lBCEqXhqiRVU6YrzD1hREXmo= github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.90.1/go.mod h1:tJX7GDrWEDhnfvHPkCKZAU+xQ2SeQJJsdmR73F6i/UM= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.90.1 h1:+KymcJ8sjSl5IVeSWf6yndXJ8pr3BRlYw8AjkBQcD7Q= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.90.1/go.mod h1:HkVWsD4zuHzUiaTB+6/mSko6HTUPmIK4C5xv2tDYdV4= github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.90.1 h1:burL0Emdzdd199FyAVoBH/DP9Tuo3/zUsXmOVLMk/Qk= github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.90.1/go.mod h1:IVdbzRl7tC4fDVc+X+/GcW5SvIdSRVG44qVoXW3leJY= github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver v0.90.1 h1:rZyAwF+X4/LJW3Thbe5JoGfG1W9Ah4d8mAv0FwdJcqI= diff --git 
a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope/README.md new file mode 100644 index 0000000000..4df2ae3616 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope/README.md @@ -0,0 +1,27 @@ +# Instrumentation Scope Context + +The Instrumentation Scope Context is a Context implementation for [pdata Instrumentation Scope](https://github.com/open-telemetry/opentelemetry-collector/blob/main/pdata/pcommon/generated_instrumentationscope.go), the Collector's internal representation for OTLP instrumentation scope data. This Context should be used when interacting only with OTLP instrumentation scope. + +## Paths +In general, the Instrumentation Scope Context supports accessing pdata using the field names from the instrumentation section in the [common proto](https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/common/v1/common.proto). All integers are returned and set via `int64`. All doubles are returned and set via `float64`. + +The following paths are supported. + +| path | field accessed | type | +|-----------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------| +| cache | the value of the current transform context's temporary cache. cache can be used as a temporary placeholder for data during complex transformations | pcommon.Map | +| cache\[""\] | the value of an item in cache. Supports multiple indexes to access nested fields. 
| string, bool, int64, float64, pcommon.Map, pcommon.Slice, []byte or nil | +| resource | resource of the instrumentation scope being processed | pcommon.Resource | +| resource.attributes | resource attributes of the instrumentation scope being processed | pcommon.Map | +| resource.attributes\[""\] | the value of the resource attribute of the instrumentation scope being processed. Supports multiple indexes to access nested fields. | string, bool, int64, float64, pcommon.Map, pcommon.Slice, []byte or nil | +| resource.dropped_attributes_count | number of dropped attributes of the resource of the instrumentation scope being processed | int64 | +| name | name of the instrumentation scope of the scope being processed | string | +| version | version of the instrumentation scope of the scope being processed | string | +| dropped_attributes_count | number of dropped attributes of the instrumentation scope of the scope being processed | int64 | +| attributes | instrumentation scope attributes of the scope being processed | pcommon.Map | +| attributes\[""\] | the value of the instrumentation scope attribute of the scope being processed. Supports multiple indexes to access nested fields. | string, bool, int64, float64, pcommon.Map, pcommon.Slice, []byte or nil | + + +## Enums + +The Instrumentation Scope Context does not define any Enums at this time. 
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope/scope.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope/scope.go new file mode 100644 index 0000000000..9c8ce9406c --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope/scope.go @@ -0,0 +1,134 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlscope // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope" + +import ( + "context" + "fmt" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/pdata/pcommon" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal" +) + +var _ internal.ResourceContext = TransformContext{} +var _ internal.InstrumentationScopeContext = TransformContext{} + +type TransformContext struct { + instrumentationScope pcommon.InstrumentationScope + resource pcommon.Resource + cache pcommon.Map +} + +type Option func(*ottl.Parser[TransformContext]) + +func NewTransformContext(instrumentationScope pcommon.InstrumentationScope, resource pcommon.Resource) TransformContext { + return TransformContext{ + instrumentationScope: instrumentationScope, + resource: resource, + cache: pcommon.NewMap(), + } +} + +func (tCtx TransformContext) GetInstrumentationScope() pcommon.InstrumentationScope { + return tCtx.instrumentationScope +} + +func (tCtx TransformContext) GetResource() pcommon.Resource { + return tCtx.resource +} + +func (tCtx TransformContext) getCache() pcommon.Map { + return tCtx.cache +} + +func NewParser(functions map[string]ottl.Factory[TransformContext], telemetrySettings component.TelemetrySettings, options ...Option) (ottl.Parser[TransformContext], error) { + pathExpressionParser := 
pathExpressionParser{telemetrySettings} + p, err := ottl.NewParser[TransformContext]( + functions, + pathExpressionParser.parsePath, + telemetrySettings, + ottl.WithEnumParser[TransformContext](parseEnum), + ) + if err != nil { + return ottl.Parser[TransformContext]{}, err + } + for _, opt := range options { + opt(&p) + } + return p, nil +} + +type StatementsOption func(*ottl.Statements[TransformContext]) + +func WithErrorMode(errorMode ottl.ErrorMode) StatementsOption { + return func(s *ottl.Statements[TransformContext]) { + ottl.WithErrorMode[TransformContext](errorMode)(s) + } +} + +func NewStatements(statements []*ottl.Statement[TransformContext], telemetrySettings component.TelemetrySettings, options ...StatementsOption) ottl.Statements[TransformContext] { + s := ottl.NewStatements(statements, telemetrySettings) + for _, op := range options { + op(&s) + } + return s +} + +func parseEnum(_ *ottl.EnumSymbol) (*ottl.Enum, error) { + return nil, fmt.Errorf("instrumentation scope context does not provide Enum support") +} + +type pathExpressionParser struct { + telemetrySettings component.TelemetrySettings +} + +func (pep *pathExpressionParser) parsePath(val *ottl.Path) (ottl.GetSetter[TransformContext], error) { + if val != nil && len(val.Fields) > 0 { + return newPathGetSetter(val.Fields) + } + return nil, fmt.Errorf("bad path %v", val) +} + +func newPathGetSetter(path []ottl.Field) (ottl.GetSetter[TransformContext], error) { + switch path[0].Name { + case "cache": + mapKey := path[0].Keys + if mapKey == nil { + return accessCache(), nil + } + return accessCacheKey(mapKey), nil + case "resource": + return internal.ResourcePathGetSetter[TransformContext](path[1:]) + default: + return internal.ScopePathGetSetter[TransformContext](path) + } +} + +func accessCache() ottl.StandardGetSetter[TransformContext] { + return ottl.StandardGetSetter[TransformContext]{ + Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + return tCtx.getCache(), nil + }, + 
Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + if m, ok := val.(pcommon.Map); ok { + m.CopyTo(tCtx.getCache()) + } + return nil + }, + } +} + +func accessCacheKey(keys []ottl.Key) ottl.StandardGetSetter[TransformContext] { + return ottl.StandardGetSetter[TransformContext]{ + Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + return internal.GetMapValue(tCtx.getCache(), keys) + }, + Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + return internal.SetMapValue(tCtx.getCache(), keys, val) + }, + } +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/CONTRIBUTING.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/CONTRIBUTING.md new file mode 100644 index 0000000000..654cfe9790 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/CONTRIBUTING.md @@ -0,0 +1,19 @@ +# Contributing + +This guide is specific to the transform processor. All guidelines in [Collector Contrib's CONTRIBUTING.MD](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md) must also be followed. + +## New Functions + +If a new function is not specific to the transform processor it should be added to the [OpenTelemetry Transformation Language](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl) instead. + +All new functions must be added via a new file. Function files must start with `func_`. Functions that are usable in multiple pipelines must be placed in `internal/common`. Functions that are specific to a pipeline must be placed in `internal/`. + +New functions must update the appropriate registry. For common functions, update the registry in `internal/common/functions.go`. 
For pipeline-specific functions, update the registry in `internal//functions.go` + +Unit tests must be added for all new functions. Unit test files must start with `func_` and end in `_test`. Unit tests must be placed in the same directory as the function. Functions that are not specific to a pipeline should be tested independently of any specific pipeline. Functions that are specific to a pipeline should be tests against that pipeline. + +All new functions should have integration tests added to any usable pipeline's `processing_test.go` tests. The purpose of these tests is not to test the function's logic, but its ability to be used within a specific pipeline. + +## New Values + +When adding new values to the grammar you must update the [OpenTelemetry Transformation Language](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/CONTRIBUTING.md) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/LICENSE b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/Makefile b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/Makefile new file mode 100644 index 0000000000..ded7a36092 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/README.md new file mode 100644 index 0000000000..bf6bd6e08b --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/README.md @@ -0,0 +1,442 @@ +# Transform Processor + +| Status | | +| ------------- |-----------| +| Stability | [alpha]: traces, metrics, logs | +| Distributions | [contrib], [grafana], [observiq], [splunk], [sumo] | +| Warnings | [Unsound Transformations, Identity Conflict, Orphaned Telemetry, Other](#warnings) | +| Issues | [![Open 
issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aprocessor%2Ftransform%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aprocessor%2Ftransform) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aprocessor%2Ftransform%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aprocessor%2Ftransform) | +| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@TylerHelmuth](https://www.github.com/TylerHelmuth), [@kentquirk](https://www.github.com/kentquirk), [@bogdandrutu](https://www.github.com/bogdandrutu), [@evan-bradley](https://www.github.com/evan-bradley) | + +[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha +[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib +[grafana]: https://github.com/grafana/agent +[observiq]: https://github.com/observIQ/observiq-otel-collector +[splunk]: https://github.com/signalfx/splunk-otel-collector +[sumo]: https://github.com/SumoLogic/sumologic-otel-collector + + +The transform processor modifies telemetry based on configuration using the [OpenTelemetry Transformation Language](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl). + +For each signal type, the processor takes a list of statements associated to a [Context type](#contexts) and executes the statements against the incoming telemetry in the order specified in the config. 
+Each statement can access and transform telemetry using functions and allow the use of a condition to help decide whether the function should be executed. + +- [Config](#config) +- [Grammar](#grammar) +- [Contexts](#contexts) +- [Supported functions](#supported-functions) +- [Examples](#examples) +- [Contributing](#contributing) + +## Config + +The transform processor allows configuring multiple context statements for traces, metrics, and logs. +The value of `context` specifies which [OTTL Context](#contexts) to use when interpreting the associated statements. +The statement strings, which must be OTTL compatible, will be passed to the OTTL and interpreted using the associated context. +Each context will be processed in the order specified and each statement for a context will be executed in the order specified. + +The transform processor also allows configuring an optional field, `error_mode`, which will determine how the processor reacts to errors that occur while processing a statement. + +| error_mode | description | +|-----------------------|----------------------------------------------------------------------------------------------------------------------------| +| ignore | The processor ignores errors returned by statements and continues on to the next statement. This is the recommended mode. | +| propagate | The processor returns the error up the pipeline. This will result in the payload being dropped from the collector. | + +If not specified, `propagate` will be used. + +```yaml +transform: + error_mode: ignore + _statements: + - context: string + statements: + - string + - string + - string + - context: string + statements: + - string + - string + - string +``` + +Proper use of contexts will provide increased performance and capabilities. See [Contexts](#contexts) for more details. 
+ +Valid values for `context` are: + +| Signal | Context Values | +|-------------------|------------------------------------------------| +| trace_statements | `resource`, `scope`, `span`, and `spanevent` | +| metric_statements | `resource`, `scope`, `metric`, and `datapoint` | +| log_statements | `resource`, `scope`, and `log` | + +### Example + +The example takes advantage of context efficiency by grouping transformations with the context which it intends to transform. +See [Contexts](#contexts) for more details. + +Example configuration: +```yaml +transform: + error_mode: ignore + trace_statements: + - context: resource + statements: + - keep_keys(attributes, ["service.name", "service.namespace", "cloud.region", "process.command_line"]) + - replace_pattern(attributes["process.command_line"], "password\\=[^\\s]*(\\s?)", "password=***") + - limit(attributes, 100, []) + - truncate_all(attributes, 4096) + - context: span + statements: + - set(status.code, 1) where attributes["http.path"] == "/health" + - set(name, attributes["http.route"]) + - replace_match(attributes["http.target"], "/user/*/list/*", "/user/{userId}/list/{listId}") + - limit(attributes, 100, []) + - truncate_all(attributes, 4096) + + metric_statements: + - context: resource + statements: + - keep_keys(attributes, ["host.name"]) + - truncate_all(attributes, 4096) + - context: metric + statements: + - set(description, "Sum") where type == "Sum" + - context: datapoint + statements: + - limit(attributes, 100, ["host.name"]) + - truncate_all(attributes, 4096) + - convert_sum_to_gauge() where metric.name == "system.processes.count" + - convert_gauge_to_sum("cumulative", false) where metric.name == "prometheus_metric" + + log_statements: + - context: resource + statements: + - keep_keys(resource.attributes, ["service.name", "service.namespace", "cloud.region"]) + - context: log + statements: + - set(severity_text, "FAIL") where body == "request failed" + - replace_all_matches(attributes, "/user/*/list/*", 
"/user/{userId}/list/{listId}") + - replace_all_patterns(attributes, "value", "/account/\\d{4}", "/account/{accountId}") + - set(body, attributes["http.route"]) +``` + +## Grammar + +You can learn more in-depth details on the capabilities and limitations of the OpenTelemetry Transformation Language used by the transform processor by reading about its [grammar](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl#grammar). + +## Contexts + +The transform processor utilizes the OTTL's contexts to transform Resource, Scope, Span, SpanEvent, Metric, DataPoint, and Log telemetry. +The contexts allow the OTTL to interact with the underlying telemetry data in its pdata form. + +- [Resource Context](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts/ottlresource) +- [Scope Context](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts/ottlscope) +- [Span Context](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts/ottlspan) +- [SpanEvent Context](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts/ottlspanevent) +- [Metric Context](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts/ottlmetric) +- [DataPoint Context](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts/ottldatapoint) +- [Log Context](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts/ottllog) + +Each context allows transformation of its type of telemetry. +For example, statements associated to a `resource` context will be able to transform the resource's `attributes` and `dropped_attributes_count`. + +Contexts __NEVER__ supply access to individual items "lower" in the protobuf definition. 
+- This means statements associated to a `resource` __WILL NOT__ be able to access the underlying instrumentation scopes. +- This means statements associated to a `scope` __WILL NOT__ be able to access the underlying telemetry slices (spans, metrics, or logs). +- Similarly, statements associated to a `metric` __WILL NOT__ be able to access individual datapoints, but can access the entire datapoints slice. +- Similarly, statements associated to a `span` __WILL NOT__ be able to access individual SpanEvents, but can access the entire SpanEvents slice. + +For practical purposes, this means that a context cannot make decisions on its telemetry based on telemetry "lower" in the structure. +For example, __the following context statement is not possible__ because it attempts to use individual datapoint attributes in the condition of a statements that is associated to a `metric` + +```yaml +metric_statements: +- context: metric + statements: + - set(description, "test passed") where datapoints.attributes["test"] == "pass" +``` + +Context __ALWAYS__ supply access to the items "higher" in the protobuf definition that are associated to the telemetry being transformed. +- This means that statements associated to a `datapoint` have access to a datapoint's metric, instrumentation scope, and resource. +- This means that statements associated to a `spanevent` have access to a spanevent's span, instrumentation scope, and resource. +- This means that statements associated to a `span`/`metric`/`log` have access to the telemetry's instrumentation scope, and resource. +- This means that statements associated to a `scope` have access to the scope's resource. + +For example, __the following context statement is possible__ because `datapoint` statements can access the datapoint's metric. 
+ +```yaml +metric_statements: +- context: datapoint + statements: + - set(metric.description, "test passed") where attributes["test"] == "pass" +``` + +Whenever possible, associate your statements to the context that the statement intend to transform. +Although you can modify resource attributes associated to a span using the `span` context, it is more efficient to use the `resource` context. +This is because contexts are nested: the efficiency comes because higher-level contexts can avoid iterating through any of the contexts at a lower level. + +## Supported functions: + +Since the transform processor utilizes the OTTL's contexts for Traces, Metrics, and Logs, it is able to utilize functions that expect pdata in addition to any common functions. These common functions can be used for any signal. + +- [OTTL Functions](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/ottlfuncs) + +In addition to OTTL functions, the processor defines its own functions to help with transformations specific to this processor: + +**Metrics only functions** +- [convert_sum_to_gauge](#convert_sum_to_gauge) +- [convert_gauge_to_sum](#convert_gauge_to_sum) +- [convert_summary_count_val_to_sum](#convert_summary_count_val_to_sum) +- [convert_summary_sum_val_to_sum](#convert_summary_sum_val_to_sum) + +### convert_sum_to_gauge + +`convert_sum_to_gauge()` + +Converts incoming metrics of type "Sum" to type "Gauge", retaining the metric's datapoints. Noop for metrics that are not of type "Sum". + +**NOTE:** This function may cause a metric to break semantics for [Gauge metrics](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#gauge). Use at your own risk. 
+ +Examples: + +- `convert_sum_to_gauge()` + +### convert_gauge_to_sum + +`convert_gauge_to_sum(aggregation_temporality, is_monotonic)` + +Converts incoming metrics of type "Gauge" to type "Sum", retaining the metric's datapoints and setting its aggregation temporality and monotonicity accordingly. Noop for metrics that are not of type "Gauge". + +`aggregation_temporality` is a string (`"cumulative"` or `"delta"`) that specifies the resultant metric's aggregation temporality. `is_monotonic` is a boolean that specifies the resultant metric's monotonicity. + +**NOTE:** This function may cause a metric to break semantics for [Sum metrics](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#sums). Use at your own risk. + +Examples: + +- `convert_gauge_to_sum("cumulative", false)` + + +- `convert_gauge_to_sum("delta", true)` + +### extract_count_metric + +> [!NOTE] +> This function supports Histograms, ExponentialHistograms and Summaries. + +`extract_count_metric(is_monotonic)` + +The `extract_count_metric` function creates a new Sum metric from a Histogram, ExponentialHistogram or Summary's count value. A metric will only be created if there is at least one data point. + +`is_monotonic` is a boolean representing the monotonicity of the new metric. + +The name for the new metric will be `<original metric name>_count`. The fields that are copied are: `timestamp`, `starttimestamp`, `attributes`, `description`, and `aggregation_temporality`. As metrics of type Summary don't have an `aggregation_temporality` field, this field will be set to `AGGREGATION_TEMPORALITY_CUMULATIVE` for those metrics. + +The new metric that is created will be passed to all subsequent statements in the metrics statements list. + +> [!WARNING] +> This function may cause a metric to break semantics for [Sum metrics](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#sums). 
Use only if you're confident you know what the resulting monotonicity should be. + +Examples: + +- `extract_count_metric(true)` + +- `extract_count_metric(false)` + +### extract_sum_metric + +> [!NOTE] +> This function supports Histograms, ExponentialHistograms and Summaries. + +`extract_sum_metric(is_monotonic)` + +The `extract_sum_metric` function creates a new Sum metric from a Histogram, ExponentialHistogram or Summary's sum value. If the sum value of a Histogram or ExponentialHistogram data point is missing, no data point is added to the output metric. A metric will only be created if there is at least one data point. + +`is_monotonic` is a boolean representing the monotonicity of the new metric. + +The name for the new metric will be `<original metric name>_sum`. The fields that are copied are: `timestamp`, `starttimestamp`, `attributes`, `description`, and `aggregation_temporality`. As metrics of type Summary don't have an `aggregation_temporality` field, this field will be set to `AGGREGATION_TEMPORALITY_CUMULATIVE` for those metrics. + +The new metric that is created will be passed to all subsequent statements in the metrics statements list. + +> [!WARNING] +> This function may cause a metric to break semantics for [Sum metrics](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#sums). Use only if you're confident you know what the resulting monotonicity should be. + +Examples: + +- `extract_sum_metric(true)` + +- `extract_sum_metric(false)` + +### convert_summary_count_val_to_sum + +`convert_summary_count_val_to_sum(aggregation_temporality, is_monotonic)` + +The `convert_summary_count_val_to_sum` function creates a new Sum metric from a Summary's count value. + +`aggregation_temporality` is a string (`"cumulative"` or `"delta"`) representing the desired aggregation temporality of the new metric. `is_monotonic` is a boolean representing the monotonicity of the new metric. + +The name for the new metric will be `<summary metric name>_count`. 
The fields that are copied are: `timestamp`, `starttimestamp`, `attributes`, and `description`. The new metric that is created will be passed to all functions in the metrics statements list. Function conditions will apply. + +**NOTE:** This function may cause a metric to break semantics for [Sum metrics](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#sums). Use at your own risk. + +Examples: + +- `convert_summary_count_val_to_sum("delta", true)` + + +- `convert_summary_count_val_to_sum("cumulative", false)` + +### convert_summary_sum_val_to_sum + +`convert_summary_sum_val_to_sum(aggregation_temporality, is_monotonic)` + +The `convert_summary_sum_val_to_sum` function creates a new Sum metric from a Summary's sum value. + +`aggregation_temporality` is a string (`"cumulative"` or `"delta"`) representing the desired aggregation temporality of the new metric. `is_monotonic` is a boolean representing the monotonicity of the new metric. + +The name for the new metric will be `<summary metric name>_sum`. The fields that are copied are: `timestamp`, `starttimestamp`, `attributes`, and `description`. The new metric that is created will be passed to all functions in the metrics statements list. Function conditions will apply. + +**NOTE:** This function may cause a metric to break semantics for [Sum metrics](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#sums). Use at your own risk. + +Examples: + +- `convert_summary_sum_val_to_sum("delta", true)` + + +- `convert_summary_sum_val_to_sum("cumulative", false)` + +## Examples + +### Perform transformation if field does not exist +Set attribute `test` to `"pass"` if the attribute `test` does not exist: +```yaml +transform: + error_mode: ignore + trace_statements: + - context: span + statements: + # accessing a map with a key that does not exist will return nil. 
+ - set(attributes["test"], "pass") where attributes["test"] == nil +``` + +### Rename attribute +There are 2 ways to rename an attribute key: + +You can either set a new attribute and delete the old: + +```yaml +transform: + error_mode: ignore + trace_statements: + - context: resource + statements: + - set(attributes["namespace"], attributes["k8s.namespace.name"]) + - delete_key(attributes, "k8s.namespace.name") +``` + +Or you can update the key using regex: + +```yaml +transform: + error_mode: ignore + trace_statements: + - context: resource + statements: + - replace_all_patterns(attributes, "key", "k8s\\.namespace\\.name", "namespace") +``` + +### Move field to attribute +Set attribute `body` to the value of the log body: + +```yaml +transform: + error_mode: ignore + log_statements: + - context: log + statements: + - set(attributes["body"], body) +``` + +### Combine two attributes +Set attribute `test` to the value of attributes `"foo"` and `"bar"` combined. +```yaml +transform: + error_mode: ignore + trace_statements: + - context: resource + statements: + # Use Concat function to combine any number of string, separated by a delimiter. + - set(attributes["test"], Concat([attributes["foo"], attributes["bar"]], " ")) +``` + +### Parsing JSON logs + +Given the following json body + +```json +{ + "name": "log", + "attr1": "foo", + "attr2": "bar", + "nested": { + "attr3": "example" + } +} +``` + +add specific fields as attributes on the log: + +```yaml +transform: + error_mode: ignore + log_statements: + - context: log + statements: + # Parse body as JSON and merge the resulting map with the cache map, ignoring non-json bodies. + # cache is a field exposed by OTTL that is a temporary storage place for complex operations. + - merge_maps(cache, ParseJSON(body), "upsert") where IsMatch(body, "^\\{") + + # Set attributes using the values merged into cache. + # If the attribute doesn't exist in cache then nothing happens. 
+ - set(attributes["attr1"], cache["attr1"]) + - set(attributes["attr2"], cache["attr2"]) + + # To access nested maps you can chain index ([]) operations. + # If nested or attr3 do no exist in cache then nothing happens. + - set(attributes["nested.attr3"], cache["nested"]["attr3"]) +``` + +### Get Severity of an Unstructured Log Body + +Given the following unstructured log body + +```txt +[2023-09-22 07:38:22,570] INFO [Something]: some interesting log +``` + +You can find the severity using IsMatch: + +```yaml +transform: + error_mode: ignore + log_statements: + - context: log + statements: + - set(severity_number, SEVERITY_NUMBER_INFO) where IsString(body) and IsMatch(body, "\\sINFO\\s") + - set(severity_number, SEVERITY_NUMBER_WARN) where IsString(body) and IsMatch(body, "\\sWARN\\s") + - set(severity_number, SEVERITY_NUMBER_ERROR) where IsString(body) and IsMatch(body, "\\sERROR\\s") +``` + +## Contributing + +See [CONTRIBUTING.md](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/transformprocessor/CONTRIBUTING.md). + + +## Warnings + +The transform processor's implementation of the [OpenTelemetry Transformation Language](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/processing.md#opentelemetry-transformation-language) (OTTL) allows users to modify all aspects of their telemetry. Some specific risks are listed below, but this is not an exhaustive list. In general, understand your data before using the transform processor. + +- [Unsound Transformations](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/standard-warnings.md#unsound-transformations): Several Metric-only functions allow you to transform one metric data type to another or create new metrics from an existing metrics. Transformations between metric data types are not defined in the [metrics data model](https://github.com/open-telemetry/opentelemetry-specification/blob/main//specification/metrics/data-model.md). 
These functions have the expectation that you understand the incoming data and know that it can be meaningfully converted to a new metric data type or can meaningfully be used to create new metrics. + - Although the OTTL allows the `set` function to be used with `metric.data_type`, its implementation in the transform processor is NOOP. To modify a data type you must use a function specific to that purpose. +- [Identity Conflict](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/standard-warnings.md#identity-conflict): Transformations of metrics have the potential to affect the identity of a metric, leading to an Identity Crisis. Be especially cautious when transforming metric name and when reducing/changing existing attributes. Adding new attributes is safe. +- [Orphaned Telemetry](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/standard-warnings.md#orphaned-telemetry): The processor allows you to modify `span_id`, `trace_id`, and `parent_span_id` for traces, and `span_id` and `trace_id` for logs. Modifying these fields could lead to orphaned spans or logs. 
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/config.go new file mode 100644 index 0000000000..c81700201a --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/config.go @@ -0,0 +1,77 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package transformprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor" + +import ( + "go.opentelemetry.io/collector/component" + "go.uber.org/multierr" + "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/logs" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/traces" +) + +// Config defines the configuration for the processor. +type Config struct { + // ErrorMode determines how the processor reacts to errors that occur while processing a statement. + // Valid values are `ignore` and `propagate`. + // `ignore` means the processor ignores errors returned by statements and continues on to the next statement. This is the recommended mode. + // `propagate` means the processor returns the error up the pipeline. This will result in the payload being dropped from the collector. + // The default value is `propagate`. 
+ ErrorMode ottl.ErrorMode `mapstructure:"error_mode"` + + TraceStatements []common.ContextStatements `mapstructure:"trace_statements"` + MetricStatements []common.ContextStatements `mapstructure:"metric_statements"` + LogStatements []common.ContextStatements `mapstructure:"log_statements"` +} + +var _ component.Config = (*Config)(nil) + +func (c *Config) Validate() error { + var errors error + + if len(c.TraceStatements) > 0 { + pc, err := common.NewTraceParserCollection(component.TelemetrySettings{Logger: zap.NewNop()}, common.WithSpanParser(traces.SpanFunctions()), common.WithSpanEventParser(traces.SpanEventFunctions())) + if err != nil { + return err + } + for _, cs := range c.TraceStatements { + _, err = pc.ParseContextStatements(cs) + if err != nil { + errors = multierr.Append(errors, err) + } + } + } + + if len(c.MetricStatements) > 0 { + pc, err := common.NewMetricParserCollection(component.TelemetrySettings{Logger: zap.NewNop()}, common.WithMetricParser(metrics.MetricFunctions()), common.WithDataPointParser(metrics.DataPointFunctions())) + if err != nil { + return err + } + for _, cs := range c.MetricStatements { + _, err := pc.ParseContextStatements(cs) + if err != nil { + errors = multierr.Append(errors, err) + } + } + } + + if len(c.LogStatements) > 0 { + pc, err := common.NewLogParserCollection(component.TelemetrySettings{Logger: zap.NewNop()}, common.WithLogParser(logs.LogFunctions())) + if err != nil { + return err + } + for _, cs := range c.LogStatements { + _, err = pc.ParseContextStatements(cs) + if err != nil { + errors = multierr.Append(errors, err) + } + } + } + + return errors +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/doc.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/doc.go new file mode 100644 index 0000000000..d833504f6e --- /dev/null +++ 
b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/doc.go @@ -0,0 +1,8 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:generate mdatagen metadata.yaml + +// Package transformprocessor contains the logic to execute telemetry transform based +// on the OpenTelemetry Transformation Language. +package transformprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor" diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/factory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/factory.go new file mode 100644 index 0000000000..90eca6440a --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/factory.go @@ -0,0 +1,105 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package transformprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor" + +import ( + "context" + "fmt" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/processor" + "go.opentelemetry.io/collector/processor/processorhelper" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/logs" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metadata" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/traces" +) + +var processorCapabilities = 
consumer.Capabilities{MutatesData: true} + +func NewFactory() processor.Factory { + return processor.NewFactory( + metadata.Type, + createDefaultConfig, + processor.WithLogs(createLogsProcessor, metadata.LogsStability), + processor.WithTraces(createTracesProcessor, metadata.TracesStability), + processor.WithMetrics(createMetricsProcessor, metadata.MetricsStability), + ) +} + +func createDefaultConfig() component.Config { + return &Config{ + ErrorMode: ottl.PropagateError, + TraceStatements: []common.ContextStatements{}, + MetricStatements: []common.ContextStatements{}, + LogStatements: []common.ContextStatements{}, + } +} + +func createLogsProcessor( + ctx context.Context, + set processor.CreateSettings, + cfg component.Config, + nextConsumer consumer.Logs, +) (processor.Logs, error) { + oCfg := cfg.(*Config) + + proc, err := logs.NewProcessor(oCfg.LogStatements, oCfg.ErrorMode, set.TelemetrySettings) + if err != nil { + return nil, fmt.Errorf("invalid config for \"transform\" processor %w", err) + } + return processorhelper.NewLogsProcessor( + ctx, + set, + cfg, + nextConsumer, + proc.ProcessLogs, + processorhelper.WithCapabilities(processorCapabilities)) +} + +func createTracesProcessor( + ctx context.Context, + set processor.CreateSettings, + cfg component.Config, + nextConsumer consumer.Traces, +) (processor.Traces, error) { + oCfg := cfg.(*Config) + + proc, err := traces.NewProcessor(oCfg.TraceStatements, oCfg.ErrorMode, set.TelemetrySettings) + if err != nil { + return nil, fmt.Errorf("invalid config for \"transform\" processor %w", err) + } + return processorhelper.NewTracesProcessor( + ctx, + set, + cfg, + nextConsumer, + proc.ProcessTraces, + processorhelper.WithCapabilities(processorCapabilities)) +} + +func createMetricsProcessor( + ctx context.Context, + set processor.CreateSettings, + cfg component.Config, + nextConsumer consumer.Metrics, +) (processor.Metrics, error) { + oCfg := cfg.(*Config) + + proc, err := 
metrics.NewProcessor(oCfg.MetricStatements, oCfg.ErrorMode, set.TelemetrySettings) + if err != nil { + return nil, fmt.Errorf("invalid config for \"transform\" processor %w", err) + } + return processorhelper.NewMetricsProcessor( + ctx, + set, + cfg, + nextConsumer, + proc.ProcessMetrics, + processorhelper.WithCapabilities(processorCapabilities)) +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common/config.go new file mode 100644 index 0000000000..2747ac11d4 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common/config.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package common // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common" + +import ( + "fmt" + "strings" +) + +type ContextID string + +const ( + Resource ContextID = "resource" + Scope ContextID = "scope" + Span ContextID = "span" + SpanEvent ContextID = "spanevent" + Metric ContextID = "metric" + DataPoint ContextID = "datapoint" + Log ContextID = "log" +) + +func (c *ContextID) UnmarshalText(text []byte) error { + str := ContextID(strings.ToLower(string(text))) + switch str { + case Resource, Scope, Span, SpanEvent, Metric, DataPoint, Log: + *c = str + return nil + default: + return fmt.Errorf("unknown context %v", str) + } +} + +type ContextStatements struct { + Context ContextID `mapstructure:"context"` + Statements []string `mapstructure:"statements"` +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common/functions.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common/functions.go new file mode 
100644 index 0000000000..2d0a02b061 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common/functions.go @@ -0,0 +1,19 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package common // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common" + +import ( + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" +) + +func ResourceFunctions() map[string]ottl.Factory[ottlresource.TransformContext] { + return ottlfuncs.StandardFuncs[ottlresource.TransformContext]() +} + +func ScopeFunctions() map[string]ottl.Factory[ottlscope.TransformContext] { + return ottlfuncs.StandardFuncs[ottlscope.TransformContext]() +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common/logs.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common/logs.go new file mode 100644 index 0000000000..7af9031d50 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common/logs.go @@ -0,0 +1,117 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package common // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common" + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/pdata/plog" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + 
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope" +) + +var _ consumer.Logs = &logStatements{} + +type logStatements struct { + ottl.Statements[ottllog.TransformContext] +} + +func (l logStatements) Capabilities() consumer.Capabilities { + return consumer.Capabilities{ + MutatesData: true, + } +} + +func (l logStatements) ConsumeLogs(ctx context.Context, ld plog.Logs) error { + for i := 0; i < ld.ResourceLogs().Len(); i++ { + rlogs := ld.ResourceLogs().At(i) + for j := 0; j < rlogs.ScopeLogs().Len(); j++ { + slogs := rlogs.ScopeLogs().At(j) + logs := slogs.LogRecords() + for k := 0; k < logs.Len(); k++ { + tCtx := ottllog.NewTransformContext(logs.At(k), slogs.Scope(), rlogs.Resource()) + err := l.Execute(ctx, tCtx) + if err != nil { + return err + } + } + } + } + return nil +} + +type LogParserCollection struct { + parserCollection + logParser ottl.Parser[ottllog.TransformContext] +} + +type LogParserCollectionOption func(*LogParserCollection) error + +func WithLogParser(functions map[string]ottl.Factory[ottllog.TransformContext]) LogParserCollectionOption { + return func(lp *LogParserCollection) error { + logParser, err := ottllog.NewParser(functions, lp.settings) + if err != nil { + return err + } + lp.logParser = logParser + return nil + } +} + +func WithLogErrorMode(errorMode ottl.ErrorMode) LogParserCollectionOption { + return func(lp *LogParserCollection) error { + lp.errorMode = errorMode + return nil + } +} + +func NewLogParserCollection(settings component.TelemetrySettings, options ...LogParserCollectionOption) (*LogParserCollection, error) { + rp, err := ottlresource.NewParser(ResourceFunctions(), settings) + if err != nil { + return nil, err + } + sp, err := ottlscope.NewParser(ScopeFunctions(), settings) + if err != nil { + return nil, 
err + } + lpc := &LogParserCollection{ + parserCollection: parserCollection{ + settings: settings, + resourceParser: rp, + scopeParser: sp, + }, + } + + for _, op := range options { + err := op(lpc) + if err != nil { + return nil, err + } + } + + return lpc, nil +} + +func (pc LogParserCollection) ParseContextStatements(contextStatements ContextStatements) (consumer.Logs, error) { + switch contextStatements.Context { + case Log: + parsedStatements, err := pc.logParser.ParseStatements(contextStatements.Statements) + if err != nil { + return nil, err + } + lStatements := ottllog.NewStatements(parsedStatements, pc.settings, ottllog.WithErrorMode(pc.errorMode)) + return logStatements{lStatements}, nil + default: + statements, err := pc.parseCommonContextStatements(contextStatements) + if err != nil { + return nil, err + } + return statements, nil + } +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common/metrics.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common/metrics.go new file mode 100644 index 0000000000..becce3132c --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common/metrics.go @@ -0,0 +1,225 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package common // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common" + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint" + 
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope" +) + +var _ consumer.Metrics = &metricStatements{} + +type metricStatements struct { + ottl.Statements[ottlmetric.TransformContext] +} + +func (m metricStatements) Capabilities() consumer.Capabilities { + return consumer.Capabilities{ + MutatesData: true, + } +} + +func (m metricStatements) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error { + for i := 0; i < md.ResourceMetrics().Len(); i++ { + rmetrics := md.ResourceMetrics().At(i) + for j := 0; j < rmetrics.ScopeMetrics().Len(); j++ { + smetrics := rmetrics.ScopeMetrics().At(j) + metrics := smetrics.Metrics() + for k := 0; k < metrics.Len(); k++ { + tCtx := ottlmetric.NewTransformContext(metrics.At(k), smetrics.Metrics(), smetrics.Scope(), rmetrics.Resource()) + err := m.Execute(ctx, tCtx) + if err != nil { + return err + } + } + } + } + return nil +} + +var _ consumer.Metrics = &dataPointStatements{} + +type dataPointStatements struct { + ottl.Statements[ottldatapoint.TransformContext] +} + +func (d dataPointStatements) Capabilities() consumer.Capabilities { + return consumer.Capabilities{ + MutatesData: true, + } +} + +func (d dataPointStatements) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error { + for i := 0; i < md.ResourceMetrics().Len(); i++ { + rmetrics := md.ResourceMetrics().At(i) + for j := 0; j < rmetrics.ScopeMetrics().Len(); j++ { + smetrics := rmetrics.ScopeMetrics().At(j) + metrics := smetrics.Metrics() + for k := 0; k < metrics.Len(); k++ { + metric := metrics.At(k) + var err error + //exhaustive:enforce + switch metric.Type() { + case pmetric.MetricTypeSum: + err = d.handleNumberDataPoints(ctx, metric.Sum().DataPoints(), metrics.At(k), metrics, smetrics.Scope(), rmetrics.Resource()) + case 
pmetric.MetricTypeGauge: + err = d.handleNumberDataPoints(ctx, metric.Gauge().DataPoints(), metrics.At(k), metrics, smetrics.Scope(), rmetrics.Resource()) + case pmetric.MetricTypeHistogram: + err = d.handleHistogramDataPoints(ctx, metric.Histogram().DataPoints(), metrics.At(k), metrics, smetrics.Scope(), rmetrics.Resource()) + case pmetric.MetricTypeExponentialHistogram: + err = d.handleExponetialHistogramDataPoints(ctx, metric.ExponentialHistogram().DataPoints(), metrics.At(k), metrics, smetrics.Scope(), rmetrics.Resource()) + case pmetric.MetricTypeSummary: + err = d.handleSummaryDataPoints(ctx, metric.Summary().DataPoints(), metrics.At(k), metrics, smetrics.Scope(), rmetrics.Resource()) + } + if err != nil { + return err + } + } + } + } + return nil +} + +func (d dataPointStatements) handleNumberDataPoints(ctx context.Context, dps pmetric.NumberDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource) error { + for i := 0; i < dps.Len(); i++ { + tCtx := ottldatapoint.NewTransformContext(dps.At(i), metric, metrics, is, resource) + err := d.Execute(ctx, tCtx) + if err != nil { + return err + } + } + return nil +} + +func (d dataPointStatements) handleHistogramDataPoints(ctx context.Context, dps pmetric.HistogramDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource) error { + for i := 0; i < dps.Len(); i++ { + tCtx := ottldatapoint.NewTransformContext(dps.At(i), metric, metrics, is, resource) + err := d.Execute(ctx, tCtx) + if err != nil { + return err + } + } + return nil +} + +func (d dataPointStatements) handleExponetialHistogramDataPoints(ctx context.Context, dps pmetric.ExponentialHistogramDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource) error { + for i := 0; i < dps.Len(); i++ { + tCtx := ottldatapoint.NewTransformContext(dps.At(i), metric, 
metrics, is, resource) + err := d.Execute(ctx, tCtx) + if err != nil { + return err + } + } + return nil +} + +func (d dataPointStatements) handleSummaryDataPoints(ctx context.Context, dps pmetric.SummaryDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource) error { + for i := 0; i < dps.Len(); i++ { + tCtx := ottldatapoint.NewTransformContext(dps.At(i), metric, metrics, is, resource) + err := d.Execute(ctx, tCtx) + if err != nil { + return err + } + } + return nil +} + +type MetricParserCollection struct { + parserCollection + metricParser ottl.Parser[ottlmetric.TransformContext] + dataPointParser ottl.Parser[ottldatapoint.TransformContext] +} + +type MetricParserCollectionOption func(*MetricParserCollection) error + +func WithMetricParser(functions map[string]ottl.Factory[ottlmetric.TransformContext]) MetricParserCollectionOption { + return func(mp *MetricParserCollection) error { + metricParser, err := ottlmetric.NewParser(functions, mp.settings) + if err != nil { + return err + } + mp.metricParser = metricParser + return nil + } +} + +func WithDataPointParser(functions map[string]ottl.Factory[ottldatapoint.TransformContext]) MetricParserCollectionOption { + return func(mp *MetricParserCollection) error { + dataPointParser, err := ottldatapoint.NewParser(functions, mp.settings) + if err != nil { + return err + } + mp.dataPointParser = dataPointParser + return nil + } +} + +func WithMetricErrorMode(errorMode ottl.ErrorMode) MetricParserCollectionOption { + return func(mp *MetricParserCollection) error { + mp.errorMode = errorMode + return nil + } +} + +func NewMetricParserCollection(settings component.TelemetrySettings, options ...MetricParserCollectionOption) (*MetricParserCollection, error) { + rp, err := ottlresource.NewParser(ResourceFunctions(), settings) + if err != nil { + return nil, err + } + sp, err := ottlscope.NewParser(ScopeFunctions(), settings) + if err != nil { + return nil, 
err + } + mpc := &MetricParserCollection{ + parserCollection: parserCollection{ + settings: settings, + resourceParser: rp, + scopeParser: sp, + }, + } + + for _, op := range options { + err := op(mpc) + if err != nil { + return nil, err + } + } + + return mpc, nil +} + +func (pc MetricParserCollection) ParseContextStatements(contextStatements ContextStatements) (consumer.Metrics, error) { + switch contextStatements.Context { + case Metric: + parseStatements, err := pc.metricParser.ParseStatements(contextStatements.Statements) + if err != nil { + return nil, err + } + mStatements := ottlmetric.NewStatements(parseStatements, pc.settings, ottlmetric.WithErrorMode(pc.errorMode)) + return metricStatements{mStatements}, nil + case DataPoint: + parsedStatements, err := pc.dataPointParser.ParseStatements(contextStatements.Statements) + if err != nil { + return nil, err + } + dpStatements := ottldatapoint.NewStatements(parsedStatements, pc.settings, ottldatapoint.WithErrorMode(pc.errorMode)) + return dataPointStatements{dpStatements}, nil + default: + statements, err := pc.parseCommonContextStatements(contextStatements) + if err != nil { + return nil, err + } + return statements, nil + } +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common/processor.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common/processor.go new file mode 100644 index 0000000000..56e658b96d --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common/processor.go @@ -0,0 +1,164 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package common // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common" + +import ( + "context" + "fmt" + + "go.opentelemetry.io/collector/component" + 
"go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope" +) + +var _ consumer.Traces = &resourceStatements{} +var _ consumer.Metrics = &resourceStatements{} +var _ consumer.Logs = &resourceStatements{} +var _ baseContext = &resourceStatements{} + +type resourceStatements struct { + ottl.Statements[ottlresource.TransformContext] +} + +func (r resourceStatements) Capabilities() consumer.Capabilities { + return consumer.Capabilities{ + MutatesData: true, + } +} + +func (r resourceStatements) ConsumeTraces(ctx context.Context, td ptrace.Traces) error { + for i := 0; i < td.ResourceSpans().Len(); i++ { + rspans := td.ResourceSpans().At(i) + tCtx := ottlresource.NewTransformContext(rspans.Resource()) + err := r.Execute(ctx, tCtx) + if err != nil { + return err + } + } + return nil +} + +func (r resourceStatements) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error { + for i := 0; i < md.ResourceMetrics().Len(); i++ { + rmetrics := md.ResourceMetrics().At(i) + tCtx := ottlresource.NewTransformContext(rmetrics.Resource()) + err := r.Execute(ctx, tCtx) + if err != nil { + return err + } + } + return nil +} + +func (r resourceStatements) ConsumeLogs(ctx context.Context, ld plog.Logs) error { + for i := 0; i < ld.ResourceLogs().Len(); i++ { + rlogs := ld.ResourceLogs().At(i) + tCtx := ottlresource.NewTransformContext(rlogs.Resource()) + err := r.Execute(ctx, tCtx) + if err != nil { + return err + } + } + return nil +} + +var _ consumer.Traces = &scopeStatements{} +var _ consumer.Metrics = &scopeStatements{} +var _ consumer.Logs = &scopeStatements{} +var _ baseContext = &scopeStatements{} + 
+type scopeStatements struct { + ottl.Statements[ottlscope.TransformContext] +} + +func (s scopeStatements) Capabilities() consumer.Capabilities { + return consumer.Capabilities{ + MutatesData: true, + } +} + +func (s scopeStatements) ConsumeTraces(ctx context.Context, td ptrace.Traces) error { + for i := 0; i < td.ResourceSpans().Len(); i++ { + rspans := td.ResourceSpans().At(i) + for j := 0; j < rspans.ScopeSpans().Len(); j++ { + sspans := rspans.ScopeSpans().At(j) + tCtx := ottlscope.NewTransformContext(sspans.Scope(), rspans.Resource()) + err := s.Execute(ctx, tCtx) + if err != nil { + return err + } + } + } + return nil +} + +func (s scopeStatements) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error { + for i := 0; i < md.ResourceMetrics().Len(); i++ { + rmetrics := md.ResourceMetrics().At(i) + for j := 0; j < rmetrics.ScopeMetrics().Len(); j++ { + smetrics := rmetrics.ScopeMetrics().At(j) + tCtx := ottlscope.NewTransformContext(smetrics.Scope(), rmetrics.Resource()) + err := s.Execute(ctx, tCtx) + if err != nil { + return err + } + } + } + return nil +} + +func (s scopeStatements) ConsumeLogs(ctx context.Context, ld plog.Logs) error { + for i := 0; i < ld.ResourceLogs().Len(); i++ { + rlogs := ld.ResourceLogs().At(i) + for j := 0; j < rlogs.ScopeLogs().Len(); j++ { + slogs := rlogs.ScopeLogs().At(j) + tCtx := ottlscope.NewTransformContext(slogs.Scope(), rlogs.Resource()) + err := s.Execute(ctx, tCtx) + if err != nil { + return err + } + } + } + return nil +} + +type parserCollection struct { + settings component.TelemetrySettings + resourceParser ottl.Parser[ottlresource.TransformContext] + scopeParser ottl.Parser[ottlscope.TransformContext] + errorMode ottl.ErrorMode +} + +type baseContext interface { + consumer.Traces + consumer.Metrics + consumer.Logs +} + +func (pc parserCollection) parseCommonContextStatements(contextStatement ContextStatements) (baseContext, error) { + switch contextStatement.Context { + case Resource: + parsedStatements, 
err := pc.resourceParser.ParseStatements(contextStatement.Statements) + if err != nil { + return nil, err + } + rStatements := ottlresource.NewStatements(parsedStatements, pc.settings, ottlresource.WithErrorMode(pc.errorMode)) + return resourceStatements{rStatements}, nil + case Scope: + parsedStatements, err := pc.scopeParser.ParseStatements(contextStatement.Statements) + if err != nil { + return nil, err + } + sStatements := ottlscope.NewStatements(parsedStatements, pc.settings, ottlscope.WithErrorMode(pc.errorMode)) + return scopeStatements{sStatements}, nil + default: + return nil, fmt.Errorf("unknown context %v", contextStatement.Context) + } +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common/traces.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common/traces.go new file mode 100644 index 0000000000..55951eabc9 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common/traces.go @@ -0,0 +1,167 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package common // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common" + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/pdata/ptrace" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent" +) + +var _ consumer.Traces = &traceStatements{} + +type 
traceStatements struct { + ottl.Statements[ottlspan.TransformContext] +} + +func (t traceStatements) Capabilities() consumer.Capabilities { + return consumer.Capabilities{ + MutatesData: true, + } +} + +func (t traceStatements) ConsumeTraces(ctx context.Context, td ptrace.Traces) error { + for i := 0; i < td.ResourceSpans().Len(); i++ { + rspans := td.ResourceSpans().At(i) + for j := 0; j < rspans.ScopeSpans().Len(); j++ { + sspans := rspans.ScopeSpans().At(j) + spans := sspans.Spans() + for k := 0; k < spans.Len(); k++ { + tCtx := ottlspan.NewTransformContext(spans.At(k), sspans.Scope(), rspans.Resource()) + err := t.Execute(ctx, tCtx) + if err != nil { + return err + } + } + } + } + return nil +} + +var _ consumer.Traces = &spanEventStatements{} + +type spanEventStatements struct { + ottl.Statements[ottlspanevent.TransformContext] +} + +func (s spanEventStatements) Capabilities() consumer.Capabilities { + return consumer.Capabilities{ + MutatesData: true, + } +} + +func (s spanEventStatements) ConsumeTraces(ctx context.Context, td ptrace.Traces) error { + for i := 0; i < td.ResourceSpans().Len(); i++ { + rspans := td.ResourceSpans().At(i) + for j := 0; j < rspans.ScopeSpans().Len(); j++ { + sspans := rspans.ScopeSpans().At(j) + spans := sspans.Spans() + for k := 0; k < spans.Len(); k++ { + span := spans.At(k) + spanEvents := span.Events() + for n := 0; n < spanEvents.Len(); n++ { + tCtx := ottlspanevent.NewTransformContext(spanEvents.At(n), span, sspans.Scope(), rspans.Resource()) + err := s.Execute(ctx, tCtx) + if err != nil { + return err + } + } + } + } + } + return nil +} + +type TraceParserCollection struct { + parserCollection + spanParser ottl.Parser[ottlspan.TransformContext] + spanEventParser ottl.Parser[ottlspanevent.TransformContext] +} + +type TraceParserCollectionOption func(*TraceParserCollection) error + +func WithSpanParser(functions map[string]ottl.Factory[ottlspan.TransformContext]) TraceParserCollectionOption { + return func(tp 
*TraceParserCollection) error { + spanParser, err := ottlspan.NewParser(functions, tp.settings) + if err != nil { + return err + } + tp.spanParser = spanParser + return nil + } +} + +func WithSpanEventParser(functions map[string]ottl.Factory[ottlspanevent.TransformContext]) TraceParserCollectionOption { + return func(tp *TraceParserCollection) error { + spanEventParser, err := ottlspanevent.NewParser(functions, tp.settings) + if err != nil { + return err + } + tp.spanEventParser = spanEventParser + return nil + } +} + +func WithTraceErrorMode(errorMode ottl.ErrorMode) TraceParserCollectionOption { + return func(tp *TraceParserCollection) error { + tp.errorMode = errorMode + return nil + } +} + +func NewTraceParserCollection(settings component.TelemetrySettings, options ...TraceParserCollectionOption) (*TraceParserCollection, error) { + rp, err := ottlresource.NewParser(ResourceFunctions(), settings) + if err != nil { + return nil, err + } + sp, err := ottlscope.NewParser(ScopeFunctions(), settings) + if err != nil { + return nil, err + } + tpc := &TraceParserCollection{ + parserCollection: parserCollection{ + settings: settings, + resourceParser: rp, + scopeParser: sp, + }, + } + + for _, op := range options { + err := op(tpc) + if err != nil { + return nil, err + } + } + + return tpc, nil +} + +func (pc TraceParserCollection) ParseContextStatements(contextStatements ContextStatements) (consumer.Traces, error) { + switch contextStatements.Context { + case Span: + parsedStatements, err := pc.spanParser.ParseStatements(contextStatements.Statements) + if err != nil { + return nil, err + } + sStatements := ottlspan.NewStatements(parsedStatements, pc.settings, ottlspan.WithErrorMode(pc.errorMode)) + return traceStatements{sStatements}, nil + case SpanEvent: + parsedStatements, err := pc.spanEventParser.ParseStatements(contextStatements.Statements) + if err != nil { + return nil, err + } + seStatements := ottlspanevent.NewStatements(parsedStatements, pc.settings, 
ottlspanevent.WithErrorMode(pc.errorMode)) + return spanEventStatements{seStatements}, nil + default: + return pc.parseCommonContextStatements(contextStatements) + } +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/logs/functions.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/logs/functions.go new file mode 100644 index 0000000000..47a5151f62 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/logs/functions.go @@ -0,0 +1,15 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package logs // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/logs" + +import ( + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" +) + +func LogFunctions() map[string]ottl.Factory[ottllog.TransformContext] { + // No logs-only functions yet. 
+ return ottlfuncs.StandardFuncs[ottllog.TransformContext]() +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/logs/processor.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/logs/processor.go new file mode 100644 index 0000000000..d9502e9776 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/logs/processor.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package logs // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/logs" + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/pdata/plog" + "go.uber.org/multierr" + "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common" +) + +type Processor struct { + contexts []consumer.Logs + logger *zap.Logger +} + +func NewProcessor(contextStatements []common.ContextStatements, errorMode ottl.ErrorMode, settings component.TelemetrySettings) (*Processor, error) { + pc, err := common.NewLogParserCollection(settings, common.WithLogParser(LogFunctions()), common.WithLogErrorMode(errorMode)) + if err != nil { + return nil, err + } + + contexts := make([]consumer.Logs, len(contextStatements)) + var errors error + for i, cs := range contextStatements { + context, err := pc.ParseContextStatements(cs) + if err != nil { + errors = multierr.Append(errors, err) + } + contexts[i] = context + } + + if errors != nil { + return nil, errors + } + + return &Processor{ + contexts: contexts, + logger: settings.Logger, + }, nil +} + +func (p *Processor) ProcessLogs(ctx context.Context, ld plog.Logs) (plog.Logs, 
error) { + for _, c := range p.contexts { + err := c.ConsumeLogs(ctx, ld) + if err != nil { + p.logger.Error("failed processing logs", zap.Error(err)) + return ld, err + } + } + return ld, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metadata/generated_status.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metadata/generated_status.go new file mode 100644 index 0000000000..245af46aba --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metadata/generated_status.go @@ -0,0 +1,14 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "go.opentelemetry.io/collector/component" +) + +const ( + Type = "transform" + TracesStability = component.StabilityLevelAlpha + MetricsStability = component.StabilityLevelAlpha + LogsStability = component.StabilityLevelAlpha +) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics/func_convert_gauge_to_sum.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics/func_convert_gauge_to_sum.go new file mode 100644 index 0000000000..3c3a5100dc --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics/func_convert_gauge_to_sum.go @@ -0,0 +1,62 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metrics // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics" + +import ( + "context" + "fmt" + + "go.opentelemetry.io/collector/pdata/pmetric" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric" +) + +type 
convertGaugeToSumArguments struct { + StringAggTemp string + Monotonic bool +} + +func newConvertGaugeToSumFactory() ottl.Factory[ottlmetric.TransformContext] { + return ottl.NewFactory("convert_gauge_to_sum", &convertGaugeToSumArguments{}, createConvertGaugeToSumFunction) +} + +func createConvertGaugeToSumFunction(_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[ottlmetric.TransformContext], error) { + args, ok := oArgs.(*convertGaugeToSumArguments) + + if !ok { + return nil, fmt.Errorf("ConvertGaugeToSumFactory args must be of type *ConvertGaugeToSumArguments") + } + + return convertGaugeToSum(args.StringAggTemp, args.Monotonic) +} + +func convertGaugeToSum(stringAggTemp string, monotonic bool) (ottl.ExprFunc[ottlmetric.TransformContext], error) { + var aggTemp pmetric.AggregationTemporality + switch stringAggTemp { + case "delta": + aggTemp = pmetric.AggregationTemporalityDelta + case "cumulative": + aggTemp = pmetric.AggregationTemporalityCumulative + default: + return nil, fmt.Errorf("unknown aggregation temporality: %s", stringAggTemp) + } + + return func(_ context.Context, tCtx ottlmetric.TransformContext) (any, error) { + metric := tCtx.GetMetric() + if metric.Type() != pmetric.MetricTypeGauge { + return nil, nil + } + + dps := metric.Gauge().DataPoints() + + metric.SetEmptySum().SetAggregationTemporality(aggTemp) + metric.Sum().SetIsMonotonic(monotonic) + + // Setting the data type removed all the data points, so we must copy them back to the metric. 
+ dps.CopyTo(metric.Sum().DataPoints()) + + return nil, nil + }, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics/func_convert_gauge_to_sum_datapoint.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics/func_convert_gauge_to_sum_datapoint.go new file mode 100644 index 0000000000..dab12e96d0 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics/func_convert_gauge_to_sum_datapoint.go @@ -0,0 +1,58 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metrics // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics" + +import ( + "context" + "fmt" + + "go.opentelemetry.io/collector/pdata/pmetric" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint" +) + +func newConvertDatapointGaugeToSumFactory() ottl.Factory[ottldatapoint.TransformContext] { + return ottl.NewFactory("convert_gauge_to_sum", &convertGaugeToSumArguments{}, createConvertDatapointGaugeToSumFunction) +} + +func createConvertDatapointGaugeToSumFunction(_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[ottldatapoint.TransformContext], error) { + // use the same args as in metric context + args, ok := oArgs.(*convertGaugeToSumArguments) + + if !ok { + return nil, fmt.Errorf("ConvertGaugeToSumFactory args must be of type *ConvertGaugeToSumArguments") + } + + return convertDatapointGaugeToSum(args.StringAggTemp, args.Monotonic) +} + +func convertDatapointGaugeToSum(stringAggTemp string, monotonic bool) (ottl.ExprFunc[ottldatapoint.TransformContext], error) { + var aggTemp pmetric.AggregationTemporality + switch stringAggTemp { + case "delta": + aggTemp = 
pmetric.AggregationTemporalityDelta + case "cumulative": + aggTemp = pmetric.AggregationTemporalityCumulative + default: + return nil, fmt.Errorf("unknown aggregation temporality: %s", stringAggTemp) + } + + return func(_ context.Context, tCtx ottldatapoint.TransformContext) (any, error) { + metric := tCtx.GetMetric() + if metric.Type() != pmetric.MetricTypeGauge { + return nil, nil + } + + dps := metric.Gauge().DataPoints() + + metric.SetEmptySum().SetAggregationTemporality(aggTemp) + metric.Sum().SetIsMonotonic(monotonic) + + // Setting the data type removed all the data points, so we must copy them back to the metric. + dps.CopyTo(metric.Sum().DataPoints()) + + return nil, nil + }, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics/func_convert_sum_to_gauge.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics/func_convert_sum_to_gauge.go new file mode 100644 index 0000000000..f4763e65c9 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics/func_convert_sum_to_gauge.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metrics // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics" + +import ( + "context" + + "go.opentelemetry.io/collector/pdata/pmetric" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric" +) + +func newConvertSumToGaugeFactory() ottl.Factory[ottlmetric.TransformContext] { + return ottl.NewFactory("convert_sum_to_gauge", nil, createConvertSumToGaugeFunction) +} + +func createConvertSumToGaugeFunction(_ ottl.FunctionContext, _ ottl.Arguments) (ottl.ExprFunc[ottlmetric.TransformContext], error) { + return 
convertSumToGauge() +} + +func convertSumToGauge() (ottl.ExprFunc[ottlmetric.TransformContext], error) { + return func(_ context.Context, tCtx ottlmetric.TransformContext) (any, error) { + metric := tCtx.GetMetric() + if metric.Type() != pmetric.MetricTypeSum { + return nil, nil + } + + dps := metric.Sum().DataPoints() + + // Setting the data type removed all the data points, so we must copy them back to the metric. + dps.CopyTo(metric.SetEmptyGauge().DataPoints()) + + return nil, nil + }, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics/func_convert_sum_to_gauge_datapoint.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics/func_convert_sum_to_gauge_datapoint.go new file mode 100644 index 0000000000..ca2f09c8a1 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics/func_convert_sum_to_gauge_datapoint.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metrics // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics" + +import ( + "context" + + "go.opentelemetry.io/collector/pdata/pmetric" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint" +) + +func newConvertDatapointSumToGaugeFactory() ottl.Factory[ottldatapoint.TransformContext] { + return ottl.NewFactory("convert_sum_to_gauge", nil, createDatapointConvertSumToGaugeFunction) +} + +func createDatapointConvertSumToGaugeFunction(_ ottl.FunctionContext, _ ottl.Arguments) (ottl.ExprFunc[ottldatapoint.TransformContext], error) { + return convertDatapointSumToGauge() +} + +func convertDatapointSumToGauge() (ottl.ExprFunc[ottldatapoint.TransformContext], error) { + return 
func(_ context.Context, tCtx ottldatapoint.TransformContext) (any, error) { + metric := tCtx.GetMetric() + if metric.Type() != pmetric.MetricTypeSum { + return nil, nil + } + + dps := metric.Sum().DataPoints() + + // Setting the data type removed all the data points, so we must copy them back to the metric. + dps.CopyTo(metric.SetEmptyGauge().DataPoints()) + + return nil, nil + }, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics/func_convert_summary_count_val_to_sum.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics/func_convert_summary_count_val_to_sum.go new file mode 100644 index 0000000000..c329ba34fd --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics/func_convert_summary_count_val_to_sum.go @@ -0,0 +1,70 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metrics // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics" + +import ( + "context" + "fmt" + + "go.opentelemetry.io/collector/pdata/pmetric" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint" +) + +type convertSummaryCountValToSumArguments struct { + StringAggTemp string + Monotonic bool +} + +func newConvertSummaryCountValToSumFactory() ottl.Factory[ottldatapoint.TransformContext] { + return ottl.NewFactory("convert_summary_count_val_to_sum", &convertSummaryCountValToSumArguments{}, createConvertSummaryCountValToSumFunction) +} + +func createConvertSummaryCountValToSumFunction(_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[ottldatapoint.TransformContext], error) { + args, ok := oArgs.(*convertSummaryCountValToSumArguments) + + if !ok { + return nil, 
fmt.Errorf("convertSummaryCountValToSumFactory args must be of type *convertSummaryCountValToSumArguments") + } + + return convertSummaryCountValToSum(args.StringAggTemp, args.Monotonic) +} + +func convertSummaryCountValToSum(stringAggTemp string, monotonic bool) (ottl.ExprFunc[ottldatapoint.TransformContext], error) { + var aggTemp pmetric.AggregationTemporality + switch stringAggTemp { + case "delta": + aggTemp = pmetric.AggregationTemporalityDelta + case "cumulative": + aggTemp = pmetric.AggregationTemporalityCumulative + default: + return nil, fmt.Errorf("unknown aggregation temporality: %s", stringAggTemp) + } + return func(_ context.Context, tCtx ottldatapoint.TransformContext) (any, error) { + metric := tCtx.GetMetric() + if metric.Type() != pmetric.MetricTypeSummary { + return nil, nil + } + + sumMetric := tCtx.GetMetrics().AppendEmpty() + sumMetric.SetDescription(metric.Description()) + sumMetric.SetName(metric.Name() + "_count") + sumMetric.SetUnit(metric.Unit()) + sumMetric.SetEmptySum().SetAggregationTemporality(aggTemp) + sumMetric.Sum().SetIsMonotonic(monotonic) + + sumDps := sumMetric.Sum().DataPoints() + dps := metric.Summary().DataPoints() + for i := 0; i < dps.Len(); i++ { + dp := dps.At(i) + sumDp := sumDps.AppendEmpty() + dp.Attributes().CopyTo(sumDp.Attributes()) + sumDp.SetIntValue(int64(dp.Count())) + sumDp.SetStartTimestamp(dp.StartTimestamp()) + sumDp.SetTimestamp(dp.Timestamp()) + } + return nil, nil + }, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics/func_convert_summary_sum_val_to_sum.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics/func_convert_summary_sum_val_to_sum.go new file mode 100644 index 0000000000..1deb6bdb27 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics/func_convert_summary_sum_val_to_sum.go @@ 
-0,0 +1,70 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metrics // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics" + +import ( + "context" + "fmt" + + "go.opentelemetry.io/collector/pdata/pmetric" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint" +) + +type convertSummarySumValToSumArguments struct { + StringAggTemp string + Monotonic bool +} + +func newConvertSummarySumValToSumFactory() ottl.Factory[ottldatapoint.TransformContext] { + return ottl.NewFactory("convert_summary_sum_val_to_sum", &convertSummarySumValToSumArguments{}, createConvertSummarySumValToSumFunction) +} + +func createConvertSummarySumValToSumFunction(_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[ottldatapoint.TransformContext], error) { + args, ok := oArgs.(*convertSummarySumValToSumArguments) + + if !ok { + return nil, fmt.Errorf("convertSummarySumValToSumFactory args must be of type *convertSummarySumValToSumArguments") + } + + return convertSummarySumValToSum(args.StringAggTemp, args.Monotonic) +} + +func convertSummarySumValToSum(stringAggTemp string, monotonic bool) (ottl.ExprFunc[ottldatapoint.TransformContext], error) { + var aggTemp pmetric.AggregationTemporality + switch stringAggTemp { + case "delta": + aggTemp = pmetric.AggregationTemporalityDelta + case "cumulative": + aggTemp = pmetric.AggregationTemporalityCumulative + default: + return nil, fmt.Errorf("unknown aggregation temporality: %s", stringAggTemp) + } + return func(_ context.Context, tCtx ottldatapoint.TransformContext) (any, error) { + metric := tCtx.GetMetric() + if metric.Type() != pmetric.MetricTypeSummary { + return nil, nil + } + + sumMetric := tCtx.GetMetrics().AppendEmpty() + sumMetric.SetDescription(metric.Description()) + sumMetric.SetName(metric.Name() + "_sum") + 
sumMetric.SetUnit(metric.Unit()) + sumMetric.SetEmptySum().SetAggregationTemporality(aggTemp) + sumMetric.Sum().SetIsMonotonic(monotonic) + + sumDps := sumMetric.Sum().DataPoints() + dps := metric.Summary().DataPoints() + for i := 0; i < dps.Len(); i++ { + dp := dps.At(i) + sumDp := sumDps.AppendEmpty() + dp.Attributes().CopyTo(sumDp.Attributes()) + sumDp.SetDoubleValue(dp.Sum()) + sumDp.SetStartTimestamp(dp.StartTimestamp()) + sumDp.SetTimestamp(dp.Timestamp()) + } + return nil, nil + }, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics/func_extract_count_metric.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics/func_extract_count_metric.go new file mode 100644 index 0000000000..789a6226be --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics/func_extract_count_metric.go @@ -0,0 +1,85 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metrics // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics" + +import ( + "context" + "fmt" + + "go.opentelemetry.io/collector/pdata/pmetric" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric" +) + +type extractCountMetricArguments struct { + Monotonic bool +} + +func newExtractCountMetricFactory() ottl.Factory[ottlmetric.TransformContext] { + return ottl.NewFactory("extract_count_metric", &extractCountMetricArguments{}, createExtractCountMetricFunction) +} + +func createExtractCountMetricFunction(_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[ottlmetric.TransformContext], error) { + args, ok := oArgs.(*extractCountMetricArguments) + + if !ok { + return nil, 
fmt.Errorf("extractCountMetricFactory args must be of type *extractCountMetricArguments") + } + + return extractCountMetric(args.Monotonic) +} + +func extractCountMetric(monotonic bool) (ottl.ExprFunc[ottlmetric.TransformContext], error) { + return func(_ context.Context, tCtx ottlmetric.TransformContext) (any, error) { + metric := tCtx.GetMetric() + invalidMetricTypeError := fmt.Errorf("extract_count_metric requires an input metric of type Histogram, ExponentialHistogram or Summary, got %s", metric.Type()) + + aggTemp := getAggregationTemporality(metric) + if aggTemp == pmetric.AggregationTemporalityUnspecified { + return nil, invalidMetricTypeError + } + + countMetric := pmetric.NewMetric() + countMetric.SetDescription(metric.Description()) + countMetric.SetName(metric.Name() + "_count") + countMetric.SetUnit(metric.Unit()) + countMetric.SetEmptySum().SetAggregationTemporality(aggTemp) + countMetric.Sum().SetIsMonotonic(monotonic) + + switch metric.Type() { + case pmetric.MetricTypeHistogram: + dataPoints := metric.Histogram().DataPoints() + for i := 0; i < dataPoints.Len(); i++ { + addCountDataPoint(dataPoints.At(i), countMetric.Sum().DataPoints()) + } + case pmetric.MetricTypeExponentialHistogram: + dataPoints := metric.ExponentialHistogram().DataPoints() + for i := 0; i < dataPoints.Len(); i++ { + addCountDataPoint(dataPoints.At(i), countMetric.Sum().DataPoints()) + } + case pmetric.MetricTypeSummary: + dataPoints := metric.Summary().DataPoints() + for i := 0; i < dataPoints.Len(); i++ { + addCountDataPoint(dataPoints.At(i), countMetric.Sum().DataPoints()) + } + default: + return nil, invalidMetricTypeError + } + + if countMetric.Sum().DataPoints().Len() > 0 { + countMetric.MoveTo(tCtx.GetMetrics().AppendEmpty()) + } + + return nil, nil + }, nil +} + +func addCountDataPoint(dataPoint SumCountDataPoint, destination pmetric.NumberDataPointSlice) { + newDp := destination.AppendEmpty() + dataPoint.Attributes().CopyTo(newDp.Attributes()) + 
newDp.SetIntValue(int64(dataPoint.Count())) + newDp.SetStartTimestamp(dataPoint.StartTimestamp()) + newDp.SetTimestamp(dataPoint.Timestamp()) +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics/func_extract_sum_metric.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics/func_extract_sum_metric.go new file mode 100644 index 0000000000..78b47623cd --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics/func_extract_sum_metric.go @@ -0,0 +1,118 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metrics // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics" + +import ( + "context" + "fmt" + + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric" +) + +type extractSumMetricArguments struct { + Monotonic bool +} + +func newExtractSumMetricFactory() ottl.Factory[ottlmetric.TransformContext] { + return ottl.NewFactory("extract_sum_metric", &extractSumMetricArguments{}, createExtractSumMetricFunction) +} + +func createExtractSumMetricFunction(_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[ottlmetric.TransformContext], error) { + args, ok := oArgs.(*extractSumMetricArguments) + + if !ok { + return nil, fmt.Errorf("extractSumMetricFactory args must be of type *extractSumMetricArguments") + } + + return extractSumMetric(args.Monotonic) +} + +// this interface helps unify the logic for extracting data from different histogram types +// all supported metric types' datapoints implement it +type SumCountDataPoint interface { + Attributes() pcommon.Map + 
Sum() float64 + Count() uint64 + StartTimestamp() pcommon.Timestamp + Timestamp() pcommon.Timestamp +} + +func extractSumMetric(monotonic bool) (ottl.ExprFunc[ottlmetric.TransformContext], error) { + return func(_ context.Context, tCtx ottlmetric.TransformContext) (any, error) { + metric := tCtx.GetMetric() + invalidMetricTypeError := fmt.Errorf("extract_sum_metric requires an input metric of type Histogram, ExponentialHistogram or Summary, got %s", metric.Type()) + + aggTemp := getAggregationTemporality(metric) + if aggTemp == pmetric.AggregationTemporalityUnspecified { + return nil, invalidMetricTypeError + } + + sumMetric := pmetric.NewMetric() + sumMetric.SetDescription(metric.Description()) + sumMetric.SetName(metric.Name() + "_sum") + sumMetric.SetUnit(metric.Unit()) + sumMetric.SetEmptySum().SetAggregationTemporality(aggTemp) + sumMetric.Sum().SetIsMonotonic(monotonic) + + switch metric.Type() { + case pmetric.MetricTypeHistogram: + dataPoints := metric.Histogram().DataPoints() + for i := 0; i < dataPoints.Len(); i++ { + dataPoint := dataPoints.At(i) + if dataPoint.HasSum() { + addSumDataPoint(dataPoint, sumMetric.Sum().DataPoints()) + } + } + case pmetric.MetricTypeExponentialHistogram: + dataPoints := metric.ExponentialHistogram().DataPoints() + for i := 0; i < dataPoints.Len(); i++ { + dataPoint := dataPoints.At(i) + if dataPoint.HasSum() { + addSumDataPoint(dataPoint, sumMetric.Sum().DataPoints()) + } + } + case pmetric.MetricTypeSummary: + dataPoints := metric.Summary().DataPoints() + // note that unlike Histograms, the Sum field is required for Summaries + for i := 0; i < dataPoints.Len(); i++ { + addSumDataPoint(dataPoints.At(i), sumMetric.Sum().DataPoints()) + } + default: + return nil, invalidMetricTypeError + } + + if sumMetric.Sum().DataPoints().Len() > 0 { + sumMetric.MoveTo(tCtx.GetMetrics().AppendEmpty()) + } + + return nil, nil + }, nil +} + +func addSumDataPoint(dataPoint SumCountDataPoint, destination pmetric.NumberDataPointSlice) { + newDp 
:= destination.AppendEmpty() + dataPoint.Attributes().CopyTo(newDp.Attributes()) + newDp.SetDoubleValue(dataPoint.Sum()) + newDp.SetStartTimestamp(dataPoint.StartTimestamp()) + newDp.SetTimestamp(dataPoint.Timestamp()) +} + +func getAggregationTemporality(metric pmetric.Metric) pmetric.AggregationTemporality { + switch metric.Type() { + case pmetric.MetricTypeHistogram: + return metric.Histogram().AggregationTemporality() + case pmetric.MetricTypeExponentialHistogram: + return metric.ExponentialHistogram().AggregationTemporality() + case pmetric.MetricTypeSummary: + // Summaries don't have an aggregation temporality, but they *should* be cumulative based on the Openmetrics spec. + // This should become an optional argument once those are available in OTTL. + return pmetric.AggregationTemporalityCumulative + default: + return pmetric.AggregationTemporalityUnspecified + } +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics/functions.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics/functions.go new file mode 100644 index 0000000000..5c993ff3d4 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics/functions.go @@ -0,0 +1,67 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metrics // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics" + +import ( + "go.opentelemetry.io/collector/featuregate" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" +) + +var 
useConvertBetweenSumAndGaugeMetricContext = featuregate.GlobalRegistry().MustRegister( + "processor.transform.ConvertBetweenSumAndGaugeMetricContext", + featuregate.StageAlpha, + featuregate.WithRegisterDescription("When enabled will use metric context for conversion between sum and gauge"), +) + +func DataPointFunctions() map[string]ottl.Factory[ottldatapoint.TransformContext] { + functions := ottlfuncs.StandardFuncs[ottldatapoint.TransformContext]() + + datapointFunctions := ottl.CreateFactoryMap[ottldatapoint.TransformContext]( + newConvertSummarySumValToSumFactory(), + newConvertSummaryCountValToSumFactory(), + ) + + if !useConvertBetweenSumAndGaugeMetricContext.IsEnabled() { + for _, f := range []ottl.Factory[ottldatapoint.TransformContext]{ + newConvertDatapointSumToGaugeFactory(), + newConvertDatapointGaugeToSumFactory(), + } { + datapointFunctions[f.Name()] = f + } + } + + for k, v := range datapointFunctions { + functions[k] = v + } + + return functions +} + +func MetricFunctions() map[string]ottl.Factory[ottlmetric.TransformContext] { + functions := ottlfuncs.StandardFuncs[ottlmetric.TransformContext]() + + metricFunctions := ottl.CreateFactoryMap( + newExtractSumMetricFactory(), + newExtractCountMetricFactory(), + ) + + if useConvertBetweenSumAndGaugeMetricContext.IsEnabled() { + for _, f := range []ottl.Factory[ottlmetric.TransformContext]{ + newConvertSumToGaugeFactory(), + newConvertGaugeToSumFactory(), + } { + metricFunctions[f.Name()] = f + } + } + + for k, v := range metricFunctions { + functions[k] = v + } + + return functions +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics/processor.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics/processor.go new file mode 100644 index 0000000000..135b1bcbad --- /dev/null +++ 
b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics/processor.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metrics // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics" + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.uber.org/multierr" + "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common" +) + +type Processor struct { + contexts []consumer.Metrics + logger *zap.Logger +} + +func NewProcessor(contextStatements []common.ContextStatements, errorMode ottl.ErrorMode, settings component.TelemetrySettings) (*Processor, error) { + pc, err := common.NewMetricParserCollection(settings, common.WithMetricParser(MetricFunctions()), common.WithDataPointParser(DataPointFunctions()), common.WithMetricErrorMode(errorMode)) + if err != nil { + return nil, err + } + + contexts := make([]consumer.Metrics, len(contextStatements)) + var errors error + for i, cs := range contextStatements { + context, err := pc.ParseContextStatements(cs) + if err != nil { + errors = multierr.Append(errors, err) + } + contexts[i] = context + } + + if errors != nil { + return nil, errors + } + + return &Processor{ + contexts: contexts, + logger: settings.Logger, + }, nil +} + +func (p *Processor) ProcessMetrics(ctx context.Context, md pmetric.Metrics) (pmetric.Metrics, error) { + for _, c := range p.contexts { + err := c.ConsumeMetrics(ctx, md) + if err != nil { + p.logger.Error("failed processing metrics", zap.Error(err)) + return md, err + } + } + return md, nil +} diff --git 
a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/traces/functions.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/traces/functions.go new file mode 100644 index 0000000000..b64b332758 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/traces/functions.go @@ -0,0 +1,21 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package traces // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/traces" + +import ( + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" +) + +func SpanFunctions() map[string]ottl.Factory[ottlspan.TransformContext] { + // No trace-only functions yet. + return ottlfuncs.StandardFuncs[ottlspan.TransformContext]() +} + +func SpanEventFunctions() map[string]ottl.Factory[ottlspanevent.TransformContext] { + // No trace-only functions yet. 
+ return ottlfuncs.StandardFuncs[ottlspanevent.TransformContext]() +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/traces/processor.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/traces/processor.go new file mode 100644 index 0000000000..e20c87880c --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/traces/processor.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package traces // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/traces" + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/pdata/ptrace" + "go.uber.org/multierr" + "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common" +) + +type Processor struct { + contexts []consumer.Traces + logger *zap.Logger +} + +func NewProcessor(contextStatements []common.ContextStatements, errorMode ottl.ErrorMode, settings component.TelemetrySettings) (*Processor, error) { + pc, err := common.NewTraceParserCollection(settings, common.WithSpanParser(SpanFunctions()), common.WithSpanEventParser(SpanEventFunctions()), common.WithTraceErrorMode(errorMode)) + if err != nil { + return nil, err + } + + contexts := make([]consumer.Traces, len(contextStatements)) + var errors error + for i, cs := range contextStatements { + context, err := pc.ParseContextStatements(cs) + if err != nil { + errors = multierr.Append(errors, err) + } + contexts[i] = context + } + + if errors != nil { + return nil, errors + } + + return &Processor{ + contexts: contexts, + logger: settings.Logger, + }, nil +} + +func 
(p *Processor) ProcessTraces(ctx context.Context, td ptrace.Traces) (ptrace.Traces, error) { + for _, c := range p.contexts { + err := c.ConsumeTraces(ctx, td) + if err != nil { + p.logger.Error("failed processing traces", zap.Error(err)) + return td, err + } + } + return td, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/metadata.yaml new file mode 100644 index 0000000000..efed23ad53 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/metadata.yaml @@ -0,0 +1,10 @@ +type: transform + +status: + class: processor + stability: + alpha: [traces, metrics, logs] + distributions: [contrib, splunk, observiq, sumo, grafana] + warnings: [Unsound Transformations, Identity Conflict, Orphaned Telemetry, Other] + codeowners: + active: [TylerHelmuth, kentquirk, bogdandrutu, evan-bradley] diff --git a/vendor/modules.txt b/vendor/modules.txt index 1e30eab6ce..3bffdea47f 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1247,6 +1247,7 @@ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottl github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/internal/ottlcommon @@ -1408,6 +1409,14 @@ github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsampling 
github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/idbatcher github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/metadata github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling +# github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.90.1 +## explicit; go 1.20 +github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor +github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common +github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/logs +github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metadata +github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics +github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/traces # github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.90.1 ## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver