From 6a9dff6bce25333992b4afa6bb6248b8804f625e Mon Sep 17 00:00:00 2001 From: Adam Boguszewski Date: Tue, 14 May 2024 13:27:37 +0200 Subject: [PATCH 1/8] create package internal/docker/receiver --- internal/docker/go.mod | 55 +- internal/docker/go.sum | 148 +- internal/docker/receiver/config.go | 76 + internal/docker/receiver/config_test.go | 118 + internal/docker/receiver/documentation.md | 763 +++ .../internal/metadata/generated_config.go | 396 ++ .../metadata/generated_config_test.go | 274 + .../internal/metadata/generated_metrics.go | 4416 +++++++++++++++++ .../metadata/generated_metrics_test.go | 1413 ++++++ .../internal/metadata/generated_resource.go | 78 + .../metadata/generated_resource_test.go | 76 + .../internal/metadata/testdata/config.yaml | 379 ++ internal/docker/receiver/metadata.yaml | 705 +++ internal/docker/receiver/metric_helper.go | 134 + .../docker/receiver/metric_helper_test.go | 121 + internal/docker/receiver/metrics_receiver.go | 331 ++ .../docker/receiver/metrics_receiver_test.go | 433 ++ internal/docker/receiver/testdata/config.yaml | 20 + .../docker/receiver/testdata/container.json | 226 + .../testdata/mock/cgroups_v2/container.json | 197 + .../testdata/mock/cgroups_v2/containers.json | 39 + .../mock/cgroups_v2/expected_metrics.yaml | 427 ++ .../testdata/mock/cgroups_v2/stats.json | 111 + .../testdata/mock/cpu_limit/container.json | 196 + .../testdata/mock/cpu_limit/containers.json | 39 + .../mock/cpu_limit/expected_metrics.yaml | 487 ++ .../testdata/mock/cpu_limit/stats.json | 137 + .../mock/no_pids_stats/container.json | 218 + .../mock/no_pids_stats/containers.json | 54 + .../mock/no_pids_stats/expected_metrics.yaml | 828 ++++ .../testdata/mock/no_pids_stats/stats.json | 180 + .../mock/pids_stats_max/container.json | 196 + .../mock/pids_stats_max/containers.json | 39 + .../mock/pids_stats_max/expected_metrics.yaml | 479 ++ .../testdata/mock/pids_stats_max/stats.json | 137 + .../mock/single_container/container.json | 220 + 
.../mock/single_container/containers.json | 55 + .../single_container/expected_metrics.yaml | 841 ++++ .../testdata/mock/single_container/stats.json | 183 + .../container.json | 218 + .../containers.json | 54 + .../expected_metrics.yaml | 843 ++++ .../stats.json | 183 + .../mock/two_containers/container1.json | 218 + .../mock/two_containers/container2.json | 218 + .../mock/two_containers/containers.json | 106 + .../mock/two_containers/expected_metrics.yaml | 1571 ++++++ .../testdata/mock/two_containers/stats1.json | 181 + .../testdata/mock/two_containers/stats2.json | 181 + internal/docker/receiver/testdata/stats.json | 200 + 50 files changed, 19147 insertions(+), 51 deletions(-) create mode 100644 internal/docker/receiver/config.go create mode 100644 internal/docker/receiver/config_test.go create mode 100644 internal/docker/receiver/documentation.md create mode 100644 internal/docker/receiver/internal/metadata/generated_config.go create mode 100644 internal/docker/receiver/internal/metadata/generated_config_test.go create mode 100644 internal/docker/receiver/internal/metadata/generated_metrics.go create mode 100644 internal/docker/receiver/internal/metadata/generated_metrics_test.go create mode 100644 internal/docker/receiver/internal/metadata/generated_resource.go create mode 100644 internal/docker/receiver/internal/metadata/generated_resource_test.go create mode 100644 internal/docker/receiver/internal/metadata/testdata/config.yaml create mode 100644 internal/docker/receiver/metadata.yaml create mode 100644 internal/docker/receiver/metric_helper.go create mode 100644 internal/docker/receiver/metric_helper_test.go create mode 100644 internal/docker/receiver/metrics_receiver.go create mode 100644 internal/docker/receiver/metrics_receiver_test.go create mode 100644 internal/docker/receiver/testdata/config.yaml create mode 100644 internal/docker/receiver/testdata/container.json create mode 100644 internal/docker/receiver/testdata/mock/cgroups_v2/container.json 
create mode 100644 internal/docker/receiver/testdata/mock/cgroups_v2/containers.json create mode 100644 internal/docker/receiver/testdata/mock/cgroups_v2/expected_metrics.yaml create mode 100644 internal/docker/receiver/testdata/mock/cgroups_v2/stats.json create mode 100644 internal/docker/receiver/testdata/mock/cpu_limit/container.json create mode 100644 internal/docker/receiver/testdata/mock/cpu_limit/containers.json create mode 100644 internal/docker/receiver/testdata/mock/cpu_limit/expected_metrics.yaml create mode 100644 internal/docker/receiver/testdata/mock/cpu_limit/stats.json create mode 100644 internal/docker/receiver/testdata/mock/no_pids_stats/container.json create mode 100644 internal/docker/receiver/testdata/mock/no_pids_stats/containers.json create mode 100644 internal/docker/receiver/testdata/mock/no_pids_stats/expected_metrics.yaml create mode 100644 internal/docker/receiver/testdata/mock/no_pids_stats/stats.json create mode 100644 internal/docker/receiver/testdata/mock/pids_stats_max/container.json create mode 100644 internal/docker/receiver/testdata/mock/pids_stats_max/containers.json create mode 100644 internal/docker/receiver/testdata/mock/pids_stats_max/expected_metrics.yaml create mode 100644 internal/docker/receiver/testdata/mock/pids_stats_max/stats.json create mode 100644 internal/docker/receiver/testdata/mock/single_container/container.json create mode 100644 internal/docker/receiver/testdata/mock/single_container/containers.json create mode 100644 internal/docker/receiver/testdata/mock/single_container/expected_metrics.yaml create mode 100644 internal/docker/receiver/testdata/mock/single_container/stats.json create mode 100644 internal/docker/receiver/testdata/mock/single_container_with_optional_resource_attributes/container.json create mode 100644 internal/docker/receiver/testdata/mock/single_container_with_optional_resource_attributes/containers.json create mode 100644 
internal/docker/receiver/testdata/mock/single_container_with_optional_resource_attributes/expected_metrics.yaml create mode 100644 internal/docker/receiver/testdata/mock/single_container_with_optional_resource_attributes/stats.json create mode 100644 internal/docker/receiver/testdata/mock/two_containers/container1.json create mode 100644 internal/docker/receiver/testdata/mock/two_containers/container2.json create mode 100644 internal/docker/receiver/testdata/mock/two_containers/containers.json create mode 100644 internal/docker/receiver/testdata/mock/two_containers/expected_metrics.yaml create mode 100644 internal/docker/receiver/testdata/mock/two_containers/stats1.json create mode 100644 internal/docker/receiver/testdata/mock/two_containers/stats2.json create mode 100644 internal/docker/receiver/testdata/stats.json diff --git a/internal/docker/go.mod b/internal/docker/go.mod index f42749fa88fe..d7a9a405ed0f 100644 --- a/internal/docker/go.mod +++ b/internal/docker/go.mod @@ -3,15 +3,28 @@ module github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker go 1.21.0 require ( - github.com/docker/docker v25.0.5+incompatible + github.com/docker/docker v26.1.2+incompatible github.com/gobwas/glob v0.2.3 + github.com/google/go-cmp v0.6.0 + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.100.0 + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.100.0 github.com/stretchr/testify v1.9.0 + go.opentelemetry.io/collector/component v0.100.0 + go.opentelemetry.io/collector/confmap v0.100.0 + go.opentelemetry.io/collector/consumer v0.100.0 + go.opentelemetry.io/collector/filter v0.100.0 + go.opentelemetry.io/collector/pdata v1.7.0 + go.opentelemetry.io/collector/receiver v0.100.0 + go.opentelemetry.io/collector/semconv v0.100.0 go.uber.org/goleak v1.3.0 + go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 ) require ( github.com/Microsoft/go-winio v0.4.17 // indirect + github.com/beorn7/perks v1.0.1 // indirect + 
github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/containerd/log v0.1.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect @@ -20,25 +33,45 @@ require ( github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/kr/pretty v0.2.1 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/knadh/koanf/maps v0.1.1 // indirect + github.com/knadh/koanf/providers/confmap v0.1.0 // indirect + github.com/knadh/koanf/v2 v2.1.1 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect github.com/morikuni/aec v1.0.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.100.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.2 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_golang v1.19.0 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.53.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect + go.opentelemetry.io/collector v0.100.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.100.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect - go.opentelemetry.io/otel v1.24.0 // indirect - 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 // indirect - go.opentelemetry.io/otel/metric v1.24.0 // indirect - go.opentelemetry.io/otel/sdk v1.24.0 // indirect - go.opentelemetry.io/otel/trace v1.24.0 // indirect - go.uber.org/multierr v1.11.0 // indirect - golang.org/x/net v0.23.0 // indirect - golang.org/x/sys v0.18.0 // indirect + go.opentelemetry.io/otel v1.26.0 // indirect + go.opentelemetry.io/otel/exporters/prometheus v0.48.0 // indirect + go.opentelemetry.io/otel/metric v1.26.0 // indirect + go.opentelemetry.io/otel/sdk v1.26.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.26.0 // indirect + go.opentelemetry.io/otel/trace v1.26.0 // indirect + golang.org/x/net v0.24.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.4.0 // indirect - gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect + google.golang.org/grpc v1.63.2 // indirect + google.golang.org/protobuf v1.34.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect gotest.tools/v3 v3.0.3 // indirect ) diff --git a/internal/docker/go.sum b/internal/docker/go.sum index 97ff63efbf32..e34782d9444d 100644 --- a/internal/docker/go.sum +++ b/internal/docker/go.sum @@ -2,17 +2,22 @@ github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOEl github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Microsoft/go-winio v0.4.17 h1:iT12IBVClFevaf8PuVyi3UmZOVh4OqnaLxDTW2O6j3w= github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= -github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= 
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.2+incompatible h1:UVX5ZOrrfTGZZYEP+ZDq3Xn9PdHNXaSYMFPDumMqG2k= +github.com/docker/docker v26.1.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= @@ -24,30 +29,56 @@ github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= 
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= +github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYpldc7gVz2KMQwJ/QYCDIa7XU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go 
v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs= +github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= +github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU= +github.com/knadh/koanf/providers/confmap v0.1.0/go.mod h1:2uLhxQzJnyHKfxG927awZC7+fyHFdQkd697K4MdLnIU= +github.com/knadh/koanf/v2 v2.1.1 h1:/R8eXqasSTsmDCsAyYj+81Wteg8AqrV9CP6gvsTsOmM= +github.com/knadh/koanf/v2 v2.1.1/go.mod h1:4mnTRbZCK+ALuBXHZMjDfG9y714L7TykVnZkXbMU3Es= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= 
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.100.0 h1:4cDqd1ZU/HQZmijLqEvzbVV/rdgUQG08IBq5vbO1308= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.100.0/go.mod h1:940rHhObAdeWLuBGBmuK4mRwrXSQv63SOX6arprCPrk= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.100.0 h1:UtY5UvfEFNub+E5zT6vCxU18A2GYdMXZK/ClcwE2nNw= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.100.0/go.mod h1:5Ak565rKB3OZTunqssZHuXEhGTz1f73VdkfwiVp8vxg= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.100.0 h1:549nET9f5zIYC5F3/FBzgZfumjBOy4xx+9rCJ24TRxw= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.100.0/go.mod h1:5r/6EFZnfR/zIP+fQQoD11x9b+TJYbR69kBgme0NKVs= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= @@ 
-57,31 +88,67 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= +github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE= +github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= 
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opentelemetry.io/collector v0.100.0 h1:Q6IAGjMzjkZ7WepuwyCa6UytDPP0O88GemonQOUjP2s= +go.opentelemetry.io/collector v0.100.0/go.mod h1:QlVjQWlrPtBwVRm8tr+3P4FzNZSlYEfuUSaWoAwK+ko= +go.opentelemetry.io/collector/component v0.100.0 h1:3Y6dl3uDkDzilaikYrPxbZDOlzrDijrF1cIPzfyTwWA= +go.opentelemetry.io/collector/component v0.100.0/go.mod h1:HLEqEBFzPW2umagnVC3gY8yogOBhbzvuzTBFUqH54HY= +go.opentelemetry.io/collector/config/configtelemetry v0.100.0 h1:unlhNrFFXCinxk6iPHPYwANO+eFY4S1NTb5knSxteW4= +go.opentelemetry.io/collector/config/configtelemetry v0.100.0/go.mod h1:YV5PaOdtnU1xRomPcYqoHmyCr48tnaAREeGO96EZw8o= +go.opentelemetry.io/collector/confmap v0.100.0 h1:r70znwLWUMFRWL4LRcWLhdFfzmTvehXgbnlHFCDm0Tc= +go.opentelemetry.io/collector/confmap v0.100.0/go.mod h1:BWKPIpYeUzSG6ZgCJMjF7xsLvyrvJCfYURl57E5vhiQ= +go.opentelemetry.io/collector/consumer v0.100.0 h1:8sALAcWvizSyrZJCF+zTqD2RLmZAyeCuaQrNS2q6ti0= +go.opentelemetry.io/collector/consumer v0.100.0/go.mod h1:JOPOq8nSTdnQwc2xdHl4hcuYBYV8gjN2SlFqlqBe/Nc= +go.opentelemetry.io/collector/filter v0.100.0 h1:XQyhnqJSK2sw+e9yvpkvl7y8QdJwH/gAnFoZDfEZ0dQ= +go.opentelemetry.io/collector/filter v0.100.0/go.mod h1:3xGRpZo11DMJTDtMUGsDNkxKM6LMHqROGrQ/aTvskh8= +go.opentelemetry.io/collector/pdata v1.7.0 h1:/WNsBbE6KM3TTPUb9v/5B7IDqnDkgf8GyFhVJJqu7II= +go.opentelemetry.io/collector/pdata v1.7.0/go.mod h1:ehCBBA5GoFrMZkwyZAKGY/lAVSgZf6rzUt3p9mddmPU= +go.opentelemetry.io/collector/pdata/testdata v0.100.0 h1:pliojioiAv+CuLNTK+8tnCD2UgiJbKX9q8bDnpHkV1U= +go.opentelemetry.io/collector/pdata/testdata v0.100.0/go.mod h1:01BHOXvXaQaLLt5J34S093u3e+j//RhbfmEujpFJ/ME= +go.opentelemetry.io/collector/receiver v0.100.0 h1:RFeOVhS7o39G562w0H0hqfh1o2QvK71ViHQuWnnfglI= 
+go.opentelemetry.io/collector/receiver v0.100.0/go.mod h1:Qo3xkorbUy0VXHh7WxMQyphIWiqxI3ZOG0O4YqQ2mCE= +go.opentelemetry.io/collector/semconv v0.100.0 h1:QArUvWcbmsMjM4PV0zngUHRizZeUXibsPBWjDuNJXAs= +go.opentelemetry.io/collector/semconv v0.100.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= -go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= -go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0 h1:t6wl9SPayj+c7lEIFgm4ooDBZVb01IhLB4InpomhRw8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0/go.mod h1:iSDOcsnSA5INXzZtwaBPrKp/lWu/V14Dd+llD0oI2EA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 h1:Xw8U6u2f8DK2XAkGRFV7BBLENgnTGX9i4rQRxJf+/vs= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0/go.mod h1:6KW1Fm6R/s6Z3PGXwSJN2K4eT6wQB3vXX6CVnYX9NmM= -go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= -go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= -go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= -go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= -go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= -go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= -go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI= -go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY= +go.opentelemetry.io/otel v1.26.0 h1:LQwgL5s/1W7YiiRwxf03QGnWLb2HW4pLiAhaA5cZXBs= 
+go.opentelemetry.io/otel v1.26.0/go.mod h1:UmLkJHUAidDval2EICqBMbnAd0/m2vmpf/dAM+fvFs4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0 h1:1u/AyyOqAWzy+SkPxDpahCNZParHV8Vid1RnI2clyDE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0/go.mod h1:z46paqbJ9l7c9fIPCXTqTGwhQZ5XoTIsfeFYWboizjs= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0 h1:1wp/gyxsuYtuE/JFxsQRtcCDtMrO2qMvlfXALU5wkzI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0/go.mod h1:gbTHmghkGgqxMomVQQMur1Nba4M0MQ8AYThXDUjsJ38= +go.opentelemetry.io/otel/exporters/prometheus v0.48.0 h1:sBQe3VNGUjY9IKWQC6z2lNqa5iGbDSxhs60ABwK4y0s= +go.opentelemetry.io/otel/exporters/prometheus v0.48.0/go.mod h1:DtrbMzoZWwQHyrQmCfLam5DZbnmorsGbOtTbYHycU5o= +go.opentelemetry.io/otel/metric v1.26.0 h1:7S39CLuY5Jgg9CrnA9HHiEjGMF/X2VHvoXGgSllRz30= +go.opentelemetry.io/otel/metric v1.26.0/go.mod h1:SY+rHOI4cEawI9a7N1A4nIg/nTQXe1ccCNWYOJUrpX4= +go.opentelemetry.io/otel/sdk v1.26.0 h1:Y7bumHf5tAiDlRYFmGqetNcLaVUZmh4iYfmGxtmz7F8= +go.opentelemetry.io/otel/sdk v1.26.0/go.mod h1:0p8MXpqLeJ0pzcszQQN4F0S5FVjBLgypeGSngLsmirs= +go.opentelemetry.io/otel/sdk/metric v1.26.0 h1:cWSks5tfriHPdWFnl+qpX3P681aAYqlZHcAyHw5aU9Y= +go.opentelemetry.io/otel/sdk/metric v1.26.0/go.mod h1:ClMFFknnThJCksebJwz7KIyEDHO+nTB6gK8obLy8RyE= +go.opentelemetry.io/otel/trace v1.26.0 h1:1ieeAUb4y0TE26jUFrCIXKpTuVK7uJGN9/Z/2LP5sQA= +go.opentelemetry.io/otel/trace v1.26.0/go.mod h1:4iDxvGDQuUkHve82hJJ8UqrwswHYsZuWCBllGV2U2y0= +go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= +go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -98,8 +165,8 @@ golang.org/x/net 
v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -109,8 +176,8 @@ golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= @@ -126,17 
+193,18 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917 h1:rcS6EyEaoCO52hQDupoSfrxI3R6C2Tq741is7X8OvnM= -google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917/go.mod h1:CmlNWB9lSezaYELKS5Ym1r44VrrbPUa7JTvw+6MbpJ0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 h1:6G8oQ016D88m1xAKljMlBOOGWDZkes4kMhgGFlf8WcQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917/go.mod h1:xtjpI3tXFPP051KaWnhvxkiubL/6dJ18vLVf7q2pTOU= -google.golang.org/grpc v1.61.1 h1:kLAiWrZs7YeDM6MumDe7m3y4aM6wacLzM1Y/wiLP9XY= -google.golang.org/grpc v1.61.1/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= -google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= -google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= +google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0= +google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= 
+google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= +google.golang.org/protobuf v1.34.0 h1:Qo/qEd2RZPCf2nKuorzksSknv0d3ERwp1vFG38gSmH4= +google.golang.org/protobuf v1.34.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= diff --git a/internal/docker/receiver/config.go b/internal/docker/receiver/config.go new file mode 100644 index 000000000000..3d66e7add896 --- /dev/null +++ b/internal/docker/receiver/config.go @@ -0,0 +1,76 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package receiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker/receiver" + +import ( + "errors" + "time" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/receiver/scraperhelper" + + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker/receiver/internal/metadata" +) + +var ( + // FIXME: type and stability level originally were taken from generated code + // but here we don't have status defined in metadata.yaml, so these weren't generated + Type = component.MustNewType("docker") + Stability = 
component.StabilityLevelAlpha +) + +var _ component.Config = (*Config)(nil) + +type Config struct { + scraperhelper.ControllerConfig `mapstructure:",squash"` + // The URL of the docker server. Default is "unix:///var/run/docker.sock" + Endpoint string `mapstructure:"endpoint"` + + // A mapping of container label names to MetricDescriptor label keys. + // The corresponding container label value will become the DataPoint label value + // for the mapped name. E.g. `io.kubernetes.container.name: container_spec_name` + // would result in a MetricDescriptor label called `container_spec_name` whose + // Metric DataPoints have the value of the `io.kubernetes.container.name` container label. + ContainerLabelsToMetricLabels map[string]string `mapstructure:"container_labels_to_metric_labels"` + + // A mapping of container environment variable names to MetricDescriptor label + // keys. The corresponding env var values become the DataPoint label value. + // E.g. `APP_VERSION: version` would result MetricDescriptors having a label + // key called `version` whose DataPoint label values are the value of the + // `APP_VERSION` environment variable configured for that particular container, if + // present. + EnvVarsToMetricLabels map[string]string `mapstructure:"env_vars_to_metric_labels"` + + // A list of filters whose matching images are to be excluded. Supports literals, globs, and regex. + ExcludedImages []string `mapstructure:"excluded_images"` + + // Docker client API version. Default is 1.22 + DockerAPIVersion string `mapstructure:"api_version"` + + // MetricsBuilderConfig config. Enable or disable stats by name. 
+ metadata.MetricsBuilderConfig `mapstructure:",squash"` +} + +func CreateDefaultConfig() component.Config { + scs := scraperhelper.NewDefaultControllerConfig() + scs.CollectionInterval = 10 * time.Second + scs.Timeout = 5 * time.Second + return &Config{ + ControllerConfig: scs, + Endpoint: "unix:///var/run/docker.sock", + DockerAPIVersion: defaultDockerAPIVersion, + MetricsBuilderConfig: metadata.DefaultMetricsBuilderConfig(), + } +} + +func (config Config) Validate() error { + if config.Endpoint == "" { + return errors.New("endpoint must be specified") + } + if err := docker.VersionIsValidAndGTE(config.DockerAPIVersion, minimumRequiredDockerAPIVersion); err != nil { + return err + } + return nil +} diff --git a/internal/docker/receiver/config_test.go b/internal/docker/receiver/config_test.go new file mode 100644 index 000000000000..a79ee643ca99 --- /dev/null +++ b/internal/docker/receiver/config_test.go @@ -0,0 +1,118 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package receiver + +import ( + "path/filepath" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap/confmaptest" + "go.opentelemetry.io/collector/receiver" + "go.opentelemetry.io/collector/receiver/scraperhelper" + + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker/receiver/internal/metadata" +) + +func NewFactory() receiver.Factory { + return receiver.NewFactory( + Type, + CreateDefaultConfig, + receiver.WithMetrics(createMetricsReceiver, Stability)) +} + +func TestLoadConfig(t *testing.T) { + t.Parallel() + + tests := []struct { + id component.ID + expected component.Config + }{ + { + id: component.NewIDWithName(Type, ""), + expected: CreateDefaultConfig(), + }, + { + id: component.NewIDWithName(Type, "allsettings"), + expected: 
&Config{ + ControllerConfig: scraperhelper.ControllerConfig{ + CollectionInterval: 2 * time.Second, + InitialDelay: time.Second, + Timeout: 20 * time.Second, + }, + + Endpoint: "http://example.com/", + DockerAPIVersion: "1.40", + + ExcludedImages: []string{ + "undesired-container", + "another-*-container", + }, + + ContainerLabelsToMetricLabels: map[string]string{ + "my.container.label": "my-metric-label", + "my.other.container.label": "my-other-metric-label", + }, + + EnvVarsToMetricLabels: map[string]string{ + "MY_ENVIRONMENT_VARIABLE": "my-metric-label", + "MY_OTHER_ENVIRONMENT_VARIABLE": "my-other-metric-label", + }, + MetricsBuilderConfig: func() metadata.MetricsBuilderConfig { + m := metadata.DefaultMetricsBuilderConfig() + m.Metrics.ContainerCPUUsageSystem = metadata.MetricConfig{ + Enabled: false, + } + m.Metrics.ContainerMemoryTotalRss = metadata.MetricConfig{ + Enabled: true, + } + return m + }(), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.id.String(), func(t *testing.T) { + cm, err := confmaptest.LoadConf(filepath.Join("testdata", "config.yaml")) + require.NoError(t, err) + + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + sub, err := cm.Sub(tt.id.String()) + require.NoError(t, err) + require.NoError(t, component.UnmarshalConfig(sub, cfg)) + + assert.NoError(t, component.ValidateConfig(cfg)) + if diff := cmp.Diff(tt.expected, cfg, cmpopts.IgnoreUnexported(metadata.MetricConfig{}), cmpopts.IgnoreUnexported(metadata.ResourceAttributeConfig{})); diff != "" { + t.Errorf("Config mismatch (-expected +actual):\n%s", diff) + } + }) + } +} + +func TestValidateErrors(t *testing.T) { + cfg := &Config{ControllerConfig: scraperhelper.NewDefaultControllerConfig()} + assert.Equal(t, "endpoint must be specified", component.ValidateConfig(cfg).Error()) + + cfg = &Config{ + DockerAPIVersion: "1.21", + Endpoint: "someEndpoint", + ControllerConfig: scraperhelper.ControllerConfig{CollectionInterval: 1 * time.Second}, + } + assert.Equal(t, 
`"api_version" 1.21 must be at least 1.25`, component.ValidateConfig(cfg).Error()) + + cfg = &Config{ + Endpoint: "someEndpoint", + DockerAPIVersion: "1.25", + ControllerConfig: scraperhelper.ControllerConfig{}, + } + assert.Equal(t, `"collection_interval": requires positive value`, component.ValidateConfig(cfg).Error()) +} diff --git a/internal/docker/receiver/documentation.md b/internal/docker/receiver/documentation.md new file mode 100644 index 000000000000..6523c60518a3 --- /dev/null +++ b/internal/docker/receiver/documentation.md @@ -0,0 +1,763 @@ +[comment]: <> (Code generated by mdatagen. DO NOT EDIT.) + +# docker/receiver + +**Parent Component:** docker + +## Default Metrics + +The following metrics are emitted by default. Each of them can be disabled by applying the following configuration: + +```yaml +metrics: + : + enabled: false +``` + +### container.blockio.io_service_bytes_recursive + +Number of bytes transferred to/from the disk by the group and descendant groups. + +More docs for [cgroups v1](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt) and [cgroups v2](https://www.kernel.org/doc/Documentation/cgroup-v2.txt) + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| device_major | Device major number for block IO operations. | Any Str | +| device_minor | Device minor number for block IO operations. | Any Str | +| operation | Type of BlockIO operation. | Any Str | + +### container.cpu.usage.kernelmode + +Time spent by tasks of the cgroup in kernel mode (Linux). Time spent by all container processes in kernel mode (Windows). 
+ +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| ns | Sum | Int | Cumulative | true | + +### container.cpu.usage.total + +Total CPU time consumed. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| ns | Sum | Int | Cumulative | true | + +### container.cpu.usage.usermode + +Time spent by tasks of the cgroup in user mode (Linux). Time spent by all container processes in user mode (Windows). + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| ns | Sum | Int | Cumulative | true | + +### container.cpu.utilization + +Percent of CPU used by the container. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Double | + +### container.memory.file + +Amount of memory used to cache filesystem data, including tmpfs and shared memory (Only available with cgroups v2). + +[More docs](https://www.kernel.org/doc/Documentation/cgroup-v2.txt) + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | false | + +### container.memory.percent + +Percentage of memory used. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Double | + +### container.memory.total_cache + +Total amount of memory used by the processes of this cgroup (and descendants) that can be associated with a block on a block device. Also accounts for memory used by tmpfs (Only available with cgroups v1). 
+ +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | false | + +### container.memory.usage.limit + +Memory limit of the container. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | false | + +### container.memory.usage.total + +Memory usage of the container. This excludes the cache. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | false | + +### container.network.io.usage.rx_bytes + +Bytes received by the container. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| interface | Network interface. | Any Str | + +### container.network.io.usage.rx_dropped + +Incoming packets dropped. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {packets} | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| interface | Network interface. | Any Str | + +### container.network.io.usage.tx_bytes + +Bytes sent. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| interface | Network interface. 
| Any Str | + +### container.network.io.usage.tx_dropped + +Outgoing packets dropped. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {packets} | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| interface | Network interface. | Any Str | + +## Optional Metrics + +The following metrics are not emitted by default. Each of them can be enabled by applying the following configuration: + +```yaml +metrics: + : + enabled: true +``` + +### container.blockio.io_merged_recursive + +Number of bios/requests merged into requests belonging to this cgroup and its descendant cgroups (Only available with cgroups v1). + +[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {operations} | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| device_major | Device major number for block IO operations. | Any Str | +| device_minor | Device minor number for block IO operations. | Any Str | +| operation | Type of BlockIO operation. | Any Str | + +### container.blockio.io_queued_recursive + +Number of requests queued up for this cgroup and its descendant cgroups (Only available with cgroups v1). + +[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {operations} | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| device_major | Device major number for block IO operations. 
| Any Str | +| device_minor | Device minor number for block IO operations. | Any Str | +| operation | Type of BlockIO operation. | Any Str | + +### container.blockio.io_service_time_recursive + +Total amount of time in nanoseconds between request dispatch and request completion for the IOs done by this cgroup and descendant cgroups (Only available with cgroups v1). + +[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| ns | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| device_major | Device major number for block IO operations. | Any Str | +| device_minor | Device minor number for block IO operations. | Any Str | +| operation | Type of BlockIO operation. | Any Str | + +### container.blockio.io_serviced_recursive + +Number of IOs (bio) issued to the disk by the group and descendant groups (Only available with cgroups v1). + +[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {operations} | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| device_major | Device major number for block IO operations. | Any Str | +| device_minor | Device minor number for block IO operations. | Any Str | +| operation | Type of BlockIO operation. | Any Str | + +### container.blockio.io_time_recursive + +Disk time allocated to cgroup (and descendant cgroups) per device in milliseconds (Only available with cgroups v1). + +[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). 
+ +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| ms | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| device_major | Device major number for block IO operations. | Any Str | +| device_minor | Device minor number for block IO operations. | Any Str | +| operation | Type of BlockIO operation. | Any Str | + +### container.blockio.io_wait_time_recursive + +Total amount of time the IOs for this cgroup (and descendant cgroups) spent waiting in the scheduler queues for service (Only available with cgroups v1). + +[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| ns | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| device_major | Device major number for block IO operations. | Any Str | +| device_minor | Device minor number for block IO operations. | Any Str | +| operation | Type of BlockIO operation. | Any Str | + +### container.blockio.sectors_recursive + +Number of sectors transferred to/from disk by the group and descendant groups (Only available with cgroups v1). + +[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {sectors} | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| device_major | Device major number for block IO operations. | Any Str | +| device_minor | Device minor number for block IO operations. | Any Str | +| operation | Type of BlockIO operation. 
| Any Str | + +### container.cpu.limit + +CPU limit set for the container. + +This metric is only reported if the container has limits set with -cpus, -cpuset-cpus or -cpu-quota. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {cpus} | Gauge | Double | + +### container.cpu.logical.count + +Number of cores available to the container. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {cpus} | Gauge | Int | + +### container.cpu.shares + +CPU shares set for the container. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +### container.cpu.throttling_data.periods + +Number of periods with throttling active. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {periods} | Sum | Int | Cumulative | true | + +### container.cpu.throttling_data.throttled_periods + +Number of periods when the container hits its throttling limit. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {periods} | Sum | Int | Cumulative | true | + +### container.cpu.throttling_data.throttled_time + +Aggregate time the container was throttled. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| ns | Sum | Int | Cumulative | true | + +### container.cpu.usage.percpu + +Per-core CPU usage by the container (Only available with cgroups v1). + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| ns | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| core | The CPU core number when utilising per-CPU metrics. 
| Any Str | + +### container.cpu.usage.system + +System CPU usage, as reported by docker. + +Note this is the usage for the system, not the container. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| ns | Sum | Int | Cumulative | true | + +### container.memory.active_anon + +The amount of anonymous memory that has been identified as active by the kernel. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | false | + +### container.memory.active_file + +Cache memory that has been identified as active by the kernel. + +[More docs](https://docs.docker.com/config/containers/runmetrics/) + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | false | + +### container.memory.anon + +Amount of memory used in anonymous mappings such as brk(), sbrk(), and mmap(MAP_ANONYMOUS) (Only available with cgroups v2). + +[More docs](https://www.kernel.org/doc/Documentation/cgroup-v2.txt) + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | false | + +### container.memory.cache + +The amount of memory used by the processes of this control group that can be associated precisely with a block on a block device (Only available with cgroups v1). + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | false | + +### container.memory.dirty + +Bytes that are waiting to get written back to the disk, from this cgroup (Only available with cgroups v1). 
+ +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | false | + +### container.memory.fails + +Number of times the memory limit was hit. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {fails} | Sum | Int | Cumulative | true | + +### container.memory.hierarchical_memory_limit + +The maximum amount of physical memory that can be used by the processes of this control group (Only available with cgroups v1). + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | false | + +### container.memory.hierarchical_memsw_limit + +The maximum amount of RAM + swap that can be used by the processes of this control group (Only available with cgroups v1). + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | false | + +### container.memory.inactive_anon + +The amount of anonymous memory that has been identified as inactive by the kernel. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | false | + +### container.memory.inactive_file + +Cache memory that has been identified as inactive by the kernel. 
+ +[More docs](https://docs.docker.com/config/containers/runmetrics/) + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | false | + +### container.memory.mapped_file + +Indicates the amount of memory mapped by the processes in the control group (Only available with cgroups v1). + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | false | + +### container.memory.pgfault + +Indicate the number of times that a process of the cgroup triggered a page fault. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {faults} | Sum | Int | Cumulative | true | + +### container.memory.pgmajfault + +Indicate the number of times that a process of the cgroup triggered a major fault. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {faults} | Sum | Int | Cumulative | true | + +### container.memory.pgpgin + +Number of pages read from disk by the cgroup (Only available with cgroups v1). + +[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt). + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {operations} | Sum | Int | Cumulative | true | + +### container.memory.pgpgout + +Number of pages written to disk by the cgroup (Only available with cgroups v1). + +[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt). 
+ +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {operations} | Sum | Int | Cumulative | true | + +### container.memory.rss + +The amount of memory that doesn’t correspond to anything on disk: stacks, heaps, and anonymous memory maps (Only available with cgroups v1). + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | false | + +### container.memory.rss_huge + +Number of bytes of anonymous transparent hugepages in this cgroup (Only available with cgroups v1). + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | false | + +### container.memory.total_active_anon + +The amount of anonymous memory that has been identified as active by the kernel. Includes descendant cgroups (Only available with cgroups v1). + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | false | + +### container.memory.total_active_file + +Cache memory that has been identified as active by the kernel. Includes descendant cgroups (Only available with cgroups v1). + +[More docs](https://docs.docker.com/config/containers/runmetrics/). + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | false | + +### container.memory.total_dirty + +Bytes that are waiting to get written back to the disk, from this cgroup and descendants (Only available with cgroups v1). 
+ +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | false | + +### container.memory.total_inactive_anon + +The amount of anonymous memory that has been identified as inactive by the kernel. Includes descendant cgroups (Only available with cgroups v1). + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | false | + +### container.memory.total_inactive_file + +Cache memory that has been identified as inactive by the kernel. Includes descendant cgroups (Only available with cgroups v1). + +[More docs](https://docs.docker.com/config/containers/runmetrics/). + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | false | + +### container.memory.total_mapped_file + +Indicates the amount of memory mapped by the processes in the control group and descendant groups (Only available with cgroups v1). + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | false | + +### container.memory.total_pgfault + +Indicate the number of times that a process of the cgroup (or descendant cgroups) triggered a page fault (Only available with cgroups v1). + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {faults} | Sum | Int | Cumulative | true | + +### container.memory.total_pgmajfault + +Indicate the number of times that a process of the cgroup (or descendant cgroups) triggered a major fault (Only available with cgroups v1). 
+ +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {faults} | Sum | Int | Cumulative | true | + +### container.memory.total_pgpgin + +Number of pages read from disk by the cgroup and descendant groups (Only available with cgroups v1). + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {operations} | Sum | Int | Cumulative | true | + +### container.memory.total_pgpgout + +Number of pages written to disk by the cgroup and descendant groups (Only available with cgroups v1). + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {operations} | Sum | Int | Cumulative | true | + +### container.memory.total_rss + +The amount of memory that doesn’t correspond to anything on disk: stacks, heaps, and anonymous memory maps. Includes descendant cgroups (Only available with cgroups v1). + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | false | + +### container.memory.total_rss_huge + +Number of bytes of anonymous transparent hugepages in this cgroup and descendant cgroups (Only available with cgroups v1). + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | false | + +### container.memory.total_unevictable + +The amount of memory that cannot be reclaimed. Includes descendant cgroups (Only available with cgroups v1). 
+ +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | false | + +### container.memory.total_writeback + +Number of bytes of file/anon cache that are queued for syncing to disk in this cgroup and descendants (Only available with cgroups v1). + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | false | + +### container.memory.unevictable + +The amount of memory that cannot be reclaimed. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | false | + +### container.memory.usage.max + +Maximum memory usage. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | false | + +### container.memory.writeback + +Number of bytes of file/anon cache that are queued for syncing to disk in this cgroup (Only available with cgroups v1). + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | false | + +### container.network.io.usage.rx_errors + +Received errors. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {errors} | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| interface | Network interface. | Any Str | + +### container.network.io.usage.rx_packets + +Packets received. 
+ +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {packets} | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| interface | Network interface. | Any Str | + +### container.network.io.usage.tx_errors + +Sent errors. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {errors} | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| interface | Network interface. | Any Str | + +### container.network.io.usage.tx_packets + +Packets sent. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {packets} | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| interface | Network interface. | Any Str | + +### container.pids.count + +Number of pids in the container's cgroup. + +It requires docker API 1.23 or higher and kernel version >= 4.3 with pids cgroup supported. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/pids.txt) + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {pids} | Sum | Int | Cumulative | false | + +### container.pids.limit + +Maximum number of pids in the container's cgroup. + +It requires docker API 1.23 or higher and kernel version >= 4.3 with pids cgroup supported. 
[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/pids.txt) + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {pids} | Sum | Int | Cumulative | false | + +### container.restarts + +Number of restarts for the container. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {restarts} | Sum | Int | Cumulative | true | + +### container.uptime + +Time elapsed since container start time. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| s | Gauge | Double | + +## Resource Attributes + +| Name | Description | Values | Enabled | +| ---- | ----------- | ------ | ------- | +| container.command_line | The full command executed by the container. | Any Str | false | +| container.hostname | The hostname of the container. | Any Str | true | +| container.id | The ID of the container. | Any Str | true | +| container.image.id | The ID of the container image. | Any Str | false | +| container.image.name | The name of the docker image in use by the container. | Any Str | true | +| container.name | The name of the container. | Any Str | true | +| container.runtime | The runtime of the container. For this receiver, it will always be 'docker'. | Any Str | true | diff --git a/internal/docker/receiver/internal/metadata/generated_config.go b/internal/docker/receiver/internal/metadata/generated_config.go new file mode 100644 index 000000000000..f0c63950b4bf --- /dev/null +++ b/internal/docker/receiver/internal/metadata/generated_config.go @@ -0,0 +1,396 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/filter" +) + +// MetricConfig provides common config for a particular metric. 
+type MetricConfig struct { + Enabled bool `mapstructure:"enabled"` + + enabledSetByUser bool +} + +func (ms *MetricConfig) Unmarshal(parser *confmap.Conf) error { + if parser == nil { + return nil + } + err := parser.Unmarshal(ms) + if err != nil { + return err + } + ms.enabledSetByUser = parser.IsSet("enabled") + return nil +} + +// MetricsConfig provides config for docker/receiver metrics. +type MetricsConfig struct { + ContainerBlockioIoMergedRecursive MetricConfig `mapstructure:"container.blockio.io_merged_recursive"` + ContainerBlockioIoQueuedRecursive MetricConfig `mapstructure:"container.blockio.io_queued_recursive"` + ContainerBlockioIoServiceBytesRecursive MetricConfig `mapstructure:"container.blockio.io_service_bytes_recursive"` + ContainerBlockioIoServiceTimeRecursive MetricConfig `mapstructure:"container.blockio.io_service_time_recursive"` + ContainerBlockioIoServicedRecursive MetricConfig `mapstructure:"container.blockio.io_serviced_recursive"` + ContainerBlockioIoTimeRecursive MetricConfig `mapstructure:"container.blockio.io_time_recursive"` + ContainerBlockioIoWaitTimeRecursive MetricConfig `mapstructure:"container.blockio.io_wait_time_recursive"` + ContainerBlockioSectorsRecursive MetricConfig `mapstructure:"container.blockio.sectors_recursive"` + ContainerCPULimit MetricConfig `mapstructure:"container.cpu.limit"` + ContainerCPULogicalCount MetricConfig `mapstructure:"container.cpu.logical.count"` + ContainerCPUShares MetricConfig `mapstructure:"container.cpu.shares"` + ContainerCPUThrottlingDataPeriods MetricConfig `mapstructure:"container.cpu.throttling_data.periods"` + ContainerCPUThrottlingDataThrottledPeriods MetricConfig `mapstructure:"container.cpu.throttling_data.throttled_periods"` + ContainerCPUThrottlingDataThrottledTime MetricConfig `mapstructure:"container.cpu.throttling_data.throttled_time"` + ContainerCPUUsageKernelmode MetricConfig `mapstructure:"container.cpu.usage.kernelmode"` + ContainerCPUUsagePercpu MetricConfig 
`mapstructure:"container.cpu.usage.percpu"` + ContainerCPUUsageSystem MetricConfig `mapstructure:"container.cpu.usage.system"` + ContainerCPUUsageTotal MetricConfig `mapstructure:"container.cpu.usage.total"` + ContainerCPUUsageUsermode MetricConfig `mapstructure:"container.cpu.usage.usermode"` + ContainerCPUUtilization MetricConfig `mapstructure:"container.cpu.utilization"` + ContainerMemoryActiveAnon MetricConfig `mapstructure:"container.memory.active_anon"` + ContainerMemoryActiveFile MetricConfig `mapstructure:"container.memory.active_file"` + ContainerMemoryAnon MetricConfig `mapstructure:"container.memory.anon"` + ContainerMemoryCache MetricConfig `mapstructure:"container.memory.cache"` + ContainerMemoryDirty MetricConfig `mapstructure:"container.memory.dirty"` + ContainerMemoryFails MetricConfig `mapstructure:"container.memory.fails"` + ContainerMemoryFile MetricConfig `mapstructure:"container.memory.file"` + ContainerMemoryHierarchicalMemoryLimit MetricConfig `mapstructure:"container.memory.hierarchical_memory_limit"` + ContainerMemoryHierarchicalMemswLimit MetricConfig `mapstructure:"container.memory.hierarchical_memsw_limit"` + ContainerMemoryInactiveAnon MetricConfig `mapstructure:"container.memory.inactive_anon"` + ContainerMemoryInactiveFile MetricConfig `mapstructure:"container.memory.inactive_file"` + ContainerMemoryMappedFile MetricConfig `mapstructure:"container.memory.mapped_file"` + ContainerMemoryPercent MetricConfig `mapstructure:"container.memory.percent"` + ContainerMemoryPgfault MetricConfig `mapstructure:"container.memory.pgfault"` + ContainerMemoryPgmajfault MetricConfig `mapstructure:"container.memory.pgmajfault"` + ContainerMemoryPgpgin MetricConfig `mapstructure:"container.memory.pgpgin"` + ContainerMemoryPgpgout MetricConfig `mapstructure:"container.memory.pgpgout"` + ContainerMemoryRss MetricConfig `mapstructure:"container.memory.rss"` + ContainerMemoryRssHuge MetricConfig `mapstructure:"container.memory.rss_huge"` + 
ContainerMemoryTotalActiveAnon MetricConfig `mapstructure:"container.memory.total_active_anon"` + ContainerMemoryTotalActiveFile MetricConfig `mapstructure:"container.memory.total_active_file"` + ContainerMemoryTotalCache MetricConfig `mapstructure:"container.memory.total_cache"` + ContainerMemoryTotalDirty MetricConfig `mapstructure:"container.memory.total_dirty"` + ContainerMemoryTotalInactiveAnon MetricConfig `mapstructure:"container.memory.total_inactive_anon"` + ContainerMemoryTotalInactiveFile MetricConfig `mapstructure:"container.memory.total_inactive_file"` + ContainerMemoryTotalMappedFile MetricConfig `mapstructure:"container.memory.total_mapped_file"` + ContainerMemoryTotalPgfault MetricConfig `mapstructure:"container.memory.total_pgfault"` + ContainerMemoryTotalPgmajfault MetricConfig `mapstructure:"container.memory.total_pgmajfault"` + ContainerMemoryTotalPgpgin MetricConfig `mapstructure:"container.memory.total_pgpgin"` + ContainerMemoryTotalPgpgout MetricConfig `mapstructure:"container.memory.total_pgpgout"` + ContainerMemoryTotalRss MetricConfig `mapstructure:"container.memory.total_rss"` + ContainerMemoryTotalRssHuge MetricConfig `mapstructure:"container.memory.total_rss_huge"` + ContainerMemoryTotalUnevictable MetricConfig `mapstructure:"container.memory.total_unevictable"` + ContainerMemoryTotalWriteback MetricConfig `mapstructure:"container.memory.total_writeback"` + ContainerMemoryUnevictable MetricConfig `mapstructure:"container.memory.unevictable"` + ContainerMemoryUsageLimit MetricConfig `mapstructure:"container.memory.usage.limit"` + ContainerMemoryUsageMax MetricConfig `mapstructure:"container.memory.usage.max"` + ContainerMemoryUsageTotal MetricConfig `mapstructure:"container.memory.usage.total"` + ContainerMemoryWriteback MetricConfig `mapstructure:"container.memory.writeback"` + ContainerNetworkIoUsageRxBytes MetricConfig `mapstructure:"container.network.io.usage.rx_bytes"` + ContainerNetworkIoUsageRxDropped MetricConfig 
`mapstructure:"container.network.io.usage.rx_dropped"` + ContainerNetworkIoUsageRxErrors MetricConfig `mapstructure:"container.network.io.usage.rx_errors"` + ContainerNetworkIoUsageRxPackets MetricConfig `mapstructure:"container.network.io.usage.rx_packets"` + ContainerNetworkIoUsageTxBytes MetricConfig `mapstructure:"container.network.io.usage.tx_bytes"` + ContainerNetworkIoUsageTxDropped MetricConfig `mapstructure:"container.network.io.usage.tx_dropped"` + ContainerNetworkIoUsageTxErrors MetricConfig `mapstructure:"container.network.io.usage.tx_errors"` + ContainerNetworkIoUsageTxPackets MetricConfig `mapstructure:"container.network.io.usage.tx_packets"` + ContainerPidsCount MetricConfig `mapstructure:"container.pids.count"` + ContainerPidsLimit MetricConfig `mapstructure:"container.pids.limit"` + ContainerRestarts MetricConfig `mapstructure:"container.restarts"` + ContainerUptime MetricConfig `mapstructure:"container.uptime"` +} + +func DefaultMetricsConfig() MetricsConfig { + return MetricsConfig{ + ContainerBlockioIoMergedRecursive: MetricConfig{ + Enabled: false, + }, + ContainerBlockioIoQueuedRecursive: MetricConfig{ + Enabled: false, + }, + ContainerBlockioIoServiceBytesRecursive: MetricConfig{ + Enabled: true, + }, + ContainerBlockioIoServiceTimeRecursive: MetricConfig{ + Enabled: false, + }, + ContainerBlockioIoServicedRecursive: MetricConfig{ + Enabled: false, + }, + ContainerBlockioIoTimeRecursive: MetricConfig{ + Enabled: false, + }, + ContainerBlockioIoWaitTimeRecursive: MetricConfig{ + Enabled: false, + }, + ContainerBlockioSectorsRecursive: MetricConfig{ + Enabled: false, + }, + ContainerCPULimit: MetricConfig{ + Enabled: false, + }, + ContainerCPULogicalCount: MetricConfig{ + Enabled: false, + }, + ContainerCPUShares: MetricConfig{ + Enabled: false, + }, + ContainerCPUThrottlingDataPeriods: MetricConfig{ + Enabled: false, + }, + ContainerCPUThrottlingDataThrottledPeriods: MetricConfig{ + Enabled: false, + }, + 
ContainerCPUThrottlingDataThrottledTime: MetricConfig{ + Enabled: false, + }, + ContainerCPUUsageKernelmode: MetricConfig{ + Enabled: true, + }, + ContainerCPUUsagePercpu: MetricConfig{ + Enabled: false, + }, + ContainerCPUUsageSystem: MetricConfig{ + Enabled: false, + }, + ContainerCPUUsageTotal: MetricConfig{ + Enabled: true, + }, + ContainerCPUUsageUsermode: MetricConfig{ + Enabled: true, + }, + ContainerCPUUtilization: MetricConfig{ + Enabled: true, + }, + ContainerMemoryActiveAnon: MetricConfig{ + Enabled: false, + }, + ContainerMemoryActiveFile: MetricConfig{ + Enabled: false, + }, + ContainerMemoryAnon: MetricConfig{ + Enabled: false, + }, + ContainerMemoryCache: MetricConfig{ + Enabled: false, + }, + ContainerMemoryDirty: MetricConfig{ + Enabled: false, + }, + ContainerMemoryFails: MetricConfig{ + Enabled: false, + }, + ContainerMemoryFile: MetricConfig{ + Enabled: true, + }, + ContainerMemoryHierarchicalMemoryLimit: MetricConfig{ + Enabled: false, + }, + ContainerMemoryHierarchicalMemswLimit: MetricConfig{ + Enabled: false, + }, + ContainerMemoryInactiveAnon: MetricConfig{ + Enabled: false, + }, + ContainerMemoryInactiveFile: MetricConfig{ + Enabled: false, + }, + ContainerMemoryMappedFile: MetricConfig{ + Enabled: false, + }, + ContainerMemoryPercent: MetricConfig{ + Enabled: true, + }, + ContainerMemoryPgfault: MetricConfig{ + Enabled: false, + }, + ContainerMemoryPgmajfault: MetricConfig{ + Enabled: false, + }, + ContainerMemoryPgpgin: MetricConfig{ + Enabled: false, + }, + ContainerMemoryPgpgout: MetricConfig{ + Enabled: false, + }, + ContainerMemoryRss: MetricConfig{ + Enabled: false, + }, + ContainerMemoryRssHuge: MetricConfig{ + Enabled: false, + }, + ContainerMemoryTotalActiveAnon: MetricConfig{ + Enabled: false, + }, + ContainerMemoryTotalActiveFile: MetricConfig{ + Enabled: false, + }, + ContainerMemoryTotalCache: MetricConfig{ + Enabled: true, + }, + ContainerMemoryTotalDirty: MetricConfig{ + Enabled: false, + }, + 
ContainerMemoryTotalInactiveAnon: MetricConfig{ + Enabled: false, + }, + ContainerMemoryTotalInactiveFile: MetricConfig{ + Enabled: false, + }, + ContainerMemoryTotalMappedFile: MetricConfig{ + Enabled: false, + }, + ContainerMemoryTotalPgfault: MetricConfig{ + Enabled: false, + }, + ContainerMemoryTotalPgmajfault: MetricConfig{ + Enabled: false, + }, + ContainerMemoryTotalPgpgin: MetricConfig{ + Enabled: false, + }, + ContainerMemoryTotalPgpgout: MetricConfig{ + Enabled: false, + }, + ContainerMemoryTotalRss: MetricConfig{ + Enabled: false, + }, + ContainerMemoryTotalRssHuge: MetricConfig{ + Enabled: false, + }, + ContainerMemoryTotalUnevictable: MetricConfig{ + Enabled: false, + }, + ContainerMemoryTotalWriteback: MetricConfig{ + Enabled: false, + }, + ContainerMemoryUnevictable: MetricConfig{ + Enabled: false, + }, + ContainerMemoryUsageLimit: MetricConfig{ + Enabled: true, + }, + ContainerMemoryUsageMax: MetricConfig{ + Enabled: false, + }, + ContainerMemoryUsageTotal: MetricConfig{ + Enabled: true, + }, + ContainerMemoryWriteback: MetricConfig{ + Enabled: false, + }, + ContainerNetworkIoUsageRxBytes: MetricConfig{ + Enabled: true, + }, + ContainerNetworkIoUsageRxDropped: MetricConfig{ + Enabled: true, + }, + ContainerNetworkIoUsageRxErrors: MetricConfig{ + Enabled: false, + }, + ContainerNetworkIoUsageRxPackets: MetricConfig{ + Enabled: false, + }, + ContainerNetworkIoUsageTxBytes: MetricConfig{ + Enabled: true, + }, + ContainerNetworkIoUsageTxDropped: MetricConfig{ + Enabled: true, + }, + ContainerNetworkIoUsageTxErrors: MetricConfig{ + Enabled: false, + }, + ContainerNetworkIoUsageTxPackets: MetricConfig{ + Enabled: false, + }, + ContainerPidsCount: MetricConfig{ + Enabled: false, + }, + ContainerPidsLimit: MetricConfig{ + Enabled: false, + }, + ContainerRestarts: MetricConfig{ + Enabled: false, + }, + ContainerUptime: MetricConfig{ + Enabled: false, + }, + } +} + +// ResourceAttributeConfig provides common config for a particular resource attribute. 
+type ResourceAttributeConfig struct { + Enabled bool `mapstructure:"enabled"` + // Experimental: MetricsInclude defines a list of filters for attribute values. + // If the list is not empty, only metrics with matching resource attribute values will be emitted. + MetricsInclude []filter.Config `mapstructure:"metrics_include"` + // Experimental: MetricsExclude defines a list of filters for attribute values. + // If the list is not empty, metrics with matching resource attribute values will not be emitted. + // MetricsInclude has higher priority than MetricsExclude. + MetricsExclude []filter.Config `mapstructure:"metrics_exclude"` + + enabledSetByUser bool +} + +func (rac *ResourceAttributeConfig) Unmarshal(parser *confmap.Conf) error { + if parser == nil { + return nil + } + err := parser.Unmarshal(rac) + if err != nil { + return err + } + rac.enabledSetByUser = parser.IsSet("enabled") + return nil +} + +// ResourceAttributesConfig provides config for docker/receiver resource attributes. 
+type ResourceAttributesConfig struct { + ContainerCommandLine ResourceAttributeConfig `mapstructure:"container.command_line"` + ContainerHostname ResourceAttributeConfig `mapstructure:"container.hostname"` + ContainerID ResourceAttributeConfig `mapstructure:"container.id"` + ContainerImageID ResourceAttributeConfig `mapstructure:"container.image.id"` + ContainerImageName ResourceAttributeConfig `mapstructure:"container.image.name"` + ContainerName ResourceAttributeConfig `mapstructure:"container.name"` + ContainerRuntime ResourceAttributeConfig `mapstructure:"container.runtime"` +} + +func DefaultResourceAttributesConfig() ResourceAttributesConfig { + return ResourceAttributesConfig{ + ContainerCommandLine: ResourceAttributeConfig{ + Enabled: false, + }, + ContainerHostname: ResourceAttributeConfig{ + Enabled: true, + }, + ContainerID: ResourceAttributeConfig{ + Enabled: true, + }, + ContainerImageID: ResourceAttributeConfig{ + Enabled: false, + }, + ContainerImageName: ResourceAttributeConfig{ + Enabled: true, + }, + ContainerName: ResourceAttributeConfig{ + Enabled: true, + }, + ContainerRuntime: ResourceAttributeConfig{ + Enabled: true, + }, + } +} + +// MetricsBuilderConfig is a configuration for docker/receiver metrics builder. +type MetricsBuilderConfig struct { + Metrics MetricsConfig `mapstructure:"metrics"` + ResourceAttributes ResourceAttributesConfig `mapstructure:"resource_attributes"` +} + +func DefaultMetricsBuilderConfig() MetricsBuilderConfig { + return MetricsBuilderConfig{ + Metrics: DefaultMetricsConfig(), + ResourceAttributes: DefaultResourceAttributesConfig(), + } +} diff --git a/internal/docker/receiver/internal/metadata/generated_config_test.go b/internal/docker/receiver/internal/metadata/generated_config_test.go new file mode 100644 index 000000000000..645dd16efd1a --- /dev/null +++ b/internal/docker/receiver/internal/metadata/generated_config_test.go @@ -0,0 +1,274 @@ +// Code generated by mdatagen. DO NOT EDIT. 
+ +package metadata + +import ( + "path/filepath" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap/confmaptest" +) + +func TestMetricsBuilderConfig(t *testing.T) { + tests := []struct { + name string + want MetricsBuilderConfig + }{ + { + name: "default", + want: DefaultMetricsBuilderConfig(), + }, + { + name: "all_set", + want: MetricsBuilderConfig{ + Metrics: MetricsConfig{ + ContainerBlockioIoMergedRecursive: MetricConfig{Enabled: true}, + ContainerBlockioIoQueuedRecursive: MetricConfig{Enabled: true}, + ContainerBlockioIoServiceBytesRecursive: MetricConfig{Enabled: true}, + ContainerBlockioIoServiceTimeRecursive: MetricConfig{Enabled: true}, + ContainerBlockioIoServicedRecursive: MetricConfig{Enabled: true}, + ContainerBlockioIoTimeRecursive: MetricConfig{Enabled: true}, + ContainerBlockioIoWaitTimeRecursive: MetricConfig{Enabled: true}, + ContainerBlockioSectorsRecursive: MetricConfig{Enabled: true}, + ContainerCPULimit: MetricConfig{Enabled: true}, + ContainerCPULogicalCount: MetricConfig{Enabled: true}, + ContainerCPUShares: MetricConfig{Enabled: true}, + ContainerCPUThrottlingDataPeriods: MetricConfig{Enabled: true}, + ContainerCPUThrottlingDataThrottledPeriods: MetricConfig{Enabled: true}, + ContainerCPUThrottlingDataThrottledTime: MetricConfig{Enabled: true}, + ContainerCPUUsageKernelmode: MetricConfig{Enabled: true}, + ContainerCPUUsagePercpu: MetricConfig{Enabled: true}, + ContainerCPUUsageSystem: MetricConfig{Enabled: true}, + ContainerCPUUsageTotal: MetricConfig{Enabled: true}, + ContainerCPUUsageUsermode: MetricConfig{Enabled: true}, + ContainerCPUUtilization: MetricConfig{Enabled: true}, + ContainerMemoryActiveAnon: MetricConfig{Enabled: true}, + ContainerMemoryActiveFile: MetricConfig{Enabled: true}, + ContainerMemoryAnon: MetricConfig{Enabled: true}, + ContainerMemoryCache: 
MetricConfig{Enabled: true}, + ContainerMemoryDirty: MetricConfig{Enabled: true}, + ContainerMemoryFails: MetricConfig{Enabled: true}, + ContainerMemoryFile: MetricConfig{Enabled: true}, + ContainerMemoryHierarchicalMemoryLimit: MetricConfig{Enabled: true}, + ContainerMemoryHierarchicalMemswLimit: MetricConfig{Enabled: true}, + ContainerMemoryInactiveAnon: MetricConfig{Enabled: true}, + ContainerMemoryInactiveFile: MetricConfig{Enabled: true}, + ContainerMemoryMappedFile: MetricConfig{Enabled: true}, + ContainerMemoryPercent: MetricConfig{Enabled: true}, + ContainerMemoryPgfault: MetricConfig{Enabled: true}, + ContainerMemoryPgmajfault: MetricConfig{Enabled: true}, + ContainerMemoryPgpgin: MetricConfig{Enabled: true}, + ContainerMemoryPgpgout: MetricConfig{Enabled: true}, + ContainerMemoryRss: MetricConfig{Enabled: true}, + ContainerMemoryRssHuge: MetricConfig{Enabled: true}, + ContainerMemoryTotalActiveAnon: MetricConfig{Enabled: true}, + ContainerMemoryTotalActiveFile: MetricConfig{Enabled: true}, + ContainerMemoryTotalCache: MetricConfig{Enabled: true}, + ContainerMemoryTotalDirty: MetricConfig{Enabled: true}, + ContainerMemoryTotalInactiveAnon: MetricConfig{Enabled: true}, + ContainerMemoryTotalInactiveFile: MetricConfig{Enabled: true}, + ContainerMemoryTotalMappedFile: MetricConfig{Enabled: true}, + ContainerMemoryTotalPgfault: MetricConfig{Enabled: true}, + ContainerMemoryTotalPgmajfault: MetricConfig{Enabled: true}, + ContainerMemoryTotalPgpgin: MetricConfig{Enabled: true}, + ContainerMemoryTotalPgpgout: MetricConfig{Enabled: true}, + ContainerMemoryTotalRss: MetricConfig{Enabled: true}, + ContainerMemoryTotalRssHuge: MetricConfig{Enabled: true}, + ContainerMemoryTotalUnevictable: MetricConfig{Enabled: true}, + ContainerMemoryTotalWriteback: MetricConfig{Enabled: true}, + ContainerMemoryUnevictable: MetricConfig{Enabled: true}, + ContainerMemoryUsageLimit: MetricConfig{Enabled: true}, + ContainerMemoryUsageMax: MetricConfig{Enabled: true}, + 
ContainerMemoryUsageTotal: MetricConfig{Enabled: true}, + ContainerMemoryWriteback: MetricConfig{Enabled: true}, + ContainerNetworkIoUsageRxBytes: MetricConfig{Enabled: true}, + ContainerNetworkIoUsageRxDropped: MetricConfig{Enabled: true}, + ContainerNetworkIoUsageRxErrors: MetricConfig{Enabled: true}, + ContainerNetworkIoUsageRxPackets: MetricConfig{Enabled: true}, + ContainerNetworkIoUsageTxBytes: MetricConfig{Enabled: true}, + ContainerNetworkIoUsageTxDropped: MetricConfig{Enabled: true}, + ContainerNetworkIoUsageTxErrors: MetricConfig{Enabled: true}, + ContainerNetworkIoUsageTxPackets: MetricConfig{Enabled: true}, + ContainerPidsCount: MetricConfig{Enabled: true}, + ContainerPidsLimit: MetricConfig{Enabled: true}, + ContainerRestarts: MetricConfig{Enabled: true}, + ContainerUptime: MetricConfig{Enabled: true}, + }, + ResourceAttributes: ResourceAttributesConfig{ + ContainerCommandLine: ResourceAttributeConfig{Enabled: true}, + ContainerHostname: ResourceAttributeConfig{Enabled: true}, + ContainerID: ResourceAttributeConfig{Enabled: true}, + ContainerImageID: ResourceAttributeConfig{Enabled: true}, + ContainerImageName: ResourceAttributeConfig{Enabled: true}, + ContainerName: ResourceAttributeConfig{Enabled: true}, + ContainerRuntime: ResourceAttributeConfig{Enabled: true}, + }, + }, + }, + { + name: "none_set", + want: MetricsBuilderConfig{ + Metrics: MetricsConfig{ + ContainerBlockioIoMergedRecursive: MetricConfig{Enabled: false}, + ContainerBlockioIoQueuedRecursive: MetricConfig{Enabled: false}, + ContainerBlockioIoServiceBytesRecursive: MetricConfig{Enabled: false}, + ContainerBlockioIoServiceTimeRecursive: MetricConfig{Enabled: false}, + ContainerBlockioIoServicedRecursive: MetricConfig{Enabled: false}, + ContainerBlockioIoTimeRecursive: MetricConfig{Enabled: false}, + ContainerBlockioIoWaitTimeRecursive: MetricConfig{Enabled: false}, + ContainerBlockioSectorsRecursive: MetricConfig{Enabled: false}, + ContainerCPULimit: MetricConfig{Enabled: false}, + 
ContainerCPULogicalCount: MetricConfig{Enabled: false}, + ContainerCPUShares: MetricConfig{Enabled: false}, + ContainerCPUThrottlingDataPeriods: MetricConfig{Enabled: false}, + ContainerCPUThrottlingDataThrottledPeriods: MetricConfig{Enabled: false}, + ContainerCPUThrottlingDataThrottledTime: MetricConfig{Enabled: false}, + ContainerCPUUsageKernelmode: MetricConfig{Enabled: false}, + ContainerCPUUsagePercpu: MetricConfig{Enabled: false}, + ContainerCPUUsageSystem: MetricConfig{Enabled: false}, + ContainerCPUUsageTotal: MetricConfig{Enabled: false}, + ContainerCPUUsageUsermode: MetricConfig{Enabled: false}, + ContainerCPUUtilization: MetricConfig{Enabled: false}, + ContainerMemoryActiveAnon: MetricConfig{Enabled: false}, + ContainerMemoryActiveFile: MetricConfig{Enabled: false}, + ContainerMemoryAnon: MetricConfig{Enabled: false}, + ContainerMemoryCache: MetricConfig{Enabled: false}, + ContainerMemoryDirty: MetricConfig{Enabled: false}, + ContainerMemoryFails: MetricConfig{Enabled: false}, + ContainerMemoryFile: MetricConfig{Enabled: false}, + ContainerMemoryHierarchicalMemoryLimit: MetricConfig{Enabled: false}, + ContainerMemoryHierarchicalMemswLimit: MetricConfig{Enabled: false}, + ContainerMemoryInactiveAnon: MetricConfig{Enabled: false}, + ContainerMemoryInactiveFile: MetricConfig{Enabled: false}, + ContainerMemoryMappedFile: MetricConfig{Enabled: false}, + ContainerMemoryPercent: MetricConfig{Enabled: false}, + ContainerMemoryPgfault: MetricConfig{Enabled: false}, + ContainerMemoryPgmajfault: MetricConfig{Enabled: false}, + ContainerMemoryPgpgin: MetricConfig{Enabled: false}, + ContainerMemoryPgpgout: MetricConfig{Enabled: false}, + ContainerMemoryRss: MetricConfig{Enabled: false}, + ContainerMemoryRssHuge: MetricConfig{Enabled: false}, + ContainerMemoryTotalActiveAnon: MetricConfig{Enabled: false}, + ContainerMemoryTotalActiveFile: MetricConfig{Enabled: false}, + ContainerMemoryTotalCache: MetricConfig{Enabled: false}, + ContainerMemoryTotalDirty: 
MetricConfig{Enabled: false}, + ContainerMemoryTotalInactiveAnon: MetricConfig{Enabled: false}, + ContainerMemoryTotalInactiveFile: MetricConfig{Enabled: false}, + ContainerMemoryTotalMappedFile: MetricConfig{Enabled: false}, + ContainerMemoryTotalPgfault: MetricConfig{Enabled: false}, + ContainerMemoryTotalPgmajfault: MetricConfig{Enabled: false}, + ContainerMemoryTotalPgpgin: MetricConfig{Enabled: false}, + ContainerMemoryTotalPgpgout: MetricConfig{Enabled: false}, + ContainerMemoryTotalRss: MetricConfig{Enabled: false}, + ContainerMemoryTotalRssHuge: MetricConfig{Enabled: false}, + ContainerMemoryTotalUnevictable: MetricConfig{Enabled: false}, + ContainerMemoryTotalWriteback: MetricConfig{Enabled: false}, + ContainerMemoryUnevictable: MetricConfig{Enabled: false}, + ContainerMemoryUsageLimit: MetricConfig{Enabled: false}, + ContainerMemoryUsageMax: MetricConfig{Enabled: false}, + ContainerMemoryUsageTotal: MetricConfig{Enabled: false}, + ContainerMemoryWriteback: MetricConfig{Enabled: false}, + ContainerNetworkIoUsageRxBytes: MetricConfig{Enabled: false}, + ContainerNetworkIoUsageRxDropped: MetricConfig{Enabled: false}, + ContainerNetworkIoUsageRxErrors: MetricConfig{Enabled: false}, + ContainerNetworkIoUsageRxPackets: MetricConfig{Enabled: false}, + ContainerNetworkIoUsageTxBytes: MetricConfig{Enabled: false}, + ContainerNetworkIoUsageTxDropped: MetricConfig{Enabled: false}, + ContainerNetworkIoUsageTxErrors: MetricConfig{Enabled: false}, + ContainerNetworkIoUsageTxPackets: MetricConfig{Enabled: false}, + ContainerPidsCount: MetricConfig{Enabled: false}, + ContainerPidsLimit: MetricConfig{Enabled: false}, + ContainerRestarts: MetricConfig{Enabled: false}, + ContainerUptime: MetricConfig{Enabled: false}, + }, + ResourceAttributes: ResourceAttributesConfig{ + ContainerCommandLine: ResourceAttributeConfig{Enabled: false}, + ContainerHostname: ResourceAttributeConfig{Enabled: false}, + ContainerID: ResourceAttributeConfig{Enabled: false}, + ContainerImageID: 
ResourceAttributeConfig{Enabled: false}, + ContainerImageName: ResourceAttributeConfig{Enabled: false}, + ContainerName: ResourceAttributeConfig{Enabled: false}, + ContainerRuntime: ResourceAttributeConfig{Enabled: false}, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := loadMetricsBuilderConfig(t, tt.name) + if diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(MetricConfig{}, ResourceAttributeConfig{})); diff != "" { + t.Errorf("Config mismatch (-expected +actual):\n%s", diff) + } + }) + } +} + +func loadMetricsBuilderConfig(t *testing.T, name string) MetricsBuilderConfig { + cm, err := confmaptest.LoadConf(filepath.Join("testdata", "config.yaml")) + require.NoError(t, err) + sub, err := cm.Sub(name) + require.NoError(t, err) + cfg := DefaultMetricsBuilderConfig() + require.NoError(t, component.UnmarshalConfig(sub, &cfg)) + return cfg +} + +func TestResourceAttributesConfig(t *testing.T) { + tests := []struct { + name string + want ResourceAttributesConfig + }{ + { + name: "default", + want: DefaultResourceAttributesConfig(), + }, + { + name: "all_set", + want: ResourceAttributesConfig{ + ContainerCommandLine: ResourceAttributeConfig{Enabled: true}, + ContainerHostname: ResourceAttributeConfig{Enabled: true}, + ContainerID: ResourceAttributeConfig{Enabled: true}, + ContainerImageID: ResourceAttributeConfig{Enabled: true}, + ContainerImageName: ResourceAttributeConfig{Enabled: true}, + ContainerName: ResourceAttributeConfig{Enabled: true}, + ContainerRuntime: ResourceAttributeConfig{Enabled: true}, + }, + }, + { + name: "none_set", + want: ResourceAttributesConfig{ + ContainerCommandLine: ResourceAttributeConfig{Enabled: false}, + ContainerHostname: ResourceAttributeConfig{Enabled: false}, + ContainerID: ResourceAttributeConfig{Enabled: false}, + ContainerImageID: ResourceAttributeConfig{Enabled: false}, + ContainerImageName: ResourceAttributeConfig{Enabled: false}, + ContainerName: 
ResourceAttributeConfig{Enabled: false}, + ContainerRuntime: ResourceAttributeConfig{Enabled: false}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := loadResourceAttributesConfig(t, tt.name) + if diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(ResourceAttributeConfig{})); diff != "" { + t.Errorf("Config mismatch (-expected +actual):\n%s", diff) + } + }) + } +} + +func loadResourceAttributesConfig(t *testing.T, name string) ResourceAttributesConfig { + cm, err := confmaptest.LoadConf(filepath.Join("testdata", "config.yaml")) + require.NoError(t, err) + sub, err := cm.Sub(name) + require.NoError(t, err) + sub, err = sub.Sub("resource_attributes") + require.NoError(t, err) + cfg := DefaultResourceAttributesConfig() + require.NoError(t, component.UnmarshalConfig(sub, &cfg)) + return cfg +} diff --git a/internal/docker/receiver/internal/metadata/generated_metrics.go b/internal/docker/receiver/internal/metadata/generated_metrics.go new file mode 100644 index 000000000000..e485825d5e17 --- /dev/null +++ b/internal/docker/receiver/internal/metadata/generated_metrics.go @@ -0,0 +1,4416 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "time" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/filter" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/receiver" + conventions "go.opentelemetry.io/collector/semconv/v1.6.1" +) + +type metricContainerBlockioIoMergedRecursive struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.blockio.io_merged_recursive metric with initial data. 
+func (m *metricContainerBlockioIoMergedRecursive) init() { + m.data.SetName("container.blockio.io_merged_recursive") + m.data.SetDescription("Number of bios/requests merged into requests belonging to this cgroup and its descendant cgroups (Only available with cgroups v1).") + m.data.SetUnit("{operations}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricContainerBlockioIoMergedRecursive) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("device_major", deviceMajorAttributeValue) + dp.Attributes().PutStr("device_minor", deviceMinorAttributeValue) + dp.Attributes().PutStr("operation", operationAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerBlockioIoMergedRecursive) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerBlockioIoMergedRecursive) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerBlockioIoMergedRecursive(cfg MetricConfig) metricContainerBlockioIoMergedRecursive { + m := metricContainerBlockioIoMergedRecursive{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerBlockioIoQueuedRecursive struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.blockio.io_queued_recursive metric with initial data. +func (m *metricContainerBlockioIoQueuedRecursive) init() { + m.data.SetName("container.blockio.io_queued_recursive") + m.data.SetDescription("Number of requests queued up for this cgroup and its descendant cgroups (Only available with cgroups v1).") + m.data.SetUnit("{operations}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricContainerBlockioIoQueuedRecursive) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("device_major", deviceMajorAttributeValue) + dp.Attributes().PutStr("device_minor", deviceMinorAttributeValue) + dp.Attributes().PutStr("operation", operationAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricContainerBlockioIoQueuedRecursive) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricContainerBlockioIoQueuedRecursive) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerBlockioIoQueuedRecursive(cfg MetricConfig) metricContainerBlockioIoQueuedRecursive { + m := metricContainerBlockioIoQueuedRecursive{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerBlockioIoServiceBytesRecursive struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.blockio.io_service_bytes_recursive metric with initial data. 
+func (m *metricContainerBlockioIoServiceBytesRecursive) init() { + m.data.SetName("container.blockio.io_service_bytes_recursive") + m.data.SetDescription("Number of bytes transferred to/from the disk by the group and descendant groups.") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricContainerBlockioIoServiceBytesRecursive) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("device_major", deviceMajorAttributeValue) + dp.Attributes().PutStr("device_minor", deviceMinorAttributeValue) + dp.Attributes().PutStr("operation", operationAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerBlockioIoServiceBytesRecursive) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerBlockioIoServiceBytesRecursive) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerBlockioIoServiceBytesRecursive(cfg MetricConfig) metricContainerBlockioIoServiceBytesRecursive { + m := metricContainerBlockioIoServiceBytesRecursive{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerBlockioIoServiceTimeRecursive struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.blockio.io_service_time_recursive metric with initial data. +func (m *metricContainerBlockioIoServiceTimeRecursive) init() { + m.data.SetName("container.blockio.io_service_time_recursive") + m.data.SetDescription("Total amount of time in nanoseconds between request dispatch and request completion for the IOs done by this cgroup and descendant cgroups (Only available with cgroups v1).") + m.data.SetUnit("ns") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricContainerBlockioIoServiceTimeRecursive) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("device_major", deviceMajorAttributeValue) + dp.Attributes().PutStr("device_minor", deviceMinorAttributeValue) + dp.Attributes().PutStr("operation", operationAttributeValue) +} + +// 
updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerBlockioIoServiceTimeRecursive) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricContainerBlockioIoServiceTimeRecursive) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerBlockioIoServiceTimeRecursive(cfg MetricConfig) metricContainerBlockioIoServiceTimeRecursive { + m := metricContainerBlockioIoServiceTimeRecursive{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerBlockioIoServicedRecursive struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.blockio.io_serviced_recursive metric with initial data. 
+func (m *metricContainerBlockioIoServicedRecursive) init() { + m.data.SetName("container.blockio.io_serviced_recursive") + m.data.SetDescription("Number of IOs (bio) issued to the disk by the group and descendant groups (Only available with cgroups v1).") + m.data.SetUnit("{operations}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricContainerBlockioIoServicedRecursive) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("device_major", deviceMajorAttributeValue) + dp.Attributes().PutStr("device_minor", deviceMinorAttributeValue) + dp.Attributes().PutStr("operation", operationAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerBlockioIoServicedRecursive) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerBlockioIoServicedRecursive) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerBlockioIoServicedRecursive(cfg MetricConfig) metricContainerBlockioIoServicedRecursive { + m := metricContainerBlockioIoServicedRecursive{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerBlockioIoTimeRecursive struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.blockio.io_time_recursive metric with initial data. +func (m *metricContainerBlockioIoTimeRecursive) init() { + m.data.SetName("container.blockio.io_time_recursive") + m.data.SetDescription("Disk time allocated to cgroup (and descendant cgroups) per device in milliseconds (Only available with cgroups v1).") + m.data.SetUnit("ms") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricContainerBlockioIoTimeRecursive) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("device_major", deviceMajorAttributeValue) + dp.Attributes().PutStr("device_minor", deviceMinorAttributeValue) + dp.Attributes().PutStr("operation", operationAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricContainerBlockioIoTimeRecursive) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricContainerBlockioIoTimeRecursive) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerBlockioIoTimeRecursive(cfg MetricConfig) metricContainerBlockioIoTimeRecursive { + m := metricContainerBlockioIoTimeRecursive{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerBlockioIoWaitTimeRecursive struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.blockio.io_wait_time_recursive metric with initial data. 
+func (m *metricContainerBlockioIoWaitTimeRecursive) init() { + m.data.SetName("container.blockio.io_wait_time_recursive") + m.data.SetDescription("Total amount of time the IOs for this cgroup (and descendant cgroups) spent waiting in the scheduler queues for service (Only available with cgroups v1).") + m.data.SetUnit("ns") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricContainerBlockioIoWaitTimeRecursive) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("device_major", deviceMajorAttributeValue) + dp.Attributes().PutStr("device_minor", deviceMinorAttributeValue) + dp.Attributes().PutStr("operation", operationAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerBlockioIoWaitTimeRecursive) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerBlockioIoWaitTimeRecursive) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerBlockioIoWaitTimeRecursive(cfg MetricConfig) metricContainerBlockioIoWaitTimeRecursive { + m := metricContainerBlockioIoWaitTimeRecursive{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerBlockioSectorsRecursive struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.blockio.sectors_recursive metric with initial data. +func (m *metricContainerBlockioSectorsRecursive) init() { + m.data.SetName("container.blockio.sectors_recursive") + m.data.SetDescription("Number of sectors transferred to/from disk by the group and descendant groups (Only available with cgroups v1).") + m.data.SetUnit("{sectors}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricContainerBlockioSectorsRecursive) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("device_major", deviceMajorAttributeValue) + dp.Attributes().PutStr("device_minor", deviceMinorAttributeValue) + dp.Attributes().PutStr("operation", operationAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricContainerBlockioSectorsRecursive) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricContainerBlockioSectorsRecursive) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerBlockioSectorsRecursive(cfg MetricConfig) metricContainerBlockioSectorsRecursive { + m := metricContainerBlockioSectorsRecursive{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerCPULimit struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.cpu.limit metric with initial data. +func (m *metricContainerCPULimit) init() { + m.data.SetName("container.cpu.limit") + m.data.SetDescription("CPU limit set for the container.") + m.data.SetUnit("{cpus}") + m.data.SetEmptyGauge() +} + +func (m *metricContainerCPULimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerCPULimit) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerCPULimit) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerCPULimit(cfg MetricConfig) metricContainerCPULimit { + m := metricContainerCPULimit{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerCPULogicalCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.cpu.logical.count metric with initial data. +func (m *metricContainerCPULogicalCount) init() { + m.data.SetName("container.cpu.logical.count") + m.data.SetDescription("Number of cores available to the container.") + m.data.SetUnit("{cpus}") + m.data.SetEmptyGauge() +} + +func (m *metricContainerCPULogicalCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerCPULogicalCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerCPULogicalCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerCPULogicalCount(cfg MetricConfig) metricContainerCPULogicalCount { + m := metricContainerCPULogicalCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerCPUShares struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.cpu.shares metric with initial data. +func (m *metricContainerCPUShares) init() { + m.data.SetName("container.cpu.shares") + m.data.SetDescription("CPU shares set for the container.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricContainerCPUShares) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerCPUShares) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerCPUShares) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerCPUShares(cfg MetricConfig) metricContainerCPUShares { + m := metricContainerCPUShares{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerCPUThrottlingDataPeriods struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.cpu.throttling_data.periods metric with initial data. +func (m *metricContainerCPUThrottlingDataPeriods) init() { + m.data.SetName("container.cpu.throttling_data.periods") + m.data.SetDescription("Number of periods with throttling active.") + m.data.SetUnit("{periods}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerCPUThrottlingDataPeriods) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerCPUThrottlingDataPeriods) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerCPUThrottlingDataPeriods) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerCPUThrottlingDataPeriods(cfg MetricConfig) metricContainerCPUThrottlingDataPeriods { + m := metricContainerCPUThrottlingDataPeriods{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerCPUThrottlingDataThrottledPeriods struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.cpu.throttling_data.throttled_periods metric with initial data. +func (m *metricContainerCPUThrottlingDataThrottledPeriods) init() { + m.data.SetName("container.cpu.throttling_data.throttled_periods") + m.data.SetDescription("Number of periods when the container hits its throttling limit.") + m.data.SetUnit("{periods}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerCPUThrottlingDataThrottledPeriods) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerCPUThrottlingDataThrottledPeriods) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerCPUThrottlingDataThrottledPeriods) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerCPUThrottlingDataThrottledPeriods(cfg MetricConfig) metricContainerCPUThrottlingDataThrottledPeriods { + m := metricContainerCPUThrottlingDataThrottledPeriods{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerCPUThrottlingDataThrottledTime struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.cpu.throttling_data.throttled_time metric with initial data. +func (m *metricContainerCPUThrottlingDataThrottledTime) init() { + m.data.SetName("container.cpu.throttling_data.throttled_time") + m.data.SetDescription("Aggregate time the container was throttled.") + m.data.SetUnit("ns") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerCPUThrottlingDataThrottledTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerCPUThrottlingDataThrottledTime) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerCPUThrottlingDataThrottledTime) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerCPUThrottlingDataThrottledTime(cfg MetricConfig) metricContainerCPUThrottlingDataThrottledTime { + m := metricContainerCPUThrottlingDataThrottledTime{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerCPUUsageKernelmode struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.cpu.usage.kernelmode metric with initial data. +func (m *metricContainerCPUUsageKernelmode) init() { + m.data.SetName("container.cpu.usage.kernelmode") + m.data.SetDescription("Time spent by tasks of the cgroup in kernel mode (Linux). Time spent by all container processes in kernel mode (Windows).") + m.data.SetUnit("ns") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerCPUUsageKernelmode) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerCPUUsageKernelmode) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerCPUUsageKernelmode) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerCPUUsageKernelmode(cfg MetricConfig) metricContainerCPUUsageKernelmode { + m := metricContainerCPUUsageKernelmode{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerCPUUsagePercpu struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.cpu.usage.percpu metric with initial data. +func (m *metricContainerCPUUsagePercpu) init() { + m.data.SetName("container.cpu.usage.percpu") + m.data.SetDescription("Per-core CPU usage by the container (Only available with cgroups v1).") + m.data.SetUnit("ns") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricContainerCPUUsagePercpu) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, coreAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("core", coreAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerCPUUsagePercpu) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerCPUUsagePercpu) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerCPUUsagePercpu(cfg MetricConfig) metricContainerCPUUsagePercpu { + m := metricContainerCPUUsagePercpu{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerCPUUsageSystem struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.cpu.usage.system metric with initial data. +func (m *metricContainerCPUUsageSystem) init() { + m.data.SetName("container.cpu.usage.system") + m.data.SetDescription("System CPU usage, as reported by docker.") + m.data.SetUnit("ns") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerCPUUsageSystem) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerCPUUsageSystem) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerCPUUsageSystem) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerCPUUsageSystem(cfg MetricConfig) metricContainerCPUUsageSystem { + m := metricContainerCPUUsageSystem{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerCPUUsageTotal struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.cpu.usage.total metric with initial data. +func (m *metricContainerCPUUsageTotal) init() { + m.data.SetName("container.cpu.usage.total") + m.data.SetDescription("Total CPU time consumed.") + m.data.SetUnit("ns") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerCPUUsageTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerCPUUsageTotal) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerCPUUsageTotal) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerCPUUsageTotal(cfg MetricConfig) metricContainerCPUUsageTotal { + m := metricContainerCPUUsageTotal{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerCPUUsageUsermode struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.cpu.usage.usermode metric with initial data. +func (m *metricContainerCPUUsageUsermode) init() { + m.data.SetName("container.cpu.usage.usermode") + m.data.SetDescription("Time spent by tasks of the cgroup in user mode (Linux). Time spent by all container processes in user mode (Windows).") + m.data.SetUnit("ns") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerCPUUsageUsermode) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerCPUUsageUsermode) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerCPUUsageUsermode) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerCPUUsageUsermode(cfg MetricConfig) metricContainerCPUUsageUsermode { + m := metricContainerCPUUsageUsermode{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerCPUUtilization struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.cpu.utilization metric with initial data. +func (m *metricContainerCPUUtilization) init() { + m.data.SetName("container.cpu.utilization") + m.data.SetDescription("Percent of CPU used by the container.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricContainerCPUUtilization) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerCPUUtilization) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerCPUUtilization) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerCPUUtilization(cfg MetricConfig) metricContainerCPUUtilization { + m := metricContainerCPUUtilization{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryActiveAnon struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.active_anon metric with initial data. +func (m *metricContainerMemoryActiveAnon) init() { + m.data.SetName("container.memory.active_anon") + m.data.SetDescription("The amount of anonymous memory that has been identified as active by the kernel.") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryActiveAnon) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryActiveAnon) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryActiveAnon) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryActiveAnon(cfg MetricConfig) metricContainerMemoryActiveAnon { + m := metricContainerMemoryActiveAnon{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryActiveFile struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.active_file metric with initial data. +func (m *metricContainerMemoryActiveFile) init() { + m.data.SetName("container.memory.active_file") + m.data.SetDescription("Cache memory that has been identified as active by the kernel.") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryActiveFile) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryActiveFile) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryActiveFile) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryActiveFile(cfg MetricConfig) metricContainerMemoryActiveFile { + m := metricContainerMemoryActiveFile{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryAnon struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.anon metric with initial data. +func (m *metricContainerMemoryAnon) init() { + m.data.SetName("container.memory.anon") + m.data.SetDescription("Amount of memory used in anonymous mappings such as brk(), sbrk(), and mmap(MAP_ANONYMOUS) (Only available with cgroups v2).") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryAnon) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryAnon) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryAnon) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryAnon(cfg MetricConfig) metricContainerMemoryAnon { + m := metricContainerMemoryAnon{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryCache struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.cache metric with initial data. +func (m *metricContainerMemoryCache) init() { + m.data.SetName("container.memory.cache") + m.data.SetDescription("The amount of memory used by the processes of this control group that can be associated precisely with a block on a block device (Only available with cgroups v1).") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryCache) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryCache) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryCache) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryCache(cfg MetricConfig) metricContainerMemoryCache { + m := metricContainerMemoryCache{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryDirty struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.dirty metric with initial data. +func (m *metricContainerMemoryDirty) init() { + m.data.SetName("container.memory.dirty") + m.data.SetDescription("Bytes that are waiting to get written back to the disk, from this cgroup (Only available with cgroups v1).") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryDirty) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryDirty) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryDirty) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryDirty(cfg MetricConfig) metricContainerMemoryDirty { + m := metricContainerMemoryDirty{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryFails struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.fails metric with initial data. +func (m *metricContainerMemoryFails) init() { + m.data.SetName("container.memory.fails") + m.data.SetDescription("Number of times the memory limit was hit.") + m.data.SetUnit("{fails}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryFails) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryFails) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryFails) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryFails(cfg MetricConfig) metricContainerMemoryFails { + m := metricContainerMemoryFails{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryFile struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.file metric with initial data. +func (m *metricContainerMemoryFile) init() { + m.data.SetName("container.memory.file") + m.data.SetDescription("Amount of memory used to cache filesystem data, including tmpfs and shared memory (Only available with cgroups v2).") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryFile) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryFile) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryFile) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryFile(cfg MetricConfig) metricContainerMemoryFile { + m := metricContainerMemoryFile{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryHierarchicalMemoryLimit struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.hierarchical_memory_limit metric with initial data. +func (m *metricContainerMemoryHierarchicalMemoryLimit) init() { + m.data.SetName("container.memory.hierarchical_memory_limit") + m.data.SetDescription("The maximum amount of physical memory that can be used by the processes of this control group (Only available with cgroups v1).") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryHierarchicalMemoryLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryHierarchicalMemoryLimit) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryHierarchicalMemoryLimit) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryHierarchicalMemoryLimit(cfg MetricConfig) metricContainerMemoryHierarchicalMemoryLimit { + m := metricContainerMemoryHierarchicalMemoryLimit{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryHierarchicalMemswLimit struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.hierarchical_memsw_limit metric with initial data. +func (m *metricContainerMemoryHierarchicalMemswLimit) init() { + m.data.SetName("container.memory.hierarchical_memsw_limit") + m.data.SetDescription("The maximum amount of RAM + swap that can be used by the processes of this control group (Only available with cgroups v1).") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryHierarchicalMemswLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryHierarchicalMemswLimit) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryHierarchicalMemswLimit) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryHierarchicalMemswLimit(cfg MetricConfig) metricContainerMemoryHierarchicalMemswLimit { + m := metricContainerMemoryHierarchicalMemswLimit{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryInactiveAnon struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.inactive_anon metric with initial data. +func (m *metricContainerMemoryInactiveAnon) init() { + m.data.SetName("container.memory.inactive_anon") + m.data.SetDescription("The amount of anonymous memory that has been identified as inactive by the kernel.") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryInactiveAnon) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryInactiveAnon) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryInactiveAnon) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryInactiveAnon(cfg MetricConfig) metricContainerMemoryInactiveAnon { + m := metricContainerMemoryInactiveAnon{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryInactiveFile struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.inactive_file metric with initial data. +func (m *metricContainerMemoryInactiveFile) init() { + m.data.SetName("container.memory.inactive_file") + m.data.SetDescription("Cache memory that has been identified as inactive by the kernel.") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryInactiveFile) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryInactiveFile) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryInactiveFile) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryInactiveFile(cfg MetricConfig) metricContainerMemoryInactiveFile { + m := metricContainerMemoryInactiveFile{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryMappedFile struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.mapped_file metric with initial data. +func (m *metricContainerMemoryMappedFile) init() { + m.data.SetName("container.memory.mapped_file") + m.data.SetDescription("Indicates the amount of memory mapped by the processes in the control group (Only available with cgroups v1).") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryMappedFile) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryMappedFile) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryMappedFile) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryMappedFile(cfg MetricConfig) metricContainerMemoryMappedFile { + m := metricContainerMemoryMappedFile{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryPercent struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.percent metric with initial data. +func (m *metricContainerMemoryPercent) init() { + m.data.SetName("container.memory.percent") + m.data.SetDescription("Percentage of memory used.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricContainerMemoryPercent) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryPercent) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryPercent) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryPercent(cfg MetricConfig) metricContainerMemoryPercent { + m := metricContainerMemoryPercent{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryPgfault struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.pgfault metric with initial data. +func (m *metricContainerMemoryPgfault) init() { + m.data.SetName("container.memory.pgfault") + m.data.SetDescription("Indicate the number of times that a process of the cgroup triggered a page fault.") + m.data.SetUnit("{faults}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryPgfault) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryPgfault) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryPgfault) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryPgfault(cfg MetricConfig) metricContainerMemoryPgfault { + m := metricContainerMemoryPgfault{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryPgmajfault struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.pgmajfault metric with initial data. +func (m *metricContainerMemoryPgmajfault) init() { + m.data.SetName("container.memory.pgmajfault") + m.data.SetDescription("Indicate the number of times that a process of the cgroup triggered a major fault.") + m.data.SetUnit("{faults}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryPgmajfault) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryPgmajfault) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryPgmajfault) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryPgmajfault(cfg MetricConfig) metricContainerMemoryPgmajfault { + m := metricContainerMemoryPgmajfault{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryPgpgin struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.pgpgin metric with initial data. +func (m *metricContainerMemoryPgpgin) init() { + m.data.SetName("container.memory.pgpgin") + m.data.SetDescription("Number of pages read from disk by the cgroup (Only available with cgroups v1).") + m.data.SetUnit("{operations}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryPgpgin) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryPgpgin) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryPgpgin) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryPgpgin(cfg MetricConfig) metricContainerMemoryPgpgin { + m := metricContainerMemoryPgpgin{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryPgpgout struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.pgpgout metric with initial data. +func (m *metricContainerMemoryPgpgout) init() { + m.data.SetName("container.memory.pgpgout") + m.data.SetDescription("Number of pages written to disk by the cgroup (Only available with cgroups v1).") + m.data.SetUnit("{operations}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryPgpgout) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryPgpgout) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryPgpgout) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryPgpgout(cfg MetricConfig) metricContainerMemoryPgpgout { + m := metricContainerMemoryPgpgout{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryRss struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.rss metric with initial data. +func (m *metricContainerMemoryRss) init() { + m.data.SetName("container.memory.rss") + m.data.SetDescription("The amount of memory that doesn’t correspond to anything on disk: stacks, heaps, and anonymous memory maps (Only available with cgroups v1).") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryRss) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryRss) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryRss) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryRss(cfg MetricConfig) metricContainerMemoryRss { + m := metricContainerMemoryRss{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryRssHuge struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.rss_huge metric with initial data. +func (m *metricContainerMemoryRssHuge) init() { + m.data.SetName("container.memory.rss_huge") + m.data.SetDescription("Number of bytes of anonymous transparent hugepages in this cgroup (Only available with cgroups v1).") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryRssHuge) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryRssHuge) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryRssHuge) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryRssHuge(cfg MetricConfig) metricContainerMemoryRssHuge { + m := metricContainerMemoryRssHuge{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryTotalActiveAnon struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.total_active_anon metric with initial data. +func (m *metricContainerMemoryTotalActiveAnon) init() { + m.data.SetName("container.memory.total_active_anon") + m.data.SetDescription("The amount of anonymous memory that has been identified as active by the kernel. Includes descendant cgroups (Only available with cgroups v1).") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryTotalActiveAnon) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryTotalActiveAnon) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryTotalActiveAnon) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryTotalActiveAnon(cfg MetricConfig) metricContainerMemoryTotalActiveAnon { + m := metricContainerMemoryTotalActiveAnon{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryTotalActiveFile struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.total_active_file metric with initial data. +func (m *metricContainerMemoryTotalActiveFile) init() { + m.data.SetName("container.memory.total_active_file") + m.data.SetDescription("Cache memory that has been identified as active by the kernel. Includes descendant cgroups (Only available with cgroups v1).") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryTotalActiveFile) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryTotalActiveFile) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryTotalActiveFile) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryTotalActiveFile(cfg MetricConfig) metricContainerMemoryTotalActiveFile { + m := metricContainerMemoryTotalActiveFile{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryTotalCache struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.total_cache metric with initial data. +func (m *metricContainerMemoryTotalCache) init() { + m.data.SetName("container.memory.total_cache") + m.data.SetDescription("Total amount of memory used by the processes of this cgroup (and descendants) that can be associated with a block on a block device. Also accounts for memory used by tmpfs (Only available with cgroups v1).") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryTotalCache) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryTotalCache) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryTotalCache) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryTotalCache(cfg MetricConfig) metricContainerMemoryTotalCache { + m := metricContainerMemoryTotalCache{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryTotalDirty struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.total_dirty metric with initial data. +func (m *metricContainerMemoryTotalDirty) init() { + m.data.SetName("container.memory.total_dirty") + m.data.SetDescription("Bytes that are waiting to get written back to the disk, from this cgroup and descendants (Only available with cgroups v1).") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryTotalDirty) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryTotalDirty) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryTotalDirty) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryTotalDirty(cfg MetricConfig) metricContainerMemoryTotalDirty { + m := metricContainerMemoryTotalDirty{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryTotalInactiveAnon struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.total_inactive_anon metric with initial data. +func (m *metricContainerMemoryTotalInactiveAnon) init() { + m.data.SetName("container.memory.total_inactive_anon") + m.data.SetDescription("The amount of anonymous memory that has been identified as inactive by the kernel. Includes descendant cgroups (Only available with cgroups v1).") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryTotalInactiveAnon) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryTotalInactiveAnon) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryTotalInactiveAnon) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryTotalInactiveAnon(cfg MetricConfig) metricContainerMemoryTotalInactiveAnon { + m := metricContainerMemoryTotalInactiveAnon{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryTotalInactiveFile struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.total_inactive_file metric with initial data. +func (m *metricContainerMemoryTotalInactiveFile) init() { + m.data.SetName("container.memory.total_inactive_file") + m.data.SetDescription("Cache memory that has been identified as inactive by the kernel. Includes descendant cgroups (Only available with cgroups v1).") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryTotalInactiveFile) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryTotalInactiveFile) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryTotalInactiveFile) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryTotalInactiveFile(cfg MetricConfig) metricContainerMemoryTotalInactiveFile { + m := metricContainerMemoryTotalInactiveFile{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryTotalMappedFile struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.total_mapped_file metric with initial data. +func (m *metricContainerMemoryTotalMappedFile) init() { + m.data.SetName("container.memory.total_mapped_file") + m.data.SetDescription("Indicates the amount of memory mapped by the processes in the control group and descendant groups (Only available with cgroups v1).") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryTotalMappedFile) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryTotalMappedFile) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryTotalMappedFile) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryTotalMappedFile(cfg MetricConfig) metricContainerMemoryTotalMappedFile { + m := metricContainerMemoryTotalMappedFile{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryTotalPgfault struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.total_pgfault metric with initial data. +func (m *metricContainerMemoryTotalPgfault) init() { + m.data.SetName("container.memory.total_pgfault") + m.data.SetDescription("Indicate the number of times that a process of the cgroup (or descendant cgroups) triggered a page fault (Only available with cgroups v1).") + m.data.SetUnit("{faults}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryTotalPgfault) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryTotalPgfault) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryTotalPgfault) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryTotalPgfault(cfg MetricConfig) metricContainerMemoryTotalPgfault { + m := metricContainerMemoryTotalPgfault{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryTotalPgmajfault struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.total_pgmajfault metric with initial data. +func (m *metricContainerMemoryTotalPgmajfault) init() { + m.data.SetName("container.memory.total_pgmajfault") + m.data.SetDescription("Indicate the number of times that a process of the cgroup (or descendant cgroups) triggered a major fault (Only available with cgroups v1).") + m.data.SetUnit("{faults}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryTotalPgmajfault) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryTotalPgmajfault) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryTotalPgmajfault) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryTotalPgmajfault(cfg MetricConfig) metricContainerMemoryTotalPgmajfault { + m := metricContainerMemoryTotalPgmajfault{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryTotalPgpgin struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.total_pgpgin metric with initial data. +func (m *metricContainerMemoryTotalPgpgin) init() { + m.data.SetName("container.memory.total_pgpgin") + m.data.SetDescription("Number of pages read from disk by the cgroup and descendant groups (Only available with cgroups v1).") + m.data.SetUnit("{operations}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryTotalPgpgin) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryTotalPgpgin) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryTotalPgpgin) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryTotalPgpgin(cfg MetricConfig) metricContainerMemoryTotalPgpgin { + m := metricContainerMemoryTotalPgpgin{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryTotalPgpgout struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.total_pgpgout metric with initial data. +func (m *metricContainerMemoryTotalPgpgout) init() { + m.data.SetName("container.memory.total_pgpgout") + m.data.SetDescription("Number of pages written to disk by the cgroup and descendant groups (Only available with cgroups v1).") + m.data.SetUnit("{operations}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryTotalPgpgout) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryTotalPgpgout) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryTotalPgpgout) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryTotalPgpgout(cfg MetricConfig) metricContainerMemoryTotalPgpgout { + m := metricContainerMemoryTotalPgpgout{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryTotalRss struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.total_rss metric with initial data. +func (m *metricContainerMemoryTotalRss) init() { + m.data.SetName("container.memory.total_rss") + m.data.SetDescription("The amount of memory that doesn’t correspond to anything on disk: stacks, heaps, and anonymous memory maps. Includes descendant cgroups (Only available with cgroups v1).") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryTotalRss) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryTotalRss) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryTotalRss) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryTotalRss(cfg MetricConfig) metricContainerMemoryTotalRss { + m := metricContainerMemoryTotalRss{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryTotalRssHuge struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.total_rss_huge metric with initial data. +func (m *metricContainerMemoryTotalRssHuge) init() { + m.data.SetName("container.memory.total_rss_huge") + m.data.SetDescription("Number of bytes of anonymous transparent hugepages in this cgroup and descendant cgroups (Only available with cgroups v1).") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryTotalRssHuge) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryTotalRssHuge) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryTotalRssHuge) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryTotalRssHuge(cfg MetricConfig) metricContainerMemoryTotalRssHuge { + m := metricContainerMemoryTotalRssHuge{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryTotalUnevictable struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.total_unevictable metric with initial data. +func (m *metricContainerMemoryTotalUnevictable) init() { + m.data.SetName("container.memory.total_unevictable") + m.data.SetDescription("The amount of memory that cannot be reclaimed. Includes descendant cgroups (Only available with cgroups v1).") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryTotalUnevictable) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryTotalUnevictable) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryTotalUnevictable) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryTotalUnevictable(cfg MetricConfig) metricContainerMemoryTotalUnevictable { + m := metricContainerMemoryTotalUnevictable{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryTotalWriteback struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.total_writeback metric with initial data. +func (m *metricContainerMemoryTotalWriteback) init() { + m.data.SetName("container.memory.total_writeback") + m.data.SetDescription("Number of bytes of file/anon cache that are queued for syncing to disk in this cgroup and descendants (Only available with cgroups v1).") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryTotalWriteback) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryTotalWriteback) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryTotalWriteback) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryTotalWriteback(cfg MetricConfig) metricContainerMemoryTotalWriteback { + m := metricContainerMemoryTotalWriteback{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryUnevictable struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.unevictable metric with initial data. +func (m *metricContainerMemoryUnevictable) init() { + m.data.SetName("container.memory.unevictable") + m.data.SetDescription("The amount of memory that cannot be reclaimed.") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryUnevictable) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryUnevictable) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryUnevictable) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryUnevictable(cfg MetricConfig) metricContainerMemoryUnevictable { + m := metricContainerMemoryUnevictable{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryUsageLimit struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.usage.limit metric with initial data. +func (m *metricContainerMemoryUsageLimit) init() { + m.data.SetName("container.memory.usage.limit") + m.data.SetDescription("Memory limit of the container.") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryUsageLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryUsageLimit) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryUsageLimit) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryUsageLimit(cfg MetricConfig) metricContainerMemoryUsageLimit { + m := metricContainerMemoryUsageLimit{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryUsageMax struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.usage.max metric with initial data. +func (m *metricContainerMemoryUsageMax) init() { + m.data.SetName("container.memory.usage.max") + m.data.SetDescription("Maximum memory usage.") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryUsageMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryUsageMax) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryUsageMax) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryUsageMax(cfg MetricConfig) metricContainerMemoryUsageMax { + m := metricContainerMemoryUsageMax{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryUsageTotal struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.usage.total metric with initial data. +func (m *metricContainerMemoryUsageTotal) init() { + m.data.SetName("container.memory.usage.total") + m.data.SetDescription("Memory usage of the container. This excludes the cache.") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryUsageTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryUsageTotal) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryUsageTotal) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryUsageTotal(cfg MetricConfig) metricContainerMemoryUsageTotal { + m := metricContainerMemoryUsageTotal{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryWriteback struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.writeback metric with initial data. +func (m *metricContainerMemoryWriteback) init() { + m.data.SetName("container.memory.writeback") + m.data.SetDescription("Number of bytes of file/anon cache that are queued for syncing to disk in this cgroup (Only available with cgroups v1).") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryWriteback) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryWriteback) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerMemoryWriteback) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryWriteback(cfg MetricConfig) metricContainerMemoryWriteback { + m := metricContainerMemoryWriteback{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerNetworkIoUsageRxBytes struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.network.io.usage.rx_bytes metric with initial data. +func (m *metricContainerNetworkIoUsageRxBytes) init() { + m.data.SetName("container.network.io.usage.rx_bytes") + m.data.SetDescription("Bytes received by the container.") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricContainerNetworkIoUsageRxBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("interface", interfaceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerNetworkIoUsageRxBytes) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerNetworkIoUsageRxBytes) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerNetworkIoUsageRxBytes(cfg MetricConfig) metricContainerNetworkIoUsageRxBytes { + m := metricContainerNetworkIoUsageRxBytes{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerNetworkIoUsageRxDropped struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.network.io.usage.rx_dropped metric with initial data. +func (m *metricContainerNetworkIoUsageRxDropped) init() { + m.data.SetName("container.network.io.usage.rx_dropped") + m.data.SetDescription("Incoming packets dropped.") + m.data.SetUnit("{packets}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricContainerNetworkIoUsageRxDropped) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("interface", interfaceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerNetworkIoUsageRxDropped) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerNetworkIoUsageRxDropped) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerNetworkIoUsageRxDropped(cfg MetricConfig) metricContainerNetworkIoUsageRxDropped { + m := metricContainerNetworkIoUsageRxDropped{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerNetworkIoUsageRxErrors struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.network.io.usage.rx_errors metric with initial data. +func (m *metricContainerNetworkIoUsageRxErrors) init() { + m.data.SetName("container.network.io.usage.rx_errors") + m.data.SetDescription("Received errors.") + m.data.SetUnit("{errors}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricContainerNetworkIoUsageRxErrors) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("interface", interfaceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerNetworkIoUsageRxErrors) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerNetworkIoUsageRxErrors) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerNetworkIoUsageRxErrors(cfg MetricConfig) metricContainerNetworkIoUsageRxErrors { + m := metricContainerNetworkIoUsageRxErrors{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerNetworkIoUsageRxPackets struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.network.io.usage.rx_packets metric with initial data. +func (m *metricContainerNetworkIoUsageRxPackets) init() { + m.data.SetName("container.network.io.usage.rx_packets") + m.data.SetDescription("Packets received.") + m.data.SetUnit("{packets}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricContainerNetworkIoUsageRxPackets) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("interface", interfaceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerNetworkIoUsageRxPackets) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerNetworkIoUsageRxPackets) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerNetworkIoUsageRxPackets(cfg MetricConfig) metricContainerNetworkIoUsageRxPackets { + m := metricContainerNetworkIoUsageRxPackets{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerNetworkIoUsageTxBytes struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.network.io.usage.tx_bytes metric with initial data. +func (m *metricContainerNetworkIoUsageTxBytes) init() { + m.data.SetName("container.network.io.usage.tx_bytes") + m.data.SetDescription("Bytes sent.") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricContainerNetworkIoUsageTxBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("interface", interfaceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerNetworkIoUsageTxBytes) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerNetworkIoUsageTxBytes) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerNetworkIoUsageTxBytes(cfg MetricConfig) metricContainerNetworkIoUsageTxBytes { + m := metricContainerNetworkIoUsageTxBytes{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerNetworkIoUsageTxDropped struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.network.io.usage.tx_dropped metric with initial data. +func (m *metricContainerNetworkIoUsageTxDropped) init() { + m.data.SetName("container.network.io.usage.tx_dropped") + m.data.SetDescription("Outgoing packets dropped.") + m.data.SetUnit("{packets}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricContainerNetworkIoUsageTxDropped) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("interface", interfaceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerNetworkIoUsageTxDropped) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerNetworkIoUsageTxDropped) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerNetworkIoUsageTxDropped(cfg MetricConfig) metricContainerNetworkIoUsageTxDropped { + m := metricContainerNetworkIoUsageTxDropped{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerNetworkIoUsageTxErrors struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.network.io.usage.tx_errors metric with initial data. +func (m *metricContainerNetworkIoUsageTxErrors) init() { + m.data.SetName("container.network.io.usage.tx_errors") + m.data.SetDescription("Sent errors.") + m.data.SetUnit("{errors}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricContainerNetworkIoUsageTxErrors) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("interface", interfaceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerNetworkIoUsageTxErrors) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerNetworkIoUsageTxErrors) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerNetworkIoUsageTxErrors(cfg MetricConfig) metricContainerNetworkIoUsageTxErrors { + m := metricContainerNetworkIoUsageTxErrors{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerNetworkIoUsageTxPackets struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.network.io.usage.tx_packets metric with initial data. +func (m *metricContainerNetworkIoUsageTxPackets) init() { + m.data.SetName("container.network.io.usage.tx_packets") + m.data.SetDescription("Packets sent.") + m.data.SetUnit("{packets}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricContainerNetworkIoUsageTxPackets) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("interface", interfaceAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerNetworkIoUsageTxPackets) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerNetworkIoUsageTxPackets) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerNetworkIoUsageTxPackets(cfg MetricConfig) metricContainerNetworkIoUsageTxPackets { + m := metricContainerNetworkIoUsageTxPackets{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerPidsCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.pids.count metric with initial data. +func (m *metricContainerPidsCount) init() { + m.data.SetName("container.pids.count") + m.data.SetDescription("Number of pids in the container's cgroup.") + m.data.SetUnit("{pids}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerPidsCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerPidsCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerPidsCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerPidsCount(cfg MetricConfig) metricContainerPidsCount { + m := metricContainerPidsCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerPidsLimit struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.pids.limit metric with initial data. +func (m *metricContainerPidsLimit) init() { + m.data.SetName("container.pids.limit") + m.data.SetDescription("Maximum number of pids in the container's cgroup.") + m.data.SetUnit("{pids}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerPidsLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerPidsLimit) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerPidsLimit) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerPidsLimit(cfg MetricConfig) metricContainerPidsLimit { + m := metricContainerPidsLimit{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerRestarts struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.restarts metric with initial data. +func (m *metricContainerRestarts) init() { + m.data.SetName("container.restarts") + m.data.SetDescription("Number of restarts for the container.") + m.data.SetUnit("{restarts}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerRestarts) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerRestarts) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerRestarts) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerRestarts(cfg MetricConfig) metricContainerRestarts { + m := metricContainerRestarts{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerUptime struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.uptime metric with initial data. +func (m *metricContainerUptime) init() { + m.data.SetName("container.uptime") + m.data.SetDescription("Time elapsed since container start time.") + m.data.SetUnit("s") + m.data.SetEmptyGauge() +} + +func (m *metricContainerUptime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerUptime) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerUptime) emit(metrics pmetric.MetricSlice) {
+	if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+		m.updateCapacity()
+		m.data.MoveTo(metrics.AppendEmpty())
+		m.init()
+	}
+}
+
+// newMetricContainerUptime returns a recorder for container.uptime;
+// the data buffer is only allocated when the metric is enabled.
+func newMetricContainerUptime(cfg MetricConfig) metricContainerUptime {
+	m := metricContainerUptime{config: cfg}
+	if cfg.Enabled {
+		m.data = pmetric.NewMetric()
+		m.init()
+	}
+	return m
+}
+
+// MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations
+// required to produce metric representation defined in metadata and user config.
+// NOTE(review): mdatagen-generated; one recorder field per metric declared in metadata.yaml.
+type MetricsBuilder struct {
+	config          MetricsBuilderConfig // config of the metrics builder.
+	startTime       pcommon.Timestamp    // start time that will be applied to all recorded data points.
+	metricsCapacity int                  // maximum observed number of metrics per resource.
+	metricsBuffer   pmetric.Metrics      // accumulates metrics data before emitting.
+	buildInfo       component.BuildInfo  // contains version information.
+	// resource-attribute filters keyed by attribute name; consulted in EmitForResource to
+	// drop whole resources whose attribute values do not match include / do match exclude.
+	resourceAttributeIncludeFilter                   map[string]filter.Filter
+	resourceAttributeExcludeFilter                   map[string]filter.Filter
+	metricContainerBlockioIoMergedRecursive          metricContainerBlockioIoMergedRecursive
+	metricContainerBlockioIoQueuedRecursive          metricContainerBlockioIoQueuedRecursive
+	metricContainerBlockioIoServiceBytesRecursive    metricContainerBlockioIoServiceBytesRecursive
+	metricContainerBlockioIoServiceTimeRecursive     metricContainerBlockioIoServiceTimeRecursive
+	metricContainerBlockioIoServicedRecursive        metricContainerBlockioIoServicedRecursive
+	metricContainerBlockioIoTimeRecursive            metricContainerBlockioIoTimeRecursive
+	metricContainerBlockioIoWaitTimeRecursive        metricContainerBlockioIoWaitTimeRecursive
+	metricContainerBlockioSectorsRecursive           metricContainerBlockioSectorsRecursive
+	metricContainerCPULimit                          metricContainerCPULimit
+	metricContainerCPULogicalCount                   metricContainerCPULogicalCount
+	metricContainerCPUShares                         metricContainerCPUShares
+	metricContainerCPUThrottlingDataPeriods          metricContainerCPUThrottlingDataPeriods
+	metricContainerCPUThrottlingDataThrottledPeriods metricContainerCPUThrottlingDataThrottledPeriods
+	metricContainerCPUThrottlingDataThrottledTime    metricContainerCPUThrottlingDataThrottledTime
+	metricContainerCPUUsageKernelmode                metricContainerCPUUsageKernelmode
+	metricContainerCPUUsagePercpu                    metricContainerCPUUsagePercpu
+	metricContainerCPUUsageSystem                    metricContainerCPUUsageSystem
+	metricContainerCPUUsageTotal                     metricContainerCPUUsageTotal
+	metricContainerCPUUsageUsermode                  metricContainerCPUUsageUsermode
+	metricContainerCPUUtilization                    metricContainerCPUUtilization
+	metricContainerMemoryActiveAnon                  metricContainerMemoryActiveAnon
+	metricContainerMemoryActiveFile                  metricContainerMemoryActiveFile
+	metricContainerMemoryAnon                        metricContainerMemoryAnon
+	metricContainerMemoryCache                       metricContainerMemoryCache
+	metricContainerMemoryDirty                       metricContainerMemoryDirty
+	metricContainerMemoryFails                       metricContainerMemoryFails
+	metricContainerMemoryFile                        metricContainerMemoryFile
+	metricContainerMemoryHierarchicalMemoryLimit     metricContainerMemoryHierarchicalMemoryLimit
+	metricContainerMemoryHierarchicalMemswLimit      metricContainerMemoryHierarchicalMemswLimit
+	metricContainerMemoryInactiveAnon                metricContainerMemoryInactiveAnon
+	metricContainerMemoryInactiveFile                metricContainerMemoryInactiveFile
+	metricContainerMemoryMappedFile                  metricContainerMemoryMappedFile
+	metricContainerMemoryPercent                     metricContainerMemoryPercent
+	metricContainerMemoryPgfault                     metricContainerMemoryPgfault
+	metricContainerMemoryPgmajfault                  metricContainerMemoryPgmajfault
+	metricContainerMemoryPgpgin                      metricContainerMemoryPgpgin
+	metricContainerMemoryPgpgout                     metricContainerMemoryPgpgout
+	metricContainerMemoryRss                         metricContainerMemoryRss
+	metricContainerMemoryRssHuge                     metricContainerMemoryRssHuge
+	metricContainerMemoryTotalActiveAnon             metricContainerMemoryTotalActiveAnon
+	metricContainerMemoryTotalActiveFile             metricContainerMemoryTotalActiveFile
+	metricContainerMemoryTotalCache                  metricContainerMemoryTotalCache
+	metricContainerMemoryTotalDirty                  metricContainerMemoryTotalDirty
+	metricContainerMemoryTotalInactiveAnon           metricContainerMemoryTotalInactiveAnon
+	metricContainerMemoryTotalInactiveFile           metricContainerMemoryTotalInactiveFile
+	metricContainerMemoryTotalMappedFile             metricContainerMemoryTotalMappedFile
+	metricContainerMemoryTotalPgfault                metricContainerMemoryTotalPgfault
+	metricContainerMemoryTotalPgmajfault             metricContainerMemoryTotalPgmajfault
+	metricContainerMemoryTotalPgpgin                 metricContainerMemoryTotalPgpgin
+	metricContainerMemoryTotalPgpgout                metricContainerMemoryTotalPgpgout
+	metricContainerMemoryTotalRss                    metricContainerMemoryTotalRss
+	metricContainerMemoryTotalRssHuge                metricContainerMemoryTotalRssHuge
+	metricContainerMemoryTotalUnevictable            metricContainerMemoryTotalUnevictable
+	metricContainerMemoryTotalWriteback              metricContainerMemoryTotalWriteback
+	metricContainerMemoryUnevictable                 metricContainerMemoryUnevictable
+	metricContainerMemoryUsageLimit                  metricContainerMemoryUsageLimit
+	metricContainerMemoryUsageMax                    metricContainerMemoryUsageMax
+	metricContainerMemoryUsageTotal                  metricContainerMemoryUsageTotal
+	metricContainerMemoryWriteback                   metricContainerMemoryWriteback
+	metricContainerNetworkIoUsageRxBytes             metricContainerNetworkIoUsageRxBytes
+	metricContainerNetworkIoUsageRxDropped           metricContainerNetworkIoUsageRxDropped
+	metricContainerNetworkIoUsageRxErrors            metricContainerNetworkIoUsageRxErrors
+	metricContainerNetworkIoUsageRxPackets           metricContainerNetworkIoUsageRxPackets
+	metricContainerNetworkIoUsageTxBytes             metricContainerNetworkIoUsageTxBytes
+	metricContainerNetworkIoUsageTxDropped           metricContainerNetworkIoUsageTxDropped
+	metricContainerNetworkIoUsageTxErrors            metricContainerNetworkIoUsageTxErrors
+	metricContainerNetworkIoUsageTxPackets           metricContainerNetworkIoUsageTxPackets
+	metricContainerPidsCount                         metricContainerPidsCount
+	metricContainerPidsLimit                         metricContainerPidsLimit
+	metricContainerRestarts                          metricContainerRestarts
+	metricContainerUptime                            metricContainerUptime
+}
+
+// metricBuilderOption applies changes to default metrics builder.
+type metricBuilderOption func(*MetricsBuilder)
+
+// WithStartTime sets startTime on the metrics builder.
+func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption {
+	return func(mb *MetricsBuilder) {
+		mb.startTime = startTime
+	}
+}
+
+// NewMetricsBuilder creates a MetricsBuilder with one recorder per metric declared in
+// metadata.yaml (enabled/disabled per the provided config) and compiles the per-resource-attribute
+// include/exclude filters used by EmitForResource.
+func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSettings, options ...metricBuilderOption) *MetricsBuilder {
+	mb := &MetricsBuilder{
+		config:        mbc,
+		startTime:     pcommon.NewTimestampFromTime(time.Now()),
+		metricsBuffer: pmetric.NewMetrics(),
+		buildInfo:     settings.BuildInfo,
+		metricContainerBlockioIoMergedRecursive:          newMetricContainerBlockioIoMergedRecursive(mbc.Metrics.ContainerBlockioIoMergedRecursive),
+		metricContainerBlockioIoQueuedRecursive:          newMetricContainerBlockioIoQueuedRecursive(mbc.Metrics.ContainerBlockioIoQueuedRecursive),
+		metricContainerBlockioIoServiceBytesRecursive:    newMetricContainerBlockioIoServiceBytesRecursive(mbc.Metrics.ContainerBlockioIoServiceBytesRecursive),
+		metricContainerBlockioIoServiceTimeRecursive:     newMetricContainerBlockioIoServiceTimeRecursive(mbc.Metrics.ContainerBlockioIoServiceTimeRecursive),
+		metricContainerBlockioIoServicedRecursive:        newMetricContainerBlockioIoServicedRecursive(mbc.Metrics.ContainerBlockioIoServicedRecursive),
+		metricContainerBlockioIoTimeRecursive:            newMetricContainerBlockioIoTimeRecursive(mbc.Metrics.ContainerBlockioIoTimeRecursive),
+		metricContainerBlockioIoWaitTimeRecursive:        newMetricContainerBlockioIoWaitTimeRecursive(mbc.Metrics.ContainerBlockioIoWaitTimeRecursive),
+		metricContainerBlockioSectorsRecursive:           newMetricContainerBlockioSectorsRecursive(mbc.Metrics.ContainerBlockioSectorsRecursive),
+		metricContainerCPULimit:                          newMetricContainerCPULimit(mbc.Metrics.ContainerCPULimit),
+		metricContainerCPULogicalCount:                   newMetricContainerCPULogicalCount(mbc.Metrics.ContainerCPULogicalCount),
+		metricContainerCPUShares:                         newMetricContainerCPUShares(mbc.Metrics.ContainerCPUShares),
+		metricContainerCPUThrottlingDataPeriods:          newMetricContainerCPUThrottlingDataPeriods(mbc.Metrics.ContainerCPUThrottlingDataPeriods),
+		metricContainerCPUThrottlingDataThrottledPeriods: newMetricContainerCPUThrottlingDataThrottledPeriods(mbc.Metrics.ContainerCPUThrottlingDataThrottledPeriods),
+		metricContainerCPUThrottlingDataThrottledTime:    newMetricContainerCPUThrottlingDataThrottledTime(mbc.Metrics.ContainerCPUThrottlingDataThrottledTime),
+		metricContainerCPUUsageKernelmode:                newMetricContainerCPUUsageKernelmode(mbc.Metrics.ContainerCPUUsageKernelmode),
+		metricContainerCPUUsagePercpu:                    newMetricContainerCPUUsagePercpu(mbc.Metrics.ContainerCPUUsagePercpu),
+		metricContainerCPUUsageSystem:                    newMetricContainerCPUUsageSystem(mbc.Metrics.ContainerCPUUsageSystem),
+		metricContainerCPUUsageTotal:                     newMetricContainerCPUUsageTotal(mbc.Metrics.ContainerCPUUsageTotal),
+		metricContainerCPUUsageUsermode:                  newMetricContainerCPUUsageUsermode(mbc.Metrics.ContainerCPUUsageUsermode),
+		metricContainerCPUUtilization:                    newMetricContainerCPUUtilization(mbc.Metrics.ContainerCPUUtilization),
+		metricContainerMemoryActiveAnon:                  newMetricContainerMemoryActiveAnon(mbc.Metrics.ContainerMemoryActiveAnon),
+		metricContainerMemoryActiveFile:                  newMetricContainerMemoryActiveFile(mbc.Metrics.ContainerMemoryActiveFile),
+		metricContainerMemoryAnon:                        newMetricContainerMemoryAnon(mbc.Metrics.ContainerMemoryAnon),
+		metricContainerMemoryCache:                       newMetricContainerMemoryCache(mbc.Metrics.ContainerMemoryCache),
+		metricContainerMemoryDirty:                       newMetricContainerMemoryDirty(mbc.Metrics.ContainerMemoryDirty),
+		metricContainerMemoryFails:                       newMetricContainerMemoryFails(mbc.Metrics.ContainerMemoryFails),
+		metricContainerMemoryFile:                        newMetricContainerMemoryFile(mbc.Metrics.ContainerMemoryFile),
+		metricContainerMemoryHierarchicalMemoryLimit:     newMetricContainerMemoryHierarchicalMemoryLimit(mbc.Metrics.ContainerMemoryHierarchicalMemoryLimit),
+		metricContainerMemoryHierarchicalMemswLimit:      newMetricContainerMemoryHierarchicalMemswLimit(mbc.Metrics.ContainerMemoryHierarchicalMemswLimit),
+		metricContainerMemoryInactiveAnon:                newMetricContainerMemoryInactiveAnon(mbc.Metrics.ContainerMemoryInactiveAnon),
+		metricContainerMemoryInactiveFile:                newMetricContainerMemoryInactiveFile(mbc.Metrics.ContainerMemoryInactiveFile),
+		metricContainerMemoryMappedFile:                  newMetricContainerMemoryMappedFile(mbc.Metrics.ContainerMemoryMappedFile),
+		metricContainerMemoryPercent:                     newMetricContainerMemoryPercent(mbc.Metrics.ContainerMemoryPercent),
+		metricContainerMemoryPgfault:                     newMetricContainerMemoryPgfault(mbc.Metrics.ContainerMemoryPgfault),
+		metricContainerMemoryPgmajfault:                  newMetricContainerMemoryPgmajfault(mbc.Metrics.ContainerMemoryPgmajfault),
+		metricContainerMemoryPgpgin:                      newMetricContainerMemoryPgpgin(mbc.Metrics.ContainerMemoryPgpgin),
+		metricContainerMemoryPgpgout:                     newMetricContainerMemoryPgpgout(mbc.Metrics.ContainerMemoryPgpgout),
+		metricContainerMemoryRss:                         newMetricContainerMemoryRss(mbc.Metrics.ContainerMemoryRss),
+		metricContainerMemoryRssHuge:                     newMetricContainerMemoryRssHuge(mbc.Metrics.ContainerMemoryRssHuge),
+		metricContainerMemoryTotalActiveAnon:             newMetricContainerMemoryTotalActiveAnon(mbc.Metrics.ContainerMemoryTotalActiveAnon),
+		metricContainerMemoryTotalActiveFile:             newMetricContainerMemoryTotalActiveFile(mbc.Metrics.ContainerMemoryTotalActiveFile),
+		metricContainerMemoryTotalCache:                  newMetricContainerMemoryTotalCache(mbc.Metrics.ContainerMemoryTotalCache),
+		metricContainerMemoryTotalDirty:                  newMetricContainerMemoryTotalDirty(mbc.Metrics.ContainerMemoryTotalDirty),
+		metricContainerMemoryTotalInactiveAnon:           newMetricContainerMemoryTotalInactiveAnon(mbc.Metrics.ContainerMemoryTotalInactiveAnon),
+		metricContainerMemoryTotalInactiveFile:           newMetricContainerMemoryTotalInactiveFile(mbc.Metrics.ContainerMemoryTotalInactiveFile),
+		metricContainerMemoryTotalMappedFile:             newMetricContainerMemoryTotalMappedFile(mbc.Metrics.ContainerMemoryTotalMappedFile),
+		metricContainerMemoryTotalPgfault:                newMetricContainerMemoryTotalPgfault(mbc.Metrics.ContainerMemoryTotalPgfault),
+		metricContainerMemoryTotalPgmajfault:             newMetricContainerMemoryTotalPgmajfault(mbc.Metrics.ContainerMemoryTotalPgmajfault),
+		metricContainerMemoryTotalPgpgin:                 newMetricContainerMemoryTotalPgpgin(mbc.Metrics.ContainerMemoryTotalPgpgin),
+		metricContainerMemoryTotalPgpgout:                newMetricContainerMemoryTotalPgpgout(mbc.Metrics.ContainerMemoryTotalPgpgout),
+		metricContainerMemoryTotalRss:                    newMetricContainerMemoryTotalRss(mbc.Metrics.ContainerMemoryTotalRss),
+		metricContainerMemoryTotalRssHuge:                newMetricContainerMemoryTotalRssHuge(mbc.Metrics.ContainerMemoryTotalRssHuge),
+		metricContainerMemoryTotalUnevictable:            newMetricContainerMemoryTotalUnevictable(mbc.Metrics.ContainerMemoryTotalUnevictable),
+		metricContainerMemoryTotalWriteback:              newMetricContainerMemoryTotalWriteback(mbc.Metrics.ContainerMemoryTotalWriteback),
+		metricContainerMemoryUnevictable:                 newMetricContainerMemoryUnevictable(mbc.Metrics.ContainerMemoryUnevictable),
+		metricContainerMemoryUsageLimit:                  newMetricContainerMemoryUsageLimit(mbc.Metrics.ContainerMemoryUsageLimit),
+		metricContainerMemoryUsageMax:                    newMetricContainerMemoryUsageMax(mbc.Metrics.ContainerMemoryUsageMax),
+		metricContainerMemoryUsageTotal:                  newMetricContainerMemoryUsageTotal(mbc.Metrics.ContainerMemoryUsageTotal),
+		metricContainerMemoryWriteback:                   newMetricContainerMemoryWriteback(mbc.Metrics.ContainerMemoryWriteback),
+		metricContainerNetworkIoUsageRxBytes:             newMetricContainerNetworkIoUsageRxBytes(mbc.Metrics.ContainerNetworkIoUsageRxBytes),
+		metricContainerNetworkIoUsageRxDropped:           newMetricContainerNetworkIoUsageRxDropped(mbc.Metrics.ContainerNetworkIoUsageRxDropped),
+		metricContainerNetworkIoUsageRxErrors:            newMetricContainerNetworkIoUsageRxErrors(mbc.Metrics.ContainerNetworkIoUsageRxErrors),
+		metricContainerNetworkIoUsageRxPackets:           newMetricContainerNetworkIoUsageRxPackets(mbc.Metrics.ContainerNetworkIoUsageRxPackets),
+		metricContainerNetworkIoUsageTxBytes:             newMetricContainerNetworkIoUsageTxBytes(mbc.Metrics.ContainerNetworkIoUsageTxBytes),
+		metricContainerNetworkIoUsageTxDropped:           newMetricContainerNetworkIoUsageTxDropped(mbc.Metrics.ContainerNetworkIoUsageTxDropped),
+		metricContainerNetworkIoUsageTxErrors:            newMetricContainerNetworkIoUsageTxErrors(mbc.Metrics.ContainerNetworkIoUsageTxErrors),
+		metricContainerNetworkIoUsageTxPackets:           newMetricContainerNetworkIoUsageTxPackets(mbc.Metrics.ContainerNetworkIoUsageTxPackets),
+		metricContainerPidsCount:                         newMetricContainerPidsCount(mbc.Metrics.ContainerPidsCount),
+		metricContainerPidsLimit:                         newMetricContainerPidsLimit(mbc.Metrics.ContainerPidsLimit),
+		metricContainerRestarts:                          newMetricContainerRestarts(mbc.Metrics.ContainerRestarts),
+		metricContainerUptime:                            newMetricContainerUptime(mbc.Metrics.ContainerUptime),
+		resourceAttributeIncludeFilter:                   make(map[string]filter.Filter),
+		resourceAttributeExcludeFilter:                   make(map[string]filter.Filter),
+	}
+	if mbc.ResourceAttributes.ContainerCommandLine.MetricsInclude != nil {
+		mb.resourceAttributeIncludeFilter["container.command_line"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerCommandLine.MetricsInclude)
+	}
+	if mbc.ResourceAttributes.ContainerCommandLine.MetricsExclude != nil {
+		mb.resourceAttributeExcludeFilter["container.command_line"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerCommandLine.MetricsExclude)
+	}
+	if mbc.ResourceAttributes.ContainerHostname.MetricsInclude != nil {
+		mb.resourceAttributeIncludeFilter["container.hostname"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerHostname.MetricsInclude)
+	}
+	if mbc.ResourceAttributes.ContainerHostname.MetricsExclude != nil {
+		mb.resourceAttributeExcludeFilter["container.hostname"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerHostname.MetricsExclude)
+	}
+	if mbc.ResourceAttributes.ContainerID.MetricsInclude != nil {
+		mb.resourceAttributeIncludeFilter["container.id"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerID.MetricsInclude)
+	}
+	if mbc.ResourceAttributes.ContainerID.MetricsExclude != nil {
+		mb.resourceAttributeExcludeFilter["container.id"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerID.MetricsExclude)
+	}
+	if mbc.ResourceAttributes.ContainerImageID.MetricsInclude != nil {
+		mb.resourceAttributeIncludeFilter["container.image.id"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerImageID.MetricsInclude)
+	}
+	if mbc.ResourceAttributes.ContainerImageID.MetricsExclude != nil {
+		mb.resourceAttributeExcludeFilter["container.image.id"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerImageID.MetricsExclude)
+	}
+	if mbc.ResourceAttributes.ContainerImageName.MetricsInclude != nil {
+		mb.resourceAttributeIncludeFilter["container.image.name"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerImageName.MetricsInclude)
+	}
+	if mbc.ResourceAttributes.ContainerImageName.MetricsExclude != nil {
+		mb.resourceAttributeExcludeFilter["container.image.name"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerImageName.MetricsExclude)
+	}
+	if mbc.ResourceAttributes.ContainerName.MetricsInclude != nil {
+		mb.resourceAttributeIncludeFilter["container.name"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerName.MetricsInclude)
+	}
+	if mbc.ResourceAttributes.ContainerName.MetricsExclude != nil {
+		mb.resourceAttributeExcludeFilter["container.name"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerName.MetricsExclude)
+	}
+	if mbc.ResourceAttributes.ContainerRuntime.MetricsInclude != nil {
+		mb.resourceAttributeIncludeFilter["container.runtime"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerRuntime.MetricsInclude)
+	}
+	if mbc.ResourceAttributes.ContainerRuntime.MetricsExclude != nil {
+		mb.resourceAttributeExcludeFilter["container.runtime"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerRuntime.MetricsExclude)
+	}
+
+	for _, op := range options {
+		op(mb)
+	}
+	return mb
+}
+
+// NewResourceBuilder returns a new resource builder that should be used to build a resource associated with the emitted metrics.
+func (mb *MetricsBuilder) NewResourceBuilder() *ResourceBuilder {
+	return NewResourceBuilder(mb.config.ResourceAttributes)
+}
+
+// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity.
+func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) {
+	if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() {
+		mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len()
+	}
+}
+
+// ResourceMetricsOption applies changes to provided resource metrics.
+type ResourceMetricsOption func(pmetric.ResourceMetrics)
+
+// WithResource sets the provided resource on the emitted ResourceMetrics.
+// It's recommended to use ResourceBuilder to create the resource.
+func WithResource(res pcommon.Resource) ResourceMetricsOption {
+	return func(rm pmetric.ResourceMetrics) {
+		res.CopyTo(rm.Resource())
+	}
+}
+
+// WithStartTimeOverride overrides start time for all the resource metrics data points.
+// This option should be only used if different start time has to be set on metrics coming from different resources.
+// NOTE(review): the switch handles only Gauge and Sum, which are the only types this builder
+// produces; confirm coverage if new metric types are ever added to metadata.yaml.
+func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption {
+	return func(rm pmetric.ResourceMetrics) {
+		var dps pmetric.NumberDataPointSlice
+		metrics := rm.ScopeMetrics().At(0).Metrics()
+		for i := 0; i < metrics.Len(); i++ {
+			switch metrics.At(i).Type() {
+			case pmetric.MetricTypeGauge:
+				dps = metrics.At(i).Gauge().DataPoints()
+			case pmetric.MetricTypeSum:
+				dps = metrics.At(i).Sum().DataPoints()
+			}
+			for j := 0; j < dps.Len(); j++ {
+				dps.At(j).SetStartTimestamp(start)
+			}
+		}
+	}
+}
+
+// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for
+// recording another set of data points as part of another resource. This function can be helpful when one scraper
+// needs to emit metrics from several resources. Otherwise calling this function is not required,
+// just `Emit` function can be called instead.
+// Resource attributes should be provided as ResourceMetricsOption arguments.
+func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) {
+	rm := pmetric.NewResourceMetrics()
+	rm.SetSchemaUrl(conventions.SchemaURL)
+	ils := rm.ScopeMetrics().AppendEmpty()
+	ils.Scope().SetName("otelcol/docker/receiver")
+	ils.Scope().SetVersion(mb.buildInfo.Version)
+	ils.Metrics().EnsureCapacity(mb.metricsCapacity)
+	mb.metricContainerBlockioIoMergedRecursive.emit(ils.Metrics())
+	mb.metricContainerBlockioIoQueuedRecursive.emit(ils.Metrics())
+	mb.metricContainerBlockioIoServiceBytesRecursive.emit(ils.Metrics())
+	mb.metricContainerBlockioIoServiceTimeRecursive.emit(ils.Metrics())
+	mb.metricContainerBlockioIoServicedRecursive.emit(ils.Metrics())
+	mb.metricContainerBlockioIoTimeRecursive.emit(ils.Metrics())
+	mb.metricContainerBlockioIoWaitTimeRecursive.emit(ils.Metrics())
+	mb.metricContainerBlockioSectorsRecursive.emit(ils.Metrics())
+	mb.metricContainerCPULimit.emit(ils.Metrics())
+	mb.metricContainerCPULogicalCount.emit(ils.Metrics())
+	mb.metricContainerCPUShares.emit(ils.Metrics())
+	mb.metricContainerCPUThrottlingDataPeriods.emit(ils.Metrics())
+	mb.metricContainerCPUThrottlingDataThrottledPeriods.emit(ils.Metrics())
+	mb.metricContainerCPUThrottlingDataThrottledTime.emit(ils.Metrics())
+	mb.metricContainerCPUUsageKernelmode.emit(ils.Metrics())
+	mb.metricContainerCPUUsagePercpu.emit(ils.Metrics())
+	mb.metricContainerCPUUsageSystem.emit(ils.Metrics())
+	mb.metricContainerCPUUsageTotal.emit(ils.Metrics())
+	mb.metricContainerCPUUsageUsermode.emit(ils.Metrics())
+	mb.metricContainerCPUUtilization.emit(ils.Metrics())
+	mb.metricContainerMemoryActiveAnon.emit(ils.Metrics())
+	mb.metricContainerMemoryActiveFile.emit(ils.Metrics())
+	mb.metricContainerMemoryAnon.emit(ils.Metrics())
+	mb.metricContainerMemoryCache.emit(ils.Metrics())
+	mb.metricContainerMemoryDirty.emit(ils.Metrics())
+	mb.metricContainerMemoryFails.emit(ils.Metrics())
+	mb.metricContainerMemoryFile.emit(ils.Metrics())
+	mb.metricContainerMemoryHierarchicalMemoryLimit.emit(ils.Metrics())
+	mb.metricContainerMemoryHierarchicalMemswLimit.emit(ils.Metrics())
+	mb.metricContainerMemoryInactiveAnon.emit(ils.Metrics())
+	mb.metricContainerMemoryInactiveFile.emit(ils.Metrics())
+	mb.metricContainerMemoryMappedFile.emit(ils.Metrics())
+	mb.metricContainerMemoryPercent.emit(ils.Metrics())
+	mb.metricContainerMemoryPgfault.emit(ils.Metrics())
+	mb.metricContainerMemoryPgmajfault.emit(ils.Metrics())
+	mb.metricContainerMemoryPgpgin.emit(ils.Metrics())
+	mb.metricContainerMemoryPgpgout.emit(ils.Metrics())
+	mb.metricContainerMemoryRss.emit(ils.Metrics())
+	mb.metricContainerMemoryRssHuge.emit(ils.Metrics())
+	mb.metricContainerMemoryTotalActiveAnon.emit(ils.Metrics())
+	mb.metricContainerMemoryTotalActiveFile.emit(ils.Metrics())
+	mb.metricContainerMemoryTotalCache.emit(ils.Metrics())
+	mb.metricContainerMemoryTotalDirty.emit(ils.Metrics())
+	mb.metricContainerMemoryTotalInactiveAnon.emit(ils.Metrics())
+	mb.metricContainerMemoryTotalInactiveFile.emit(ils.Metrics())
+	mb.metricContainerMemoryTotalMappedFile.emit(ils.Metrics())
+	mb.metricContainerMemoryTotalPgfault.emit(ils.Metrics())
+	mb.metricContainerMemoryTotalPgmajfault.emit(ils.Metrics())
+	mb.metricContainerMemoryTotalPgpgin.emit(ils.Metrics())
+	mb.metricContainerMemoryTotalPgpgout.emit(ils.Metrics())
+	mb.metricContainerMemoryTotalRss.emit(ils.Metrics())
+	mb.metricContainerMemoryTotalRssHuge.emit(ils.Metrics())
+	mb.metricContainerMemoryTotalUnevictable.emit(ils.Metrics())
+	mb.metricContainerMemoryTotalWriteback.emit(ils.Metrics())
+	mb.metricContainerMemoryUnevictable.emit(ils.Metrics())
+	mb.metricContainerMemoryUsageLimit.emit(ils.Metrics())
+	mb.metricContainerMemoryUsageMax.emit(ils.Metrics())
+	mb.metricContainerMemoryUsageTotal.emit(ils.Metrics())
+	mb.metricContainerMemoryWriteback.emit(ils.Metrics())
+	mb.metricContainerNetworkIoUsageRxBytes.emit(ils.Metrics())
+	mb.metricContainerNetworkIoUsageRxDropped.emit(ils.Metrics())
+	mb.metricContainerNetworkIoUsageRxErrors.emit(ils.Metrics())
+	mb.metricContainerNetworkIoUsageRxPackets.emit(ils.Metrics())
+	mb.metricContainerNetworkIoUsageTxBytes.emit(ils.Metrics())
+	mb.metricContainerNetworkIoUsageTxDropped.emit(ils.Metrics())
+	mb.metricContainerNetworkIoUsageTxErrors.emit(ils.Metrics())
+	mb.metricContainerNetworkIoUsageTxPackets.emit(ils.Metrics())
+	mb.metricContainerPidsCount.emit(ils.Metrics())
+	mb.metricContainerPidsLimit.emit(ils.Metrics())
+	mb.metricContainerRestarts.emit(ils.Metrics())
+	mb.metricContainerUptime.emit(ils.Metrics())
+
+	for _, op := range rmo {
+		op(rm)
+	}
+	// Drop the whole resource if any configured include filter does not match,
+	// or any configured exclude filter does match, its resource attribute value.
+	for attr, filter := range mb.resourceAttributeIncludeFilter {
+		if val, ok := rm.Resource().Attributes().Get(attr); ok && !filter.Matches(val.AsString()) {
+			return
+		}
+	}
+	for attr, filter := range mb.resourceAttributeExcludeFilter {
+		if val, ok := rm.Resource().Attributes().Get(attr); ok && filter.Matches(val.AsString()) {
+			return
+		}
+	}
+
+	if ils.Metrics().Len() > 0 {
+		mb.updateCapacity(rm)
+		rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty())
+	}
+}
+
+// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for
+// recording another set of metrics. This function will be responsible for applying all the transformations required to
+// produce metric representation defined in metadata and user config, e.g. delta or cumulative.
+func (mb *MetricsBuilder) Emit(rmo ...ResourceMetricsOption) pmetric.Metrics {
+	mb.EmitForResource(rmo...)
+	metrics := mb.metricsBuffer
+	mb.metricsBuffer = pmetric.NewMetrics()
+	return metrics
+}
+
+// RecordContainerBlockioIoMergedRecursiveDataPoint adds a data point to container.blockio.io_merged_recursive metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoMergedRecursiveDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) {
+	mb.metricContainerBlockioIoMergedRecursive.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue, operationAttributeValue)
+}
+
+// RecordContainerBlockioIoQueuedRecursiveDataPoint adds a data point to container.blockio.io_queued_recursive metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoQueuedRecursiveDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) {
+	mb.metricContainerBlockioIoQueuedRecursive.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue, operationAttributeValue)
+}
+
+// RecordContainerBlockioIoServiceBytesRecursiveDataPoint adds a data point to container.blockio.io_service_bytes_recursive metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoServiceBytesRecursiveDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) {
+	mb.metricContainerBlockioIoServiceBytesRecursive.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue, operationAttributeValue)
+}
+
+// RecordContainerBlockioIoServiceTimeRecursiveDataPoint adds a data point to container.blockio.io_service_time_recursive metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoServiceTimeRecursiveDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) {
+	mb.metricContainerBlockioIoServiceTimeRecursive.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue, operationAttributeValue)
+}
+
+// RecordContainerBlockioIoServicedRecursiveDataPoint adds a data point to container.blockio.io_serviced_recursive metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoServicedRecursiveDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) {
+	mb.metricContainerBlockioIoServicedRecursive.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue, operationAttributeValue)
+}
+
+// RecordContainerBlockioIoTimeRecursiveDataPoint adds a data point to container.blockio.io_time_recursive metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoTimeRecursiveDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) {
+	mb.metricContainerBlockioIoTimeRecursive.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue, operationAttributeValue)
+}
+
+// RecordContainerBlockioIoWaitTimeRecursiveDataPoint adds a data point to container.blockio.io_wait_time_recursive metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoWaitTimeRecursiveDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) {
+	mb.metricContainerBlockioIoWaitTimeRecursive.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue, operationAttributeValue)
+}
+
+// RecordContainerBlockioSectorsRecursiveDataPoint adds a data point to container.blockio.sectors_recursive metric.
+func (mb *MetricsBuilder) RecordContainerBlockioSectorsRecursiveDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) {
+	mb.metricContainerBlockioSectorsRecursive.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue, operationAttributeValue)
+}
+
+// RecordContainerCPULimitDataPoint adds a data point to container.cpu.limit metric.
+func (mb *MetricsBuilder) RecordContainerCPULimitDataPoint(ts pcommon.Timestamp, val float64) {
+	mb.metricContainerCPULimit.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerCPULogicalCountDataPoint adds a data point to container.cpu.logical.count metric.
+func (mb *MetricsBuilder) RecordContainerCPULogicalCountDataPoint(ts pcommon.Timestamp, val int64) {
+	mb.metricContainerCPULogicalCount.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerCPUSharesDataPoint adds a data point to container.cpu.shares metric.
+func (mb *MetricsBuilder) RecordContainerCPUSharesDataPoint(ts pcommon.Timestamp, val int64) {
+	mb.metricContainerCPUShares.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerCPUThrottlingDataPeriodsDataPoint adds a data point to container.cpu.throttling_data.periods metric.
+func (mb *MetricsBuilder) RecordContainerCPUThrottlingDataPeriodsDataPoint(ts pcommon.Timestamp, val int64) {
+	mb.metricContainerCPUThrottlingDataPeriods.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerCPUThrottlingDataThrottledPeriodsDataPoint adds a data point to container.cpu.throttling_data.throttled_periods metric.
+func (mb *MetricsBuilder) RecordContainerCPUThrottlingDataThrottledPeriodsDataPoint(ts pcommon.Timestamp, val int64) {
+	mb.metricContainerCPUThrottlingDataThrottledPeriods.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerCPUThrottlingDataThrottledTimeDataPoint adds a data point to container.cpu.throttling_data.throttled_time metric.
+func (mb *MetricsBuilder) RecordContainerCPUThrottlingDataThrottledTimeDataPoint(ts pcommon.Timestamp, val int64) {
+	mb.metricContainerCPUThrottlingDataThrottledTime.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerCPUUsageKernelmodeDataPoint adds a data point to container.cpu.usage.kernelmode metric.
+func (mb *MetricsBuilder) RecordContainerCPUUsageKernelmodeDataPoint(ts pcommon.Timestamp, val int64) {
+	mb.metricContainerCPUUsageKernelmode.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerCPUUsagePercpuDataPoint adds a data point to container.cpu.usage.percpu metric.
+func (mb *MetricsBuilder) RecordContainerCPUUsagePercpuDataPoint(ts pcommon.Timestamp, val int64, coreAttributeValue string) {
+	mb.metricContainerCPUUsagePercpu.recordDataPoint(mb.startTime, ts, val, coreAttributeValue)
+}
+
+// RecordContainerCPUUsageSystemDataPoint adds a data point to container.cpu.usage.system metric.
+func (mb *MetricsBuilder) RecordContainerCPUUsageSystemDataPoint(ts pcommon.Timestamp, val int64) {
+	mb.metricContainerCPUUsageSystem.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerCPUUsageTotalDataPoint adds a data point to container.cpu.usage.total metric.
+func (mb *MetricsBuilder) RecordContainerCPUUsageTotalDataPoint(ts pcommon.Timestamp, val int64) {
+	mb.metricContainerCPUUsageTotal.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerCPUUsageUsermodeDataPoint adds a data point to container.cpu.usage.usermode metric.
+func (mb *MetricsBuilder) RecordContainerCPUUsageUsermodeDataPoint(ts pcommon.Timestamp, val int64) {
+	mb.metricContainerCPUUsageUsermode.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerCPUUtilizationDataPoint adds a data point to container.cpu.utilization metric.
+func (mb *MetricsBuilder) RecordContainerCPUUtilizationDataPoint(ts pcommon.Timestamp, val float64) {
+	mb.metricContainerCPUUtilization.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryActiveAnonDataPoint adds a data point to container.memory.active_anon metric.
+func (mb *MetricsBuilder) RecordContainerMemoryActiveAnonDataPoint(ts pcommon.Timestamp, val int64) {
+	mb.metricContainerMemoryActiveAnon.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryActiveFileDataPoint adds a data point to container.memory.active_file metric.
+func (mb *MetricsBuilder) RecordContainerMemoryActiveFileDataPoint(ts pcommon.Timestamp, val int64) {
+	mb.metricContainerMemoryActiveFile.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryAnonDataPoint adds a data point to container.memory.anon metric.
+func (mb *MetricsBuilder) RecordContainerMemoryAnonDataPoint(ts pcommon.Timestamp, val int64) {
+	mb.metricContainerMemoryAnon.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryCacheDataPoint adds a data point to container.memory.cache metric.
+func (mb *MetricsBuilder) RecordContainerMemoryCacheDataPoint(ts pcommon.Timestamp, val int64) {
+	mb.metricContainerMemoryCache.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryDirtyDataPoint adds a data point to container.memory.dirty metric.
+func (mb *MetricsBuilder) RecordContainerMemoryDirtyDataPoint(ts pcommon.Timestamp, val int64) {
+	mb.metricContainerMemoryDirty.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryFailsDataPoint adds a data point to container.memory.fails metric.
+func (mb *MetricsBuilder) RecordContainerMemoryFailsDataPoint(ts pcommon.Timestamp, val int64) {
+	mb.metricContainerMemoryFails.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryFileDataPoint adds a data point to container.memory.file metric.
+func (mb *MetricsBuilder) RecordContainerMemoryFileDataPoint(ts pcommon.Timestamp, val int64) {
+	mb.metricContainerMemoryFile.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryHierarchicalMemoryLimitDataPoint adds a data point to container.memory.hierarchical_memory_limit metric.
+func (mb *MetricsBuilder) RecordContainerMemoryHierarchicalMemoryLimitDataPoint(ts pcommon.Timestamp, val int64) {
+	mb.metricContainerMemoryHierarchicalMemoryLimit.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryHierarchicalMemswLimitDataPoint adds a data point to container.memory.hierarchical_memsw_limit metric.
+func (mb *MetricsBuilder) RecordContainerMemoryHierarchicalMemswLimitDataPoint(ts pcommon.Timestamp, val int64) {
+	mb.metricContainerMemoryHierarchicalMemswLimit.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryInactiveAnonDataPoint adds a data point to container.memory.inactive_anon metric.
+func (mb *MetricsBuilder) RecordContainerMemoryInactiveAnonDataPoint(ts pcommon.Timestamp, val int64) {
+	mb.metricContainerMemoryInactiveAnon.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryInactiveFileDataPoint adds a data point to container.memory.inactive_file metric.
+func (mb *MetricsBuilder) RecordContainerMemoryInactiveFileDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerMemoryInactiveFile.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerMemoryMappedFileDataPoint adds a data point to container.memory.mapped_file metric. +func (mb *MetricsBuilder) RecordContainerMemoryMappedFileDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerMemoryMappedFile.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerMemoryPercentDataPoint adds a data point to container.memory.percent metric. +func (mb *MetricsBuilder) RecordContainerMemoryPercentDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricContainerMemoryPercent.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerMemoryPgfaultDataPoint adds a data point to container.memory.pgfault metric. +func (mb *MetricsBuilder) RecordContainerMemoryPgfaultDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerMemoryPgfault.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerMemoryPgmajfaultDataPoint adds a data point to container.memory.pgmajfault metric. +func (mb *MetricsBuilder) RecordContainerMemoryPgmajfaultDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerMemoryPgmajfault.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerMemoryPgpginDataPoint adds a data point to container.memory.pgpgin metric. +func (mb *MetricsBuilder) RecordContainerMemoryPgpginDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerMemoryPgpgin.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerMemoryPgpgoutDataPoint adds a data point to container.memory.pgpgout metric. +func (mb *MetricsBuilder) RecordContainerMemoryPgpgoutDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerMemoryPgpgout.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerMemoryRssDataPoint adds a data point to container.memory.rss metric. 
+func (mb *MetricsBuilder) RecordContainerMemoryRssDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerMemoryRss.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerMemoryRssHugeDataPoint adds a data point to container.memory.rss_huge metric. +func (mb *MetricsBuilder) RecordContainerMemoryRssHugeDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerMemoryRssHuge.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerMemoryTotalActiveAnonDataPoint adds a data point to container.memory.total_active_anon metric. +func (mb *MetricsBuilder) RecordContainerMemoryTotalActiveAnonDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerMemoryTotalActiveAnon.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerMemoryTotalActiveFileDataPoint adds a data point to container.memory.total_active_file metric. +func (mb *MetricsBuilder) RecordContainerMemoryTotalActiveFileDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerMemoryTotalActiveFile.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerMemoryTotalCacheDataPoint adds a data point to container.memory.total_cache metric. +func (mb *MetricsBuilder) RecordContainerMemoryTotalCacheDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerMemoryTotalCache.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerMemoryTotalDirtyDataPoint adds a data point to container.memory.total_dirty metric. +func (mb *MetricsBuilder) RecordContainerMemoryTotalDirtyDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerMemoryTotalDirty.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerMemoryTotalInactiveAnonDataPoint adds a data point to container.memory.total_inactive_anon metric. 
+func (mb *MetricsBuilder) RecordContainerMemoryTotalInactiveAnonDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerMemoryTotalInactiveAnon.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerMemoryTotalInactiveFileDataPoint adds a data point to container.memory.total_inactive_file metric. +func (mb *MetricsBuilder) RecordContainerMemoryTotalInactiveFileDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerMemoryTotalInactiveFile.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerMemoryTotalMappedFileDataPoint adds a data point to container.memory.total_mapped_file metric. +func (mb *MetricsBuilder) RecordContainerMemoryTotalMappedFileDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerMemoryTotalMappedFile.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerMemoryTotalPgfaultDataPoint adds a data point to container.memory.total_pgfault metric. +func (mb *MetricsBuilder) RecordContainerMemoryTotalPgfaultDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerMemoryTotalPgfault.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerMemoryTotalPgmajfaultDataPoint adds a data point to container.memory.total_pgmajfault metric. +func (mb *MetricsBuilder) RecordContainerMemoryTotalPgmajfaultDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerMemoryTotalPgmajfault.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerMemoryTotalPgpginDataPoint adds a data point to container.memory.total_pgpgin metric. +func (mb *MetricsBuilder) RecordContainerMemoryTotalPgpginDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerMemoryTotalPgpgin.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerMemoryTotalPgpgoutDataPoint adds a data point to container.memory.total_pgpgout metric. 
+func (mb *MetricsBuilder) RecordContainerMemoryTotalPgpgoutDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerMemoryTotalPgpgout.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerMemoryTotalRssDataPoint adds a data point to container.memory.total_rss metric. +func (mb *MetricsBuilder) RecordContainerMemoryTotalRssDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerMemoryTotalRss.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerMemoryTotalRssHugeDataPoint adds a data point to container.memory.total_rss_huge metric. +func (mb *MetricsBuilder) RecordContainerMemoryTotalRssHugeDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerMemoryTotalRssHuge.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerMemoryTotalUnevictableDataPoint adds a data point to container.memory.total_unevictable metric. +func (mb *MetricsBuilder) RecordContainerMemoryTotalUnevictableDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerMemoryTotalUnevictable.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerMemoryTotalWritebackDataPoint adds a data point to container.memory.total_writeback metric. +func (mb *MetricsBuilder) RecordContainerMemoryTotalWritebackDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerMemoryTotalWriteback.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerMemoryUnevictableDataPoint adds a data point to container.memory.unevictable metric. +func (mb *MetricsBuilder) RecordContainerMemoryUnevictableDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerMemoryUnevictable.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerMemoryUsageLimitDataPoint adds a data point to container.memory.usage.limit metric. 
+func (mb *MetricsBuilder) RecordContainerMemoryUsageLimitDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerMemoryUsageLimit.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerMemoryUsageMaxDataPoint adds a data point to container.memory.usage.max metric. +func (mb *MetricsBuilder) RecordContainerMemoryUsageMaxDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerMemoryUsageMax.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerMemoryUsageTotalDataPoint adds a data point to container.memory.usage.total metric. +func (mb *MetricsBuilder) RecordContainerMemoryUsageTotalDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerMemoryUsageTotal.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerMemoryWritebackDataPoint adds a data point to container.memory.writeback metric. +func (mb *MetricsBuilder) RecordContainerMemoryWritebackDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerMemoryWriteback.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerNetworkIoUsageRxBytesDataPoint adds a data point to container.network.io.usage.rx_bytes metric. +func (mb *MetricsBuilder) RecordContainerNetworkIoUsageRxBytesDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string) { + mb.metricContainerNetworkIoUsageRxBytes.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue) +} + +// RecordContainerNetworkIoUsageRxDroppedDataPoint adds a data point to container.network.io.usage.rx_dropped metric. +func (mb *MetricsBuilder) RecordContainerNetworkIoUsageRxDroppedDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string) { + mb.metricContainerNetworkIoUsageRxDropped.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue) +} + +// RecordContainerNetworkIoUsageRxErrorsDataPoint adds a data point to container.network.io.usage.rx_errors metric. 
+func (mb *MetricsBuilder) RecordContainerNetworkIoUsageRxErrorsDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string) { + mb.metricContainerNetworkIoUsageRxErrors.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue) +} + +// RecordContainerNetworkIoUsageRxPacketsDataPoint adds a data point to container.network.io.usage.rx_packets metric. +func (mb *MetricsBuilder) RecordContainerNetworkIoUsageRxPacketsDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string) { + mb.metricContainerNetworkIoUsageRxPackets.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue) +} + +// RecordContainerNetworkIoUsageTxBytesDataPoint adds a data point to container.network.io.usage.tx_bytes metric. +func (mb *MetricsBuilder) RecordContainerNetworkIoUsageTxBytesDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string) { + mb.metricContainerNetworkIoUsageTxBytes.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue) +} + +// RecordContainerNetworkIoUsageTxDroppedDataPoint adds a data point to container.network.io.usage.tx_dropped metric. +func (mb *MetricsBuilder) RecordContainerNetworkIoUsageTxDroppedDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string) { + mb.metricContainerNetworkIoUsageTxDropped.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue) +} + +// RecordContainerNetworkIoUsageTxErrorsDataPoint adds a data point to container.network.io.usage.tx_errors metric. +func (mb *MetricsBuilder) RecordContainerNetworkIoUsageTxErrorsDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string) { + mb.metricContainerNetworkIoUsageTxErrors.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue) +} + +// RecordContainerNetworkIoUsageTxPacketsDataPoint adds a data point to container.network.io.usage.tx_packets metric. 
+func (mb *MetricsBuilder) RecordContainerNetworkIoUsageTxPacketsDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string) { + mb.metricContainerNetworkIoUsageTxPackets.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue) +} + +// RecordContainerPidsCountDataPoint adds a data point to container.pids.count metric. +func (mb *MetricsBuilder) RecordContainerPidsCountDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerPidsCount.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerPidsLimitDataPoint adds a data point to container.pids.limit metric. +func (mb *MetricsBuilder) RecordContainerPidsLimitDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerPidsLimit.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerRestartsDataPoint adds a data point to container.restarts metric. +func (mb *MetricsBuilder) RecordContainerRestartsDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerRestarts.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerUptimeDataPoint adds a data point to container.uptime metric. +func (mb *MetricsBuilder) RecordContainerUptimeDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricContainerUptime.recordDataPoint(mb.startTime, ts, val) +} + +// Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, +// and metrics builder should update its startTime and reset it's internal state accordingly. 
+func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { + mb.startTime = pcommon.NewTimestampFromTime(time.Now()) + for _, op := range options { + op(mb) + } +} diff --git a/internal/docker/receiver/internal/metadata/generated_metrics_test.go b/internal/docker/receiver/internal/metadata/generated_metrics_test.go new file mode 100644 index 000000000000..56834bc34c8c --- /dev/null +++ b/internal/docker/receiver/internal/metadata/generated_metrics_test.go @@ -0,0 +1,1413 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/receiver/receivertest" + "go.uber.org/zap" + "go.uber.org/zap/zaptest/observer" +) + +type testDataSet int + +const ( + testDataSetDefault testDataSet = iota + testDataSetAll + testDataSetNone +) + +func TestMetricsBuilder(t *testing.T) { + tests := []struct { + name string + metricsSet testDataSet + resAttrsSet testDataSet + expectEmpty bool + }{ + { + name: "default", + }, + { + name: "all_set", + metricsSet: testDataSetAll, + resAttrsSet: testDataSetAll, + }, + { + name: "none_set", + metricsSet: testDataSetNone, + resAttrsSet: testDataSetNone, + expectEmpty: true, + }, + { + name: "filter_set_include", + resAttrsSet: testDataSetAll, + }, + { + name: "filter_set_exclude", + resAttrsSet: testDataSetAll, + expectEmpty: true, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + start := pcommon.Timestamp(1_000_000_000) + ts := pcommon.Timestamp(1_000_001_000) + observedZapCore, observedLogs := observer.New(zap.WarnLevel) + settings := receivertest.NewNopCreateSettings() + settings.Logger = zap.New(observedZapCore) + mb := NewMetricsBuilder(loadMetricsBuilderConfig(t, test.name), settings, WithStartTime(start)) + + expectedWarnings := 0 + + assert.Equal(t, expectedWarnings, observedLogs.Len()) + + 
defaultMetricsCount := 0 + allMetricsCount := 0 + + allMetricsCount++ + mb.RecordContainerBlockioIoMergedRecursiveDataPoint(ts, 1, "device_major-val", "device_minor-val", "operation-val") + + allMetricsCount++ + mb.RecordContainerBlockioIoQueuedRecursiveDataPoint(ts, 1, "device_major-val", "device_minor-val", "operation-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordContainerBlockioIoServiceBytesRecursiveDataPoint(ts, 1, "device_major-val", "device_minor-val", "operation-val") + + allMetricsCount++ + mb.RecordContainerBlockioIoServiceTimeRecursiveDataPoint(ts, 1, "device_major-val", "device_minor-val", "operation-val") + + allMetricsCount++ + mb.RecordContainerBlockioIoServicedRecursiveDataPoint(ts, 1, "device_major-val", "device_minor-val", "operation-val") + + allMetricsCount++ + mb.RecordContainerBlockioIoTimeRecursiveDataPoint(ts, 1, "device_major-val", "device_minor-val", "operation-val") + + allMetricsCount++ + mb.RecordContainerBlockioIoWaitTimeRecursiveDataPoint(ts, 1, "device_major-val", "device_minor-val", "operation-val") + + allMetricsCount++ + mb.RecordContainerBlockioSectorsRecursiveDataPoint(ts, 1, "device_major-val", "device_minor-val", "operation-val") + + allMetricsCount++ + mb.RecordContainerCPULimitDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerCPULogicalCountDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerCPUSharesDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerCPUThrottlingDataPeriodsDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerCPUThrottlingDataThrottledPeriodsDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerCPUThrottlingDataThrottledTimeDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordContainerCPUUsageKernelmodeDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerCPUUsagePercpuDataPoint(ts, 1, "core-val") + + allMetricsCount++ + mb.RecordContainerCPUUsageSystemDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + 
mb.RecordContainerCPUUsageTotalDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordContainerCPUUsageUsermodeDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordContainerCPUUtilizationDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerMemoryActiveAnonDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerMemoryActiveFileDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerMemoryAnonDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerMemoryCacheDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerMemoryDirtyDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerMemoryFailsDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordContainerMemoryFileDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerMemoryHierarchicalMemoryLimitDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerMemoryHierarchicalMemswLimitDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerMemoryInactiveAnonDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerMemoryInactiveFileDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerMemoryMappedFileDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordContainerMemoryPercentDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerMemoryPgfaultDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerMemoryPgmajfaultDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerMemoryPgpginDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerMemoryPgpgoutDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerMemoryRssDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerMemoryRssHugeDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerMemoryTotalActiveAnonDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerMemoryTotalActiveFileDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordContainerMemoryTotalCacheDataPoint(ts, 1) + + allMetricsCount++ + 
mb.RecordContainerMemoryTotalDirtyDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerMemoryTotalInactiveAnonDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerMemoryTotalInactiveFileDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerMemoryTotalMappedFileDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerMemoryTotalPgfaultDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerMemoryTotalPgmajfaultDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerMemoryTotalPgpginDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerMemoryTotalPgpgoutDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerMemoryTotalRssDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerMemoryTotalRssHugeDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerMemoryTotalUnevictableDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerMemoryTotalWritebackDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerMemoryUnevictableDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordContainerMemoryUsageLimitDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerMemoryUsageMaxDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordContainerMemoryUsageTotalDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerMemoryWritebackDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordContainerNetworkIoUsageRxBytesDataPoint(ts, 1, "interface-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordContainerNetworkIoUsageRxDroppedDataPoint(ts, 1, "interface-val") + + allMetricsCount++ + mb.RecordContainerNetworkIoUsageRxErrorsDataPoint(ts, 1, "interface-val") + + allMetricsCount++ + mb.RecordContainerNetworkIoUsageRxPacketsDataPoint(ts, 1, "interface-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordContainerNetworkIoUsageTxBytesDataPoint(ts, 1, "interface-val") + + defaultMetricsCount++ + allMetricsCount++ + 
mb.RecordContainerNetworkIoUsageTxDroppedDataPoint(ts, 1, "interface-val") + + allMetricsCount++ + mb.RecordContainerNetworkIoUsageTxErrorsDataPoint(ts, 1, "interface-val") + + allMetricsCount++ + mb.RecordContainerNetworkIoUsageTxPacketsDataPoint(ts, 1, "interface-val") + + allMetricsCount++ + mb.RecordContainerPidsCountDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerPidsLimitDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerRestartsDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerUptimeDataPoint(ts, 1) + + rb := mb.NewResourceBuilder() + rb.SetContainerCommandLine("container.command_line-val") + rb.SetContainerHostname("container.hostname-val") + rb.SetContainerID("container.id-val") + rb.SetContainerImageID("container.image.id-val") + rb.SetContainerImageName("container.image.name-val") + rb.SetContainerName("container.name-val") + rb.SetContainerRuntime("container.runtime-val") + res := rb.Emit() + metrics := mb.Emit(WithResource(res)) + + if test.expectEmpty { + assert.Equal(t, 0, metrics.ResourceMetrics().Len()) + return + } + + assert.Equal(t, 1, metrics.ResourceMetrics().Len()) + rm := metrics.ResourceMetrics().At(0) + assert.Equal(t, res, rm.Resource()) + assert.Equal(t, 1, rm.ScopeMetrics().Len()) + ms := rm.ScopeMetrics().At(0).Metrics() + if test.metricsSet == testDataSetDefault { + assert.Equal(t, defaultMetricsCount, ms.Len()) + } + if test.metricsSet == testDataSetAll { + assert.Equal(t, allMetricsCount, ms.Len()) + } + validatedMetrics := make(map[string]bool) + for i := 0; i < ms.Len(); i++ { + switch ms.At(i).Name() { + case "container.blockio.io_merged_recursive": + assert.False(t, validatedMetrics["container.blockio.io_merged_recursive"], "Found a duplicate in the metrics slice: container.blockio.io_merged_recursive") + validatedMetrics["container.blockio.io_merged_recursive"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, 
"Number of bios/requests merged into requests belonging to this cgroup and its descendant cgroups (Only available with cgroups v1).", ms.At(i).Description()) + assert.Equal(t, "{operations}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("device_major") + assert.True(t, ok) + assert.EqualValues(t, "device_major-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("device_minor") + assert.True(t, ok) + assert.EqualValues(t, "device_minor-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("operation") + assert.True(t, ok) + assert.EqualValues(t, "operation-val", attrVal.Str()) + case "container.blockio.io_queued_recursive": + assert.False(t, validatedMetrics["container.blockio.io_queued_recursive"], "Found a duplicate in the metrics slice: container.blockio.io_queued_recursive") + validatedMetrics["container.blockio.io_queued_recursive"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of requests queued up for this cgroup and its descendant cgroups (Only available with cgroups v1).", ms.At(i).Description()) + assert.Equal(t, "{operations}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := 
dp.Attributes().Get("device_major") + assert.True(t, ok) + assert.EqualValues(t, "device_major-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("device_minor") + assert.True(t, ok) + assert.EqualValues(t, "device_minor-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("operation") + assert.True(t, ok) + assert.EqualValues(t, "operation-val", attrVal.Str()) + case "container.blockio.io_service_bytes_recursive": + assert.False(t, validatedMetrics["container.blockio.io_service_bytes_recursive"], "Found a duplicate in the metrics slice: container.blockio.io_service_bytes_recursive") + validatedMetrics["container.blockio.io_service_bytes_recursive"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of bytes transferred to/from the disk by the group and descendant groups.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("device_major") + assert.True(t, ok) + assert.EqualValues(t, "device_major-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("device_minor") + assert.True(t, ok) + assert.EqualValues(t, "device_minor-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("operation") + assert.True(t, ok) + assert.EqualValues(t, "operation-val", attrVal.Str()) + case "container.blockio.io_service_time_recursive": + assert.False(t, validatedMetrics["container.blockio.io_service_time_recursive"], "Found a duplicate in the metrics slice: container.blockio.io_service_time_recursive") + 
validatedMetrics["container.blockio.io_service_time_recursive"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Total amount of time in nanoseconds between request dispatch and request completion for the IOs done by this cgroup and descendant cgroups (Only available with cgroups v1).", ms.At(i).Description()) + assert.Equal(t, "ns", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("device_major") + assert.True(t, ok) + assert.EqualValues(t, "device_major-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("device_minor") + assert.True(t, ok) + assert.EqualValues(t, "device_minor-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("operation") + assert.True(t, ok) + assert.EqualValues(t, "operation-val", attrVal.Str()) + case "container.blockio.io_serviced_recursive": + assert.False(t, validatedMetrics["container.blockio.io_serviced_recursive"], "Found a duplicate in the metrics slice: container.blockio.io_serviced_recursive") + validatedMetrics["container.blockio.io_serviced_recursive"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of IOs (bio) issued to the disk by the group and descendant groups (Only available with cgroups v1).", ms.At(i).Description()) + assert.Equal(t, "{operations}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := 
ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("device_major") + assert.True(t, ok) + assert.EqualValues(t, "device_major-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("device_minor") + assert.True(t, ok) + assert.EqualValues(t, "device_minor-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("operation") + assert.True(t, ok) + assert.EqualValues(t, "operation-val", attrVal.Str()) + case "container.blockio.io_time_recursive": + assert.False(t, validatedMetrics["container.blockio.io_time_recursive"], "Found a duplicate in the metrics slice: container.blockio.io_time_recursive") + validatedMetrics["container.blockio.io_time_recursive"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Disk time allocated to cgroup (and descendant cgroups) per device in milliseconds (Only available with cgroups v1).", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("device_major") + assert.True(t, ok) + assert.EqualValues(t, "device_major-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("device_minor") + assert.True(t, ok) + assert.EqualValues(t, "device_minor-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("operation") + assert.True(t, ok) + assert.EqualValues(t, "operation-val", attrVal.Str()) + case 
"container.blockio.io_wait_time_recursive": + assert.False(t, validatedMetrics["container.blockio.io_wait_time_recursive"], "Found a duplicate in the metrics slice: container.blockio.io_wait_time_recursive") + validatedMetrics["container.blockio.io_wait_time_recursive"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Total amount of time the IOs for this cgroup (and descendant cgroups) spent waiting in the scheduler queues for service (Only available with cgroups v1).", ms.At(i).Description()) + assert.Equal(t, "ns", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("device_major") + assert.True(t, ok) + assert.EqualValues(t, "device_major-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("device_minor") + assert.True(t, ok) + assert.EqualValues(t, "device_minor-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("operation") + assert.True(t, ok) + assert.EqualValues(t, "operation-val", attrVal.Str()) + case "container.blockio.sectors_recursive": + assert.False(t, validatedMetrics["container.blockio.sectors_recursive"], "Found a duplicate in the metrics slice: container.blockio.sectors_recursive") + validatedMetrics["container.blockio.sectors_recursive"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of sectors transferred to/from disk by the group and descendant groups (Only available with cgroups v1).", ms.At(i).Description()) + assert.Equal(t, "{sectors}", 
ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("device_major") + assert.True(t, ok) + assert.EqualValues(t, "device_major-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("device_minor") + assert.True(t, ok) + assert.EqualValues(t, "device_minor-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("operation") + assert.True(t, ok) + assert.EqualValues(t, "operation-val", attrVal.Str()) + case "container.cpu.limit": + assert.False(t, validatedMetrics["container.cpu.limit"], "Found a duplicate in the metrics slice: container.cpu.limit") + validatedMetrics["container.cpu.limit"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "CPU limit set for the container.", ms.At(i).Description()) + assert.Equal(t, "{cpus}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + case "container.cpu.logical.count": + assert.False(t, validatedMetrics["container.cpu.logical.count"], "Found a duplicate in the metrics slice: container.cpu.logical.count") + validatedMetrics["container.cpu.logical.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of cores available to the container.", ms.At(i).Description()) + assert.Equal(t, "{cpus}", ms.At(i).Unit()) + dp := 
ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.cpu.shares": + assert.False(t, validatedMetrics["container.cpu.shares"], "Found a duplicate in the metrics slice: container.cpu.shares") + validatedMetrics["container.cpu.shares"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "CPU shares set for the container.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.cpu.throttling_data.periods": + assert.False(t, validatedMetrics["container.cpu.throttling_data.periods"], "Found a duplicate in the metrics slice: container.cpu.throttling_data.periods") + validatedMetrics["container.cpu.throttling_data.periods"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of periods with throttling active.", ms.At(i).Description()) + assert.Equal(t, "{periods}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.cpu.throttling_data.throttled_periods": + assert.False(t, validatedMetrics["container.cpu.throttling_data.throttled_periods"], "Found a duplicate 
in the metrics slice: container.cpu.throttling_data.throttled_periods") + validatedMetrics["container.cpu.throttling_data.throttled_periods"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of periods when the container hits its throttling limit.", ms.At(i).Description()) + assert.Equal(t, "{periods}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.cpu.throttling_data.throttled_time": + assert.False(t, validatedMetrics["container.cpu.throttling_data.throttled_time"], "Found a duplicate in the metrics slice: container.cpu.throttling_data.throttled_time") + validatedMetrics["container.cpu.throttling_data.throttled_time"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Aggregate time the container was throttled.", ms.At(i).Description()) + assert.Equal(t, "ns", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.cpu.usage.kernelmode": + assert.False(t, validatedMetrics["container.cpu.usage.kernelmode"], "Found a duplicate in the metrics slice: container.cpu.usage.kernelmode") + validatedMetrics["container.cpu.usage.kernelmode"] = true 
+ assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Time spent by tasks of the cgroup in kernel mode (Linux). Time spent by all container processes in kernel mode (Windows).", ms.At(i).Description()) + assert.Equal(t, "ns", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.cpu.usage.percpu": + assert.False(t, validatedMetrics["container.cpu.usage.percpu"], "Found a duplicate in the metrics slice: container.cpu.usage.percpu") + validatedMetrics["container.cpu.usage.percpu"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Per-core CPU usage by the container (Only available with cgroups v1).", ms.At(i).Description()) + assert.Equal(t, "ns", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("core") + assert.True(t, ok) + assert.EqualValues(t, "core-val", attrVal.Str()) + case "container.cpu.usage.system": + assert.False(t, validatedMetrics["container.cpu.usage.system"], "Found a duplicate in the metrics slice: container.cpu.usage.system") + validatedMetrics["container.cpu.usage.system"] = true + assert.Equal(t, pmetric.MetricTypeSum, 
ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "System CPU usage, as reported by docker.", ms.At(i).Description()) + assert.Equal(t, "ns", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.cpu.usage.total": + assert.False(t, validatedMetrics["container.cpu.usage.total"], "Found a duplicate in the metrics slice: container.cpu.usage.total") + validatedMetrics["container.cpu.usage.total"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Total CPU time consumed.", ms.At(i).Description()) + assert.Equal(t, "ns", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.cpu.usage.usermode": + assert.False(t, validatedMetrics["container.cpu.usage.usermode"], "Found a duplicate in the metrics slice: container.cpu.usage.usermode") + validatedMetrics["container.cpu.usage.usermode"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Time spent by tasks of the cgroup in user mode (Linux). 
Time spent by all container processes in user mode (Windows).", ms.At(i).Description()) + assert.Equal(t, "ns", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.cpu.utilization": + assert.False(t, validatedMetrics["container.cpu.utilization"], "Found a duplicate in the metrics slice: container.cpu.utilization") + validatedMetrics["container.cpu.utilization"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Percent of CPU used by the container.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + case "container.memory.active_anon": + assert.False(t, validatedMetrics["container.memory.active_anon"], "Found a duplicate in the metrics slice: container.memory.active_anon") + validatedMetrics["container.memory.active_anon"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "The amount of anonymous memory that has been identified as active by the kernel.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, 
dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.memory.active_file": + assert.False(t, validatedMetrics["container.memory.active_file"], "Found a duplicate in the metrics slice: container.memory.active_file") + validatedMetrics["container.memory.active_file"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Cache memory that has been identified as active by the kernel.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.memory.anon": + assert.False(t, validatedMetrics["container.memory.anon"], "Found a duplicate in the metrics slice: container.memory.anon") + validatedMetrics["container.memory.anon"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Amount of memory used in anonymous mappings such as brk(), sbrk(), and mmap(MAP_ANONYMOUS) (Only available with cgroups v2).", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) 
+ case "container.memory.cache": + assert.False(t, validatedMetrics["container.memory.cache"], "Found a duplicate in the metrics slice: container.memory.cache") + validatedMetrics["container.memory.cache"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "The amount of memory used by the processes of this control group that can be associated precisely with a block on a block device (Only available with cgroups v1).", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.memory.dirty": + assert.False(t, validatedMetrics["container.memory.dirty"], "Found a duplicate in the metrics slice: container.memory.dirty") + validatedMetrics["container.memory.dirty"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Bytes that are waiting to get written back to the disk, from this cgroup (Only available with cgroups v1).", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.memory.fails": + assert.False(t, validatedMetrics["container.memory.fails"], "Found a 
duplicate in the metrics slice: container.memory.fails") + validatedMetrics["container.memory.fails"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of times the memory limit was hit.", ms.At(i).Description()) + assert.Equal(t, "{fails}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.memory.file": + assert.False(t, validatedMetrics["container.memory.file"], "Found a duplicate in the metrics slice: container.memory.file") + validatedMetrics["container.memory.file"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Amount of memory used to cache filesystem data, including tmpfs and shared memory (Only available with cgroups v2).", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.memory.hierarchical_memory_limit": + assert.False(t, validatedMetrics["container.memory.hierarchical_memory_limit"], "Found a duplicate in the metrics slice: container.memory.hierarchical_memory_limit") + validatedMetrics["container.memory.hierarchical_memory_limit"] = true + assert.Equal(t, 
pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "The maximum amount of physical memory that can be used by the processes of this control group (Only available with cgroups v1).", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.memory.hierarchical_memsw_limit": + assert.False(t, validatedMetrics["container.memory.hierarchical_memsw_limit"], "Found a duplicate in the metrics slice: container.memory.hierarchical_memsw_limit") + validatedMetrics["container.memory.hierarchical_memsw_limit"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "The maximum amount of RAM + swap that can be used by the processes of this control group (Only available with cgroups v1).", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.memory.inactive_anon": + assert.False(t, validatedMetrics["container.memory.inactive_anon"], "Found a duplicate in the metrics slice: container.memory.inactive_anon") + validatedMetrics["container.memory.inactive_anon"] = true + assert.Equal(t, pmetric.MetricTypeSum, 
ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "The amount of anonymous memory that has been identified as inactive by the kernel.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.memory.inactive_file": + assert.False(t, validatedMetrics["container.memory.inactive_file"], "Found a duplicate in the metrics slice: container.memory.inactive_file") + validatedMetrics["container.memory.inactive_file"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Cache memory that has been identified as inactive by the kernel.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.memory.mapped_file": + assert.False(t, validatedMetrics["container.memory.mapped_file"], "Found a duplicate in the metrics slice: container.memory.mapped_file") + validatedMetrics["container.memory.mapped_file"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Indicates the amount of memory mapped by the processes in the control group (Only 
available with cgroups v1).", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.memory.percent": + assert.False(t, validatedMetrics["container.memory.percent"], "Found a duplicate in the metrics slice: container.memory.percent") + validatedMetrics["container.memory.percent"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Percentage of memory used.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + case "container.memory.pgfault": + assert.False(t, validatedMetrics["container.memory.pgfault"], "Found a duplicate in the metrics slice: container.memory.pgfault") + validatedMetrics["container.memory.pgfault"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Indicate the number of times that a process of the cgroup triggered a page fault.", ms.At(i).Description()) + assert.Equal(t, "{faults}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, 
pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.memory.pgmajfault": + assert.False(t, validatedMetrics["container.memory.pgmajfault"], "Found a duplicate in the metrics slice: container.memory.pgmajfault") + validatedMetrics["container.memory.pgmajfault"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Indicate the number of times that a process of the cgroup triggered a major fault.", ms.At(i).Description()) + assert.Equal(t, "{faults}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.memory.pgpgin": + assert.False(t, validatedMetrics["container.memory.pgpgin"], "Found a duplicate in the metrics slice: container.memory.pgpgin") + validatedMetrics["container.memory.pgpgin"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of pages read from disk by the cgroup (Only available with cgroups v1).", ms.At(i).Description()) + assert.Equal(t, "{operations}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.memory.pgpgout": + assert.False(t, 
validatedMetrics["container.memory.pgpgout"], "Found a duplicate in the metrics slice: container.memory.pgpgout") + validatedMetrics["container.memory.pgpgout"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of pages written to disk by the cgroup (Only available with cgroups v1).", ms.At(i).Description()) + assert.Equal(t, "{operations}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.memory.rss": + assert.False(t, validatedMetrics["container.memory.rss"], "Found a duplicate in the metrics slice: container.memory.rss") + validatedMetrics["container.memory.rss"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "The amount of memory that doesn’t correspond to anything on disk: stacks, heaps, and anonymous memory maps (Only available with cgroups v1).", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.memory.rss_huge": + assert.False(t, validatedMetrics["container.memory.rss_huge"], "Found a duplicate in the metrics slice: container.memory.rss_huge") + 
validatedMetrics["container.memory.rss_huge"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of bytes of anonymous transparent hugepages in this cgroup (Only available with cgroups v1).", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.memory.total_active_anon": + assert.False(t, validatedMetrics["container.memory.total_active_anon"], "Found a duplicate in the metrics slice: container.memory.total_active_anon") + validatedMetrics["container.memory.total_active_anon"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "The amount of anonymous memory that has been identified as active by the kernel. 
Includes descendant cgroups (Only available with cgroups v1).", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.memory.total_active_file": + assert.False(t, validatedMetrics["container.memory.total_active_file"], "Found a duplicate in the metrics slice: container.memory.total_active_file") + validatedMetrics["container.memory.total_active_file"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Cache memory that has been identified as active by the kernel. Includes descendant cgroups (Only available with cgroups v1).", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.memory.total_cache": + assert.False(t, validatedMetrics["container.memory.total_cache"], "Found a duplicate in the metrics slice: container.memory.total_cache") + validatedMetrics["container.memory.total_cache"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Total amount of memory used by the processes of this cgroup (and descendants) that can be associated with a block on a 
block device. Also accounts for memory used by tmpfs (Only available with cgroups v1).", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.memory.total_dirty": + assert.False(t, validatedMetrics["container.memory.total_dirty"], "Found a duplicate in the metrics slice: container.memory.total_dirty") + validatedMetrics["container.memory.total_dirty"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Bytes that are waiting to get written back to the disk, from this cgroup and descendants (Only available with cgroups v1).", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.memory.total_inactive_anon": + assert.False(t, validatedMetrics["container.memory.total_inactive_anon"], "Found a duplicate in the metrics slice: container.memory.total_inactive_anon") + validatedMetrics["container.memory.total_inactive_anon"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "The amount of anonymous memory that has been identified as inactive by the kernel. 
Includes descendant cgroups (Only available with cgroups v1).", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.memory.total_inactive_file": + assert.False(t, validatedMetrics["container.memory.total_inactive_file"], "Found a duplicate in the metrics slice: container.memory.total_inactive_file") + validatedMetrics["container.memory.total_inactive_file"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Cache memory that has been identified as inactive by the kernel. Includes descendant cgroups (Only available with cgroups v1).", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.memory.total_mapped_file": + assert.False(t, validatedMetrics["container.memory.total_mapped_file"], "Found a duplicate in the metrics slice: container.memory.total_mapped_file") + validatedMetrics["container.memory.total_mapped_file"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Indicates the amount of memory mapped by the processes in the control group and 
descendant groups (Only available with cgroups v1).", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.memory.total_pgfault": + assert.False(t, validatedMetrics["container.memory.total_pgfault"], "Found a duplicate in the metrics slice: container.memory.total_pgfault") + validatedMetrics["container.memory.total_pgfault"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Indicate the number of times that a process of the cgroup (or descendant cgroups) triggered a page fault (Only available with cgroups v1).", ms.At(i).Description()) + assert.Equal(t, "{faults}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.memory.total_pgmajfault": + assert.False(t, validatedMetrics["container.memory.total_pgmajfault"], "Found a duplicate in the metrics slice: container.memory.total_pgmajfault") + validatedMetrics["container.memory.total_pgmajfault"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Indicate the number of times that a process of the cgroup (or descendant cgroups) triggered a major fault 
(Only available with cgroups v1).", ms.At(i).Description()) + assert.Equal(t, "{faults}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.memory.total_pgpgin": + assert.False(t, validatedMetrics["container.memory.total_pgpgin"], "Found a duplicate in the metrics slice: container.memory.total_pgpgin") + validatedMetrics["container.memory.total_pgpgin"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of pages read from disk by the cgroup and descendant groups (Only available with cgroups v1).", ms.At(i).Description()) + assert.Equal(t, "{operations}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.memory.total_pgpgout": + assert.False(t, validatedMetrics["container.memory.total_pgpgout"], "Found a duplicate in the metrics slice: container.memory.total_pgpgout") + validatedMetrics["container.memory.total_pgpgout"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of pages written to disk by the cgroup and descendant groups (Only available with cgroups v1).", ms.At(i).Description()) + assert.Equal(t, "{operations}", 
ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.memory.total_rss": + assert.False(t, validatedMetrics["container.memory.total_rss"], "Found a duplicate in the metrics slice: container.memory.total_rss") + validatedMetrics["container.memory.total_rss"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "The amount of memory that doesn’t correspond to anything on disk: stacks, heaps, and anonymous memory maps. Includes descendant cgroups (Only available with cgroups v1).", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.memory.total_rss_huge": + assert.False(t, validatedMetrics["container.memory.total_rss_huge"], "Found a duplicate in the metrics slice: container.memory.total_rss_huge") + validatedMetrics["container.memory.total_rss_huge"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of bytes of anonymous transparent hugepages in this cgroup and descendant cgroups (Only available with cgroups v1).", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, 
false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.memory.total_unevictable": + assert.False(t, validatedMetrics["container.memory.total_unevictable"], "Found a duplicate in the metrics slice: container.memory.total_unevictable") + validatedMetrics["container.memory.total_unevictable"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "The amount of memory that cannot be reclaimed. Includes descendant cgroups (Only available with cgroups v1).", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.memory.total_writeback": + assert.False(t, validatedMetrics["container.memory.total_writeback"], "Found a duplicate in the metrics slice: container.memory.total_writeback") + validatedMetrics["container.memory.total_writeback"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of bytes of file/anon cache that are queued for syncing to disk in this cgroup and descendants (Only available with cgroups v1).", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + 
assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.memory.unevictable": + assert.False(t, validatedMetrics["container.memory.unevictable"], "Found a duplicate in the metrics slice: container.memory.unevictable") + validatedMetrics["container.memory.unevictable"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "The amount of memory that cannot be reclaimed.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.memory.usage.limit": + assert.False(t, validatedMetrics["container.memory.usage.limit"], "Found a duplicate in the metrics slice: container.memory.usage.limit") + validatedMetrics["container.memory.usage.limit"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Memory limit of the container.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, 
pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.memory.usage.max": + assert.False(t, validatedMetrics["container.memory.usage.max"], "Found a duplicate in the metrics slice: container.memory.usage.max") + validatedMetrics["container.memory.usage.max"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Maximum memory usage.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.memory.usage.total": + assert.False(t, validatedMetrics["container.memory.usage.total"], "Found a duplicate in the metrics slice: container.memory.usage.total") + validatedMetrics["container.memory.usage.total"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Memory usage of the container. 
This excludes the cache.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.memory.writeback": + assert.False(t, validatedMetrics["container.memory.writeback"], "Found a duplicate in the metrics slice: container.memory.writeback") + validatedMetrics["container.memory.writeback"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of bytes of file/anon cache that are queued for syncing to disk in this cgroup (Only available with cgroups v1).", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.network.io.usage.rx_bytes": + assert.False(t, validatedMetrics["container.network.io.usage.rx_bytes"], "Found a duplicate in the metrics slice: container.network.io.usage.rx_bytes") + validatedMetrics["container.network.io.usage.rx_bytes"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Bytes received by the container.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + 
assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("interface") + assert.True(t, ok) + assert.EqualValues(t, "interface-val", attrVal.Str()) + case "container.network.io.usage.rx_dropped": + assert.False(t, validatedMetrics["container.network.io.usage.rx_dropped"], "Found a duplicate in the metrics slice: container.network.io.usage.rx_dropped") + validatedMetrics["container.network.io.usage.rx_dropped"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Incoming packets dropped.", ms.At(i).Description()) + assert.Equal(t, "{packets}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("interface") + assert.True(t, ok) + assert.EqualValues(t, "interface-val", attrVal.Str()) + case "container.network.io.usage.rx_errors": + assert.False(t, validatedMetrics["container.network.io.usage.rx_errors"], "Found a duplicate in the metrics slice: container.network.io.usage.rx_errors") + validatedMetrics["container.network.io.usage.rx_errors"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Received errors.", ms.At(i).Description()) + assert.Equal(t, "{errors}", ms.At(i).Unit()) + 
assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("interface") + assert.True(t, ok) + assert.EqualValues(t, "interface-val", attrVal.Str()) + case "container.network.io.usage.rx_packets": + assert.False(t, validatedMetrics["container.network.io.usage.rx_packets"], "Found a duplicate in the metrics slice: container.network.io.usage.rx_packets") + validatedMetrics["container.network.io.usage.rx_packets"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Packets received.", ms.At(i).Description()) + assert.Equal(t, "{packets}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("interface") + assert.True(t, ok) + assert.EqualValues(t, "interface-val", attrVal.Str()) + case "container.network.io.usage.tx_bytes": + assert.False(t, validatedMetrics["container.network.io.usage.tx_bytes"], "Found a duplicate in the metrics slice: container.network.io.usage.tx_bytes") + validatedMetrics["container.network.io.usage.tx_bytes"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Bytes sent.", ms.At(i).Description()) + assert.Equal(t, "By", 
ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("interface") + assert.True(t, ok) + assert.EqualValues(t, "interface-val", attrVal.Str()) + case "container.network.io.usage.tx_dropped": + assert.False(t, validatedMetrics["container.network.io.usage.tx_dropped"], "Found a duplicate in the metrics slice: container.network.io.usage.tx_dropped") + validatedMetrics["container.network.io.usage.tx_dropped"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Outgoing packets dropped.", ms.At(i).Description()) + assert.Equal(t, "{packets}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("interface") + assert.True(t, ok) + assert.EqualValues(t, "interface-val", attrVal.Str()) + case "container.network.io.usage.tx_errors": + assert.False(t, validatedMetrics["container.network.io.usage.tx_errors"], "Found a duplicate in the metrics slice: container.network.io.usage.tx_errors") + validatedMetrics["container.network.io.usage.tx_errors"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Sent errors.", 
ms.At(i).Description()) + assert.Equal(t, "{errors}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("interface") + assert.True(t, ok) + assert.EqualValues(t, "interface-val", attrVal.Str()) + case "container.network.io.usage.tx_packets": + assert.False(t, validatedMetrics["container.network.io.usage.tx_packets"], "Found a duplicate in the metrics slice: container.network.io.usage.tx_packets") + validatedMetrics["container.network.io.usage.tx_packets"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Packets sent.", ms.At(i).Description()) + assert.Equal(t, "{packets}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("interface") + assert.True(t, ok) + assert.EqualValues(t, "interface-val", attrVal.Str()) + case "container.pids.count": + assert.False(t, validatedMetrics["container.pids.count"], "Found a duplicate in the metrics slice: container.pids.count") + validatedMetrics["container.pids.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of pids in the container's cgroup.", 
ms.At(i).Description()) + assert.Equal(t, "{pids}", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.pids.limit": + assert.False(t, validatedMetrics["container.pids.limit"], "Found a duplicate in the metrics slice: container.pids.limit") + validatedMetrics["container.pids.limit"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Maximum number of pids in the container's cgroup.", ms.At(i).Description()) + assert.Equal(t, "{pids}", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.restarts": + assert.False(t, validatedMetrics["container.restarts"], "Found a duplicate in the metrics slice: container.restarts") + validatedMetrics["container.restarts"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of restarts for the container.", ms.At(i).Description()) + assert.Equal(t, "{restarts}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, 
dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.uptime": + assert.False(t, validatedMetrics["container.uptime"], "Found a duplicate in the metrics slice: container.uptime") + validatedMetrics["container.uptime"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Time elapsed since container start time.", ms.At(i).Description()) + assert.Equal(t, "s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + } + } + }) + } +} diff --git a/internal/docker/receiver/internal/metadata/generated_resource.go b/internal/docker/receiver/internal/metadata/generated_resource.go new file mode 100644 index 000000000000..7b0c03fd65b5 --- /dev/null +++ b/internal/docker/receiver/internal/metadata/generated_resource.go @@ -0,0 +1,78 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// ResourceBuilder is a helper struct to build resources predefined in metadata.yaml. +// The ResourceBuilder is not thread-safe and must not to be used in multiple goroutines. +type ResourceBuilder struct { + config ResourceAttributesConfig + res pcommon.Resource +} + +// NewResourceBuilder creates a new ResourceBuilder. This method should be called on the start of the application. +func NewResourceBuilder(rac ResourceAttributesConfig) *ResourceBuilder { + return &ResourceBuilder{ + config: rac, + res: pcommon.NewResource(), + } +} + +// SetContainerCommandLine sets provided value as "container.command_line" attribute. 
+func (rb *ResourceBuilder) SetContainerCommandLine(val string) { + if rb.config.ContainerCommandLine.Enabled { + rb.res.Attributes().PutStr("container.command_line", val) + } +} + +// SetContainerHostname sets provided value as "container.hostname" attribute. +func (rb *ResourceBuilder) SetContainerHostname(val string) { + if rb.config.ContainerHostname.Enabled { + rb.res.Attributes().PutStr("container.hostname", val) + } +} + +// SetContainerID sets provided value as "container.id" attribute. +func (rb *ResourceBuilder) SetContainerID(val string) { + if rb.config.ContainerID.Enabled { + rb.res.Attributes().PutStr("container.id", val) + } +} + +// SetContainerImageID sets provided value as "container.image.id" attribute. +func (rb *ResourceBuilder) SetContainerImageID(val string) { + if rb.config.ContainerImageID.Enabled { + rb.res.Attributes().PutStr("container.image.id", val) + } +} + +// SetContainerImageName sets provided value as "container.image.name" attribute. +func (rb *ResourceBuilder) SetContainerImageName(val string) { + if rb.config.ContainerImageName.Enabled { + rb.res.Attributes().PutStr("container.image.name", val) + } +} + +// SetContainerName sets provided value as "container.name" attribute. +func (rb *ResourceBuilder) SetContainerName(val string) { + if rb.config.ContainerName.Enabled { + rb.res.Attributes().PutStr("container.name", val) + } +} + +// SetContainerRuntime sets provided value as "container.runtime" attribute. +func (rb *ResourceBuilder) SetContainerRuntime(val string) { + if rb.config.ContainerRuntime.Enabled { + rb.res.Attributes().PutStr("container.runtime", val) + } +} + +// Emit returns the built resource and resets the internal builder state. 
+func (rb *ResourceBuilder) Emit() pcommon.Resource { + r := rb.res + rb.res = pcommon.NewResource() + return r +} diff --git a/internal/docker/receiver/internal/metadata/generated_resource_test.go b/internal/docker/receiver/internal/metadata/generated_resource_test.go new file mode 100644 index 000000000000..5a9e037b2bdd --- /dev/null +++ b/internal/docker/receiver/internal/metadata/generated_resource_test.go @@ -0,0 +1,76 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestResourceBuilder(t *testing.T) { + for _, test := range []string{"default", "all_set", "none_set"} { + t.Run(test, func(t *testing.T) { + cfg := loadResourceAttributesConfig(t, test) + rb := NewResourceBuilder(cfg) + rb.SetContainerCommandLine("container.command_line-val") + rb.SetContainerHostname("container.hostname-val") + rb.SetContainerID("container.id-val") + rb.SetContainerImageID("container.image.id-val") + rb.SetContainerImageName("container.image.name-val") + rb.SetContainerName("container.name-val") + rb.SetContainerRuntime("container.runtime-val") + + res := rb.Emit() + assert.Equal(t, 0, rb.Emit().Attributes().Len()) // Second call should return empty Resource + + switch test { + case "default": + assert.Equal(t, 5, res.Attributes().Len()) + case "all_set": + assert.Equal(t, 7, res.Attributes().Len()) + case "none_set": + assert.Equal(t, 0, res.Attributes().Len()) + return + default: + assert.Failf(t, "unexpected test case: %s", test) + } + + val, ok := res.Attributes().Get("container.command_line") + assert.Equal(t, test == "all_set", ok) + if ok { + assert.EqualValues(t, "container.command_line-val", val.Str()) + } + val, ok = res.Attributes().Get("container.hostname") + assert.True(t, ok) + if ok { + assert.EqualValues(t, "container.hostname-val", val.Str()) + } + val, ok = res.Attributes().Get("container.id") + assert.True(t, ok) + if ok { + assert.EqualValues(t, "container.id-val", 
val.Str()) + } + val, ok = res.Attributes().Get("container.image.id") + assert.Equal(t, test == "all_set", ok) + if ok { + assert.EqualValues(t, "container.image.id-val", val.Str()) + } + val, ok = res.Attributes().Get("container.image.name") + assert.True(t, ok) + if ok { + assert.EqualValues(t, "container.image.name-val", val.Str()) + } + val, ok = res.Attributes().Get("container.name") + assert.True(t, ok) + if ok { + assert.EqualValues(t, "container.name-val", val.Str()) + } + val, ok = res.Attributes().Get("container.runtime") + assert.True(t, ok) + if ok { + assert.EqualValues(t, "container.runtime-val", val.Str()) + } + }) + } +} diff --git a/internal/docker/receiver/internal/metadata/testdata/config.yaml b/internal/docker/receiver/internal/metadata/testdata/config.yaml new file mode 100644 index 000000000000..34f3f7419590 --- /dev/null +++ b/internal/docker/receiver/internal/metadata/testdata/config.yaml @@ -0,0 +1,379 @@ +default: +all_set: + metrics: + container.blockio.io_merged_recursive: + enabled: true + container.blockio.io_queued_recursive: + enabled: true + container.blockio.io_service_bytes_recursive: + enabled: true + container.blockio.io_service_time_recursive: + enabled: true + container.blockio.io_serviced_recursive: + enabled: true + container.blockio.io_time_recursive: + enabled: true + container.blockio.io_wait_time_recursive: + enabled: true + container.blockio.sectors_recursive: + enabled: true + container.cpu.limit: + enabled: true + container.cpu.logical.count: + enabled: true + container.cpu.shares: + enabled: true + container.cpu.throttling_data.periods: + enabled: true + container.cpu.throttling_data.throttled_periods: + enabled: true + container.cpu.throttling_data.throttled_time: + enabled: true + container.cpu.usage.kernelmode: + enabled: true + container.cpu.usage.percpu: + enabled: true + container.cpu.usage.system: + enabled: true + container.cpu.usage.total: + enabled: true + container.cpu.usage.usermode: + enabled: true + 
container.cpu.utilization: + enabled: true + container.memory.active_anon: + enabled: true + container.memory.active_file: + enabled: true + container.memory.anon: + enabled: true + container.memory.cache: + enabled: true + container.memory.dirty: + enabled: true + container.memory.fails: + enabled: true + container.memory.file: + enabled: true + container.memory.hierarchical_memory_limit: + enabled: true + container.memory.hierarchical_memsw_limit: + enabled: true + container.memory.inactive_anon: + enabled: true + container.memory.inactive_file: + enabled: true + container.memory.mapped_file: + enabled: true + container.memory.percent: + enabled: true + container.memory.pgfault: + enabled: true + container.memory.pgmajfault: + enabled: true + container.memory.pgpgin: + enabled: true + container.memory.pgpgout: + enabled: true + container.memory.rss: + enabled: true + container.memory.rss_huge: + enabled: true + container.memory.total_active_anon: + enabled: true + container.memory.total_active_file: + enabled: true + container.memory.total_cache: + enabled: true + container.memory.total_dirty: + enabled: true + container.memory.total_inactive_anon: + enabled: true + container.memory.total_inactive_file: + enabled: true + container.memory.total_mapped_file: + enabled: true + container.memory.total_pgfault: + enabled: true + container.memory.total_pgmajfault: + enabled: true + container.memory.total_pgpgin: + enabled: true + container.memory.total_pgpgout: + enabled: true + container.memory.total_rss: + enabled: true + container.memory.total_rss_huge: + enabled: true + container.memory.total_unevictable: + enabled: true + container.memory.total_writeback: + enabled: true + container.memory.unevictable: + enabled: true + container.memory.usage.limit: + enabled: true + container.memory.usage.max: + enabled: true + container.memory.usage.total: + enabled: true + container.memory.writeback: + enabled: true + container.network.io.usage.rx_bytes: + enabled: true + 
container.network.io.usage.rx_dropped: + enabled: true + container.network.io.usage.rx_errors: + enabled: true + container.network.io.usage.rx_packets: + enabled: true + container.network.io.usage.tx_bytes: + enabled: true + container.network.io.usage.tx_dropped: + enabled: true + container.network.io.usage.tx_errors: + enabled: true + container.network.io.usage.tx_packets: + enabled: true + container.pids.count: + enabled: true + container.pids.limit: + enabled: true + container.restarts: + enabled: true + container.uptime: + enabled: true + resource_attributes: + container.command_line: + enabled: true + container.hostname: + enabled: true + container.id: + enabled: true + container.image.id: + enabled: true + container.image.name: + enabled: true + container.name: + enabled: true + container.runtime: + enabled: true +none_set: + metrics: + container.blockio.io_merged_recursive: + enabled: false + container.blockio.io_queued_recursive: + enabled: false + container.blockio.io_service_bytes_recursive: + enabled: false + container.blockio.io_service_time_recursive: + enabled: false + container.blockio.io_serviced_recursive: + enabled: false + container.blockio.io_time_recursive: + enabled: false + container.blockio.io_wait_time_recursive: + enabled: false + container.blockio.sectors_recursive: + enabled: false + container.cpu.limit: + enabled: false + container.cpu.logical.count: + enabled: false + container.cpu.shares: + enabled: false + container.cpu.throttling_data.periods: + enabled: false + container.cpu.throttling_data.throttled_periods: + enabled: false + container.cpu.throttling_data.throttled_time: + enabled: false + container.cpu.usage.kernelmode: + enabled: false + container.cpu.usage.percpu: + enabled: false + container.cpu.usage.system: + enabled: false + container.cpu.usage.total: + enabled: false + container.cpu.usage.usermode: + enabled: false + container.cpu.utilization: + enabled: false + container.memory.active_anon: + enabled: false + 
container.memory.active_file: + enabled: false + container.memory.anon: + enabled: false + container.memory.cache: + enabled: false + container.memory.dirty: + enabled: false + container.memory.fails: + enabled: false + container.memory.file: + enabled: false + container.memory.hierarchical_memory_limit: + enabled: false + container.memory.hierarchical_memsw_limit: + enabled: false + container.memory.inactive_anon: + enabled: false + container.memory.inactive_file: + enabled: false + container.memory.mapped_file: + enabled: false + container.memory.percent: + enabled: false + container.memory.pgfault: + enabled: false + container.memory.pgmajfault: + enabled: false + container.memory.pgpgin: + enabled: false + container.memory.pgpgout: + enabled: false + container.memory.rss: + enabled: false + container.memory.rss_huge: + enabled: false + container.memory.total_active_anon: + enabled: false + container.memory.total_active_file: + enabled: false + container.memory.total_cache: + enabled: false + container.memory.total_dirty: + enabled: false + container.memory.total_inactive_anon: + enabled: false + container.memory.total_inactive_file: + enabled: false + container.memory.total_mapped_file: + enabled: false + container.memory.total_pgfault: + enabled: false + container.memory.total_pgmajfault: + enabled: false + container.memory.total_pgpgin: + enabled: false + container.memory.total_pgpgout: + enabled: false + container.memory.total_rss: + enabled: false + container.memory.total_rss_huge: + enabled: false + container.memory.total_unevictable: + enabled: false + container.memory.total_writeback: + enabled: false + container.memory.unevictable: + enabled: false + container.memory.usage.limit: + enabled: false + container.memory.usage.max: + enabled: false + container.memory.usage.total: + enabled: false + container.memory.writeback: + enabled: false + container.network.io.usage.rx_bytes: + enabled: false + container.network.io.usage.rx_dropped: + enabled: false + 
container.network.io.usage.rx_errors: + enabled: false + container.network.io.usage.rx_packets: + enabled: false + container.network.io.usage.tx_bytes: + enabled: false + container.network.io.usage.tx_dropped: + enabled: false + container.network.io.usage.tx_errors: + enabled: false + container.network.io.usage.tx_packets: + enabled: false + container.pids.count: + enabled: false + container.pids.limit: + enabled: false + container.restarts: + enabled: false + container.uptime: + enabled: false + resource_attributes: + container.command_line: + enabled: false + container.hostname: + enabled: false + container.id: + enabled: false + container.image.id: + enabled: false + container.image.name: + enabled: false + container.name: + enabled: false + container.runtime: + enabled: false +filter_set_include: + resource_attributes: + container.command_line: + enabled: true + metrics_include: + - regexp: ".*" + container.hostname: + enabled: true + metrics_include: + - regexp: ".*" + container.id: + enabled: true + metrics_include: + - regexp: ".*" + container.image.id: + enabled: true + metrics_include: + - regexp: ".*" + container.image.name: + enabled: true + metrics_include: + - regexp: ".*" + container.name: + enabled: true + metrics_include: + - regexp: ".*" + container.runtime: + enabled: true + metrics_include: + - regexp: ".*" +filter_set_exclude: + resource_attributes: + container.command_line: + enabled: true + metrics_exclude: + - strict: "container.command_line-val" + container.hostname: + enabled: true + metrics_exclude: + - strict: "container.hostname-val" + container.id: + enabled: true + metrics_exclude: + - strict: "container.id-val" + container.image.id: + enabled: true + metrics_exclude: + - strict: "container.image.id-val" + container.image.name: + enabled: true + metrics_exclude: + - strict: "container.image.name-val" + container.name: + enabled: true + metrics_exclude: + - strict: "container.name-val" + container.runtime: + enabled: true + 
metrics_exclude: + - strict: "container.runtime-val" diff --git a/internal/docker/receiver/metadata.yaml b/internal/docker/receiver/metadata.yaml new file mode 100644 index 000000000000..d7c32673e6e4 --- /dev/null +++ b/internal/docker/receiver/metadata.yaml @@ -0,0 +1,705 @@ +type: docker/receiver +scope_name: otelcol/docker/receiver + +parent: docker + +sem_conv_version: 1.6.1 + +# Note: there are other, additional resource attributes that the user can configure through the yaml +resource_attributes: + container.runtime: + description: "The runtime of the container. For this receiver, it will always be 'docker'." + type: string + enabled: true + container.id: + description: "The ID of the container." + type: string + enabled: true + container.image.name: + description: "The name of the docker image in use by the container." + type: string + enabled: true + container.name: + description: "The name of the container." + type: string + enabled: true + container.hostname: + description: "The hostname of the container." + type: string + enabled: true + container.image.id: + description: "The ID of the container image." + type: string + enabled: false + container.command_line: + description: "The full command executed by the container." + type: string + enabled: false + +attributes: + core: + description: "The CPU core number when utilising per-CPU metrics." + type: string + device_major: + description: "Device major number for block IO operations." + type: string + device_minor: + description: "Device minor number for block IO operations." + type: string + interface: + description: "Network interface." + type: string + operation: + description: "Type of BlockIO operation." + type: string + +metrics: + # CPU + container.cpu.usage.system: + enabled: false + description: "System CPU usage, as reported by docker." + extended_documentation: "Note this is the usage for the system, not the container." 
+ unit: ns + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + container.cpu.usage.total: + enabled: true + description: "Total CPU time consumed." + unit: ns + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + container.cpu.usage.kernelmode: + enabled: true + description: >- + Time spent by tasks of the cgroup in kernel mode (Linux). + Time spent by all container processes in kernel mode (Windows). + unit: ns + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + container.cpu.usage.usermode: + enabled: true + description: >- + Time spent by tasks of the cgroup in user mode (Linux). + Time spent by all container processes in user mode (Windows). + unit: ns + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + container.cpu.usage.percpu: + enabled: false + description: "Per-core CPU usage by the container (Only available with cgroups v1)." + unit: ns + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + attributes: + - core + container.cpu.throttling_data.periods: + enabled: false + description: "Number of periods with throttling active." + unit: "{periods}" + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + container.cpu.throttling_data.throttled_periods: + enabled: false + description: "Number of periods when the container hits its throttling limit." + unit: "{periods}" + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + container.cpu.throttling_data.throttled_time: + enabled: false + description: "Aggregate time the container was throttled." + unit: ns + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + container.cpu.utilization: + enabled: true + description: "Percent of CPU used by the container." 
+ unit: "1" + gauge: + value_type: double + container.cpu.limit: + enabled: false + description: "CPU limit set for the container." + extended_documentation: "This metric is only reported if the container has limits set with -cpus, -cpuset-cpus or -cpu-quota." + unit: "{cpus}" + gauge: + value_type: double + container.cpu.shares: + enabled: false + description: "CPU shares set for the container." + unit: "1" + gauge: + value_type: int + container.cpu.logical.count: + enabled: false + description: "Number of cores available to the container." + unit: "{cpus}" + gauge: + value_type: int + + + # Memory + container.memory.usage.limit: + enabled: true + description: "Memory limit of the container." + unit: By + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: false + container.memory.usage.total: + enabled: true + description: "Memory usage of the container. This excludes the cache." + unit: By + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: false + container.memory.usage.max: + enabled: false + description: "Maximum memory usage." + unit: By + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: false + container.memory.percent: + enabled: true + description: "Percentage of memory used." + unit: 1 + gauge: + value_type: double + container.memory.cache: + enabled: false + description: "The amount of memory used by the processes of this control group that can be associated precisely with a block on a block device (Only available with cgroups v1)." + unit: By + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: false + container.memory.rss: + enabled: false + description: "The amount of memory that doesn’t correspond to anything on disk: stacks, heaps, and anonymous memory maps (Only available with cgroups v1)." 
+ unit: By + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: false + container.memory.rss_huge: + enabled: false + description: "Number of bytes of anonymous transparent hugepages in this cgroup (Only available with cgroups v1)." + unit: By + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: false + container.memory.dirty: + enabled: false + description: "Bytes that are waiting to get written back to the disk, from this cgroup (Only available with cgroups v1)." + unit: By + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: false + container.memory.writeback: + enabled: false + description: "Number of bytes of file/anon cache that are queued for syncing to disk in this cgroup (Only available with cgroups v1)." + unit: By + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: false + container.memory.mapped_file: + enabled: false + description: "Indicates the amount of memory mapped by the processes in the control group (Only available with cgroups v1)." + unit: By + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: false + container.memory.pgpgin: + enabled: false + description: "Number of pages read from disk by the cgroup (Only available with cgroups v1)." + extended_documentation: "[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt)." + unit: "{operations}" + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: true + container.memory.pgpgout: + enabled: false + description: "Number of pages written to disk by the cgroup (Only available with cgroups v1)." + extended_documentation: "[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt)." + unit: "{operations}" + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: true + container.memory.pgfault: + enabled: false + description: "Indicate the number of times that a process of the cgroup triggered a page fault." 
+ unit: "{faults}" + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: true + container.memory.pgmajfault: + enabled: false + description: "Indicate the number of times that a process of the cgroup triggered a major fault." + unit: "{faults}" + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: true + container.memory.inactive_anon: + enabled: false + description: "The amount of anonymous memory that has been identified as inactive by the kernel." + unit: By + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: false + container.memory.active_anon: + enabled: false + description: "The amount of anonymous memory that has been identified as active by the kernel." + unit: By + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: false + container.memory.inactive_file: + enabled: false + description: "Cache memory that has been identified as inactive by the kernel." + extended_documentation: "[More docs](https://docs.docker.com/config/containers/runmetrics/)" + unit: By + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: false + container.memory.active_file: + enabled: false + description: "Cache memory that has been identified as active by the kernel." + extended_documentation: "[More docs](https://docs.docker.com/config/containers/runmetrics/)" + unit: By + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: false + container.memory.unevictable: + enabled: false + description: "The amount of memory that cannot be reclaimed." + unit: By + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: false + container.memory.hierarchical_memory_limit: + enabled: false + description: "The maximum amount of physical memory that can be used by the processes of this control group (Only available with cgroups v1)." 
+ unit: By + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: false + container.memory.hierarchical_memsw_limit: + enabled: false + description: "The maximum amount of RAM + swap that can be used by the processes of this control group (Only available with cgroups v1)." + unit: By + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: false + container.memory.total_cache: + enabled: true + description: "Total amount of memory used by the processes of this cgroup (and descendants) that can be associated with a block on a block device. Also accounts for memory used by tmpfs (Only available with cgroups v1)." + unit: By + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: false + container.memory.total_rss: + enabled: false + description: "The amount of memory that doesn’t correspond to anything on disk: stacks, heaps, and anonymous memory maps. Includes descendant cgroups (Only available with cgroups v1)." + unit: By + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: false + container.memory.total_rss_huge: + enabled: false + description: "Number of bytes of anonymous transparent hugepages in this cgroup and descendant cgroups (Only available with cgroups v1)." + unit: By + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: false + container.memory.total_dirty: + enabled: false + description: "Bytes that are waiting to get written back to the disk, from this cgroup and descendants (Only available with cgroups v1)." + unit: By + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: false + container.memory.total_writeback: + enabled: false + description: "Number of bytes of file/anon cache that are queued for syncing to disk in this cgroup and descendants (Only available with cgroups v1)." 
+ unit: By + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: false + container.memory.total_mapped_file: + enabled: false + description: "Indicates the amount of memory mapped by the processes in the control group and descendant groups (Only available with cgroups v1)." + unit: By + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: false + container.memory.total_pgpgin: + enabled: false + description: "Number of pages read from disk by the cgroup and descendant groups (Only available with cgroups v1)." + unit: "{operations}" + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: true + container.memory.total_pgpgout: + enabled: false + description: "Number of pages written to disk by the cgroup and descendant groups (Only available with cgroups v1)." + unit: "{operations}" + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: true + container.memory.total_pgfault: + enabled: false + description: "Indicate the number of times that a process of the cgroup (or descendant cgroups) triggered a page fault (Only available with cgroups v1)." + unit: "{faults}" + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: true + container.memory.total_pgmajfault: + enabled: false + description: "Indicate the number of times that a process of the cgroup (or descendant cgroups) triggered a major fault (Only available with cgroups v1)." + unit: "{faults}" + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: true + container.memory.total_inactive_anon: + enabled: false + description: "The amount of anonymous memory that has been identified as inactive by the kernel. Includes descendant cgroups (Only available with cgroups v1)." 
+ unit: By + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: false + container.memory.total_active_anon: + enabled: false + description: "The amount of anonymous memory that has been identified as active by the kernel. Includes descendant cgroups (Only available with cgroups v1)." + unit: By + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: false + container.memory.total_inactive_file: + enabled: false + description: "Cache memory that has been identified as inactive by the kernel. Includes descendant cgroups (Only available with cgroups v1)." + extended_documentation: "[More docs](https://docs.docker.com/config/containers/runmetrics/)." + unit: By + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: false + container.memory.total_active_file: + enabled: false + description: "Cache memory that has been identified as active by the kernel. Includes descendant cgroups (Only available with cgroups v1)." + extended_documentation: "[More docs](https://docs.docker.com/config/containers/runmetrics/)." + unit: By + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: false + container.memory.total_unevictable: + enabled: false + description: "The amount of memory that cannot be reclaimed. Includes descendant cgroups (Only available with cgroups v1)." + unit: By + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: false + container.memory.anon: + enabled: false + description: "Amount of memory used in anonymous mappings such as brk(), sbrk(), and mmap(MAP_ANONYMOUS) (Only available with cgroups v2)." + extended_documentation: "[More docs](https://www.kernel.org/doc/Documentation/cgroup-v2.txt)" + unit: By + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: false + container.memory.file: + enabled: true + description: "Amount of memory used to cache filesystem data, including tmpfs and shared memory (Only available with cgroups v2)." 
+ extended_documentation: "[More docs](https://www.kernel.org/doc/Documentation/cgroup-v2.txt)" + unit: By + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: false + container.memory.fails: + enabled: false + description: "Number of times the memory limit was hit." + unit: "{fails}" + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: true + + + # BlockIO (cgroup v1) and IO (cgroup v2) controllers + container.blockio.io_merged_recursive: + enabled: false + description: "Number of bios/requests merged into requests belonging to this cgroup and its descendant cgroups (Only available with cgroups v1)." + extended_documentation: "[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt)." + unit: "{operations}" + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + attributes: + - device_major + - device_minor + - operation + container.blockio.io_queued_recursive: + enabled: false + description: "Number of requests queued up for this cgroup and its descendant cgroups (Only available with cgroups v1)." + extended_documentation: "[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt)." + unit: "{operations}" + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + attributes: + - device_major + - device_minor + - operation + container.blockio.io_service_bytes_recursive: + enabled: true + description: "Number of bytes transferred to/from the disk by the group and descendant groups." 
+ extended_documentation: >- + More docs + for [cgroups v1](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt) + and [cgroups v2](https://www.kernel.org/doc/Documentation/cgroup-v2.txt) + unit: By + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + attributes: + - device_major + - device_minor + - operation + container.blockio.io_service_time_recursive: + enabled: false + description: "Total amount of time in nanoseconds between request dispatch and request completion for the IOs done by this cgroup and descendant cgroups (Only available with cgroups v1)." + extended_documentation: "[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt)." + unit: ns + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + attributes: + - device_major + - device_minor + - operation + container.blockio.io_serviced_recursive: + enabled: false + description: "Number of IOs (bio) issued to the disk by the group and descendant groups (Only available with cgroups v1)." + extended_documentation: "[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt)." + unit: "{operations}" + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + attributes: + - device_major + - device_minor + - operation + container.blockio.io_time_recursive: + enabled: false + description: "Disk time allocated to cgroup (and descendant cgroups) per device in milliseconds (Only available with cgroups v1)." + extended_documentation: "[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt)." 
+ unit: ms + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + attributes: + - device_major + - device_minor + - operation + container.blockio.io_wait_time_recursive: + enabled: false + description: "Total amount of time the IOs for this cgroup (and descendant cgroups) spent waiting in the scheduler queues for service (Only available with cgroups v1)." + extended_documentation: "[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt)." + unit: ns + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + attributes: + - device_major + - device_minor + - operation + container.blockio.sectors_recursive: + enabled: false + description: "Number of sectors transferred to/from disk by the group and descendant groups (Only available with cgroups v1)." + extended_documentation: "[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt)." + unit: "{sectors}" + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + attributes: + - device_major + - device_minor + - operation + + # Network + container.network.io.usage.rx_bytes: + enabled: true + description: "Bytes received by the container." + unit: By + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + attributes: + - interface + container.network.io.usage.tx_bytes: + enabled: true + description: "Bytes sent." + unit: By + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + attributes: + - interface + container.network.io.usage.rx_dropped: + enabled: true + description: "Incoming packets dropped." + unit: "{packets}" + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + attributes: + - interface + container.network.io.usage.tx_dropped: + enabled: true + description: "Outgoing packets dropped." 
+ unit: "{packets}" + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + attributes: + - interface + container.network.io.usage.rx_errors: + enabled: false + description: "Received errors." + unit: "{errors}" + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + attributes: + - interface + container.network.io.usage.tx_errors: + enabled: false + description: "Sent errors." + unit: "{errors}" + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + attributes: + - interface + container.network.io.usage.rx_packets: + enabled: false + description: "Packets received." + unit: "{packets}" + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + attributes: + - interface + container.network.io.usage.tx_packets: + enabled: false + description: "Packets sent." + unit: "{packets}" + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + attributes: + - interface + + # Pids + container.pids.count: + enabled: false + description: "Number of pids in the container's cgroup." + extended_documentation: "It requires docker API 1.23 or higher and kernel version >= 4.3 with pids cgroup supported. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/pids.txt)" + unit: "{pids}" + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: false + + container.pids.limit: + enabled: false + description: "Maximum number of pids in the container's cgroup." + extended_documentation: "It requires docker API 1.23 or higher and kernel version >= 4.3 with pids cgroup supported. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/pids.txt)" + unit: "{pids}" + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: false + + # Base + container.uptime: + enabled: false + description: "Time elapsed since container start time." 
+ unit: s + gauge: + value_type: double + + # Container + container.restarts: + enabled: false + description: "Number of restarts for the container." + unit: "{restarts}" + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative diff --git a/internal/docker/receiver/metric_helper.go b/internal/docker/receiver/metric_helper.go new file mode 100644 index 000000000000..4e71f3e40869 --- /dev/null +++ b/internal/docker/receiver/metric_helper.go @@ -0,0 +1,134 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package receiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker/receiver" + +import ( + "fmt" + "strconv" + "strings" + + dtypes "github.com/docker/docker/api/types" + ctypes "github.com/docker/docker/api/types/container" +) + +const nanosInASecond = 1e9 + +// Following functions has been copied from: calculateCPUPercentUnix(), calculateMemUsageUnixNoCache(), calculateMemPercentUnixNoCache() +// https://github.com/docker/cli/blob/a2e9ed3b874fccc177b9349f3b0277612403934f/cli/command/container/stats_helpers.go + +// Copyright 2012-2017 Docker, Inc. +// This product includes software developed at Docker, Inc. (https://www.docker.com). +// The following is courtesy of our legal counsel: +// Use and transfer of Docker may be subject to certain restrictions by the +// United States and other governments. +// It is your responsibility to ensure that your use and/or transfer does not +// violate applicable laws. +// For more information, please see https://www.bis.doc.gov +// See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. 
+ +func calculateCPUPercent(previous *dtypes.CPUStats, v *dtypes.CPUStats) float64 { + var ( + cpuPercent = 0.0 + // calculate the change for the cpu usage of the container in between readings + cpuDelta = float64(v.CPUUsage.TotalUsage) - float64(previous.CPUUsage.TotalUsage) + // calculate the change for the entire system between readings + systemDelta = float64(v.SystemUsage) - float64(previous.SystemUsage) + onlineCPUs = float64(v.OnlineCPUs) + ) + + if onlineCPUs == 0.0 { + onlineCPUs = float64(len(v.CPUUsage.PercpuUsage)) + } + if systemDelta > 0.0 && cpuDelta > 0.0 { + cpuPercent = (cpuDelta / systemDelta) * onlineCPUs * 100.0 + } + return cpuPercent +} + +// calculateMemUsageNoCache calculate memory usage of the container. +// Cache is intentionally excluded to avoid misinterpretation of the output. +// +// On cgroup v1 host, the result is `mem.Usage - mem.Stats["total_inactive_file"]` . +// On cgroup v2 host, the result is `mem.Usage - mem.Stats["inactive_file"] `. +// +// This definition is consistent with cadvisor and containerd/CRI. +// * https://github.com/google/cadvisor/commit/307d1b1cb320fef66fab02db749f07a459245451 +// * https://github.com/containerd/cri/commit/6b8846cdf8b8c98c1d965313d66bc8489166059a +// +// On Docker 19.03 and older, the result was `mem.Usage - mem.Stats["cache"]`. +// See https://github.com/moby/moby/issues/40727 for the background. 
+func calculateMemUsageNoCache(memoryStats *dtypes.MemoryStats) uint64 {
+	// cgroup v1
+	if v, isCgroup1 := memoryStats.Stats["total_inactive_file"]; isCgroup1 && v < memoryStats.Usage {
+		return memoryStats.Usage - v
+	}
+	// cgroup v2
+	if v := memoryStats.Stats["inactive_file"]; v < memoryStats.Usage {
+		return memoryStats.Usage - v
+	}
+	return memoryStats.Usage
+}
+
+func calculateMemoryPercent(limit uint64, usedNoCache uint64) float64 {
+	// MemoryStats.Limit will never be 0 unless the container is not running and we haven't
+	// got any data from cgroup
+	if limit != 0 {
+		return float64(usedNoCache) / float64(limit) * 100.0
+	}
+	return 0.0
+}
+
+// calculateCPULimit calculate the number of cpus assigned to a container.
+//
+// Calculation is based on 3 alternatives by the following order:
+// - nanocpus: if set by i.e docker run -cpus=2
+// - cpusetCpus: if set by i.e docker run -cpuset-cpus="0,2"
+// - cpuquota: if set by i.e docker run -cpu-quota=50000
+//
+// See https://docs.docker.com/config/containers/resource_constraints/#configure-the-default-cfs-scheduler for background.
+func calculateCPULimit(hostConfig *ctypes.HostConfig) (float64, error) {
+	var cpuLimit float64
+	var err error
+
+	switch {
+	case hostConfig.NanoCPUs > 0:
+		cpuLimit = float64(hostConfig.NanoCPUs) / nanosInASecond
+	case hostConfig.CpusetCpus != "":
+		cpuLimit, err = parseCPUSet(hostConfig.CpusetCpus)
+		if err != nil {
+			return cpuLimit, err
+		}
+	case hostConfig.CPUQuota > 0:
+		period := hostConfig.CPUPeriod
+		if period == 0 {
+			period = 100000 // Default CFS Period
+		}
+		cpuLimit = float64(hostConfig.CPUQuota) / float64(period)
+	}
+	return cpuLimit, nil
+}
+
+// parseCPUSet helper function to decompose -cpuset-cpus value into the number of cpus.
+func parseCPUSet(line string) (float64, error) { + var numCPUs uint64 + + lineSlice := strings.Split(line, ",") + for _, l := range lineSlice { + lineParts := strings.Split(l, "-") + if len(lineParts) == 2 { + p0, err0 := strconv.Atoi(lineParts[0]) + if err0 != nil { + return 0, fmt.Errorf("invalid -cpuset-cpus value: %w", err0) + } + p1, err1 := strconv.Atoi(lineParts[1]) + if err1 != nil { + return 0, fmt.Errorf("invalid -cpuset-cpus value: %w", err1) + } + numCPUs += uint64(p1 - p0 + 1) + } else if len(lineParts) == 1 { + numCPUs++ + } + } + return float64(numCPUs), nil +} diff --git a/internal/docker/receiver/metric_helper_test.go b/internal/docker/receiver/metric_helper_test.go new file mode 100644 index 000000000000..e9d0635b3571 --- /dev/null +++ b/internal/docker/receiver/metric_helper_test.go @@ -0,0 +1,121 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package receiver + +import ( + "errors" + "testing" + + ctypes "github.com/docker/docker/api/types/container" + "github.com/stretchr/testify/assert" +) + +func Test_calculateCPULimit1(t *testing.T) { + tests := []struct { + name string + args *ctypes.HostConfig + want float64 + err error + }{ + { + "Test CPULimit", + &ctypes.HostConfig{ + Resources: ctypes.Resources{ + NanoCPUs: 2500000000, + }, + }, + 2.5, + nil, + }, + { + "Test CPUSetCpu", + &ctypes.HostConfig{ + Resources: ctypes.Resources{ + CpusetCpus: "0-2", + }, + }, + 3, + nil, + }, + { + "Test CPUQuota", + &ctypes.HostConfig{ + Resources: ctypes.Resources{ + CPUQuota: 50000, + }, + }, + 0.5, + nil, + }, + { + "Test CPUQuota Custom Period", + &ctypes.HostConfig{ + Resources: ctypes.Resources{ + CPUQuota: 300000, + CPUPeriod: 200000, + }, + }, + 1.5, + nil, + }, + { + "Test Default", + &ctypes.HostConfig{ + Resources: ctypes.Resources{ + NanoCPUs: 1800000000, + CpusetCpus: "0-1", + CPUQuota: 400000, + }, + }, + 1.8, + nil, + }, + { + "Test No Values", + &ctypes.HostConfig{ + Resources: ctypes.Resources{}, + }, 
+ 0, + nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + want, err := calculateCPULimit(tt.args) + assert.Equalf(t, tt.want, want, "calculateCPULimit(%v)", tt.args) + assert.Equalf(t, tt.err, err, "calculateCPULimit(%v)", tt.args) + }) + } +} + +func Test_parseCPUSet(t *testing.T) { + tests := []struct { + input string + expected float64 + err error + }{ + {"0,2", 2, nil}, + {"0-2", 3, nil}, + {"0-2,4", 4, nil}, + {"0-2,4-5", 5, nil}, + {"a-b", 0, errors.New("invalid -cpuset-cpus value: strconv.Atoi: parsing \"a\": invalid syntax")}, + {"", 1, nil}, + } + + for _, test := range tests { + result, err := parseCPUSet(test.input) + + if err != nil && test.err != nil { + if err.Error() != test.err.Error() { + t.Errorf("parseCPUSet(%s) returned error %v, expected %v", test.input, err, test.err) + } + } else if !errors.Is(err, test.err) { + t.Errorf("parseCPUSet(%s) returned error %v, expected %v", test.input, err, test.err) + } + + if result != test.expected { + t.Errorf("parseCPUSet(%s) returned %f, expected %f", test.input, result, test.expected) + } + } +} diff --git a/internal/docker/receiver/metrics_receiver.go b/internal/docker/receiver/metrics_receiver.go new file mode 100644 index 000000000000..9d08156194ea --- /dev/null +++ b/internal/docker/receiver/metrics_receiver.go @@ -0,0 +1,331 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package receiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker/receiver" + +import ( + "context" + "fmt" + "strconv" + "strings" + "sync" + "time" + + "github.com/docker/docker/api/types" + dtypes "github.com/docker/docker/api/types" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/receiver" + "go.opentelemetry.io/collector/receiver/scrapererror" + 
"go.opentelemetry.io/collector/receiver/scraperhelper" + "go.uber.org/multierr" + + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker/receiver/internal/metadata" +) + +var ( + defaultDockerAPIVersion = "1.25" + minimumRequiredDockerAPIVersion = docker.MustNewAPIVersion(defaultDockerAPIVersion) +) + +type resultV2 struct { + stats *dtypes.StatsJSON + container *docker.Container + err error +} + +type MetricsReceiver struct { + Config *Config + settings receiver.CreateSettings + client *docker.Client + mb *metadata.MetricsBuilder + cancel context.CancelFunc +} + +func NewMetricsReceiver(set receiver.CreateSettings, config *Config) *MetricsReceiver { + return &MetricsReceiver{ + Config: config, + settings: set, + mb: metadata.NewMetricsBuilder(config.MetricsBuilderConfig, set), + } +} + +func (r *MetricsReceiver) Start(ctx context.Context, _ component.Host) error { + dConfig, err := docker.NewConfig(r.Config.Endpoint, r.Config.Timeout, r.Config.ExcludedImages, r.Config.DockerAPIVersion) + if err != nil { + return err + } + + r.client, err = docker.NewDockerClient(dConfig, r.settings.Logger) + if err != nil { + return err + } + + if err = r.client.LoadContainerList(ctx); err != nil { + return err + } + + cctx, cancel := context.WithCancel(ctx) + r.cancel = cancel + + go r.client.ContainerEventLoop(cctx) + return nil +} + +func (r *MetricsReceiver) Shutdown(context.Context) error { + if r.cancel != nil { + r.cancel() + } + return nil +} + +func (r *MetricsReceiver) ScrapeV2(ctx context.Context) (pmetric.Metrics, error) { + containers := r.client.Containers() + results := make(chan resultV2, len(containers)) + + wg := &sync.WaitGroup{} + wg.Add(len(containers)) + for _, container := range containers { + go func(c docker.Container) { + defer wg.Done() + statsJSON, err := r.client.FetchContainerStatsAsJSON(ctx, c) + if err != nil { + results <- resultV2{nil, &c, err} + 
return + } + + results <- resultV2{ + stats: statsJSON, + container: &c, + err: nil} + }(container) + } + + wg.Wait() + close(results) + + var errs error + + now := pcommon.NewTimestampFromTime(time.Now()) + for res := range results { + if res.err != nil { + // Don't know the number of failed stats, but one container fetch is a partial error. + errs = multierr.Append(errs, scrapererror.NewPartialScrapeError(res.err, 0)) + continue + } + if err := r.recordContainerStats(now, res.stats, res.container); err != nil { + errs = multierr.Append(errs, err) + } + } + + return r.mb.Emit(), errs +} + +func (r *MetricsReceiver) recordContainerStats(now pcommon.Timestamp, containerStats *dtypes.StatsJSON, container *docker.Container) error { + var errs error + r.recordCPUMetrics(now, &containerStats.CPUStats, &containerStats.PreCPUStats) + r.recordMemoryMetrics(now, &containerStats.MemoryStats) + r.recordBlkioMetrics(now, &containerStats.BlkioStats) + r.recordNetworkMetrics(now, &containerStats.Networks) + r.recordPidsMetrics(now, &containerStats.PidsStats) + if err := r.recordBaseMetrics(now, container.ContainerJSONBase); err != nil { + errs = multierr.Append(errs, err) + } + if err := r.recordHostConfigMetrics(now, container.ContainerJSON); err != nil { + errs = multierr.Append(errs, err) + } + r.mb.RecordContainerRestartsDataPoint(now, int64(container.RestartCount)) + + // Always-present resource attrs + the user-configured resource attrs + rb := r.mb.NewResourceBuilder() + rb.SetContainerRuntime("docker") + rb.SetContainerHostname(container.Config.Hostname) + rb.SetContainerID(container.ID) + rb.SetContainerImageName(container.Config.Image) + rb.SetContainerName(strings.TrimPrefix(container.Name, "/")) + rb.SetContainerImageID(container.Image) + rb.SetContainerCommandLine(strings.Join(container.Config.Cmd, " ")) + resource := rb.Emit() + + for k, label := range r.Config.EnvVarsToMetricLabels { + if v := container.EnvMap[k]; v != "" { + resource.Attributes().PutStr(label, v) 
+ } + } + for k, label := range r.Config.ContainerLabelsToMetricLabels { + if v := container.Config.Labels[k]; v != "" { + resource.Attributes().PutStr(label, v) + } + } + + r.mb.EmitForResource(metadata.WithResource(resource)) + return errs +} + +func (r *MetricsReceiver) recordMemoryMetrics(now pcommon.Timestamp, memoryStats *dtypes.MemoryStats) { + totalUsage := calculateMemUsageNoCache(memoryStats) + r.mb.RecordContainerMemoryUsageTotalDataPoint(now, int64(totalUsage)) + + r.mb.RecordContainerMemoryUsageLimitDataPoint(now, int64(memoryStats.Limit)) + + r.mb.RecordContainerMemoryPercentDataPoint(now, calculateMemoryPercent(memoryStats.Limit, totalUsage)) + + r.mb.RecordContainerMemoryUsageMaxDataPoint(now, int64(memoryStats.MaxUsage)) + + r.mb.RecordContainerMemoryFailsDataPoint(now, int64(memoryStats.Failcnt)) + + recorders := map[string]func(pcommon.Timestamp, int64){ + "cache": r.mb.RecordContainerMemoryCacheDataPoint, + "total_cache": r.mb.RecordContainerMemoryTotalCacheDataPoint, + "rss": r.mb.RecordContainerMemoryRssDataPoint, + "total_rss": r.mb.RecordContainerMemoryTotalRssDataPoint, + "rss_huge": r.mb.RecordContainerMemoryRssHugeDataPoint, + "total_rss_huge": r.mb.RecordContainerMemoryTotalRssHugeDataPoint, + "dirty": r.mb.RecordContainerMemoryDirtyDataPoint, + "total_dirty": r.mb.RecordContainerMemoryTotalDirtyDataPoint, + "writeback": r.mb.RecordContainerMemoryWritebackDataPoint, + "total_writeback": r.mb.RecordContainerMemoryTotalWritebackDataPoint, + "mapped_file": r.mb.RecordContainerMemoryMappedFileDataPoint, + "total_mapped_file": r.mb.RecordContainerMemoryTotalMappedFileDataPoint, + "pgpgin": r.mb.RecordContainerMemoryPgpginDataPoint, + "total_pgpgin": r.mb.RecordContainerMemoryTotalPgpginDataPoint, + "pgpgout": r.mb.RecordContainerMemoryPgpgoutDataPoint, + "total_pgpgout": r.mb.RecordContainerMemoryTotalPgpgoutDataPoint, + "pgfault": r.mb.RecordContainerMemoryPgfaultDataPoint, + "total_pgfault": r.mb.RecordContainerMemoryTotalPgfaultDataPoint, 
+ "pgmajfault": r.mb.RecordContainerMemoryPgmajfaultDataPoint, + "total_pgmajfault": r.mb.RecordContainerMemoryTotalPgmajfaultDataPoint, + "inactive_anon": r.mb.RecordContainerMemoryInactiveAnonDataPoint, + "total_inactive_anon": r.mb.RecordContainerMemoryTotalInactiveAnonDataPoint, + "active_anon": r.mb.RecordContainerMemoryActiveAnonDataPoint, + "total_active_anon": r.mb.RecordContainerMemoryTotalActiveAnonDataPoint, + "inactive_file": r.mb.RecordContainerMemoryInactiveFileDataPoint, + "total_inactive_file": r.mb.RecordContainerMemoryTotalInactiveFileDataPoint, + "active_file": r.mb.RecordContainerMemoryActiveFileDataPoint, + "total_active_file": r.mb.RecordContainerMemoryTotalActiveFileDataPoint, + "unevictable": r.mb.RecordContainerMemoryUnevictableDataPoint, + "total_unevictable": r.mb.RecordContainerMemoryTotalUnevictableDataPoint, + "hierarchical_memory_limit": r.mb.RecordContainerMemoryHierarchicalMemoryLimitDataPoint, + "hierarchical_memsw_limit": r.mb.RecordContainerMemoryHierarchicalMemswLimitDataPoint, + "anon": r.mb.RecordContainerMemoryAnonDataPoint, + "file": r.mb.RecordContainerMemoryFileDataPoint, + } + + for name, val := range memoryStats.Stats { + if recorder, ok := recorders[name]; ok { + recorder(now, int64(val)) + } + } +} + +type blkioRecorder func(now pcommon.Timestamp, val int64, devMaj string, devMin string, operation string) + +func (r *MetricsReceiver) recordBlkioMetrics(now pcommon.Timestamp, blkioStats *dtypes.BlkioStats) { + recordSingleBlkioStat(now, blkioStats.IoMergedRecursive, r.mb.RecordContainerBlockioIoMergedRecursiveDataPoint) + recordSingleBlkioStat(now, blkioStats.IoQueuedRecursive, r.mb.RecordContainerBlockioIoQueuedRecursiveDataPoint) + recordSingleBlkioStat(now, blkioStats.IoServiceBytesRecursive, r.mb.RecordContainerBlockioIoServiceBytesRecursiveDataPoint) + recordSingleBlkioStat(now, blkioStats.IoServiceTimeRecursive, r.mb.RecordContainerBlockioIoServiceTimeRecursiveDataPoint) + recordSingleBlkioStat(now, 
blkioStats.IoServicedRecursive, r.mb.RecordContainerBlockioIoServicedRecursiveDataPoint) + recordSingleBlkioStat(now, blkioStats.IoTimeRecursive, r.mb.RecordContainerBlockioIoTimeRecursiveDataPoint) + recordSingleBlkioStat(now, blkioStats.IoWaitTimeRecursive, r.mb.RecordContainerBlockioIoWaitTimeRecursiveDataPoint) + recordSingleBlkioStat(now, blkioStats.SectorsRecursive, r.mb.RecordContainerBlockioSectorsRecursiveDataPoint) +} + +func recordSingleBlkioStat(now pcommon.Timestamp, statEntries []dtypes.BlkioStatEntry, recorder blkioRecorder) { + for _, stat := range statEntries { + recorder( + now, + int64(stat.Value), + strconv.FormatUint(stat.Major, 10), + strconv.FormatUint(stat.Minor, 10), + strings.ToLower(stat.Op)) + } +} + +func (r *MetricsReceiver) recordNetworkMetrics(now pcommon.Timestamp, networks *map[string]dtypes.NetworkStats) { + if networks == nil || *networks == nil { + return + } + + for netInterface, stats := range *networks { + r.mb.RecordContainerNetworkIoUsageRxBytesDataPoint(now, int64(stats.RxBytes), netInterface) + r.mb.RecordContainerNetworkIoUsageTxBytesDataPoint(now, int64(stats.TxBytes), netInterface) + r.mb.RecordContainerNetworkIoUsageRxDroppedDataPoint(now, int64(stats.RxDropped), netInterface) + r.mb.RecordContainerNetworkIoUsageTxDroppedDataPoint(now, int64(stats.TxDropped), netInterface) + r.mb.RecordContainerNetworkIoUsageRxPacketsDataPoint(now, int64(stats.RxPackets), netInterface) + r.mb.RecordContainerNetworkIoUsageTxPacketsDataPoint(now, int64(stats.TxPackets), netInterface) + r.mb.RecordContainerNetworkIoUsageRxErrorsDataPoint(now, int64(stats.RxErrors), netInterface) + r.mb.RecordContainerNetworkIoUsageTxErrorsDataPoint(now, int64(stats.TxErrors), netInterface) + } +} + +func (r *MetricsReceiver) recordCPUMetrics(now pcommon.Timestamp, cpuStats *dtypes.CPUStats, prevStats *dtypes.CPUStats) { + r.mb.RecordContainerCPUUsageSystemDataPoint(now, int64(cpuStats.SystemUsage)) + r.mb.RecordContainerCPUUsageTotalDataPoint(now, 
int64(cpuStats.CPUUsage.TotalUsage)) + r.mb.RecordContainerCPUUsageKernelmodeDataPoint(now, int64(cpuStats.CPUUsage.UsageInKernelmode)) + r.mb.RecordContainerCPUUsageUsermodeDataPoint(now, int64(cpuStats.CPUUsage.UsageInUsermode)) + r.mb.RecordContainerCPUThrottlingDataThrottledPeriodsDataPoint(now, int64(cpuStats.ThrottlingData.ThrottledPeriods)) + r.mb.RecordContainerCPUThrottlingDataPeriodsDataPoint(now, int64(cpuStats.ThrottlingData.Periods)) + r.mb.RecordContainerCPUThrottlingDataThrottledTimeDataPoint(now, int64(cpuStats.ThrottlingData.ThrottledTime)) + r.mb.RecordContainerCPUUtilizationDataPoint(now, calculateCPUPercent(prevStats, cpuStats)) + r.mb.RecordContainerCPULogicalCountDataPoint(now, int64(cpuStats.OnlineCPUs)) + + for coreNum, v := range cpuStats.CPUUsage.PercpuUsage { + r.mb.RecordContainerCPUUsagePercpuDataPoint(now, int64(v), fmt.Sprintf("cpu%s", strconv.Itoa(coreNum))) + } +} + +func (r *MetricsReceiver) recordPidsMetrics(now pcommon.Timestamp, pidsStats *dtypes.PidsStats) { + // pidsStats are available when kernel version is >= 4.3 and pids_cgroup is supported, it is empty otherwise. 
+ if pidsStats.Current != 0 { + r.mb.RecordContainerPidsCountDataPoint(now, int64(pidsStats.Current)) + if pidsStats.Limit != 0 { + r.mb.RecordContainerPidsLimitDataPoint(now, int64(pidsStats.Limit)) + } + } +} + +func (r *MetricsReceiver) recordBaseMetrics(now pcommon.Timestamp, base *types.ContainerJSONBase) error { + t, err := time.Parse(time.RFC3339, base.State.StartedAt) + if err != nil { + // value not available or invalid + return scrapererror.NewPartialScrapeError(fmt.Errorf("error retrieving container.uptime from Container.State.StartedAt: %w", err), 1) + } + if v := now.AsTime().Sub(t); v > 0 { + r.mb.RecordContainerUptimeDataPoint(now, v.Seconds()) + } + return nil +} + +func (r *MetricsReceiver) recordHostConfigMetrics(now pcommon.Timestamp, containerJSON *dtypes.ContainerJSON) error { + r.mb.RecordContainerCPUSharesDataPoint(now, containerJSON.HostConfig.CPUShares) + + cpuLimit, err := calculateCPULimit(containerJSON.HostConfig) + if err != nil { + return scrapererror.NewPartialScrapeError(fmt.Errorf("error retrieving container.cpu.limit: %w", err), 1) + } + if cpuLimit > 0 { + r.mb.RecordContainerCPULimitDataPoint(now, cpuLimit) + } + return nil +} + +func createMetricsReceiver( + _ context.Context, + params receiver.CreateSettings, + config component.Config, + consumer consumer.Metrics, +) (receiver.Metrics, error) { + dockerConfig := config.(*Config) + dsr := NewMetricsReceiver(params, dockerConfig) + + scrp, err := scraperhelper.NewScraper(Type.String(), dsr.ScrapeV2, scraperhelper.WithStart(dsr.Start), scraperhelper.WithShutdown(dsr.Shutdown)) + if err != nil { + return nil, err + } + + return scraperhelper.NewScraperControllerReceiver(&dsr.Config.ControllerConfig, params, consumer, scraperhelper.AddScraper(scrp)) +} diff --git a/internal/docker/receiver/metrics_receiver_test.go b/internal/docker/receiver/metrics_receiver_test.go new file mode 100644 index 000000000000..7ea06b5f400c --- /dev/null +++ 
b/internal/docker/receiver/metrics_receiver_test.go @@ -0,0 +1,433 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build !windows + +// TODO review if tests should succeed on Windows + +package receiver + +import ( + "context" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/receiver/receivertest" + "go.opentelemetry.io/collector/receiver/scraperhelper" + + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker/receiver/internal/metadata" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/pmetrictest" +) + +var mockFolder = filepath.Join("testdata", "mock") + +var ( + metricEnabled = metadata.MetricConfig{Enabled: true} + allMetricsEnabled = metadata.MetricsConfig{ + ContainerBlockioIoMergedRecursive: metricEnabled, + ContainerBlockioIoQueuedRecursive: metricEnabled, + ContainerBlockioIoServiceBytesRecursive: metricEnabled, + ContainerBlockioIoServiceTimeRecursive: metricEnabled, + ContainerBlockioIoServicedRecursive: metricEnabled, + ContainerBlockioIoTimeRecursive: metricEnabled, + ContainerBlockioIoWaitTimeRecursive: metricEnabled, + ContainerBlockioSectorsRecursive: metricEnabled, + ContainerCPULimit: metricEnabled, + ContainerCPUShares: metricEnabled, + ContainerCPUUtilization: metricEnabled, + ContainerCPUThrottlingDataPeriods: metricEnabled, + ContainerCPUThrottlingDataThrottledPeriods: metricEnabled, + ContainerCPUThrottlingDataThrottledTime: metricEnabled, + ContainerCPUUsageKernelmode: metricEnabled, + ContainerCPUUsagePercpu: metricEnabled, + ContainerCPUUsageSystem: metricEnabled, + ContainerCPUUsageTotal: 
metricEnabled, + ContainerCPUUsageUsermode: metricEnabled, + ContainerCPULogicalCount: metricEnabled, + ContainerMemoryActiveAnon: metricEnabled, + ContainerMemoryActiveFile: metricEnabled, + ContainerMemoryCache: metricEnabled, + ContainerMemoryDirty: metricEnabled, + ContainerMemoryHierarchicalMemoryLimit: metricEnabled, + ContainerMemoryHierarchicalMemswLimit: metricEnabled, + ContainerMemoryInactiveAnon: metricEnabled, + ContainerMemoryInactiveFile: metricEnabled, + ContainerMemoryMappedFile: metricEnabled, + ContainerMemoryPercent: metricEnabled, + ContainerMemoryPgfault: metricEnabled, + ContainerMemoryPgmajfault: metricEnabled, + ContainerMemoryPgpgin: metricEnabled, + ContainerMemoryPgpgout: metricEnabled, + ContainerMemoryRss: metricEnabled, + ContainerMemoryRssHuge: metricEnabled, + ContainerMemoryTotalActiveAnon: metricEnabled, + ContainerMemoryTotalActiveFile: metricEnabled, + ContainerMemoryTotalCache: metricEnabled, + ContainerMemoryTotalDirty: metricEnabled, + ContainerMemoryTotalInactiveAnon: metricEnabled, + ContainerMemoryTotalInactiveFile: metricEnabled, + ContainerMemoryTotalMappedFile: metricEnabled, + ContainerMemoryTotalPgfault: metricEnabled, + ContainerMemoryTotalPgmajfault: metricEnabled, + ContainerMemoryTotalPgpgin: metricEnabled, + ContainerMemoryTotalPgpgout: metricEnabled, + ContainerMemoryTotalRss: metricEnabled, + ContainerMemoryTotalRssHuge: metricEnabled, + ContainerMemoryTotalUnevictable: metricEnabled, + ContainerMemoryTotalWriteback: metricEnabled, + ContainerMemoryUnevictable: metricEnabled, + ContainerMemoryUsageLimit: metricEnabled, + ContainerMemoryUsageMax: metricEnabled, + ContainerMemoryUsageTotal: metricEnabled, + ContainerMemoryWriteback: metricEnabled, + ContainerMemoryFails: metricEnabled, + ContainerNetworkIoUsageRxBytes: metricEnabled, + ContainerNetworkIoUsageRxDropped: metricEnabled, + ContainerNetworkIoUsageRxErrors: metricEnabled, + ContainerNetworkIoUsageRxPackets: metricEnabled, + 
ContainerNetworkIoUsageTxBytes: metricEnabled, + ContainerNetworkIoUsageTxDropped: metricEnabled, + ContainerNetworkIoUsageTxErrors: metricEnabled, + ContainerNetworkIoUsageTxPackets: metricEnabled, + ContainerPidsCount: metricEnabled, + ContainerPidsLimit: metricEnabled, + ContainerUptime: metricEnabled, + ContainerRestarts: metricEnabled, + ContainerMemoryAnon: metricEnabled, + ContainerMemoryFile: metricEnabled, + } + + resourceAttributeEnabled = metadata.ResourceAttributeConfig{Enabled: true} + allResourceAttributesEnabled = metadata.ResourceAttributesConfig{ + ContainerCommandLine: resourceAttributeEnabled, + ContainerHostname: resourceAttributeEnabled, + ContainerID: resourceAttributeEnabled, + ContainerImageID: resourceAttributeEnabled, + ContainerImageName: resourceAttributeEnabled, + ContainerName: resourceAttributeEnabled, + ContainerRuntime: resourceAttributeEnabled, + } +) + +func TestNewReceiver(t *testing.T) { + cfg := &Config{ + ControllerConfig: scraperhelper.ControllerConfig{ + CollectionInterval: 1 * time.Second, + }, + Endpoint: "unix:///run/some.sock", + DockerAPIVersion: defaultDockerAPIVersion, + } + mr := NewMetricsReceiver(receivertest.NewNopCreateSettings(), cfg) + assert.NotNil(t, mr) +} + +func TestErrorsInStart(t *testing.T) { + unreachable := "unix:///not/a/thing.sock" + cfg := &Config{ + ControllerConfig: scraperhelper.ControllerConfig{ + CollectionInterval: 1 * time.Second, + }, + Endpoint: unreachable, + DockerAPIVersion: defaultDockerAPIVersion, + } + recv := NewMetricsReceiver(receivertest.NewNopCreateSettings(), cfg) + assert.NotNil(t, recv) + + cfg.Endpoint = "..not/a/valid/endpoint" + err := recv.Start(context.Background(), componenttest.NewNopHost()) + assert.Error(t, err) + assert.Contains(t, err.Error(), "unable to parse docker host") + + cfg.Endpoint = unreachable + err = recv.Start(context.Background(), componenttest.NewNopHost()) + assert.Error(t, err) + assert.Contains(t, err.Error(), "context deadline exceeded") +} + 
+func TestScrapeV2(t *testing.T) { + + testCases := []struct { + desc string + expectedMetricsFile string + mockDockerEngine func(t *testing.T) *httptest.Server + cfgBuilder *testConfigBuilder + }{ + { + desc: "scrapeV2_single_container", + expectedMetricsFile: filepath.Join(mockFolder, "single_container", "expected_metrics.yaml"), + mockDockerEngine: func(t *testing.T) *httptest.Server { + t.Helper() + containerID := "10b703fb312b25e8368ab5a3bce3a1610d1cee5d71a94920f1a7adbc5b0cb326" + mockServer, err := dockerMockServer(&map[string]string{ + "/v1.25/containers/json": filepath.Join(mockFolder, "single_container", "containers.json"), + "/v1.25/containers/" + containerID + "/json": filepath.Join(mockFolder, "single_container", "container.json"), + "/v1.25/containers/" + containerID + "/stats": filepath.Join(mockFolder, "single_container", "stats.json"), + }) + require.NoError(t, err) + return mockServer + }, + cfgBuilder: newTestConfigBuilder(). + withDefaultLabels(). + withMetrics(allMetricsEnabled), + }, + { + desc: "scrapeV2_two_containers", + expectedMetricsFile: filepath.Join(mockFolder, "two_containers", "expected_metrics.yaml"), + mockDockerEngine: func(t *testing.T) *httptest.Server { + t.Helper() + containerIDs := []string{ + "89d28931fd8b95c8806343a532e9e76bf0a0b76ee8f19452b8f75dee1ebcebb7", + "a359c0fc87c546b42d2ad32db7c978627f1d89b49cb3827a7b19ba97a1febcce", + } + mockServer, err := dockerMockServer(&map[string]string{ + "/v1.25/containers/json": filepath.Join(mockFolder, "two_containers", "containers.json"), + "/v1.25/containers/" + containerIDs[0] + "/json": filepath.Join(mockFolder, "two_containers", "container1.json"), + "/v1.25/containers/" + containerIDs[1] + "/json": filepath.Join(mockFolder, "two_containers", "container2.json"), + "/v1.25/containers/" + containerIDs[0] + "/stats": filepath.Join(mockFolder, "two_containers", "stats1.json"), + "/v1.25/containers/" + containerIDs[1] + "/stats": filepath.Join(mockFolder, "two_containers", 
"stats2.json"), + }) + require.NoError(t, err) + return mockServer + }, + cfgBuilder: newTestConfigBuilder(). + withDefaultLabels(). + withMetrics(allMetricsEnabled), + }, + { + desc: "scrapeV2_no_pids_stats", + expectedMetricsFile: filepath.Join(mockFolder, "no_pids_stats", "expected_metrics.yaml"), + mockDockerEngine: func(t *testing.T) *httptest.Server { + t.Helper() + containerID := "10b703fb312b25e8368ab5a3bce3a1610d1cee5d71a94920f1a7adbc5b0cb326" + mockServer, err := dockerMockServer(&map[string]string{ + "/v1.25/containers/json": filepath.Join(mockFolder, "no_pids_stats", "containers.json"), + "/v1.25/containers/" + containerID + "/json": filepath.Join(mockFolder, "no_pids_stats", "container.json"), + "/v1.25/containers/" + containerID + "/stats": filepath.Join(mockFolder, "no_pids_stats", "stats.json"), + }) + require.NoError(t, err) + return mockServer + }, + cfgBuilder: newTestConfigBuilder(). + withDefaultLabels(). + withMetrics(allMetricsEnabled), + }, + { + desc: "scrapeV2_pid_stats_max", + expectedMetricsFile: filepath.Join(mockFolder, "pids_stats_max", "expected_metrics.yaml"), + mockDockerEngine: func(t *testing.T) *httptest.Server { + t.Helper() + containerID := "78de07328afff50a9777b07dd36a28c709dffe081baaf67235db618843399643" + mockServer, err := dockerMockServer(&map[string]string{ + "/v1.25/containers/json": filepath.Join(mockFolder, "pids_stats_max", "containers.json"), + "/v1.25/containers/" + containerID + "/json": filepath.Join(mockFolder, "pids_stats_max", "container.json"), + "/v1.25/containers/" + containerID + "/stats": filepath.Join(mockFolder, "pids_stats_max", "stats.json"), + }) + require.NoError(t, err) + return mockServer + }, + cfgBuilder: newTestConfigBuilder(). + withDefaultLabels(). 
+ withMetrics(allMetricsEnabled), + }, + { + desc: "scrapeV2_cpu_limit", + expectedMetricsFile: filepath.Join(mockFolder, "cpu_limit", "expected_metrics.yaml"), + mockDockerEngine: func(t *testing.T) *httptest.Server { + t.Helper() + containerID := "9b842c47c1c3e4ee931e2c9713cf4e77aa09acc2201aea60fba04b6dbba6c674" + mockServer, err := dockerMockServer(&map[string]string{ + "/v1.25/containers/json": filepath.Join(mockFolder, "cpu_limit", "containers.json"), + "/v1.25/containers/" + containerID + "/json": filepath.Join(mockFolder, "cpu_limit", "container.json"), + "/v1.25/containers/" + containerID + "/stats": filepath.Join(mockFolder, "cpu_limit", "stats.json"), + }) + require.NoError(t, err) + return mockServer + }, + cfgBuilder: newTestConfigBuilder(). + withDefaultLabels(). + withMetrics(allMetricsEnabled), + }, + { + desc: "cgroups_v2_container", + expectedMetricsFile: filepath.Join(mockFolder, "cgroups_v2", "expected_metrics.yaml"), + mockDockerEngine: func(t *testing.T) *httptest.Server { + containerID := "f97ed5bca0a5a0b85bfd52c4144b96174e825c92a138bc0458f0e196f2c7c1b4" + mockServer, err := dockerMockServer(&map[string]string{ + "/v1.25/containers/json": filepath.Join(mockFolder, "cgroups_v2", "containers.json"), + "/v1.25/containers/" + containerID + "/json": filepath.Join(mockFolder, "cgroups_v2", "container.json"), + "/v1.25/containers/" + containerID + "/stats": filepath.Join(mockFolder, "cgroups_v2", "stats.json"), + }) + require.NoError(t, err) + return mockServer + }, + cfgBuilder: newTestConfigBuilder(). + withDefaultLabels(). 
+ withMetrics(allMetricsEnabled), + }, + { + desc: "scrapeV2_single_container_with_optional_resource_attributes", + expectedMetricsFile: filepath.Join(mockFolder, "single_container_with_optional_resource_attributes", "expected_metrics.yaml"), + mockDockerEngine: func(t *testing.T) *httptest.Server { + containerID := "73364842ef014441cac89fed05df19463b1230db25a31252cdf82e754f1ec581" + mockServer, err := dockerMockServer(&map[string]string{ + "/v1.25/containers/json": filepath.Join(mockFolder, "single_container_with_optional_resource_attributes", "containers.json"), + "/v1.25/containers/" + containerID + "/json": filepath.Join(mockFolder, "single_container_with_optional_resource_attributes", "container.json"), + "/v1.25/containers/" + containerID + "/stats": filepath.Join(mockFolder, "single_container_with_optional_resource_attributes", "stats.json"), + }) + require.NoError(t, err) + return mockServer + }, + cfgBuilder: newTestConfigBuilder(). + withDefaultLabels(). + withMetrics(allMetricsEnabled). 
+ withResourceAttributes(allResourceAttributesEnabled), + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + mockDockerEngine := tc.mockDockerEngine(t) + defer mockDockerEngine.Close() + + receiver := NewMetricsReceiver( + receivertest.NewNopCreateSettings(), tc.cfgBuilder.withEndpoint(mockDockerEngine.URL).build()) + err := receiver.Start(context.Background(), componenttest.NewNopHost()) + require.NoError(t, err) + defer func() { require.NoError(t, receiver.Shutdown(context.Background())) }() + + actualMetrics, err := receiver.ScrapeV2(context.Background()) + require.NoError(t, err) + + // Uncomment to regenerate 'expected_metrics.yaml' files + // golden.WriteMetrics(t, tc.expectedMetricsFile, actualMetrics) + + expectedMetrics, err := golden.ReadMetrics(tc.expectedMetricsFile) + + assert.NoError(t, err) + assert.NoError(t, pmetrictest.CompareMetrics(expectedMetrics, actualMetrics, + pmetrictest.IgnoreMetricDataPointsOrder(), + pmetrictest.IgnoreResourceMetricsOrder(), + pmetrictest.IgnoreStartTimestamp(), + pmetrictest.IgnoreTimestamp(), + pmetrictest.IgnoreMetricValues( + "container.uptime", // value depends on time.Now(), making it unpredictable as far as tests go + ), + )) + }) + } +} + +func TestRecordBaseMetrics(t *testing.T) { + cfg := CreateDefaultConfig().(*Config) + cfg.MetricsBuilderConfig.Metrics = metadata.MetricsConfig{ + ContainerUptime: metricEnabled, + } + r := NewMetricsReceiver(receivertest.NewNopCreateSettings(), cfg) + now := time.Now() + started := now.Add(-2 * time.Second).Format(time.RFC3339) + + t.Run("ok", func(t *testing.T) { + err := r.recordBaseMetrics( + pcommon.NewTimestampFromTime(now), + &types.ContainerJSONBase{ + State: &types.ContainerState{ + StartedAt: started, + }, + }, + ) + require.NoError(t, err) + m := r.mb.Emit().ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0) + assert.Equal(t, "container.uptime", m.Name()) + dp := m.Gauge().DataPoints() + assert.Equal(t, 1, dp.Len()) + 
assert.Equal(t, 2, int(dp.At(0).DoubleValue())) + }) + + t.Run("error", func(t *testing.T) { + err := r.recordBaseMetrics( + pcommon.NewTimestampFromTime(now), + &types.ContainerJSONBase{ + State: &types.ContainerState{ + StartedAt: "bad date", + }, + }, + ) + require.Error(t, err) + }) +} + +func dockerMockServer(urlToFile *map[string]string) (*httptest.Server, error) { + urlToFileContents := make(map[string][]byte, len(*urlToFile)) + for urlPath, filePath := range *urlToFile { + err := func() error { + fileContents, err := os.ReadFile(filepath.Clean(filePath)) + if err != nil { + return err + } + urlToFileContents[urlPath] = fileContents + return nil + }() + if err != nil { + return nil, err + } + } + + return httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + data, ok := urlToFileContents[req.URL.Path] + if !ok { + rw.WriteHeader(http.StatusNotFound) + return + } + rw.WriteHeader(http.StatusOK) + _, _ = rw.Write(data) + })), nil +} + +type testConfigBuilder struct { + config *Config +} + +func newTestConfigBuilder() *testConfigBuilder { + return &testConfigBuilder{config: CreateDefaultConfig().(*Config)} +} + +func (cb *testConfigBuilder) withEndpoint(endpoint string) *testConfigBuilder { + cb.config.Endpoint = endpoint + return cb +} + +func (cb *testConfigBuilder) withMetrics(ms metadata.MetricsConfig) *testConfigBuilder { + cb.config.MetricsBuilderConfig.Metrics = ms + return cb +} + +func (cb *testConfigBuilder) withResourceAttributes(ras metadata.ResourceAttributesConfig) *testConfigBuilder { + cb.config.MetricsBuilderConfig.ResourceAttributes = ras + return cb +} + +func (cb *testConfigBuilder) withDefaultLabels() *testConfigBuilder { + cb.config.EnvVarsToMetricLabels = map[string]string{ + "ENV_VAR": "env-var-metric-label", + "ENV_VAR_2": "env-var-metric-label-2", + } + cb.config.ContainerLabelsToMetricLabels = map[string]string{ + "container.label": "container-metric-label", + "container.label.2": 
"container-metric-label-2", + } + return cb +} + +func (cb *testConfigBuilder) build() *Config { + return cb.config +} diff --git a/internal/docker/receiver/testdata/config.yaml b/internal/docker/receiver/testdata/config.yaml new file mode 100644 index 000000000000..998acb53ab28 --- /dev/null +++ b/internal/docker/receiver/testdata/config.yaml @@ -0,0 +1,20 @@ +docker: +docker/allsettings: + endpoint: http://example.com/ + collection_interval: 2s + timeout: 20s + api_version: '1.40' + container_labels_to_metric_labels: + my.container.label: my-metric-label + my.other.container.label: my-other-metric-label + env_vars_to_metric_labels: + MY_ENVIRONMENT_VARIABLE: my-metric-label + MY_OTHER_ENVIRONMENT_VARIABLE: my-other-metric-label + excluded_images: + - undesired-container + - another-*-container + metrics: + container.cpu.usage.system: + enabled: false + container.memory.total_rss: + enabled: true diff --git a/internal/docker/receiver/testdata/container.json b/internal/docker/receiver/testdata/container.json new file mode 100644 index 000000000000..bbc2089321ce --- /dev/null +++ b/internal/docker/receiver/testdata/container.json @@ -0,0 +1,226 @@ +{ + "AppArmorProfile": "docker-default", + "Args": [ + "myContainerArgs" + ], + "Config": { + "AttachStderr": false, + "AttachStdin": false, + "AttachStdout": false, + "Cmd": [ + "myCommand" + ], + "Domainname": "", + "Entrypoint": [ + "docker-entrypoint.sh" + ], + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "MY_ENV_VAR=my_env_var_value", + "MY_OTHER_ENV_VAR=my_other_env_var_value", + "INVALID_ENV_ENTRY" + ], + "ExposedPorts": { + "12345/tcp": {} + }, + "Hostname": "abcdef012345", + "Image": "myImage", + "Labels": { + "my.specified.docker.label": "my_specified_docker_label_value", + "other.specified.docker.label": "other_specified_docker_label_value" + }, + "OnBuild": null, + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": { + "/": {} + }, + 
"WorkingDir": "" + }, + "Created": "2020-01-01T00:00:01.012345678Z", + "Driver": "overlay2", + "ExecIDs": null, + "GraphDriver": { + "Data": { + "LowerDir": "/var/lib/docker/overlay2/id/diff", + "MergedDir": "/var/lib/docker/overlay2/id/merged", + "UpperDir": "/var/lib/docker/overlay2/id/diff", + "WorkDir": "/var/lib/docker/overlay2/id/work" + }, + "Name": "overlay2" + }, + "HostConfig": { + "AutoRemove": false, + "Binds": [], + "BlkioDeviceReadBps": null, + "BlkioDeviceReadIOps": null, + "BlkioDeviceWriteBps": null, + "BlkioDeviceWriteIOps": null, + "BlkioWeight": 0, + "BlkioWeightDevice": null, + "CapAdd": null, + "CapDrop": null, + "Capabilities": null, + "Cgroup": "", + "CgroupParent": "", + "ConsoleSize": [ + 0, + 0 + ], + "ContainerIDFile": "", + "CpuCount": 0, + "CpuPercent": 0, + "CpuPeriod": 0, + "CpuQuota": 0, + "CpuRealtimePeriod": 0, + "CpuRealtimeRuntime": 0, + "CpuShares": 0, + "CpusetCpus": "", + "CpusetMems": "", + "DeviceCgroupRules": null, + "DeviceRequests": null, + "Devices": null, + "Dns": null, + "DnsOptions": null, + "DnsSearch": null, + "ExtraHosts": null, + "GroupAdd": null, + "IOMaximumBandwidth": 0, + "IOMaximumIOps": 0, + "IpcMode": "shareable", + "Isolation": "", + "KernelMemory": 0, + "KernelMemoryTCP": 0, + "Links": null, + "LogConfig": { + "Config": {}, + "Type": "json-file" + }, + "MaskedPaths": [ + "/proc/asound", + "/proc/acpi", + "/proc/kcore", + "/proc/keys", + "/proc/latency_stats", + "/proc/timer_list", + "/proc/timer_stats", + "/proc/sched_debug", + "/proc/scsi", + "/sys/firmware" + ], + "Memory": 0, + "MemoryReservation": 0, + "MemorySwap": 0, + "MemorySwappiness": null, + "NanoCpus": 0, + "NetworkMode": "my-network", + "OomKillDisable": false, + "OomScoreAdj": 0, + "PidMode": "", + "PidsLimit": null, + "PortBindings": { + "27017/tcp": [ + { + "HostIp": "", + "HostPort": "12345" + } + ] + }, + "Privileged": false, + "PublishAllPorts": false, + "ReadonlyPaths": [ + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + 
"/proc/sysrq-trigger" + ], + "ReadonlyRootfs": false, + "RestartPolicy": { + "MaximumRetryCount": 0, + "Name": "unless-stopped" + }, + "Runtime": "runc", + "SecurityOpt": null, + "ShmSize": 67108864, + "UTSMode": "", + "Ulimits": null, + "UsernsMode": "", + "VolumeDriver": "", + "VolumesFrom": [] + }, + "HostnamePath": "/var/lib/docker/containers/a2596076ca048f02bcd16a8acd12a7ea2d3bc430d1cde095357239dd3925a4c3/hostname", + "HostsPath": "/var/lib/docker/containers/a2596076ca048f02bcd16a8acd12a7ea2d3bc430d1cde095357239dd3925a4c3/hosts", + "Id": "a2596076ca048f02bcd16a8acd12a7ea2d3bc430d1cde095357239dd3925a4c3", + "Image": "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "LogPath": "/var/lib/docker/containers/a2596076ca048f02bcd16a8acd12a7ea2d3bc430d1cde095357239dd3925a4c3/a2596076ca048f02bcd16a8acd12a7ea2d3bc430d1cde095357239dd3925a4c3-json.log", + "MountLabel": "", + "Mounts": [], + "Name": "/my-container-name", + "NetworkSettings": { + "Bridge": "", + "EndpointID": "", + "Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "HairpinMode": false, + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "MacAddress": "", + "Networks": { + "my-network": { + "Aliases": [ + "image", + "a2596076ca04" + ], + "DriverOpts": null, + "EndpointID": "da8ebd106dca82c182e6e304e8f2010bc85675090e67fd2cf8f3c0a9e453f04a", + "Gateway": "172.18.0.1", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAMConfig": null, + "IPAddress": "172.18.0.3", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "Links": null, + "MacAddress": "13:38:b1:9b:00:93", + "NetworkID": "b9628f23e094fe73cef71d4f5fcba490c23a0181b1dad2612bd2c9f31447e817" + } + }, + "Ports": { + "12345/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "12345" + } + ] + }, + "SandboxID": "8750b41a9a9e4a9d3e9e2ba54f5a8a01e7d3bfc1f506a363f472bcff5b97b834", + "SandboxKey": "/var/run/docker/netns/8750b41a9a9e", + 
"SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null + }, + "Path": "docker-entrypoint.sh", + "Platform": "linux", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/a2596076ca048f02bcd16a8acd12a7ea2d3bc430d1cde095357239dd3925a4c3/resolv.conf", + "RestartCount": 0, + "State": { + "Dead": false, + "Error": "", + "ExitCode": 0, + "FinishedAt": "2021-01-01T00:00:09.012345678Z", + "OOMKilled": false, + "Paused": false, + "Pid": 23019, + "Restarting": false, + "Running": true, + "StartedAt": "2020-01-01T00:00:02.012345678Z", + "Status": "running" + } +} diff --git a/internal/docker/receiver/testdata/mock/cgroups_v2/container.json b/internal/docker/receiver/testdata/mock/cgroups_v2/container.json new file mode 100644 index 000000000000..2bb7c442d0c1 --- /dev/null +++ b/internal/docker/receiver/testdata/mock/cgroups_v2/container.json @@ -0,0 +1,197 @@ +{ + "Id": "f97ed5bca0a5a0b85bfd52c4144b96174e825c92a138bc0458f0e196f2c7c1b4", + "Created": "2023-04-25T10:39:23.320048025Z", + "Path": "sh", + "Args": [ + "-c", + "\n dd if=/dev/urandom of=/dev/shm/file bs=32M\n ls -lh /dev/shm\n read\n" + ], + "State": { + "Status": "running", + "Running": true, + "Paused": false, + "Restarting": false, + "OOMKilled": false, + "Dead": false, + "Pid": 4003, + "ExitCode": 0, + "Error": "", + "StartedAt": "2023-04-25T10:39:23.758576058Z", + "FinishedAt": "0001-01-01T00:00:00Z" + }, + "Image": "sha256:9ed4aefc74f6792b5a804d1d146fe4b4a2299147b0f50eaf2b08435d7b38c27e", + "ResolvConfPath": "/var/lib/docker/containers/f97ed5bca0a5a0b85bfd52c4144b96174e825c92a138bc0458f0e196f2c7c1b4/resolv.conf", + "HostnamePath": "/var/lib/docker/containers/f97ed5bca0a5a0b85bfd52c4144b96174e825c92a138bc0458f0e196f2c7c1b4/hostname", + "HostsPath": "/var/lib/docker/containers/f97ed5bca0a5a0b85bfd52c4144b96174e825c92a138bc0458f0e196f2c7c1b4/hosts", + "LogPath": 
"/var/lib/docker/containers/f97ed5bca0a5a0b85bfd52c4144b96174e825c92a138bc0458f0e196f2c7c1b4/f97ed5bca0a5a0b85bfd52c4144b96174e825c92a138bc0458f0e196f2c7c1b4-json.log", + "Name": "/compassionate_mcnulty", + "RestartCount": 0, + "Driver": "overlay2", + "Platform": "linux", + "MountLabel": "", + "ProcessLabel": "", + "AppArmorProfile": "docker-default", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "ContainerIDFile": "", + "LogConfig": { + "Type": "json-file", + "Config": {} + }, + "NetworkMode": "default", + "PortBindings": {}, + "RestartPolicy": { + "Name": "no", + "MaximumRetryCount": 0 + }, + "AutoRemove": false, + "VolumeDriver": "", + "VolumesFrom": null, + "ConsoleSize": [ + 12, + 263 + ], + "CapAdd": null, + "CapDrop": null, + "CgroupnsMode": "private", + "Dns": [], + "DnsOptions": [], + "DnsSearch": [], + "ExtraHosts": null, + "GroupAdd": null, + "IpcMode": "private", + "Cgroup": "", + "Links": null, + "OomScoreAdj": 0, + "PidMode": "", + "Privileged": false, + "PublishAllPorts": false, + "ReadonlyRootfs": false, + "SecurityOpt": null, + "UTSMode": "", + "UsernsMode": "", + "ShmSize": 268435456, + "Runtime": "runc", + "Isolation": "", + "CpuShares": 0, + "Memory": 268435456, + "NanoCpus": 0, + "CgroupParent": "", + "BlkioWeight": 0, + "BlkioWeightDevice": [], + "BlkioDeviceReadBps": [], + "BlkioDeviceWriteBps": [], + "BlkioDeviceReadIOps": [], + "BlkioDeviceWriteIOps": [], + "CpuPeriod": 0, + "CpuQuota": 0, + "CpuRealtimePeriod": 0, + "CpuRealtimeRuntime": 0, + "CpusetCpus": "", + "CpusetMems": "", + "Devices": [], + "DeviceCgroupRules": null, + "DeviceRequests": null, + "MemoryReservation": 0, + "MemorySwap": 536870912, + "MemorySwappiness": null, + "OomKillDisable": null, + "PidsLimit": null, + "Ulimits": null, + "CpuCount": 0, + "CpuPercent": 0, + "IOMaximumIOps": 0, + "IOMaximumBandwidth": 0, + "MaskedPaths": [ + "/proc/asound", + "/proc/acpi", + "/proc/kcore", + "/proc/keys", + "/proc/latency_stats", + "/proc/timer_list", + 
"/proc/timer_stats", + "/proc/sched_debug", + "/proc/scsi", + "/sys/firmware" + ], + "ReadonlyPaths": [ + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + "/proc/sysrq-trigger" + ] + }, + "GraphDriver": { + "Data": { + "LowerDir": "/var/lib/docker/overlay2/f51311acca8a83fbe1c772b6aab9b599c944d7564a31e838188df747e11f6073-init/diff:/var/lib/docker/overlay2/b16f6db2d7165eaef678f40072fe213368e9b00db6f7d37419436c0f372b5633/diff", + "MergedDir": "/var/lib/docker/overlay2/f51311acca8a83fbe1c772b6aab9b599c944d7564a31e838188df747e11f6073/merged", + "UpperDir": "/var/lib/docker/overlay2/f51311acca8a83fbe1c772b6aab9b599c944d7564a31e838188df747e11f6073/diff", + "WorkDir": "/var/lib/docker/overlay2/f51311acca8a83fbe1c772b6aab9b599c944d7564a31e838188df747e11f6073/work" + }, + "Name": "overlay2" + }, + "Mounts": [], + "Config": { + "Hostname": "f97ed5bca0a5", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "Tty": true, + "OpenStdin": true, + "StdinOnce": false, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Cmd": [ + "sh", + "-c", + "\n dd if=/dev/urandom of=/dev/shm/file bs=32M\n ls -lh /dev/shm\n read\n" + ], + "Image": "alpine", + "Volumes": null, + "WorkingDir": "", + "Entrypoint": null, + "OnBuild": null, + "Labels": {} + }, + "NetworkSettings": { + "Bridge": "", + "SandboxID": "d8598acd89f277906dec97b79e1ee6752a38fd5754b772bb8d2cf8d02e363d19", + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "Ports": {}, + "SandboxKey": "/var/run/docker/netns/d8598acd89f2", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "349832a0f6c7de342c7f5c967c8fac7f2c7bdc0d7bf68476d21cd80a342f792a", + "Gateway": "172.17.0.1", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "MacAddress": "02:42:ac:11:00:02", + "Networks": { + "bridge": { + 
"IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "ffb5d6c9fe2f4befd62c554bfd0c3f9fa0b53a3fe5dddfc41b0fd8dd648f9c68", + "EndpointID": "349832a0f6c7de342c7f5c967c8fac7f2c7bdc0d7bf68476d21cd80a342f792a", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:02", + "DriverOpts": null + } + } + } +} diff --git a/internal/docker/receiver/testdata/mock/cgroups_v2/containers.json b/internal/docker/receiver/testdata/mock/cgroups_v2/containers.json new file mode 100644 index 000000000000..19ee08241de9 --- /dev/null +++ b/internal/docker/receiver/testdata/mock/cgroups_v2/containers.json @@ -0,0 +1,39 @@ +[ + { + "Id": "f97ed5bca0a5a0b85bfd52c4144b96174e825c92a138bc0458f0e196f2c7c1b4", + "Names": [ + "/compassionate_mcnulty" + ], + "Image": "alpine", + "ImageID": "sha256:9ed4aefc74f6792b5a804d1d146fe4b4a2299147b0f50eaf2b08435d7b38c27e", + "Command": "sh -c '\n dd if=/dev/urandom of=/dev/shm/file bs=32M\n ls -lh /dev/shm\n read\n'", + "Created": 1682419163, + "Ports": [], + "Labels": {}, + "State": "running", + "Status": "Up 22 minutes", + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "ffb5d6c9fe2f4befd62c554bfd0c3f9fa0b53a3fe5dddfc41b0fd8dd648f9c68", + "EndpointID": "349832a0f6c7de342c7f5c967c8fac7f2c7bdc0d7bf68476d21cd80a342f792a", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:02", + "DriverOpts": null + } + } + }, + "Mounts": [] + } +] diff --git a/internal/docker/receiver/testdata/mock/cgroups_v2/expected_metrics.yaml b/internal/docker/receiver/testdata/mock/cgroups_v2/expected_metrics.yaml new file mode 100644 index 000000000000..f0145605005f --- /dev/null +++ 
b/internal/docker/receiver/testdata/mock/cgroups_v2/expected_metrics.yaml @@ -0,0 +1,427 @@ +resourceMetrics: + - resource: + attributes: + - key: container.hostname + value: + stringValue: f97ed5bca0a5 + - key: container.id + value: + stringValue: f97ed5bca0a5a0b85bfd52c4144b96174e825c92a138bc0458f0e196f2c7c1b4 + - key: container.image.name + value: + stringValue: alpine + - key: container.name + value: + stringValue: compassionate_mcnulty + - key: container.runtime + value: + stringValue: docker + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeMetrics: + - metrics: + - description: Number of bytes transferred to/from the disk by the group and descendant groups. + name: container.blockio.io_service_bytes_recursive + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "167936" + attributes: + - key: device_major + value: + stringValue: "8" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: read + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "8" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: write + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: By + - description: 'Number of cores available to the container.' + gauge: + dataPoints: + - asInt: 2 + startTimeUnixNano: "1687762436124732000" + timeUnixNano: "1687762436137493000" + name: container.cpu.logical.count + unit: "{cpus}" + - description: CPU shares set for the container. + gauge: + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: container.cpu.shares + unit: "1" + - description: Number of periods with throttling active. 
+ name: container.cpu.throttling_data.periods + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{periods}' + - description: Number of periods when the container hits its throttling limit. + name: container.cpu.throttling_data.throttled_periods + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{periods}' + - description: Aggregate time the container was throttled. + name: container.cpu.throttling_data.throttled_time + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: Time spent by tasks of the cgroup in kernel mode (Linux). Time spent by all container processes in kernel mode (Windows). + name: container.cpu.usage.kernelmode + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "970974000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: System CPU usage, as reported by docker. + name: container.cpu.usage.system + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "4836970000000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: Total CPU time consumed. + name: container.cpu.usage.total + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "999478000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: Time spent by tasks of the cgroup in user mode (Linux). Time spent by all container processes in user mode (Windows). + name: container.cpu.usage.usermode + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "28503000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: Percent of CPU used by the container. 
+ gauge: + dataPoints: + - asDouble: 0.041326615629205886 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: container.cpu.utilization + unit: "1" + - description: The amount of anonymous memory that has been identified as active by the kernel. + name: container.memory.active_anon + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "4096" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Cache memory that has been identified as active by the kernel. + name: container.memory.active_file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Amount of memory used in anonymous mappings such as brk(), sbrk(), and mmap(MAP_ANONYMOUS) (Only available with cgroups v2). + name: container.memory.anon + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "61440" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: 'Number of times the memory limit was hit.' + name: container.memory.fails + sum: + isMonotonic: true + aggregationTemporality: 2 + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: "{fails}" + - description: Amount of memory used to cache filesystem data, including tmpfs and shared memory (Only available with cgroups v2). + name: container.memory.file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "233848832" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The amount of anonymous memory that has been identified as inactive by the kernel. + name: container.memory.inactive_anon + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "233906176" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Cache memory that has been identified as inactive by the kernel. 
+ name: container.memory.inactive_file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Percentage of memory used. + gauge: + dataPoints: + - asDouble: 87.41302490234375 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: container.memory.percent + unit: "1" + - description: Indicate the number of times that a process of the cgroup triggered a page fault. + name: container.memory.pgfault + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "9458" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{faults}' + - description: Indicate the number of times that a process of the cgroup triggered a major fault. + name: container.memory.pgmajfault + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "4" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{faults}' + - description: The amount of memory that cannot be reclaimed. + name: container.memory.unevictable + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Memory limit of the container. + name: container.memory.usage.limit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "268435456" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Maximum memory usage. + name: container.memory.usage.max + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Memory usage of the container. This excludes the cache. + name: container.memory.usage.total + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "234647552" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Bytes received by the container. 
+ name: container.network.io.usage.rx_bytes + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1296" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: By + - description: Incoming packets dropped. + name: container.network.io.usage.rx_dropped + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{packets}' + - description: Received errors. + name: container.network.io.usage.rx_errors + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{errors}' + - description: Packets received. + name: container.network.io.usage.rx_packets + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "16" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{packets}' + - description: Bytes sent. + name: container.network.io.usage.tx_bytes + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: By + - description: Outgoing packets dropped. + name: container.network.io.usage.tx_dropped + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{packets}' + - description: Sent errors. 
+ name: container.network.io.usage.tx_errors + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{errors}' + - description: Packets sent. + name: container.network.io.usage.tx_packets + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{packets}' + - description: Number of pids in the container's cgroup. + name: container.pids.count + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{pids}' + - description: Maximum number of pids in the container's cgroup. + name: container.pids.limit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "4694" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{pids}' + - description: Number of restarts for the container. + name: container.restarts + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{restarts}' + - description: Time elapsed since container start time. 
+ gauge: + dataPoints: + - asDouble: 1.5026932099271942e+07 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: container.uptime + unit: s + scope: + name: otelcol/docker/receiver + version: latest diff --git a/internal/docker/receiver/testdata/mock/cgroups_v2/stats.json b/internal/docker/receiver/testdata/mock/cgroups_v2/stats.json new file mode 100644 index 000000000000..30c0367e823f --- /dev/null +++ b/internal/docker/receiver/testdata/mock/cgroups_v2/stats.json @@ -0,0 +1,111 @@ +{ + "read": "2023-04-25T11:03:28.719828114Z", + "preread": "0001-01-01T00:00:00Z", + "pids_stats": { + "current": 1, + "limit": 4694 + }, + "blkio_stats": { + "io_service_bytes_recursive": [ + { + "major": 8, + "minor": 0, + "op": "read", + "value": 167936 + }, + { + "major": 8, + "minor": 0, + "op": "write", + "value": 0 + } + ], + "io_serviced_recursive": null, + "io_queue_recursive": null, + "io_service_time_recursive": null, + "io_wait_time_recursive": null, + "io_merged_recursive": null, + "io_time_recursive": null, + "sectors_recursive": null + }, + "num_procs": 0, + "storage_stats": {}, + "cpu_stats": { + "cpu_usage": { + "total_usage": 999478000, + "usage_in_kernelmode": 970974000, + "usage_in_usermode": 28503000 + }, + "system_cpu_usage": 4836970000000, + "online_cpus": 2, + "throttling_data": { + "periods": 0, + "throttled_periods": 0, + "throttled_time": 0 + } + }, + "precpu_stats": { + "cpu_usage": { + "total_usage": 0, + "usage_in_kernelmode": 0, + "usage_in_usermode": 0 + }, + "throttling_data": { + "periods": 0, + "throttled_periods": 0, + "throttled_time": 0 + } + }, + "memory_stats": { + "usage": 234647552, + "stats": { + "active_anon": 4096, + "active_file": 0, + "anon": 61440, + "anon_thp": 0, + "file": 233848832, + "file_dirty": 0, + "file_mapped": 0, + "file_writeback": 0, + "inactive_anon": 233906176, + "inactive_file": 0, + "kernel_stack": 16384, + "pgactivate": 7, + "pgdeactivate": 6, + "pgfault": 9458, + "pglazyfree": 0, + "pglazyfreed": 0, + 
"pgmajfault": 4, + "pgrefill": 6, + "pgscan": 41, + "pgsteal": 41, + "shmem": 233848832, + "slab": 662184, + "slab_reclaimable": 592024, + "slab_unreclaimable": 70160, + "sock": 0, + "thp_collapse_alloc": 0, + "thp_fault_alloc": 0, + "unevictable": 0, + "workingset_activate": 0, + "workingset_nodereclaim": 0, + "workingset_refault": 0 + }, + "failcnt": 1, + "limit": 268435456 + }, + "name": "/compassionate_mcnulty", + "id": "f97ed5bca0a5a0b85bfd52c4144b96174e825c92a138bc0458f0e196f2c7c1b4", + "networks": { + "eth0": { + "rx_bytes": 1296, + "rx_packets": 16, + "rx_errors": 0, + "rx_dropped": 0, + "tx_bytes": 0, + "tx_packets": 0, + "tx_errors": 0, + "tx_dropped": 0 + } + } +} diff --git a/internal/docker/receiver/testdata/mock/cpu_limit/container.json b/internal/docker/receiver/testdata/mock/cpu_limit/container.json new file mode 100644 index 000000000000..f7c0905aebf5 --- /dev/null +++ b/internal/docker/receiver/testdata/mock/cpu_limit/container.json @@ -0,0 +1,196 @@ +{ + "Id": "9b842c47c1c3e4ee931e2c9713cf4e77aa09acc2201aea60fba04b6dbba6c674", + "Created": "2023-04-17T13:51:04.607496655Z", + "Path": "sleep", + "Args": [ + "infinity" + ], + "State": { + "Status": "running", + "Running": true, + "Paused": false, + "Restarting": false, + "OOMKilled": false, + "Dead": false, + "Pid": 135886, + "ExitCode": 0, + "Error": "", + "StartedAt": "2023-04-19T14:07:07.809461484Z", + "FinishedAt": "2023-04-19T14:06:53.167608711Z" + }, + "Image": "sha256:3fbaf71a998bae6e375be74b999bd418091bf6511e356a129fdc969c4a94a5bc", + "ResolvConfPath": "/var/lib/docker/containers/9b842c47c1c3e4ee931e2c9713cf4e77aa09acc2201aea60fba04b6dbba6c674/resolv.conf", + "HostnamePath": "/var/lib/docker/containers/9b842c47c1c3e4ee931e2c9713cf4e77aa09acc2201aea60fba04b6dbba6c674/hostname", + "HostsPath": "/var/lib/docker/containers/9b842c47c1c3e4ee931e2c9713cf4e77aa09acc2201aea60fba04b6dbba6c674/hosts", + "LogPath": 
"/var/lib/docker/containers/9b842c47c1c3e4ee931e2c9713cf4e77aa09acc2201aea60fba04b6dbba6c674/9b842c47c1c3e4ee931e2c9713cf4e77aa09acc2201aea60fba04b6dbba6c674-json.log", + "Name": "/sleepy1", + "RestartCount": 0, + "Driver": "devicemapper", + "Platform": "linux", + "MountLabel": "", + "ProcessLabel": "", + "AppArmorProfile": "docker-default", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "ContainerIDFile": "", + "LogConfig": { + "Type": "json-file", + "Config": {} + }, + "NetworkMode": "default", + "PortBindings": {}, + "RestartPolicy": { + "Name": "no", + "MaximumRetryCount": 0 + }, + "AutoRemove": false, + "VolumeDriver": "", + "VolumesFrom": null, + "CapAdd": null, + "CapDrop": null, + "CgroupnsMode": "private", + "Dns": [], + "DnsOptions": [], + "DnsSearch": [], + "ExtraHosts": null, + "GroupAdd": null, + "IpcMode": "private", + "Cgroup": "", + "Links": null, + "OomScoreAdj": 0, + "PidMode": "", + "Privileged": false, + "PublishAllPorts": false, + "ReadonlyRootfs": false, + "SecurityOpt": null, + "UTSMode": "", + "UsernsMode": "", + "ShmSize": 67108864, + "Runtime": "runc", + "ConsoleSize": [ + 0, + 0 + ], + "Isolation": "", + "CpuShares": 0, + "Memory": 0, + "NanoCpus": 0, + "CgroupParent": "", + "BlkioWeight": 0, + "BlkioWeightDevice": [], + "BlkioDeviceReadBps": null, + "BlkioDeviceWriteBps": null, + "BlkioDeviceReadIOps": null, + "BlkioDeviceWriteIOps": null, + "CpuPeriod": 0, + "CpuQuota": 0, + "CpuRealtimePeriod": 0, + "CpuRealtimeRuntime": 0, + "CpusetCpus": "0,3", + "CpusetMems": "", + "Devices": [], + "DeviceCgroupRules": null, + "DeviceRequests": null, + "KernelMemory": 0, + "KernelMemoryTCP": 0, + "MemoryReservation": 0, + "MemorySwap": 0, + "MemorySwappiness": null, + "OomKillDisable": null, + "PidsLimit": null, + "Ulimits": null, + "CpuCount": 0, + "CpuPercent": 0, + "IOMaximumIOps": 0, + "IOMaximumBandwidth": 0, + "MaskedPaths": [ + "/proc/asound", + "/proc/acpi", + "/proc/kcore", + "/proc/keys", + "/proc/latency_stats", + 
"/proc/timer_list", + "/proc/timer_stats", + "/proc/sched_debug", + "/proc/scsi", + "/sys/firmware" + ], + "ReadonlyPaths": [ + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + "/proc/sysrq-trigger" + ] + }, + "GraphDriver": { + "Data": { + "DeviceId": "4", + "DeviceName": "docker-253:0-1050151-b0997978b757cf1dc712ad50496bf49e85cfd24d8b1c61853c16a0eec0ed4176", + "DeviceSize": "10737418240" + }, + "Name": "devicemapper" + }, + "Mounts": [], + "Config": { + "Hostname": "78de07328aff", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Cmd": [ + "sleep", + "infinity" + ], + "Image": "busybox", + "Volumes": null, + "WorkingDir": "", + "Entrypoint": null, + "OnBuild": null, + "Labels": {} + }, + "NetworkSettings": { + "Bridge": "", + "SandboxID": "384e9c0ba138cdcf78d8abdbb0c55b725ff83d0d02ba3c7aa170b9c38ba5e1fc", + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "Ports": {}, + "SandboxKey": "/var/run/docker/netns/384e9c0ba138", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "dccc9fc92b4d33e9a0b0f66c1daaf528e4241259d5f7609b93740c87765c7649", + "Gateway": "172.17.0.1", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "MacAddress": "02:42:ac:11:00:02", + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "8dd6b2854086c51888ebfaca18940146b4ccfc332a9bc3fbe7af7b4d9645bbce", + "EndpointID": "dccc9fc92b4d33e9a0b0f66c1daaf528e4241259d5f7609b93740c87765c7649", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:02", + "DriverOpts": null + } 
+ } + } +} diff --git a/internal/docker/receiver/testdata/mock/cpu_limit/containers.json b/internal/docker/receiver/testdata/mock/cpu_limit/containers.json new file mode 100644 index 000000000000..a693f919c4df --- /dev/null +++ b/internal/docker/receiver/testdata/mock/cpu_limit/containers.json @@ -0,0 +1,39 @@ +[ + { + "Id": "9b842c47c1c3e4ee931e2c9713cf4e77aa09acc2201aea60fba04b6dbba6c674", + "Names": [ + "/sleepy1" + ], + "Image": "busybox", + "ImageID": "sha256:3fbaf71a998bae6e375be74b999bd418091bf6511e356a129fdc969c4a94a5bc", + "Command": "sleep infinity", + "Created": 1681739464, + "Ports": [], + "Labels": {}, + "State": "running", + "Status": "Up 4 days", + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "8dd6b2854086c51888ebfaca18940146b4ccfc332a9bc3fbe7af7b4d9645bbce", + "EndpointID": "dccc9fc92b4d33e9a0b0f66c1daaf528e4241259d5f7609b93740c87765c7649", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:02", + "DriverOpts": null + } + } + }, + "Mounts": [] + } +] diff --git a/internal/docker/receiver/testdata/mock/cpu_limit/expected_metrics.yaml b/internal/docker/receiver/testdata/mock/cpu_limit/expected_metrics.yaml new file mode 100644 index 000000000000..8ac35fc57694 --- /dev/null +++ b/internal/docker/receiver/testdata/mock/cpu_limit/expected_metrics.yaml @@ -0,0 +1,487 @@ +resourceMetrics: + - resource: + attributes: + - key: container.hostname + value: + stringValue: 78de07328aff + - key: container.id + value: + stringValue: 9b842c47c1c3e4ee931e2c9713cf4e77aa09acc2201aea60fba04b6dbba6c674 + - key: container.image.name + value: + stringValue: busybox + - key: container.name + value: + stringValue: sleepy1 + - key: container.runtime + value: + stringValue: docker + schemaUrl: 
https://opentelemetry.io/schemas/1.6.1 + scopeMetrics: + - metrics: + - description: Number of bytes transferred to/from the disk by the group and descendant groups. + name: container.blockio.io_service_bytes_recursive + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1998848" + attributes: + - key: device_major + value: + stringValue: "253" + - key: device_minor + value: + stringValue: "1" + - key: operation + value: + stringValue: read + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "253" + - key: device_minor + value: + stringValue: "1" + - key: operation + value: + stringValue: write + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "1998848" + attributes: + - key: device_major + value: + stringValue: "253" + - key: device_minor + value: + stringValue: "2" + - key: operation + value: + stringValue: read + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "253" + - key: device_minor + value: + stringValue: "2" + - key: operation + value: + stringValue: write + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "1998848" + attributes: + - key: device_major + value: + stringValue: "7" + - key: device_minor + value: + stringValue: "2" + - key: operation + value: + stringValue: read + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "7" + - key: device_minor + value: + stringValue: "2" + - key: operation + value: + stringValue: write + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: By + - description: CPU limit set for the container. + gauge: + dataPoints: + - asDouble: 2 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: container.cpu.limit + unit: '{cpus}' + - description: 'Number of cores available to the container.' 
+ gauge: + dataPoints: + - asInt: 2 + startTimeUnixNano: "1687762436124732000" + timeUnixNano: "1687762436137493000" + name: container.cpu.logical.count + unit: "{cpus}" + - description: CPU shares set for the container. + gauge: + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: container.cpu.shares + unit: "1" + - description: Number of periods with throttling active. + name: container.cpu.throttling_data.periods + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{periods}' + - description: Number of periods when the container hits its throttling limit. + name: container.cpu.throttling_data.throttled_periods + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{periods}' + - description: Aggregate time the container was throttled. + name: container.cpu.throttling_data.throttled_time + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: Time spent by tasks of the cgroup in kernel mode (Linux). Time spent by all container processes in kernel mode (Windows). + name: container.cpu.usage.kernelmode + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "5467000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: System CPU usage, as reported by docker. + name: container.cpu.usage.system + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "183556380000000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: Total CPU time consumed. 
+ name: container.cpu.usage.total + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "10935000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: Time spent by tasks of the cgroup in user mode (Linux). Time spent by all container processes in user mode (Windows). + name: container.cpu.usage.usermode + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "5467000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: Percent of CPU used by the container. + gauge: + dataPoints: + - asDouble: 0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: container.cpu.utilization + unit: "1" + - description: The amount of anonymous memory that has been identified as active by the kernel. + name: container.memory.active_anon + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "4096" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Cache memory that has been identified as active by the kernel. + name: container.memory.active_file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Amount of memory used in anonymous mappings such as brk(), sbrk(), and mmap(MAP_ANONYMOUS) (Only available with cgroups v2). + name: container.memory.anon + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "61440" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: 'Number of times the memory limit was hit.' + name: container.memory.fails + sum: + isMonotonic: true + aggregationTemporality: 2 + dataPoints: + - asInt: "4" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: "{fails}" + - description: Amount of memory used to cache filesystem data, including tmpfs and shared memory (Only available with cgroups v2). 
+ name: container.memory.file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "233848832" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The amount of anonymous memory that has been identified as inactive by the kernel. + name: container.memory.inactive_anon + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "110592" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Cache memory that has been identified as inactive by the kernel. + name: container.memory.inactive_file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1892352" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Percentage of memory used. + gauge: + dataPoints: + - asDouble: 0.016875995187363255 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: container.memory.percent + unit: "1" + - description: Indicate the number of times that a process of the cgroup triggered a page fault. + name: container.memory.pgfault + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1029" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{faults}' + - description: Indicate the number of times that a process of the cgroup triggered a major fault. + name: container.memory.pgmajfault + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "12" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{faults}' + - description: The amount of memory that cannot be reclaimed. + name: container.memory.unevictable + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Memory limit of the container. + name: container.memory.usage.limit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "2063048704" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Maximum memory usage. 
+ name: container.memory.usage.max + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Memory usage of the container. This excludes the cache. + name: container.memory.usage.total + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "348160" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Bytes received by the container. + name: container.network.io.usage.rx_bytes + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "3608" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: By + - description: Incoming packets dropped. + name: container.network.io.usage.rx_dropped + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{packets}' + - description: Received errors. + name: container.network.io.usage.rx_errors + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{errors}' + - description: Packets received. + name: container.network.io.usage.rx_packets + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "44" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{packets}' + - description: Bytes sent. + name: container.network.io.usage.tx_bytes + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: By + - description: Outgoing packets dropped. 
+ name: container.network.io.usage.tx_dropped + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{packets}' + - description: Sent errors. + name: container.network.io.usage.tx_errors + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{errors}' + - description: Packets sent. + name: container.network.io.usage.tx_packets + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{packets}' + - description: Number of pids in the container's cgroup. + name: container.pids.count + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{pids}' + - description: Maximum number of pids in the container's cgroup. + name: container.pids.limit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "2192" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{pids}' + - description: Number of restarts for the container. + name: container.restarts + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{restarts}' + - description: Time elapsed since container start time. 
+ gauge: + dataPoints: + - asDouble: 1.5532824571167516e+07 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: container.uptime + unit: s + scope: + name: otelcol/docker/receiver + version: latest diff --git a/internal/docker/receiver/testdata/mock/cpu_limit/stats.json b/internal/docker/receiver/testdata/mock/cpu_limit/stats.json new file mode 100644 index 000000000000..dc51d90f0cfb --- /dev/null +++ b/internal/docker/receiver/testdata/mock/cpu_limit/stats.json @@ -0,0 +1,137 @@ +{ + "read": "2023-04-24T12:23:08.456710245Z", + "preread": "2023-04-24T12:23:07.447356277Z", + "pids_stats": { + "current": 1, + "limit": 2192 + }, + "blkio_stats": { + "io_service_bytes_recursive": [ + { + "major": 7, + "minor": 2, + "op": "read", + "value": 1998848 + }, + { + "major": 7, + "minor": 2, + "op": "write", + "value": 0 + }, + { + "major": 253, + "minor": 1, + "op": "read", + "value": 1998848 + }, + { + "major": 253, + "minor": 1, + "op": "write", + "value": 0 + }, + { + "major": 253, + "minor": 2, + "op": "read", + "value": 1998848 + }, + { + "major": 253, + "minor": 2, + "op": "write", + "value": 0 + } + ], + "io_serviced_recursive": null, + "io_queue_recursive": null, + "io_service_time_recursive": null, + "io_wait_time_recursive": null, + "io_merged_recursive": null, + "io_time_recursive": null, + "sectors_recursive": null + }, + "num_procs": 0, + "storage_stats": {}, + "cpu_stats": { + "cpu_usage": { + "total_usage": 10935000, + "usage_in_kernelmode": 5467000, + "usage_in_usermode": 5467000 + }, + "system_cpu_usage": 183556380000000, + "online_cpus": 2, + "throttling_data": { + "periods": 0, + "throttled_periods": 0, + "throttled_time": 0 + } + }, + "precpu_stats": { + "cpu_usage": { + "total_usage": 10935000, + "usage_in_kernelmode": 5467000, + "usage_in_usermode": 5467000 + }, + "system_cpu_usage": 183554360000000, + "online_cpus": 2, + "throttling_data": { + "periods": 0, + "throttled_periods": 0, + "throttled_time": 0 + } + }, + "memory_stats": { + 
"failcnt": 4, + "usage": 2240512, + "stats": { + "active_anon": 4096, + "active_file": 0, + "anon": 61440, + "anon_thp": 0, + "file": 233848832, + "file_dirty": 0, + "file_mapped": 1138688, + "file_writeback": 0, + "inactive_anon": 110592, + "inactive_file": 1892352, + "kernel_stack": 16384, + "pgactivate": 0, + "pgdeactivate": 0, + "pgfault": 1029, + "pglazyfree": 0, + "pglazyfreed": 0, + "pgmajfault": 12, + "pgrefill": 0, + "pgscan": 0, + "pgsteal": 0, + "shmem": 0, + "slab": 165776, + "slab_reclaimable": 93752, + "slab_unreclaimable": 72024, + "sock": 0, + "thp_collapse_alloc": 0, + "thp_fault_alloc": 0, + "unevictable": 0, + "workingset_activate": 0, + "workingset_nodereclaim": 0, + "workingset_refault": 0 + }, + "limit": 2063048704 + }, + "name": "/sleepy1", + "id": "9b842c47c1c3e4ee931e2c9713cf4e77aa09acc2201aea60fba04b6dbba6c674", + "networks": { + "eth0": { + "rx_bytes": 3608, + "rx_packets": 44, + "rx_errors": 0, + "rx_dropped": 0, + "tx_bytes": 0, + "tx_packets": 0, + "tx_errors": 0, + "tx_dropped": 0 + } + } +} diff --git a/internal/docker/receiver/testdata/mock/no_pids_stats/container.json b/internal/docker/receiver/testdata/mock/no_pids_stats/container.json new file mode 100644 index 000000000000..5a3df7b67d4b --- /dev/null +++ b/internal/docker/receiver/testdata/mock/no_pids_stats/container.json @@ -0,0 +1,218 @@ +{ + "AppArmorProfile": "", + "Args": [], + "Config": { + "AttachStderr": true, + "AttachStdin": true, + "AttachStdout": true, + "Cmd": [ + "/bin/sh" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "ENV_VAR=env-var", + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": { + "8000/tcp": {} + }, + "Hostname": "10b703fb312b", + "Image": "ubuntu", + "Labels": { + "container.label": "container-label" + }, + "OnBuild": null, + "OpenStdin": true, + "StdinOnce": true, + "Tty": true, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "Created": "2022-07-06T04:17:29.79437Z", + "Driver": 
"overlay2", + "ExecIDs": null, + "GraphDriver": { + "Data": { + "LowerDir": "/var/lib/docker/overlay2/669689c31e0a0038beda956dc8ee195c30093890251f497fbee84131e6abe859-init/diff:/var/lib/docker/overlay2/f11adae41a6c3a10b6e8fd2440b5170d8ff4f9979eecb1b43c19e2a996c9937a/diff", + "MergedDir": "/var/lib/docker/overlay2/669689c31e0a0038beda956dc8ee195c30093890251f497fbee84131e6abe859/merged", + "UpperDir": "/var/lib/docker/overlay2/669689c31e0a0038beda956dc8ee195c30093890251f497fbee84131e6abe859/diff", + "WorkDir": "/var/lib/docker/overlay2/669689c31e0a0038beda956dc8ee195c30093890251f497fbee84131e6abe859/work" + }, + "Name": "overlay2" + }, + "HostConfig": { + "AutoRemove": false, + "Binds": null, + "BlkioDeviceReadBps": null, + "BlkioDeviceReadIOps": null, + "BlkioDeviceWriteBps": null, + "BlkioDeviceWriteIOps": null, + "BlkioWeight": 0, + "BlkioWeightDevice": [], + "CapAdd": null, + "CapDrop": null, + "Cgroup": "", + "CgroupParent": "", + "CgroupnsMode": "host", + "ConsoleSize": [ + 0, + 0 + ], + "ContainerIDFile": "", + "CpuCount": 0, + "CpuPercent": 0, + "CpuPeriod": 0, + "CpuQuota": 0, + "CpuRealtimePeriod": 0, + "CpuRealtimeRuntime": 0, + "CpuShares": 0, + "CpusetCpus": "", + "CpusetMems": "", + "DeviceCgroupRules": null, + "DeviceRequests": null, + "Devices": [], + "Dns": [], + "DnsOptions": [], + "DnsSearch": [], + "ExtraHosts": null, + "GroupAdd": null, + "IOMaximumBandwidth": 0, + "IOMaximumIOps": 0, + "IpcMode": "private", + "Isolation": "", + "KernelMemory": 0, + "KernelMemoryTCP": 0, + "Links": null, + "LogConfig": { + "Config": {}, + "Type": "json-file" + }, + "MaskedPaths": [ + "/proc/asound", + "/proc/acpi", + "/proc/kcore", + "/proc/keys", + "/proc/latency_stats", + "/proc/timer_list", + "/proc/timer_stats", + "/proc/sched_debug", + "/proc/scsi", + "/sys/firmware" + ], + "Memory": 0, + "MemoryReservation": 0, + "MemorySwap": 0, + "MemorySwappiness": null, + "NanoCpus": 0, + "NetworkMode": "default", + "OomKillDisable": false, + "OomScoreAdj": 0, + 
"PidMode": "", + "PidsLimit": null, + "PortBindings": { + "8000/tcp": [ + { + "HostIp": "", + "HostPort": "8000" + } + ] + }, + "Privileged": false, + "PublishAllPorts": false, + "ReadonlyPaths": [ + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + "/proc/sysrq-trigger" + ], + "ReadonlyRootfs": false, + "RestartPolicy": { + "MaximumRetryCount": 0, + "Name": "no" + }, + "Runtime": "runc", + "SecurityOpt": null, + "ShmSize": 67108864, + "UTSMode": "", + "Ulimits": null, + "UsernsMode": "", + "VolumeDriver": "", + "VolumesFrom": null + }, + "HostnamePath": "/var/lib/docker/containers/10b703fb312b25e8368ab5a3bce3a1610d1cee5d71a94920f1a7adbc5b0cb326/hostname", + "HostsPath": "/var/lib/docker/containers/10b703fb312b25e8368ab5a3bce3a1610d1cee5d71a94920f1a7adbc5b0cb326/hosts", + "Id": "10b703fb312b25e8368ab5a3bce3a1610d1cee5d71a94920f1a7adbc5b0cb326", + "Image": "sha256:825d55fb6340083b06e69e02e823a02918f3ffb575ed2a87026d4645a7fd9e1b", + "LogPath": "/var/lib/docker/containers/10b703fb312b25e8368ab5a3bce3a1610d1cee5d71a94920f1a7adbc5b0cb326/10b703fb312b25e8368ab5a3bce3a1610d1cee5d71a94920f1a7adbc5b0cb326-json.log", + "MountLabel": "", + "Mounts": [], + "Name": "/bold_sinoussi", + "NetworkSettings": { + "Bridge": "", + "EndpointID": "e844b423ff61ed07aac37c6d9997903ee4771ccffc31ba7dbb3f58f364724170", + "Gateway": "10.255.0.1", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "HairpinMode": false, + "IPAddress": "10.255.0.2", + "IPPrefixLen": 24, + "IPv6Gateway": "", + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "MacAddress": "02:42:0a:ff:00:01", + "Networks": { + "bridge": { + "Aliases": null, + "DriverOpts": null, + "EndpointID": "e844b423ff61ed07aac37c6d9997903ee4771ccffc31ba7dbb3f58f364724170", + "Gateway": "10.255.0.1", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAMConfig": null, + "IPAddress": "10.255.0.2", + "IPPrefixLen": 24, + "IPv6Gateway": "", + "Links": null, + "MacAddress": "02:42:0a:ff:00:01", + "NetworkID": 
"c44102203908a4202f675742fcc2384849f4d0b5534d7fb74fd0f3ea7dbee928" + } + }, + "Ports": { + "8000/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "8000" + }, + { + "HostIp": "::", + "HostPort": "8000" + } + ] + }, + "SandboxID": "b83b7db7e06d3ba7c4c05208d41d327b0be0e17bfb50a9a57f4d9a31f0fdd662", + "SandboxKey": "/var/run/docker/netns/b83b7db7e06d", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null + }, + "Path": "/bin/sh", + "Platform": "linux", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/10b703fb312b25e8368ab5a3bce3a1610d1cee5d71a94920f1a7adbc5b0cb326/resolv.conf", + "RestartCount": 0, + "State": { + "Dead": false, + "Error": "", + "ExitCode": 0, + "FinishedAt": "0001-01-01T00:00:00Z", + "OOMKilled": false, + "Paused": false, + "Pid": 2968, + "Restarting": false, + "Running": true, + "StartedAt": "2022-07-06T04:17:30.2570682Z", + "Status": "running" + } +} diff --git a/internal/docker/receiver/testdata/mock/no_pids_stats/containers.json b/internal/docker/receiver/testdata/mock/no_pids_stats/containers.json new file mode 100644 index 000000000000..10be2e6878d5 --- /dev/null +++ b/internal/docker/receiver/testdata/mock/no_pids_stats/containers.json @@ -0,0 +1,54 @@ +[ + { + "Command": "/bin/sh", + "Created": 1657081049, + "HostConfig": { + "NetworkMode": "default" + }, + "Id": "10b703fb312b25e8368ab5a3bce3a1610d1cee5d71a94920f1a7adbc5b0cb326", + "Image": "ubuntu", + "ImageID": "sha256:825d55fb6340083b06e69e02e823a02918f3ffb575ed2a87026d4645a7fd9e1b", + "Labels": { + "container.label": "container-label" + }, + "Mounts": [], + "Names": [ + "/bold_sinoussi" + ], + "NetworkSettings": { + "Networks": { + "bridge": { + "Aliases": null, + "DriverOpts": null, + "EndpointID": "e844b423ff61ed07aac37c6d9997903ee4771ccffc31ba7dbb3f58f364724170", + "Gateway": "10.255.0.1", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAMConfig": null, + "IPAddress": "10.255.0.2", + "IPPrefixLen": 24, + "IPv6Gateway": "", + "Links": null, + 
"MacAddress": "02:42:0a:ff:00:02", + "NetworkID": "c44102203908a4202f675742fcc2384849f4d0b5534d7fb74fd0f3ea7dbee928" + } + } + }, + "Ports": [ + { + "IP": "0.0.0.0", + "PrivatePort": 8000, + "PublicPort": 8000, + "Type": "tcp" + }, + { + "IP": "::", + "PrivatePort": 8000, + "PublicPort": 8000, + "Type": "tcp" + } + ], + "State": "running", + "Status": "Up 3 minutes" + } +] diff --git a/internal/docker/receiver/testdata/mock/no_pids_stats/expected_metrics.yaml b/internal/docker/receiver/testdata/mock/no_pids_stats/expected_metrics.yaml new file mode 100644 index 000000000000..32184eb3229a --- /dev/null +++ b/internal/docker/receiver/testdata/mock/no_pids_stats/expected_metrics.yaml @@ -0,0 +1,828 @@ +resourceMetrics: + - resource: + attributes: + - key: container-metric-label + value: + stringValue: container-label + - key: container.hostname + value: + stringValue: 10b703fb312b + - key: container.id + value: + stringValue: 10b703fb312b25e8368ab5a3bce3a1610d1cee5d71a94920f1a7adbc5b0cb326 + - key: container.image.name + value: + stringValue: ubuntu + - key: container.name + value: + stringValue: bold_sinoussi + - key: container.runtime + value: + stringValue: docker + - key: env-var-metric-label + value: + stringValue: env-var + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeMetrics: + - metrics: + - description: Number of bytes transferred to/from the disk by the group and descendant groups. 
+ name: container.blockio.io_service_bytes_recursive + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "254" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: async + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "254" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: discard + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "2502656" + attributes: + - key: device_major + value: + stringValue: "254" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: read + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "2502656" + attributes: + - key: device_major + value: + stringValue: "254" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: sync + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "2502656" + attributes: + - key: device_major + value: + stringValue: "254" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: total + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "254" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: write + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: By + - description: Number of IOs (bio) issued to the disk by the group and descendant groups (Only available with cgroups v1). 
+ name: container.blockio.io_serviced_recursive + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "254" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: async + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "254" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: discard + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "99" + attributes: + - key: device_major + value: + stringValue: "254" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: read + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "99" + attributes: + - key: device_major + value: + stringValue: "254" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: sync + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "99" + attributes: + - key: device_major + value: + stringValue: "254" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: total + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "254" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: write + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{operations}' + - description: 'Number of cores available to the container.' + gauge: + dataPoints: + - asInt: 8 + startTimeUnixNano: "1687762436124732000" + timeUnixNano: "1687762436137493000" + name: container.cpu.logical.count + unit: "{cpus}" + - description: CPU shares set for the container. 
+ gauge: + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: container.cpu.shares + unit: "1" + - description: Number of periods with throttling active. + name: container.cpu.throttling_data.periods + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{periods}' + - description: Number of periods when the container hits its throttling limit. + name: container.cpu.throttling_data.throttled_periods + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{periods}' + - description: Aggregate time the container was throttled. + name: container.cpu.throttling_data.throttled_time + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: Time spent by tasks of the cgroup in kernel mode (Linux). Time spent by all container processes in kernel mode (Windows). + name: container.cpu.usage.kernelmode + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "10000000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: Per-core CPU usage by the container (Only available with cgroups v1). 
+ name: container.cpu.usage.percpu + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1415045" + attributes: + - key: core + value: + stringValue: cpu0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: core + value: + stringValue: cpu1 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "262690" + attributes: + - key: core + value: + stringValue: cpu2 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "762532" + attributes: + - key: core + value: + stringValue: cpu3 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "78532" + attributes: + - key: core + value: + stringValue: cpu4 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "28108575" + attributes: + - key: core + value: + stringValue: cpu5 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "8800811" + attributes: + - key: core + value: + stringValue: cpu6 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "4191833" + attributes: + - key: core + value: + stringValue: cpu7 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: System CPU usage, as reported by docker. + name: container.cpu.usage.system + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "120830550000000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: Total CPU time consumed. + name: container.cpu.usage.total + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "43620018" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: Time spent by tasks of the cgroup in user mode (Linux). Time spent by all container processes in user mode (Windows). 
+ name: container.cpu.usage.usermode + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "10000000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: Percent of CPU used by the container. + gauge: + dataPoints: + - asDouble: 0.0002888012543185477 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: container.cpu.utilization + unit: "1" + - description: The amount of anonymous memory that has been identified as active by the kernel. + name: container.memory.active_anon + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Cache memory that has been identified as active by the kernel. + name: container.memory.active_file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "270336" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The amount of memory used by the processes of this control group that can be associated precisely with a block on a block device (Only available with cgroups v1). + name: container.memory.cache + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "2433024" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Bytes that are waiting to get written back to the disk, from this cgroup (Only available with cgroups v1). + name: container.memory.dirty + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: 'Number of times the memory limit was hit.' + name: container.memory.fails + sum: + isMonotonic: true + aggregationTemporality: 2 + dataPoints: + - asInt: "4" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: "{fails}" + - description: The maximum amount of physical memory that can be used by the processes of this control group (Only available with cgroups v1). 
+ name: container.memory.hierarchical_memory_limit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "9223372036854772000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The maximum amount of RAM + swap that can be used by the processes of this control group (Only available with cgroups v1). + name: container.memory.hierarchical_memsw_limit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "9223372036854772000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The amount of anonymous memory that has been identified as inactive by the kernel. + name: container.memory.inactive_anon + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Cache memory that has been identified as inactive by the kernel. + name: container.memory.inactive_file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "2162688" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Indicates the amount of memory mapped by the processes in the control group (Only available with cgroups v1). + name: container.memory.mapped_file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1486848" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Percentage of memory used. + gauge: + dataPoints: + - asDouble: 0.006938014912420301 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: container.memory.percent + unit: "1" + - description: Indicate the number of times that a process of the cgroup triggered a page fault. + name: container.memory.pgfault + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "990" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{faults}' + - description: Indicate the number of times that a process of the cgroup triggered a major fault. 
+ name: container.memory.pgmajfault + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{faults}' + - description: Number of pages read from disk by the cgroup (Only available with cgroups v1). + name: container.memory.pgpgin + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1287" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{operations}' + - description: Number of pages written to disk by the cgroup (Only available with cgroups v1). + name: container.memory.pgpgout + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "667" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{operations}' + - description: 'The amount of memory that doesn’t correspond to anything on disk: stacks, heaps, and anonymous memory maps (Only available with cgroups v1).' + name: container.memory.rss + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Number of bytes of anonymous transparent hugepages in this cgroup (Only available with cgroups v1). + name: container.memory.rss_huge + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The amount of anonymous memory that has been identified as active by the kernel. Includes descendant cgroups (Only available with cgroups v1). + name: container.memory.total_active_anon + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Cache memory that has been identified as active by the kernel. Includes descendant cgroups (Only available with cgroups v1). 
+ name: container.memory.total_active_file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "270336" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Total amount of memory used by the processes of this cgroup (and descendants) that can be associated with a block on a block device. Also accounts for memory used by tmpfs (Only available with cgroups v1). + name: container.memory.total_cache + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "2433024" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Bytes that are waiting to get written back to the disk, from this cgroup and descendants (Only available with cgroups v1). + name: container.memory.total_dirty + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The amount of anonymous memory that has been identified as inactive by the kernel. Includes descendant cgroups (Only available with cgroups v1). + name: container.memory.total_inactive_anon + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Cache memory that has been identified as inactive by the kernel. Includes descendant cgroups (Only available with cgroups v1). + name: container.memory.total_inactive_file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "2162688" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Indicates the amount of memory mapped by the processes in the control group and descendant groups (Only available with cgroups v1). 
+ name: container.memory.total_mapped_file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1486848" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Indicate the number of times that a process of the cgroup (or descendant cgroups) triggered a page fault (Only available with cgroups v1). + name: container.memory.total_pgfault + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "990" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{faults}' + - description: Indicate the number of times that a process of the cgroup (or descendant cgroups) triggered a major fault (Only available with cgroups v1). + name: container.memory.total_pgmajfault + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{faults}' + - description: Number of pages read from disk by the cgroup and descendant groups (Only available with cgroups v1). + name: container.memory.total_pgpgin + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1287" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{operations}' + - description: Number of pages written to disk by the cgroup and descendant groups (Only available with cgroups v1). + name: container.memory.total_pgpgout + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "667" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{operations}' + - description: 'The amount of memory that doesn’t correspond to anything on disk: stacks, heaps, and anonymous memory maps. Includes descendant cgroups (Only available with cgroups v1).' 
+ name: container.memory.total_rss + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Number of bytes of anonymous transparent hugepages in this cgroup and descendant cgroups (Only available with cgroups v1). + name: container.memory.total_rss_huge + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The amount of memory that cannot be reclaimed. Includes descendant cgroups (Only available with cgroups v1). + name: container.memory.total_unevictable + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Number of bytes of file/anon cache that are queued for syncing to disk in this cgroup and descendants (Only available with cgroups v1). + name: container.memory.total_writeback + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The amount of memory that cannot be reclaimed. + name: container.memory.unevictable + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Memory limit of the container. + name: container.memory.usage.limit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "10449559552" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Maximum memory usage. + name: container.memory.usage.max + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "3932160" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Memory usage of the container. This excludes the cache. 
+ name: container.memory.usage.total + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "724992" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Number of bytes of file/anon cache that are queued for syncing to disk in this cgroup (Only available with cgroups v1). + name: container.memory.writeback + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Bytes received by the container. + name: container.network.io.usage.rx_bytes + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1532" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: By + - description: Incoming packets dropped. + name: container.network.io.usage.rx_dropped + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{packets}' + - description: Received errors. + name: container.network.io.usage.rx_errors + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{errors}' + - description: Packets received. + name: container.network.io.usage.rx_packets + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "18" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{packets}' + - description: Bytes sent. 
+ name: container.network.io.usage.tx_bytes + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: By + - description: Outgoing packets dropped. + name: container.network.io.usage.tx_dropped + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{packets}' + - description: Sent errors. + name: container.network.io.usage.tx_errors + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{errors}' + - description: Packets sent. + name: container.network.io.usage.tx_packets + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{packets}' + - description: Number of restarts for the container. + name: container.restarts + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{restarts}' + - description: Time elapsed since container start time. 
+ gauge: + dataPoints: + - asDouble: 4.03649084666988e+07 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: container.uptime + unit: s + scope: + name: otelcol/docker/receiver + version: latest diff --git a/internal/docker/receiver/testdata/mock/no_pids_stats/stats.json b/internal/docker/receiver/testdata/mock/no_pids_stats/stats.json new file mode 100644 index 000000000000..269c0238f6e0 --- /dev/null +++ b/internal/docker/receiver/testdata/mock/no_pids_stats/stats.json @@ -0,0 +1,180 @@ +{ + "blkio_stats": { + "io_merged_recursive": [], + "io_queue_recursive": [], + "io_service_bytes_recursive": [ + { + "major": 254, + "minor": 0, + "op": "Read", + "value": 2502656 + }, + { + "major": 254, + "minor": 0, + "op": "Write", + "value": 0 + }, + { + "major": 254, + "minor": 0, + "op": "Sync", + "value": 2502656 + }, + { + "major": 254, + "minor": 0, + "op": "Async", + "value": 0 + }, + { + "major": 254, + "minor": 0, + "op": "Discard", + "value": 0 + }, + { + "major": 254, + "minor": 0, + "op": "Total", + "value": 2502656 + } + ], + "io_service_time_recursive": [], + "io_serviced_recursive": [ + { + "major": 254, + "minor": 0, + "op": "Read", + "value": 99 + }, + { + "major": 254, + "minor": 0, + "op": "Write", + "value": 0 + }, + { + "major": 254, + "minor": 0, + "op": "Sync", + "value": 99 + }, + { + "major": 254, + "minor": 0, + "op": "Async", + "value": 0 + }, + { + "major": 254, + "minor": 0, + "op": "Discard", + "value": 0 + }, + { + "major": 254, + "minor": 0, + "op": "Total", + "value": 99 + } + ], + "io_time_recursive": [], + "io_wait_time_recursive": [], + "sectors_recursive": [] + }, + "cpu_stats": { + "cpu_usage": { + "percpu_usage": [ + 1415045, + 0, + 262690, + 762532, + 78532, + 28108575, + 8800811, + 4191833 + ], + "total_usage": 43620018, + "usage_in_kernelmode": 10000000, + "usage_in_usermode": 10000000 + }, + "online_cpus": 8, + "system_cpu_usage": 120830550000000, + "throttling_data": { + "periods": 0, + "throttled_periods": 0, + 
"throttled_time": 0 + } + }, + "id": "10b703fb312b25e8368ab5a3bce3a1610d1cee5d71a94920f1a7adbc5b0cb326", + "memory_stats": { + "failcnt": 4, + "limit": 10449559552, + "max_usage": 3932160, + "stats": { + "active_anon": 0, + "active_file": 270336, + "cache": 2433024, + "dirty": 0, + "hierarchical_memory_limit": 9223372036854772000, + "hierarchical_memsw_limit": 9223372036854772000, + "inactive_anon": 0, + "inactive_file": 2162688, + "mapped_file": 1486848, + "pgfault": 990, + "pgmajfault": 0, + "pgpgin": 1287, + "pgpgout": 667, + "rss": 0, + "rss_huge": 0, + "total_active_anon": 0, + "total_active_file": 270336, + "total_cache": 2433024, + "total_dirty": 0, + "total_inactive_anon": 0, + "total_inactive_file": 2162688, + "total_mapped_file": 1486848, + "total_pgfault": 990, + "total_pgmajfault": 0, + "total_pgpgin": 1287, + "total_pgpgout": 667, + "total_rss": 0, + "total_rss_huge": 0, + "total_unevictable": 0, + "total_writeback": 0, + "unevictable": 0, + "writeback": 0 + }, + "usage": 2887680 + }, + "name": "/bold_sinoussi", + "networks": { + "eth0": { + "rx_bytes": 1532, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 18, + "tx_bytes": 0, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 0 + } + }, + "num_procs": 0, + "precpu_stats": { + "cpu_usage": { + "total_usage": 0, + "usage_in_kernelmode": 0, + "usage_in_usermode": 0 + }, + "throttling_data": { + "periods": 0, + "throttled_periods": 0, + "throttled_time": 0 + } + }, + "preread": "0001-01-01T00:00:00Z", + "read": "2022-07-06T04:27:03.0439251Z", + "storage_stats": {} +} diff --git a/internal/docker/receiver/testdata/mock/pids_stats_max/container.json b/internal/docker/receiver/testdata/mock/pids_stats_max/container.json new file mode 100644 index 000000000000..ed9a0b4dd823 --- /dev/null +++ b/internal/docker/receiver/testdata/mock/pids_stats_max/container.json @@ -0,0 +1,196 @@ +{ + "Id": "78de07328afff50a9777b07dd36a28c709dffe081baaf67235db618843399643", + "Created": "2023-04-17T13:51:04.607496655Z", 
+ "Path": "sleep", + "Args": [ + "infinity" + ], + "State": { + "Status": "running", + "Running": true, + "Paused": false, + "Restarting": false, + "OOMKilled": false, + "Dead": false, + "Pid": 135886, + "ExitCode": 0, + "Error": "", + "StartedAt": "2023-04-19T14:07:07.809461484Z", + "FinishedAt": "2023-04-19T14:06:53.167608711Z" + }, + "Image": "sha256:3fbaf71a998bae6e375be74b999bd418091bf6511e356a129fdc969c4a94a5bc", + "ResolvConfPath": "/var/lib/docker/containers/78de07328afff50a9777b07dd36a28c709dffe081baaf67235db618843399643/resolv.conf", + "HostnamePath": "/var/lib/docker/containers/78de07328afff50a9777b07dd36a28c709dffe081baaf67235db618843399643/hostname", + "HostsPath": "/var/lib/docker/containers/78de07328afff50a9777b07dd36a28c709dffe081baaf67235db618843399643/hosts", + "LogPath": "/var/lib/docker/containers/78de07328afff50a9777b07dd36a28c709dffe081baaf67235db618843399643/78de07328afff50a9777b07dd36a28c709dffe081baaf67235db618843399643-json.log", + "Name": "/sleepy1", + "RestartCount": 0, + "Driver": "devicemapper", + "Platform": "linux", + "MountLabel": "", + "ProcessLabel": "", + "AppArmorProfile": "docker-default", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "ContainerIDFile": "", + "LogConfig": { + "Type": "json-file", + "Config": {} + }, + "NetworkMode": "default", + "PortBindings": {}, + "RestartPolicy": { + "Name": "no", + "MaximumRetryCount": 0 + }, + "AutoRemove": false, + "VolumeDriver": "", + "VolumesFrom": null, + "CapAdd": null, + "CapDrop": null, + "CgroupnsMode": "private", + "Dns": [], + "DnsOptions": [], + "DnsSearch": [], + "ExtraHosts": null, + "GroupAdd": null, + "IpcMode": "private", + "Cgroup": "", + "Links": null, + "OomScoreAdj": 0, + "PidMode": "", + "Privileged": false, + "PublishAllPorts": false, + "ReadonlyRootfs": false, + "SecurityOpt": null, + "UTSMode": "", + "UsernsMode": "", + "ShmSize": 67108864, + "Runtime": "runc", + "ConsoleSize": [ + 0, + 0 + ], + "Isolation": "", + "CpuShares": 0, + "Memory": 0, + 
"NanoCpus": 0, + "CgroupParent": "", + "BlkioWeight": 0, + "BlkioWeightDevice": [], + "BlkioDeviceReadBps": null, + "BlkioDeviceWriteBps": null, + "BlkioDeviceReadIOps": null, + "BlkioDeviceWriteIOps": null, + "CpuPeriod": 0, + "CpuQuota": 0, + "CpuRealtimePeriod": 0, + "CpuRealtimeRuntime": 0, + "CpusetCpus": "", + "CpusetMems": "", + "Devices": [], + "DeviceCgroupRules": null, + "DeviceRequests": null, + "KernelMemory": 0, + "KernelMemoryTCP": 0, + "MemoryReservation": 0, + "MemorySwap": 0, + "MemorySwappiness": null, + "OomKillDisable": null, + "PidsLimit": null, + "Ulimits": null, + "CpuCount": 0, + "CpuPercent": 0, + "IOMaximumIOps": 0, + "IOMaximumBandwidth": 0, + "MaskedPaths": [ + "/proc/asound", + "/proc/acpi", + "/proc/kcore", + "/proc/keys", + "/proc/latency_stats", + "/proc/timer_list", + "/proc/timer_stats", + "/proc/sched_debug", + "/proc/scsi", + "/sys/firmware" + ], + "ReadonlyPaths": [ + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + "/proc/sysrq-trigger" + ] + }, + "GraphDriver": { + "Data": { + "DeviceId": "4", + "DeviceName": "docker-253:0-1050151-b0997978b757cf1dc712ad50496bf49e85cfd24d8b1c61853c16a0eec0ed4176", + "DeviceSize": "10737418240" + }, + "Name": "devicemapper" + }, + "Mounts": [], + "Config": { + "Hostname": "78de07328aff", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Cmd": [ + "sleep", + "infinity" + ], + "Image": "busybox", + "Volumes": null, + "WorkingDir": "", + "Entrypoint": null, + "OnBuild": null, + "Labels": {} + }, + "NetworkSettings": { + "Bridge": "", + "SandboxID": "384e9c0ba138cdcf78d8abdbb0c55b725ff83d0d02ba3c7aa170b9c38ba5e1fc", + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "Ports": {}, + "SandboxKey": "/var/run/docker/netns/384e9c0ba138", + 
"SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "dccc9fc92b4d33e9a0b0f66c1daaf528e4241259d5f7609b93740c87765c7649", + "Gateway": "172.17.0.1", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "MacAddress": "02:42:ac:11:00:02", + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "8dd6b2854086c51888ebfaca18940146b4ccfc332a9bc3fbe7af7b4d9645bbce", + "EndpointID": "dccc9fc92b4d33e9a0b0f66c1daaf528e4241259d5f7609b93740c87765c7649", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:02", + "DriverOpts": null + } + } + } +} diff --git a/internal/docker/receiver/testdata/mock/pids_stats_max/containers.json b/internal/docker/receiver/testdata/mock/pids_stats_max/containers.json new file mode 100644 index 000000000000..ddc6324777b7 --- /dev/null +++ b/internal/docker/receiver/testdata/mock/pids_stats_max/containers.json @@ -0,0 +1,39 @@ +[ + { + "Id": "78de07328afff50a9777b07dd36a28c709dffe081baaf67235db618843399643", + "Names": [ + "/sleepy1" + ], + "Image": "busybox", + "ImageID": "sha256:3fbaf71a998bae6e375be74b999bd418091bf6511e356a129fdc969c4a94a5bc", + "Command": "sleep infinity", + "Created": 1681739464, + "Ports": [], + "Labels": {}, + "State": "running", + "Status": "Up 4 days", + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "8dd6b2854086c51888ebfaca18940146b4ccfc332a9bc3fbe7af7b4d9645bbce", + "EndpointID": "dccc9fc92b4d33e9a0b0f66c1daaf528e4241259d5f7609b93740c87765c7649", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": 
"02:42:ac:11:00:02", + "DriverOpts": null + } + } + }, + "Mounts": [] + } +] diff --git a/internal/docker/receiver/testdata/mock/pids_stats_max/expected_metrics.yaml b/internal/docker/receiver/testdata/mock/pids_stats_max/expected_metrics.yaml new file mode 100644 index 000000000000..875710d74345 --- /dev/null +++ b/internal/docker/receiver/testdata/mock/pids_stats_max/expected_metrics.yaml @@ -0,0 +1,479 @@ +resourceMetrics: + - resource: + attributes: + - key: container.hostname + value: + stringValue: 78de07328aff + - key: container.id + value: + stringValue: 78de07328afff50a9777b07dd36a28c709dffe081baaf67235db618843399643 + - key: container.image.name + value: + stringValue: busybox + - key: container.name + value: + stringValue: sleepy1 + - key: container.runtime + value: + stringValue: docker + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeMetrics: + - metrics: + - description: Number of bytes transferred to/from the disk by the group and descendant groups. + name: container.blockio.io_service_bytes_recursive + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1998848" + attributes: + - key: device_major + value: + stringValue: "253" + - key: device_minor + value: + stringValue: "1" + - key: operation + value: + stringValue: read + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "253" + - key: device_minor + value: + stringValue: "1" + - key: operation + value: + stringValue: write + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "1998848" + attributes: + - key: device_major + value: + stringValue: "253" + - key: device_minor + value: + stringValue: "2" + - key: operation + value: + stringValue: read + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "253" + - key: device_minor + value: + stringValue: "2" + - key: operation + value: + stringValue: write + 
startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "1998848" + attributes: + - key: device_major + value: + stringValue: "7" + - key: device_minor + value: + stringValue: "2" + - key: operation + value: + stringValue: read + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "7" + - key: device_minor + value: + stringValue: "2" + - key: operation + value: + stringValue: write + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: By + - description: 'Number of cores available to the container.' + gauge: + dataPoints: + - asInt: 2 + startTimeUnixNano: "1687762436124732000" + timeUnixNano: "1687762436137493000" + name: container.cpu.logical.count + unit: "{cpus}" + - description: CPU shares set for the container. + gauge: + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: container.cpu.shares + unit: "1" + - description: Number of periods with throttling active. + name: container.cpu.throttling_data.periods + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{periods}' + - description: Number of periods when the container hits its throttling limit. + name: container.cpu.throttling_data.throttled_periods + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{periods}' + - description: Aggregate time the container was throttled. + name: container.cpu.throttling_data.throttled_time + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: Time spent by tasks of the cgroup in kernel mode (Linux). Time spent by all container processes in kernel mode (Windows). 
+ name: container.cpu.usage.kernelmode + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "5467000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: System CPU usage, as reported by docker. + name: container.cpu.usage.system + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "183556380000000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: Total CPU time consumed. + name: container.cpu.usage.total + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "10935000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: Time spent by tasks of the cgroup in user mode (Linux). Time spent by all container processes in user mode (Windows). + name: container.cpu.usage.usermode + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "5467000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: Percent of CPU used by the container. + gauge: + dataPoints: + - asDouble: 0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: container.cpu.utilization + unit: "1" + - description: The amount of anonymous memory that has been identified as active by the kernel. + name: container.memory.active_anon + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "4096" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Cache memory that has been identified as active by the kernel. + name: container.memory.active_file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Amount of memory used in anonymous mappings such as brk(), sbrk(), and mmap(MAP_ANONYMOUS) (Only available with cgroups v2). 
+ name: container.memory.anon + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "114688" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: 'Number of times the memory limit was hit.' + name: container.memory.fails + sum: + isMonotonic: true + aggregationTemporality: 2 + dataPoints: + - asInt: "4" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: "{fails}" + - description: Amount of memory used to cache filesystem data, including tmpfs and shared memory (Only available with cgroups v2). + name: container.memory.file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1892352" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The amount of anonymous memory that has been identified as inactive by the kernel. + name: container.memory.inactive_anon + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "110592" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Cache memory that has been identified as inactive by the kernel. + name: container.memory.inactive_file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1892352" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Percentage of memory used. + gauge: + dataPoints: + - asDouble: 0.016875995187363255 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: container.memory.percent + unit: "1" + - description: Indicate the number of times that a process of the cgroup triggered a page fault. + name: container.memory.pgfault + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1029" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{faults}' + - description: Indicate the number of times that a process of the cgroup triggered a major fault. 
+ name: container.memory.pgmajfault + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "12" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{faults}' + - description: The amount of memory that cannot be reclaimed. + name: container.memory.unevictable + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Memory limit of the container. + name: container.memory.usage.limit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "2063048704" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Maximum memory usage. + name: container.memory.usage.max + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Memory usage of the container. This excludes the cache. + name: container.memory.usage.total + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "348160" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Bytes received by the container. + name: container.network.io.usage.rx_bytes + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "3608" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: By + - description: Incoming packets dropped. + name: container.network.io.usage.rx_dropped + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{packets}' + - description: Received errors. 
+ name: container.network.io.usage.rx_errors + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{errors}' + - description: Packets received. + name: container.network.io.usage.rx_packets + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "44" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{packets}' + - description: Bytes sent. + name: container.network.io.usage.tx_bytes + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: By + - description: Outgoing packets dropped. + name: container.network.io.usage.tx_dropped + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{packets}' + - description: Sent errors. + name: container.network.io.usage.tx_errors + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{errors}' + - description: Packets sent. + name: container.network.io.usage.tx_packets + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{packets}' + - description: Number of pids in the container's cgroup. 
+ name: container.pids.count + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{pids}' + - description: Maximum number of pids in the container's cgroup. + name: container.pids.limit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "2192" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{pids}' + - description: Number of restarts for the container. + name: container.restarts + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{restarts}' + - description: Time elapsed since container start time. + gauge: + dataPoints: + - asDouble: 1.5532782694550516e+07 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: container.uptime + unit: s + scope: + name: otelcol/docker/receiver + version: latest diff --git a/internal/docker/receiver/testdata/mock/pids_stats_max/stats.json b/internal/docker/receiver/testdata/mock/pids_stats_max/stats.json new file mode 100644 index 000000000000..90daf395f0aa --- /dev/null +++ b/internal/docker/receiver/testdata/mock/pids_stats_max/stats.json @@ -0,0 +1,137 @@ +{ + "read": "2023-04-24T12:23:08.456710245Z", + "preread": "2023-04-24T12:23:07.447356277Z", + "pids_stats": { + "current": 1, + "limit": 2192 + }, + "blkio_stats": { + "io_service_bytes_recursive": [ + { + "major": 7, + "minor": 2, + "op": "read", + "value": 1998848 + }, + { + "major": 7, + "minor": 2, + "op": "write", + "value": 0 + }, + { + "major": 253, + "minor": 1, + "op": "read", + "value": 1998848 + }, + { + "major": 253, + "minor": 1, + "op": "write", + "value": 0 + }, + { + "major": 253, + "minor": 2, + "op": "read", + "value": 1998848 + }, + { + "major": 253, + "minor": 2, + "op": "write", + "value": 0 + } + ], + "io_serviced_recursive": null, + "io_queue_recursive": null, + "io_service_time_recursive": null, + "io_wait_time_recursive": null, + 
"io_merged_recursive": null, + "io_time_recursive": null, + "sectors_recursive": null + }, + "num_procs": 0, + "storage_stats": {}, + "cpu_stats": { + "cpu_usage": { + "total_usage": 10935000, + "usage_in_kernelmode": 5467000, + "usage_in_usermode": 5467000 + }, + "system_cpu_usage": 183556380000000, + "online_cpus": 2, + "throttling_data": { + "periods": 0, + "throttled_periods": 0, + "throttled_time": 0 + } + }, + "precpu_stats": { + "cpu_usage": { + "total_usage": 10935000, + "usage_in_kernelmode": 5467000, + "usage_in_usermode": 5467000 + }, + "system_cpu_usage": 183554360000000, + "online_cpus": 2, + "throttling_data": { + "periods": 0, + "throttled_periods": 0, + "throttled_time": 0 + } + }, + "memory_stats": { + "failcnt": 4, + "usage": 2240512, + "stats": { + "active_anon": 4096, + "active_file": 0, + "anon": 114688, + "anon_thp": 0, + "file": 1892352, + "file_dirty": 0, + "file_mapped": 1138688, + "file_writeback": 0, + "inactive_anon": 110592, + "inactive_file": 1892352, + "kernel_stack": 16384, + "pgactivate": 0, + "pgdeactivate": 0, + "pgfault": 1029, + "pglazyfree": 0, + "pglazyfreed": 0, + "pgmajfault": 12, + "pgrefill": 0, + "pgscan": 0, + "pgsteal": 0, + "shmem": 0, + "slab": 165776, + "slab_reclaimable": 93752, + "slab_unreclaimable": 72024, + "sock": 0, + "thp_collapse_alloc": 0, + "thp_fault_alloc": 0, + "unevictable": 0, + "workingset_activate": 0, + "workingset_nodereclaim": 0, + "workingset_refault": 0 + }, + "limit": 2063048704 + }, + "name": "/sleepy1", + "id": "78de07328afff50a9777b07dd36a28c709dffe081baaf67235db618843399643", + "networks": { + "eth0": { + "rx_bytes": 3608, + "rx_packets": 44, + "rx_errors": 0, + "rx_dropped": 0, + "tx_bytes": 0, + "tx_packets": 0, + "tx_errors": 0, + "tx_dropped": 0 + } + } +} diff --git a/internal/docker/receiver/testdata/mock/single_container/container.json b/internal/docker/receiver/testdata/mock/single_container/container.json new file mode 100644 index 000000000000..db045c050bed --- /dev/null +++ 
b/internal/docker/receiver/testdata/mock/single_container/container.json @@ -0,0 +1,220 @@ +{ + "AppArmorProfile": "", + "Args": [], + "Config": { + "AttachStderr": true, + "AttachStdin": true, + "AttachStdout": true, + "Cmd": [ + "/bin/sh" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "ENV_VAR=env-var", + "ENV_VAR_2=env-var-2", + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": { + "8000/tcp": {} + }, + "Hostname": "10b703fb312b", + "Image": "ubuntu", + "Labels": { + "container.label": "container-label", + "container.label.2": "container-label-2" + }, + "OnBuild": null, + "OpenStdin": true, + "StdinOnce": true, + "Tty": true, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "Created": "2022-07-06T04:17:29.79437Z", + "Driver": "overlay2", + "ExecIDs": null, + "GraphDriver": { + "Data": { + "LowerDir": "/var/lib/docker/overlay2/669689c31e0a0038beda956dc8ee195c30093890251f497fbee84131e6abe859-init/diff:/var/lib/docker/overlay2/f11adae41a6c3a10b6e8fd2440b5170d8ff4f9979eecb1b43c19e2a996c9937a/diff", + "MergedDir": "/var/lib/docker/overlay2/669689c31e0a0038beda956dc8ee195c30093890251f497fbee84131e6abe859/merged", + "UpperDir": "/var/lib/docker/overlay2/669689c31e0a0038beda956dc8ee195c30093890251f497fbee84131e6abe859/diff", + "WorkDir": "/var/lib/docker/overlay2/669689c31e0a0038beda956dc8ee195c30093890251f497fbee84131e6abe859/work" + }, + "Name": "overlay2" + }, + "HostConfig": { + "AutoRemove": false, + "Binds": null, + "BlkioDeviceReadBps": null, + "BlkioDeviceReadIOps": null, + "BlkioDeviceWriteBps": null, + "BlkioDeviceWriteIOps": null, + "BlkioWeight": 0, + "BlkioWeightDevice": [], + "CapAdd": null, + "CapDrop": null, + "Cgroup": "", + "CgroupParent": "", + "CgroupnsMode": "host", + "ConsoleSize": [ + 0, + 0 + ], + "ContainerIDFile": "", + "CpuCount": 0, + "CpuPercent": 0, + "CpuPeriod": 0, + "CpuQuota": 0, + "CpuRealtimePeriod": 0, + "CpuRealtimeRuntime": 0, + "CpuShares": 0, + "CpusetCpus": "", + 
"CpusetMems": "", + "DeviceCgroupRules": null, + "DeviceRequests": null, + "Devices": [], + "Dns": [], + "DnsOptions": [], + "DnsSearch": [], + "ExtraHosts": null, + "GroupAdd": null, + "IOMaximumBandwidth": 0, + "IOMaximumIOps": 0, + "IpcMode": "private", + "Isolation": "", + "KernelMemory": 0, + "KernelMemoryTCP": 0, + "Links": null, + "LogConfig": { + "Config": {}, + "Type": "json-file" + }, + "MaskedPaths": [ + "/proc/asound", + "/proc/acpi", + "/proc/kcore", + "/proc/keys", + "/proc/latency_stats", + "/proc/timer_list", + "/proc/timer_stats", + "/proc/sched_debug", + "/proc/scsi", + "/sys/firmware" + ], + "Memory": 0, + "MemoryReservation": 0, + "MemorySwap": 0, + "MemorySwappiness": null, + "NanoCpus": 0, + "NetworkMode": "default", + "OomKillDisable": false, + "OomScoreAdj": 0, + "PidMode": "", + "PidsLimit": null, + "PortBindings": { + "8000/tcp": [ + { + "HostIp": "", + "HostPort": "8000" + } + ] + }, + "Privileged": false, + "PublishAllPorts": false, + "ReadonlyPaths": [ + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + "/proc/sysrq-trigger" + ], + "ReadonlyRootfs": false, + "RestartPolicy": { + "MaximumRetryCount": 0, + "Name": "no" + }, + "Runtime": "runc", + "SecurityOpt": null, + "ShmSize": 67108864, + "UTSMode": "", + "Ulimits": null, + "UsernsMode": "", + "VolumeDriver": "", + "VolumesFrom": null + }, + "HostnamePath": "/var/lib/docker/containers/10b703fb312b25e8368ab5a3bce3a1610d1cee5d71a94920f1a7adbc5b0cb326/hostname", + "HostsPath": "/var/lib/docker/containers/10b703fb312b25e8368ab5a3bce3a1610d1cee5d71a94920f1a7adbc5b0cb326/hosts", + "Id": "10b703fb312b25e8368ab5a3bce3a1610d1cee5d71a94920f1a7adbc5b0cb326", + "Image": "sha256:825d55fb6340083b06e69e02e823a02918f3ffb575ed2a87026d4645a7fd9e1b", + "LogPath": "/var/lib/docker/containers/10b703fb312b25e8368ab5a3bce3a1610d1cee5d71a94920f1a7adbc5b0cb326/10b703fb312b25e8368ab5a3bce3a1610d1cee5d71a94920f1a7adbc5b0cb326-json.log", + "MountLabel": "", + "Mounts": [], + "Name": "/bold_sinoussi", + 
"NetworkSettings": { + "Bridge": "", + "EndpointID": "e844b423ff61ed07aac37c6d9997903ee4771ccffc31ba7dbb3f58f364724170", + "Gateway": "10.255.0.1", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "HairpinMode": false, + "IPAddress": "10.255.0.2", + "IPPrefixLen": 24, + "IPv6Gateway": "", + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "MacAddress": "02:42:0a:ff:00:01", + "Networks": { + "bridge": { + "Aliases": null, + "DriverOpts": null, + "EndpointID": "e844b423ff61ed07aac37c6d9997903ee4771ccffc31ba7dbb3f58f364724170", + "Gateway": "10.255.0.1", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAMConfig": null, + "IPAddress": "10.255.0.2", + "IPPrefixLen": 24, + "IPv6Gateway": "", + "Links": null, + "MacAddress": "02:42:0a:ff:00:01", + "NetworkID": "c44102203908a4202f675742fcc2384849f4d0b5534d7fb74fd0f3ea7dbee928" + } + }, + "Ports": { + "8000/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "8000" + }, + { + "HostIp": "::", + "HostPort": "8000" + } + ] + }, + "SandboxID": "b83b7db7e06d3ba7c4c05208d41d327b0be0e17bfb50a9a57f4d9a31f0fdd662", + "SandboxKey": "/var/run/docker/netns/b83b7db7e06d", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null + }, + "Path": "/bin/sh", + "Platform": "linux", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/10b703fb312b25e8368ab5a3bce3a1610d1cee5d71a94920f1a7adbc5b0cb326/resolv.conf", + "RestartCount": 0, + "State": { + "Dead": false, + "Error": "", + "ExitCode": 0, + "FinishedAt": "0001-01-01T00:00:00Z", + "OOMKilled": false, + "Paused": false, + "Pid": 2968, + "Restarting": false, + "Running": true, + "StartedAt": "2022-07-06T04:17:30.2570682Z", + "Status": "running" + } +} diff --git a/internal/docker/receiver/testdata/mock/single_container/containers.json b/internal/docker/receiver/testdata/mock/single_container/containers.json new file mode 100644 index 000000000000..1b89f984e28a --- /dev/null +++ 
b/internal/docker/receiver/testdata/mock/single_container/containers.json @@ -0,0 +1,55 @@ +[ + { + "Command": "/bin/sh", + "Created": 1657081049, + "HostConfig": { + "NetworkMode": "default" + }, + "Id": "10b703fb312b25e8368ab5a3bce3a1610d1cee5d71a94920f1a7adbc5b0cb326", + "Image": "ubuntu", + "ImageID": "sha256:825d55fb6340083b06e69e02e823a02918f3ffb575ed2a87026d4645a7fd9e1b", + "Labels": { + "container.label": "container-label", + "container.label.2": "container-label-2" + }, + "Mounts": [], + "Names": [ + "/bold_sinoussi" + ], + "NetworkSettings": { + "Networks": { + "bridge": { + "Aliases": null, + "DriverOpts": null, + "EndpointID": "e844b423ff61ed07aac37c6d9997903ee4771ccffc31ba7dbb3f58f364724170", + "Gateway": "10.255.0.1", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAMConfig": null, + "IPAddress": "10.255.0.2", + "IPPrefixLen": 24, + "IPv6Gateway": "", + "Links": null, + "MacAddress": "02:42:0a:ff:00:02", + "NetworkID": "c44102203908a4202f675742fcc2384849f4d0b5534d7fb74fd0f3ea7dbee928" + } + } + }, + "Ports": [ + { + "IP": "0.0.0.0", + "PrivatePort": 8000, + "PublicPort": 8000, + "Type": "tcp" + }, + { + "IP": "::", + "PrivatePort": 8000, + "PublicPort": 8000, + "Type": "tcp" + } + ], + "State": "running", + "Status": "Up 3 minutes" + } +] diff --git a/internal/docker/receiver/testdata/mock/single_container/expected_metrics.yaml b/internal/docker/receiver/testdata/mock/single_container/expected_metrics.yaml new file mode 100644 index 000000000000..34d3248bcb57 --- /dev/null +++ b/internal/docker/receiver/testdata/mock/single_container/expected_metrics.yaml @@ -0,0 +1,841 @@ +resourceMetrics: + - resource: + attributes: + - key: container-metric-label + value: + stringValue: container-label + - key: container-metric-label-2 + value: + stringValue: container-label-2 + - key: container.hostname + value: + stringValue: 10b703fb312b + - key: container.id + value: + stringValue: 10b703fb312b25e8368ab5a3bce3a1610d1cee5d71a94920f1a7adbc5b0cb326 + - 
key: container.image.name + value: + stringValue: ubuntu + - key: container.name + value: + stringValue: bold_sinoussi + - key: container.runtime + value: + stringValue: docker + - key: env-var-metric-label + value: + stringValue: env-var + - key: env-var-metric-label-2 + value: + stringValue: env-var-2 + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeMetrics: + - metrics: + - description: Number of bytes transferred to/from the disk by the group and descendant groups. + name: container.blockio.io_service_bytes_recursive + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "254" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: async + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "254" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: discard + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "2502656" + attributes: + - key: device_major + value: + stringValue: "254" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: read + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "2502656" + attributes: + - key: device_major + value: + stringValue: "254" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: sync + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "2502656" + attributes: + - key: device_major + value: + stringValue: "254" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: total + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "254" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: write + startTimeUnixNano: "1000000" 
+ timeUnixNano: "2000000" + isMonotonic: true + unit: By + - description: Number of IOs (bio) issued to the disk by the group and descendant groups (Only available with cgroups v1). + name: container.blockio.io_serviced_recursive + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "254" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: async + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "254" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: discard + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "99" + attributes: + - key: device_major + value: + stringValue: "254" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: read + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "99" + attributes: + - key: device_major + value: + stringValue: "254" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: sync + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "99" + attributes: + - key: device_major + value: + stringValue: "254" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: total + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "254" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: write + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{operations}' + - description: 'Number of cores available to the container.' 
+ gauge: + dataPoints: + - asInt: 8 + startTimeUnixNano: "1687762436124732000" + timeUnixNano: "1687762436137493000" + name: container.cpu.logical.count + unit: "{cpus}" + - description: CPU shares set for the container. + gauge: + dataPoints: + - asInt: "0" + timeUnixNano: "1657771705535206000" + name: container.cpu.shares + unit: "1" + - description: Number of periods with throttling active. + name: container.cpu.throttling_data.periods + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{periods}' + - description: Number of periods when the container hits its throttling limit. + name: container.cpu.throttling_data.throttled_periods + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{periods}' + - description: Aggregate time the container was throttled. + name: container.cpu.throttling_data.throttled_time + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: Time spent by tasks of the cgroup in kernel mode (Linux). Time spent by all container processes in kernel mode (Windows). + name: container.cpu.usage.kernelmode + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "10000000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: Per-core CPU usage by the container (Only available with cgroups v1). 
+ name: container.cpu.usage.percpu + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1415045" + attributes: + - key: core + value: + stringValue: cpu0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: core + value: + stringValue: cpu1 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "262690" + attributes: + - key: core + value: + stringValue: cpu2 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "762532" + attributes: + - key: core + value: + stringValue: cpu3 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "78532" + attributes: + - key: core + value: + stringValue: cpu4 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "28108575" + attributes: + - key: core + value: + stringValue: cpu5 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "8800811" + attributes: + - key: core + value: + stringValue: cpu6 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "4191833" + attributes: + - key: core + value: + stringValue: cpu7 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: System CPU usage, as reported by docker. + name: container.cpu.usage.system + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "120830550000000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: Total CPU time consumed. + name: container.cpu.usage.total + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "43620018" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: Time spent by tasks of the cgroup in user mode (Linux). Time spent by all container processes in user mode (Windows). 
+ name: container.cpu.usage.usermode + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "10000000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: 'Percent of CPU used by the container.' + gauge: + dataPoints: + - asDouble: 0.0002888012543185477 + startTimeUnixNano: "1687762436059456000" + timeUnixNano: "1687762436071484000" + name: container.cpu.utilization + unit: "1" + - description: The amount of anonymous memory that has been identified as active by the kernel. + name: container.memory.active_anon + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Cache memory that has been identified as active by the kernel. + name: container.memory.active_file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "270336" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The amount of memory used by the processes of this control group that can be associated precisely with a block on a block device (Only available with cgroups v1). + name: container.memory.cache + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "2433024" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Bytes that are waiting to get written back to the disk, from this cgroup (Only available with cgroups v1). + name: container.memory.dirty + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: 'Number of times the memory limit was hit.' + name: container.memory.fails + sum: + isMonotonic: true + aggregationTemporality: 2 + dataPoints: + - asInt: "4" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: "{fails}" + - description: The maximum amount of physical memory that can be used by the processes of this control group (Only available with cgroups v1). 
+ name: container.memory.hierarchical_memory_limit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "9223372036854772000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The maximum amount of RAM + swap that can be used by the processes of this control group (Only available with cgroups v1). + name: container.memory.hierarchical_memsw_limit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "9223372036854772000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The amount of anonymous memory that has been identified as inactive by the kernel. + name: container.memory.inactive_anon + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Cache memory that has been identified as inactive by the kernel. + name: container.memory.inactive_file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "2162688" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Indicates the amount of memory mapped by the processes in the control group (Only available with cgroups v1). + name: container.memory.mapped_file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1486848" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Percentage of memory used. + gauge: + dataPoints: + - asDouble: 0.006938014912420301 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: container.memory.percent + unit: "1" + - description: Indicate the number of times that a process of the cgroup triggered a page fault. + name: container.memory.pgfault + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "990" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{faults}' + - description: Indicate the number of times that a process of the cgroup triggered a major fault. 
+ name: container.memory.pgmajfault + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{faults}' + - description: Number of pages read from disk by the cgroup (Only available with cgroups v1). + name: container.memory.pgpgin + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1287" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{operations}' + - description: Number of pages written to disk by the cgroup (Only available with cgroups v1). + name: container.memory.pgpgout + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "667" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{operations}' + - description: 'The amount of memory that doesn’t correspond to anything on disk: stacks, heaps, and anonymous memory maps (Only available with cgroups v1).' + name: container.memory.rss + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Number of bytes of anonymous transparent hugepages in this cgroup (Only available with cgroups v1). + name: container.memory.rss_huge + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The amount of anonymous memory that has been identified as active by the kernel. Includes descendant cgroups (Only available with cgroups v1). + name: container.memory.total_active_anon + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Cache memory that has been identified as active by the kernel. Includes descendant cgroups (Only available with cgroups v1). 
+ name: container.memory.total_active_file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "270336" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Total amount of memory used by the processes of this cgroup (and descendants) that can be associated with a block on a block device. Also accounts for memory used by tmpfs (Only available with cgroups v1). + name: container.memory.total_cache + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "2433024" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Bytes that are waiting to get written back to the disk, from this cgroup and descendants (Only available with cgroups v1). + name: container.memory.total_dirty + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The amount of anonymous memory that has been identified as inactive by the kernel. Includes descendant cgroups (Only available with cgroups v1). + name: container.memory.total_inactive_anon + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Cache memory that has been identified as inactive by the kernel. Includes descendant cgroups (Only available with cgroups v1). + name: container.memory.total_inactive_file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "2162688" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Indicates the amount of memory mapped by the processes in the control group and descendant groups (Only available with cgroups v1). 
+ name: container.memory.total_mapped_file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1486848" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Indicate the number of times that a process of the cgroup (or descendant cgroups) triggered a page fault (Only available with cgroups v1). + name: container.memory.total_pgfault + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "990" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{faults}' + - description: Indicate the number of times that a process of the cgroup (or descendant cgroups) triggered a major fault (Only available with cgroups v1). + name: container.memory.total_pgmajfault + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{faults}' + - description: Number of pages read from disk by the cgroup and descendant groups (Only available with cgroups v1). + name: container.memory.total_pgpgin + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1287" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{operations}' + - description: Number of pages written to disk by the cgroup and descendant groups (Only available with cgroups v1). + name: container.memory.total_pgpgout + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "667" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{operations}' + - description: 'The amount of memory that doesn’t correspond to anything on disk: stacks, heaps, and anonymous memory maps. Includes descendant cgroups (Only available with cgroups v1).' 
+ name: container.memory.total_rss + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Number of bytes of anonymous transparent hugepages in this cgroup and descendant cgroups (Only available with cgroups v1). + name: container.memory.total_rss_huge + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The amount of memory that cannot be reclaimed. Includes descendant cgroups (Only available with cgroups v1). + name: container.memory.total_unevictable + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Number of bytes of file/anon cache that are queued for syncing to disk in this cgroup and descendants (Only available with cgroups v1). + name: container.memory.total_writeback + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The amount of memory that cannot be reclaimed. + name: container.memory.unevictable + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Memory limit of the container. + name: container.memory.usage.limit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "10449559552" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Maximum memory usage. + name: container.memory.usage.max + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "3932160" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Memory usage of the container. This excludes the cache. 
+ name: container.memory.usage.total + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "724992" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Number of bytes of file/anon cache that are queued for syncing to disk in this cgroup (Only available with cgroups v1). + name: container.memory.writeback + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Bytes received by the container. + name: container.network.io.usage.rx_bytes + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1532" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: By + - description: Incoming packets dropped. + name: container.network.io.usage.rx_dropped + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{packets}' + - description: Received errors. + name: container.network.io.usage.rx_errors + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{errors}' + - description: Packets received. + name: container.network.io.usage.rx_packets + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "18" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{packets}' + - description: Bytes sent. 
+ name: container.network.io.usage.tx_bytes + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: By + - description: Outgoing packets dropped. + name: container.network.io.usage.tx_dropped + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{packets}' + - description: Sent errors. + name: container.network.io.usage.tx_errors + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{errors}' + - description: Packets sent. + name: container.network.io.usage.tx_packets + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{packets}' + - description: Number of pids in the container's cgroup. + name: container.pids.count + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{pids}' + - description: Number of restarts for the container. + name: container.restarts + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + timeUnixNano: "1657771705535206000" + isMonotonic: true + unit: "{restarts}" + - description: Time elapsed since container start time. 
+ gauge: + dataPoints: + - asDouble: 3.06813858144158e+07 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: container.uptime + unit: s + scope: + name: otelcol/docker/receiver + version: latest diff --git a/internal/docker/receiver/testdata/mock/single_container/stats.json b/internal/docker/receiver/testdata/mock/single_container/stats.json new file mode 100644 index 000000000000..33d8145d0cf8 --- /dev/null +++ b/internal/docker/receiver/testdata/mock/single_container/stats.json @@ -0,0 +1,183 @@ +{ + "blkio_stats": { + "io_merged_recursive": [], + "io_queue_recursive": [], + "io_service_bytes_recursive": [ + { + "major": 254, + "minor": 0, + "op": "Read", + "value": 2502656 + }, + { + "major": 254, + "minor": 0, + "op": "Write", + "value": 0 + }, + { + "major": 254, + "minor": 0, + "op": "Sync", + "value": 2502656 + }, + { + "major": 254, + "minor": 0, + "op": "Async", + "value": 0 + }, + { + "major": 254, + "minor": 0, + "op": "Discard", + "value": 0 + }, + { + "major": 254, + "minor": 0, + "op": "Total", + "value": 2502656 + } + ], + "io_service_time_recursive": [], + "io_serviced_recursive": [ + { + "major": 254, + "minor": 0, + "op": "Read", + "value": 99 + }, + { + "major": 254, + "minor": 0, + "op": "Write", + "value": 0 + }, + { + "major": 254, + "minor": 0, + "op": "Sync", + "value": 99 + }, + { + "major": 254, + "minor": 0, + "op": "Async", + "value": 0 + }, + { + "major": 254, + "minor": 0, + "op": "Discard", + "value": 0 + }, + { + "major": 254, + "minor": 0, + "op": "Total", + "value": 99 + } + ], + "io_time_recursive": [], + "io_wait_time_recursive": [], + "sectors_recursive": [] + }, + "cpu_stats": { + "cpu_usage": { + "percpu_usage": [ + 1415045, + 0, + 262690, + 762532, + 78532, + 28108575, + 8800811, + 4191833 + ], + "total_usage": 43620018, + "usage_in_kernelmode": 10000000, + "usage_in_usermode": 10000000 + }, + "online_cpus": 8, + "system_cpu_usage": 120830550000000, + "throttling_data": { + "periods": 0, + "throttled_periods": 
0, + "throttled_time": 0 + } + }, + "id": "10b703fb312b25e8368ab5a3bce3a1610d1cee5d71a94920f1a7adbc5b0cb326", + "memory_stats": { + "failcnt": 4, + "limit": 10449559552, + "max_usage": 3932160, + "stats": { + "active_anon": 0, + "active_file": 270336, + "cache": 2433024, + "dirty": 0, + "hierarchical_memory_limit": 9223372036854772000, + "hierarchical_memsw_limit": 9223372036854772000, + "inactive_anon": 0, + "inactive_file": 2162688, + "mapped_file": 1486848, + "pgfault": 990, + "pgmajfault": 0, + "pgpgin": 1287, + "pgpgout": 667, + "rss": 0, + "rss_huge": 0, + "total_active_anon": 0, + "total_active_file": 270336, + "total_cache": 2433024, + "total_dirty": 0, + "total_inactive_anon": 0, + "total_inactive_file": 2162688, + "total_mapped_file": 1486848, + "total_pgfault": 990, + "total_pgmajfault": 0, + "total_pgpgin": 1287, + "total_pgpgout": 667, + "total_rss": 0, + "total_rss_huge": 0, + "total_unevictable": 0, + "total_writeback": 0, + "unevictable": 0, + "writeback": 0 + }, + "usage": 2887680 + }, + "name": "/bold_sinoussi", + "networks": { + "eth0": { + "rx_bytes": 1532, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 18, + "tx_bytes": 0, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 0 + } + }, + "num_procs": 0, + "pids_stats": { + "current": 1 + }, + "precpu_stats": { + "cpu_usage": { + "total_usage": 0, + "usage_in_kernelmode": 0, + "usage_in_usermode": 0 + }, + "throttling_data": { + "periods": 0, + "throttled_periods": 0, + "throttled_time": 0 + } + }, + "preread": "0001-01-01T00:00:00Z", + "read": "2022-07-06T04:27:03.0439251Z", + "storage_stats": {} +} diff --git a/internal/docker/receiver/testdata/mock/single_container_with_optional_resource_attributes/container.json b/internal/docker/receiver/testdata/mock/single_container_with_optional_resource_attributes/container.json new file mode 100644 index 000000000000..73b98c332a56 --- /dev/null +++ 
b/internal/docker/receiver/testdata/mock/single_container_with_optional_resource_attributes/container.json @@ -0,0 +1,218 @@ +{ + "AppArmorProfile": "", + "Args": [], + "Config": { + "AttachStderr": true, + "AttachStdin": true, + "AttachStdout": true, + "Cmd": [ + "/bin/sh" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "ENV_VAR=env-var", + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": { + "8000/tcp": {} + }, + "Hostname": "10b703fb312b", + "Image": "ubuntu", + "Labels": { + "container.label": "container-label" + }, + "OnBuild": null, + "OpenStdin": true, + "StdinOnce": true, + "Tty": true, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "Created": "2022-07-06T04:17:29.79437Z", + "Driver": "overlay2", + "ExecIDs": null, + "GraphDriver": { + "Data": { + "LowerDir": "/var/lib/docker/overlay2/669689c31e0a0038beda956dc8ee195c30093890251f497fbee84131e6abe859-init/diff:/var/lib/docker/overlay2/f11adae41a6c3a10b6e8fd2440b5170d8ff4f9979eecb1b43c19e2a996c9937a/diff", + "MergedDir": "/var/lib/docker/overlay2/669689c31e0a0038beda956dc8ee195c30093890251f497fbee84131e6abe859/merged", + "UpperDir": "/var/lib/docker/overlay2/669689c31e0a0038beda956dc8ee195c30093890251f497fbee84131e6abe859/diff", + "WorkDir": "/var/lib/docker/overlay2/669689c31e0a0038beda956dc8ee195c30093890251f497fbee84131e6abe859/work" + }, + "Name": "overlay2" + }, + "HostConfig": { + "AutoRemove": false, + "Binds": null, + "BlkioDeviceReadBps": null, + "BlkioDeviceReadIOps": null, + "BlkioDeviceWriteBps": null, + "BlkioDeviceWriteIOps": null, + "BlkioWeight": 0, + "BlkioWeightDevice": [], + "CapAdd": null, + "CapDrop": null, + "Cgroup": "", + "CgroupParent": "", + "CgroupnsMode": "host", + "ConsoleSize": [ + 0, + 0 + ], + "ContainerIDFile": "", + "CpuCount": 0, + "CpuPercent": 0, + "CpuPeriod": 0, + "CpuQuota": 0, + "CpuRealtimePeriod": 0, + "CpuRealtimeRuntime": 0, + "CpuShares": 0, + "CpusetCpus": "", + "CpusetMems": "", + 
"DeviceCgroupRules": null, + "DeviceRequests": null, + "Devices": [], + "Dns": [], + "DnsOptions": [], + "DnsSearch": [], + "ExtraHosts": null, + "GroupAdd": null, + "IOMaximumBandwidth": 0, + "IOMaximumIOps": 0, + "IpcMode": "private", + "Isolation": "", + "KernelMemory": 0, + "KernelMemoryTCP": 0, + "Links": null, + "LogConfig": { + "Config": {}, + "Type": "json-file" + }, + "MaskedPaths": [ + "/proc/asound", + "/proc/acpi", + "/proc/kcore", + "/proc/keys", + "/proc/latency_stats", + "/proc/timer_list", + "/proc/timer_stats", + "/proc/sched_debug", + "/proc/scsi", + "/sys/firmware" + ], + "Memory": 0, + "MemoryReservation": 0, + "MemorySwap": 0, + "MemorySwappiness": null, + "NanoCpus": 0, + "NetworkMode": "default", + "OomKillDisable": false, + "OomScoreAdj": 0, + "PidMode": "", + "PidsLimit": null, + "PortBindings": { + "8000/tcp": [ + { + "HostIp": "", + "HostPort": "8000" + } + ] + }, + "Privileged": false, + "PublishAllPorts": false, + "ReadonlyPaths": [ + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + "/proc/sysrq-trigger" + ], + "ReadonlyRootfs": false, + "RestartPolicy": { + "MaximumRetryCount": 0, + "Name": "no" + }, + "Runtime": "runc", + "SecurityOpt": null, + "ShmSize": 67108864, + "UTSMode": "", + "Ulimits": null, + "UsernsMode": "", + "VolumeDriver": "", + "VolumesFrom": null + }, + "HostnamePath": "/var/lib/docker/containers/73364842ef014441cac89fed05df19463b1230db25a31252cdf82e754f1ec581/hostname", + "HostsPath": "/var/lib/docker/containers/73364842ef014441cac89fed05df19463b1230db25a31252cdf82e754f1ec581/hosts", + "Id": "73364842ef014441cac89fed05df19463b1230db25a31252cdf82e754f1ec581", + "Image": "sha256:825d55fb6340083b06e69e02e823a02918f3ffb575ed2a87026d4645a7fd9e1b", + "LogPath": "/var/lib/docker/containers/73364842ef014441cac89fed05df19463b1230db25a31252cdf82e754f1ec581/73364842ef014441cac89fed05df19463b1230db25a31252cdf82e754f1ec581-json.log", + "MountLabel": "", + "Mounts": [], + "Name": "/bold_sinoussi", + "NetworkSettings": { 
+ "Bridge": "", + "EndpointID": "6914fd894bfd0a86061aa9d4a100a3e235e7f922ebc782183f60101b207ec1c7", + "Gateway": "10.255.0.1", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "HairpinMode": false, + "IPAddress": "10.255.0.2", + "IPPrefixLen": 24, + "IPv6Gateway": "", + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "MacAddress": "02:42:0a:ff:00:01", + "Networks": { + "bridge": { + "Aliases": null, + "DriverOpts": null, + "EndpointID": "6914fd894bfd0a86061aa9d4a100a3e235e7f922ebc782183f60101b207ec1c7", + "Gateway": "10.255.0.1", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAMConfig": null, + "IPAddress": "10.255.0.2", + "IPPrefixLen": 24, + "IPv6Gateway": "", + "Links": null, + "MacAddress": "02:42:0a:ff:00:01", + "NetworkID": "792aab49ed5f9b0f0a33af3a93d0581c1bd8ea3688482e1859c3e544256af057" + } + }, + "Ports": { + "8000/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "8000" + }, + { + "HostIp": "::", + "HostPort": "8000" + } + ] + }, + "SandboxID": "4602fa6944161b3c0e2ff24901ab5b5c0b64f4052f85555b87afeab456428c5e", + "SandboxKey": "/var/run/docker/netns/4602fa694416", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null + }, + "Path": "/bin/sh", + "Platform": "linux", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/73364842ef014441cac89fed05df19463b1230db25a31252cdf82e754f1ec581/resolv.conf", + "RestartCount": 0, + "State": { + "Dead": false, + "Error": "", + "ExitCode": 0, + "FinishedAt": "0001-01-01T00:00:00Z", + "OOMKilled": false, + "Paused": false, + "Pid": 2968, + "Restarting": false, + "Running": true, + "StartedAt": "2022-07-06T04:17:30.2570682Z", + "Status": "running" + } +} \ No newline at end of file diff --git a/internal/docker/receiver/testdata/mock/single_container_with_optional_resource_attributes/containers.json b/internal/docker/receiver/testdata/mock/single_container_with_optional_resource_attributes/containers.json new file mode 100644 index 000000000000..7b64d306f969 --- 
/dev/null +++ b/internal/docker/receiver/testdata/mock/single_container_with_optional_resource_attributes/containers.json @@ -0,0 +1,54 @@ +[ + { + "Command": "/bin/sh", + "Created": 1657081049, + "HostConfig": { + "NetworkMode": "default" + }, + "Id": "73364842ef014441cac89fed05df19463b1230db25a31252cdf82e754f1ec581", + "Image": "ubuntu", + "ImageID": "sha256:825d55fb6340083b06e69e02e823a02918f3ffb575ed2a87026d4645a7fd9e1b", + "Labels": { + "container.label": "container-label" + }, + "Mounts": [], + "Names": [ + "/bold_sinoussi" + ], + "NetworkSettings": { + "Networks": { + "bridge": { + "Aliases": null, + "DriverOpts": null, + "EndpointID": "6914fd894bfd0a86061aa9d4a100a3e235e7f922ebc782183f60101b207ec1c7", + "Gateway": "10.255.0.1", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAMConfig": null, + "IPAddress": "10.255.0.2", + "IPPrefixLen": 24, + "IPv6Gateway": "", + "Links": null, + "MacAddress": "02:42:0a:ff:00:02", + "NetworkID": "792aab49ed5f9b0f0a33af3a93d0581c1bd8ea3688482e1859c3e544256af057" + } + } + }, + "Ports": [ + { + "IP": "0.0.0.0", + "PrivatePort": 8000, + "PublicPort": 8000, + "Type": "tcp" + }, + { + "IP": "::", + "PrivatePort": 8000, + "PublicPort": 8000, + "Type": "tcp" + } + ], + "State": "running", + "Status": "Up 3 minutes" + } +] \ No newline at end of file diff --git a/internal/docker/receiver/testdata/mock/single_container_with_optional_resource_attributes/expected_metrics.yaml b/internal/docker/receiver/testdata/mock/single_container_with_optional_resource_attributes/expected_metrics.yaml new file mode 100644 index 000000000000..fb9c3e44a2af --- /dev/null +++ b/internal/docker/receiver/testdata/mock/single_container_with_optional_resource_attributes/expected_metrics.yaml @@ -0,0 +1,843 @@ +resourceMetrics: + - resource: + attributes: + - key: container-metric-label + value: + stringValue: container-label + - key: container.command_line + value: + stringValue: /bin/sh + - key: container.hostname + value: + stringValue: 
10b703fb312b + - key: container.id + value: + stringValue: 73364842ef014441cac89fed05df19463b1230db25a31252cdf82e754f1ec581 + - key: container.image.id + value: + stringValue: sha256:825d55fb6340083b06e69e02e823a02918f3ffb575ed2a87026d4645a7fd9e1b + - key: container.image.name + value: + stringValue: ubuntu + - key: container.name + value: + stringValue: bold_sinoussi + - key: container.runtime + value: + stringValue: docker + - key: env-var-metric-label + value: + stringValue: env-var + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeMetrics: + - metrics: + - description: Number of bytes transferred to/from the disk by the group and descendant groups. + name: container.blockio.io_service_bytes_recursive + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "254" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: async + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "254" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: discard + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "2502656" + attributes: + - key: device_major + value: + stringValue: "254" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: read + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "2502656" + attributes: + - key: device_major + value: + stringValue: "254" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: sync + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "2502656" + attributes: + - key: device_major + value: + stringValue: "254" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: total + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + 
attributes: + - key: device_major + value: + stringValue: "254" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: write + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: By + - description: Number of IOs (bio) issued to the disk by the group and descendant groups (Only available with cgroups v1). + name: container.blockio.io_serviced_recursive + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "254" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: async + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "254" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: discard + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "99" + attributes: + - key: device_major + value: + stringValue: "254" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: read + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "99" + attributes: + - key: device_major + value: + stringValue: "254" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: sync + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "99" + attributes: + - key: device_major + value: + stringValue: "254" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: total + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "254" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: write + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{operations}' + - description: 'Number of cores available to the 
container.' + gauge: + dataPoints: + - asInt: 8 + startTimeUnixNano: "1687762436124732000" + timeUnixNano: "1687762436137493000" + name: container.cpu.logical.count + unit: "{cpus}" + - description: CPU shares set for the container. + gauge: + dataPoints: + - asInt: "0" + startTimeUnixNano: "1691752005571543000" + timeUnixNano: "1691752005573769000" + name: container.cpu.shares + unit: "1" + - description: Number of periods with throttling active. + name: container.cpu.throttling_data.periods + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{periods}' + - description: Number of periods when the container hits its throttling limit. + name: container.cpu.throttling_data.throttled_periods + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{periods}' + - description: Aggregate time the container was throttled. + name: container.cpu.throttling_data.throttled_time + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: Time spent by tasks of the cgroup in kernel mode (Linux). Time spent by all container processes in kernel mode (Windows). + name: container.cpu.usage.kernelmode + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "10000000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: Per-core CPU usage by the container (Only available with cgroups v1). 
+ name: container.cpu.usage.percpu + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1415045" + attributes: + - key: core + value: + stringValue: cpu0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: core + value: + stringValue: cpu1 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "262690" + attributes: + - key: core + value: + stringValue: cpu2 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "762532" + attributes: + - key: core + value: + stringValue: cpu3 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "78532" + attributes: + - key: core + value: + stringValue: cpu4 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "28108575" + attributes: + - key: core + value: + stringValue: cpu5 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "8800811" + attributes: + - key: core + value: + stringValue: cpu6 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "4191833" + attributes: + - key: core + value: + stringValue: cpu7 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: System CPU usage, as reported by docker. + name: container.cpu.usage.system + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "120830550000000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: Total CPU time consumed. + name: container.cpu.usage.total + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "43620018" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: Time spent by tasks of the cgroup in user mode (Linux). Time spent by all container processes in user mode (Windows). 
+ name: container.cpu.usage.usermode + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "10000000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: 'Percent of CPU used by the container.' + gauge: + dataPoints: + - asDouble: 0.0002888012543185477 + startTimeUnixNano: "1687762436337809000" + timeUnixNano: "1687762436345613000" + name: container.cpu.utilization + unit: "1" + - description: The amount of anonymous memory that has been identified as active by the kernel. + name: container.memory.active_anon + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Cache memory that has been identified as active by the kernel. + name: container.memory.active_file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "270336" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The amount of memory used by the processes of this control group that can be associated precisely with a block on a block device (Only available with cgroups v1). + name: container.memory.cache + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "2433024" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Bytes that are waiting to get written back to the disk, from this cgroup (Only available with cgroups v1). + name: container.memory.dirty + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: 'Number of times the memory limit was hit.' + name: container.memory.fails + sum: + isMonotonic: true + aggregationTemporality: 2 + dataPoints: + - asInt: "4" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: "{fails}" + - description: The maximum amount of physical memory that can be used by the processes of this control group (Only available with cgroups v1). 
+ name: container.memory.hierarchical_memory_limit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "9223372036854772000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The maximum amount of RAM + swap that can be used by the processes of this control group (Only available with cgroups v1). + name: container.memory.hierarchical_memsw_limit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "9223372036854772000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The amount of anonymous memory that has been identified as inactive by the kernel. + name: container.memory.inactive_anon + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Cache memory that has been identified as inactive by the kernel. + name: container.memory.inactive_file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "2162688" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Indicates the amount of memory mapped by the processes in the control group (Only available with cgroups v1). + name: container.memory.mapped_file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1486848" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Percentage of memory used. + gauge: + dataPoints: + - asDouble: 0.006938014912420301 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: container.memory.percent + unit: "1" + - description: Indicate the number of times that a process of the cgroup triggered a page fault. + name: container.memory.pgfault + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "990" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{faults}' + - description: Indicate the number of times that a process of the cgroup triggered a major fault. 
+ name: container.memory.pgmajfault + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{faults}' + - description: Number of pages read from disk by the cgroup (Only available with cgroups v1). + name: container.memory.pgpgin + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1287" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{operations}' + - description: Number of pages written to disk by the cgroup (Only available with cgroups v1). + name: container.memory.pgpgout + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "667" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{operations}' + - description: 'The amount of memory that doesn’t correspond to anything on disk: stacks, heaps, and anonymous memory maps (Only available with cgroups v1).' + name: container.memory.rss + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Number of bytes of anonymous transparent hugepages in this cgroup (Only available with cgroups v1). + name: container.memory.rss_huge + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The amount of anonymous memory that has been identified as active by the kernel. Includes descendant cgroups (Only available with cgroups v1). + name: container.memory.total_active_anon + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Cache memory that has been identified as active by the kernel. Includes descendant cgroups (Only available with cgroups v1). 
+ name: container.memory.total_active_file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "270336" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Total amount of memory used by the processes of this cgroup (and descendants) that can be associated with a block on a block device. Also accounts for memory used by tmpfs (Only available with cgroups v1). + name: container.memory.total_cache + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "2433024" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Bytes that are waiting to get written back to the disk, from this cgroup and descendants (Only available with cgroups v1). + name: container.memory.total_dirty + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The amount of anonymous memory that has been identified as inactive by the kernel. Includes descendant cgroups (Only available with cgroups v1). + name: container.memory.total_inactive_anon + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Cache memory that has been identified as inactive by the kernel. Includes descendant cgroups (Only available with cgroups v1). + name: container.memory.total_inactive_file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "2162688" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Indicates the amount of memory mapped by the processes in the control group and descendant groups (Only available with cgroups v1). 
+ name: container.memory.total_mapped_file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1486848" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Indicate the number of times that a process of the cgroup (or descendant cgroups) triggered a page fault (Only available with cgroups v1). + name: container.memory.total_pgfault + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "990" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{faults}' + - description: Indicate the number of times that a process of the cgroup (or descendant cgroups) triggered a major fault (Only available with cgroups v1). + name: container.memory.total_pgmajfault + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{faults}' + - description: Number of pages read from disk by the cgroup and descendant groups (Only available with cgroups v1). + name: container.memory.total_pgpgin + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1287" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{operations}' + - description: Number of pages written to disk by the cgroup and descendant groups (Only available with cgroups v1). + name: container.memory.total_pgpgout + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "667" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{operations}' + - description: 'The amount of memory that doesn’t correspond to anything on disk: stacks, heaps, and anonymous memory maps. Includes descendant cgroups (Only available with cgroups v1).' 
+ name: container.memory.total_rss + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Number of bytes of anonymous transparent hugepages in this cgroup and descendant cgroups (Only available with cgroups v1). + name: container.memory.total_rss_huge + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The amount of memory that cannot be reclaimed. Includes descendant cgroups (Only available with cgroups v1). + name: container.memory.total_unevictable + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Number of bytes of file/anon cache that are queued for syncing to disk in this cgroup and descendants (Only available with cgroups v1). + name: container.memory.total_writeback + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The amount of memory that cannot be reclaimed. + name: container.memory.unevictable + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Memory limit of the container. + name: container.memory.usage.limit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "10449559552" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Maximum memory usage. + name: container.memory.usage.max + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "3932160" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Memory usage of the container. This excludes the cache. 
+ name: container.memory.usage.total + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "724992" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Number of bytes of file/anon cache that are queued for syncing to disk in this cgroup (Only available with cgroups v1). + name: container.memory.writeback + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Bytes received by the container. + name: container.network.io.usage.rx_bytes + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1532" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: By + - description: Incoming packets dropped. + name: container.network.io.usage.rx_dropped + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{packets}' + - description: Received errors. + name: container.network.io.usage.rx_errors + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{errors}' + - description: Packets received. + name: container.network.io.usage.rx_packets + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "18" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{packets}' + - description: Bytes sent. 
+ name: container.network.io.usage.tx_bytes + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: By + - description: Outgoing packets dropped. + name: container.network.io.usage.tx_dropped + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{packets}' + - description: Sent errors. + name: container.network.io.usage.tx_errors + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{errors}' + - description: Packets sent. + name: container.network.io.usage.tx_packets + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{packets}' + - description: Number of pids in the container's cgroup. + name: container.pids.count + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{pids}' + - description: Number of restarts for the container. + name: container.restarts + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1691752005571543000" + timeUnixNano: "1691752005573769000" + isMonotonic: true + unit: '{restarts}' + - description: Time elapsed since container start time. 
+ gauge: + dataPoints: + - asDouble: 3.06813860885448e+07 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: container.uptime + unit: s + scope: + name: otelcol/docker/receiver + version: latest diff --git a/internal/docker/receiver/testdata/mock/single_container_with_optional_resource_attributes/stats.json b/internal/docker/receiver/testdata/mock/single_container_with_optional_resource_attributes/stats.json new file mode 100644 index 000000000000..3cd6f3b32ac8 --- /dev/null +++ b/internal/docker/receiver/testdata/mock/single_container_with_optional_resource_attributes/stats.json @@ -0,0 +1,183 @@ +{ + "blkio_stats": { + "io_merged_recursive": [], + "io_queue_recursive": [], + "io_service_bytes_recursive": [ + { + "major": 254, + "minor": 0, + "op": "Read", + "value": 2502656 + }, + { + "major": 254, + "minor": 0, + "op": "Write", + "value": 0 + }, + { + "major": 254, + "minor": 0, + "op": "Sync", + "value": 2502656 + }, + { + "major": 254, + "minor": 0, + "op": "Async", + "value": 0 + }, + { + "major": 254, + "minor": 0, + "op": "Discard", + "value": 0 + }, + { + "major": 254, + "minor": 0, + "op": "Total", + "value": 2502656 + } + ], + "io_service_time_recursive": [], + "io_serviced_recursive": [ + { + "major": 254, + "minor": 0, + "op": "Read", + "value": 99 + }, + { + "major": 254, + "minor": 0, + "op": "Write", + "value": 0 + }, + { + "major": 254, + "minor": 0, + "op": "Sync", + "value": 99 + }, + { + "major": 254, + "minor": 0, + "op": "Async", + "value": 0 + }, + { + "major": 254, + "minor": 0, + "op": "Discard", + "value": 0 + }, + { + "major": 254, + "minor": 0, + "op": "Total", + "value": 99 + } + ], + "io_time_recursive": [], + "io_wait_time_recursive": [], + "sectors_recursive": [] + }, + "cpu_stats": { + "cpu_usage": { + "percpu_usage": [ + 1415045, + 0, + 262690, + 762532, + 78532, + 28108575, + 8800811, + 4191833 + ], + "total_usage": 43620018, + "usage_in_kernelmode": 10000000, + "usage_in_usermode": 10000000 + }, + "online_cpus": 8, 
+ "system_cpu_usage": 120830550000000, + "throttling_data": { + "periods": 0, + "throttled_periods": 0, + "throttled_time": 0 + } + }, + "id": "73364842ef014441cac89fed05df19463b1230db25a31252cdf82e754f1ec581", + "memory_stats": { + "failcnt": 4, + "limit": 10449559552, + "max_usage": 3932160, + "stats": { + "active_anon": 0, + "active_file": 270336, + "cache": 2433024, + "dirty": 0, + "hierarchical_memory_limit": 9223372036854772000, + "hierarchical_memsw_limit": 9223372036854772000, + "inactive_anon": 0, + "inactive_file": 2162688, + "mapped_file": 1486848, + "pgfault": 990, + "pgmajfault": 0, + "pgpgin": 1287, + "pgpgout": 667, + "rss": 0, + "rss_huge": 0, + "total_active_anon": 0, + "total_active_file": 270336, + "total_cache": 2433024, + "total_dirty": 0, + "total_inactive_anon": 0, + "total_inactive_file": 2162688, + "total_mapped_file": 1486848, + "total_pgfault": 990, + "total_pgmajfault": 0, + "total_pgpgin": 1287, + "total_pgpgout": 667, + "total_rss": 0, + "total_rss_huge": 0, + "total_unevictable": 0, + "total_writeback": 0, + "unevictable": 0, + "writeback": 0 + }, + "usage": 2887680 + }, + "name": "/bold_sinoussi", + "networks": { + "eth0": { + "rx_bytes": 1532, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 18, + "tx_bytes": 0, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 0 + } + }, + "num_procs": 0, + "pids_stats": { + "current": 1 + }, + "precpu_stats": { + "cpu_usage": { + "total_usage": 0, + "usage_in_kernelmode": 0, + "usage_in_usermode": 0 + }, + "throttling_data": { + "periods": 0, + "throttled_periods": 0, + "throttled_time": 0 + } + }, + "preread": "0001-01-01T00:00:00Z", + "read": "2022-07-06T04:27:03.0439251Z", + "storage_stats": {} +} \ No newline at end of file diff --git a/internal/docker/receiver/testdata/mock/two_containers/container1.json b/internal/docker/receiver/testdata/mock/two_containers/container1.json new file mode 100644 index 000000000000..05f77ae4b159 --- /dev/null +++ 
b/internal/docker/receiver/testdata/mock/two_containers/container1.json @@ -0,0 +1,218 @@ +{ + "AppArmorProfile": "docker-default", + "Args": [], + "Config": { + "AttachStderr": true, + "AttachStdin": true, + "AttachStdout": true, + "Cmd": [ + "/bin/sh" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "ENV_VAR=env-var2", + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": { + "8001/tcp": {} + }, + "Hostname": "89d28931fd8b", + "Image": "alpine", + "Labels": { + "container.label": "container-label2" + }, + "OnBuild": null, + "OpenStdin": true, + "StdinOnce": true, + "Tty": true, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "Created": "2022-07-12T00:43:40.734856595Z", + "Driver": "overlay2", + "ExecIDs": null, + "GraphDriver": { + "Data": { + "LowerDir": "/var/lib/docker/overlay2/081b7392f02fe4752c4a9f0fbe03e2c4be635367abfdf746c734d00f2b2502a0-init/diff:/var/lib/docker/overlay2/371c57e1d897cef04ad750fd2d5d31a89ce46c876ba7a0ff2ce5f0bb3a273428/diff", + "MergedDir": "/var/lib/docker/overlay2/081b7392f02fe4752c4a9f0fbe03e2c4be635367abfdf746c734d00f2b2502a0/merged", + "UpperDir": "/var/lib/docker/overlay2/081b7392f02fe4752c4a9f0fbe03e2c4be635367abfdf746c734d00f2b2502a0/diff", + "WorkDir": "/var/lib/docker/overlay2/081b7392f02fe4752c4a9f0fbe03e2c4be635367abfdf746c734d00f2b2502a0/work" + }, + "Name": "overlay2" + }, + "HostConfig": { + "AutoRemove": false, + "Binds": null, + "BlkioDeviceReadBps": null, + "BlkioDeviceReadIOps": null, + "BlkioDeviceWriteBps": null, + "BlkioDeviceWriteIOps": null, + "BlkioWeight": 0, + "BlkioWeightDevice": [], + "CapAdd": null, + "CapDrop": null, + "Cgroup": "", + "CgroupParent": "", + "CgroupnsMode": "host", + "ConsoleSize": [ + 0, + 0 + ], + "ContainerIDFile": "", + "CpuCount": 0, + "CpuPercent": 0, + "CpuPeriod": 0, + "CpuQuota": 0, + "CpuRealtimePeriod": 0, + "CpuRealtimeRuntime": 0, + "CpuShares": 0, + "CpusetCpus": "", + "CpusetMems": "", + "DeviceCgroupRules": null, + 
"DeviceRequests": null, + "Devices": [], + "Dns": [], + "DnsOptions": [], + "DnsSearch": [], + "ExtraHosts": null, + "GroupAdd": null, + "IOMaximumBandwidth": 0, + "IOMaximumIOps": 0, + "IpcMode": "private", + "Isolation": "", + "KernelMemory": 0, + "KernelMemoryTCP": 0, + "Links": null, + "LogConfig": { + "Config": {}, + "Type": "json-file" + }, + "MaskedPaths": [ + "/proc/asound", + "/proc/acpi", + "/proc/kcore", + "/proc/keys", + "/proc/latency_stats", + "/proc/timer_list", + "/proc/timer_stats", + "/proc/sched_debug", + "/proc/scsi", + "/sys/firmware" + ], + "Memory": 0, + "MemoryReservation": 0, + "MemorySwap": 0, + "MemorySwappiness": null, + "NanoCpus": 0, + "NetworkMode": "default", + "OomKillDisable": false, + "OomScoreAdj": 0, + "PidMode": "", + "PidsLimit": null, + "PortBindings": { + "8001/tcp": [ + { + "HostIp": "", + "HostPort": "8001" + } + ] + }, + "Privileged": false, + "PublishAllPorts": false, + "ReadonlyPaths": [ + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + "/proc/sysrq-trigger" + ], + "ReadonlyRootfs": false, + "RestartPolicy": { + "MaximumRetryCount": 0, + "Name": "no" + }, + "Runtime": "runc", + "SecurityOpt": null, + "ShmSize": 67108864, + "UTSMode": "", + "Ulimits": null, + "UsernsMode": "", + "VolumeDriver": "", + "VolumesFrom": null + }, + "HostnamePath": "/var/lib/docker/containers/89d28931fd8b95c8806343a532e9e76bf0a0b76ee8f19452b8f75dee1ebcebb7/hostname", + "HostsPath": "/var/lib/docker/containers/89d28931fd8b95c8806343a532e9e76bf0a0b76ee8f19452b8f75dee1ebcebb7/hosts", + "Id": "89d28931fd8b95c8806343a532e9e76bf0a0b76ee8f19452b8f75dee1ebcebb7", + "Image": "sha256:e66264b98777e12192600bf9b4d663655c98a090072e1bab49e233d7531d1294", + "LogPath": "/var/lib/docker/containers/89d28931fd8b95c8806343a532e9e76bf0a0b76ee8f19452b8f75dee1ebcebb7/89d28931fd8b95c8806343a532e9e76bf0a0b76ee8f19452b8f75dee1ebcebb7-json.log", + "MountLabel": "", + "Mounts": [], + "Name": "/loving_torvalds", + "NetworkSettings": { + "Bridge": "", + 
"EndpointID": "9990c2e6968d0c4529bc0eef50fcefebe60fc22d698ad16dd786723f8d098913", + "Gateway": "172.17.0.1", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "HairpinMode": false, + "IPAddress": "172.17.0.3", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:03", + "Networks": { + "bridge": { + "Aliases": null, + "DriverOpts": null, + "EndpointID": "9990c2e6968d0c4529bc0eef50fcefebe60fc22d698ad16dd786723f8d098913", + "Gateway": "172.17.0.1", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAMConfig": null, + "IPAddress": "172.17.0.3", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "Links": null, + "MacAddress": "02:42:ac:11:00:03", + "NetworkID": "5426c33d912cdac32013f6bf1135ec0dc9319fed4e7a3a9cc6d86e7807030a60" + } + }, + "Ports": { + "8001/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "8001" + }, + { + "HostIp": "::", + "HostPort": "8001" + } + ] + }, + "SandboxID": "1b89b33b21d133dba8e55013eb5da5462e33d9f3faf528ff6d1dd999a79b9b7e", + "SandboxKey": "/var/run/docker/netns/1b89b33b21d1", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null + }, + "Path": "/bin/sh", + "Platform": "linux", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/89d28931fd8b95c8806343a532e9e76bf0a0b76ee8f19452b8f75dee1ebcebb7/resolv.conf", + "RestartCount": 0, + "State": { + "Dead": false, + "Error": "", + "ExitCode": 0, + "FinishedAt": "0001-01-01T00:00:00Z", + "OOMKilled": false, + "Paused": false, + "Pid": 2429, + "Restarting": false, + "Running": true, + "StartedAt": "2022-07-12T00:43:41.187539768Z", + "Status": "running" + } +} diff --git a/internal/docker/receiver/testdata/mock/two_containers/container2.json b/internal/docker/receiver/testdata/mock/two_containers/container2.json new file mode 100644 index 000000000000..519406f3a5c7 --- /dev/null +++ b/internal/docker/receiver/testdata/mock/two_containers/container2.json @@ -0,0 +1,218 @@ +{ + 
"AppArmorProfile": "docker-default", + "Args": [], + "Config": { + "AttachStderr": true, + "AttachStdin": true, + "AttachStdout": true, + "Cmd": [ + "/bin/sh" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "ENV_VAR=env-var", + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": { + "8000/tcp": {} + }, + "Hostname": "a359c0fc87c5", + "Image": "ubuntu", + "Labels": { + "container.label": "container-label" + }, + "OnBuild": null, + "OpenStdin": true, + "StdinOnce": true, + "Tty": true, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "Created": "2022-07-12T00:42:44.766615793Z", + "Driver": "overlay2", + "ExecIDs": null, + "GraphDriver": { + "Data": { + "LowerDir": "/var/lib/docker/overlay2/b8a47a4fec0b50b641c2f89f975ac94c10ef071bfa2b5271e6f292c98769fbe3-init/diff:/var/lib/docker/overlay2/60f77c4f71ec7f5a6e39c5ac1111f22bb03e9b02b9ae723f575c905fc932cb19/diff", + "MergedDir": "/var/lib/docker/overlay2/b8a47a4fec0b50b641c2f89f975ac94c10ef071bfa2b5271e6f292c98769fbe3/merged", + "UpperDir": "/var/lib/docker/overlay2/b8a47a4fec0b50b641c2f89f975ac94c10ef071bfa2b5271e6f292c98769fbe3/diff", + "WorkDir": "/var/lib/docker/overlay2/b8a47a4fec0b50b641c2f89f975ac94c10ef071bfa2b5271e6f292c98769fbe3/work" + }, + "Name": "overlay2" + }, + "HostConfig": { + "AutoRemove": false, + "Binds": null, + "BlkioDeviceReadBps": null, + "BlkioDeviceReadIOps": null, + "BlkioDeviceWriteBps": null, + "BlkioDeviceWriteIOps": null, + "BlkioWeight": 0, + "BlkioWeightDevice": [], + "CapAdd": null, + "CapDrop": null, + "Cgroup": "", + "CgroupParent": "", + "CgroupnsMode": "host", + "ConsoleSize": [ + 0, + 0 + ], + "ContainerIDFile": "", + "CpuCount": 0, + "CpuPercent": 0, + "CpuPeriod": 0, + "CpuQuota": 0, + "CpuRealtimePeriod": 0, + "CpuRealtimeRuntime": 0, + "CpuShares": 0, + "CpusetCpus": "", + "CpusetMems": "", + "DeviceCgroupRules": null, + "DeviceRequests": null, + "Devices": [], + "Dns": [], + "DnsOptions": [], + "DnsSearch": [], + 
"ExtraHosts": null, + "GroupAdd": null, + "IOMaximumBandwidth": 0, + "IOMaximumIOps": 0, + "IpcMode": "private", + "Isolation": "", + "KernelMemory": 0, + "KernelMemoryTCP": 0, + "Links": null, + "LogConfig": { + "Config": {}, + "Type": "json-file" + }, + "MaskedPaths": [ + "/proc/asound", + "/proc/acpi", + "/proc/kcore", + "/proc/keys", + "/proc/latency_stats", + "/proc/timer_list", + "/proc/timer_stats", + "/proc/sched_debug", + "/proc/scsi", + "/sys/firmware" + ], + "Memory": 0, + "MemoryReservation": 0, + "MemorySwap": 0, + "MemorySwappiness": null, + "NanoCpus": 0, + "NetworkMode": "default", + "OomKillDisable": false, + "OomScoreAdj": 0, + "PidMode": "", + "PidsLimit": null, + "PortBindings": { + "8000/tcp": [ + { + "HostIp": "", + "HostPort": "8000" + } + ] + }, + "Privileged": false, + "PublishAllPorts": false, + "ReadonlyPaths": [ + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + "/proc/sysrq-trigger" + ], + "ReadonlyRootfs": false, + "RestartPolicy": { + "MaximumRetryCount": 0, + "Name": "no" + }, + "Runtime": "runc", + "SecurityOpt": null, + "ShmSize": 67108864, + "UTSMode": "", + "Ulimits": null, + "UsernsMode": "", + "VolumeDriver": "", + "VolumesFrom": null + }, + "HostnamePath": "/var/lib/docker/containers/a359c0fc87c546b42d2ad32db7c978627f1d89b49cb3827a7b19ba97a1febcce/hostname", + "HostsPath": "/var/lib/docker/containers/a359c0fc87c546b42d2ad32db7c978627f1d89b49cb3827a7b19ba97a1febcce/hosts", + "Id": "a359c0fc87c546b42d2ad32db7c978627f1d89b49cb3827a7b19ba97a1febcce", + "Image": "sha256:27941809078cc9b2802deb2b0bb6feed6c236cde01e487f200e24653533701ee", + "LogPath": "/var/lib/docker/containers/a359c0fc87c546b42d2ad32db7c978627f1d89b49cb3827a7b19ba97a1febcce/a359c0fc87c546b42d2ad32db7c978627f1d89b49cb3827a7b19ba97a1febcce-json.log", + "MountLabel": "", + "Mounts": [], + "Name": "/pensive_aryabhata", + "NetworkSettings": { + "Bridge": "", + "EndpointID": "704279259cac47ef58ca868d30a48414ac8fb2757618cec32570ea715b672ade", + "Gateway": 
"172.17.0.1", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "HairpinMode": false, + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:02", + "Networks": { + "bridge": { + "Aliases": null, + "DriverOpts": null, + "EndpointID": "704279259cac47ef58ca868d30a48414ac8fb2757618cec32570ea715b672ade", + "Gateway": "172.17.0.1", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAMConfig": null, + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "Links": null, + "MacAddress": "02:42:ac:11:00:02", + "NetworkID": "5426c33d912cdac32013f6bf1135ec0dc9319fed4e7a3a9cc6d86e7807030a60" + } + }, + "Ports": { + "8000/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "8000" + }, + { + "HostIp": "::", + "HostPort": "8000" + } + ] + }, + "SandboxID": "6eb3878d8e7903513277d3a9fc93c4dd03d17d08c099e185e81e086e80e6c3ac", + "SandboxKey": "/var/run/docker/netns/6eb3878d8e79", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null + }, + "Path": "/bin/sh", + "Platform": "linux", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/a359c0fc87c546b42d2ad32db7c978627f1d89b49cb3827a7b19ba97a1febcce/resolv.conf", + "RestartCount": 0, + "State": { + "Dead": false, + "Error": "", + "ExitCode": 0, + "FinishedAt": "0001-01-01T00:00:00Z", + "OOMKilled": false, + "Paused": false, + "Pid": 2327, + "Restarting": false, + "Running": true, + "StartedAt": "2022-07-12T00:42:45.21292516Z", + "Status": "running" + } +} diff --git a/internal/docker/receiver/testdata/mock/two_containers/containers.json b/internal/docker/receiver/testdata/mock/two_containers/containers.json new file mode 100644 index 000000000000..9370f8eab82c --- /dev/null +++ b/internal/docker/receiver/testdata/mock/two_containers/containers.json @@ -0,0 +1,106 @@ +[ + { + "Command": "/bin/sh", + "Created": 1657586620, + "HostConfig": { + "NetworkMode": "default" + }, + 
"Id": "89d28931fd8b95c8806343a532e9e76bf0a0b76ee8f19452b8f75dee1ebcebb7", + "Image": "alpine", + "ImageID": "sha256:e66264b98777e12192600bf9b4d663655c98a090072e1bab49e233d7531d1294", + "Labels": { + "container.label": "container-label2" + }, + "Mounts": [], + "Names": [ + "/loving_torvalds" + ], + "NetworkSettings": { + "Networks": { + "bridge": { + "Aliases": null, + "DriverOpts": null, + "EndpointID": "9990c2e6968d0c4529bc0eef50fcefebe60fc22d698ad16dd786723f8d098913", + "Gateway": "172.17.0.1", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAMConfig": null, + "IPAddress": "172.17.0.3", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "Links": null, + "MacAddress": "02:42:ac:11:00:03", + "NetworkID": "5426c33d912cdac32013f6bf1135ec0dc9319fed4e7a3a9cc6d86e7807030a60" + } + } + }, + "Ports": [ + { + "IP": "0.0.0.0", + "PrivatePort": 8001, + "PublicPort": 8001, + "Type": "tcp" + }, + { + "IP": "::", + "PrivatePort": 8001, + "PublicPort": 8001, + "Type": "tcp" + } + ], + "State": "running", + "Status": "Up 4 hours" + }, + { + "Command": "/bin/sh", + "Created": 1657586564, + "HostConfig": { + "NetworkMode": "default" + }, + "Id": "a359c0fc87c546b42d2ad32db7c978627f1d89b49cb3827a7b19ba97a1febcce", + "Image": "ubuntu", + "ImageID": "sha256:27941809078cc9b2802deb2b0bb6feed6c236cde01e487f200e24653533701ee", + "Labels": { + "container.label": "container-label" + }, + "Mounts": [], + "Names": [ + "/pensive_aryabhata" + ], + "NetworkSettings": { + "Networks": { + "bridge": { + "Aliases": null, + "DriverOpts": null, + "EndpointID": "704279259cac47ef58ca868d30a48414ac8fb2757618cec32570ea715b672ade", + "Gateway": "172.17.0.1", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAMConfig": null, + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "Links": null, + "MacAddress": "02:42:ac:11:00:02", + "NetworkID": "5426c33d912cdac32013f6bf1135ec0dc9319fed4e7a3a9cc6d86e7807030a60" + } + } + }, + "Ports": [ + { + "IP": "0.0.0.0", + "PrivatePort": 
8000, + "PublicPort": 8000, + "Type": "tcp" + }, + { + "IP": "::", + "PrivatePort": 8000, + "PublicPort": 8000, + "Type": "tcp" + } + ], + "State": "running", + "Status": "Up 4 hours" + } +] diff --git a/internal/docker/receiver/testdata/mock/two_containers/expected_metrics.yaml b/internal/docker/receiver/testdata/mock/two_containers/expected_metrics.yaml new file mode 100644 index 000000000000..6cc5090c17dc --- /dev/null +++ b/internal/docker/receiver/testdata/mock/two_containers/expected_metrics.yaml @@ -0,0 +1,1571 @@ +resourceMetrics: + - resource: + attributes: + - key: container-metric-label + value: + stringValue: container-label + - key: container.hostname + value: + stringValue: a359c0fc87c5 + - key: container.id + value: + stringValue: a359c0fc87c546b42d2ad32db7c978627f1d89b49cb3827a7b19ba97a1febcce + - key: container.image.name + value: + stringValue: ubuntu + - key: container.name + value: + stringValue: pensive_aryabhata + - key: container.runtime + value: + stringValue: docker + - key: env-var-metric-label + value: + stringValue: env-var + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeMetrics: + - metrics: + - description: Number of bytes transferred to/from the disk by the group and descendant groups. 
+ name: container.blockio.io_service_bytes_recursive + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "8" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: async + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "8" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: discard + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "73728" + attributes: + - key: device_major + value: + stringValue: "8" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: read + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "73728" + attributes: + - key: device_major + value: + stringValue: "8" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: sync + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "73728" + attributes: + - key: device_major + value: + stringValue: "8" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: total + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "8" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: write + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: By + - description: Number of IOs (bio) issued to the disk by the group and descendant groups (Only available with cgroups v1). 
+ name: container.blockio.io_serviced_recursive + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "8" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: async + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "8" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: discard + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "1" + attributes: + - key: device_major + value: + stringValue: "8" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: read + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "1" + attributes: + - key: device_major + value: + stringValue: "8" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: sync + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "1" + attributes: + - key: device_major + value: + stringValue: "8" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: total + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "8" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: write + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{operations}' + - description: 'Number of cores available to the container.' + gauge: + dataPoints: + - asInt: 1 + startTimeUnixNano: "1687762436124732000" + timeUnixNano: "1687762436137493000" + name: container.cpu.logical.count + unit: "{cpus}" + - description: CPU shares set for the container. 
+ gauge: + dataPoints: + - asInt: "0" + timeUnixNano: "1657771832637112000" + name: container.cpu.shares + unit: "1" + - description: Number of periods with throttling active. + name: container.cpu.throttling_data.periods + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{periods}' + - description: Number of periods when the container hits its throttling limit. + name: container.cpu.throttling_data.throttled_periods + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{periods}' + - description: Aggregate time the container was throttled. + name: container.cpu.throttling_data.throttled_time + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: Time spent by tasks of the cgroup in kernel mode (Linux). Time spent by all container processes in kernel mode (Windows). + name: container.cpu.usage.kernelmode + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "10000000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: Per-core CPU usage by the container (Only available with cgroups v1). + name: container.cpu.usage.percpu + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "31093384" + attributes: + - key: core + value: + stringValue: cpu0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: System CPU usage, as reported by docker. + name: container.cpu.usage.system + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "14930240000000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: Total CPU time consumed. 
+ name: container.cpu.usage.total + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "31093384" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: Time spent by tasks of the cgroup in user mode (Linux). Time spent by all container processes in user mode (Windows). + name: container.cpu.usage.usermode + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "10000000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: 'Percent of CPU used by the container.' + gauge: + dataPoints: + - asDouble: 0 + startTimeUnixNano: "1687762436124732000" + timeUnixNano: "1687762436137493000" + name: container.cpu.utilization + unit: "1" + - description: The amount of anonymous memory that has been identified as active by the kernel. + name: container.memory.active_anon + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "4096" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Cache memory that has been identified as active by the kernel. + name: container.memory.active_file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "73728" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The amount of memory used by the processes of this control group that can be associated precisely with a block on a block device (Only available with cgroups v1). + name: container.memory.cache + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "73728" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Bytes that are waiting to get written back to the disk, from this cgroup (Only available with cgroups v1). + name: container.memory.dirty + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: 'Number of times the memory limit was hit.' 
+ name: container.memory.fails + sum: + isMonotonic: true + aggregationTemporality: 2 + dataPoints: + - asInt: "4" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: "{fails}" + - description: The maximum amount of physical memory that can be used by the processes of this control group (Only available with cgroups v1). + name: container.memory.hierarchical_memory_limit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "9223372036854772000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The maximum amount of RAM + swap that can be used by the processes of this control group (Only available with cgroups v1). + name: container.memory.hierarchical_memsw_limit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "9223372036854772000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The amount of anonymous memory that has been identified as inactive by the kernel. + name: container.memory.inactive_anon + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "106496" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Cache memory that has been identified as inactive by the kernel. + name: container.memory.inactive_file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Indicates the amount of memory mapped by the processes in the control group (Only available with cgroups v1). + name: container.memory.mapped_file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Percentage of memory used. 
+ gauge: + dataPoints: + - asDouble: 0.02053846320949035 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: container.memory.percent + unit: "1" + - description: Indicate the number of times that a process of the cgroup triggered a page fault. + name: container.memory.pgfault + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "2417" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{faults}' + - description: Indicate the number of times that a process of the cgroup triggered a major fault. + name: container.memory.pgmajfault + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{faults}' + - description: Number of pages read from disk by the cgroup (Only available with cgroups v1). + name: container.memory.pgpgin + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1980" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{operations}' + - description: Number of pages written to disk by the cgroup (Only available with cgroups v1). + name: container.memory.pgpgout + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1935" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{operations}' + - description: 'The amount of memory that doesn’t correspond to anything on disk: stacks, heaps, and anonymous memory maps (Only available with cgroups v1).' + name: container.memory.rss + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "110592" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Number of bytes of anonymous transparent hugepages in this cgroup (Only available with cgroups v1). 
+ name: container.memory.rss_huge + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The amount of anonymous memory that has been identified as active by the kernel. Includes descendant cgroups (Only available with cgroups v1). + name: container.memory.total_active_anon + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "4096" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Cache memory that has been identified as active by the kernel. Includes descendant cgroups (Only available with cgroups v1). + name: container.memory.total_active_file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "73728" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Total amount of memory used by the processes of this cgroup (and descendants) that can be associated with a block on a block device. Also accounts for memory used by tmpfs (Only available with cgroups v1). + name: container.memory.total_cache + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "73728" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Bytes that are waiting to get written back to the disk, from this cgroup and descendants (Only available with cgroups v1). + name: container.memory.total_dirty + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The amount of anonymous memory that has been identified as inactive by the kernel. Includes descendant cgroups (Only available with cgroups v1). + name: container.memory.total_inactive_anon + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "106496" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Cache memory that has been identified as inactive by the kernel. 
Includes descendant cgroups (Only available with cgroups v1). + name: container.memory.total_inactive_file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Indicates the amount of memory mapped by the processes in the control group and descendant groups (Only available with cgroups v1). + name: container.memory.total_mapped_file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Indicate the number of times that a process of the cgroup (or descendant cgroups) triggered a page fault (Only available with cgroups v1). + name: container.memory.total_pgfault + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "2417" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{faults}' + - description: Indicate the number of times that a process of the cgroup (or descendant cgroups) triggered a major fault (Only available with cgroups v1). + name: container.memory.total_pgmajfault + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{faults}' + - description: Number of pages read from disk by the cgroup and descendant groups (Only available with cgroups v1). + name: container.memory.total_pgpgin + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1980" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{operations}' + - description: Number of pages written to disk by the cgroup and descendant groups (Only available with cgroups v1). 
+ name: container.memory.total_pgpgout + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1935" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{operations}' + - description: 'The amount of memory that doesn’t correspond to anything on disk: stacks, heaps, and anonymous memory maps. Includes descendant cgroups (Only available with cgroups v1).' + name: container.memory.total_rss + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "110592" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Number of bytes of anonymous transparent hugepages in this cgroup and descendant cgroups (Only available with cgroups v1). + name: container.memory.total_rss_huge + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The amount of memory that cannot be reclaimed. Includes descendant cgroups (Only available with cgroups v1). + name: container.memory.total_unevictable + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Number of bytes of file/anon cache that are queued for syncing to disk in this cgroup and descendants (Only available with cgroups v1). + name: container.memory.total_writeback + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The amount of memory that cannot be reclaimed. + name: container.memory.unevictable + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Memory limit of the container. 
+ name: container.memory.usage.limit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "2074079232" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Maximum memory usage. + name: container.memory.usage.max + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "6172672" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Memory usage of the container. This excludes the cache. + name: container.memory.usage.total + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "425984" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Number of bytes of file/anon cache that are queued for syncing to disk in this cgroup (Only available with cgroups v1). + name: container.memory.writeback + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Bytes received by the container. + name: container.network.io.usage.rx_bytes + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "12394" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: By + - description: Incoming packets dropped. + name: container.network.io.usage.rx_dropped + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{packets}' + - description: Received errors. + name: container.network.io.usage.rx_errors + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{errors}' + - description: Packets received. 
+ name: container.network.io.usage.rx_packets + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "96" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{packets}' + - description: Bytes sent. + name: container.network.io.usage.tx_bytes + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: By + - description: Outgoing packets dropped. + name: container.network.io.usage.tx_dropped + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{packets}' + - description: Sent errors. + name: container.network.io.usage.tx_errors + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{errors}' + - description: Packets sent. + name: container.network.io.usage.tx_packets + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{packets}' + - description: Number of pids in the container's cgroup. + name: container.pids.count + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{pids}' + - description: Number of restarts for the container. + name: container.restarts + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + timeUnixNano: "1657771832637112000" + isMonotonic: true + unit: "{restarts}" + - description: Time elapsed since container start time. 
+ gauge: + dataPoints: + - asDouble: 3.017587092456784e+07 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: container.uptime + unit: s + scope: + name: otelcol/docker/receiver + version: latest + - resource: + attributes: + - key: container-metric-label + value: + stringValue: container-label2 + - key: container.hostname + value: + stringValue: 89d28931fd8b + - key: container.id + value: + stringValue: 89d28931fd8b95c8806343a532e9e76bf0a0b76ee8f19452b8f75dee1ebcebb7 + - key: container.image.name + value: + stringValue: alpine + - key: container.name + value: + stringValue: loving_torvalds + - key: container.runtime + value: + stringValue: docker + - key: env-var-metric-label + value: + stringValue: env-var2 + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeMetrics: + - metrics: + - description: Number of bytes transferred to/from the disk by the group and descendant groups. + name: container.blockio.io_service_bytes_recursive + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "8" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: async + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "8" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: discard + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "1187840" + attributes: + - key: device_major + value: + stringValue: "8" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: read + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "1187840" + attributes: + - key: device_major + value: + stringValue: "8" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: sync + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "1187840" + 
attributes: + - key: device_major + value: + stringValue: "8" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: total + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "8" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: write + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: By + - description: Number of IOs (bio) issued to the disk by the group and descendant groups (Only available with cgroups v1). + name: container.blockio.io_serviced_recursive + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "8" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: async + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "8" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: discard + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "19" + attributes: + - key: device_major + value: + stringValue: "8" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: read + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "19" + attributes: + - key: device_major + value: + stringValue: "8" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: sync + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "19" + attributes: + - key: device_major + value: + stringValue: "8" + - key: device_minor + value: + stringValue: "0" + - key: operation + value: + stringValue: total + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "8" + - key: device_minor + value: + 
stringValue: "0" + - key: operation + value: + stringValue: write + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{operations}' + - description: 'Number of cores available to the container.' + gauge: + dataPoints: + - asInt: 1 + startTimeUnixNano: "1687762436124732000" + timeUnixNano: "1687762436137493000" + name: container.cpu.logical.count + unit: "{cpus}" + - description: CPU shares set for the container. + gauge: + dataPoints: + - asInt: "0" + timeUnixNano: "1657771832637093000" + name: container.cpu.shares + unit: "1" + - description: Number of periods with throttling active. + name: container.cpu.throttling_data.periods + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{periods}' + - description: Number of periods when the container hits its throttling limit. + name: container.cpu.throttling_data.throttled_periods + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{periods}' + - description: Aggregate time the container was throttled. + name: container.cpu.throttling_data.throttled_time + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: Time spent by tasks of the cgroup in kernel mode (Linux). Time spent by all container processes in kernel mode (Windows). + name: container.cpu.usage.kernelmode + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "20000000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: Per-core CPU usage by the container (Only available with cgroups v1). 
+ name: container.cpu.usage.percpu + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "34117917" + attributes: + - key: core + value: + stringValue: cpu0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: System CPU usage, as reported by docker. + name: container.cpu.usage.system + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "14834790000000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: Total CPU time consumed. + name: container.cpu.usage.total + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "34117917" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: Time spent by tasks of the cgroup in user mode (Linux). Time spent by all container processes in user mode (Windows). + name: container.cpu.usage.usermode + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "10000000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: 'Percent of CPU used by the container.' + gauge: + dataPoints: + - asDouble: 0 + startTimeUnixNano: "1687762436124732000" + timeUnixNano: "1687762436137493000" + name: container.cpu.utilization + unit: "1" + - description: The amount of anonymous memory that has been identified as active by the kernel. + name: container.memory.active_anon + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "4096" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Cache memory that has been identified as active by the kernel. 
+ name: container.memory.active_file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "393216" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The amount of memory used by the processes of this control group that can be associated precisely with a block on a block device (Only available with cgroups v1). + name: container.memory.cache + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "921600" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Bytes that are waiting to get written back to the disk, from this cgroup (Only available with cgroups v1). + name: container.memory.dirty + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: 'Number of times the memory limit was hit.' + name: container.memory.fails + sum: + isMonotonic: true + aggregationTemporality: 2 + dataPoints: + - asInt: "4" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: "{fails}" + - description: The maximum amount of physical memory that can be used by the processes of this control group (Only available with cgroups v1). + name: container.memory.hierarchical_memory_limit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "9223372036854772000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The maximum amount of RAM + swap that can be used by the processes of this control group (Only available with cgroups v1). + name: container.memory.hierarchical_memsw_limit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "9223372036854772000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The amount of anonymous memory that has been identified as inactive by the kernel. 
+ name: container.memory.inactive_anon + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "147456" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Cache memory that has been identified as inactive by the kernel. + name: container.memory.inactive_file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "528384" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Indicates the amount of memory mapped by the processes in the control group (Only available with cgroups v1). + name: container.memory.mapped_file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "843776" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Percentage of memory used. + gauge: + dataPoints: + - asDouble: 0.037324707178785346 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: container.memory.percent + unit: "1" + - description: Indicate the number of times that a process of the cgroup triggered a page fault. + name: container.memory.pgfault + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "2469" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{faults}' + - description: Indicate the number of times that a process of the cgroup triggered a major fault. + name: container.memory.pgmajfault + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "8" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{faults}' + - description: Number of pages read from disk by the cgroup (Only available with cgroups v1). + name: container.memory.pgpgin + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "2288" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{operations}' + - description: Number of pages written to disk by the cgroup (Only available with cgroups v1). 
+ name: container.memory.pgpgout + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "2026" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{operations}' + - description: 'The amount of memory that doesn’t correspond to anything on disk: stacks, heaps, and anonymous memory maps (Only available with cgroups v1).' + name: container.memory.rss + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "151552" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Number of bytes of anonymous transparent hugepages in this cgroup (Only available with cgroups v1). + name: container.memory.rss_huge + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The amount of anonymous memory that has been identified as active by the kernel. Includes descendant cgroups (Only available with cgroups v1). + name: container.memory.total_active_anon + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "4096" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Cache memory that has been identified as active by the kernel. Includes descendant cgroups (Only available with cgroups v1). + name: container.memory.total_active_file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "393216" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Total amount of memory used by the processes of this cgroup (and descendants) that can be associated with a block on a block device. Also accounts for memory used by tmpfs (Only available with cgroups v1). 
+ name: container.memory.total_cache + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "921600" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Bytes that are waiting to get written back to the disk, from this cgroup and descendants (Only available with cgroups v1). + name: container.memory.total_dirty + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The amount of anonymous memory that has been identified as inactive by the kernel. Includes descendant cgroups (Only available with cgroups v1). + name: container.memory.total_inactive_anon + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "147456" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Cache memory that has been identified as inactive by the kernel. Includes descendant cgroups (Only available with cgroups v1). + name: container.memory.total_inactive_file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "528384" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Indicates the amount of memory mapped by the processes in the control group and descendant groups (Only available with cgroups v1). + name: container.memory.total_mapped_file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "843776" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Indicate the number of times that a process of the cgroup (or descendant cgroups) triggered a page fault (Only available with cgroups v1). + name: container.memory.total_pgfault + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "2469" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{faults}' + - description: Indicate the number of times that a process of the cgroup (or descendant cgroups) triggered a major fault (Only available with cgroups v1). 
+ name: container.memory.total_pgmajfault + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "8" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{faults}' + - description: Number of pages read from disk by the cgroup and descendant groups (Only available with cgroups v1). + name: container.memory.total_pgpgin + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "2288" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{operations}' + - description: Number of pages written to disk by the cgroup and descendant groups (Only available with cgroups v1). + name: container.memory.total_pgpgout + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "2026" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{operations}' + - description: 'The amount of memory that doesn’t correspond to anything on disk: stacks, heaps, and anonymous memory maps. Includes descendant cgroups (Only available with cgroups v1).' + name: container.memory.total_rss + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "151552" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Number of bytes of anonymous transparent hugepages in this cgroup and descendant cgroups (Only available with cgroups v1). + name: container.memory.total_rss_huge + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The amount of memory that cannot be reclaimed. Includes descendant cgroups (Only available with cgroups v1). + name: container.memory.total_unevictable + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Number of bytes of file/anon cache that are queued for syncing to disk in this cgroup and descendants (Only available with cgroups v1). 
+ name: container.memory.total_writeback + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The amount of memory that cannot be reclaimed. + name: container.memory.unevictable + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Memory limit of the container. + name: container.memory.usage.limit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "2074079232" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Maximum memory usage. + name: container.memory.usage.max + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "6201344" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Memory usage of the container. This excludes the cache. + name: container.memory.usage.total + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "774144" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Number of bytes of file/anon cache that are queued for syncing to disk in this cgroup (Only available with cgroups v1). + name: container.memory.writeback + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Bytes received by the container. + name: container.network.io.usage.rx_bytes + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "11313" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: By + - description: Incoming packets dropped. 
+ name: container.network.io.usage.rx_dropped + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{packets}' + - description: Received errors. + name: container.network.io.usage.rx_errors + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{errors}' + - description: Packets received. + name: container.network.io.usage.rx_packets + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "88" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{packets}' + - description: Bytes sent. + name: container.network.io.usage.tx_bytes + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: By + - description: Outgoing packets dropped. + name: container.network.io.usage.tx_dropped + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{packets}' + - description: Sent errors. + name: container.network.io.usage.tx_errors + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{errors}' + - description: Packets sent. 
+ name: container.network.io.usage.tx_packets + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{packets}' + - description: Number of pids in the container's cgroup. + name: container.pids.count + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{pids}' + - description: Number of restarts for the container. + name: container.restarts + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + timeUnixNano: "1657771832637093000" + isMonotonic: true + unit: "{restarts}" + - description: Time elapsed since container start time. + gauge: + dataPoints: + - asDouble: 3.0175814949953232e+07 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: container.uptime + unit: s + scope: + name: otelcol/docker/receiver + version: latest diff --git a/internal/docker/receiver/testdata/mock/two_containers/stats1.json b/internal/docker/receiver/testdata/mock/two_containers/stats1.json new file mode 100644 index 000000000000..6cd042489777 --- /dev/null +++ b/internal/docker/receiver/testdata/mock/two_containers/stats1.json @@ -0,0 +1,181 @@ +{ + "blkio_stats": { + "io_merged_recursive": [], + "io_queue_recursive": [], + "io_service_bytes_recursive": [ + { + "major": 8, + "minor": 0, + "op": "Read", + "value": 1187840 + }, + { + "major": 8, + "minor": 0, + "op": "Write", + "value": 0 + }, + { + "major": 8, + "minor": 0, + "op": "Sync", + "value": 1187840 + }, + { + "major": 8, + "minor": 0, + "op": "Async", + "value": 0 + }, + { + "major": 8, + "minor": 0, + "op": "Discard", + "value": 0 + }, + { + "major": 8, + "minor": 0, + "op": "Total", + "value": 1187840 + } + ], + "io_service_time_recursive": [], + "io_serviced_recursive": [ + { + "major": 8, + "minor": 0, + "op": "Read", + "value": 19 + }, + { + "major": 8, + "minor": 0, + 
"op": "Write", + "value": 0 + }, + { + "major": 8, + "minor": 0, + "op": "Sync", + "value": 19 + }, + { + "major": 8, + "minor": 0, + "op": "Async", + "value": 0 + }, + { + "major": 8, + "minor": 0, + "op": "Discard", + "value": 0 + }, + { + "major": 8, + "minor": 0, + "op": "Total", + "value": 19 + } + ], + "io_time_recursive": [], + "io_wait_time_recursive": [], + "sectors_recursive": [] + }, + "cpu_stats": { + "cpu_usage": { + "percpu_usage": [ + 34117917 + ], + "total_usage": 34117917, + "usage_in_kernelmode": 20000000, + "usage_in_usermode": 10000000 + }, + "online_cpus": 1, + "system_cpu_usage": 14834790000000, + "throttling_data": { + "periods": 0, + "throttled_periods": 0, + "throttled_time": 0 + } + }, + "id": "89d28931fd8b95c8806343a532e9e76bf0a0b76ee8f19452b8f75dee1ebcebb7", + "memory_stats": { + "failcnt": 4, + "limit": 2074079232, + "max_usage": 6201344, + "stats": { + "active_anon": 4096, + "active_file": 393216, + "cache": 921600, + "dirty": 0, + "hierarchical_memory_limit": 9223372036854772000, + "hierarchical_memsw_limit": 9223372036854772000, + "inactive_anon": 147456, + "inactive_file": 528384, + "mapped_file": 843776, + "pgfault": 2469, + "pgmajfault": 8, + "pgpgin": 2288, + "pgpgout": 2026, + "rss": 151552, + "rss_huge": 0, + "total_active_anon": 4096, + "total_active_file": 393216, + "total_cache": 921600, + "total_dirty": 0, + "total_inactive_anon": 147456, + "total_inactive_file": 528384, + "total_mapped_file": 843776, + "total_pgfault": 2469, + "total_pgmajfault": 8, + "total_pgpgin": 2288, + "total_pgpgout": 2026, + "total_rss": 151552, + "total_rss_huge": 0, + "total_unevictable": 0, + "total_writeback": 0, + "unevictable": 0, + "writeback": 0 + }, + "usage": 1302528 + }, + "name": "/loving_torvalds", + "networks": { + "eth0": { + "rx_bytes": 11313, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 88, + "tx_bytes": 0, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 0 + } + }, + "num_procs": 0, + "pids_stats": { + "current": 1 + 
}, + "precpu_stats": { + "cpu_usage": { + "percpu_usage": [ + 34117917 + ], + "total_usage": 34117917, + "usage_in_kernelmode": 20000000, + "usage_in_usermode": 10000000 + }, + "online_cpus": 1, + "system_cpu_usage": 14833820000000, + "throttling_data": { + "periods": 0, + "throttled_periods": 0, + "throttled_time": 0 + } + }, + "preread": "2022-07-12T05:32:37.708457509Z", + "read": "2022-07-12T05:32:38.711168232Z", + "storage_stats": {} +} diff --git a/internal/docker/receiver/testdata/mock/two_containers/stats2.json b/internal/docker/receiver/testdata/mock/two_containers/stats2.json new file mode 100644 index 000000000000..f0565da48162 --- /dev/null +++ b/internal/docker/receiver/testdata/mock/two_containers/stats2.json @@ -0,0 +1,181 @@ +{ + "blkio_stats": { + "io_merged_recursive": [], + "io_queue_recursive": [], + "io_service_bytes_recursive": [ + { + "major": 8, + "minor": 0, + "op": "Read", + "value": 73728 + }, + { + "major": 8, + "minor": 0, + "op": "Write", + "value": 0 + }, + { + "major": 8, + "minor": 0, + "op": "Sync", + "value": 73728 + }, + { + "major": 8, + "minor": 0, + "op": "Async", + "value": 0 + }, + { + "major": 8, + "minor": 0, + "op": "Discard", + "value": 0 + }, + { + "major": 8, + "minor": 0, + "op": "Total", + "value": 73728 + } + ], + "io_service_time_recursive": [], + "io_serviced_recursive": [ + { + "major": 8, + "minor": 0, + "op": "Read", + "value": 1 + }, + { + "major": 8, + "minor": 0, + "op": "Write", + "value": 0 + }, + { + "major": 8, + "minor": 0, + "op": "Sync", + "value": 1 + }, + { + "major": 8, + "minor": 0, + "op": "Async", + "value": 0 + }, + { + "major": 8, + "minor": 0, + "op": "Discard", + "value": 0 + }, + { + "major": 8, + "minor": 0, + "op": "Total", + "value": 1 + } + ], + "io_time_recursive": [], + "io_wait_time_recursive": [], + "sectors_recursive": [] + }, + "cpu_stats": { + "cpu_usage": { + "percpu_usage": [ + 31093384 + ], + "total_usage": 31093384, + "usage_in_kernelmode": 10000000, + "usage_in_usermode": 
10000000 + }, + "online_cpus": 1, + "system_cpu_usage": 14930240000000, + "throttling_data": { + "periods": 0, + "throttled_periods": 0, + "throttled_time": 0 + } + }, + "id": "a359c0fc87c546b42d2ad32db7c978627f1d89b49cb3827a7b19ba97a1febcce", + "memory_stats": { + "failcnt": 4, + "limit": 2074079232, + "max_usage": 6172672, + "stats": { + "active_anon": 4096, + "active_file": 73728, + "cache": 73728, + "dirty": 0, + "hierarchical_memory_limit": 9223372036854772000, + "hierarchical_memsw_limit": 9223372036854772000, + "inactive_anon": 106496, + "inactive_file": 0, + "mapped_file": 0, + "pgfault": 2417, + "pgmajfault": 1, + "pgpgin": 1980, + "pgpgout": 1935, + "rss": 110592, + "rss_huge": 0, + "total_active_anon": 4096, + "total_active_file": 73728, + "total_cache": 73728, + "total_dirty": 0, + "total_inactive_anon": 106496, + "total_inactive_file": 0, + "total_mapped_file": 0, + "total_pgfault": 2417, + "total_pgmajfault": 1, + "total_pgpgin": 1980, + "total_pgpgout": 1935, + "total_rss": 110592, + "total_rss_huge": 0, + "total_unevictable": 0, + "total_writeback": 0, + "unevictable": 0, + "writeback": 0 + }, + "usage": 425984 + }, + "name": "/pensive_aryabhata", + "networks": { + "eth0": { + "rx_bytes": 12394, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 96, + "tx_bytes": 0, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 0 + } + }, + "num_procs": 0, + "pids_stats": { + "current": 1 + }, + "precpu_stats": { + "cpu_usage": { + "percpu_usage": [ + 31093384 + ], + "total_usage": 31093384, + "usage_in_kernelmode": 10000000, + "usage_in_usermode": 10000000 + }, + "online_cpus": 1, + "system_cpu_usage": 14929250000000, + "throttling_data": { + "periods": 0, + "throttled_periods": 0, + "throttled_time": 0 + } + }, + "preread": "2022-07-12T05:34:15.325458676Z", + "read": "2022-07-12T05:34:16.328861358Z", + "storage_stats": {} +} diff --git a/internal/docker/receiver/testdata/stats.json b/internal/docker/receiver/testdata/stats.json new file mode 100644 index 
000000000000..bd8be426e6f5 --- /dev/null +++ b/internal/docker/receiver/testdata/stats.json @@ -0,0 +1,200 @@ +{ + "blkio_stats": { + "io_merged_recursive": [], + "io_queue_recursive": [], + "io_service_bytes_recursive": [ + { + "major": 202, + "minor": 0, + "op": "Read", + "value": 56500224 + }, + { + "major": 202, + "minor": 0, + "op": "Write", + "value": 12103680 + }, + { + "major": 202, + "minor": 0, + "op": "Sync", + "value": 65314816 + }, + { + "major": 202, + "minor": 0, + "op": "Async", + "value": 3289088 + }, + { + "major": 202, + "minor": 0, + "op": "Discard", + "value": 0 + }, + { + "major": 202, + "minor": 0, + "op": "Total", + "value": 68603904 + }, + { + "major": 202, + "minor": 0, + "op": "", + "value": 68603904 + } + ], + "io_service_time_recursive": [], + "io_serviced_recursive": [ + { + "major": 202, + "minor": 0, + "op": "Read", + "value": 985 + }, + { + "major": 202, + "minor": 0, + "op": "Write", + "value": 2073 + }, + { + "major": 202, + "minor": 0, + "op": "Sync", + "value": 2902 + }, + { + "major": 202, + "minor": 0, + "op": "Async", + "value": 156 + }, + { + "major": 202, + "minor": 0, + "op": "Discard", + "value": 0 + }, + { + "major": 202, + "minor": 0, + "op": "Total", + "value": 3058 + } + ], + "io_time_recursive": [], + "io_wait_time_recursive": [], + "sectors_recursive": [] + }, + "cpu_stats": { + "cpu_usage": { + "percpu_usage": [ + 8043152341, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "total_usage": 8043152341, + "usage_in_kernelmode": 970000000, + "usage_in_usermode": 3510000000 + }, + "online_cpus": 1, + "system_cpu_usage": 4525290000000, + "throttling_data": { + "periods": 0, + "throttled_periods": 0, + "throttled_time": 0 + } + }, + "id": "a2596076ca048f02bcd16a8acd12a7ea2d3bc430d1cde095357239dd3925a4c3", + "memory_stats": { + "limit": 1026359296, + "max_usage": 325246976, + "stats": { + "active_anon": 72585216, + "active_file": 40316928, + "cache": 80760832, + "dirty": 0, + "hierarchical_memory_limit": 9223372036854771712, + 
"hierarchical_memsw_limit": 0, + "inactive_anon": 0, + "inactive_file": 40579072, + "mapped_file": 37711872, + "pgfault": 21714, + "pgmajfault": 396, + "pgpgin": 85140, + "pgpgout": 47694, + "rss": 72568832, + "rss_huge": 0, + "total_active_anon": 72585216, + "total_active_file": 40316928, + "total_cache": 80760832, + "total_dirty": 0, + "total_inactive_anon": 0, + "total_inactive_file": 40579072, + "total_mapped_file": 37711872, + "total_pgfault": 21714, + "total_pgmajfault": 396, + "total_pgpgin": 85140, + "total_pgpgout": 47694, + "total_rss": 72568832, + "total_rss_huge": 0, + "total_unevictable": 0, + "total_writeback": 0, + "unevictable": 0, + "writeback": 0 + }, + "usage": 156676096 + }, + "name": "/my-container", + "networks": { + "eth0": { + "rx_bytes": 2787669, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 16598, + "tx_bytes": 2275281, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 9050 + } + }, + "num_procs": 0, + "pids_stats": { + "current": 34 + }, + "precpu_stats": { + "cpu_usage": { + "percpu_usage": [ + 8041201425, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "total_usage": 8041201425, + "usage_in_kernelmode": 970000000, + "usage_in_usermode": 3510000000 + }, + "online_cpus": 1, + "system_cpu_usage": 4524280000000, + "throttling_data": { + "periods": 0, + "throttled_periods": 0, + "throttled_time": 0 + } + }, + "preread": "2020-01-01T00:00:03.012345678Z", + "read": "2020-01-01T00:00:04.012345678Z", + "storage_stats": {} +} From 14e41f5c074ad806bf750c7959e7efe50b7eeda4 Mon Sep 17 00:00:00 2001 From: Adam Boguszewski Date: Tue, 14 May 2024 13:53:27 +0200 Subject: [PATCH 2/8] migrate dockerstats receiver to common package --- internal/docker/go.mod | 28 +- internal/docker/go.sum | 60 +- receiver/dockerstatsreceiver/config.go | 56 - receiver/dockerstatsreceiver/config_test.go | 110 - receiver/dockerstatsreceiver/factory.go | 24 +- .../generated_package_test.go | 3 +- receiver/dockerstatsreceiver/go.mod | 11 +- 
receiver/dockerstatsreceiver/go.sum | 6 +- .../internal/metadata/generated_config.go | 396 -- .../metadata/generated_config_test.go | 274 - .../internal/metadata/generated_metrics.go | 4416 ----------------- .../metadata/generated_metrics_test.go | 1413 ------ .../internal/metadata/generated_resource.go | 78 - .../metadata/generated_resource_test.go | 76 - .../internal/metadata/package_test.go | 14 - .../internal/metadata/testdata/config.yaml | 379 -- receiver/dockerstatsreceiver/metadata.yaml | 699 --- receiver/dockerstatsreceiver/metric_helper.go | 134 - .../dockerstatsreceiver/metric_helper_test.go | 121 - receiver/dockerstatsreceiver/receiver.go | 312 -- receiver/dockerstatsreceiver/receiver_test.go | 433 -- 21 files changed, 59 insertions(+), 8984 deletions(-) delete mode 100644 receiver/dockerstatsreceiver/config.go delete mode 100644 receiver/dockerstatsreceiver/config_test.go delete mode 100644 receiver/dockerstatsreceiver/internal/metadata/generated_config.go delete mode 100644 receiver/dockerstatsreceiver/internal/metadata/generated_config_test.go delete mode 100644 receiver/dockerstatsreceiver/internal/metadata/generated_metrics.go delete mode 100644 receiver/dockerstatsreceiver/internal/metadata/generated_metrics_test.go delete mode 100644 receiver/dockerstatsreceiver/internal/metadata/generated_resource.go delete mode 100644 receiver/dockerstatsreceiver/internal/metadata/generated_resource_test.go delete mode 100644 receiver/dockerstatsreceiver/internal/metadata/package_test.go delete mode 100644 receiver/dockerstatsreceiver/internal/metadata/testdata/config.yaml delete mode 100644 receiver/dockerstatsreceiver/metric_helper.go delete mode 100644 receiver/dockerstatsreceiver/metric_helper_test.go delete mode 100644 receiver/dockerstatsreceiver/receiver.go delete mode 100644 receiver/dockerstatsreceiver/receiver_test.go diff --git a/internal/docker/go.mod b/internal/docker/go.mod index d7a9a405ed0f..5a7632055568 100644 --- a/internal/docker/go.mod +++ 
b/internal/docker/go.mod @@ -6,16 +6,16 @@ require ( github.com/docker/docker v26.1.2+incompatible github.com/gobwas/glob v0.2.3 github.com/google/go-cmp v0.6.0 - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.100.0 - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.100.0 + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.101.0 + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.101.0 github.com/stretchr/testify v1.9.0 - go.opentelemetry.io/collector/component v0.100.0 - go.opentelemetry.io/collector/confmap v0.100.0 - go.opentelemetry.io/collector/consumer v0.100.0 - go.opentelemetry.io/collector/filter v0.100.0 - go.opentelemetry.io/collector/pdata v1.7.0 - go.opentelemetry.io/collector/receiver v0.100.0 - go.opentelemetry.io/collector/semconv v0.100.0 + go.opentelemetry.io/collector/component v0.101.0 + go.opentelemetry.io/collector/confmap v0.101.0 + go.opentelemetry.io/collector/consumer v0.101.0 + go.opentelemetry.io/collector/filter v0.101.0 + go.opentelemetry.io/collector/pdata v1.8.0 + go.opentelemetry.io/collector/receiver v0.101.0 + go.opentelemetry.io/collector/semconv v0.101.0 go.uber.org/goleak v1.3.0 go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 @@ -47,17 +47,17 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/morikuni/aec v1.0.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.100.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.101.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.2 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_golang v1.19.0 // indirect + github.com/prometheus/client_golang v1.19.1 // indirect 
github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.53.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect - go.opentelemetry.io/collector v0.100.0 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.100.0 // indirect + go.opentelemetry.io/collector v0.101.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.101.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect go.opentelemetry.io/otel v1.26.0 // indirect go.opentelemetry.io/otel/exporters/prometheus v0.48.0 // indirect @@ -71,7 +71,7 @@ require ( golang.org/x/time v0.4.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect google.golang.org/grpc v1.63.2 // indirect - google.golang.org/protobuf v1.34.0 // indirect + google.golang.org/protobuf v1.34.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect gotest.tools/v3 v3.0.3 // indirect ) diff --git a/internal/docker/go.sum b/internal/docker/go.sum index e34782d9444d..dcbcc9d20bbe 100644 --- a/internal/docker/go.sum +++ b/internal/docker/go.sum @@ -73,12 +73,12 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.100.0 h1:4cDqd1ZU/HQZmijLqEvzbVV/rdgUQG08IBq5vbO1308= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.100.0/go.mod h1:940rHhObAdeWLuBGBmuK4mRwrXSQv63SOX6arprCPrk= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.100.0 h1:UtY5UvfEFNub+E5zT6vCxU18A2GYdMXZK/ClcwE2nNw= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.100.0/go.mod 
h1:5Ak565rKB3OZTunqssZHuXEhGTz1f73VdkfwiVp8vxg= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.100.0 h1:549nET9f5zIYC5F3/FBzgZfumjBOy4xx+9rCJ24TRxw= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.100.0/go.mod h1:5r/6EFZnfR/zIP+fQQoD11x9b+TJYbR69kBgme0NKVs= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.101.0 h1:Ohhry/Fcxh7/ysAxFhW2IJR/4hWEPaizDNtg02upYLA= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.101.0/go.mod h1:H2vPArfULuCAm4Y6GHNxuLrjFGSgO16NJgdGACxBhSM= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.101.0 h1:TCQYvGS2MKTotOTQDnHUSd4ljEzXRzHXopdv71giKWU= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.101.0/go.mod h1:Nl2d4DSK/IbaWnnBxYyhMNUW6C9sb5/4idVZrSW/5Ps= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.101.0 h1:dVINhi/nne11lG+Xnwuy9t/N4xyaH2Om2EU+5lphCA4= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.101.0/go.mod h1:kjyfpKOuBfkx3UsJQsbQ5eTJM3yQWiRYaYxs47PpxvI= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= @@ -88,8 +88,8 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= -github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= +github.com/prometheus/client_golang v1.19.1 
h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE= @@ -109,26 +109,26 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.opentelemetry.io/collector v0.100.0 h1:Q6IAGjMzjkZ7WepuwyCa6UytDPP0O88GemonQOUjP2s= -go.opentelemetry.io/collector v0.100.0/go.mod h1:QlVjQWlrPtBwVRm8tr+3P4FzNZSlYEfuUSaWoAwK+ko= -go.opentelemetry.io/collector/component v0.100.0 h1:3Y6dl3uDkDzilaikYrPxbZDOlzrDijrF1cIPzfyTwWA= -go.opentelemetry.io/collector/component v0.100.0/go.mod h1:HLEqEBFzPW2umagnVC3gY8yogOBhbzvuzTBFUqH54HY= -go.opentelemetry.io/collector/config/configtelemetry v0.100.0 h1:unlhNrFFXCinxk6iPHPYwANO+eFY4S1NTb5knSxteW4= -go.opentelemetry.io/collector/config/configtelemetry v0.100.0/go.mod h1:YV5PaOdtnU1xRomPcYqoHmyCr48tnaAREeGO96EZw8o= -go.opentelemetry.io/collector/confmap v0.100.0 h1:r70znwLWUMFRWL4LRcWLhdFfzmTvehXgbnlHFCDm0Tc= -go.opentelemetry.io/collector/confmap v0.100.0/go.mod h1:BWKPIpYeUzSG6ZgCJMjF7xsLvyrvJCfYURl57E5vhiQ= -go.opentelemetry.io/collector/consumer v0.100.0 h1:8sALAcWvizSyrZJCF+zTqD2RLmZAyeCuaQrNS2q6ti0= -go.opentelemetry.io/collector/consumer v0.100.0/go.mod h1:JOPOq8nSTdnQwc2xdHl4hcuYBYV8gjN2SlFqlqBe/Nc= -go.opentelemetry.io/collector/filter v0.100.0 h1:XQyhnqJSK2sw+e9yvpkvl7y8QdJwH/gAnFoZDfEZ0dQ= -go.opentelemetry.io/collector/filter v0.100.0/go.mod h1:3xGRpZo11DMJTDtMUGsDNkxKM6LMHqROGrQ/aTvskh8= 
-go.opentelemetry.io/collector/pdata v1.7.0 h1:/WNsBbE6KM3TTPUb9v/5B7IDqnDkgf8GyFhVJJqu7II= -go.opentelemetry.io/collector/pdata v1.7.0/go.mod h1:ehCBBA5GoFrMZkwyZAKGY/lAVSgZf6rzUt3p9mddmPU= -go.opentelemetry.io/collector/pdata/testdata v0.100.0 h1:pliojioiAv+CuLNTK+8tnCD2UgiJbKX9q8bDnpHkV1U= -go.opentelemetry.io/collector/pdata/testdata v0.100.0/go.mod h1:01BHOXvXaQaLLt5J34S093u3e+j//RhbfmEujpFJ/ME= -go.opentelemetry.io/collector/receiver v0.100.0 h1:RFeOVhS7o39G562w0H0hqfh1o2QvK71ViHQuWnnfglI= -go.opentelemetry.io/collector/receiver v0.100.0/go.mod h1:Qo3xkorbUy0VXHh7WxMQyphIWiqxI3ZOG0O4YqQ2mCE= -go.opentelemetry.io/collector/semconv v0.100.0 h1:QArUvWcbmsMjM4PV0zngUHRizZeUXibsPBWjDuNJXAs= -go.opentelemetry.io/collector/semconv v0.100.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A= +go.opentelemetry.io/collector v0.101.0 h1:jnCI/JZgpEYONWy4LCvif4CjMM7cPS4XvGHp3OrZpYo= +go.opentelemetry.io/collector v0.101.0/go.mod h1:N0xja/N3NUDIC55SjjNzyyIoxE6YoCEZC3aXQ39yIVs= +go.opentelemetry.io/collector/component v0.101.0 h1:2sILYgE8cZJj0Vseh6LUjS9iXPyqDPTx/R8yf8IPu+4= +go.opentelemetry.io/collector/component v0.101.0/go.mod h1:OB1uBpQZ2Ba6wVui/sthh6j+CPxVQIy2ou5rzZPINQQ= +go.opentelemetry.io/collector/config/configtelemetry v0.101.0 h1:G9RerNdBUm6rYW6wrJoKzleBiDsCGaCjtQx5UYr0hzw= +go.opentelemetry.io/collector/config/configtelemetry v0.101.0/go.mod h1:YV5PaOdtnU1xRomPcYqoHmyCr48tnaAREeGO96EZw8o= +go.opentelemetry.io/collector/confmap v0.101.0 h1:pGXZRBKnZqys1HgNECGSi8Pec5RBGa9vVCfrpcvW+kA= +go.opentelemetry.io/collector/confmap v0.101.0/go.mod h1:BWKPIpYeUzSG6ZgCJMjF7xsLvyrvJCfYURl57E5vhiQ= +go.opentelemetry.io/collector/consumer v0.101.0 h1:9tDxaeHe1+Uovf3fhdx7T4pV5mo/Dc0hniH7O5H3RBA= +go.opentelemetry.io/collector/consumer v0.101.0/go.mod h1:ud5k64on9m7hHTrhjEeLhWbLkd8+Gp06rDt3p86TKNs= +go.opentelemetry.io/collector/filter v0.101.0 h1:tNs6+liajg4hxSmtX5tcuGBefSPB+TEyyK3KTPp+dYY= +go.opentelemetry.io/collector/filter v0.101.0/go.mod 
h1:Kp9rCRB60SDm+pjrsaK95fkwfEXGh4j1yewvATTNkfI= +go.opentelemetry.io/collector/pdata v1.8.0 h1:d/QQgZxB4Y+d3mqLVh2ozvzujUhloD3P/fk7X+In764= +go.opentelemetry.io/collector/pdata v1.8.0/go.mod h1:/W7clu0wFC4WSRp94Ucn6Vm36Wkrt+tmtlDb1aiNZCY= +go.opentelemetry.io/collector/pdata/testdata v0.101.0 h1:JzeUtg5RN1iIFgY8DakGlqBkGxOTJlkaYlLausnEGKY= +go.opentelemetry.io/collector/pdata/testdata v0.101.0/go.mod h1:ZGobfCus4fWo5RduZ7ENI0+HD9BewgKuO6qU2rBVnUg= +go.opentelemetry.io/collector/receiver v0.101.0 h1:+YJQvcAw5Es15Ub8hYqqZumKbe7D0SMU8XCgGRxc25M= +go.opentelemetry.io/collector/receiver v0.101.0/go.mod h1:JFVHAkIIz9uOk85u9pHsYRcyFj1ZAUpw59ahNZ28+ko= +go.opentelemetry.io/collector/semconv v0.101.0 h1:tOe9iTe9dDCnvz/bqgfNRr4w80kXG8505tQJ5h5v08Q= +go.opentelemetry.io/collector/semconv v0.101.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= go.opentelemetry.io/otel v1.26.0 h1:LQwgL5s/1W7YiiRwxf03QGnWLb2HW4pLiAhaA5cZXBs= @@ -200,8 +200,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= -google.golang.org/protobuf v1.34.0 h1:Qo/qEd2RZPCf2nKuorzksSknv0d3ERwp1vFG38gSmH4= -google.golang.org/protobuf v1.34.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/receiver/dockerstatsreceiver/config.go b/receiver/dockerstatsreceiver/config.go deleted file mode 100644 index 86825de8c65e..000000000000 --- a/receiver/dockerstatsreceiver/config.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package dockerstatsreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/dockerstatsreceiver" - -import ( - "errors" - - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/receiver/scraperhelper" - - "github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker" - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/dockerstatsreceiver/internal/metadata" -) - -var _ component.Config = (*Config)(nil) - -type Config struct { - scraperhelper.ControllerConfig `mapstructure:",squash"` - // The URL of the docker server. Default is "unix:///var/run/docker.sock" - Endpoint string `mapstructure:"endpoint"` - - // A mapping of container label names to MetricDescriptor label keys. - // The corresponding container label value will become the DataPoint label value - // for the mapped name. E.g. `io.kubernetes.container.name: container_spec_name` - // would result in a MetricDescriptor label called `container_spec_name` whose - // Metric DataPoints have the value of the `io.kubernetes.container.name` container label. - ContainerLabelsToMetricLabels map[string]string `mapstructure:"container_labels_to_metric_labels"` - - // A mapping of container environment variable names to MetricDescriptor label - // keys. The corresponding env var values become the DataPoint label value. - // E.g. 
`APP_VERSION: version` would result MetricDescriptors having a label - // key called `version` whose DataPoint label values are the value of the - // `APP_VERSION` environment variable configured for that particular container, if - // present. - EnvVarsToMetricLabels map[string]string `mapstructure:"env_vars_to_metric_labels"` - - // A list of filters whose matching images are to be excluded. Supports literals, globs, and regex. - ExcludedImages []string `mapstructure:"excluded_images"` - - // Docker client API version. Default is 1.22 - DockerAPIVersion string `mapstructure:"api_version"` - - // MetricsBuilderConfig config. Enable or disable stats by name. - metadata.MetricsBuilderConfig `mapstructure:",squash"` -} - -func (config Config) Validate() error { - if config.Endpoint == "" { - return errors.New("endpoint must be specified") - } - if err := docker.VersionIsValidAndGTE(config.DockerAPIVersion, minimumRequiredDockerAPIVersion); err != nil { - return err - } - return nil -} diff --git a/receiver/dockerstatsreceiver/config_test.go b/receiver/dockerstatsreceiver/config_test.go deleted file mode 100644 index d9a7db9f450d..000000000000 --- a/receiver/dockerstatsreceiver/config_test.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package dockerstatsreceiver - -import ( - "path/filepath" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/confmap/confmaptest" - "go.opentelemetry.io/collector/receiver/scraperhelper" - - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/dockerstatsreceiver/internal/metadata" -) - -func TestLoadConfig(t *testing.T) { - t.Parallel() - - tests := []struct { - id component.ID - expected component.Config - }{ - { - id: 
component.NewIDWithName(metadata.Type, ""), - expected: createDefaultConfig(), - }, - { - id: component.NewIDWithName(metadata.Type, "allsettings"), - expected: &Config{ - ControllerConfig: scraperhelper.ControllerConfig{ - CollectionInterval: 2 * time.Second, - InitialDelay: time.Second, - Timeout: 20 * time.Second, - }, - - Endpoint: "http://example.com/", - DockerAPIVersion: "1.40", - - ExcludedImages: []string{ - "undesired-container", - "another-*-container", - }, - - ContainerLabelsToMetricLabels: map[string]string{ - "my.container.label": "my-metric-label", - "my.other.container.label": "my-other-metric-label", - }, - - EnvVarsToMetricLabels: map[string]string{ - "MY_ENVIRONMENT_VARIABLE": "my-metric-label", - "MY_OTHER_ENVIRONMENT_VARIABLE": "my-other-metric-label", - }, - MetricsBuilderConfig: func() metadata.MetricsBuilderConfig { - m := metadata.DefaultMetricsBuilderConfig() - m.Metrics.ContainerCPUUsageSystem = metadata.MetricConfig{ - Enabled: false, - } - m.Metrics.ContainerMemoryTotalRss = metadata.MetricConfig{ - Enabled: true, - } - return m - }(), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.id.String(), func(t *testing.T) { - cm, err := confmaptest.LoadConf(filepath.Join("testdata", "config.yaml")) - require.NoError(t, err) - - factory := NewFactory() - cfg := factory.CreateDefaultConfig() - - sub, err := cm.Sub(tt.id.String()) - require.NoError(t, err) - require.NoError(t, component.UnmarshalConfig(sub, cfg)) - - assert.NoError(t, component.ValidateConfig(cfg)) - if diff := cmp.Diff(tt.expected, cfg, cmpopts.IgnoreUnexported(metadata.MetricConfig{}), cmpopts.IgnoreUnexported(metadata.ResourceAttributeConfig{})); diff != "" { - t.Errorf("Config mismatch (-expected +actual):\n%s", diff) - } - }) - } -} - -func TestValidateErrors(t *testing.T) { - cfg := &Config{ControllerConfig: scraperhelper.NewDefaultControllerConfig()} - assert.Equal(t, "endpoint must be specified", component.ValidateConfig(cfg).Error()) - - cfg = &Config{ - 
DockerAPIVersion: "1.21", - Endpoint: "someEndpoint", - ControllerConfig: scraperhelper.ControllerConfig{CollectionInterval: 1 * time.Second}, - } - assert.Equal(t, `"api_version" 1.21 must be at least 1.25`, component.ValidateConfig(cfg).Error()) - - cfg = &Config{ - Endpoint: "someEndpoint", - DockerAPIVersion: "1.25", - ControllerConfig: scraperhelper.ControllerConfig{}, - } - assert.Equal(t, `"collection_interval": requires positive value`, component.ValidateConfig(cfg).Error()) -} diff --git a/receiver/dockerstatsreceiver/factory.go b/receiver/dockerstatsreceiver/factory.go index 64440fcc5412..fff35eb856da 100644 --- a/receiver/dockerstatsreceiver/factory.go +++ b/receiver/dockerstatsreceiver/factory.go @@ -5,48 +5,36 @@ package dockerstatsreceiver // import "github.com/open-telemetry/opentelemetry-c import ( "context" - "time" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/receiver/scraperhelper" + dockerReceiver "github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker/receiver" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/dockerstatsreceiver/internal/metadata" ) func NewFactory() receiver.Factory { return receiver.NewFactory( metadata.Type, - createDefaultConfig, + dockerReceiver.CreateDefaultConfig, receiver.WithMetrics(createMetricsReceiver, metadata.MetricsStability)) } -func createDefaultConfig() component.Config { - scs := scraperhelper.NewDefaultControllerConfig() - scs.CollectionInterval = 10 * time.Second - scs.Timeout = 5 * time.Second - return &Config{ - ControllerConfig: scs, - Endpoint: "unix:///var/run/docker.sock", - DockerAPIVersion: defaultDockerAPIVersion, - MetricsBuilderConfig: metadata.DefaultMetricsBuilderConfig(), - } -} - func createMetricsReceiver( _ context.Context, params receiver.CreateSettings, config component.Config, consumer consumer.Metrics, ) (receiver.Metrics, error) { - 
dockerConfig := config.(*Config) - dsr := newMetricsReceiver(params, dockerConfig) + dockerConfig := config.(*dockerReceiver.Config) + dsr := dockerReceiver.NewMetricsReceiver(params, dockerConfig) - scrp, err := scraperhelper.NewScraper(metadata.Type.String(), dsr.scrapeV2, scraperhelper.WithStart(dsr.start), scraperhelper.WithShutdown(dsr.shutdown)) + scrp, err := scraperhelper.NewScraper(metadata.Type.String(), dsr.ScrapeV2, scraperhelper.WithStart(dsr.Start), scraperhelper.WithShutdown(dsr.Shutdown)) if err != nil { return nil, err } - return scraperhelper.NewScraperControllerReceiver(&dsr.config.ControllerConfig, params, consumer, scraperhelper.AddScraper(scrp)) + return scraperhelper.NewScraperControllerReceiver(&dsr.Config.ControllerConfig, params, consumer, scraperhelper.AddScraper(scrp)) } diff --git a/receiver/dockerstatsreceiver/generated_package_test.go b/receiver/dockerstatsreceiver/generated_package_test.go index bb69d6534c4a..deee05e3f813 100644 --- a/receiver/dockerstatsreceiver/generated_package_test.go +++ b/receiver/dockerstatsreceiver/generated_package_test.go @@ -3,9 +3,8 @@ package dockerstatsreceiver import ( - "testing" - "go.uber.org/goleak" + "testing" ) func TestMain(m *testing.M) { diff --git a/receiver/dockerstatsreceiver/go.mod b/receiver/dockerstatsreceiver/go.mod index 72a442a83647..5800f6ae1b07 100644 --- a/receiver/dockerstatsreceiver/go.mod +++ b/receiver/dockerstatsreceiver/go.mod @@ -3,24 +3,18 @@ module github.com/open-telemetry/opentelemetry-collector-contrib/receiver/docker go 1.21.0 require ( - github.com/docker/docker v25.0.5+incompatible - github.com/google/go-cmp v0.6.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker v0.101.0 - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.101.0 - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.101.0 github.com/stretchr/testify v1.9.0 github.com/testcontainers/testcontainers-go v0.31.0 
go.opentelemetry.io/collector/component v0.101.0 go.opentelemetry.io/collector/confmap v0.101.0 go.opentelemetry.io/collector/consumer v0.101.0 - go.opentelemetry.io/collector/filter v0.101.0 go.opentelemetry.io/collector/pdata v1.8.0 go.opentelemetry.io/collector/receiver v0.101.0 go.opentelemetry.io/collector/semconv v0.101.0 go.opentelemetry.io/otel/metric v1.26.0 go.opentelemetry.io/otel/trace v1.26.0 go.uber.org/goleak v1.3.0 - go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 ) @@ -37,6 +31,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect + github.com/docker/docker v26.1.2+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -56,6 +51,7 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect @@ -63,7 +59,6 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/morikuni/aec v1.0.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.101.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0 // indirect github.com/pkg/errors v0.9.1 // indirect @@ -81,11 +76,13 @@ require ( github.com/yusufpapurcu/wmi v1.2.4 // indirect go.opentelemetry.io/collector v0.101.0 // indirect go.opentelemetry.io/collector/config/configtelemetry v0.101.0 // indirect + go.opentelemetry.io/collector/filter v0.101.0 // indirect 
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect go.opentelemetry.io/otel v1.26.0 // indirect go.opentelemetry.io/otel/exporters/prometheus v0.48.0 // indirect go.opentelemetry.io/otel/sdk v1.26.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.26.0 // indirect + go.uber.org/multierr v1.11.0 // indirect golang.org/x/crypto v0.22.0 // indirect golang.org/x/mod v0.16.0 // indirect golang.org/x/net v0.24.0 // indirect diff --git a/receiver/dockerstatsreceiver/go.sum b/receiver/dockerstatsreceiver/go.sum index 69373e9af00d..aafff456c6e5 100644 --- a/receiver/dockerstatsreceiver/go.sum +++ b/receiver/dockerstatsreceiver/go.sum @@ -27,8 +27,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.2+incompatible h1:UVX5ZOrrfTGZZYEP+ZDq3Xn9PdHNXaSYMFPDumMqG2k= +github.com/docker/docker v26.1.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -81,6 +81,8 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= 
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/dockerstatsreceiver/internal/metadata/generated_config.go b/receiver/dockerstatsreceiver/internal/metadata/generated_config.go deleted file mode 100644 index 00d5e99a5ee6..000000000000 --- a/receiver/dockerstatsreceiver/internal/metadata/generated_config.go +++ /dev/null @@ -1,396 +0,0 @@ -// Code generated by mdatagen. DO NOT EDIT. - -package metadata - -import ( - "go.opentelemetry.io/collector/confmap" - "go.opentelemetry.io/collector/filter" -) - -// MetricConfig provides common config for a particular metric. -type MetricConfig struct { - Enabled bool `mapstructure:"enabled"` - - enabledSetByUser bool -} - -func (ms *MetricConfig) Unmarshal(parser *confmap.Conf) error { - if parser == nil { - return nil - } - err := parser.Unmarshal(ms) - if err != nil { - return err - } - ms.enabledSetByUser = parser.IsSet("enabled") - return nil -} - -// MetricsConfig provides config for docker_stats metrics. 
-type MetricsConfig struct { - ContainerBlockioIoMergedRecursive MetricConfig `mapstructure:"container.blockio.io_merged_recursive"` - ContainerBlockioIoQueuedRecursive MetricConfig `mapstructure:"container.blockio.io_queued_recursive"` - ContainerBlockioIoServiceBytesRecursive MetricConfig `mapstructure:"container.blockio.io_service_bytes_recursive"` - ContainerBlockioIoServiceTimeRecursive MetricConfig `mapstructure:"container.blockio.io_service_time_recursive"` - ContainerBlockioIoServicedRecursive MetricConfig `mapstructure:"container.blockio.io_serviced_recursive"` - ContainerBlockioIoTimeRecursive MetricConfig `mapstructure:"container.blockio.io_time_recursive"` - ContainerBlockioIoWaitTimeRecursive MetricConfig `mapstructure:"container.blockio.io_wait_time_recursive"` - ContainerBlockioSectorsRecursive MetricConfig `mapstructure:"container.blockio.sectors_recursive"` - ContainerCPULimit MetricConfig `mapstructure:"container.cpu.limit"` - ContainerCPULogicalCount MetricConfig `mapstructure:"container.cpu.logical.count"` - ContainerCPUShares MetricConfig `mapstructure:"container.cpu.shares"` - ContainerCPUThrottlingDataPeriods MetricConfig `mapstructure:"container.cpu.throttling_data.periods"` - ContainerCPUThrottlingDataThrottledPeriods MetricConfig `mapstructure:"container.cpu.throttling_data.throttled_periods"` - ContainerCPUThrottlingDataThrottledTime MetricConfig `mapstructure:"container.cpu.throttling_data.throttled_time"` - ContainerCPUUsageKernelmode MetricConfig `mapstructure:"container.cpu.usage.kernelmode"` - ContainerCPUUsagePercpu MetricConfig `mapstructure:"container.cpu.usage.percpu"` - ContainerCPUUsageSystem MetricConfig `mapstructure:"container.cpu.usage.system"` - ContainerCPUUsageTotal MetricConfig `mapstructure:"container.cpu.usage.total"` - ContainerCPUUsageUsermode MetricConfig `mapstructure:"container.cpu.usage.usermode"` - ContainerCPUUtilization MetricConfig `mapstructure:"container.cpu.utilization"` - ContainerMemoryActiveAnon 
MetricConfig `mapstructure:"container.memory.active_anon"` - ContainerMemoryActiveFile MetricConfig `mapstructure:"container.memory.active_file"` - ContainerMemoryAnon MetricConfig `mapstructure:"container.memory.anon"` - ContainerMemoryCache MetricConfig `mapstructure:"container.memory.cache"` - ContainerMemoryDirty MetricConfig `mapstructure:"container.memory.dirty"` - ContainerMemoryFails MetricConfig `mapstructure:"container.memory.fails"` - ContainerMemoryFile MetricConfig `mapstructure:"container.memory.file"` - ContainerMemoryHierarchicalMemoryLimit MetricConfig `mapstructure:"container.memory.hierarchical_memory_limit"` - ContainerMemoryHierarchicalMemswLimit MetricConfig `mapstructure:"container.memory.hierarchical_memsw_limit"` - ContainerMemoryInactiveAnon MetricConfig `mapstructure:"container.memory.inactive_anon"` - ContainerMemoryInactiveFile MetricConfig `mapstructure:"container.memory.inactive_file"` - ContainerMemoryMappedFile MetricConfig `mapstructure:"container.memory.mapped_file"` - ContainerMemoryPercent MetricConfig `mapstructure:"container.memory.percent"` - ContainerMemoryPgfault MetricConfig `mapstructure:"container.memory.pgfault"` - ContainerMemoryPgmajfault MetricConfig `mapstructure:"container.memory.pgmajfault"` - ContainerMemoryPgpgin MetricConfig `mapstructure:"container.memory.pgpgin"` - ContainerMemoryPgpgout MetricConfig `mapstructure:"container.memory.pgpgout"` - ContainerMemoryRss MetricConfig `mapstructure:"container.memory.rss"` - ContainerMemoryRssHuge MetricConfig `mapstructure:"container.memory.rss_huge"` - ContainerMemoryTotalActiveAnon MetricConfig `mapstructure:"container.memory.total_active_anon"` - ContainerMemoryTotalActiveFile MetricConfig `mapstructure:"container.memory.total_active_file"` - ContainerMemoryTotalCache MetricConfig `mapstructure:"container.memory.total_cache"` - ContainerMemoryTotalDirty MetricConfig `mapstructure:"container.memory.total_dirty"` - ContainerMemoryTotalInactiveAnon MetricConfig 
`mapstructure:"container.memory.total_inactive_anon"` - ContainerMemoryTotalInactiveFile MetricConfig `mapstructure:"container.memory.total_inactive_file"` - ContainerMemoryTotalMappedFile MetricConfig `mapstructure:"container.memory.total_mapped_file"` - ContainerMemoryTotalPgfault MetricConfig `mapstructure:"container.memory.total_pgfault"` - ContainerMemoryTotalPgmajfault MetricConfig `mapstructure:"container.memory.total_pgmajfault"` - ContainerMemoryTotalPgpgin MetricConfig `mapstructure:"container.memory.total_pgpgin"` - ContainerMemoryTotalPgpgout MetricConfig `mapstructure:"container.memory.total_pgpgout"` - ContainerMemoryTotalRss MetricConfig `mapstructure:"container.memory.total_rss"` - ContainerMemoryTotalRssHuge MetricConfig `mapstructure:"container.memory.total_rss_huge"` - ContainerMemoryTotalUnevictable MetricConfig `mapstructure:"container.memory.total_unevictable"` - ContainerMemoryTotalWriteback MetricConfig `mapstructure:"container.memory.total_writeback"` - ContainerMemoryUnevictable MetricConfig `mapstructure:"container.memory.unevictable"` - ContainerMemoryUsageLimit MetricConfig `mapstructure:"container.memory.usage.limit"` - ContainerMemoryUsageMax MetricConfig `mapstructure:"container.memory.usage.max"` - ContainerMemoryUsageTotal MetricConfig `mapstructure:"container.memory.usage.total"` - ContainerMemoryWriteback MetricConfig `mapstructure:"container.memory.writeback"` - ContainerNetworkIoUsageRxBytes MetricConfig `mapstructure:"container.network.io.usage.rx_bytes"` - ContainerNetworkIoUsageRxDropped MetricConfig `mapstructure:"container.network.io.usage.rx_dropped"` - ContainerNetworkIoUsageRxErrors MetricConfig `mapstructure:"container.network.io.usage.rx_errors"` - ContainerNetworkIoUsageRxPackets MetricConfig `mapstructure:"container.network.io.usage.rx_packets"` - ContainerNetworkIoUsageTxBytes MetricConfig `mapstructure:"container.network.io.usage.tx_bytes"` - ContainerNetworkIoUsageTxDropped MetricConfig 
`mapstructure:"container.network.io.usage.tx_dropped"` - ContainerNetworkIoUsageTxErrors MetricConfig `mapstructure:"container.network.io.usage.tx_errors"` - ContainerNetworkIoUsageTxPackets MetricConfig `mapstructure:"container.network.io.usage.tx_packets"` - ContainerPidsCount MetricConfig `mapstructure:"container.pids.count"` - ContainerPidsLimit MetricConfig `mapstructure:"container.pids.limit"` - ContainerRestarts MetricConfig `mapstructure:"container.restarts"` - ContainerUptime MetricConfig `mapstructure:"container.uptime"` -} - -func DefaultMetricsConfig() MetricsConfig { - return MetricsConfig{ - ContainerBlockioIoMergedRecursive: MetricConfig{ - Enabled: false, - }, - ContainerBlockioIoQueuedRecursive: MetricConfig{ - Enabled: false, - }, - ContainerBlockioIoServiceBytesRecursive: MetricConfig{ - Enabled: true, - }, - ContainerBlockioIoServiceTimeRecursive: MetricConfig{ - Enabled: false, - }, - ContainerBlockioIoServicedRecursive: MetricConfig{ - Enabled: false, - }, - ContainerBlockioIoTimeRecursive: MetricConfig{ - Enabled: false, - }, - ContainerBlockioIoWaitTimeRecursive: MetricConfig{ - Enabled: false, - }, - ContainerBlockioSectorsRecursive: MetricConfig{ - Enabled: false, - }, - ContainerCPULimit: MetricConfig{ - Enabled: false, - }, - ContainerCPULogicalCount: MetricConfig{ - Enabled: false, - }, - ContainerCPUShares: MetricConfig{ - Enabled: false, - }, - ContainerCPUThrottlingDataPeriods: MetricConfig{ - Enabled: false, - }, - ContainerCPUThrottlingDataThrottledPeriods: MetricConfig{ - Enabled: false, - }, - ContainerCPUThrottlingDataThrottledTime: MetricConfig{ - Enabled: false, - }, - ContainerCPUUsageKernelmode: MetricConfig{ - Enabled: true, - }, - ContainerCPUUsagePercpu: MetricConfig{ - Enabled: false, - }, - ContainerCPUUsageSystem: MetricConfig{ - Enabled: false, - }, - ContainerCPUUsageTotal: MetricConfig{ - Enabled: true, - }, - ContainerCPUUsageUsermode: MetricConfig{ - Enabled: true, - }, - ContainerCPUUtilization: MetricConfig{ - 
Enabled: true, - }, - ContainerMemoryActiveAnon: MetricConfig{ - Enabled: false, - }, - ContainerMemoryActiveFile: MetricConfig{ - Enabled: false, - }, - ContainerMemoryAnon: MetricConfig{ - Enabled: false, - }, - ContainerMemoryCache: MetricConfig{ - Enabled: false, - }, - ContainerMemoryDirty: MetricConfig{ - Enabled: false, - }, - ContainerMemoryFails: MetricConfig{ - Enabled: false, - }, - ContainerMemoryFile: MetricConfig{ - Enabled: true, - }, - ContainerMemoryHierarchicalMemoryLimit: MetricConfig{ - Enabled: false, - }, - ContainerMemoryHierarchicalMemswLimit: MetricConfig{ - Enabled: false, - }, - ContainerMemoryInactiveAnon: MetricConfig{ - Enabled: false, - }, - ContainerMemoryInactiveFile: MetricConfig{ - Enabled: false, - }, - ContainerMemoryMappedFile: MetricConfig{ - Enabled: false, - }, - ContainerMemoryPercent: MetricConfig{ - Enabled: true, - }, - ContainerMemoryPgfault: MetricConfig{ - Enabled: false, - }, - ContainerMemoryPgmajfault: MetricConfig{ - Enabled: false, - }, - ContainerMemoryPgpgin: MetricConfig{ - Enabled: false, - }, - ContainerMemoryPgpgout: MetricConfig{ - Enabled: false, - }, - ContainerMemoryRss: MetricConfig{ - Enabled: false, - }, - ContainerMemoryRssHuge: MetricConfig{ - Enabled: false, - }, - ContainerMemoryTotalActiveAnon: MetricConfig{ - Enabled: false, - }, - ContainerMemoryTotalActiveFile: MetricConfig{ - Enabled: false, - }, - ContainerMemoryTotalCache: MetricConfig{ - Enabled: true, - }, - ContainerMemoryTotalDirty: MetricConfig{ - Enabled: false, - }, - ContainerMemoryTotalInactiveAnon: MetricConfig{ - Enabled: false, - }, - ContainerMemoryTotalInactiveFile: MetricConfig{ - Enabled: false, - }, - ContainerMemoryTotalMappedFile: MetricConfig{ - Enabled: false, - }, - ContainerMemoryTotalPgfault: MetricConfig{ - Enabled: false, - }, - ContainerMemoryTotalPgmajfault: MetricConfig{ - Enabled: false, - }, - ContainerMemoryTotalPgpgin: MetricConfig{ - Enabled: false, - }, - ContainerMemoryTotalPgpgout: MetricConfig{ - 
Enabled: false, - }, - ContainerMemoryTotalRss: MetricConfig{ - Enabled: false, - }, - ContainerMemoryTotalRssHuge: MetricConfig{ - Enabled: false, - }, - ContainerMemoryTotalUnevictable: MetricConfig{ - Enabled: false, - }, - ContainerMemoryTotalWriteback: MetricConfig{ - Enabled: false, - }, - ContainerMemoryUnevictable: MetricConfig{ - Enabled: false, - }, - ContainerMemoryUsageLimit: MetricConfig{ - Enabled: true, - }, - ContainerMemoryUsageMax: MetricConfig{ - Enabled: false, - }, - ContainerMemoryUsageTotal: MetricConfig{ - Enabled: true, - }, - ContainerMemoryWriteback: MetricConfig{ - Enabled: false, - }, - ContainerNetworkIoUsageRxBytes: MetricConfig{ - Enabled: true, - }, - ContainerNetworkIoUsageRxDropped: MetricConfig{ - Enabled: true, - }, - ContainerNetworkIoUsageRxErrors: MetricConfig{ - Enabled: false, - }, - ContainerNetworkIoUsageRxPackets: MetricConfig{ - Enabled: false, - }, - ContainerNetworkIoUsageTxBytes: MetricConfig{ - Enabled: true, - }, - ContainerNetworkIoUsageTxDropped: MetricConfig{ - Enabled: true, - }, - ContainerNetworkIoUsageTxErrors: MetricConfig{ - Enabled: false, - }, - ContainerNetworkIoUsageTxPackets: MetricConfig{ - Enabled: false, - }, - ContainerPidsCount: MetricConfig{ - Enabled: false, - }, - ContainerPidsLimit: MetricConfig{ - Enabled: false, - }, - ContainerRestarts: MetricConfig{ - Enabled: false, - }, - ContainerUptime: MetricConfig{ - Enabled: false, - }, - } -} - -// ResourceAttributeConfig provides common config for a particular resource attribute. -type ResourceAttributeConfig struct { - Enabled bool `mapstructure:"enabled"` - // Experimental: MetricsInclude defines a list of filters for attribute values. - // If the list is not empty, only metrics with matching resource attribute values will be emitted. - MetricsInclude []filter.Config `mapstructure:"metrics_include"` - // Experimental: MetricsExclude defines a list of filters for attribute values. 
- // If the list is not empty, metrics with matching resource attribute values will not be emitted. - // MetricsInclude has higher priority than MetricsExclude. - MetricsExclude []filter.Config `mapstructure:"metrics_exclude"` - - enabledSetByUser bool -} - -func (rac *ResourceAttributeConfig) Unmarshal(parser *confmap.Conf) error { - if parser == nil { - return nil - } - err := parser.Unmarshal(rac) - if err != nil { - return err - } - rac.enabledSetByUser = parser.IsSet("enabled") - return nil -} - -// ResourceAttributesConfig provides config for docker_stats resource attributes. -type ResourceAttributesConfig struct { - ContainerCommandLine ResourceAttributeConfig `mapstructure:"container.command_line"` - ContainerHostname ResourceAttributeConfig `mapstructure:"container.hostname"` - ContainerID ResourceAttributeConfig `mapstructure:"container.id"` - ContainerImageID ResourceAttributeConfig `mapstructure:"container.image.id"` - ContainerImageName ResourceAttributeConfig `mapstructure:"container.image.name"` - ContainerName ResourceAttributeConfig `mapstructure:"container.name"` - ContainerRuntime ResourceAttributeConfig `mapstructure:"container.runtime"` -} - -func DefaultResourceAttributesConfig() ResourceAttributesConfig { - return ResourceAttributesConfig{ - ContainerCommandLine: ResourceAttributeConfig{ - Enabled: false, - }, - ContainerHostname: ResourceAttributeConfig{ - Enabled: true, - }, - ContainerID: ResourceAttributeConfig{ - Enabled: true, - }, - ContainerImageID: ResourceAttributeConfig{ - Enabled: false, - }, - ContainerImageName: ResourceAttributeConfig{ - Enabled: true, - }, - ContainerName: ResourceAttributeConfig{ - Enabled: true, - }, - ContainerRuntime: ResourceAttributeConfig{ - Enabled: true, - }, - } -} - -// MetricsBuilderConfig is a configuration for docker_stats metrics builder. 
-type MetricsBuilderConfig struct { - Metrics MetricsConfig `mapstructure:"metrics"` - ResourceAttributes ResourceAttributesConfig `mapstructure:"resource_attributes"` -} - -func DefaultMetricsBuilderConfig() MetricsBuilderConfig { - return MetricsBuilderConfig{ - Metrics: DefaultMetricsConfig(), - ResourceAttributes: DefaultResourceAttributesConfig(), - } -} diff --git a/receiver/dockerstatsreceiver/internal/metadata/generated_config_test.go b/receiver/dockerstatsreceiver/internal/metadata/generated_config_test.go deleted file mode 100644 index 645dd16efd1a..000000000000 --- a/receiver/dockerstatsreceiver/internal/metadata/generated_config_test.go +++ /dev/null @@ -1,274 +0,0 @@ -// Code generated by mdatagen. DO NOT EDIT. - -package metadata - -import ( - "path/filepath" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/confmap/confmaptest" -) - -func TestMetricsBuilderConfig(t *testing.T) { - tests := []struct { - name string - want MetricsBuilderConfig - }{ - { - name: "default", - want: DefaultMetricsBuilderConfig(), - }, - { - name: "all_set", - want: MetricsBuilderConfig{ - Metrics: MetricsConfig{ - ContainerBlockioIoMergedRecursive: MetricConfig{Enabled: true}, - ContainerBlockioIoQueuedRecursive: MetricConfig{Enabled: true}, - ContainerBlockioIoServiceBytesRecursive: MetricConfig{Enabled: true}, - ContainerBlockioIoServiceTimeRecursive: MetricConfig{Enabled: true}, - ContainerBlockioIoServicedRecursive: MetricConfig{Enabled: true}, - ContainerBlockioIoTimeRecursive: MetricConfig{Enabled: true}, - ContainerBlockioIoWaitTimeRecursive: MetricConfig{Enabled: true}, - ContainerBlockioSectorsRecursive: MetricConfig{Enabled: true}, - ContainerCPULimit: MetricConfig{Enabled: true}, - ContainerCPULogicalCount: MetricConfig{Enabled: true}, - ContainerCPUShares: MetricConfig{Enabled: true}, - 
ContainerCPUThrottlingDataPeriods: MetricConfig{Enabled: true}, - ContainerCPUThrottlingDataThrottledPeriods: MetricConfig{Enabled: true}, - ContainerCPUThrottlingDataThrottledTime: MetricConfig{Enabled: true}, - ContainerCPUUsageKernelmode: MetricConfig{Enabled: true}, - ContainerCPUUsagePercpu: MetricConfig{Enabled: true}, - ContainerCPUUsageSystem: MetricConfig{Enabled: true}, - ContainerCPUUsageTotal: MetricConfig{Enabled: true}, - ContainerCPUUsageUsermode: MetricConfig{Enabled: true}, - ContainerCPUUtilization: MetricConfig{Enabled: true}, - ContainerMemoryActiveAnon: MetricConfig{Enabled: true}, - ContainerMemoryActiveFile: MetricConfig{Enabled: true}, - ContainerMemoryAnon: MetricConfig{Enabled: true}, - ContainerMemoryCache: MetricConfig{Enabled: true}, - ContainerMemoryDirty: MetricConfig{Enabled: true}, - ContainerMemoryFails: MetricConfig{Enabled: true}, - ContainerMemoryFile: MetricConfig{Enabled: true}, - ContainerMemoryHierarchicalMemoryLimit: MetricConfig{Enabled: true}, - ContainerMemoryHierarchicalMemswLimit: MetricConfig{Enabled: true}, - ContainerMemoryInactiveAnon: MetricConfig{Enabled: true}, - ContainerMemoryInactiveFile: MetricConfig{Enabled: true}, - ContainerMemoryMappedFile: MetricConfig{Enabled: true}, - ContainerMemoryPercent: MetricConfig{Enabled: true}, - ContainerMemoryPgfault: MetricConfig{Enabled: true}, - ContainerMemoryPgmajfault: MetricConfig{Enabled: true}, - ContainerMemoryPgpgin: MetricConfig{Enabled: true}, - ContainerMemoryPgpgout: MetricConfig{Enabled: true}, - ContainerMemoryRss: MetricConfig{Enabled: true}, - ContainerMemoryRssHuge: MetricConfig{Enabled: true}, - ContainerMemoryTotalActiveAnon: MetricConfig{Enabled: true}, - ContainerMemoryTotalActiveFile: MetricConfig{Enabled: true}, - ContainerMemoryTotalCache: MetricConfig{Enabled: true}, - ContainerMemoryTotalDirty: MetricConfig{Enabled: true}, - ContainerMemoryTotalInactiveAnon: MetricConfig{Enabled: true}, - ContainerMemoryTotalInactiveFile: MetricConfig{Enabled: 
true}, - ContainerMemoryTotalMappedFile: MetricConfig{Enabled: true}, - ContainerMemoryTotalPgfault: MetricConfig{Enabled: true}, - ContainerMemoryTotalPgmajfault: MetricConfig{Enabled: true}, - ContainerMemoryTotalPgpgin: MetricConfig{Enabled: true}, - ContainerMemoryTotalPgpgout: MetricConfig{Enabled: true}, - ContainerMemoryTotalRss: MetricConfig{Enabled: true}, - ContainerMemoryTotalRssHuge: MetricConfig{Enabled: true}, - ContainerMemoryTotalUnevictable: MetricConfig{Enabled: true}, - ContainerMemoryTotalWriteback: MetricConfig{Enabled: true}, - ContainerMemoryUnevictable: MetricConfig{Enabled: true}, - ContainerMemoryUsageLimit: MetricConfig{Enabled: true}, - ContainerMemoryUsageMax: MetricConfig{Enabled: true}, - ContainerMemoryUsageTotal: MetricConfig{Enabled: true}, - ContainerMemoryWriteback: MetricConfig{Enabled: true}, - ContainerNetworkIoUsageRxBytes: MetricConfig{Enabled: true}, - ContainerNetworkIoUsageRxDropped: MetricConfig{Enabled: true}, - ContainerNetworkIoUsageRxErrors: MetricConfig{Enabled: true}, - ContainerNetworkIoUsageRxPackets: MetricConfig{Enabled: true}, - ContainerNetworkIoUsageTxBytes: MetricConfig{Enabled: true}, - ContainerNetworkIoUsageTxDropped: MetricConfig{Enabled: true}, - ContainerNetworkIoUsageTxErrors: MetricConfig{Enabled: true}, - ContainerNetworkIoUsageTxPackets: MetricConfig{Enabled: true}, - ContainerPidsCount: MetricConfig{Enabled: true}, - ContainerPidsLimit: MetricConfig{Enabled: true}, - ContainerRestarts: MetricConfig{Enabled: true}, - ContainerUptime: MetricConfig{Enabled: true}, - }, - ResourceAttributes: ResourceAttributesConfig{ - ContainerCommandLine: ResourceAttributeConfig{Enabled: true}, - ContainerHostname: ResourceAttributeConfig{Enabled: true}, - ContainerID: ResourceAttributeConfig{Enabled: true}, - ContainerImageID: ResourceAttributeConfig{Enabled: true}, - ContainerImageName: ResourceAttributeConfig{Enabled: true}, - ContainerName: ResourceAttributeConfig{Enabled: true}, - ContainerRuntime: 
ResourceAttributeConfig{Enabled: true}, - }, - }, - }, - { - name: "none_set", - want: MetricsBuilderConfig{ - Metrics: MetricsConfig{ - ContainerBlockioIoMergedRecursive: MetricConfig{Enabled: false}, - ContainerBlockioIoQueuedRecursive: MetricConfig{Enabled: false}, - ContainerBlockioIoServiceBytesRecursive: MetricConfig{Enabled: false}, - ContainerBlockioIoServiceTimeRecursive: MetricConfig{Enabled: false}, - ContainerBlockioIoServicedRecursive: MetricConfig{Enabled: false}, - ContainerBlockioIoTimeRecursive: MetricConfig{Enabled: false}, - ContainerBlockioIoWaitTimeRecursive: MetricConfig{Enabled: false}, - ContainerBlockioSectorsRecursive: MetricConfig{Enabled: false}, - ContainerCPULimit: MetricConfig{Enabled: false}, - ContainerCPULogicalCount: MetricConfig{Enabled: false}, - ContainerCPUShares: MetricConfig{Enabled: false}, - ContainerCPUThrottlingDataPeriods: MetricConfig{Enabled: false}, - ContainerCPUThrottlingDataThrottledPeriods: MetricConfig{Enabled: false}, - ContainerCPUThrottlingDataThrottledTime: MetricConfig{Enabled: false}, - ContainerCPUUsageKernelmode: MetricConfig{Enabled: false}, - ContainerCPUUsagePercpu: MetricConfig{Enabled: false}, - ContainerCPUUsageSystem: MetricConfig{Enabled: false}, - ContainerCPUUsageTotal: MetricConfig{Enabled: false}, - ContainerCPUUsageUsermode: MetricConfig{Enabled: false}, - ContainerCPUUtilization: MetricConfig{Enabled: false}, - ContainerMemoryActiveAnon: MetricConfig{Enabled: false}, - ContainerMemoryActiveFile: MetricConfig{Enabled: false}, - ContainerMemoryAnon: MetricConfig{Enabled: false}, - ContainerMemoryCache: MetricConfig{Enabled: false}, - ContainerMemoryDirty: MetricConfig{Enabled: false}, - ContainerMemoryFails: MetricConfig{Enabled: false}, - ContainerMemoryFile: MetricConfig{Enabled: false}, - ContainerMemoryHierarchicalMemoryLimit: MetricConfig{Enabled: false}, - ContainerMemoryHierarchicalMemswLimit: MetricConfig{Enabled: false}, - ContainerMemoryInactiveAnon: MetricConfig{Enabled: false}, - 
ContainerMemoryInactiveFile: MetricConfig{Enabled: false}, - ContainerMemoryMappedFile: MetricConfig{Enabled: false}, - ContainerMemoryPercent: MetricConfig{Enabled: false}, - ContainerMemoryPgfault: MetricConfig{Enabled: false}, - ContainerMemoryPgmajfault: MetricConfig{Enabled: false}, - ContainerMemoryPgpgin: MetricConfig{Enabled: false}, - ContainerMemoryPgpgout: MetricConfig{Enabled: false}, - ContainerMemoryRss: MetricConfig{Enabled: false}, - ContainerMemoryRssHuge: MetricConfig{Enabled: false}, - ContainerMemoryTotalActiveAnon: MetricConfig{Enabled: false}, - ContainerMemoryTotalActiveFile: MetricConfig{Enabled: false}, - ContainerMemoryTotalCache: MetricConfig{Enabled: false}, - ContainerMemoryTotalDirty: MetricConfig{Enabled: false}, - ContainerMemoryTotalInactiveAnon: MetricConfig{Enabled: false}, - ContainerMemoryTotalInactiveFile: MetricConfig{Enabled: false}, - ContainerMemoryTotalMappedFile: MetricConfig{Enabled: false}, - ContainerMemoryTotalPgfault: MetricConfig{Enabled: false}, - ContainerMemoryTotalPgmajfault: MetricConfig{Enabled: false}, - ContainerMemoryTotalPgpgin: MetricConfig{Enabled: false}, - ContainerMemoryTotalPgpgout: MetricConfig{Enabled: false}, - ContainerMemoryTotalRss: MetricConfig{Enabled: false}, - ContainerMemoryTotalRssHuge: MetricConfig{Enabled: false}, - ContainerMemoryTotalUnevictable: MetricConfig{Enabled: false}, - ContainerMemoryTotalWriteback: MetricConfig{Enabled: false}, - ContainerMemoryUnevictable: MetricConfig{Enabled: false}, - ContainerMemoryUsageLimit: MetricConfig{Enabled: false}, - ContainerMemoryUsageMax: MetricConfig{Enabled: false}, - ContainerMemoryUsageTotal: MetricConfig{Enabled: false}, - ContainerMemoryWriteback: MetricConfig{Enabled: false}, - ContainerNetworkIoUsageRxBytes: MetricConfig{Enabled: false}, - ContainerNetworkIoUsageRxDropped: MetricConfig{Enabled: false}, - ContainerNetworkIoUsageRxErrors: MetricConfig{Enabled: false}, - ContainerNetworkIoUsageRxPackets: MetricConfig{Enabled: false}, - 
ContainerNetworkIoUsageTxBytes: MetricConfig{Enabled: false}, - ContainerNetworkIoUsageTxDropped: MetricConfig{Enabled: false}, - ContainerNetworkIoUsageTxErrors: MetricConfig{Enabled: false}, - ContainerNetworkIoUsageTxPackets: MetricConfig{Enabled: false}, - ContainerPidsCount: MetricConfig{Enabled: false}, - ContainerPidsLimit: MetricConfig{Enabled: false}, - ContainerRestarts: MetricConfig{Enabled: false}, - ContainerUptime: MetricConfig{Enabled: false}, - }, - ResourceAttributes: ResourceAttributesConfig{ - ContainerCommandLine: ResourceAttributeConfig{Enabled: false}, - ContainerHostname: ResourceAttributeConfig{Enabled: false}, - ContainerID: ResourceAttributeConfig{Enabled: false}, - ContainerImageID: ResourceAttributeConfig{Enabled: false}, - ContainerImageName: ResourceAttributeConfig{Enabled: false}, - ContainerName: ResourceAttributeConfig{Enabled: false}, - ContainerRuntime: ResourceAttributeConfig{Enabled: false}, - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cfg := loadMetricsBuilderConfig(t, tt.name) - if diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(MetricConfig{}, ResourceAttributeConfig{})); diff != "" { - t.Errorf("Config mismatch (-expected +actual):\n%s", diff) - } - }) - } -} - -func loadMetricsBuilderConfig(t *testing.T, name string) MetricsBuilderConfig { - cm, err := confmaptest.LoadConf(filepath.Join("testdata", "config.yaml")) - require.NoError(t, err) - sub, err := cm.Sub(name) - require.NoError(t, err) - cfg := DefaultMetricsBuilderConfig() - require.NoError(t, component.UnmarshalConfig(sub, &cfg)) - return cfg -} - -func TestResourceAttributesConfig(t *testing.T) { - tests := []struct { - name string - want ResourceAttributesConfig - }{ - { - name: "default", - want: DefaultResourceAttributesConfig(), - }, - { - name: "all_set", - want: ResourceAttributesConfig{ - ContainerCommandLine: ResourceAttributeConfig{Enabled: true}, - ContainerHostname: ResourceAttributeConfig{Enabled: 
true}, - ContainerID: ResourceAttributeConfig{Enabled: true}, - ContainerImageID: ResourceAttributeConfig{Enabled: true}, - ContainerImageName: ResourceAttributeConfig{Enabled: true}, - ContainerName: ResourceAttributeConfig{Enabled: true}, - ContainerRuntime: ResourceAttributeConfig{Enabled: true}, - }, - }, - { - name: "none_set", - want: ResourceAttributesConfig{ - ContainerCommandLine: ResourceAttributeConfig{Enabled: false}, - ContainerHostname: ResourceAttributeConfig{Enabled: false}, - ContainerID: ResourceAttributeConfig{Enabled: false}, - ContainerImageID: ResourceAttributeConfig{Enabled: false}, - ContainerImageName: ResourceAttributeConfig{Enabled: false}, - ContainerName: ResourceAttributeConfig{Enabled: false}, - ContainerRuntime: ResourceAttributeConfig{Enabled: false}, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cfg := loadResourceAttributesConfig(t, tt.name) - if diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(ResourceAttributeConfig{})); diff != "" { - t.Errorf("Config mismatch (-expected +actual):\n%s", diff) - } - }) - } -} - -func loadResourceAttributesConfig(t *testing.T, name string) ResourceAttributesConfig { - cm, err := confmaptest.LoadConf(filepath.Join("testdata", "config.yaml")) - require.NoError(t, err) - sub, err := cm.Sub(name) - require.NoError(t, err) - sub, err = sub.Sub("resource_attributes") - require.NoError(t, err) - cfg := DefaultResourceAttributesConfig() - require.NoError(t, component.UnmarshalConfig(sub, &cfg)) - return cfg -} diff --git a/receiver/dockerstatsreceiver/internal/metadata/generated_metrics.go b/receiver/dockerstatsreceiver/internal/metadata/generated_metrics.go deleted file mode 100644 index 376102e9b0d0..000000000000 --- a/receiver/dockerstatsreceiver/internal/metadata/generated_metrics.go +++ /dev/null @@ -1,4416 +0,0 @@ -// Code generated by mdatagen. DO NOT EDIT. 
- -package metadata - -import ( - "time" - - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/filter" - "go.opentelemetry.io/collector/pdata/pcommon" - "go.opentelemetry.io/collector/pdata/pmetric" - "go.opentelemetry.io/collector/receiver" - conventions "go.opentelemetry.io/collector/semconv/v1.6.1" -) - -type metricContainerBlockioIoMergedRecursive struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.blockio.io_merged_recursive metric with initial data. -func (m *metricContainerBlockioIoMergedRecursive) init() { - m.data.SetName("container.blockio.io_merged_recursive") - m.data.SetDescription("Number of bios/requests merged into requests belonging to this cgroup and its descendant cgroups (Only available with cgroups v1).") - m.data.SetUnit("{operations}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricContainerBlockioIoMergedRecursive) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("device_major", deviceMajorAttributeValue) - dp.Attributes().PutStr("device_minor", deviceMinorAttributeValue) - dp.Attributes().PutStr("operation", operationAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
-func (m *metricContainerBlockioIoMergedRecursive) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricContainerBlockioIoMergedRecursive) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerBlockioIoMergedRecursive(cfg MetricConfig) metricContainerBlockioIoMergedRecursive { - m := metricContainerBlockioIoMergedRecursive{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerBlockioIoQueuedRecursive struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.blockio.io_queued_recursive metric with initial data. 
-func (m *metricContainerBlockioIoQueuedRecursive) init() { - m.data.SetName("container.blockio.io_queued_recursive") - m.data.SetDescription("Number of requests queued up for this cgroup and its descendant cgroups (Only available with cgroups v1).") - m.data.SetUnit("{operations}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricContainerBlockioIoQueuedRecursive) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("device_major", deviceMajorAttributeValue) - dp.Attributes().PutStr("device_minor", deviceMinorAttributeValue) - dp.Attributes().PutStr("operation", operationAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerBlockioIoQueuedRecursive) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerBlockioIoQueuedRecursive) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerBlockioIoQueuedRecursive(cfg MetricConfig) metricContainerBlockioIoQueuedRecursive { - m := metricContainerBlockioIoQueuedRecursive{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerBlockioIoServiceBytesRecursive struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.blockio.io_service_bytes_recursive metric with initial data. -func (m *metricContainerBlockioIoServiceBytesRecursive) init() { - m.data.SetName("container.blockio.io_service_bytes_recursive") - m.data.SetDescription("Number of bytes transferred to/from the disk by the group and descendant groups.") - m.data.SetUnit("By") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricContainerBlockioIoServiceBytesRecursive) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("device_major", deviceMajorAttributeValue) - dp.Attributes().PutStr("device_minor", deviceMinorAttributeValue) - dp.Attributes().PutStr("operation", operationAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
-func (m *metricContainerBlockioIoServiceBytesRecursive) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricContainerBlockioIoServiceBytesRecursive) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerBlockioIoServiceBytesRecursive(cfg MetricConfig) metricContainerBlockioIoServiceBytesRecursive { - m := metricContainerBlockioIoServiceBytesRecursive{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerBlockioIoServiceTimeRecursive struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.blockio.io_service_time_recursive metric with initial data. 
-func (m *metricContainerBlockioIoServiceTimeRecursive) init() { - m.data.SetName("container.blockio.io_service_time_recursive") - m.data.SetDescription("Total amount of time in nanoseconds between request dispatch and request completion for the IOs done by this cgroup and descendant cgroups (Only available with cgroups v1).") - m.data.SetUnit("ns") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricContainerBlockioIoServiceTimeRecursive) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("device_major", deviceMajorAttributeValue) - dp.Attributes().PutStr("device_minor", deviceMinorAttributeValue) - dp.Attributes().PutStr("operation", operationAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerBlockioIoServiceTimeRecursive) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerBlockioIoServiceTimeRecursive) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerBlockioIoServiceTimeRecursive(cfg MetricConfig) metricContainerBlockioIoServiceTimeRecursive { - m := metricContainerBlockioIoServiceTimeRecursive{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerBlockioIoServicedRecursive struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.blockio.io_serviced_recursive metric with initial data. -func (m *metricContainerBlockioIoServicedRecursive) init() { - m.data.SetName("container.blockio.io_serviced_recursive") - m.data.SetDescription("Number of IOs (bio) issued to the disk by the group and descendant groups (Only available with cgroups v1).") - m.data.SetUnit("{operations}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricContainerBlockioIoServicedRecursive) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("device_major", deviceMajorAttributeValue) - dp.Attributes().PutStr("device_minor", deviceMinorAttributeValue) - dp.Attributes().PutStr("operation", operationAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for 
the slice capacity. -func (m *metricContainerBlockioIoServicedRecursive) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricContainerBlockioIoServicedRecursive) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerBlockioIoServicedRecursive(cfg MetricConfig) metricContainerBlockioIoServicedRecursive { - m := metricContainerBlockioIoServicedRecursive{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerBlockioIoTimeRecursive struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.blockio.io_time_recursive metric with initial data. 
-func (m *metricContainerBlockioIoTimeRecursive) init() { - m.data.SetName("container.blockio.io_time_recursive") - m.data.SetDescription("Disk time allocated to cgroup (and descendant cgroups) per device in milliseconds (Only available with cgroups v1).") - m.data.SetUnit("ms") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricContainerBlockioIoTimeRecursive) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("device_major", deviceMajorAttributeValue) - dp.Attributes().PutStr("device_minor", deviceMinorAttributeValue) - dp.Attributes().PutStr("operation", operationAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerBlockioIoTimeRecursive) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerBlockioIoTimeRecursive) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerBlockioIoTimeRecursive(cfg MetricConfig) metricContainerBlockioIoTimeRecursive { - m := metricContainerBlockioIoTimeRecursive{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerBlockioIoWaitTimeRecursive struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.blockio.io_wait_time_recursive metric with initial data. -func (m *metricContainerBlockioIoWaitTimeRecursive) init() { - m.data.SetName("container.blockio.io_wait_time_recursive") - m.data.SetDescription("Total amount of time the IOs for this cgroup (and descendant cgroups) spent waiting in the scheduler queues for service (Only available with cgroups v1).") - m.data.SetUnit("ns") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricContainerBlockioIoWaitTimeRecursive) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("device_major", deviceMajorAttributeValue) - dp.Attributes().PutStr("device_minor", deviceMinorAttributeValue) - dp.Attributes().PutStr("operation", operationAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be 
used for the slice capacity. -func (m *metricContainerBlockioIoWaitTimeRecursive) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricContainerBlockioIoWaitTimeRecursive) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerBlockioIoWaitTimeRecursive(cfg MetricConfig) metricContainerBlockioIoWaitTimeRecursive { - m := metricContainerBlockioIoWaitTimeRecursive{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerBlockioSectorsRecursive struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.blockio.sectors_recursive metric with initial data. 
-func (m *metricContainerBlockioSectorsRecursive) init() { - m.data.SetName("container.blockio.sectors_recursive") - m.data.SetDescription("Number of sectors transferred to/from disk by the group and descendant groups (Only available with cgroups v1).") - m.data.SetUnit("{sectors}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricContainerBlockioSectorsRecursive) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("device_major", deviceMajorAttributeValue) - dp.Attributes().PutStr("device_minor", deviceMinorAttributeValue) - dp.Attributes().PutStr("operation", operationAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerBlockioSectorsRecursive) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerBlockioSectorsRecursive) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerBlockioSectorsRecursive(cfg MetricConfig) metricContainerBlockioSectorsRecursive { - m := metricContainerBlockioSectorsRecursive{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerCPULimit struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.cpu.limit metric with initial data. -func (m *metricContainerCPULimit) init() { - m.data.SetName("container.cpu.limit") - m.data.SetDescription("CPU limit set for the container.") - m.data.SetUnit("{cpus}") - m.data.SetEmptyGauge() -} - -func (m *metricContainerCPULimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetDoubleValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerCPULimit) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerCPULimit) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerCPULimit(cfg MetricConfig) metricContainerCPULimit { - m := metricContainerCPULimit{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerCPULogicalCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.cpu.logical.count metric with initial data. -func (m *metricContainerCPULogicalCount) init() { - m.data.SetName("container.cpu.logical.count") - m.data.SetDescription("Number of cores available to the container.") - m.data.SetUnit("{cpus}") - m.data.SetEmptyGauge() -} - -func (m *metricContainerCPULogicalCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerCPULogicalCount) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerCPULogicalCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerCPULogicalCount(cfg MetricConfig) metricContainerCPULogicalCount { - m := metricContainerCPULogicalCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerCPUShares struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.cpu.shares metric with initial data. -func (m *metricContainerCPUShares) init() { - m.data.SetName("container.cpu.shares") - m.data.SetDescription("CPU shares set for the container.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() -} - -func (m *metricContainerCPUShares) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerCPUShares) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerCPUShares) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerCPUShares(cfg MetricConfig) metricContainerCPUShares { - m := metricContainerCPUShares{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerCPUThrottlingDataPeriods struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.cpu.throttling_data.periods metric with initial data. -func (m *metricContainerCPUThrottlingDataPeriods) init() { - m.data.SetName("container.cpu.throttling_data.periods") - m.data.SetDescription("Number of periods with throttling active.") - m.data.SetUnit("{periods}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerCPUThrottlingDataPeriods) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerCPUThrottlingDataPeriods) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerCPUThrottlingDataPeriods) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerCPUThrottlingDataPeriods(cfg MetricConfig) metricContainerCPUThrottlingDataPeriods { - m := metricContainerCPUThrottlingDataPeriods{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerCPUThrottlingDataThrottledPeriods struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.cpu.throttling_data.throttled_periods metric with initial data. -func (m *metricContainerCPUThrottlingDataThrottledPeriods) init() { - m.data.SetName("container.cpu.throttling_data.throttled_periods") - m.data.SetDescription("Number of periods when the container hits its throttling limit.") - m.data.SetUnit("{periods}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerCPUThrottlingDataThrottledPeriods) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerCPUThrottlingDataThrottledPeriods) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerCPUThrottlingDataThrottledPeriods) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerCPUThrottlingDataThrottledPeriods(cfg MetricConfig) metricContainerCPUThrottlingDataThrottledPeriods { - m := metricContainerCPUThrottlingDataThrottledPeriods{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerCPUThrottlingDataThrottledTime struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.cpu.throttling_data.throttled_time metric with initial data. -func (m *metricContainerCPUThrottlingDataThrottledTime) init() { - m.data.SetName("container.cpu.throttling_data.throttled_time") - m.data.SetDescription("Aggregate time the container was throttled.") - m.data.SetUnit("ns") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerCPUThrottlingDataThrottledTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerCPUThrottlingDataThrottledTime) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerCPUThrottlingDataThrottledTime) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerCPUThrottlingDataThrottledTime(cfg MetricConfig) metricContainerCPUThrottlingDataThrottledTime { - m := metricContainerCPUThrottlingDataThrottledTime{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerCPUUsageKernelmode struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.cpu.usage.kernelmode metric with initial data. -func (m *metricContainerCPUUsageKernelmode) init() { - m.data.SetName("container.cpu.usage.kernelmode") - m.data.SetDescription("Time spent by tasks of the cgroup in kernel mode (Linux). Time spent by all container processes in kernel mode (Windows).") - m.data.SetUnit("ns") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerCPUUsageKernelmode) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerCPUUsageKernelmode) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerCPUUsageKernelmode) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerCPUUsageKernelmode(cfg MetricConfig) metricContainerCPUUsageKernelmode { - m := metricContainerCPUUsageKernelmode{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerCPUUsagePercpu struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.cpu.usage.percpu metric with initial data. -func (m *metricContainerCPUUsagePercpu) init() { - m.data.SetName("container.cpu.usage.percpu") - m.data.SetDescription("Per-core CPU usage by the container (Only available with cgroups v1).") - m.data.SetUnit("ns") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricContainerCPUUsagePercpu) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, coreAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("core", coreAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerCPUUsagePercpu) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerCPUUsagePercpu) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerCPUUsagePercpu(cfg MetricConfig) metricContainerCPUUsagePercpu { - m := metricContainerCPUUsagePercpu{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerCPUUsageSystem struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.cpu.usage.system metric with initial data. -func (m *metricContainerCPUUsageSystem) init() { - m.data.SetName("container.cpu.usage.system") - m.data.SetDescription("System CPU usage, as reported by docker.") - m.data.SetUnit("ns") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerCPUUsageSystem) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerCPUUsageSystem) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerCPUUsageSystem) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerCPUUsageSystem(cfg MetricConfig) metricContainerCPUUsageSystem { - m := metricContainerCPUUsageSystem{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerCPUUsageTotal struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.cpu.usage.total metric with initial data. -func (m *metricContainerCPUUsageTotal) init() { - m.data.SetName("container.cpu.usage.total") - m.data.SetDescription("Total CPU time consumed.") - m.data.SetUnit("ns") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerCPUUsageTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerCPUUsageTotal) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerCPUUsageTotal) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerCPUUsageTotal(cfg MetricConfig) metricContainerCPUUsageTotal { - m := metricContainerCPUUsageTotal{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerCPUUsageUsermode struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.cpu.usage.usermode metric with initial data. -func (m *metricContainerCPUUsageUsermode) init() { - m.data.SetName("container.cpu.usage.usermode") - m.data.SetDescription("Time spent by tasks of the cgroup in user mode (Linux). Time spent by all container processes in user mode (Windows).") - m.data.SetUnit("ns") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerCPUUsageUsermode) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerCPUUsageUsermode) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerCPUUsageUsermode) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerCPUUsageUsermode(cfg MetricConfig) metricContainerCPUUsageUsermode { - m := metricContainerCPUUsageUsermode{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerCPUUtilization struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.cpu.utilization metric with initial data. -func (m *metricContainerCPUUtilization) init() { - m.data.SetName("container.cpu.utilization") - m.data.SetDescription("Percent of CPU used by the container.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() -} - -func (m *metricContainerCPUUtilization) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetDoubleValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerCPUUtilization) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerCPUUtilization) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerCPUUtilization(cfg MetricConfig) metricContainerCPUUtilization { - m := metricContainerCPUUtilization{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryActiveAnon struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.active_anon metric with initial data. -func (m *metricContainerMemoryActiveAnon) init() { - m.data.SetName("container.memory.active_anon") - m.data.SetDescription("The amount of anonymous memory that has been identified as active by the kernel.") - m.data.SetUnit("By") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerMemoryActiveAnon) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryActiveAnon) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryActiveAnon) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryActiveAnon(cfg MetricConfig) metricContainerMemoryActiveAnon { - m := metricContainerMemoryActiveAnon{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryActiveFile struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.active_file metric with initial data. -func (m *metricContainerMemoryActiveFile) init() { - m.data.SetName("container.memory.active_file") - m.data.SetDescription("Cache memory that has been identified as active by the kernel.") - m.data.SetUnit("By") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerMemoryActiveFile) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryActiveFile) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryActiveFile) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryActiveFile(cfg MetricConfig) metricContainerMemoryActiveFile { - m := metricContainerMemoryActiveFile{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryAnon struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.anon metric with initial data. -func (m *metricContainerMemoryAnon) init() { - m.data.SetName("container.memory.anon") - m.data.SetDescription("Amount of memory used in anonymous mappings such as brk(), sbrk(), and mmap(MAP_ANONYMOUS) (Only available with cgroups v2).") - m.data.SetUnit("By") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerMemoryAnon) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryAnon) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryAnon) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryAnon(cfg MetricConfig) metricContainerMemoryAnon { - m := metricContainerMemoryAnon{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryCache struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.cache metric with initial data. -func (m *metricContainerMemoryCache) init() { - m.data.SetName("container.memory.cache") - m.data.SetDescription("The amount of memory used by the processes of this control group that can be associated precisely with a block on a block device (Only available with cgroups v1).") - m.data.SetUnit("By") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerMemoryCache) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryCache) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryCache) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryCache(cfg MetricConfig) metricContainerMemoryCache { - m := metricContainerMemoryCache{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryDirty struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.dirty metric with initial data. -func (m *metricContainerMemoryDirty) init() { - m.data.SetName("container.memory.dirty") - m.data.SetDescription("Bytes that are waiting to get written back to the disk, from this cgroup (Only available with cgroups v1).") - m.data.SetUnit("By") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerMemoryDirty) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryDirty) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryDirty) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryDirty(cfg MetricConfig) metricContainerMemoryDirty { - m := metricContainerMemoryDirty{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryFails struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.fails metric with initial data. -func (m *metricContainerMemoryFails) init() { - m.data.SetName("container.memory.fails") - m.data.SetDescription("Number of times the memory limit was hit.") - m.data.SetUnit("{fails}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerMemoryFails) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryFails) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryFails) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryFails(cfg MetricConfig) metricContainerMemoryFails { - m := metricContainerMemoryFails{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryFile struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.file metric with initial data. -func (m *metricContainerMemoryFile) init() { - m.data.SetName("container.memory.file") - m.data.SetDescription("Amount of memory used to cache filesystem data, including tmpfs and shared memory (Only available with cgroups v2).") - m.data.SetUnit("By") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerMemoryFile) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryFile) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryFile) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryFile(cfg MetricConfig) metricContainerMemoryFile { - m := metricContainerMemoryFile{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryHierarchicalMemoryLimit struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.hierarchical_memory_limit metric with initial data. -func (m *metricContainerMemoryHierarchicalMemoryLimit) init() { - m.data.SetName("container.memory.hierarchical_memory_limit") - m.data.SetDescription("The maximum amount of physical memory that can be used by the processes of this control group (Only available with cgroups v1).") - m.data.SetUnit("By") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerMemoryHierarchicalMemoryLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryHierarchicalMemoryLimit) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryHierarchicalMemoryLimit) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryHierarchicalMemoryLimit(cfg MetricConfig) metricContainerMemoryHierarchicalMemoryLimit { - m := metricContainerMemoryHierarchicalMemoryLimit{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryHierarchicalMemswLimit struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.hierarchical_memsw_limit metric with initial data. -func (m *metricContainerMemoryHierarchicalMemswLimit) init() { - m.data.SetName("container.memory.hierarchical_memsw_limit") - m.data.SetDescription("The maximum amount of RAM + swap that can be used by the processes of this control group (Only available with cgroups v1).") - m.data.SetUnit("By") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerMemoryHierarchicalMemswLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryHierarchicalMemswLimit) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryHierarchicalMemswLimit) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryHierarchicalMemswLimit(cfg MetricConfig) metricContainerMemoryHierarchicalMemswLimit { - m := metricContainerMemoryHierarchicalMemswLimit{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryInactiveAnon struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.inactive_anon metric with initial data. -func (m *metricContainerMemoryInactiveAnon) init() { - m.data.SetName("container.memory.inactive_anon") - m.data.SetDescription("The amount of anonymous memory that has been identified as inactive by the kernel.") - m.data.SetUnit("By") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerMemoryInactiveAnon) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryInactiveAnon) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryInactiveAnon) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryInactiveAnon(cfg MetricConfig) metricContainerMemoryInactiveAnon { - m := metricContainerMemoryInactiveAnon{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryInactiveFile struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.inactive_file metric with initial data. -func (m *metricContainerMemoryInactiveFile) init() { - m.data.SetName("container.memory.inactive_file") - m.data.SetDescription("Cache memory that has been identified as inactive by the kernel.") - m.data.SetUnit("By") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerMemoryInactiveFile) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryInactiveFile) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryInactiveFile) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryInactiveFile(cfg MetricConfig) metricContainerMemoryInactiveFile { - m := metricContainerMemoryInactiveFile{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryMappedFile struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.mapped_file metric with initial data. -func (m *metricContainerMemoryMappedFile) init() { - m.data.SetName("container.memory.mapped_file") - m.data.SetDescription("Indicates the amount of memory mapped by the processes in the control group (Only available with cgroups v1).") - m.data.SetUnit("By") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerMemoryMappedFile) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryMappedFile) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryMappedFile) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryMappedFile(cfg MetricConfig) metricContainerMemoryMappedFile { - m := metricContainerMemoryMappedFile{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryPercent struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.percent metric with initial data. -func (m *metricContainerMemoryPercent) init() { - m.data.SetName("container.memory.percent") - m.data.SetDescription("Percentage of memory used.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() -} - -func (m *metricContainerMemoryPercent) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetDoubleValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryPercent) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryPercent) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryPercent(cfg MetricConfig) metricContainerMemoryPercent { - m := metricContainerMemoryPercent{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryPgfault struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.pgfault metric with initial data. -func (m *metricContainerMemoryPgfault) init() { - m.data.SetName("container.memory.pgfault") - m.data.SetDescription("Indicate the number of times that a process of the cgroup triggered a page fault.") - m.data.SetUnit("{faults}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerMemoryPgfault) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryPgfault) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryPgfault) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryPgfault(cfg MetricConfig) metricContainerMemoryPgfault { - m := metricContainerMemoryPgfault{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryPgmajfault struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.pgmajfault metric with initial data. -func (m *metricContainerMemoryPgmajfault) init() { - m.data.SetName("container.memory.pgmajfault") - m.data.SetDescription("Indicate the number of times that a process of the cgroup triggered a major fault.") - m.data.SetUnit("{faults}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerMemoryPgmajfault) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryPgmajfault) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryPgmajfault) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryPgmajfault(cfg MetricConfig) metricContainerMemoryPgmajfault { - m := metricContainerMemoryPgmajfault{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryPgpgin struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.pgpgin metric with initial data. -func (m *metricContainerMemoryPgpgin) init() { - m.data.SetName("container.memory.pgpgin") - m.data.SetDescription("Number of pages read from disk by the cgroup (Only available with cgroups v1).") - m.data.SetUnit("{operations}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerMemoryPgpgin) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryPgpgin) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryPgpgin) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryPgpgin(cfg MetricConfig) metricContainerMemoryPgpgin { - m := metricContainerMemoryPgpgin{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryPgpgout struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.pgpgout metric with initial data. -func (m *metricContainerMemoryPgpgout) init() { - m.data.SetName("container.memory.pgpgout") - m.data.SetDescription("Number of pages written to disk by the cgroup (Only available with cgroups v1).") - m.data.SetUnit("{operations}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerMemoryPgpgout) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryPgpgout) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryPgpgout) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryPgpgout(cfg MetricConfig) metricContainerMemoryPgpgout { - m := metricContainerMemoryPgpgout{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryRss struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.rss metric with initial data. -func (m *metricContainerMemoryRss) init() { - m.data.SetName("container.memory.rss") - m.data.SetDescription("The amount of memory that doesn’t correspond to anything on disk: stacks, heaps, and anonymous memory maps (Only available with cgroups v1).") - m.data.SetUnit("By") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerMemoryRss) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryRss) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryRss) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryRss(cfg MetricConfig) metricContainerMemoryRss { - m := metricContainerMemoryRss{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryRssHuge struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.rss_huge metric with initial data. -func (m *metricContainerMemoryRssHuge) init() { - m.data.SetName("container.memory.rss_huge") - m.data.SetDescription("Number of bytes of anonymous transparent hugepages in this cgroup (Only available with cgroups v1).") - m.data.SetUnit("By") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerMemoryRssHuge) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryRssHuge) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryRssHuge) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryRssHuge(cfg MetricConfig) metricContainerMemoryRssHuge { - m := metricContainerMemoryRssHuge{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryTotalActiveAnon struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.total_active_anon metric with initial data. -func (m *metricContainerMemoryTotalActiveAnon) init() { - m.data.SetName("container.memory.total_active_anon") - m.data.SetDescription("The amount of anonymous memory that has been identified as active by the kernel. Includes descendant cgroups (Only available with cgroups v1).") - m.data.SetUnit("By") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerMemoryTotalActiveAnon) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryTotalActiveAnon) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryTotalActiveAnon) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryTotalActiveAnon(cfg MetricConfig) metricContainerMemoryTotalActiveAnon { - m := metricContainerMemoryTotalActiveAnon{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryTotalActiveFile struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.total_active_file metric with initial data. -func (m *metricContainerMemoryTotalActiveFile) init() { - m.data.SetName("container.memory.total_active_file") - m.data.SetDescription("Cache memory that has been identified as active by the kernel. Includes descendant cgroups (Only available with cgroups v1).") - m.data.SetUnit("By") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerMemoryTotalActiveFile) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryTotalActiveFile) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryTotalActiveFile) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryTotalActiveFile(cfg MetricConfig) metricContainerMemoryTotalActiveFile { - m := metricContainerMemoryTotalActiveFile{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryTotalCache struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.total_cache metric with initial data. -func (m *metricContainerMemoryTotalCache) init() { - m.data.SetName("container.memory.total_cache") - m.data.SetDescription("Total amount of memory used by the processes of this cgroup (and descendants) that can be associated with a block on a block device. Also accounts for memory used by tmpfs (Only available with cgroups v1).") - m.data.SetUnit("By") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerMemoryTotalCache) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryTotalCache) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryTotalCache) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryTotalCache(cfg MetricConfig) metricContainerMemoryTotalCache { - m := metricContainerMemoryTotalCache{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryTotalDirty struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.total_dirty metric with initial data. -func (m *metricContainerMemoryTotalDirty) init() { - m.data.SetName("container.memory.total_dirty") - m.data.SetDescription("Bytes that are waiting to get written back to the disk, from this cgroup and descendants (Only available with cgroups v1).") - m.data.SetUnit("By") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerMemoryTotalDirty) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryTotalDirty) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryTotalDirty) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryTotalDirty(cfg MetricConfig) metricContainerMemoryTotalDirty { - m := metricContainerMemoryTotalDirty{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryTotalInactiveAnon struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.total_inactive_anon metric with initial data. -func (m *metricContainerMemoryTotalInactiveAnon) init() { - m.data.SetName("container.memory.total_inactive_anon") - m.data.SetDescription("The amount of anonymous memory that has been identified as inactive by the kernel. Includes descendant cgroups (Only available with cgroups v1).") - m.data.SetUnit("By") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerMemoryTotalInactiveAnon) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryTotalInactiveAnon) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryTotalInactiveAnon) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryTotalInactiveAnon(cfg MetricConfig) metricContainerMemoryTotalInactiveAnon { - m := metricContainerMemoryTotalInactiveAnon{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryTotalInactiveFile struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.total_inactive_file metric with initial data. -func (m *metricContainerMemoryTotalInactiveFile) init() { - m.data.SetName("container.memory.total_inactive_file") - m.data.SetDescription("Cache memory that has been identified as inactive by the kernel. Includes descendant cgroups (Only available with cgroups v1).") - m.data.SetUnit("By") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerMemoryTotalInactiveFile) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryTotalInactiveFile) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryTotalInactiveFile) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryTotalInactiveFile(cfg MetricConfig) metricContainerMemoryTotalInactiveFile { - m := metricContainerMemoryTotalInactiveFile{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryTotalMappedFile struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.total_mapped_file metric with initial data. -func (m *metricContainerMemoryTotalMappedFile) init() { - m.data.SetName("container.memory.total_mapped_file") - m.data.SetDescription("Indicates the amount of memory mapped by the processes in the control group and descendant groups (Only available with cgroups v1).") - m.data.SetUnit("By") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerMemoryTotalMappedFile) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryTotalMappedFile) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryTotalMappedFile) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryTotalMappedFile(cfg MetricConfig) metricContainerMemoryTotalMappedFile { - m := metricContainerMemoryTotalMappedFile{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryTotalPgfault struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.total_pgfault metric with initial data. -func (m *metricContainerMemoryTotalPgfault) init() { - m.data.SetName("container.memory.total_pgfault") - m.data.SetDescription("Indicate the number of times that a process of the cgroup (or descendant cgroups) triggered a page fault (Only available with cgroups v1).") - m.data.SetUnit("{faults}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerMemoryTotalPgfault) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryTotalPgfault) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryTotalPgfault) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryTotalPgfault(cfg MetricConfig) metricContainerMemoryTotalPgfault { - m := metricContainerMemoryTotalPgfault{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryTotalPgmajfault struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.total_pgmajfault metric with initial data. -func (m *metricContainerMemoryTotalPgmajfault) init() { - m.data.SetName("container.memory.total_pgmajfault") - m.data.SetDescription("Indicate the number of times that a process of the cgroup (or descendant cgroups) triggered a major fault (Only available with cgroups v1).") - m.data.SetUnit("{faults}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerMemoryTotalPgmajfault) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryTotalPgmajfault) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryTotalPgmajfault) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryTotalPgmajfault(cfg MetricConfig) metricContainerMemoryTotalPgmajfault { - m := metricContainerMemoryTotalPgmajfault{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryTotalPgpgin struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.total_pgpgin metric with initial data. -func (m *metricContainerMemoryTotalPgpgin) init() { - m.data.SetName("container.memory.total_pgpgin") - m.data.SetDescription("Number of pages read from disk by the cgroup and descendant groups (Only available with cgroups v1).") - m.data.SetUnit("{operations}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerMemoryTotalPgpgin) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryTotalPgpgin) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryTotalPgpgin) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryTotalPgpgin(cfg MetricConfig) metricContainerMemoryTotalPgpgin { - m := metricContainerMemoryTotalPgpgin{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryTotalPgpgout struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.total_pgpgout metric with initial data. -func (m *metricContainerMemoryTotalPgpgout) init() { - m.data.SetName("container.memory.total_pgpgout") - m.data.SetDescription("Number of pages written to disk by the cgroup and descendant groups (Only available with cgroups v1).") - m.data.SetUnit("{operations}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerMemoryTotalPgpgout) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryTotalPgpgout) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryTotalPgpgout) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryTotalPgpgout(cfg MetricConfig) metricContainerMemoryTotalPgpgout { - m := metricContainerMemoryTotalPgpgout{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryTotalRss struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.total_rss metric with initial data. -func (m *metricContainerMemoryTotalRss) init() { - m.data.SetName("container.memory.total_rss") - m.data.SetDescription("The amount of memory that doesn’t correspond to anything on disk: stacks, heaps, and anonymous memory maps. Includes descendant cgroups (Only available with cgroups v1).") - m.data.SetUnit("By") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerMemoryTotalRss) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryTotalRss) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryTotalRss) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryTotalRss(cfg MetricConfig) metricContainerMemoryTotalRss { - m := metricContainerMemoryTotalRss{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryTotalRssHuge struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.total_rss_huge metric with initial data. -func (m *metricContainerMemoryTotalRssHuge) init() { - m.data.SetName("container.memory.total_rss_huge") - m.data.SetDescription("Number of bytes of anonymous transparent hugepages in this cgroup and descendant cgroups (Only available with cgroups v1).") - m.data.SetUnit("By") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerMemoryTotalRssHuge) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryTotalRssHuge) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryTotalRssHuge) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryTotalRssHuge(cfg MetricConfig) metricContainerMemoryTotalRssHuge { - m := metricContainerMemoryTotalRssHuge{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryTotalUnevictable struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.total_unevictable metric with initial data. -func (m *metricContainerMemoryTotalUnevictable) init() { - m.data.SetName("container.memory.total_unevictable") - m.data.SetDescription("The amount of memory that cannot be reclaimed. Includes descendant cgroups (Only available with cgroups v1).") - m.data.SetUnit("By") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerMemoryTotalUnevictable) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryTotalUnevictable) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryTotalUnevictable) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryTotalUnevictable(cfg MetricConfig) metricContainerMemoryTotalUnevictable { - m := metricContainerMemoryTotalUnevictable{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryTotalWriteback struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.total_writeback metric with initial data. -func (m *metricContainerMemoryTotalWriteback) init() { - m.data.SetName("container.memory.total_writeback") - m.data.SetDescription("Number of bytes of file/anon cache that are queued for syncing to disk in this cgroup and descendants (Only available with cgroups v1).") - m.data.SetUnit("By") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerMemoryTotalWriteback) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryTotalWriteback) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryTotalWriteback) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryTotalWriteback(cfg MetricConfig) metricContainerMemoryTotalWriteback { - m := metricContainerMemoryTotalWriteback{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryUnevictable struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.unevictable metric with initial data. -func (m *metricContainerMemoryUnevictable) init() { - m.data.SetName("container.memory.unevictable") - m.data.SetDescription("The amount of memory that cannot be reclaimed.") - m.data.SetUnit("By") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerMemoryUnevictable) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryUnevictable) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryUnevictable) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryUnevictable(cfg MetricConfig) metricContainerMemoryUnevictable { - m := metricContainerMemoryUnevictable{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryUsageLimit struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.usage.limit metric with initial data. -func (m *metricContainerMemoryUsageLimit) init() { - m.data.SetName("container.memory.usage.limit") - m.data.SetDescription("Memory limit of the container.") - m.data.SetUnit("By") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerMemoryUsageLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryUsageLimit) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryUsageLimit) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryUsageLimit(cfg MetricConfig) metricContainerMemoryUsageLimit { - m := metricContainerMemoryUsageLimit{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryUsageMax struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.usage.max metric with initial data. -func (m *metricContainerMemoryUsageMax) init() { - m.data.SetName("container.memory.usage.max") - m.data.SetDescription("Maximum memory usage.") - m.data.SetUnit("By") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerMemoryUsageMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryUsageMax) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryUsageMax) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryUsageMax(cfg MetricConfig) metricContainerMemoryUsageMax { - m := metricContainerMemoryUsageMax{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryUsageTotal struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.usage.total metric with initial data. -func (m *metricContainerMemoryUsageTotal) init() { - m.data.SetName("container.memory.usage.total") - m.data.SetDescription("Memory usage of the container. This excludes the cache.") - m.data.SetUnit("By") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerMemoryUsageTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryUsageTotal) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryUsageTotal) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryUsageTotal(cfg MetricConfig) metricContainerMemoryUsageTotal { - m := metricContainerMemoryUsageTotal{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerMemoryWriteback struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.memory.writeback metric with initial data. -func (m *metricContainerMemoryWriteback) init() { - m.data.SetName("container.memory.writeback") - m.data.SetDescription("Number of bytes of file/anon cache that are queued for syncing to disk in this cgroup (Only available with cgroups v1).") - m.data.SetUnit("By") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerMemoryWriteback) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerMemoryWriteback) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerMemoryWriteback) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerMemoryWriteback(cfg MetricConfig) metricContainerMemoryWriteback { - m := metricContainerMemoryWriteback{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerNetworkIoUsageRxBytes struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.network.io.usage.rx_bytes metric with initial data. -func (m *metricContainerNetworkIoUsageRxBytes) init() { - m.data.SetName("container.network.io.usage.rx_bytes") - m.data.SetDescription("Bytes received by the container.") - m.data.SetUnit("By") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricContainerNetworkIoUsageRxBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("interface", interfaceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerNetworkIoUsageRxBytes) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerNetworkIoUsageRxBytes) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerNetworkIoUsageRxBytes(cfg MetricConfig) metricContainerNetworkIoUsageRxBytes { - m := metricContainerNetworkIoUsageRxBytes{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerNetworkIoUsageRxDropped struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.network.io.usage.rx_dropped metric with initial data. -func (m *metricContainerNetworkIoUsageRxDropped) init() { - m.data.SetName("container.network.io.usage.rx_dropped") - m.data.SetDescription("Incoming packets dropped.") - m.data.SetUnit("{packets}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricContainerNetworkIoUsageRxDropped) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("interface", interfaceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerNetworkIoUsageRxDropped) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerNetworkIoUsageRxDropped) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerNetworkIoUsageRxDropped(cfg MetricConfig) metricContainerNetworkIoUsageRxDropped { - m := metricContainerNetworkIoUsageRxDropped{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerNetworkIoUsageRxErrors struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.network.io.usage.rx_errors metric with initial data. -func (m *metricContainerNetworkIoUsageRxErrors) init() { - m.data.SetName("container.network.io.usage.rx_errors") - m.data.SetDescription("Received errors.") - m.data.SetUnit("{errors}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricContainerNetworkIoUsageRxErrors) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("interface", interfaceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerNetworkIoUsageRxErrors) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerNetworkIoUsageRxErrors) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerNetworkIoUsageRxErrors(cfg MetricConfig) metricContainerNetworkIoUsageRxErrors { - m := metricContainerNetworkIoUsageRxErrors{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerNetworkIoUsageRxPackets struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.network.io.usage.rx_packets metric with initial data. -func (m *metricContainerNetworkIoUsageRxPackets) init() { - m.data.SetName("container.network.io.usage.rx_packets") - m.data.SetDescription("Packets received.") - m.data.SetUnit("{packets}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricContainerNetworkIoUsageRxPackets) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("interface", interfaceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerNetworkIoUsageRxPackets) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerNetworkIoUsageRxPackets) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerNetworkIoUsageRxPackets(cfg MetricConfig) metricContainerNetworkIoUsageRxPackets { - m := metricContainerNetworkIoUsageRxPackets{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerNetworkIoUsageTxBytes struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.network.io.usage.tx_bytes metric with initial data. -func (m *metricContainerNetworkIoUsageTxBytes) init() { - m.data.SetName("container.network.io.usage.tx_bytes") - m.data.SetDescription("Bytes sent.") - m.data.SetUnit("By") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricContainerNetworkIoUsageTxBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("interface", interfaceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerNetworkIoUsageTxBytes) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerNetworkIoUsageTxBytes) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerNetworkIoUsageTxBytes(cfg MetricConfig) metricContainerNetworkIoUsageTxBytes { - m := metricContainerNetworkIoUsageTxBytes{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerNetworkIoUsageTxDropped struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.network.io.usage.tx_dropped metric with initial data. -func (m *metricContainerNetworkIoUsageTxDropped) init() { - m.data.SetName("container.network.io.usage.tx_dropped") - m.data.SetDescription("Outgoing packets dropped.") - m.data.SetUnit("{packets}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricContainerNetworkIoUsageTxDropped) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("interface", interfaceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerNetworkIoUsageTxDropped) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerNetworkIoUsageTxDropped) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerNetworkIoUsageTxDropped(cfg MetricConfig) metricContainerNetworkIoUsageTxDropped { - m := metricContainerNetworkIoUsageTxDropped{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerNetworkIoUsageTxErrors struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.network.io.usage.tx_errors metric with initial data. -func (m *metricContainerNetworkIoUsageTxErrors) init() { - m.data.SetName("container.network.io.usage.tx_errors") - m.data.SetDescription("Sent errors.") - m.data.SetUnit("{errors}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricContainerNetworkIoUsageTxErrors) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("interface", interfaceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerNetworkIoUsageTxErrors) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerNetworkIoUsageTxErrors) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerNetworkIoUsageTxErrors(cfg MetricConfig) metricContainerNetworkIoUsageTxErrors { - m := metricContainerNetworkIoUsageTxErrors{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerNetworkIoUsageTxPackets struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.network.io.usage.tx_packets metric with initial data. -func (m *metricContainerNetworkIoUsageTxPackets) init() { - m.data.SetName("container.network.io.usage.tx_packets") - m.data.SetDescription("Packets sent.") - m.data.SetUnit("{packets}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) -} - -func (m *metricContainerNetworkIoUsageTxPackets) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("interface", interfaceAttributeValue) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerNetworkIoUsageTxPackets) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerNetworkIoUsageTxPackets) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerNetworkIoUsageTxPackets(cfg MetricConfig) metricContainerNetworkIoUsageTxPackets { - m := metricContainerNetworkIoUsageTxPackets{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerPidsCount struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.pids.count metric with initial data. -func (m *metricContainerPidsCount) init() { - m.data.SetName("container.pids.count") - m.data.SetDescription("Number of pids in the container's cgroup.") - m.data.SetUnit("{pids}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerPidsCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerPidsCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerPidsCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerPidsCount(cfg MetricConfig) metricContainerPidsCount { - m := metricContainerPidsCount{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerPidsLimit struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.pids.limit metric with initial data. -func (m *metricContainerPidsLimit) init() { - m.data.SetName("container.pids.limit") - m.data.SetDescription("Maximum number of pids in the container's cgroup.") - m.data.SetUnit("{pids}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerPidsLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerPidsLimit) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerPidsLimit) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerPidsLimit(cfg MetricConfig) metricContainerPidsLimit { - m := metricContainerPidsLimit{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerRestarts struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.restarts metric with initial data. -func (m *metricContainerRestarts) init() { - m.data.SetName("container.restarts") - m.data.SetDescription("Number of restarts for the container.") - m.data.SetUnit("{restarts}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricContainerRestarts) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerRestarts) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerRestarts) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerRestarts(cfg MetricConfig) metricContainerRestarts { - m := metricContainerRestarts{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricContainerUptime struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills container.uptime metric with initial data. -func (m *metricContainerUptime) init() { - m.data.SetName("container.uptime") - m.data.SetDescription("Time elapsed since container start time.") - m.data.SetUnit("s") - m.data.SetEmptyGauge() -} - -func (m *metricContainerUptime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetDoubleValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricContainerUptime) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricContainerUptime) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricContainerUptime(cfg MetricConfig) metricContainerUptime { - m := metricContainerUptime{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -// MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations -// required to produce metric representation defined in metadata and user config. -type MetricsBuilder struct { - config MetricsBuilderConfig // config of the metrics builder. - startTime pcommon.Timestamp // start time that will be applied to all recorded data points. - metricsCapacity int // maximum observed number of metrics per resource. - metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. - buildInfo component.BuildInfo // contains version information. 
- resourceAttributeIncludeFilter map[string]filter.Filter - resourceAttributeExcludeFilter map[string]filter.Filter - metricContainerBlockioIoMergedRecursive metricContainerBlockioIoMergedRecursive - metricContainerBlockioIoQueuedRecursive metricContainerBlockioIoQueuedRecursive - metricContainerBlockioIoServiceBytesRecursive metricContainerBlockioIoServiceBytesRecursive - metricContainerBlockioIoServiceTimeRecursive metricContainerBlockioIoServiceTimeRecursive - metricContainerBlockioIoServicedRecursive metricContainerBlockioIoServicedRecursive - metricContainerBlockioIoTimeRecursive metricContainerBlockioIoTimeRecursive - metricContainerBlockioIoWaitTimeRecursive metricContainerBlockioIoWaitTimeRecursive - metricContainerBlockioSectorsRecursive metricContainerBlockioSectorsRecursive - metricContainerCPULimit metricContainerCPULimit - metricContainerCPULogicalCount metricContainerCPULogicalCount - metricContainerCPUShares metricContainerCPUShares - metricContainerCPUThrottlingDataPeriods metricContainerCPUThrottlingDataPeriods - metricContainerCPUThrottlingDataThrottledPeriods metricContainerCPUThrottlingDataThrottledPeriods - metricContainerCPUThrottlingDataThrottledTime metricContainerCPUThrottlingDataThrottledTime - metricContainerCPUUsageKernelmode metricContainerCPUUsageKernelmode - metricContainerCPUUsagePercpu metricContainerCPUUsagePercpu - metricContainerCPUUsageSystem metricContainerCPUUsageSystem - metricContainerCPUUsageTotal metricContainerCPUUsageTotal - metricContainerCPUUsageUsermode metricContainerCPUUsageUsermode - metricContainerCPUUtilization metricContainerCPUUtilization - metricContainerMemoryActiveAnon metricContainerMemoryActiveAnon - metricContainerMemoryActiveFile metricContainerMemoryActiveFile - metricContainerMemoryAnon metricContainerMemoryAnon - metricContainerMemoryCache metricContainerMemoryCache - metricContainerMemoryDirty metricContainerMemoryDirty - metricContainerMemoryFails metricContainerMemoryFails - 
metricContainerMemoryFile metricContainerMemoryFile - metricContainerMemoryHierarchicalMemoryLimit metricContainerMemoryHierarchicalMemoryLimit - metricContainerMemoryHierarchicalMemswLimit metricContainerMemoryHierarchicalMemswLimit - metricContainerMemoryInactiveAnon metricContainerMemoryInactiveAnon - metricContainerMemoryInactiveFile metricContainerMemoryInactiveFile - metricContainerMemoryMappedFile metricContainerMemoryMappedFile - metricContainerMemoryPercent metricContainerMemoryPercent - metricContainerMemoryPgfault metricContainerMemoryPgfault - metricContainerMemoryPgmajfault metricContainerMemoryPgmajfault - metricContainerMemoryPgpgin metricContainerMemoryPgpgin - metricContainerMemoryPgpgout metricContainerMemoryPgpgout - metricContainerMemoryRss metricContainerMemoryRss - metricContainerMemoryRssHuge metricContainerMemoryRssHuge - metricContainerMemoryTotalActiveAnon metricContainerMemoryTotalActiveAnon - metricContainerMemoryTotalActiveFile metricContainerMemoryTotalActiveFile - metricContainerMemoryTotalCache metricContainerMemoryTotalCache - metricContainerMemoryTotalDirty metricContainerMemoryTotalDirty - metricContainerMemoryTotalInactiveAnon metricContainerMemoryTotalInactiveAnon - metricContainerMemoryTotalInactiveFile metricContainerMemoryTotalInactiveFile - metricContainerMemoryTotalMappedFile metricContainerMemoryTotalMappedFile - metricContainerMemoryTotalPgfault metricContainerMemoryTotalPgfault - metricContainerMemoryTotalPgmajfault metricContainerMemoryTotalPgmajfault - metricContainerMemoryTotalPgpgin metricContainerMemoryTotalPgpgin - metricContainerMemoryTotalPgpgout metricContainerMemoryTotalPgpgout - metricContainerMemoryTotalRss metricContainerMemoryTotalRss - metricContainerMemoryTotalRssHuge metricContainerMemoryTotalRssHuge - metricContainerMemoryTotalUnevictable metricContainerMemoryTotalUnevictable - metricContainerMemoryTotalWriteback metricContainerMemoryTotalWriteback - metricContainerMemoryUnevictable 
metricContainerMemoryUnevictable - metricContainerMemoryUsageLimit metricContainerMemoryUsageLimit - metricContainerMemoryUsageMax metricContainerMemoryUsageMax - metricContainerMemoryUsageTotal metricContainerMemoryUsageTotal - metricContainerMemoryWriteback metricContainerMemoryWriteback - metricContainerNetworkIoUsageRxBytes metricContainerNetworkIoUsageRxBytes - metricContainerNetworkIoUsageRxDropped metricContainerNetworkIoUsageRxDropped - metricContainerNetworkIoUsageRxErrors metricContainerNetworkIoUsageRxErrors - metricContainerNetworkIoUsageRxPackets metricContainerNetworkIoUsageRxPackets - metricContainerNetworkIoUsageTxBytes metricContainerNetworkIoUsageTxBytes - metricContainerNetworkIoUsageTxDropped metricContainerNetworkIoUsageTxDropped - metricContainerNetworkIoUsageTxErrors metricContainerNetworkIoUsageTxErrors - metricContainerNetworkIoUsageTxPackets metricContainerNetworkIoUsageTxPackets - metricContainerPidsCount metricContainerPidsCount - metricContainerPidsLimit metricContainerPidsLimit - metricContainerRestarts metricContainerRestarts - metricContainerUptime metricContainerUptime -} - -// metricBuilderOption applies changes to default metrics builder. -type metricBuilderOption func(*MetricsBuilder) - -// WithStartTime sets startTime on the metrics builder. 
-func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { - return func(mb *MetricsBuilder) { - mb.startTime = startTime - } -} - -func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSettings, options ...metricBuilderOption) *MetricsBuilder { - mb := &MetricsBuilder{ - config: mbc, - startTime: pcommon.NewTimestampFromTime(time.Now()), - metricsBuffer: pmetric.NewMetrics(), - buildInfo: settings.BuildInfo, - metricContainerBlockioIoMergedRecursive: newMetricContainerBlockioIoMergedRecursive(mbc.Metrics.ContainerBlockioIoMergedRecursive), - metricContainerBlockioIoQueuedRecursive: newMetricContainerBlockioIoQueuedRecursive(mbc.Metrics.ContainerBlockioIoQueuedRecursive), - metricContainerBlockioIoServiceBytesRecursive: newMetricContainerBlockioIoServiceBytesRecursive(mbc.Metrics.ContainerBlockioIoServiceBytesRecursive), - metricContainerBlockioIoServiceTimeRecursive: newMetricContainerBlockioIoServiceTimeRecursive(mbc.Metrics.ContainerBlockioIoServiceTimeRecursive), - metricContainerBlockioIoServicedRecursive: newMetricContainerBlockioIoServicedRecursive(mbc.Metrics.ContainerBlockioIoServicedRecursive), - metricContainerBlockioIoTimeRecursive: newMetricContainerBlockioIoTimeRecursive(mbc.Metrics.ContainerBlockioIoTimeRecursive), - metricContainerBlockioIoWaitTimeRecursive: newMetricContainerBlockioIoWaitTimeRecursive(mbc.Metrics.ContainerBlockioIoWaitTimeRecursive), - metricContainerBlockioSectorsRecursive: newMetricContainerBlockioSectorsRecursive(mbc.Metrics.ContainerBlockioSectorsRecursive), - metricContainerCPULimit: newMetricContainerCPULimit(mbc.Metrics.ContainerCPULimit), - metricContainerCPULogicalCount: newMetricContainerCPULogicalCount(mbc.Metrics.ContainerCPULogicalCount), - metricContainerCPUShares: newMetricContainerCPUShares(mbc.Metrics.ContainerCPUShares), - metricContainerCPUThrottlingDataPeriods: newMetricContainerCPUThrottlingDataPeriods(mbc.Metrics.ContainerCPUThrottlingDataPeriods), - 
metricContainerCPUThrottlingDataThrottledPeriods: newMetricContainerCPUThrottlingDataThrottledPeriods(mbc.Metrics.ContainerCPUThrottlingDataThrottledPeriods), - metricContainerCPUThrottlingDataThrottledTime: newMetricContainerCPUThrottlingDataThrottledTime(mbc.Metrics.ContainerCPUThrottlingDataThrottledTime), - metricContainerCPUUsageKernelmode: newMetricContainerCPUUsageKernelmode(mbc.Metrics.ContainerCPUUsageKernelmode), - metricContainerCPUUsagePercpu: newMetricContainerCPUUsagePercpu(mbc.Metrics.ContainerCPUUsagePercpu), - metricContainerCPUUsageSystem: newMetricContainerCPUUsageSystem(mbc.Metrics.ContainerCPUUsageSystem), - metricContainerCPUUsageTotal: newMetricContainerCPUUsageTotal(mbc.Metrics.ContainerCPUUsageTotal), - metricContainerCPUUsageUsermode: newMetricContainerCPUUsageUsermode(mbc.Metrics.ContainerCPUUsageUsermode), - metricContainerCPUUtilization: newMetricContainerCPUUtilization(mbc.Metrics.ContainerCPUUtilization), - metricContainerMemoryActiveAnon: newMetricContainerMemoryActiveAnon(mbc.Metrics.ContainerMemoryActiveAnon), - metricContainerMemoryActiveFile: newMetricContainerMemoryActiveFile(mbc.Metrics.ContainerMemoryActiveFile), - metricContainerMemoryAnon: newMetricContainerMemoryAnon(mbc.Metrics.ContainerMemoryAnon), - metricContainerMemoryCache: newMetricContainerMemoryCache(mbc.Metrics.ContainerMemoryCache), - metricContainerMemoryDirty: newMetricContainerMemoryDirty(mbc.Metrics.ContainerMemoryDirty), - metricContainerMemoryFails: newMetricContainerMemoryFails(mbc.Metrics.ContainerMemoryFails), - metricContainerMemoryFile: newMetricContainerMemoryFile(mbc.Metrics.ContainerMemoryFile), - metricContainerMemoryHierarchicalMemoryLimit: newMetricContainerMemoryHierarchicalMemoryLimit(mbc.Metrics.ContainerMemoryHierarchicalMemoryLimit), - metricContainerMemoryHierarchicalMemswLimit: newMetricContainerMemoryHierarchicalMemswLimit(mbc.Metrics.ContainerMemoryHierarchicalMemswLimit), - metricContainerMemoryInactiveAnon: 
newMetricContainerMemoryInactiveAnon(mbc.Metrics.ContainerMemoryInactiveAnon), - metricContainerMemoryInactiveFile: newMetricContainerMemoryInactiveFile(mbc.Metrics.ContainerMemoryInactiveFile), - metricContainerMemoryMappedFile: newMetricContainerMemoryMappedFile(mbc.Metrics.ContainerMemoryMappedFile), - metricContainerMemoryPercent: newMetricContainerMemoryPercent(mbc.Metrics.ContainerMemoryPercent), - metricContainerMemoryPgfault: newMetricContainerMemoryPgfault(mbc.Metrics.ContainerMemoryPgfault), - metricContainerMemoryPgmajfault: newMetricContainerMemoryPgmajfault(mbc.Metrics.ContainerMemoryPgmajfault), - metricContainerMemoryPgpgin: newMetricContainerMemoryPgpgin(mbc.Metrics.ContainerMemoryPgpgin), - metricContainerMemoryPgpgout: newMetricContainerMemoryPgpgout(mbc.Metrics.ContainerMemoryPgpgout), - metricContainerMemoryRss: newMetricContainerMemoryRss(mbc.Metrics.ContainerMemoryRss), - metricContainerMemoryRssHuge: newMetricContainerMemoryRssHuge(mbc.Metrics.ContainerMemoryRssHuge), - metricContainerMemoryTotalActiveAnon: newMetricContainerMemoryTotalActiveAnon(mbc.Metrics.ContainerMemoryTotalActiveAnon), - metricContainerMemoryTotalActiveFile: newMetricContainerMemoryTotalActiveFile(mbc.Metrics.ContainerMemoryTotalActiveFile), - metricContainerMemoryTotalCache: newMetricContainerMemoryTotalCache(mbc.Metrics.ContainerMemoryTotalCache), - metricContainerMemoryTotalDirty: newMetricContainerMemoryTotalDirty(mbc.Metrics.ContainerMemoryTotalDirty), - metricContainerMemoryTotalInactiveAnon: newMetricContainerMemoryTotalInactiveAnon(mbc.Metrics.ContainerMemoryTotalInactiveAnon), - metricContainerMemoryTotalInactiveFile: newMetricContainerMemoryTotalInactiveFile(mbc.Metrics.ContainerMemoryTotalInactiveFile), - metricContainerMemoryTotalMappedFile: newMetricContainerMemoryTotalMappedFile(mbc.Metrics.ContainerMemoryTotalMappedFile), - metricContainerMemoryTotalPgfault: newMetricContainerMemoryTotalPgfault(mbc.Metrics.ContainerMemoryTotalPgfault), - 
metricContainerMemoryTotalPgmajfault: newMetricContainerMemoryTotalPgmajfault(mbc.Metrics.ContainerMemoryTotalPgmajfault), - metricContainerMemoryTotalPgpgin: newMetricContainerMemoryTotalPgpgin(mbc.Metrics.ContainerMemoryTotalPgpgin), - metricContainerMemoryTotalPgpgout: newMetricContainerMemoryTotalPgpgout(mbc.Metrics.ContainerMemoryTotalPgpgout), - metricContainerMemoryTotalRss: newMetricContainerMemoryTotalRss(mbc.Metrics.ContainerMemoryTotalRss), - metricContainerMemoryTotalRssHuge: newMetricContainerMemoryTotalRssHuge(mbc.Metrics.ContainerMemoryTotalRssHuge), - metricContainerMemoryTotalUnevictable: newMetricContainerMemoryTotalUnevictable(mbc.Metrics.ContainerMemoryTotalUnevictable), - metricContainerMemoryTotalWriteback: newMetricContainerMemoryTotalWriteback(mbc.Metrics.ContainerMemoryTotalWriteback), - metricContainerMemoryUnevictable: newMetricContainerMemoryUnevictable(mbc.Metrics.ContainerMemoryUnevictable), - metricContainerMemoryUsageLimit: newMetricContainerMemoryUsageLimit(mbc.Metrics.ContainerMemoryUsageLimit), - metricContainerMemoryUsageMax: newMetricContainerMemoryUsageMax(mbc.Metrics.ContainerMemoryUsageMax), - metricContainerMemoryUsageTotal: newMetricContainerMemoryUsageTotal(mbc.Metrics.ContainerMemoryUsageTotal), - metricContainerMemoryWriteback: newMetricContainerMemoryWriteback(mbc.Metrics.ContainerMemoryWriteback), - metricContainerNetworkIoUsageRxBytes: newMetricContainerNetworkIoUsageRxBytes(mbc.Metrics.ContainerNetworkIoUsageRxBytes), - metricContainerNetworkIoUsageRxDropped: newMetricContainerNetworkIoUsageRxDropped(mbc.Metrics.ContainerNetworkIoUsageRxDropped), - metricContainerNetworkIoUsageRxErrors: newMetricContainerNetworkIoUsageRxErrors(mbc.Metrics.ContainerNetworkIoUsageRxErrors), - metricContainerNetworkIoUsageRxPackets: newMetricContainerNetworkIoUsageRxPackets(mbc.Metrics.ContainerNetworkIoUsageRxPackets), - metricContainerNetworkIoUsageTxBytes: 
newMetricContainerNetworkIoUsageTxBytes(mbc.Metrics.ContainerNetworkIoUsageTxBytes), - metricContainerNetworkIoUsageTxDropped: newMetricContainerNetworkIoUsageTxDropped(mbc.Metrics.ContainerNetworkIoUsageTxDropped), - metricContainerNetworkIoUsageTxErrors: newMetricContainerNetworkIoUsageTxErrors(mbc.Metrics.ContainerNetworkIoUsageTxErrors), - metricContainerNetworkIoUsageTxPackets: newMetricContainerNetworkIoUsageTxPackets(mbc.Metrics.ContainerNetworkIoUsageTxPackets), - metricContainerPidsCount: newMetricContainerPidsCount(mbc.Metrics.ContainerPidsCount), - metricContainerPidsLimit: newMetricContainerPidsLimit(mbc.Metrics.ContainerPidsLimit), - metricContainerRestarts: newMetricContainerRestarts(mbc.Metrics.ContainerRestarts), - metricContainerUptime: newMetricContainerUptime(mbc.Metrics.ContainerUptime), - resourceAttributeIncludeFilter: make(map[string]filter.Filter), - resourceAttributeExcludeFilter: make(map[string]filter.Filter), - } - if mbc.ResourceAttributes.ContainerCommandLine.MetricsInclude != nil { - mb.resourceAttributeIncludeFilter["container.command_line"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerCommandLine.MetricsInclude) - } - if mbc.ResourceAttributes.ContainerCommandLine.MetricsExclude != nil { - mb.resourceAttributeExcludeFilter["container.command_line"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerCommandLine.MetricsExclude) - } - if mbc.ResourceAttributes.ContainerHostname.MetricsInclude != nil { - mb.resourceAttributeIncludeFilter["container.hostname"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerHostname.MetricsInclude) - } - if mbc.ResourceAttributes.ContainerHostname.MetricsExclude != nil { - mb.resourceAttributeExcludeFilter["container.hostname"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerHostname.MetricsExclude) - } - if mbc.ResourceAttributes.ContainerID.MetricsInclude != nil { - mb.resourceAttributeIncludeFilter["container.id"] = 
filter.CreateFilter(mbc.ResourceAttributes.ContainerID.MetricsInclude) - } - if mbc.ResourceAttributes.ContainerID.MetricsExclude != nil { - mb.resourceAttributeExcludeFilter["container.id"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerID.MetricsExclude) - } - if mbc.ResourceAttributes.ContainerImageID.MetricsInclude != nil { - mb.resourceAttributeIncludeFilter["container.image.id"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerImageID.MetricsInclude) - } - if mbc.ResourceAttributes.ContainerImageID.MetricsExclude != nil { - mb.resourceAttributeExcludeFilter["container.image.id"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerImageID.MetricsExclude) - } - if mbc.ResourceAttributes.ContainerImageName.MetricsInclude != nil { - mb.resourceAttributeIncludeFilter["container.image.name"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerImageName.MetricsInclude) - } - if mbc.ResourceAttributes.ContainerImageName.MetricsExclude != nil { - mb.resourceAttributeExcludeFilter["container.image.name"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerImageName.MetricsExclude) - } - if mbc.ResourceAttributes.ContainerName.MetricsInclude != nil { - mb.resourceAttributeIncludeFilter["container.name"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerName.MetricsInclude) - } - if mbc.ResourceAttributes.ContainerName.MetricsExclude != nil { - mb.resourceAttributeExcludeFilter["container.name"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerName.MetricsExclude) - } - if mbc.ResourceAttributes.ContainerRuntime.MetricsInclude != nil { - mb.resourceAttributeIncludeFilter["container.runtime"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerRuntime.MetricsInclude) - } - if mbc.ResourceAttributes.ContainerRuntime.MetricsExclude != nil { - mb.resourceAttributeExcludeFilter["container.runtime"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerRuntime.MetricsExclude) - } - - for _, op := range options { - op(mb) - } - return mb -} - -// 
NewResourceBuilder returns a new resource builder that should be used to build a resource associated with for the emitted metrics. -func (mb *MetricsBuilder) NewResourceBuilder() *ResourceBuilder { - return NewResourceBuilder(mb.config.ResourceAttributes) -} - -// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. -func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { - if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() { - mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len() - } -} - -// ResourceMetricsOption applies changes to provided resource metrics. -type ResourceMetricsOption func(pmetric.ResourceMetrics) - -// WithResource sets the provided resource on the emitted ResourceMetrics. -// It's recommended to use ResourceBuilder to create the resource. -func WithResource(res pcommon.Resource) ResourceMetricsOption { - return func(rm pmetric.ResourceMetrics) { - res.CopyTo(rm.Resource()) - } -} - -// WithStartTimeOverride overrides start time for all the resource metrics data points. -// This option should be only used if different start time has to be set on metrics coming from different resources. -func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption { - return func(rm pmetric.ResourceMetrics) { - var dps pmetric.NumberDataPointSlice - metrics := rm.ScopeMetrics().At(0).Metrics() - for i := 0; i < metrics.Len(); i++ { - switch metrics.At(i).Type() { - case pmetric.MetricTypeGauge: - dps = metrics.At(i).Gauge().DataPoints() - case pmetric.MetricTypeSum: - dps = metrics.At(i).Sum().DataPoints() - } - for j := 0; j < dps.Len(); j++ { - dps.At(j).SetStartTimestamp(start) - } - } - } -} - -// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for -// recording another set of data points as part of another resource. 
This function can be helpful when one scraper -// needs to emit metrics from several resources. Otherwise calling this function is not required, -// just `Emit` function can be called instead. -// Resource attributes should be provided as ResourceMetricsOption arguments. -func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { - rm := pmetric.NewResourceMetrics() - rm.SetSchemaUrl(conventions.SchemaURL) - ils := rm.ScopeMetrics().AppendEmpty() - ils.Scope().SetName("otelcol/dockerstatsreceiver") - ils.Scope().SetVersion(mb.buildInfo.Version) - ils.Metrics().EnsureCapacity(mb.metricsCapacity) - mb.metricContainerBlockioIoMergedRecursive.emit(ils.Metrics()) - mb.metricContainerBlockioIoQueuedRecursive.emit(ils.Metrics()) - mb.metricContainerBlockioIoServiceBytesRecursive.emit(ils.Metrics()) - mb.metricContainerBlockioIoServiceTimeRecursive.emit(ils.Metrics()) - mb.metricContainerBlockioIoServicedRecursive.emit(ils.Metrics()) - mb.metricContainerBlockioIoTimeRecursive.emit(ils.Metrics()) - mb.metricContainerBlockioIoWaitTimeRecursive.emit(ils.Metrics()) - mb.metricContainerBlockioSectorsRecursive.emit(ils.Metrics()) - mb.metricContainerCPULimit.emit(ils.Metrics()) - mb.metricContainerCPULogicalCount.emit(ils.Metrics()) - mb.metricContainerCPUShares.emit(ils.Metrics()) - mb.metricContainerCPUThrottlingDataPeriods.emit(ils.Metrics()) - mb.metricContainerCPUThrottlingDataThrottledPeriods.emit(ils.Metrics()) - mb.metricContainerCPUThrottlingDataThrottledTime.emit(ils.Metrics()) - mb.metricContainerCPUUsageKernelmode.emit(ils.Metrics()) - mb.metricContainerCPUUsagePercpu.emit(ils.Metrics()) - mb.metricContainerCPUUsageSystem.emit(ils.Metrics()) - mb.metricContainerCPUUsageTotal.emit(ils.Metrics()) - mb.metricContainerCPUUsageUsermode.emit(ils.Metrics()) - mb.metricContainerCPUUtilization.emit(ils.Metrics()) - mb.metricContainerMemoryActiveAnon.emit(ils.Metrics()) - mb.metricContainerMemoryActiveFile.emit(ils.Metrics()) - 
mb.metricContainerMemoryAnon.emit(ils.Metrics()) - mb.metricContainerMemoryCache.emit(ils.Metrics()) - mb.metricContainerMemoryDirty.emit(ils.Metrics()) - mb.metricContainerMemoryFails.emit(ils.Metrics()) - mb.metricContainerMemoryFile.emit(ils.Metrics()) - mb.metricContainerMemoryHierarchicalMemoryLimit.emit(ils.Metrics()) - mb.metricContainerMemoryHierarchicalMemswLimit.emit(ils.Metrics()) - mb.metricContainerMemoryInactiveAnon.emit(ils.Metrics()) - mb.metricContainerMemoryInactiveFile.emit(ils.Metrics()) - mb.metricContainerMemoryMappedFile.emit(ils.Metrics()) - mb.metricContainerMemoryPercent.emit(ils.Metrics()) - mb.metricContainerMemoryPgfault.emit(ils.Metrics()) - mb.metricContainerMemoryPgmajfault.emit(ils.Metrics()) - mb.metricContainerMemoryPgpgin.emit(ils.Metrics()) - mb.metricContainerMemoryPgpgout.emit(ils.Metrics()) - mb.metricContainerMemoryRss.emit(ils.Metrics()) - mb.metricContainerMemoryRssHuge.emit(ils.Metrics()) - mb.metricContainerMemoryTotalActiveAnon.emit(ils.Metrics()) - mb.metricContainerMemoryTotalActiveFile.emit(ils.Metrics()) - mb.metricContainerMemoryTotalCache.emit(ils.Metrics()) - mb.metricContainerMemoryTotalDirty.emit(ils.Metrics()) - mb.metricContainerMemoryTotalInactiveAnon.emit(ils.Metrics()) - mb.metricContainerMemoryTotalInactiveFile.emit(ils.Metrics()) - mb.metricContainerMemoryTotalMappedFile.emit(ils.Metrics()) - mb.metricContainerMemoryTotalPgfault.emit(ils.Metrics()) - mb.metricContainerMemoryTotalPgmajfault.emit(ils.Metrics()) - mb.metricContainerMemoryTotalPgpgin.emit(ils.Metrics()) - mb.metricContainerMemoryTotalPgpgout.emit(ils.Metrics()) - mb.metricContainerMemoryTotalRss.emit(ils.Metrics()) - mb.metricContainerMemoryTotalRssHuge.emit(ils.Metrics()) - mb.metricContainerMemoryTotalUnevictable.emit(ils.Metrics()) - mb.metricContainerMemoryTotalWriteback.emit(ils.Metrics()) - mb.metricContainerMemoryUnevictable.emit(ils.Metrics()) - mb.metricContainerMemoryUsageLimit.emit(ils.Metrics()) - 
mb.metricContainerMemoryUsageMax.emit(ils.Metrics()) - mb.metricContainerMemoryUsageTotal.emit(ils.Metrics()) - mb.metricContainerMemoryWriteback.emit(ils.Metrics()) - mb.metricContainerNetworkIoUsageRxBytes.emit(ils.Metrics()) - mb.metricContainerNetworkIoUsageRxDropped.emit(ils.Metrics()) - mb.metricContainerNetworkIoUsageRxErrors.emit(ils.Metrics()) - mb.metricContainerNetworkIoUsageRxPackets.emit(ils.Metrics()) - mb.metricContainerNetworkIoUsageTxBytes.emit(ils.Metrics()) - mb.metricContainerNetworkIoUsageTxDropped.emit(ils.Metrics()) - mb.metricContainerNetworkIoUsageTxErrors.emit(ils.Metrics()) - mb.metricContainerNetworkIoUsageTxPackets.emit(ils.Metrics()) - mb.metricContainerPidsCount.emit(ils.Metrics()) - mb.metricContainerPidsLimit.emit(ils.Metrics()) - mb.metricContainerRestarts.emit(ils.Metrics()) - mb.metricContainerUptime.emit(ils.Metrics()) - - for _, op := range rmo { - op(rm) - } - for attr, filter := range mb.resourceAttributeIncludeFilter { - if val, ok := rm.Resource().Attributes().Get(attr); ok && !filter.Matches(val.AsString()) { - return - } - } - for attr, filter := range mb.resourceAttributeExcludeFilter { - if val, ok := rm.Resource().Attributes().Get(attr); ok && filter.Matches(val.AsString()) { - return - } - } - - if ils.Metrics().Len() > 0 { - mb.updateCapacity(rm) - rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty()) - } -} - -// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for -// recording another set of metrics. This function will be responsible for applying all the transformations required to -// produce metric representation defined in metadata and user config, e.g. delta or cumulative. -func (mb *MetricsBuilder) Emit(rmo ...ResourceMetricsOption) pmetric.Metrics { - mb.EmitForResource(rmo...) 
- metrics := mb.metricsBuffer - mb.metricsBuffer = pmetric.NewMetrics() - return metrics -} - -// RecordContainerBlockioIoMergedRecursiveDataPoint adds a data point to container.blockio.io_merged_recursive metric. -func (mb *MetricsBuilder) RecordContainerBlockioIoMergedRecursiveDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) { - mb.metricContainerBlockioIoMergedRecursive.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue, operationAttributeValue) -} - -// RecordContainerBlockioIoQueuedRecursiveDataPoint adds a data point to container.blockio.io_queued_recursive metric. -func (mb *MetricsBuilder) RecordContainerBlockioIoQueuedRecursiveDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) { - mb.metricContainerBlockioIoQueuedRecursive.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue, operationAttributeValue) -} - -// RecordContainerBlockioIoServiceBytesRecursiveDataPoint adds a data point to container.blockio.io_service_bytes_recursive metric. -func (mb *MetricsBuilder) RecordContainerBlockioIoServiceBytesRecursiveDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) { - mb.metricContainerBlockioIoServiceBytesRecursive.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue, operationAttributeValue) -} - -// RecordContainerBlockioIoServiceTimeRecursiveDataPoint adds a data point to container.blockio.io_service_time_recursive metric. 
-func (mb *MetricsBuilder) RecordContainerBlockioIoServiceTimeRecursiveDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) { - mb.metricContainerBlockioIoServiceTimeRecursive.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue, operationAttributeValue) -} - -// RecordContainerBlockioIoServicedRecursiveDataPoint adds a data point to container.blockio.io_serviced_recursive metric. -func (mb *MetricsBuilder) RecordContainerBlockioIoServicedRecursiveDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) { - mb.metricContainerBlockioIoServicedRecursive.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue, operationAttributeValue) -} - -// RecordContainerBlockioIoTimeRecursiveDataPoint adds a data point to container.blockio.io_time_recursive metric. -func (mb *MetricsBuilder) RecordContainerBlockioIoTimeRecursiveDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) { - mb.metricContainerBlockioIoTimeRecursive.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue, operationAttributeValue) -} - -// RecordContainerBlockioIoWaitTimeRecursiveDataPoint adds a data point to container.blockio.io_wait_time_recursive metric. 
-func (mb *MetricsBuilder) RecordContainerBlockioIoWaitTimeRecursiveDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) { - mb.metricContainerBlockioIoWaitTimeRecursive.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue, operationAttributeValue) -} - -// RecordContainerBlockioSectorsRecursiveDataPoint adds a data point to container.blockio.sectors_recursive metric. -func (mb *MetricsBuilder) RecordContainerBlockioSectorsRecursiveDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) { - mb.metricContainerBlockioSectorsRecursive.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue, operationAttributeValue) -} - -// RecordContainerCPULimitDataPoint adds a data point to container.cpu.limit metric. -func (mb *MetricsBuilder) RecordContainerCPULimitDataPoint(ts pcommon.Timestamp, val float64) { - mb.metricContainerCPULimit.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerCPULogicalCountDataPoint adds a data point to container.cpu.logical.count metric. -func (mb *MetricsBuilder) RecordContainerCPULogicalCountDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerCPULogicalCount.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerCPUSharesDataPoint adds a data point to container.cpu.shares metric. -func (mb *MetricsBuilder) RecordContainerCPUSharesDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerCPUShares.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerCPUThrottlingDataPeriodsDataPoint adds a data point to container.cpu.throttling_data.periods metric. 
-func (mb *MetricsBuilder) RecordContainerCPUThrottlingDataPeriodsDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerCPUThrottlingDataPeriods.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerCPUThrottlingDataThrottledPeriodsDataPoint adds a data point to container.cpu.throttling_data.throttled_periods metric. -func (mb *MetricsBuilder) RecordContainerCPUThrottlingDataThrottledPeriodsDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerCPUThrottlingDataThrottledPeriods.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerCPUThrottlingDataThrottledTimeDataPoint adds a data point to container.cpu.throttling_data.throttled_time metric. -func (mb *MetricsBuilder) RecordContainerCPUThrottlingDataThrottledTimeDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerCPUThrottlingDataThrottledTime.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerCPUUsageKernelmodeDataPoint adds a data point to container.cpu.usage.kernelmode metric. -func (mb *MetricsBuilder) RecordContainerCPUUsageKernelmodeDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerCPUUsageKernelmode.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerCPUUsagePercpuDataPoint adds a data point to container.cpu.usage.percpu metric. -func (mb *MetricsBuilder) RecordContainerCPUUsagePercpuDataPoint(ts pcommon.Timestamp, val int64, coreAttributeValue string) { - mb.metricContainerCPUUsagePercpu.recordDataPoint(mb.startTime, ts, val, coreAttributeValue) -} - -// RecordContainerCPUUsageSystemDataPoint adds a data point to container.cpu.usage.system metric. -func (mb *MetricsBuilder) RecordContainerCPUUsageSystemDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerCPUUsageSystem.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerCPUUsageTotalDataPoint adds a data point to container.cpu.usage.total metric. 
-func (mb *MetricsBuilder) RecordContainerCPUUsageTotalDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerCPUUsageTotal.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerCPUUsageUsermodeDataPoint adds a data point to container.cpu.usage.usermode metric. -func (mb *MetricsBuilder) RecordContainerCPUUsageUsermodeDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerCPUUsageUsermode.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerCPUUtilizationDataPoint adds a data point to container.cpu.utilization metric. -func (mb *MetricsBuilder) RecordContainerCPUUtilizationDataPoint(ts pcommon.Timestamp, val float64) { - mb.metricContainerCPUUtilization.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryActiveAnonDataPoint adds a data point to container.memory.active_anon metric. -func (mb *MetricsBuilder) RecordContainerMemoryActiveAnonDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerMemoryActiveAnon.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryActiveFileDataPoint adds a data point to container.memory.active_file metric. -func (mb *MetricsBuilder) RecordContainerMemoryActiveFileDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerMemoryActiveFile.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryAnonDataPoint adds a data point to container.memory.anon metric. -func (mb *MetricsBuilder) RecordContainerMemoryAnonDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerMemoryAnon.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryCacheDataPoint adds a data point to container.memory.cache metric. -func (mb *MetricsBuilder) RecordContainerMemoryCacheDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerMemoryCache.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryDirtyDataPoint adds a data point to container.memory.dirty metric. 
-func (mb *MetricsBuilder) RecordContainerMemoryDirtyDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerMemoryDirty.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryFailsDataPoint adds a data point to container.memory.fails metric. -func (mb *MetricsBuilder) RecordContainerMemoryFailsDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerMemoryFails.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryFileDataPoint adds a data point to container.memory.file metric. -func (mb *MetricsBuilder) RecordContainerMemoryFileDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerMemoryFile.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryHierarchicalMemoryLimitDataPoint adds a data point to container.memory.hierarchical_memory_limit metric. -func (mb *MetricsBuilder) RecordContainerMemoryHierarchicalMemoryLimitDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerMemoryHierarchicalMemoryLimit.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryHierarchicalMemswLimitDataPoint adds a data point to container.memory.hierarchical_memsw_limit metric. -func (mb *MetricsBuilder) RecordContainerMemoryHierarchicalMemswLimitDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerMemoryHierarchicalMemswLimit.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryInactiveAnonDataPoint adds a data point to container.memory.inactive_anon metric. -func (mb *MetricsBuilder) RecordContainerMemoryInactiveAnonDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerMemoryInactiveAnon.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryInactiveFileDataPoint adds a data point to container.memory.inactive_file metric. 
-func (mb *MetricsBuilder) RecordContainerMemoryInactiveFileDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerMemoryInactiveFile.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryMappedFileDataPoint adds a data point to container.memory.mapped_file metric. -func (mb *MetricsBuilder) RecordContainerMemoryMappedFileDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerMemoryMappedFile.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryPercentDataPoint adds a data point to container.memory.percent metric. -func (mb *MetricsBuilder) RecordContainerMemoryPercentDataPoint(ts pcommon.Timestamp, val float64) { - mb.metricContainerMemoryPercent.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryPgfaultDataPoint adds a data point to container.memory.pgfault metric. -func (mb *MetricsBuilder) RecordContainerMemoryPgfaultDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerMemoryPgfault.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryPgmajfaultDataPoint adds a data point to container.memory.pgmajfault metric. -func (mb *MetricsBuilder) RecordContainerMemoryPgmajfaultDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerMemoryPgmajfault.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryPgpginDataPoint adds a data point to container.memory.pgpgin metric. -func (mb *MetricsBuilder) RecordContainerMemoryPgpginDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerMemoryPgpgin.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryPgpgoutDataPoint adds a data point to container.memory.pgpgout metric. -func (mb *MetricsBuilder) RecordContainerMemoryPgpgoutDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerMemoryPgpgout.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryRssDataPoint adds a data point to container.memory.rss metric. 
-func (mb *MetricsBuilder) RecordContainerMemoryRssDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerMemoryRss.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryRssHugeDataPoint adds a data point to container.memory.rss_huge metric. -func (mb *MetricsBuilder) RecordContainerMemoryRssHugeDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerMemoryRssHuge.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryTotalActiveAnonDataPoint adds a data point to container.memory.total_active_anon metric. -func (mb *MetricsBuilder) RecordContainerMemoryTotalActiveAnonDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerMemoryTotalActiveAnon.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryTotalActiveFileDataPoint adds a data point to container.memory.total_active_file metric. -func (mb *MetricsBuilder) RecordContainerMemoryTotalActiveFileDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerMemoryTotalActiveFile.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryTotalCacheDataPoint adds a data point to container.memory.total_cache metric. -func (mb *MetricsBuilder) RecordContainerMemoryTotalCacheDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerMemoryTotalCache.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryTotalDirtyDataPoint adds a data point to container.memory.total_dirty metric. -func (mb *MetricsBuilder) RecordContainerMemoryTotalDirtyDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerMemoryTotalDirty.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryTotalInactiveAnonDataPoint adds a data point to container.memory.total_inactive_anon metric. 
-func (mb *MetricsBuilder) RecordContainerMemoryTotalInactiveAnonDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerMemoryTotalInactiveAnon.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryTotalInactiveFileDataPoint adds a data point to container.memory.total_inactive_file metric. -func (mb *MetricsBuilder) RecordContainerMemoryTotalInactiveFileDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerMemoryTotalInactiveFile.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryTotalMappedFileDataPoint adds a data point to container.memory.total_mapped_file metric. -func (mb *MetricsBuilder) RecordContainerMemoryTotalMappedFileDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerMemoryTotalMappedFile.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryTotalPgfaultDataPoint adds a data point to container.memory.total_pgfault metric. -func (mb *MetricsBuilder) RecordContainerMemoryTotalPgfaultDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerMemoryTotalPgfault.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryTotalPgmajfaultDataPoint adds a data point to container.memory.total_pgmajfault metric. -func (mb *MetricsBuilder) RecordContainerMemoryTotalPgmajfaultDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerMemoryTotalPgmajfault.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryTotalPgpginDataPoint adds a data point to container.memory.total_pgpgin metric. -func (mb *MetricsBuilder) RecordContainerMemoryTotalPgpginDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerMemoryTotalPgpgin.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryTotalPgpgoutDataPoint adds a data point to container.memory.total_pgpgout metric. 
-func (mb *MetricsBuilder) RecordContainerMemoryTotalPgpgoutDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerMemoryTotalPgpgout.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryTotalRssDataPoint adds a data point to container.memory.total_rss metric. -func (mb *MetricsBuilder) RecordContainerMemoryTotalRssDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerMemoryTotalRss.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryTotalRssHugeDataPoint adds a data point to container.memory.total_rss_huge metric. -func (mb *MetricsBuilder) RecordContainerMemoryTotalRssHugeDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerMemoryTotalRssHuge.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryTotalUnevictableDataPoint adds a data point to container.memory.total_unevictable metric. -func (mb *MetricsBuilder) RecordContainerMemoryTotalUnevictableDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerMemoryTotalUnevictable.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryTotalWritebackDataPoint adds a data point to container.memory.total_writeback metric. -func (mb *MetricsBuilder) RecordContainerMemoryTotalWritebackDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerMemoryTotalWriteback.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryUnevictableDataPoint adds a data point to container.memory.unevictable metric. -func (mb *MetricsBuilder) RecordContainerMemoryUnevictableDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerMemoryUnevictable.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryUsageLimitDataPoint adds a data point to container.memory.usage.limit metric. 
-func (mb *MetricsBuilder) RecordContainerMemoryUsageLimitDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerMemoryUsageLimit.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryUsageMaxDataPoint adds a data point to container.memory.usage.max metric. -func (mb *MetricsBuilder) RecordContainerMemoryUsageMaxDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerMemoryUsageMax.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryUsageTotalDataPoint adds a data point to container.memory.usage.total metric. -func (mb *MetricsBuilder) RecordContainerMemoryUsageTotalDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerMemoryUsageTotal.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerMemoryWritebackDataPoint adds a data point to container.memory.writeback metric. -func (mb *MetricsBuilder) RecordContainerMemoryWritebackDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerMemoryWriteback.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerNetworkIoUsageRxBytesDataPoint adds a data point to container.network.io.usage.rx_bytes metric. -func (mb *MetricsBuilder) RecordContainerNetworkIoUsageRxBytesDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string) { - mb.metricContainerNetworkIoUsageRxBytes.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue) -} - -// RecordContainerNetworkIoUsageRxDroppedDataPoint adds a data point to container.network.io.usage.rx_dropped metric. -func (mb *MetricsBuilder) RecordContainerNetworkIoUsageRxDroppedDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string) { - mb.metricContainerNetworkIoUsageRxDropped.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue) -} - -// RecordContainerNetworkIoUsageRxErrorsDataPoint adds a data point to container.network.io.usage.rx_errors metric. 
-func (mb *MetricsBuilder) RecordContainerNetworkIoUsageRxErrorsDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string) { - mb.metricContainerNetworkIoUsageRxErrors.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue) -} - -// RecordContainerNetworkIoUsageRxPacketsDataPoint adds a data point to container.network.io.usage.rx_packets metric. -func (mb *MetricsBuilder) RecordContainerNetworkIoUsageRxPacketsDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string) { - mb.metricContainerNetworkIoUsageRxPackets.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue) -} - -// RecordContainerNetworkIoUsageTxBytesDataPoint adds a data point to container.network.io.usage.tx_bytes metric. -func (mb *MetricsBuilder) RecordContainerNetworkIoUsageTxBytesDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string) { - mb.metricContainerNetworkIoUsageTxBytes.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue) -} - -// RecordContainerNetworkIoUsageTxDroppedDataPoint adds a data point to container.network.io.usage.tx_dropped metric. -func (mb *MetricsBuilder) RecordContainerNetworkIoUsageTxDroppedDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string) { - mb.metricContainerNetworkIoUsageTxDropped.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue) -} - -// RecordContainerNetworkIoUsageTxErrorsDataPoint adds a data point to container.network.io.usage.tx_errors metric. -func (mb *MetricsBuilder) RecordContainerNetworkIoUsageTxErrorsDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string) { - mb.metricContainerNetworkIoUsageTxErrors.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue) -} - -// RecordContainerNetworkIoUsageTxPacketsDataPoint adds a data point to container.network.io.usage.tx_packets metric. 
-func (mb *MetricsBuilder) RecordContainerNetworkIoUsageTxPacketsDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string) { - mb.metricContainerNetworkIoUsageTxPackets.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue) -} - -// RecordContainerPidsCountDataPoint adds a data point to container.pids.count metric. -func (mb *MetricsBuilder) RecordContainerPidsCountDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerPidsCount.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerPidsLimitDataPoint adds a data point to container.pids.limit metric. -func (mb *MetricsBuilder) RecordContainerPidsLimitDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerPidsLimit.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerRestartsDataPoint adds a data point to container.restarts metric. -func (mb *MetricsBuilder) RecordContainerRestartsDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricContainerRestarts.recordDataPoint(mb.startTime, ts, val) -} - -// RecordContainerUptimeDataPoint adds a data point to container.uptime metric. -func (mb *MetricsBuilder) RecordContainerUptimeDataPoint(ts pcommon.Timestamp, val float64) { - mb.metricContainerUptime.recordDataPoint(mb.startTime, ts, val) -} - -// Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, -// and metrics builder should update its startTime and reset it's internal state accordingly. 
-func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { - mb.startTime = pcommon.NewTimestampFromTime(time.Now()) - for _, op := range options { - op(mb) - } -} diff --git a/receiver/dockerstatsreceiver/internal/metadata/generated_metrics_test.go b/receiver/dockerstatsreceiver/internal/metadata/generated_metrics_test.go deleted file mode 100644 index 56834bc34c8c..000000000000 --- a/receiver/dockerstatsreceiver/internal/metadata/generated_metrics_test.go +++ /dev/null @@ -1,1413 +0,0 @@ -// Code generated by mdatagen. DO NOT EDIT. - -package metadata - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/pdata/pcommon" - "go.opentelemetry.io/collector/pdata/pmetric" - "go.opentelemetry.io/collector/receiver/receivertest" - "go.uber.org/zap" - "go.uber.org/zap/zaptest/observer" -) - -type testDataSet int - -const ( - testDataSetDefault testDataSet = iota - testDataSetAll - testDataSetNone -) - -func TestMetricsBuilder(t *testing.T) { - tests := []struct { - name string - metricsSet testDataSet - resAttrsSet testDataSet - expectEmpty bool - }{ - { - name: "default", - }, - { - name: "all_set", - metricsSet: testDataSetAll, - resAttrsSet: testDataSetAll, - }, - { - name: "none_set", - metricsSet: testDataSetNone, - resAttrsSet: testDataSetNone, - expectEmpty: true, - }, - { - name: "filter_set_include", - resAttrsSet: testDataSetAll, - }, - { - name: "filter_set_exclude", - resAttrsSet: testDataSetAll, - expectEmpty: true, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - start := pcommon.Timestamp(1_000_000_000) - ts := pcommon.Timestamp(1_000_001_000) - observedZapCore, observedLogs := observer.New(zap.WarnLevel) - settings := receivertest.NewNopCreateSettings() - settings.Logger = zap.New(observedZapCore) - mb := NewMetricsBuilder(loadMetricsBuilderConfig(t, test.name), settings, WithStartTime(start)) - - expectedWarnings := 0 - - assert.Equal(t, expectedWarnings, 
observedLogs.Len()) - - defaultMetricsCount := 0 - allMetricsCount := 0 - - allMetricsCount++ - mb.RecordContainerBlockioIoMergedRecursiveDataPoint(ts, 1, "device_major-val", "device_minor-val", "operation-val") - - allMetricsCount++ - mb.RecordContainerBlockioIoQueuedRecursiveDataPoint(ts, 1, "device_major-val", "device_minor-val", "operation-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordContainerBlockioIoServiceBytesRecursiveDataPoint(ts, 1, "device_major-val", "device_minor-val", "operation-val") - - allMetricsCount++ - mb.RecordContainerBlockioIoServiceTimeRecursiveDataPoint(ts, 1, "device_major-val", "device_minor-val", "operation-val") - - allMetricsCount++ - mb.RecordContainerBlockioIoServicedRecursiveDataPoint(ts, 1, "device_major-val", "device_minor-val", "operation-val") - - allMetricsCount++ - mb.RecordContainerBlockioIoTimeRecursiveDataPoint(ts, 1, "device_major-val", "device_minor-val", "operation-val") - - allMetricsCount++ - mb.RecordContainerBlockioIoWaitTimeRecursiveDataPoint(ts, 1, "device_major-val", "device_minor-val", "operation-val") - - allMetricsCount++ - mb.RecordContainerBlockioSectorsRecursiveDataPoint(ts, 1, "device_major-val", "device_minor-val", "operation-val") - - allMetricsCount++ - mb.RecordContainerCPULimitDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerCPULogicalCountDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerCPUSharesDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerCPUThrottlingDataPeriodsDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerCPUThrottlingDataThrottledPeriodsDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerCPUThrottlingDataThrottledTimeDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordContainerCPUUsageKernelmodeDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerCPUUsagePercpuDataPoint(ts, 1, "core-val") - - allMetricsCount++ - mb.RecordContainerCPUUsageSystemDataPoint(ts, 1) - - defaultMetricsCount++ - 
allMetricsCount++ - mb.RecordContainerCPUUsageTotalDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordContainerCPUUsageUsermodeDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordContainerCPUUtilizationDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerMemoryActiveAnonDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerMemoryActiveFileDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerMemoryAnonDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerMemoryCacheDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerMemoryDirtyDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerMemoryFailsDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordContainerMemoryFileDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerMemoryHierarchicalMemoryLimitDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerMemoryHierarchicalMemswLimitDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerMemoryInactiveAnonDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerMemoryInactiveFileDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerMemoryMappedFileDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordContainerMemoryPercentDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerMemoryPgfaultDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerMemoryPgmajfaultDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerMemoryPgpginDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerMemoryPgpgoutDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerMemoryRssDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerMemoryRssHugeDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerMemoryTotalActiveAnonDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerMemoryTotalActiveFileDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordContainerMemoryTotalCacheDataPoint(ts, 1) - - allMetricsCount++ - 
mb.RecordContainerMemoryTotalDirtyDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerMemoryTotalInactiveAnonDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerMemoryTotalInactiveFileDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerMemoryTotalMappedFileDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerMemoryTotalPgfaultDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerMemoryTotalPgmajfaultDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerMemoryTotalPgpginDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerMemoryTotalPgpgoutDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerMemoryTotalRssDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerMemoryTotalRssHugeDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerMemoryTotalUnevictableDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerMemoryTotalWritebackDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerMemoryUnevictableDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordContainerMemoryUsageLimitDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerMemoryUsageMaxDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordContainerMemoryUsageTotalDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerMemoryWritebackDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordContainerNetworkIoUsageRxBytesDataPoint(ts, 1, "interface-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordContainerNetworkIoUsageRxDroppedDataPoint(ts, 1, "interface-val") - - allMetricsCount++ - mb.RecordContainerNetworkIoUsageRxErrorsDataPoint(ts, 1, "interface-val") - - allMetricsCount++ - mb.RecordContainerNetworkIoUsageRxPacketsDataPoint(ts, 1, "interface-val") - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordContainerNetworkIoUsageTxBytesDataPoint(ts, 1, "interface-val") - - defaultMetricsCount++ - allMetricsCount++ - 
mb.RecordContainerNetworkIoUsageTxDroppedDataPoint(ts, 1, "interface-val") - - allMetricsCount++ - mb.RecordContainerNetworkIoUsageTxErrorsDataPoint(ts, 1, "interface-val") - - allMetricsCount++ - mb.RecordContainerNetworkIoUsageTxPacketsDataPoint(ts, 1, "interface-val") - - allMetricsCount++ - mb.RecordContainerPidsCountDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerPidsLimitDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerRestartsDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordContainerUptimeDataPoint(ts, 1) - - rb := mb.NewResourceBuilder() - rb.SetContainerCommandLine("container.command_line-val") - rb.SetContainerHostname("container.hostname-val") - rb.SetContainerID("container.id-val") - rb.SetContainerImageID("container.image.id-val") - rb.SetContainerImageName("container.image.name-val") - rb.SetContainerName("container.name-val") - rb.SetContainerRuntime("container.runtime-val") - res := rb.Emit() - metrics := mb.Emit(WithResource(res)) - - if test.expectEmpty { - assert.Equal(t, 0, metrics.ResourceMetrics().Len()) - return - } - - assert.Equal(t, 1, metrics.ResourceMetrics().Len()) - rm := metrics.ResourceMetrics().At(0) - assert.Equal(t, res, rm.Resource()) - assert.Equal(t, 1, rm.ScopeMetrics().Len()) - ms := rm.ScopeMetrics().At(0).Metrics() - if test.metricsSet == testDataSetDefault { - assert.Equal(t, defaultMetricsCount, ms.Len()) - } - if test.metricsSet == testDataSetAll { - assert.Equal(t, allMetricsCount, ms.Len()) - } - validatedMetrics := make(map[string]bool) - for i := 0; i < ms.Len(); i++ { - switch ms.At(i).Name() { - case "container.blockio.io_merged_recursive": - assert.False(t, validatedMetrics["container.blockio.io_merged_recursive"], "Found a duplicate in the metrics slice: container.blockio.io_merged_recursive") - validatedMetrics["container.blockio.io_merged_recursive"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, 
"Number of bios/requests merged into requests belonging to this cgroup and its descendant cgroups (Only available with cgroups v1).", ms.At(i).Description()) - assert.Equal(t, "{operations}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("device_major") - assert.True(t, ok) - assert.EqualValues(t, "device_major-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("device_minor") - assert.True(t, ok) - assert.EqualValues(t, "device_minor-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("operation") - assert.True(t, ok) - assert.EqualValues(t, "operation-val", attrVal.Str()) - case "container.blockio.io_queued_recursive": - assert.False(t, validatedMetrics["container.blockio.io_queued_recursive"], "Found a duplicate in the metrics slice: container.blockio.io_queued_recursive") - validatedMetrics["container.blockio.io_queued_recursive"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of requests queued up for this cgroup and its descendant cgroups (Only available with cgroups v1).", ms.At(i).Description()) - assert.Equal(t, "{operations}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := 
dp.Attributes().Get("device_major") - assert.True(t, ok) - assert.EqualValues(t, "device_major-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("device_minor") - assert.True(t, ok) - assert.EqualValues(t, "device_minor-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("operation") - assert.True(t, ok) - assert.EqualValues(t, "operation-val", attrVal.Str()) - case "container.blockio.io_service_bytes_recursive": - assert.False(t, validatedMetrics["container.blockio.io_service_bytes_recursive"], "Found a duplicate in the metrics slice: container.blockio.io_service_bytes_recursive") - validatedMetrics["container.blockio.io_service_bytes_recursive"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of bytes transferred to/from the disk by the group and descendant groups.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("device_major") - assert.True(t, ok) - assert.EqualValues(t, "device_major-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("device_minor") - assert.True(t, ok) - assert.EqualValues(t, "device_minor-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("operation") - assert.True(t, ok) - assert.EqualValues(t, "operation-val", attrVal.Str()) - case "container.blockio.io_service_time_recursive": - assert.False(t, validatedMetrics["container.blockio.io_service_time_recursive"], "Found a duplicate in the metrics slice: container.blockio.io_service_time_recursive") - 
validatedMetrics["container.blockio.io_service_time_recursive"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Total amount of time in nanoseconds between request dispatch and request completion for the IOs done by this cgroup and descendant cgroups (Only available with cgroups v1).", ms.At(i).Description()) - assert.Equal(t, "ns", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("device_major") - assert.True(t, ok) - assert.EqualValues(t, "device_major-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("device_minor") - assert.True(t, ok) - assert.EqualValues(t, "device_minor-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("operation") - assert.True(t, ok) - assert.EqualValues(t, "operation-val", attrVal.Str()) - case "container.blockio.io_serviced_recursive": - assert.False(t, validatedMetrics["container.blockio.io_serviced_recursive"], "Found a duplicate in the metrics slice: container.blockio.io_serviced_recursive") - validatedMetrics["container.blockio.io_serviced_recursive"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of IOs (bio) issued to the disk by the group and descendant groups (Only available with cgroups v1).", ms.At(i).Description()) - assert.Equal(t, "{operations}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := 
ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("device_major") - assert.True(t, ok) - assert.EqualValues(t, "device_major-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("device_minor") - assert.True(t, ok) - assert.EqualValues(t, "device_minor-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("operation") - assert.True(t, ok) - assert.EqualValues(t, "operation-val", attrVal.Str()) - case "container.blockio.io_time_recursive": - assert.False(t, validatedMetrics["container.blockio.io_time_recursive"], "Found a duplicate in the metrics slice: container.blockio.io_time_recursive") - validatedMetrics["container.blockio.io_time_recursive"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Disk time allocated to cgroup (and descendant cgroups) per device in milliseconds (Only available with cgroups v1).", ms.At(i).Description()) - assert.Equal(t, "ms", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("device_major") - assert.True(t, ok) - assert.EqualValues(t, "device_major-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("device_minor") - assert.True(t, ok) - assert.EqualValues(t, "device_minor-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("operation") - assert.True(t, ok) - assert.EqualValues(t, "operation-val", attrVal.Str()) - case 
"container.blockio.io_wait_time_recursive": - assert.False(t, validatedMetrics["container.blockio.io_wait_time_recursive"], "Found a duplicate in the metrics slice: container.blockio.io_wait_time_recursive") - validatedMetrics["container.blockio.io_wait_time_recursive"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Total amount of time the IOs for this cgroup (and descendant cgroups) spent waiting in the scheduler queues for service (Only available with cgroups v1).", ms.At(i).Description()) - assert.Equal(t, "ns", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("device_major") - assert.True(t, ok) - assert.EqualValues(t, "device_major-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("device_minor") - assert.True(t, ok) - assert.EqualValues(t, "device_minor-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("operation") - assert.True(t, ok) - assert.EqualValues(t, "operation-val", attrVal.Str()) - case "container.blockio.sectors_recursive": - assert.False(t, validatedMetrics["container.blockio.sectors_recursive"], "Found a duplicate in the metrics slice: container.blockio.sectors_recursive") - validatedMetrics["container.blockio.sectors_recursive"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of sectors transferred to/from disk by the group and descendant groups (Only available with cgroups v1).", ms.At(i).Description()) - assert.Equal(t, "{sectors}", 
ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("device_major") - assert.True(t, ok) - assert.EqualValues(t, "device_major-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("device_minor") - assert.True(t, ok) - assert.EqualValues(t, "device_minor-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("operation") - assert.True(t, ok) - assert.EqualValues(t, "operation-val", attrVal.Str()) - case "container.cpu.limit": - assert.False(t, validatedMetrics["container.cpu.limit"], "Found a duplicate in the metrics slice: container.cpu.limit") - validatedMetrics["container.cpu.limit"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "CPU limit set for the container.", ms.At(i).Description()) - assert.Equal(t, "{cpus}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.Equal(t, float64(1), dp.DoubleValue()) - case "container.cpu.logical.count": - assert.False(t, validatedMetrics["container.cpu.logical.count"], "Found a duplicate in the metrics slice: container.cpu.logical.count") - validatedMetrics["container.cpu.logical.count"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Number of cores available to the container.", ms.At(i).Description()) - assert.Equal(t, "{cpus}", ms.At(i).Unit()) - dp := 
ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.cpu.shares": - assert.False(t, validatedMetrics["container.cpu.shares"], "Found a duplicate in the metrics slice: container.cpu.shares") - validatedMetrics["container.cpu.shares"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "CPU shares set for the container.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.cpu.throttling_data.periods": - assert.False(t, validatedMetrics["container.cpu.throttling_data.periods"], "Found a duplicate in the metrics slice: container.cpu.throttling_data.periods") - validatedMetrics["container.cpu.throttling_data.periods"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of periods with throttling active.", ms.At(i).Description()) - assert.Equal(t, "{periods}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.cpu.throttling_data.throttled_periods": - assert.False(t, validatedMetrics["container.cpu.throttling_data.throttled_periods"], "Found a duplicate 
in the metrics slice: container.cpu.throttling_data.throttled_periods") - validatedMetrics["container.cpu.throttling_data.throttled_periods"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of periods when the container hits its throttling limit.", ms.At(i).Description()) - assert.Equal(t, "{periods}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.cpu.throttling_data.throttled_time": - assert.False(t, validatedMetrics["container.cpu.throttling_data.throttled_time"], "Found a duplicate in the metrics slice: container.cpu.throttling_data.throttled_time") - validatedMetrics["container.cpu.throttling_data.throttled_time"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Aggregate time the container was throttled.", ms.At(i).Description()) - assert.Equal(t, "ns", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.cpu.usage.kernelmode": - assert.False(t, validatedMetrics["container.cpu.usage.kernelmode"], "Found a duplicate in the metrics slice: container.cpu.usage.kernelmode") - validatedMetrics["container.cpu.usage.kernelmode"] = true 
- assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Time spent by tasks of the cgroup in kernel mode (Linux). Time spent by all container processes in kernel mode (Windows).", ms.At(i).Description()) - assert.Equal(t, "ns", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.cpu.usage.percpu": - assert.False(t, validatedMetrics["container.cpu.usage.percpu"], "Found a duplicate in the metrics slice: container.cpu.usage.percpu") - validatedMetrics["container.cpu.usage.percpu"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Per-core CPU usage by the container (Only available with cgroups v1).", ms.At(i).Description()) - assert.Equal(t, "ns", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("core") - assert.True(t, ok) - assert.EqualValues(t, "core-val", attrVal.Str()) - case "container.cpu.usage.system": - assert.False(t, validatedMetrics["container.cpu.usage.system"], "Found a duplicate in the metrics slice: container.cpu.usage.system") - validatedMetrics["container.cpu.usage.system"] = true - assert.Equal(t, pmetric.MetricTypeSum, 
ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "System CPU usage, as reported by docker.", ms.At(i).Description()) - assert.Equal(t, "ns", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.cpu.usage.total": - assert.False(t, validatedMetrics["container.cpu.usage.total"], "Found a duplicate in the metrics slice: container.cpu.usage.total") - validatedMetrics["container.cpu.usage.total"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Total CPU time consumed.", ms.At(i).Description()) - assert.Equal(t, "ns", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.cpu.usage.usermode": - assert.False(t, validatedMetrics["container.cpu.usage.usermode"], "Found a duplicate in the metrics slice: container.cpu.usage.usermode") - validatedMetrics["container.cpu.usage.usermode"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Time spent by tasks of the cgroup in user mode (Linux). 
Time spent by all container processes in user mode (Windows).", ms.At(i).Description()) - assert.Equal(t, "ns", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.cpu.utilization": - assert.False(t, validatedMetrics["container.cpu.utilization"], "Found a duplicate in the metrics slice: container.cpu.utilization") - validatedMetrics["container.cpu.utilization"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Percent of CPU used by the container.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.Equal(t, float64(1), dp.DoubleValue()) - case "container.memory.active_anon": - assert.False(t, validatedMetrics["container.memory.active_anon"], "Found a duplicate in the metrics slice: container.memory.active_anon") - validatedMetrics["container.memory.active_anon"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "The amount of anonymous memory that has been identified as active by the kernel.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, 
dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.memory.active_file": - assert.False(t, validatedMetrics["container.memory.active_file"], "Found a duplicate in the metrics slice: container.memory.active_file") - validatedMetrics["container.memory.active_file"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Cache memory that has been identified as active by the kernel.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.memory.anon": - assert.False(t, validatedMetrics["container.memory.anon"], "Found a duplicate in the metrics slice: container.memory.anon") - validatedMetrics["container.memory.anon"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Amount of memory used in anonymous mappings such as brk(), sbrk(), and mmap(MAP_ANONYMOUS) (Only available with cgroups v2).", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) 
- case "container.memory.cache": - assert.False(t, validatedMetrics["container.memory.cache"], "Found a duplicate in the metrics slice: container.memory.cache") - validatedMetrics["container.memory.cache"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "The amount of memory used by the processes of this control group that can be associated precisely with a block on a block device (Only available with cgroups v1).", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.memory.dirty": - assert.False(t, validatedMetrics["container.memory.dirty"], "Found a duplicate in the metrics slice: container.memory.dirty") - validatedMetrics["container.memory.dirty"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Bytes that are waiting to get written back to the disk, from this cgroup (Only available with cgroups v1).", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.memory.fails": - assert.False(t, validatedMetrics["container.memory.fails"], "Found a 
duplicate in the metrics slice: container.memory.fails") - validatedMetrics["container.memory.fails"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of times the memory limit was hit.", ms.At(i).Description()) - assert.Equal(t, "{fails}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.memory.file": - assert.False(t, validatedMetrics["container.memory.file"], "Found a duplicate in the metrics slice: container.memory.file") - validatedMetrics["container.memory.file"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Amount of memory used to cache filesystem data, including tmpfs and shared memory (Only available with cgroups v2).", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.memory.hierarchical_memory_limit": - assert.False(t, validatedMetrics["container.memory.hierarchical_memory_limit"], "Found a duplicate in the metrics slice: container.memory.hierarchical_memory_limit") - validatedMetrics["container.memory.hierarchical_memory_limit"] = true - assert.Equal(t, 
pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "The maximum amount of physical memory that can be used by the processes of this control group (Only available with cgroups v1).", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.memory.hierarchical_memsw_limit": - assert.False(t, validatedMetrics["container.memory.hierarchical_memsw_limit"], "Found a duplicate in the metrics slice: container.memory.hierarchical_memsw_limit") - validatedMetrics["container.memory.hierarchical_memsw_limit"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "The maximum amount of RAM + swap that can be used by the processes of this control group (Only available with cgroups v1).", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.memory.inactive_anon": - assert.False(t, validatedMetrics["container.memory.inactive_anon"], "Found a duplicate in the metrics slice: container.memory.inactive_anon") - validatedMetrics["container.memory.inactive_anon"] = true - assert.Equal(t, pmetric.MetricTypeSum, 
ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "The amount of anonymous memory that has been identified as inactive by the kernel.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.memory.inactive_file": - assert.False(t, validatedMetrics["container.memory.inactive_file"], "Found a duplicate in the metrics slice: container.memory.inactive_file") - validatedMetrics["container.memory.inactive_file"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Cache memory that has been identified as inactive by the kernel.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.memory.mapped_file": - assert.False(t, validatedMetrics["container.memory.mapped_file"], "Found a duplicate in the metrics slice: container.memory.mapped_file") - validatedMetrics["container.memory.mapped_file"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Indicates the amount of memory mapped by the processes in the control group (Only 
available with cgroups v1).", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.memory.percent": - assert.False(t, validatedMetrics["container.memory.percent"], "Found a duplicate in the metrics slice: container.memory.percent") - validatedMetrics["container.memory.percent"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Percentage of memory used.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.Equal(t, float64(1), dp.DoubleValue()) - case "container.memory.pgfault": - assert.False(t, validatedMetrics["container.memory.pgfault"], "Found a duplicate in the metrics slice: container.memory.pgfault") - validatedMetrics["container.memory.pgfault"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Indicate the number of times that a process of the cgroup triggered a page fault.", ms.At(i).Description()) - assert.Equal(t, "{faults}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, 
pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.memory.pgmajfault": - assert.False(t, validatedMetrics["container.memory.pgmajfault"], "Found a duplicate in the metrics slice: container.memory.pgmajfault") - validatedMetrics["container.memory.pgmajfault"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Indicate the number of times that a process of the cgroup triggered a major fault.", ms.At(i).Description()) - assert.Equal(t, "{faults}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.memory.pgpgin": - assert.False(t, validatedMetrics["container.memory.pgpgin"], "Found a duplicate in the metrics slice: container.memory.pgpgin") - validatedMetrics["container.memory.pgpgin"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of pages read from disk by the cgroup (Only available with cgroups v1).", ms.At(i).Description()) - assert.Equal(t, "{operations}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.memory.pgpgout": - assert.False(t, 
validatedMetrics["container.memory.pgpgout"], "Found a duplicate in the metrics slice: container.memory.pgpgout") - validatedMetrics["container.memory.pgpgout"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of pages written to disk by the cgroup (Only available with cgroups v1).", ms.At(i).Description()) - assert.Equal(t, "{operations}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.memory.rss": - assert.False(t, validatedMetrics["container.memory.rss"], "Found a duplicate in the metrics slice: container.memory.rss") - validatedMetrics["container.memory.rss"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "The amount of memory that doesn’t correspond to anything on disk: stacks, heaps, and anonymous memory maps (Only available with cgroups v1).", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.memory.rss_huge": - assert.False(t, validatedMetrics["container.memory.rss_huge"], "Found a duplicate in the metrics slice: container.memory.rss_huge") - 
validatedMetrics["container.memory.rss_huge"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of bytes of anonymous transparent hugepages in this cgroup (Only available with cgroups v1).", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.memory.total_active_anon": - assert.False(t, validatedMetrics["container.memory.total_active_anon"], "Found a duplicate in the metrics slice: container.memory.total_active_anon") - validatedMetrics["container.memory.total_active_anon"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "The amount of anonymous memory that has been identified as active by the kernel. 
Includes descendant cgroups (Only available with cgroups v1).", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.memory.total_active_file": - assert.False(t, validatedMetrics["container.memory.total_active_file"], "Found a duplicate in the metrics slice: container.memory.total_active_file") - validatedMetrics["container.memory.total_active_file"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Cache memory that has been identified as active by the kernel. Includes descendant cgroups (Only available with cgroups v1).", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.memory.total_cache": - assert.False(t, validatedMetrics["container.memory.total_cache"], "Found a duplicate in the metrics slice: container.memory.total_cache") - validatedMetrics["container.memory.total_cache"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Total amount of memory used by the processes of this cgroup (and descendants) that can be associated with a block on a 
block device. Also accounts for memory used by tmpfs (Only available with cgroups v1).", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.memory.total_dirty": - assert.False(t, validatedMetrics["container.memory.total_dirty"], "Found a duplicate in the metrics slice: container.memory.total_dirty") - validatedMetrics["container.memory.total_dirty"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Bytes that are waiting to get written back to the disk, from this cgroup and descendants (Only available with cgroups v1).", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.memory.total_inactive_anon": - assert.False(t, validatedMetrics["container.memory.total_inactive_anon"], "Found a duplicate in the metrics slice: container.memory.total_inactive_anon") - validatedMetrics["container.memory.total_inactive_anon"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "The amount of anonymous memory that has been identified as inactive by the kernel. 
Includes descendant cgroups (Only available with cgroups v1).", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.memory.total_inactive_file": - assert.False(t, validatedMetrics["container.memory.total_inactive_file"], "Found a duplicate in the metrics slice: container.memory.total_inactive_file") - validatedMetrics["container.memory.total_inactive_file"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Cache memory that has been identified as inactive by the kernel. Includes descendant cgroups (Only available with cgroups v1).", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.memory.total_mapped_file": - assert.False(t, validatedMetrics["container.memory.total_mapped_file"], "Found a duplicate in the metrics slice: container.memory.total_mapped_file") - validatedMetrics["container.memory.total_mapped_file"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Indicates the amount of memory mapped by the processes in the control group and 
descendant groups (Only available with cgroups v1).", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.memory.total_pgfault": - assert.False(t, validatedMetrics["container.memory.total_pgfault"], "Found a duplicate in the metrics slice: container.memory.total_pgfault") - validatedMetrics["container.memory.total_pgfault"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Indicate the number of times that a process of the cgroup (or descendant cgroups) triggered a page fault (Only available with cgroups v1).", ms.At(i).Description()) - assert.Equal(t, "{faults}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.memory.total_pgmajfault": - assert.False(t, validatedMetrics["container.memory.total_pgmajfault"], "Found a duplicate in the metrics slice: container.memory.total_pgmajfault") - validatedMetrics["container.memory.total_pgmajfault"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Indicate the number of times that a process of the cgroup (or descendant cgroups) triggered a major fault 
(Only available with cgroups v1).", ms.At(i).Description()) - assert.Equal(t, "{faults}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.memory.total_pgpgin": - assert.False(t, validatedMetrics["container.memory.total_pgpgin"], "Found a duplicate in the metrics slice: container.memory.total_pgpgin") - validatedMetrics["container.memory.total_pgpgin"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of pages read from disk by the cgroup and descendant groups (Only available with cgroups v1).", ms.At(i).Description()) - assert.Equal(t, "{operations}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.memory.total_pgpgout": - assert.False(t, validatedMetrics["container.memory.total_pgpgout"], "Found a duplicate in the metrics slice: container.memory.total_pgpgout") - validatedMetrics["container.memory.total_pgpgout"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of pages written to disk by the cgroup and descendant groups (Only available with cgroups v1).", ms.At(i).Description()) - assert.Equal(t, "{operations}", 
ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.memory.total_rss": - assert.False(t, validatedMetrics["container.memory.total_rss"], "Found a duplicate in the metrics slice: container.memory.total_rss") - validatedMetrics["container.memory.total_rss"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "The amount of memory that doesn’t correspond to anything on disk: stacks, heaps, and anonymous memory maps. Includes descendant cgroups (Only available with cgroups v1).", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.memory.total_rss_huge": - assert.False(t, validatedMetrics["container.memory.total_rss_huge"], "Found a duplicate in the metrics slice: container.memory.total_rss_huge") - validatedMetrics["container.memory.total_rss_huge"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of bytes of anonymous transparent hugepages in this cgroup and descendant cgroups (Only available with cgroups v1).", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, 
false, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.memory.total_unevictable": - assert.False(t, validatedMetrics["container.memory.total_unevictable"], "Found a duplicate in the metrics slice: container.memory.total_unevictable") - validatedMetrics["container.memory.total_unevictable"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "The amount of memory that cannot be reclaimed. Includes descendant cgroups (Only available with cgroups v1).", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.memory.total_writeback": - assert.False(t, validatedMetrics["container.memory.total_writeback"], "Found a duplicate in the metrics slice: container.memory.total_writeback") - validatedMetrics["container.memory.total_writeback"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of bytes of file/anon cache that are queued for syncing to disk in this cgroup and descendants (Only available with cgroups v1).", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) - 
assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.memory.unevictable": - assert.False(t, validatedMetrics["container.memory.unevictable"], "Found a duplicate in the metrics slice: container.memory.unevictable") - validatedMetrics["container.memory.unevictable"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "The amount of memory that cannot be reclaimed.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.memory.usage.limit": - assert.False(t, validatedMetrics["container.memory.usage.limit"], "Found a duplicate in the metrics slice: container.memory.usage.limit") - validatedMetrics["container.memory.usage.limit"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Memory limit of the container.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, 
pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.memory.usage.max": - assert.False(t, validatedMetrics["container.memory.usage.max"], "Found a duplicate in the metrics slice: container.memory.usage.max") - validatedMetrics["container.memory.usage.max"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Maximum memory usage.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.memory.usage.total": - assert.False(t, validatedMetrics["container.memory.usage.total"], "Found a duplicate in the metrics slice: container.memory.usage.total") - validatedMetrics["container.memory.usage.total"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Memory usage of the container. 
This excludes the cache.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.memory.writeback": - assert.False(t, validatedMetrics["container.memory.writeback"], "Found a duplicate in the metrics slice: container.memory.writeback") - validatedMetrics["container.memory.writeback"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of bytes of file/anon cache that are queued for syncing to disk in this cgroup (Only available with cgroups v1).", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.network.io.usage.rx_bytes": - assert.False(t, validatedMetrics["container.network.io.usage.rx_bytes"], "Found a duplicate in the metrics slice: container.network.io.usage.rx_bytes") - validatedMetrics["container.network.io.usage.rx_bytes"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Bytes received by the container.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - 
assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("interface") - assert.True(t, ok) - assert.EqualValues(t, "interface-val", attrVal.Str()) - case "container.network.io.usage.rx_dropped": - assert.False(t, validatedMetrics["container.network.io.usage.rx_dropped"], "Found a duplicate in the metrics slice: container.network.io.usage.rx_dropped") - validatedMetrics["container.network.io.usage.rx_dropped"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Incoming packets dropped.", ms.At(i).Description()) - assert.Equal(t, "{packets}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("interface") - assert.True(t, ok) - assert.EqualValues(t, "interface-val", attrVal.Str()) - case "container.network.io.usage.rx_errors": - assert.False(t, validatedMetrics["container.network.io.usage.rx_errors"], "Found a duplicate in the metrics slice: container.network.io.usage.rx_errors") - validatedMetrics["container.network.io.usage.rx_errors"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Received errors.", ms.At(i).Description()) - assert.Equal(t, "{errors}", ms.At(i).Unit()) - 
assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("interface") - assert.True(t, ok) - assert.EqualValues(t, "interface-val", attrVal.Str()) - case "container.network.io.usage.rx_packets": - assert.False(t, validatedMetrics["container.network.io.usage.rx_packets"], "Found a duplicate in the metrics slice: container.network.io.usage.rx_packets") - validatedMetrics["container.network.io.usage.rx_packets"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Packets received.", ms.At(i).Description()) - assert.Equal(t, "{packets}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("interface") - assert.True(t, ok) - assert.EqualValues(t, "interface-val", attrVal.Str()) - case "container.network.io.usage.tx_bytes": - assert.False(t, validatedMetrics["container.network.io.usage.tx_bytes"], "Found a duplicate in the metrics slice: container.network.io.usage.tx_bytes") - validatedMetrics["container.network.io.usage.tx_bytes"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Bytes sent.", ms.At(i).Description()) - assert.Equal(t, "By", 
ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("interface") - assert.True(t, ok) - assert.EqualValues(t, "interface-val", attrVal.Str()) - case "container.network.io.usage.tx_dropped": - assert.False(t, validatedMetrics["container.network.io.usage.tx_dropped"], "Found a duplicate in the metrics slice: container.network.io.usage.tx_dropped") - validatedMetrics["container.network.io.usage.tx_dropped"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Outgoing packets dropped.", ms.At(i).Description()) - assert.Equal(t, "{packets}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("interface") - assert.True(t, ok) - assert.EqualValues(t, "interface-val", attrVal.Str()) - case "container.network.io.usage.tx_errors": - assert.False(t, validatedMetrics["container.network.io.usage.tx_errors"], "Found a duplicate in the metrics slice: container.network.io.usage.tx_errors") - validatedMetrics["container.network.io.usage.tx_errors"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Sent errors.", 
ms.At(i).Description()) - assert.Equal(t, "{errors}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("interface") - assert.True(t, ok) - assert.EqualValues(t, "interface-val", attrVal.Str()) - case "container.network.io.usage.tx_packets": - assert.False(t, validatedMetrics["container.network.io.usage.tx_packets"], "Found a duplicate in the metrics slice: container.network.io.usage.tx_packets") - validatedMetrics["container.network.io.usage.tx_packets"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Packets sent.", ms.At(i).Description()) - assert.Equal(t, "{packets}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("interface") - assert.True(t, ok) - assert.EqualValues(t, "interface-val", attrVal.Str()) - case "container.pids.count": - assert.False(t, validatedMetrics["container.pids.count"], "Found a duplicate in the metrics slice: container.pids.count") - validatedMetrics["container.pids.count"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of pids in the container's cgroup.", 
ms.At(i).Description()) - assert.Equal(t, "{pids}", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.pids.limit": - assert.False(t, validatedMetrics["container.pids.limit"], "Found a duplicate in the metrics slice: container.pids.limit") - validatedMetrics["container.pids.limit"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Maximum number of pids in the container's cgroup.", ms.At(i).Description()) - assert.Equal(t, "{pids}", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.restarts": - assert.False(t, validatedMetrics["container.restarts"], "Found a duplicate in the metrics slice: container.restarts") - validatedMetrics["container.restarts"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of restarts for the container.", ms.At(i).Description()) - assert.Equal(t, "{restarts}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, 
dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "container.uptime": - assert.False(t, validatedMetrics["container.uptime"], "Found a duplicate in the metrics slice: container.uptime") - validatedMetrics["container.uptime"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Time elapsed since container start time.", ms.At(i).Description()) - assert.Equal(t, "s", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.Equal(t, float64(1), dp.DoubleValue()) - } - } - }) - } -} diff --git a/receiver/dockerstatsreceiver/internal/metadata/generated_resource.go b/receiver/dockerstatsreceiver/internal/metadata/generated_resource.go deleted file mode 100644 index 7b0c03fd65b5..000000000000 --- a/receiver/dockerstatsreceiver/internal/metadata/generated_resource.go +++ /dev/null @@ -1,78 +0,0 @@ -// Code generated by mdatagen. DO NOT EDIT. - -package metadata - -import ( - "go.opentelemetry.io/collector/pdata/pcommon" -) - -// ResourceBuilder is a helper struct to build resources predefined in metadata.yaml. -// The ResourceBuilder is not thread-safe and must not to be used in multiple goroutines. -type ResourceBuilder struct { - config ResourceAttributesConfig - res pcommon.Resource -} - -// NewResourceBuilder creates a new ResourceBuilder. This method should be called on the start of the application. -func NewResourceBuilder(rac ResourceAttributesConfig) *ResourceBuilder { - return &ResourceBuilder{ - config: rac, - res: pcommon.NewResource(), - } -} - -// SetContainerCommandLine sets provided value as "container.command_line" attribute. 
-func (rb *ResourceBuilder) SetContainerCommandLine(val string) { - if rb.config.ContainerCommandLine.Enabled { - rb.res.Attributes().PutStr("container.command_line", val) - } -} - -// SetContainerHostname sets provided value as "container.hostname" attribute. -func (rb *ResourceBuilder) SetContainerHostname(val string) { - if rb.config.ContainerHostname.Enabled { - rb.res.Attributes().PutStr("container.hostname", val) - } -} - -// SetContainerID sets provided value as "container.id" attribute. -func (rb *ResourceBuilder) SetContainerID(val string) { - if rb.config.ContainerID.Enabled { - rb.res.Attributes().PutStr("container.id", val) - } -} - -// SetContainerImageID sets provided value as "container.image.id" attribute. -func (rb *ResourceBuilder) SetContainerImageID(val string) { - if rb.config.ContainerImageID.Enabled { - rb.res.Attributes().PutStr("container.image.id", val) - } -} - -// SetContainerImageName sets provided value as "container.image.name" attribute. -func (rb *ResourceBuilder) SetContainerImageName(val string) { - if rb.config.ContainerImageName.Enabled { - rb.res.Attributes().PutStr("container.image.name", val) - } -} - -// SetContainerName sets provided value as "container.name" attribute. -func (rb *ResourceBuilder) SetContainerName(val string) { - if rb.config.ContainerName.Enabled { - rb.res.Attributes().PutStr("container.name", val) - } -} - -// SetContainerRuntime sets provided value as "container.runtime" attribute. -func (rb *ResourceBuilder) SetContainerRuntime(val string) { - if rb.config.ContainerRuntime.Enabled { - rb.res.Attributes().PutStr("container.runtime", val) - } -} - -// Emit returns the built resource and resets the internal builder state. 
-func (rb *ResourceBuilder) Emit() pcommon.Resource { - r := rb.res - rb.res = pcommon.NewResource() - return r -} diff --git a/receiver/dockerstatsreceiver/internal/metadata/generated_resource_test.go b/receiver/dockerstatsreceiver/internal/metadata/generated_resource_test.go deleted file mode 100644 index 5a9e037b2bdd..000000000000 --- a/receiver/dockerstatsreceiver/internal/metadata/generated_resource_test.go +++ /dev/null @@ -1,76 +0,0 @@ -// Code generated by mdatagen. DO NOT EDIT. - -package metadata - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestResourceBuilder(t *testing.T) { - for _, test := range []string{"default", "all_set", "none_set"} { - t.Run(test, func(t *testing.T) { - cfg := loadResourceAttributesConfig(t, test) - rb := NewResourceBuilder(cfg) - rb.SetContainerCommandLine("container.command_line-val") - rb.SetContainerHostname("container.hostname-val") - rb.SetContainerID("container.id-val") - rb.SetContainerImageID("container.image.id-val") - rb.SetContainerImageName("container.image.name-val") - rb.SetContainerName("container.name-val") - rb.SetContainerRuntime("container.runtime-val") - - res := rb.Emit() - assert.Equal(t, 0, rb.Emit().Attributes().Len()) // Second call should return empty Resource - - switch test { - case "default": - assert.Equal(t, 5, res.Attributes().Len()) - case "all_set": - assert.Equal(t, 7, res.Attributes().Len()) - case "none_set": - assert.Equal(t, 0, res.Attributes().Len()) - return - default: - assert.Failf(t, "unexpected test case: %s", test) - } - - val, ok := res.Attributes().Get("container.command_line") - assert.Equal(t, test == "all_set", ok) - if ok { - assert.EqualValues(t, "container.command_line-val", val.Str()) - } - val, ok = res.Attributes().Get("container.hostname") - assert.True(t, ok) - if ok { - assert.EqualValues(t, "container.hostname-val", val.Str()) - } - val, ok = res.Attributes().Get("container.id") - assert.True(t, ok) - if ok { - assert.EqualValues(t, 
"container.id-val", val.Str()) - } - val, ok = res.Attributes().Get("container.image.id") - assert.Equal(t, test == "all_set", ok) - if ok { - assert.EqualValues(t, "container.image.id-val", val.Str()) - } - val, ok = res.Attributes().Get("container.image.name") - assert.True(t, ok) - if ok { - assert.EqualValues(t, "container.image.name-val", val.Str()) - } - val, ok = res.Attributes().Get("container.name") - assert.True(t, ok) - if ok { - assert.EqualValues(t, "container.name-val", val.Str()) - } - val, ok = res.Attributes().Get("container.runtime") - assert.True(t, ok) - if ok { - assert.EqualValues(t, "container.runtime-val", val.Str()) - } - }) - } -} diff --git a/receiver/dockerstatsreceiver/internal/metadata/package_test.go b/receiver/dockerstatsreceiver/internal/metadata/package_test.go deleted file mode 100644 index 1aba5ec4bb0b..000000000000 --- a/receiver/dockerstatsreceiver/internal/metadata/package_test.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package metadata - -import ( - "testing" - - "go.uber.org/goleak" -) - -func TestMain(m *testing.M) { - goleak.VerifyTestMain(m) -} diff --git a/receiver/dockerstatsreceiver/internal/metadata/testdata/config.yaml b/receiver/dockerstatsreceiver/internal/metadata/testdata/config.yaml deleted file mode 100644 index 34f3f7419590..000000000000 --- a/receiver/dockerstatsreceiver/internal/metadata/testdata/config.yaml +++ /dev/null @@ -1,379 +0,0 @@ -default: -all_set: - metrics: - container.blockio.io_merged_recursive: - enabled: true - container.blockio.io_queued_recursive: - enabled: true - container.blockio.io_service_bytes_recursive: - enabled: true - container.blockio.io_service_time_recursive: - enabled: true - container.blockio.io_serviced_recursive: - enabled: true - container.blockio.io_time_recursive: - enabled: true - container.blockio.io_wait_time_recursive: - enabled: true - container.blockio.sectors_recursive: - enabled: true - 
container.cpu.limit: - enabled: true - container.cpu.logical.count: - enabled: true - container.cpu.shares: - enabled: true - container.cpu.throttling_data.periods: - enabled: true - container.cpu.throttling_data.throttled_periods: - enabled: true - container.cpu.throttling_data.throttled_time: - enabled: true - container.cpu.usage.kernelmode: - enabled: true - container.cpu.usage.percpu: - enabled: true - container.cpu.usage.system: - enabled: true - container.cpu.usage.total: - enabled: true - container.cpu.usage.usermode: - enabled: true - container.cpu.utilization: - enabled: true - container.memory.active_anon: - enabled: true - container.memory.active_file: - enabled: true - container.memory.anon: - enabled: true - container.memory.cache: - enabled: true - container.memory.dirty: - enabled: true - container.memory.fails: - enabled: true - container.memory.file: - enabled: true - container.memory.hierarchical_memory_limit: - enabled: true - container.memory.hierarchical_memsw_limit: - enabled: true - container.memory.inactive_anon: - enabled: true - container.memory.inactive_file: - enabled: true - container.memory.mapped_file: - enabled: true - container.memory.percent: - enabled: true - container.memory.pgfault: - enabled: true - container.memory.pgmajfault: - enabled: true - container.memory.pgpgin: - enabled: true - container.memory.pgpgout: - enabled: true - container.memory.rss: - enabled: true - container.memory.rss_huge: - enabled: true - container.memory.total_active_anon: - enabled: true - container.memory.total_active_file: - enabled: true - container.memory.total_cache: - enabled: true - container.memory.total_dirty: - enabled: true - container.memory.total_inactive_anon: - enabled: true - container.memory.total_inactive_file: - enabled: true - container.memory.total_mapped_file: - enabled: true - container.memory.total_pgfault: - enabled: true - container.memory.total_pgmajfault: - enabled: true - container.memory.total_pgpgin: - enabled: true - 
container.memory.total_pgpgout: - enabled: true - container.memory.total_rss: - enabled: true - container.memory.total_rss_huge: - enabled: true - container.memory.total_unevictable: - enabled: true - container.memory.total_writeback: - enabled: true - container.memory.unevictable: - enabled: true - container.memory.usage.limit: - enabled: true - container.memory.usage.max: - enabled: true - container.memory.usage.total: - enabled: true - container.memory.writeback: - enabled: true - container.network.io.usage.rx_bytes: - enabled: true - container.network.io.usage.rx_dropped: - enabled: true - container.network.io.usage.rx_errors: - enabled: true - container.network.io.usage.rx_packets: - enabled: true - container.network.io.usage.tx_bytes: - enabled: true - container.network.io.usage.tx_dropped: - enabled: true - container.network.io.usage.tx_errors: - enabled: true - container.network.io.usage.tx_packets: - enabled: true - container.pids.count: - enabled: true - container.pids.limit: - enabled: true - container.restarts: - enabled: true - container.uptime: - enabled: true - resource_attributes: - container.command_line: - enabled: true - container.hostname: - enabled: true - container.id: - enabled: true - container.image.id: - enabled: true - container.image.name: - enabled: true - container.name: - enabled: true - container.runtime: - enabled: true -none_set: - metrics: - container.blockio.io_merged_recursive: - enabled: false - container.blockio.io_queued_recursive: - enabled: false - container.blockio.io_service_bytes_recursive: - enabled: false - container.blockio.io_service_time_recursive: - enabled: false - container.blockio.io_serviced_recursive: - enabled: false - container.blockio.io_time_recursive: - enabled: false - container.blockio.io_wait_time_recursive: - enabled: false - container.blockio.sectors_recursive: - enabled: false - container.cpu.limit: - enabled: false - container.cpu.logical.count: - enabled: false - container.cpu.shares: - enabled: 
false - container.cpu.throttling_data.periods: - enabled: false - container.cpu.throttling_data.throttled_periods: - enabled: false - container.cpu.throttling_data.throttled_time: - enabled: false - container.cpu.usage.kernelmode: - enabled: false - container.cpu.usage.percpu: - enabled: false - container.cpu.usage.system: - enabled: false - container.cpu.usage.total: - enabled: false - container.cpu.usage.usermode: - enabled: false - container.cpu.utilization: - enabled: false - container.memory.active_anon: - enabled: false - container.memory.active_file: - enabled: false - container.memory.anon: - enabled: false - container.memory.cache: - enabled: false - container.memory.dirty: - enabled: false - container.memory.fails: - enabled: false - container.memory.file: - enabled: false - container.memory.hierarchical_memory_limit: - enabled: false - container.memory.hierarchical_memsw_limit: - enabled: false - container.memory.inactive_anon: - enabled: false - container.memory.inactive_file: - enabled: false - container.memory.mapped_file: - enabled: false - container.memory.percent: - enabled: false - container.memory.pgfault: - enabled: false - container.memory.pgmajfault: - enabled: false - container.memory.pgpgin: - enabled: false - container.memory.pgpgout: - enabled: false - container.memory.rss: - enabled: false - container.memory.rss_huge: - enabled: false - container.memory.total_active_anon: - enabled: false - container.memory.total_active_file: - enabled: false - container.memory.total_cache: - enabled: false - container.memory.total_dirty: - enabled: false - container.memory.total_inactive_anon: - enabled: false - container.memory.total_inactive_file: - enabled: false - container.memory.total_mapped_file: - enabled: false - container.memory.total_pgfault: - enabled: false - container.memory.total_pgmajfault: - enabled: false - container.memory.total_pgpgin: - enabled: false - container.memory.total_pgpgout: - enabled: false - container.memory.total_rss: - 
enabled: false - container.memory.total_rss_huge: - enabled: false - container.memory.total_unevictable: - enabled: false - container.memory.total_writeback: - enabled: false - container.memory.unevictable: - enabled: false - container.memory.usage.limit: - enabled: false - container.memory.usage.max: - enabled: false - container.memory.usage.total: - enabled: false - container.memory.writeback: - enabled: false - container.network.io.usage.rx_bytes: - enabled: false - container.network.io.usage.rx_dropped: - enabled: false - container.network.io.usage.rx_errors: - enabled: false - container.network.io.usage.rx_packets: - enabled: false - container.network.io.usage.tx_bytes: - enabled: false - container.network.io.usage.tx_dropped: - enabled: false - container.network.io.usage.tx_errors: - enabled: false - container.network.io.usage.tx_packets: - enabled: false - container.pids.count: - enabled: false - container.pids.limit: - enabled: false - container.restarts: - enabled: false - container.uptime: - enabled: false - resource_attributes: - container.command_line: - enabled: false - container.hostname: - enabled: false - container.id: - enabled: false - container.image.id: - enabled: false - container.image.name: - enabled: false - container.name: - enabled: false - container.runtime: - enabled: false -filter_set_include: - resource_attributes: - container.command_line: - enabled: true - metrics_include: - - regexp: ".*" - container.hostname: - enabled: true - metrics_include: - - regexp: ".*" - container.id: - enabled: true - metrics_include: - - regexp: ".*" - container.image.id: - enabled: true - metrics_include: - - regexp: ".*" - container.image.name: - enabled: true - metrics_include: - - regexp: ".*" - container.name: - enabled: true - metrics_include: - - regexp: ".*" - container.runtime: - enabled: true - metrics_include: - - regexp: ".*" -filter_set_exclude: - resource_attributes: - container.command_line: - enabled: true - metrics_exclude: - - strict: 
"container.command_line-val" - container.hostname: - enabled: true - metrics_exclude: - - strict: "container.hostname-val" - container.id: - enabled: true - metrics_exclude: - - strict: "container.id-val" - container.image.id: - enabled: true - metrics_exclude: - - strict: "container.image.id-val" - container.image.name: - enabled: true - metrics_exclude: - - strict: "container.image.name-val" - container.name: - enabled: true - metrics_exclude: - - strict: "container.name-val" - container.runtime: - enabled: true - metrics_exclude: - - strict: "container.runtime-val" diff --git a/receiver/dockerstatsreceiver/metadata.yaml b/receiver/dockerstatsreceiver/metadata.yaml index 1618783d0bc9..5de36f88c9f7 100644 --- a/receiver/dockerstatsreceiver/metadata.yaml +++ b/receiver/dockerstatsreceiver/metadata.yaml @@ -11,702 +11,3 @@ status: unsupported_platforms: [darwin, windows] sem_conv_version: 1.6.1 - -# Note: there are other, additional resource attributes that the user can configure through the yaml -resource_attributes: - container.runtime: - description: "The runtime of the container. For this receiver, it will always be 'docker'." - type: string - enabled: true - container.id: - description: "The ID of the container." - type: string - enabled: true - container.image.name: - description: "The name of the docker image in use by the container." - type: string - enabled: true - container.name: - description: "The name of the container." - type: string - enabled: true - container.hostname: - description: "The hostname of the container." - type: string - enabled: true - container.image.id: - description: "The ID of the container image." - type: string - enabled: false - container.command_line: - description: "The full command executed by the container." - type: string - enabled: false - -attributes: - core: - description: "The CPU core number when utilising per-CPU metrics." - type: string - device_major: - description: "Device major number for block IO operations." 
- type: string - device_minor: - description: "Device minor number for block IO operations." - type: string - interface: - description: "Network interface." - type: string - operation: - description: "Type of BlockIO operation." - type: string - -metrics: - # CPU - container.cpu.usage.system: - enabled: false - description: "System CPU usage, as reported by docker." - extended_documentation: "Note this is the usage for the system, not the container." - unit: ns - sum: - value_type: int - monotonic: true - aggregation_temporality: cumulative - container.cpu.usage.total: - enabled: true - description: "Total CPU time consumed." - unit: ns - sum: - value_type: int - monotonic: true - aggregation_temporality: cumulative - container.cpu.usage.kernelmode: - enabled: true - description: >- - Time spent by tasks of the cgroup in kernel mode (Linux). - Time spent by all container processes in kernel mode (Windows). - unit: ns - sum: - value_type: int - monotonic: true - aggregation_temporality: cumulative - container.cpu.usage.usermode: - enabled: true - description: >- - Time spent by tasks of the cgroup in user mode (Linux). - Time spent by all container processes in user mode (Windows). - unit: ns - sum: - value_type: int - monotonic: true - aggregation_temporality: cumulative - container.cpu.usage.percpu: - enabled: false - description: "Per-core CPU usage by the container (Only available with cgroups v1)." - unit: ns - sum: - value_type: int - monotonic: true - aggregation_temporality: cumulative - attributes: - - core - container.cpu.throttling_data.periods: - enabled: false - description: "Number of periods with throttling active." - unit: "{periods}" - sum: - value_type: int - monotonic: true - aggregation_temporality: cumulative - container.cpu.throttling_data.throttled_periods: - enabled: false - description: "Number of periods when the container hits its throttling limit." 
- unit: "{periods}" - sum: - value_type: int - monotonic: true - aggregation_temporality: cumulative - container.cpu.throttling_data.throttled_time: - enabled: false - description: "Aggregate time the container was throttled." - unit: ns - sum: - value_type: int - monotonic: true - aggregation_temporality: cumulative - container.cpu.utilization: - enabled: true - description: "Percent of CPU used by the container." - unit: "1" - gauge: - value_type: double - container.cpu.limit: - enabled: false - description: "CPU limit set for the container." - extended_documentation: "This metric is only reported if the container has limits set with -cpus, -cpuset-cpus or -cpu-quota." - unit: "{cpus}" - gauge: - value_type: double - container.cpu.shares: - enabled: false - description: "CPU shares set for the container." - unit: "1" - gauge: - value_type: int - container.cpu.logical.count: - enabled: false - description: "Number of cores available to the container." - unit: "{cpus}" - gauge: - value_type: int - - - # Memory - container.memory.usage.limit: - enabled: true - description: "Memory limit of the container." - unit: By - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: false - container.memory.usage.total: - enabled: true - description: "Memory usage of the container. This excludes the cache." - unit: By - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: false - container.memory.usage.max: - enabled: false - description: "Maximum memory usage." - unit: By - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: false - container.memory.percent: - enabled: true - description: "Percentage of memory used." - unit: 1 - gauge: - value_type: double - container.memory.cache: - enabled: false - description: "The amount of memory used by the processes of this control group that can be associated precisely with a block on a block device (Only available with cgroups v1)." 
- unit: By - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: false - container.memory.rss: - enabled: false - description: "The amount of memory that doesn’t correspond to anything on disk: stacks, heaps, and anonymous memory maps (Only available with cgroups v1)." - unit: By - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: false - container.memory.rss_huge: - enabled: false - description: "Number of bytes of anonymous transparent hugepages in this cgroup (Only available with cgroups v1)." - unit: By - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: false - container.memory.dirty: - enabled: false - description: "Bytes that are waiting to get written back to the disk, from this cgroup (Only available with cgroups v1)." - unit: By - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: false - container.memory.writeback: - enabled: false - description: "Number of bytes of file/anon cache that are queued for syncing to disk in this cgroup (Only available with cgroups v1)." - unit: By - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: false - container.memory.mapped_file: - enabled: false - description: "Indicates the amount of memory mapped by the processes in the control group (Only available with cgroups v1)." - unit: By - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: false - container.memory.pgpgin: - enabled: false - description: "Number of pages read from disk by the cgroup (Only available with cgroups v1)." - extended_documentation: "[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt)." - unit: "{operations}" - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: true - container.memory.pgpgout: - enabled: false - description: "Number of pages written to disk by the cgroup (Only available with cgroups v1)." 
- extended_documentation: "[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt)." - unit: "{operations}" - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: true - container.memory.pgfault: - enabled: false - description: "Indicate the number of times that a process of the cgroup triggered a page fault." - unit: "{faults}" - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: true - container.memory.pgmajfault: - enabled: false - description: "Indicate the number of times that a process of the cgroup triggered a major fault." - unit: "{faults}" - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: true - container.memory.inactive_anon: - enabled: false - description: "The amount of anonymous memory that has been identified as inactive by the kernel." - unit: By - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: false - container.memory.active_anon: - enabled: false - description: "The amount of anonymous memory that has been identified as active by the kernel." - unit: By - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: false - container.memory.inactive_file: - enabled: false - description: "Cache memory that has been identified as inactive by the kernel." - extended_documentation: "[More docs](https://docs.docker.com/config/containers/runmetrics/)" - unit: By - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: false - container.memory.active_file: - enabled: false - description: "Cache memory that has been identified as active by the kernel." - extended_documentation: "[More docs](https://docs.docker.com/config/containers/runmetrics/)" - unit: By - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: false - container.memory.unevictable: - enabled: false - description: "The amount of memory that cannot be reclaimed." 
- unit: By - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: false - container.memory.hierarchical_memory_limit: - enabled: false - description: "The maximum amount of physical memory that can be used by the processes of this control group (Only available with cgroups v1)." - unit: By - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: false - container.memory.hierarchical_memsw_limit: - enabled: false - description: "The maximum amount of RAM + swap that can be used by the processes of this control group (Only available with cgroups v1)." - unit: By - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: false - container.memory.total_cache: - enabled: true - description: "Total amount of memory used by the processes of this cgroup (and descendants) that can be associated with a block on a block device. Also accounts for memory used by tmpfs (Only available with cgroups v1)." - unit: By - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: false - container.memory.total_rss: - enabled: false - description: "The amount of memory that doesn’t correspond to anything on disk: stacks, heaps, and anonymous memory maps. Includes descendant cgroups (Only available with cgroups v1)." - unit: By - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: false - container.memory.total_rss_huge: - enabled: false - description: "Number of bytes of anonymous transparent hugepages in this cgroup and descendant cgroups (Only available with cgroups v1)." - unit: By - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: false - container.memory.total_dirty: - enabled: false - description: "Bytes that are waiting to get written back to the disk, from this cgroup and descendants (Only available with cgroups v1)." 
- unit: By - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: false - container.memory.total_writeback: - enabled: false - description: "Number of bytes of file/anon cache that are queued for syncing to disk in this cgroup and descendants (Only available with cgroups v1)." - unit: By - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: false - container.memory.total_mapped_file: - enabled: false - description: "Indicates the amount of memory mapped by the processes in the control group and descendant groups (Only available with cgroups v1)." - unit: By - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: false - container.memory.total_pgpgin: - enabled: false - description: "Number of pages read from disk by the cgroup and descendant groups (Only available with cgroups v1)." - unit: "{operations}" - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: true - container.memory.total_pgpgout: - enabled: false - description: "Number of pages written to disk by the cgroup and descendant groups (Only available with cgroups v1)." - unit: "{operations}" - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: true - container.memory.total_pgfault: - enabled: false - description: "Indicate the number of times that a process of the cgroup (or descendant cgroups) triggered a page fault (Only available with cgroups v1)." - unit: "{faults}" - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: true - container.memory.total_pgmajfault: - enabled: false - description: "Indicate the number of times that a process of the cgroup (or descendant cgroups) triggered a major fault (Only available with cgroups v1)." 
- unit: "{faults}" - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: true - container.memory.total_inactive_anon: - enabled: false - description: "The amount of anonymous memory that has been identified as inactive by the kernel. Includes descendant cgroups (Only available with cgroups v1)." - unit: By - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: false - container.memory.total_active_anon: - enabled: false - description: "The amount of anonymous memory that has been identified as active by the kernel. Includes descendant cgroups (Only available with cgroups v1)." - unit: By - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: false - container.memory.total_inactive_file: - enabled: false - description: "Cache memory that has been identified as inactive by the kernel. Includes descendant cgroups (Only available with cgroups v1)." - extended_documentation: "[More docs](https://docs.docker.com/config/containers/runmetrics/)." - unit: By - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: false - container.memory.total_active_file: - enabled: false - description: "Cache memory that has been identified as active by the kernel. Includes descendant cgroups (Only available with cgroups v1)." - extended_documentation: "[More docs](https://docs.docker.com/config/containers/runmetrics/)." - unit: By - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: false - container.memory.total_unevictable: - enabled: false - description: "The amount of memory that cannot be reclaimed. Includes descendant cgroups (Only available with cgroups v1)." - unit: By - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: false - container.memory.anon: - enabled: false - description: "Amount of memory used in anonymous mappings such as brk(), sbrk(), and mmap(MAP_ANONYMOUS) (Only available with cgroups v2)." 
- extended_documentation: "[More docs](https://www.kernel.org/doc/Documentation/cgroup-v2.txt)" - unit: By - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: false - container.memory.file: - enabled: true - description: "Amount of memory used to cache filesystem data, including tmpfs and shared memory (Only available with cgroups v2)." - extended_documentation: "[More docs](https://www.kernel.org/doc/Documentation/cgroup-v2.txt)" - unit: By - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: false - container.memory.fails: - enabled: false - description: "Number of times the memory limit was hit." - unit: "{fails}" - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: true - - - # BlockIO (cgroup v1) and IO (cgroup v2) controllers - container.blockio.io_merged_recursive: - enabled: false - description: "Number of bios/requests merged into requests belonging to this cgroup and its descendant cgroups (Only available with cgroups v1)." - extended_documentation: "[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt)." - unit: "{operations}" - sum: - value_type: int - monotonic: true - aggregation_temporality: cumulative - attributes: - - device_major - - device_minor - - operation - container.blockio.io_queued_recursive: - enabled: false - description: "Number of requests queued up for this cgroup and its descendant cgroups (Only available with cgroups v1)." - extended_documentation: "[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt)." - unit: "{operations}" - sum: - value_type: int - monotonic: true - aggregation_temporality: cumulative - attributes: - - device_major - - device_minor - - operation - container.blockio.io_service_bytes_recursive: - enabled: true - description: "Number of bytes transferred to/from the disk by the group and descendant groups." 
- extended_documentation: >- - More docs - for [cgroups v1](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt) - and [cgroups v2](https://www.kernel.org/doc/Documentation/cgroup-v2.txt) - unit: By - sum: - value_type: int - monotonic: true - aggregation_temporality: cumulative - attributes: - - device_major - - device_minor - - operation - container.blockio.io_service_time_recursive: - enabled: false - description: "Total amount of time in nanoseconds between request dispatch and request completion for the IOs done by this cgroup and descendant cgroups (Only available with cgroups v1)." - extended_documentation: "[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt)." - unit: ns - sum: - value_type: int - monotonic: true - aggregation_temporality: cumulative - attributes: - - device_major - - device_minor - - operation - container.blockio.io_serviced_recursive: - enabled: false - description: "Number of IOs (bio) issued to the disk by the group and descendant groups (Only available with cgroups v1)." - extended_documentation: "[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt)." - unit: "{operations}" - sum: - value_type: int - monotonic: true - aggregation_temporality: cumulative - attributes: - - device_major - - device_minor - - operation - container.blockio.io_time_recursive: - enabled: false - description: "Disk time allocated to cgroup (and descendant cgroups) per device in milliseconds (Only available with cgroups v1)." - extended_documentation: "[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt)." 
- unit: ms - sum: - value_type: int - monotonic: true - aggregation_temporality: cumulative - attributes: - - device_major - - device_minor - - operation - container.blockio.io_wait_time_recursive: - enabled: false - description: "Total amount of time the IOs for this cgroup (and descendant cgroups) spent waiting in the scheduler queues for service (Only available with cgroups v1)." - extended_documentation: "[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt)." - unit: ns - sum: - value_type: int - monotonic: true - aggregation_temporality: cumulative - attributes: - - device_major - - device_minor - - operation - container.blockio.sectors_recursive: - enabled: false - description: "Number of sectors transferred to/from disk by the group and descendant groups (Only available with cgroups v1)." - extended_documentation: "[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt)." - unit: "{sectors}" - sum: - value_type: int - monotonic: true - aggregation_temporality: cumulative - attributes: - - device_major - - device_minor - - operation - - # Network - container.network.io.usage.rx_bytes: - enabled: true - description: "Bytes received by the container." - unit: By - sum: - value_type: int - monotonic: true - aggregation_temporality: cumulative - attributes: - - interface - container.network.io.usage.tx_bytes: - enabled: true - description: "Bytes sent." - unit: By - sum: - value_type: int - monotonic: true - aggregation_temporality: cumulative - attributes: - - interface - container.network.io.usage.rx_dropped: - enabled: true - description: "Incoming packets dropped." - unit: "{packets}" - sum: - value_type: int - monotonic: true - aggregation_temporality: cumulative - attributes: - - interface - container.network.io.usage.tx_dropped: - enabled: true - description: "Outgoing packets dropped." 
- unit: "{packets}" - sum: - value_type: int - monotonic: true - aggregation_temporality: cumulative - attributes: - - interface - container.network.io.usage.rx_errors: - enabled: false - description: "Received errors." - unit: "{errors}" - sum: - value_type: int - monotonic: true - aggregation_temporality: cumulative - attributes: - - interface - container.network.io.usage.tx_errors: - enabled: false - description: "Sent errors." - unit: "{errors}" - sum: - value_type: int - monotonic: true - aggregation_temporality: cumulative - attributes: - - interface - container.network.io.usage.rx_packets: - enabled: false - description: "Packets received." - unit: "{packets}" - sum: - value_type: int - monotonic: true - aggregation_temporality: cumulative - attributes: - - interface - container.network.io.usage.tx_packets: - enabled: false - description: "Packets sent." - unit: "{packets}" - sum: - value_type: int - monotonic: true - aggregation_temporality: cumulative - attributes: - - interface - - # Pids - container.pids.count: - enabled: false - description: "Number of pids in the container's cgroup." - extended_documentation: "It requires docker API 1.23 or higher and kernel version >= 4.3 with pids cgroup supported. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/pids.txt)" - unit: "{pids}" - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: false - - container.pids.limit: - enabled: false - description: "Maximum number of pids in the container's cgroup." - extended_documentation: "It requires docker API 1.23 or higher and kernel version >= 4.3 with pids cgroup supported. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/pids.txt)" - unit: "{pids}" - sum: - value_type: int - aggregation_temporality: cumulative - monotonic: false - - # Base - container.uptime: - enabled: false - description: "Time elapsed since container start time." 
- unit: s - gauge: - value_type: double - - # Container - container.restarts: - enabled: false - description: "Number of restarts for the container." - unit: "{restarts}" - sum: - value_type: int - monotonic: true - aggregation_temporality: cumulative diff --git a/receiver/dockerstatsreceiver/metric_helper.go b/receiver/dockerstatsreceiver/metric_helper.go deleted file mode 100644 index d4070f196959..000000000000 --- a/receiver/dockerstatsreceiver/metric_helper.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package dockerstatsreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/dockerstatsreceiver" - -import ( - "fmt" - "strconv" - "strings" - - dtypes "github.com/docker/docker/api/types" - ctypes "github.com/docker/docker/api/types/container" -) - -const nanosInASecond = 1e9 - -// Following functions has been copied from: calculateCPUPercentUnix(), calculateMemUsageUnixNoCache(), calculateMemPercentUnixNoCache() -// https://github.com/docker/cli/blob/a2e9ed3b874fccc177b9349f3b0277612403934f/cli/command/container/stats_helpers.go - -// Copyright 2012-2017 Docker, Inc. -// This product includes software developed at Docker, Inc. (https://www.docker.com). -// The following is courtesy of our legal counsel: -// Use and transfer of Docker may be subject to certain restrictions by the -// United States and other governments. -// It is your responsibility to ensure that your use and/or transfer does not -// violate applicable laws. -// For more information, please see https://www.bis.doc.gov -// See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. 
- -func calculateCPUPercent(previous *dtypes.CPUStats, v *dtypes.CPUStats) float64 { - var ( - cpuPercent = 0.0 - // calculate the change for the cpu usage of the container in between readings - cpuDelta = float64(v.CPUUsage.TotalUsage) - float64(previous.CPUUsage.TotalUsage) - // calculate the change for the entire system between readings - systemDelta = float64(v.SystemUsage) - float64(previous.SystemUsage) - onlineCPUs = float64(v.OnlineCPUs) - ) - - if onlineCPUs == 0.0 { - onlineCPUs = float64(len(v.CPUUsage.PercpuUsage)) - } - if systemDelta > 0.0 && cpuDelta > 0.0 { - cpuPercent = (cpuDelta / systemDelta) * onlineCPUs * 100.0 - } - return cpuPercent -} - -// calculateMemUsageNoCache calculate memory usage of the container. -// Cache is intentionally excluded to avoid misinterpretation of the output. -// -// On cgroup v1 host, the result is `mem.Usage - mem.Stats["total_inactive_file"]` . -// On cgroup v2 host, the result is `mem.Usage - mem.Stats["inactive_file"] `. -// -// This definition is consistent with cadvisor and containerd/CRI. -// * https://github.com/google/cadvisor/commit/307d1b1cb320fef66fab02db749f07a459245451 -// * https://github.com/containerd/cri/commit/6b8846cdf8b8c98c1d965313d66bc8489166059a -// -// On Docker 19.03 and older, the result was `mem.Usage - mem.Stats["cache"]`. -// See https://github.com/moby/moby/issues/40727 for the background. 
-func calculateMemUsageNoCache(memoryStats *dtypes.MemoryStats) uint64 { - // cgroup v1 - if v, isCgroup1 := memoryStats.Stats["total_inactive_file"]; isCgroup1 && v < memoryStats.Usage { - return memoryStats.Usage - v - } - // cgroup v2 - if v := memoryStats.Stats["inactive_file"]; v < memoryStats.Usage { - return memoryStats.Usage - v - } - return memoryStats.Usage -} - -func calculateMemoryPercent(limit uint64, usedNoCache uint64) float64 { - // MemoryStats.Limit will never be 0 unless the container is not running and we haven't - // got any data from cgroup - if limit != 0 { - return float64(usedNoCache) / float64(limit) * 100.0 - } - return 0.0 -} - -// calculateCPULimit calculate the number of cpus assigned to a container. -// -// Calculation is based on 3 alternatives by the following order: -// - nanocpus: if set by i.e docker run -cpus=2 -// - cpusetCpus: if set by i.e docker run -docker run -cpuset-cpus="0,2" -// - cpuquota: if set by i.e docker run -cpu-quota=50000 -// -// See https://docs.docker.com/config/containers/resource_constraints/#configure-the-default-cfs-scheduler for background. -func calculateCPULimit(hostConfig *ctypes.HostConfig) (float64, error) { - var cpuLimit float64 - var err error - - switch { - case hostConfig.NanoCPUs > 0: - cpuLimit = float64(hostConfig.NanoCPUs) / nanosInASecond - case hostConfig.CpusetCpus != "": - cpuLimit, err = parseCPUSet(hostConfig.CpusetCpus) - if err != nil { - return cpuLimit, err - } - case hostConfig.CPUQuota > 0: - period := hostConfig.CPUPeriod - if period == 0 { - period = 100000 // Default CFS Period - } - cpuLimit = float64(hostConfig.CPUQuota) / float64(period) - } - return cpuLimit, nil -} - -// parseCPUSet helper function to decompose -cpuset-cpus value into number os cpus. 
-func parseCPUSet(line string) (float64, error) { - var numCPUs uint64 - - lineSlice := strings.Split(line, ",") - for _, l := range lineSlice { - lineParts := strings.Split(l, "-") - if len(lineParts) == 2 { - p0, err0 := strconv.Atoi(lineParts[0]) - if err0 != nil { - return 0, fmt.Errorf("invalid -cpuset-cpus value: %w", err0) - } - p1, err1 := strconv.Atoi(lineParts[1]) - if err1 != nil { - return 0, fmt.Errorf("invalid -cpuset-cpus value: %w", err1) - } - numCPUs += uint64(p1 - p0 + 1) - } else if len(lineParts) == 1 { - numCPUs++ - } - } - return float64(numCPUs), nil -} diff --git a/receiver/dockerstatsreceiver/metric_helper_test.go b/receiver/dockerstatsreceiver/metric_helper_test.go deleted file mode 100644 index 04402519c714..000000000000 --- a/receiver/dockerstatsreceiver/metric_helper_test.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package dockerstatsreceiver - -import ( - "errors" - "testing" - - ctypes "github.com/docker/docker/api/types/container" - "github.com/stretchr/testify/assert" -) - -func Test_calculateCPULimit1(t *testing.T) { - tests := []struct { - name string - args *ctypes.HostConfig - want float64 - err error - }{ - { - "Test CPULimit", - &ctypes.HostConfig{ - Resources: ctypes.Resources{ - NanoCPUs: 2500000000, - }, - }, - 2.5, - nil, - }, - { - "Test CPUSetCpu", - &ctypes.HostConfig{ - Resources: ctypes.Resources{ - CpusetCpus: "0-2", - }, - }, - 3, - nil, - }, - { - "Test CPUQuota", - &ctypes.HostConfig{ - Resources: ctypes.Resources{ - CPUQuota: 50000, - }, - }, - 0.5, - nil, - }, - { - "Test CPUQuota Custom Period", - &ctypes.HostConfig{ - Resources: ctypes.Resources{ - CPUQuota: 300000, - CPUPeriod: 200000, - }, - }, - 1.5, - nil, - }, - { - "Test Default", - &ctypes.HostConfig{ - Resources: ctypes.Resources{ - NanoCPUs: 1800000000, - CpusetCpus: "0-1", - CPUQuota: 400000, - }, - }, - 1.8, - nil, - }, - { - "Test No Values", - &ctypes.HostConfig{ - 
Resources: ctypes.Resources{}, - }, - 0, - nil, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - want, err := calculateCPULimit(tt.args) - assert.Equalf(t, tt.want, want, "calculateCPULimit(%v)", tt.args) - assert.Equalf(t, tt.err, err, "calculateCPULimit(%v)", tt.args) - }) - } -} - -func Test_parseCPUSet(t *testing.T) { - tests := []struct { - input string - expected float64 - err error - }{ - {"0,2", 2, nil}, - {"0-2", 3, nil}, - {"0-2,4", 4, nil}, - {"0-2,4-5", 5, nil}, - {"a-b", 0, errors.New("invalid -cpuset-cpus value: strconv.Atoi: parsing \"a\": invalid syntax")}, - {"", 1, nil}, - } - - for _, test := range tests { - result, err := parseCPUSet(test.input) - - if err != nil && test.err != nil { - if err.Error() != test.err.Error() { - t.Errorf("parseCPUSet(%s) returned error %v, expected %v", test.input, err, test.err) - } - } else if !errors.Is(err, test.err) { - t.Errorf("parseCPUSet(%s) returned error %v, expected %v", test.input, err, test.err) - } - - if result != test.expected { - t.Errorf("parseCPUSet(%s) returned %f, expected %f", test.input, result, test.expected) - } - } -} diff --git a/receiver/dockerstatsreceiver/receiver.go b/receiver/dockerstatsreceiver/receiver.go deleted file mode 100644 index 34fe519de302..000000000000 --- a/receiver/dockerstatsreceiver/receiver.go +++ /dev/null @@ -1,312 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package dockerstatsreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/dockerstatsreceiver" - -import ( - "context" - "fmt" - "strconv" - "strings" - "sync" - "time" - - "github.com/docker/docker/api/types" - dtypes "github.com/docker/docker/api/types" - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/pdata/pcommon" - "go.opentelemetry.io/collector/pdata/pmetric" - "go.opentelemetry.io/collector/receiver" - "go.opentelemetry.io/collector/receiver/scrapererror" - 
"go.uber.org/multierr" - - "github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker" - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/dockerstatsreceiver/internal/metadata" -) - -var ( - defaultDockerAPIVersion = "1.25" - minimumRequiredDockerAPIVersion = docker.MustNewAPIVersion(defaultDockerAPIVersion) -) - -type resultV2 struct { - stats *dtypes.StatsJSON - container *docker.Container - err error -} - -type metricsReceiver struct { - config *Config - settings receiver.CreateSettings - client *docker.Client - mb *metadata.MetricsBuilder - cancel context.CancelFunc -} - -func newMetricsReceiver(set receiver.CreateSettings, config *Config) *metricsReceiver { - return &metricsReceiver{ - config: config, - settings: set, - mb: metadata.NewMetricsBuilder(config.MetricsBuilderConfig, set), - } -} - -func (r *metricsReceiver) start(ctx context.Context, _ component.Host) error { - dConfig, err := docker.NewConfig(r.config.Endpoint, r.config.Timeout, r.config.ExcludedImages, r.config.DockerAPIVersion) - if err != nil { - return err - } - - r.client, err = docker.NewDockerClient(dConfig, r.settings.Logger) - if err != nil { - return err - } - - if err = r.client.LoadContainerList(ctx); err != nil { - return err - } - - cctx, cancel := context.WithCancel(ctx) - r.cancel = cancel - - go r.client.ContainerEventLoop(cctx) - return nil -} - -func (r *metricsReceiver) shutdown(context.Context) error { - if r.cancel != nil { - r.cancel() - } - return nil -} - -func (r *metricsReceiver) scrapeV2(ctx context.Context) (pmetric.Metrics, error) { - containers := r.client.Containers() - results := make(chan resultV2, len(containers)) - - wg := &sync.WaitGroup{} - wg.Add(len(containers)) - for _, container := range containers { - go func(c docker.Container) { - defer wg.Done() - statsJSON, err := r.client.FetchContainerStatsAsJSON(ctx, c) - if err != nil { - results <- resultV2{nil, &c, err} - return - } - - results <- resultV2{ - stats: statsJSON, 
- container: &c, - err: nil} - }(container) - } - - wg.Wait() - close(results) - - var errs error - - now := pcommon.NewTimestampFromTime(time.Now()) - for res := range results { - if res.err != nil { - // Don't know the number of failed stats, but one container fetch is a partial error. - errs = multierr.Append(errs, scrapererror.NewPartialScrapeError(res.err, 0)) - continue - } - if err := r.recordContainerStats(now, res.stats, res.container); err != nil { - errs = multierr.Append(errs, err) - } - } - - return r.mb.Emit(), errs -} - -func (r *metricsReceiver) recordContainerStats(now pcommon.Timestamp, containerStats *dtypes.StatsJSON, container *docker.Container) error { - var errs error - r.recordCPUMetrics(now, &containerStats.CPUStats, &containerStats.PreCPUStats) - r.recordMemoryMetrics(now, &containerStats.MemoryStats) - r.recordBlkioMetrics(now, &containerStats.BlkioStats) - r.recordNetworkMetrics(now, &containerStats.Networks) - r.recordPidsMetrics(now, &containerStats.PidsStats) - if err := r.recordBaseMetrics(now, container.ContainerJSONBase); err != nil { - errs = multierr.Append(errs, err) - } - if err := r.recordHostConfigMetrics(now, container.ContainerJSON); err != nil { - errs = multierr.Append(errs, err) - } - r.mb.RecordContainerRestartsDataPoint(now, int64(container.RestartCount)) - - // Always-present resource attrs + the user-configured resource attrs - rb := r.mb.NewResourceBuilder() - rb.SetContainerRuntime("docker") - rb.SetContainerHostname(container.Config.Hostname) - rb.SetContainerID(container.ID) - rb.SetContainerImageName(container.Config.Image) - rb.SetContainerName(strings.TrimPrefix(container.Name, "/")) - rb.SetContainerImageID(container.Image) - rb.SetContainerCommandLine(strings.Join(container.Config.Cmd, " ")) - resource := rb.Emit() - - for k, label := range r.config.EnvVarsToMetricLabels { - if v := container.EnvMap[k]; v != "" { - resource.Attributes().PutStr(label, v) - } - } - for k, label := range 
r.config.ContainerLabelsToMetricLabels { - if v := container.Config.Labels[k]; v != "" { - resource.Attributes().PutStr(label, v) - } - } - - r.mb.EmitForResource(metadata.WithResource(resource)) - return errs -} - -func (r *metricsReceiver) recordMemoryMetrics(now pcommon.Timestamp, memoryStats *dtypes.MemoryStats) { - totalUsage := calculateMemUsageNoCache(memoryStats) - r.mb.RecordContainerMemoryUsageTotalDataPoint(now, int64(totalUsage)) - - r.mb.RecordContainerMemoryUsageLimitDataPoint(now, int64(memoryStats.Limit)) - - r.mb.RecordContainerMemoryPercentDataPoint(now, calculateMemoryPercent(memoryStats.Limit, totalUsage)) - - r.mb.RecordContainerMemoryUsageMaxDataPoint(now, int64(memoryStats.MaxUsage)) - - r.mb.RecordContainerMemoryFailsDataPoint(now, int64(memoryStats.Failcnt)) - - recorders := map[string]func(pcommon.Timestamp, int64){ - "cache": r.mb.RecordContainerMemoryCacheDataPoint, - "total_cache": r.mb.RecordContainerMemoryTotalCacheDataPoint, - "rss": r.mb.RecordContainerMemoryRssDataPoint, - "total_rss": r.mb.RecordContainerMemoryTotalRssDataPoint, - "rss_huge": r.mb.RecordContainerMemoryRssHugeDataPoint, - "total_rss_huge": r.mb.RecordContainerMemoryTotalRssHugeDataPoint, - "dirty": r.mb.RecordContainerMemoryDirtyDataPoint, - "total_dirty": r.mb.RecordContainerMemoryTotalDirtyDataPoint, - "writeback": r.mb.RecordContainerMemoryWritebackDataPoint, - "total_writeback": r.mb.RecordContainerMemoryTotalWritebackDataPoint, - "mapped_file": r.mb.RecordContainerMemoryMappedFileDataPoint, - "total_mapped_file": r.mb.RecordContainerMemoryTotalMappedFileDataPoint, - "pgpgin": r.mb.RecordContainerMemoryPgpginDataPoint, - "total_pgpgin": r.mb.RecordContainerMemoryTotalPgpginDataPoint, - "pgpgout": r.mb.RecordContainerMemoryPgpgoutDataPoint, - "total_pgpgout": r.mb.RecordContainerMemoryTotalPgpgoutDataPoint, - "pgfault": r.mb.RecordContainerMemoryPgfaultDataPoint, - "total_pgfault": r.mb.RecordContainerMemoryTotalPgfaultDataPoint, - "pgmajfault": 
r.mb.RecordContainerMemoryPgmajfaultDataPoint, - "total_pgmajfault": r.mb.RecordContainerMemoryTotalPgmajfaultDataPoint, - "inactive_anon": r.mb.RecordContainerMemoryInactiveAnonDataPoint, - "total_inactive_anon": r.mb.RecordContainerMemoryTotalInactiveAnonDataPoint, - "active_anon": r.mb.RecordContainerMemoryActiveAnonDataPoint, - "total_active_anon": r.mb.RecordContainerMemoryTotalActiveAnonDataPoint, - "inactive_file": r.mb.RecordContainerMemoryInactiveFileDataPoint, - "total_inactive_file": r.mb.RecordContainerMemoryTotalInactiveFileDataPoint, - "active_file": r.mb.RecordContainerMemoryActiveFileDataPoint, - "total_active_file": r.mb.RecordContainerMemoryTotalActiveFileDataPoint, - "unevictable": r.mb.RecordContainerMemoryUnevictableDataPoint, - "total_unevictable": r.mb.RecordContainerMemoryTotalUnevictableDataPoint, - "hierarchical_memory_limit": r.mb.RecordContainerMemoryHierarchicalMemoryLimitDataPoint, - "hierarchical_memsw_limit": r.mb.RecordContainerMemoryHierarchicalMemswLimitDataPoint, - "anon": r.mb.RecordContainerMemoryAnonDataPoint, - "file": r.mb.RecordContainerMemoryFileDataPoint, - } - - for name, val := range memoryStats.Stats { - if recorder, ok := recorders[name]; ok { - recorder(now, int64(val)) - } - } -} - -type blkioRecorder func(now pcommon.Timestamp, val int64, devMaj string, devMin string, operation string) - -func (r *metricsReceiver) recordBlkioMetrics(now pcommon.Timestamp, blkioStats *dtypes.BlkioStats) { - recordSingleBlkioStat(now, blkioStats.IoMergedRecursive, r.mb.RecordContainerBlockioIoMergedRecursiveDataPoint) - recordSingleBlkioStat(now, blkioStats.IoQueuedRecursive, r.mb.RecordContainerBlockioIoQueuedRecursiveDataPoint) - recordSingleBlkioStat(now, blkioStats.IoServiceBytesRecursive, r.mb.RecordContainerBlockioIoServiceBytesRecursiveDataPoint) - recordSingleBlkioStat(now, blkioStats.IoServiceTimeRecursive, r.mb.RecordContainerBlockioIoServiceTimeRecursiveDataPoint) - recordSingleBlkioStat(now, 
blkioStats.IoServicedRecursive, r.mb.RecordContainerBlockioIoServicedRecursiveDataPoint) - recordSingleBlkioStat(now, blkioStats.IoTimeRecursive, r.mb.RecordContainerBlockioIoTimeRecursiveDataPoint) - recordSingleBlkioStat(now, blkioStats.IoWaitTimeRecursive, r.mb.RecordContainerBlockioIoWaitTimeRecursiveDataPoint) - recordSingleBlkioStat(now, blkioStats.SectorsRecursive, r.mb.RecordContainerBlockioSectorsRecursiveDataPoint) -} - -func recordSingleBlkioStat(now pcommon.Timestamp, statEntries []dtypes.BlkioStatEntry, recorder blkioRecorder) { - for _, stat := range statEntries { - recorder( - now, - int64(stat.Value), - strconv.FormatUint(stat.Major, 10), - strconv.FormatUint(stat.Minor, 10), - strings.ToLower(stat.Op)) - } -} - -func (r *metricsReceiver) recordNetworkMetrics(now pcommon.Timestamp, networks *map[string]dtypes.NetworkStats) { - if networks == nil || *networks == nil { - return - } - - for netInterface, stats := range *networks { - r.mb.RecordContainerNetworkIoUsageRxBytesDataPoint(now, int64(stats.RxBytes), netInterface) - r.mb.RecordContainerNetworkIoUsageTxBytesDataPoint(now, int64(stats.TxBytes), netInterface) - r.mb.RecordContainerNetworkIoUsageRxDroppedDataPoint(now, int64(stats.RxDropped), netInterface) - r.mb.RecordContainerNetworkIoUsageTxDroppedDataPoint(now, int64(stats.TxDropped), netInterface) - r.mb.RecordContainerNetworkIoUsageRxPacketsDataPoint(now, int64(stats.RxPackets), netInterface) - r.mb.RecordContainerNetworkIoUsageTxPacketsDataPoint(now, int64(stats.TxPackets), netInterface) - r.mb.RecordContainerNetworkIoUsageRxErrorsDataPoint(now, int64(stats.RxErrors), netInterface) - r.mb.RecordContainerNetworkIoUsageTxErrorsDataPoint(now, int64(stats.TxErrors), netInterface) - } -} - -func (r *metricsReceiver) recordCPUMetrics(now pcommon.Timestamp, cpuStats *dtypes.CPUStats, prevStats *dtypes.CPUStats) { - r.mb.RecordContainerCPUUsageSystemDataPoint(now, int64(cpuStats.SystemUsage)) - r.mb.RecordContainerCPUUsageTotalDataPoint(now, 
int64(cpuStats.CPUUsage.TotalUsage)) - r.mb.RecordContainerCPUUsageKernelmodeDataPoint(now, int64(cpuStats.CPUUsage.UsageInKernelmode)) - r.mb.RecordContainerCPUUsageUsermodeDataPoint(now, int64(cpuStats.CPUUsage.UsageInUsermode)) - r.mb.RecordContainerCPUThrottlingDataThrottledPeriodsDataPoint(now, int64(cpuStats.ThrottlingData.ThrottledPeriods)) - r.mb.RecordContainerCPUThrottlingDataPeriodsDataPoint(now, int64(cpuStats.ThrottlingData.Periods)) - r.mb.RecordContainerCPUThrottlingDataThrottledTimeDataPoint(now, int64(cpuStats.ThrottlingData.ThrottledTime)) - r.mb.RecordContainerCPUUtilizationDataPoint(now, calculateCPUPercent(prevStats, cpuStats)) - r.mb.RecordContainerCPULogicalCountDataPoint(now, int64(cpuStats.OnlineCPUs)) - - for coreNum, v := range cpuStats.CPUUsage.PercpuUsage { - r.mb.RecordContainerCPUUsagePercpuDataPoint(now, int64(v), fmt.Sprintf("cpu%s", strconv.Itoa(coreNum))) - } -} - -func (r *metricsReceiver) recordPidsMetrics(now pcommon.Timestamp, pidsStats *dtypes.PidsStats) { - // pidsStats are available when kernel version is >= 4.3 and pids_cgroup is supported, it is empty otherwise. 
- if pidsStats.Current != 0 { - r.mb.RecordContainerPidsCountDataPoint(now, int64(pidsStats.Current)) - if pidsStats.Limit != 0 { - r.mb.RecordContainerPidsLimitDataPoint(now, int64(pidsStats.Limit)) - } - } -} - -func (r *metricsReceiver) recordBaseMetrics(now pcommon.Timestamp, base *types.ContainerJSONBase) error { - t, err := time.Parse(time.RFC3339, base.State.StartedAt) - if err != nil { - // value not available or invalid - return scrapererror.NewPartialScrapeError(fmt.Errorf("error retrieving container.uptime from Container.State.StartedAt: %w", err), 1) - } - if v := now.AsTime().Sub(t); v > 0 { - r.mb.RecordContainerUptimeDataPoint(now, v.Seconds()) - } - return nil -} - -func (r *metricsReceiver) recordHostConfigMetrics(now pcommon.Timestamp, containerJSON *dtypes.ContainerJSON) error { - r.mb.RecordContainerCPUSharesDataPoint(now, containerJSON.HostConfig.CPUShares) - - cpuLimit, err := calculateCPULimit(containerJSON.HostConfig) - if err != nil { - return scrapererror.NewPartialScrapeError(fmt.Errorf("error retrieving container.cpu.limit: %w", err), 1) - } - if cpuLimit > 0 { - r.mb.RecordContainerCPULimitDataPoint(now, cpuLimit) - } - return nil -} diff --git a/receiver/dockerstatsreceiver/receiver_test.go b/receiver/dockerstatsreceiver/receiver_test.go deleted file mode 100644 index 70a793413e36..000000000000 --- a/receiver/dockerstatsreceiver/receiver_test.go +++ /dev/null @@ -1,433 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -//go:build !windows - -// TODO review if tests should succeed on Windows - -package dockerstatsreceiver - -import ( - "context" - "net/http" - "net/http/httptest" - "os" - "path/filepath" - "testing" - "time" - - "github.com/docker/docker/api/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/component/componenttest" - "go.opentelemetry.io/collector/pdata/pcommon" - 
"go.opentelemetry.io/collector/receiver/receivertest" - "go.opentelemetry.io/collector/receiver/scraperhelper" - - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden" - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/pmetrictest" - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/dockerstatsreceiver/internal/metadata" -) - -var mockFolder = filepath.Join("testdata", "mock") - -var ( - metricEnabled = metadata.MetricConfig{Enabled: true} - allMetricsEnabled = metadata.MetricsConfig{ - ContainerBlockioIoMergedRecursive: metricEnabled, - ContainerBlockioIoQueuedRecursive: metricEnabled, - ContainerBlockioIoServiceBytesRecursive: metricEnabled, - ContainerBlockioIoServiceTimeRecursive: metricEnabled, - ContainerBlockioIoServicedRecursive: metricEnabled, - ContainerBlockioIoTimeRecursive: metricEnabled, - ContainerBlockioIoWaitTimeRecursive: metricEnabled, - ContainerBlockioSectorsRecursive: metricEnabled, - ContainerCPULimit: metricEnabled, - ContainerCPUShares: metricEnabled, - ContainerCPUUtilization: metricEnabled, - ContainerCPUThrottlingDataPeriods: metricEnabled, - ContainerCPUThrottlingDataThrottledPeriods: metricEnabled, - ContainerCPUThrottlingDataThrottledTime: metricEnabled, - ContainerCPUUsageKernelmode: metricEnabled, - ContainerCPUUsagePercpu: metricEnabled, - ContainerCPUUsageSystem: metricEnabled, - ContainerCPUUsageTotal: metricEnabled, - ContainerCPUUsageUsermode: metricEnabled, - ContainerCPULogicalCount: metricEnabled, - ContainerMemoryActiveAnon: metricEnabled, - ContainerMemoryActiveFile: metricEnabled, - ContainerMemoryCache: metricEnabled, - ContainerMemoryDirty: metricEnabled, - ContainerMemoryHierarchicalMemoryLimit: metricEnabled, - ContainerMemoryHierarchicalMemswLimit: metricEnabled, - ContainerMemoryInactiveAnon: metricEnabled, - ContainerMemoryInactiveFile: metricEnabled, - ContainerMemoryMappedFile: metricEnabled, - ContainerMemoryPercent: metricEnabled, - 
ContainerMemoryPgfault: metricEnabled, - ContainerMemoryPgmajfault: metricEnabled, - ContainerMemoryPgpgin: metricEnabled, - ContainerMemoryPgpgout: metricEnabled, - ContainerMemoryRss: metricEnabled, - ContainerMemoryRssHuge: metricEnabled, - ContainerMemoryTotalActiveAnon: metricEnabled, - ContainerMemoryTotalActiveFile: metricEnabled, - ContainerMemoryTotalCache: metricEnabled, - ContainerMemoryTotalDirty: metricEnabled, - ContainerMemoryTotalInactiveAnon: metricEnabled, - ContainerMemoryTotalInactiveFile: metricEnabled, - ContainerMemoryTotalMappedFile: metricEnabled, - ContainerMemoryTotalPgfault: metricEnabled, - ContainerMemoryTotalPgmajfault: metricEnabled, - ContainerMemoryTotalPgpgin: metricEnabled, - ContainerMemoryTotalPgpgout: metricEnabled, - ContainerMemoryTotalRss: metricEnabled, - ContainerMemoryTotalRssHuge: metricEnabled, - ContainerMemoryTotalUnevictable: metricEnabled, - ContainerMemoryTotalWriteback: metricEnabled, - ContainerMemoryUnevictable: metricEnabled, - ContainerMemoryUsageLimit: metricEnabled, - ContainerMemoryUsageMax: metricEnabled, - ContainerMemoryUsageTotal: metricEnabled, - ContainerMemoryWriteback: metricEnabled, - ContainerMemoryFails: metricEnabled, - ContainerNetworkIoUsageRxBytes: metricEnabled, - ContainerNetworkIoUsageRxDropped: metricEnabled, - ContainerNetworkIoUsageRxErrors: metricEnabled, - ContainerNetworkIoUsageRxPackets: metricEnabled, - ContainerNetworkIoUsageTxBytes: metricEnabled, - ContainerNetworkIoUsageTxDropped: metricEnabled, - ContainerNetworkIoUsageTxErrors: metricEnabled, - ContainerNetworkIoUsageTxPackets: metricEnabled, - ContainerPidsCount: metricEnabled, - ContainerPidsLimit: metricEnabled, - ContainerUptime: metricEnabled, - ContainerRestarts: metricEnabled, - ContainerMemoryAnon: metricEnabled, - ContainerMemoryFile: metricEnabled, - } - - resourceAttributeEnabled = metadata.ResourceAttributeConfig{Enabled: true} - allResourceAttributesEnabled = metadata.ResourceAttributesConfig{ - 
ContainerCommandLine: resourceAttributeEnabled, - ContainerHostname: resourceAttributeEnabled, - ContainerID: resourceAttributeEnabled, - ContainerImageID: resourceAttributeEnabled, - ContainerImageName: resourceAttributeEnabled, - ContainerName: resourceAttributeEnabled, - ContainerRuntime: resourceAttributeEnabled, - } -) - -func TestNewReceiver(t *testing.T) { - cfg := &Config{ - ControllerConfig: scraperhelper.ControllerConfig{ - CollectionInterval: 1 * time.Second, - }, - Endpoint: "unix:///run/some.sock", - DockerAPIVersion: defaultDockerAPIVersion, - } - mr := newMetricsReceiver(receivertest.NewNopCreateSettings(), cfg) - assert.NotNil(t, mr) -} - -func TestErrorsInStart(t *testing.T) { - unreachable := "unix:///not/a/thing.sock" - cfg := &Config{ - ControllerConfig: scraperhelper.ControllerConfig{ - CollectionInterval: 1 * time.Second, - }, - Endpoint: unreachable, - DockerAPIVersion: defaultDockerAPIVersion, - } - recv := newMetricsReceiver(receivertest.NewNopCreateSettings(), cfg) - assert.NotNil(t, recv) - - cfg.Endpoint = "..not/a/valid/endpoint" - err := recv.start(context.Background(), componenttest.NewNopHost()) - assert.Error(t, err) - assert.Contains(t, err.Error(), "unable to parse docker host") - - cfg.Endpoint = unreachable - err = recv.start(context.Background(), componenttest.NewNopHost()) - assert.Error(t, err) - assert.Contains(t, err.Error(), "context deadline exceeded") -} - -func TestScrapeV2(t *testing.T) { - - testCases := []struct { - desc string - expectedMetricsFile string - mockDockerEngine func(t *testing.T) *httptest.Server - cfgBuilder *testConfigBuilder - }{ - { - desc: "scrapeV2_single_container", - expectedMetricsFile: filepath.Join(mockFolder, "single_container", "expected_metrics.yaml"), - mockDockerEngine: func(t *testing.T) *httptest.Server { - t.Helper() - containerID := "10b703fb312b25e8368ab5a3bce3a1610d1cee5d71a94920f1a7adbc5b0cb326" - mockServer, err := dockerMockServer(&map[string]string{ - "/v1.25/containers/json": 
filepath.Join(mockFolder, "single_container", "containers.json"), - "/v1.25/containers/" + containerID + "/json": filepath.Join(mockFolder, "single_container", "container.json"), - "/v1.25/containers/" + containerID + "/stats": filepath.Join(mockFolder, "single_container", "stats.json"), - }) - require.NoError(t, err) - return mockServer - }, - cfgBuilder: newTestConfigBuilder(). - withDefaultLabels(). - withMetrics(allMetricsEnabled), - }, - { - desc: "scrapeV2_two_containers", - expectedMetricsFile: filepath.Join(mockFolder, "two_containers", "expected_metrics.yaml"), - mockDockerEngine: func(t *testing.T) *httptest.Server { - t.Helper() - containerIDs := []string{ - "89d28931fd8b95c8806343a532e9e76bf0a0b76ee8f19452b8f75dee1ebcebb7", - "a359c0fc87c546b42d2ad32db7c978627f1d89b49cb3827a7b19ba97a1febcce", - } - mockServer, err := dockerMockServer(&map[string]string{ - "/v1.25/containers/json": filepath.Join(mockFolder, "two_containers", "containers.json"), - "/v1.25/containers/" + containerIDs[0] + "/json": filepath.Join(mockFolder, "two_containers", "container1.json"), - "/v1.25/containers/" + containerIDs[1] + "/json": filepath.Join(mockFolder, "two_containers", "container2.json"), - "/v1.25/containers/" + containerIDs[0] + "/stats": filepath.Join(mockFolder, "two_containers", "stats1.json"), - "/v1.25/containers/" + containerIDs[1] + "/stats": filepath.Join(mockFolder, "two_containers", "stats2.json"), - }) - require.NoError(t, err) - return mockServer - }, - cfgBuilder: newTestConfigBuilder(). - withDefaultLabels(). 
- withMetrics(allMetricsEnabled), - }, - { - desc: "scrapeV2_no_pids_stats", - expectedMetricsFile: filepath.Join(mockFolder, "no_pids_stats", "expected_metrics.yaml"), - mockDockerEngine: func(t *testing.T) *httptest.Server { - t.Helper() - containerID := "10b703fb312b25e8368ab5a3bce3a1610d1cee5d71a94920f1a7adbc5b0cb326" - mockServer, err := dockerMockServer(&map[string]string{ - "/v1.25/containers/json": filepath.Join(mockFolder, "no_pids_stats", "containers.json"), - "/v1.25/containers/" + containerID + "/json": filepath.Join(mockFolder, "no_pids_stats", "container.json"), - "/v1.25/containers/" + containerID + "/stats": filepath.Join(mockFolder, "no_pids_stats", "stats.json"), - }) - require.NoError(t, err) - return mockServer - }, - cfgBuilder: newTestConfigBuilder(). - withDefaultLabels(). - withMetrics(allMetricsEnabled), - }, - { - desc: "scrapeV2_pid_stats_max", - expectedMetricsFile: filepath.Join(mockFolder, "pids_stats_max", "expected_metrics.yaml"), - mockDockerEngine: func(t *testing.T) *httptest.Server { - t.Helper() - containerID := "78de07328afff50a9777b07dd36a28c709dffe081baaf67235db618843399643" - mockServer, err := dockerMockServer(&map[string]string{ - "/v1.25/containers/json": filepath.Join(mockFolder, "pids_stats_max", "containers.json"), - "/v1.25/containers/" + containerID + "/json": filepath.Join(mockFolder, "pids_stats_max", "container.json"), - "/v1.25/containers/" + containerID + "/stats": filepath.Join(mockFolder, "pids_stats_max", "stats.json"), - }) - require.NoError(t, err) - return mockServer - }, - cfgBuilder: newTestConfigBuilder(). - withDefaultLabels(). 
- withMetrics(allMetricsEnabled), - }, - { - desc: "scrapeV2_cpu_limit", - expectedMetricsFile: filepath.Join(mockFolder, "cpu_limit", "expected_metrics.yaml"), - mockDockerEngine: func(t *testing.T) *httptest.Server { - t.Helper() - containerID := "9b842c47c1c3e4ee931e2c9713cf4e77aa09acc2201aea60fba04b6dbba6c674" - mockServer, err := dockerMockServer(&map[string]string{ - "/v1.25/containers/json": filepath.Join(mockFolder, "cpu_limit", "containers.json"), - "/v1.25/containers/" + containerID + "/json": filepath.Join(mockFolder, "cpu_limit", "container.json"), - "/v1.25/containers/" + containerID + "/stats": filepath.Join(mockFolder, "cpu_limit", "stats.json"), - }) - require.NoError(t, err) - return mockServer - }, - cfgBuilder: newTestConfigBuilder(). - withDefaultLabels(). - withMetrics(allMetricsEnabled), - }, - { - desc: "cgroups_v2_container", - expectedMetricsFile: filepath.Join(mockFolder, "cgroups_v2", "expected_metrics.yaml"), - mockDockerEngine: func(t *testing.T) *httptest.Server { - containerID := "f97ed5bca0a5a0b85bfd52c4144b96174e825c92a138bc0458f0e196f2c7c1b4" - mockServer, err := dockerMockServer(&map[string]string{ - "/v1.25/containers/json": filepath.Join(mockFolder, "cgroups_v2", "containers.json"), - "/v1.25/containers/" + containerID + "/json": filepath.Join(mockFolder, "cgroups_v2", "container.json"), - "/v1.25/containers/" + containerID + "/stats": filepath.Join(mockFolder, "cgroups_v2", "stats.json"), - }) - require.NoError(t, err) - return mockServer - }, - cfgBuilder: newTestConfigBuilder(). - withDefaultLabels(). 
- withMetrics(allMetricsEnabled), - }, - { - desc: "scrapeV2_single_container_with_optional_resource_attributes", - expectedMetricsFile: filepath.Join(mockFolder, "single_container_with_optional_resource_attributes", "expected_metrics.yaml"), - mockDockerEngine: func(t *testing.T) *httptest.Server { - containerID := "73364842ef014441cac89fed05df19463b1230db25a31252cdf82e754f1ec581" - mockServer, err := dockerMockServer(&map[string]string{ - "/v1.25/containers/json": filepath.Join(mockFolder, "single_container_with_optional_resource_attributes", "containers.json"), - "/v1.25/containers/" + containerID + "/json": filepath.Join(mockFolder, "single_container_with_optional_resource_attributes", "container.json"), - "/v1.25/containers/" + containerID + "/stats": filepath.Join(mockFolder, "single_container_with_optional_resource_attributes", "stats.json"), - }) - require.NoError(t, err) - return mockServer - }, - cfgBuilder: newTestConfigBuilder(). - withDefaultLabels(). - withMetrics(allMetricsEnabled). 
- withResourceAttributes(allResourceAttributesEnabled), - }, - } - - for _, tc := range testCases { - t.Run(tc.desc, func(t *testing.T) { - mockDockerEngine := tc.mockDockerEngine(t) - defer mockDockerEngine.Close() - - receiver := newMetricsReceiver( - receivertest.NewNopCreateSettings(), tc.cfgBuilder.withEndpoint(mockDockerEngine.URL).build()) - err := receiver.start(context.Background(), componenttest.NewNopHost()) - require.NoError(t, err) - defer func() { require.NoError(t, receiver.shutdown(context.Background())) }() - - actualMetrics, err := receiver.scrapeV2(context.Background()) - require.NoError(t, err) - - // Uncomment to regenerate 'expected_metrics.yaml' files - // golden.WriteMetrics(t, tc.expectedMetricsFile, actualMetrics) - - expectedMetrics, err := golden.ReadMetrics(tc.expectedMetricsFile) - - assert.NoError(t, err) - assert.NoError(t, pmetrictest.CompareMetrics(expectedMetrics, actualMetrics, - pmetrictest.IgnoreMetricDataPointsOrder(), - pmetrictest.IgnoreResourceMetricsOrder(), - pmetrictest.IgnoreStartTimestamp(), - pmetrictest.IgnoreTimestamp(), - pmetrictest.IgnoreMetricValues( - "container.uptime", // value depends on time.Now(), making it unpredictable as far as tests go - ), - )) - }) - } -} - -func TestRecordBaseMetrics(t *testing.T) { - cfg := createDefaultConfig().(*Config) - cfg.MetricsBuilderConfig.Metrics = metadata.MetricsConfig{ - ContainerUptime: metricEnabled, - } - r := newMetricsReceiver(receivertest.NewNopCreateSettings(), cfg) - now := time.Now() - started := now.Add(-2 * time.Second).Format(time.RFC3339) - - t.Run("ok", func(t *testing.T) { - err := r.recordBaseMetrics( - pcommon.NewTimestampFromTime(now), - &types.ContainerJSONBase{ - State: &types.ContainerState{ - StartedAt: started, - }, - }, - ) - require.NoError(t, err) - m := r.mb.Emit().ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0) - assert.Equal(t, "container.uptime", m.Name()) - dp := m.Gauge().DataPoints() - assert.Equal(t, 1, dp.Len()) - 
assert.Equal(t, 2, int(dp.At(0).DoubleValue())) - }) - - t.Run("error", func(t *testing.T) { - err := r.recordBaseMetrics( - pcommon.NewTimestampFromTime(now), - &types.ContainerJSONBase{ - State: &types.ContainerState{ - StartedAt: "bad date", - }, - }, - ) - require.Error(t, err) - }) -} - -func dockerMockServer(urlToFile *map[string]string) (*httptest.Server, error) { - urlToFileContents := make(map[string][]byte, len(*urlToFile)) - for urlPath, filePath := range *urlToFile { - err := func() error { - fileContents, err := os.ReadFile(filepath.Clean(filePath)) - if err != nil { - return err - } - urlToFileContents[urlPath] = fileContents - return nil - }() - if err != nil { - return nil, err - } - } - - return httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { - data, ok := urlToFileContents[req.URL.Path] - if !ok { - rw.WriteHeader(http.StatusNotFound) - return - } - rw.WriteHeader(http.StatusOK) - _, _ = rw.Write(data) - })), nil -} - -type testConfigBuilder struct { - config *Config -} - -func newTestConfigBuilder() *testConfigBuilder { - return &testConfigBuilder{config: createDefaultConfig().(*Config)} -} - -func (cb *testConfigBuilder) withEndpoint(endpoint string) *testConfigBuilder { - cb.config.Endpoint = endpoint - return cb -} - -func (cb *testConfigBuilder) withMetrics(ms metadata.MetricsConfig) *testConfigBuilder { - cb.config.MetricsBuilderConfig.Metrics = ms - return cb -} - -func (cb *testConfigBuilder) withResourceAttributes(ras metadata.ResourceAttributesConfig) *testConfigBuilder { - cb.config.MetricsBuilderConfig.ResourceAttributes = ras - return cb -} - -func (cb *testConfigBuilder) withDefaultLabels() *testConfigBuilder { - cb.config.EnvVarsToMetricLabels = map[string]string{ - "ENV_VAR": "env-var-metric-label", - "ENV_VAR_2": "env-var-metric-label-2", - } - cb.config.ContainerLabelsToMetricLabels = map[string]string{ - "container.label": "container-metric-label", - "container.label.2": 
"container-metric-label-2", - } - return cb -} - -func (cb *testConfigBuilder) build() *Config { - return cb.config -} From 2de7215df4b76b6d743e56f43639383b9a066a7d Mon Sep 17 00:00:00 2001 From: Adam Boguszewski Date: Tue, 14 May 2024 15:09:01 +0200 Subject: [PATCH 3/8] fix integration_test.go --- receiver/dockerstatsreceiver/integration_test.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/receiver/dockerstatsreceiver/integration_test.go b/receiver/dockerstatsreceiver/integration_test.go index b47f47231e15..33a476390138 100644 --- a/receiver/dockerstatsreceiver/integration_test.go +++ b/receiver/dockerstatsreceiver/integration_test.go @@ -3,13 +3,14 @@ //go:build integration -package dockerstatsreceiver +package receiver import ( "context" "testing" "time" + dockerReceiver "github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker/receiver" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/testcontainers/testcontainers-go" @@ -25,9 +26,9 @@ import ( "go.uber.org/zap/zaptest" ) -func factory() (rcvr.Factory, *Config) { +func factory() (rcvr.Factory, *dockerReceiver.Config) { f := NewFactory() - config := f.CreateDefaultConfig().(*Config) + config := f.CreateDefaultConfig().(*dockerReceiver.Config) config.CollectionInterval = 1 * time.Second return f, config } From 17eb6a0f2bc9f19279ce2746b7ad0fc0ef9646f2 Mon Sep 17 00:00:00 2001 From: Adam Boguszewski Date: Tue, 14 May 2024 15:28:05 +0200 Subject: [PATCH 4/8] run go mod tidy --- cmd/otelcontribcol/go.mod | 3 ++- cmd/otelcontribcol/go.sum | 6 ++++-- go.mod | 3 ++- go.sum | 6 ++++-- 4 files changed, 12 insertions(+), 6 deletions(-) diff --git a/cmd/otelcontribcol/go.mod b/cmd/otelcontribcol/go.mod index 0a5d8a2313ee..2d6e8f951722 100644 --- a/cmd/otelcontribcol/go.mod +++ b/cmd/otelcontribcol/go.mod @@ -428,7 +428,7 @@ require ( github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/digitalocean/godo 
v1.109.0 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker v26.1.2+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect @@ -587,6 +587,7 @@ require ( github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/sys/mountinfo v0.6.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect diff --git a/cmd/otelcontribcol/go.sum b/cmd/otelcontribcol/go.sum index 68b87ae4d10e..104776c15c79 100644 --- a/cmd/otelcontribcol/go.sum +++ b/cmd/otelcontribcol/go.sum @@ -1215,8 +1215,8 @@ github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.2+incompatible h1:UVX5ZOrrfTGZZYEP+ZDq3Xn9PdHNXaSYMFPDumMqG2k= +github.com/docker/docker v26.1.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ 
-1926,6 +1926,8 @@ github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374 github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= diff --git a/go.mod b/go.mod index 56b2a3f1d74e..e5c5e6d22cbd 100644 --- a/go.mod +++ b/go.mod @@ -376,7 +376,7 @@ require ( github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/digitalocean/godo v1.109.0 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker v26.1.2+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect @@ -536,6 +536,7 @@ require ( github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/sys/mountinfo v0.6.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect diff --git a/go.sum b/go.sum index 734731dbd136..010f3cb09df3 100644 --- a/go.sum +++ b/go.sum @@ -1214,8 
+1214,8 @@ github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.2+incompatible h1:UVX5ZOrrfTGZZYEP+ZDq3Xn9PdHNXaSYMFPDumMqG2k= +github.com/docker/docker v26.1.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -1926,6 +1926,8 @@ github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374 github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= From 4887259ea38b0fc1bd7840bb9b6b721ccaea81b6 Mon Sep 17 00:00:00 2001 From: Adam Boguszewski Date: Wed, 22 May 2024 
15:29:58 +0200 Subject: [PATCH 5/8] fix integration_test.go --- receiver/dockerstatsreceiver/integration_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/receiver/dockerstatsreceiver/integration_test.go b/receiver/dockerstatsreceiver/integration_test.go index 33a476390138..6c8f99c810e4 100644 --- a/receiver/dockerstatsreceiver/integration_test.go +++ b/receiver/dockerstatsreceiver/integration_test.go @@ -3,7 +3,7 @@ //go:build integration -package receiver +package dockerstatsreceiver import ( "context" From ae23e5210e79404951c8cd2c6a5356baed9c8fd5 Mon Sep 17 00:00:00 2001 From: Adam Boguszewski Date: Wed, 22 May 2024 15:31:48 +0200 Subject: [PATCH 6/8] go mod tidy in otelcontrib --- cmd/otelcontribcol/go.mod | 4 ++-- cmd/otelcontribcol/go.sum | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cmd/otelcontribcol/go.mod b/cmd/otelcontribcol/go.mod index 2d6e8f951722..7379e4fe46f3 100644 --- a/cmd/otelcontribcol/go.mod +++ b/cmd/otelcontribcol/go.mod @@ -4,7 +4,7 @@ module github.com/open-telemetry/opentelemetry-collector-contrib/cmd/otelcontrib go 1.21.0 -toolchain go1.21.10 +toolchain go1.21.4 require ( github.com/open-telemetry/opentelemetry-collector-contrib/confmap/provider/s3provider v0.101.0 @@ -428,7 +428,7 @@ require ( github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/digitalocean/godo v1.109.0 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.2+incompatible // indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect diff --git a/cmd/otelcontribcol/go.sum b/cmd/otelcontribcol/go.sum index 104776c15c79..2541a4e8c6e4 100644 --- a/cmd/otelcontribcol/go.sum +++ b/cmd/otelcontribcol/go.sum @@ -1215,8 +1215,8 @@ github.com/distribution/reference v0.5.0 
h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v26.1.2+incompatible h1:UVX5ZOrrfTGZZYEP+ZDq3Xn9PdHNXaSYMFPDumMqG2k= -github.com/docker/docker v26.1.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= From 5b9a12384d07c42fa9e5c899412aefc0d0bdafe4 Mon Sep 17 00:00:00 2001 From: Adam Boguszewski Date: Wed, 22 May 2024 16:04:39 +0200 Subject: [PATCH 7/8] run go mod tidy in all dirs --- cmd/configschema/go.mod | 3 ++- cmd/configschema/go.sum | 6 +++-- cmd/oteltestbedcol/go.mod | 5 +++-- cmd/oteltestbedcol/go.sum | 6 +++-- connector/datadogconnector/go.sum | 6 +++-- exporter/clickhouseexporter/go.mod | 3 ++- exporter/clickhouseexporter/go.sum | 6 +++-- exporter/datadogexporter/go.mod | 3 ++- exporter/datadogexporter/go.sum | 6 +++-- .../datadogexporter/integrationtest/go.sum | 6 +++-- exporter/prometheusexporter/go.mod | 3 ++- exporter/prometheusexporter/go.sum | 6 +++-- exporter/splunkhecexporter/go.mod | 3 ++- exporter/splunkhecexporter/go.sum | 6 +++-- extension/observer/dockerobserver/go.mod | 15 +++++++++---- extension/observer/dockerobserver/go.sum | 22 ++++++++++--------- go.mod | 2 +- go.sum | 4 ++-- internal/coreinternal/go.mod | 3 ++- internal/coreinternal/go.sum | 6 +++-- internal/docker/go.mod | 6 +++++ 
internal/docker/go.sum | 6 ----- internal/k8stest/go.mod | 3 ++- internal/k8stest/go.sum | 6 +++-- internal/metadataproviders/go.mod | 3 ++- internal/metadataproviders/go.sum | 6 +++-- processor/k8sattributesprocessor/go.mod | 3 ++- processor/k8sattributesprocessor/go.sum | 6 +++-- processor/resourcedetectionprocessor/go.mod | 3 ++- processor/resourcedetectionprocessor/go.sum | 6 +++-- receiver/aerospikereceiver/go.mod | 3 ++- receiver/aerospikereceiver/go.sum | 6 +++-- receiver/apachereceiver/go.mod | 3 ++- receiver/apachereceiver/go.sum | 6 +++-- receiver/apachesparkreceiver/go.mod | 3 ++- receiver/apachesparkreceiver/go.sum | 6 +++-- receiver/awscontainerinsightreceiver/go.mod | 3 ++- receiver/awscontainerinsightreceiver/go.sum | 6 +++-- receiver/bigipreceiver/go.mod | 3 ++- receiver/bigipreceiver/go.sum | 6 +++-- receiver/elasticsearchreceiver/go.mod | 3 ++- receiver/elasticsearchreceiver/go.sum | 6 +++-- receiver/filestatsreceiver/go.mod | 3 ++- receiver/filestatsreceiver/go.sum | 6 +++-- receiver/haproxyreceiver/go.mod | 3 ++- receiver/haproxyreceiver/go.sum | 6 +++-- receiver/hostmetricsreceiver/go.mod | 3 ++- receiver/hostmetricsreceiver/go.sum | 6 +++-- receiver/iisreceiver/go.mod | 3 ++- receiver/iisreceiver/go.sum | 6 +++-- receiver/jmxreceiver/go.mod | 3 ++- receiver/jmxreceiver/go.sum | 6 +++-- receiver/k8sclusterreceiver/go.mod | 3 ++- receiver/k8sclusterreceiver/go.sum | 6 +++-- receiver/k8sobjectsreceiver/go.mod | 3 ++- receiver/k8sobjectsreceiver/go.sum | 6 +++-- receiver/kubeletstatsreceiver/go.mod | 3 ++- receiver/kubeletstatsreceiver/go.sum | 6 +++-- receiver/memcachedreceiver/go.mod | 3 ++- receiver/memcachedreceiver/go.sum | 6 +++-- receiver/mongodbreceiver/go.mod | 3 ++- receiver/mongodbreceiver/go.sum | 6 +++-- receiver/mysqlreceiver/go.mod | 3 ++- receiver/mysqlreceiver/go.sum | 6 +++-- receiver/nginxreceiver/go.mod | 3 ++- receiver/nginxreceiver/go.sum | 6 +++-- receiver/postgresqlreceiver/go.mod | 3 ++- receiver/postgresqlreceiver/go.sum 
| 6 +++-- receiver/prometheusreceiver/go.mod | 3 ++- receiver/prometheusreceiver/go.sum | 6 +++-- receiver/purefareceiver/go.mod | 3 ++- receiver/purefareceiver/go.sum | 6 +++-- receiver/purefbreceiver/go.mod | 3 ++- receiver/purefbreceiver/go.sum | 6 +++-- receiver/redisreceiver/go.mod | 3 ++- receiver/redisreceiver/go.sum | 6 +++-- receiver/simpleprometheusreceiver/go.mod | 3 ++- receiver/simpleprometheusreceiver/go.sum | 6 +++-- receiver/snmpreceiver/go.mod | 3 ++- receiver/snmpreceiver/go.sum | 6 +++-- receiver/splunkhecreceiver/go.mod | 2 ++ receiver/splunkhecreceiver/go.sum | 6 +++-- receiver/sqlqueryreceiver/go.mod | 3 ++- receiver/sqlqueryreceiver/go.sum | 6 +++-- receiver/vcenterreceiver/go.mod | 3 ++- receiver/vcenterreceiver/go.sum | 6 +++-- receiver/zookeeperreceiver/go.mod | 3 ++- receiver/zookeeperreceiver/go.sum | 6 +++-- testbed/go.mod | 3 ++- testbed/go.sum | 6 +++-- 90 files changed, 287 insertions(+), 150 deletions(-) diff --git a/cmd/configschema/go.mod b/cmd/configschema/go.mod index d1028fe55296..b7ab275c71c9 100644 --- a/cmd/configschema/go.mod +++ b/cmd/configschema/go.mod @@ -357,7 +357,7 @@ require ( github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/digitalocean/godo v1.109.0 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect @@ -513,6 +513,7 @@ require ( github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/sys/mountinfo v0.6.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 
github.com/modern-go/reflect2 v1.0.2 // indirect diff --git a/cmd/configschema/go.sum b/cmd/configschema/go.sum index fa810dee872d..0ebc53d061ed 100644 --- a/cmd/configschema/go.sum +++ b/cmd/configschema/go.sum @@ -1212,8 +1212,8 @@ github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -1926,6 +1926,8 @@ github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374 github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod 
h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= diff --git a/cmd/oteltestbedcol/go.mod b/cmd/oteltestbedcol/go.mod index 848526a7db5c..2a6b3395e88f 100644 --- a/cmd/oteltestbedcol/go.mod +++ b/cmd/oteltestbedcol/go.mod @@ -4,7 +4,7 @@ module github.com/open-telemetry/opentelemetry-collector-contrib/cmd/oteltestbed go 1.21.0 -toolchain go1.21.10 +toolchain go1.21.4 require ( github.com/open-telemetry/opentelemetry-collector-contrib/exporter/carbonexporter v0.101.0 @@ -85,7 +85,7 @@ require ( github.com/dennwc/varint v1.0.0 // indirect github.com/digitalocean/godo v1.109.0 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/elastic/go-structform v0.0.10 // indirect @@ -170,6 +170,7 @@ require ( github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mostynb/go-grpc-compression v1.2.2 // indirect diff --git a/cmd/oteltestbedcol/go.sum b/cmd/oteltestbedcol/go.sum index 5f9d9b1a213c..a5c815929fe7 100644 --- a/cmd/oteltestbedcol/go.sum +++ b/cmd/oteltestbedcol/go.sum @@ -150,8 +150,8 @@ github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod 
h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -486,6 +486,8 @@ github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374 github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/connector/datadogconnector/go.sum b/connector/datadogconnector/go.sum index e8a499434243..f0ebbe7cc9df 100644 --- a/connector/datadogconnector/go.sum +++ b/connector/datadogconnector/go.sum @@ -297,8 +297,8 @@ github.com/digitalocean/godo v1.109.0 h1:4W97RJLJSUQ3veRZDNbp1Ol3Rbn6Lmt9bKGvfqY github.com/digitalocean/godo v1.109.0/go.mod h1:R6EmmWI8CT1+fCtjWY9UCB+L5uufuZH13wk3YhxycCs= github.com/distribution/reference 
v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -640,6 +640,8 @@ github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374 github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= diff --git a/exporter/clickhouseexporter/go.mod b/exporter/clickhouseexporter/go.mod index 3e4b46362060..3981aa7a602b 100644 --- a/exporter/clickhouseexporter/go.mod +++ b/exporter/clickhouseexporter/go.mod @@ -36,7 +36,7 @@ require ( github.com/cpuguy83/dockercfg 
v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -57,6 +57,7 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/exporter/clickhouseexporter/go.sum b/exporter/clickhouseexporter/go.sum index ad546066ce77..c84c2d849e28 100644 --- a/exporter/clickhouseexporter/go.sum +++ b/exporter/clickhouseexporter/go.sum @@ -35,8 +35,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 
h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -107,6 +107,8 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/exporter/datadogexporter/go.mod b/exporter/datadogexporter/go.mod index e966f42cf9f5..2a297d9f3a4d 100644 --- a/exporter/datadogexporter/go.mod +++ b/exporter/datadogexporter/go.mod @@ -149,7 +149,7 @@ require ( github.com/dennwc/varint v1.0.0 // indirect github.com/digitalocean/godo v1.109.0 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect @@ -239,6 +239,7 @@ require ( github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect 
diff --git a/exporter/datadogexporter/go.sum b/exporter/datadogexporter/go.sum index 892d943f8c9d..509562c0aa95 100644 --- a/exporter/datadogexporter/go.sum +++ b/exporter/datadogexporter/go.sum @@ -327,8 +327,8 @@ github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -726,6 +726,8 @@ github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374 github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= 
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= diff --git a/exporter/datadogexporter/integrationtest/go.sum b/exporter/datadogexporter/integrationtest/go.sum index e8a499434243..f0ebbe7cc9df 100644 --- a/exporter/datadogexporter/integrationtest/go.sum +++ b/exporter/datadogexporter/integrationtest/go.sum @@ -297,8 +297,8 @@ github.com/digitalocean/godo v1.109.0 h1:4W97RJLJSUQ3veRZDNbp1Ol3Rbn6Lmt9bKGvfqY github.com/digitalocean/godo v1.109.0/go.mod h1:R6EmmWI8CT1+fCtjWY9UCB+L5uufuZH13wk3YhxycCs= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -640,6 +640,8 @@ github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374 github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= 
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= diff --git a/exporter/prometheusexporter/go.mod b/exporter/prometheusexporter/go.mod index 47b8d76e8276..9f23bd878467 100644 --- a/exporter/prometheusexporter/go.mod +++ b/exporter/prometheusexporter/go.mod @@ -49,7 +49,7 @@ require ( github.com/dennwc/varint v1.0.0 // indirect github.com/digitalocean/godo v1.109.0 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect @@ -120,6 +120,7 @@ require ( github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect diff --git a/exporter/prometheusexporter/go.sum b/exporter/prometheusexporter/go.sum index 48fc13467986..365036f288d3 100644 --- a/exporter/prometheusexporter/go.sum +++ b/exporter/prometheusexporter/go.sum @@ -112,8 +112,8 @@ github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod 
h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -414,6 +414,8 @@ github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374 github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= diff --git a/exporter/splunkhecexporter/go.mod b/exporter/splunkhecexporter/go.mod index 6c338dbf9bf1..b0c5772334bc 100644 --- a/exporter/splunkhecexporter/go.mod +++ b/exporter/splunkhecexporter/go.mod @@ -43,7 +43,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker 
v25.0.5+incompatible // indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -64,6 +64,7 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/exporter/splunkhecexporter/go.sum b/exporter/splunkhecexporter/go.sum index a2e93ed2894f..e30673732d9a 100644 --- a/exporter/splunkhecexporter/go.sum +++ b/exporter/splunkhecexporter/go.sum @@ -32,8 +32,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -115,6 +115,8 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure 
v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/extension/observer/dockerobserver/go.mod b/extension/observer/dockerobserver/go.mod index 482df419fcd9..acdbdab7ac13 100644 --- a/extension/observer/dockerobserver/go.mod +++ b/extension/observer/dockerobserver/go.mod @@ -3,7 +3,7 @@ module github.com/open-telemetry/opentelemetry-collector-contrib/extension/obser go 1.21.0 require ( - github.com/docker/docker v25.0.5+incompatible + github.com/docker/docker v26.1.2+incompatible github.com/docker/go-connections v0.5.0 github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer v0.101.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.101.0 @@ -26,7 +26,7 @@ require ( github.com/Microsoft/hcsshim v0.11.4 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/containerd/containerd v1.7.15 // indirect github.com/containerd/log v0.1.0 // indirect github.com/cpuguy83/dockercfg v0.3.1 // indirect @@ -49,6 +49,7 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // 
indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect @@ -79,11 +80,11 @@ require ( go.uber.org/multierr v1.11.0 // indirect golang.org/x/crypto v0.22.0 // indirect golang.org/x/mod v0.16.0 // indirect - golang.org/x/net v0.23.0 // indirect + golang.org/x/net v0.24.0 // indirect golang.org/x/sys v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.15.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect google.golang.org/grpc v1.63.2 // indirect google.golang.org/protobuf v1.34.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect @@ -103,3 +104,9 @@ retract ( v0.76.1 v0.65.0 ) + +replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil => ../../../pkg/pdatautil + +replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest => ../../../pkg/pdatatest + +replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden => ../../../pkg/golden diff --git a/extension/observer/dockerobserver/go.sum b/extension/observer/dockerobserver/go.sum index 926649ad6733..903a5b83dcfa 100644 --- a/extension/observer/dockerobserver/go.sum +++ b/extension/observer/dockerobserver/go.sum @@ -12,8 +12,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 
h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/containerd/containerd v1.7.15 h1:afEHXdil9iAm03BmhjzKyXnnEBtjaLJefdU7DV0IFes= github.com/containerd/containerd v1.7.15/go.mod h1:ISzRRTMF8EXNpJlTzyr2XMhN+j9K302C21/+cr3kUnY= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= @@ -27,8 +27,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.2+incompatible h1:UVX5ZOrrfTGZZYEP+ZDq3Xn9PdHNXaSYMFPDumMqG2k= +github.com/docker/docker v26.1.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -78,6 +78,8 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod 
h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= @@ -153,8 +155,8 @@ go.opentelemetry.io/otel v1.26.0 h1:LQwgL5s/1W7YiiRwxf03QGnWLb2HW4pLiAhaA5cZXBs= go.opentelemetry.io/otel v1.26.0/go.mod h1:UmLkJHUAidDval2EICqBMbnAd0/m2vmpf/dAM+fvFs4= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 h1:Xw8U6u2f8DK2XAkGRFV7BBLENgnTGX9i4rQRxJf+/vs= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0/go.mod h1:6KW1Fm6R/s6Z3PGXwSJN2K4eT6wQB3vXX6CVnYX9NmM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= go.opentelemetry.io/otel/exporters/prometheus v0.48.0 h1:sBQe3VNGUjY9IKWQC6z2lNqa5iGbDSxhs60ABwK4y0s= go.opentelemetry.io/otel/exporters/prometheus v0.48.0/go.mod h1:DtrbMzoZWwQHyrQmCfLam5DZbnmorsGbOtTbYHycU5o= go.opentelemetry.io/otel/metric v1.26.0 h1:7S39CLuY5Jgg9CrnA9HHiEjGMF/X2VHvoXGgSllRz30= @@ -186,8 +188,8 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.23.0 
h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -226,8 +228,8 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0= google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de h1:cZGRis4/ot9uVm639a+rHCUaG0JJHEsdyzSQTMX+suY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= diff --git a/go.mod b/go.mod index e5c5e6d22cbd..54c1151bcf3a 100644 --- a/go.mod +++ b/go.mod @@ -376,7 +376,7 @@ require ( github.com/dgryski/go-rendezvous 
v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/digitalocean/godo v1.109.0 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.2+incompatible // indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect diff --git a/go.sum b/go.sum index 010f3cb09df3..452d64fff48a 100644 --- a/go.sum +++ b/go.sum @@ -1214,8 +1214,8 @@ github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v26.1.2+incompatible h1:UVX5ZOrrfTGZZYEP+ZDq3Xn9PdHNXaSYMFPDumMqG2k= -github.com/docker/docker v26.1.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= diff --git a/internal/coreinternal/go.mod b/internal/coreinternal/go.mod index dbcb8bd31c52..bfa152677a3f 100644 --- a/internal/coreinternal/go.mod +++ b/internal/coreinternal/go.mod @@ -35,7 +35,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker 
v26.1.3+incompatible // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-logr/logr v1.4.1 // indirect @@ -53,6 +53,7 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/internal/coreinternal/go.sum b/internal/coreinternal/go.sum index 3906841dd3eb..62a80a1d2bc4 100644 --- a/internal/coreinternal/go.sum +++ b/internal/coreinternal/go.sum @@ -27,8 +27,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -79,6 +79,8 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk 
v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/internal/docker/go.mod b/internal/docker/go.mod index 5a7632055568..db8ecc59e5cc 100644 --- a/internal/docker/go.mod +++ b/internal/docker/go.mod @@ -81,3 +81,9 @@ retract ( v0.76.1 v0.65.0 ) + +replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil => ../../pkg/pdatautil + +replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest => ../../pkg/pdatatest + +replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden => ../../pkg/golden diff --git a/internal/docker/go.sum b/internal/docker/go.sum index dcbcc9d20bbe..33e2aa7f44be 100644 --- a/internal/docker/go.sum +++ b/internal/docker/go.sum @@ -73,12 +73,6 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.101.0 h1:Ohhry/Fcxh7/ysAxFhW2IJR/4hWEPaizDNtg02upYLA= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.101.0/go.mod h1:H2vPArfULuCAm4Y6GHNxuLrjFGSgO16NJgdGACxBhSM= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.101.0 
h1:TCQYvGS2MKTotOTQDnHUSd4ljEzXRzHXopdv71giKWU= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.101.0/go.mod h1:Nl2d4DSK/IbaWnnBxYyhMNUW6C9sb5/4idVZrSW/5Ps= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.101.0 h1:dVINhi/nne11lG+Xnwuy9t/N4xyaH2Om2EU+5lphCA4= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.101.0/go.mod h1:kjyfpKOuBfkx3UsJQsbQ5eTJM3yQWiRYaYxs47PpxvI= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= diff --git a/internal/k8stest/go.mod b/internal/k8stest/go.mod index b3f403ca9cff..97b13db12519 100644 --- a/internal/k8stest/go.mod +++ b/internal/k8stest/go.mod @@ -3,7 +3,7 @@ module github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8stes go 1.21.0 require ( - github.com/docker/docker v25.0.5+incompatible + github.com/docker/docker v26.1.3+incompatible github.com/stretchr/testify v1.9.0 k8s.io/api v0.29.3 k8s.io/apimachinery v0.29.3 @@ -33,6 +33,7 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.7 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/term v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect diff --git a/internal/k8stest/go.sum b/internal/k8stest/go.sum index 88e12dcff96a..9f40bbd895d6 100644 --- a/internal/k8stest/go.sum +++ b/internal/k8stest/go.sum @@ -12,8 +12,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 
h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -75,6 +75,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= diff --git a/internal/metadataproviders/go.mod b/internal/metadataproviders/go.mod index 6b1006536c7c..1d888d3c49d4 100644 --- a/internal/metadataproviders/go.mod +++ b/internal/metadataproviders/go.mod @@ -5,7 +5,7 @@ go 1.21.0 require ( github.com/Showmax/go-fqdn v1.0.0 github.com/aws/aws-sdk-go v1.53.7 - github.com/docker/docker v25.0.5+incompatible + github.com/docker/docker 
v26.1.3+incompatible github.com/hashicorp/consul/api v1.28.2 github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.101.0 github.com/stretchr/testify v1.9.0 @@ -58,6 +58,7 @@ require ( github.com/mattn/go-isatty v0.0.17 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect diff --git a/internal/metadataproviders/go.sum b/internal/metadataproviders/go.sum index 3edb2297fde5..7e5d89d32e9a 100644 --- a/internal/metadataproviders/go.sum +++ b/internal/metadataproviders/go.sum @@ -77,8 +77,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= @@ -313,6 +313,8 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F 
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= diff --git a/processor/k8sattributesprocessor/go.mod b/processor/k8sattributesprocessor/go.mod index 78ec9c555e3b..381650ef4c3f 100644 --- a/processor/k8sattributesprocessor/go.mod +++ b/processor/k8sattributesprocessor/go.mod @@ -36,7 +36,7 @@ require ( github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect @@ -66,6 +66,7 @@ require ( github.com/mailru/easyjson v0.7.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mostynb/go-grpc-compression v1.2.2 // indirect diff --git a/processor/k8sattributesprocessor/go.sum b/processor/k8sattributesprocessor/go.sum index 5d17a5e0b9ec..a0fbca555e1c 
100644 --- a/processor/k8sattributesprocessor/go.sum +++ b/processor/k8sattributesprocessor/go.sum @@ -824,8 +824,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -1110,6 +1110,8 @@ github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HK github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= diff --git 
a/processor/resourcedetectionprocessor/go.mod b/processor/resourcedetectionprocessor/go.mod index 77bef65424d8..8d94afa4165c 100644 --- a/processor/resourcedetectionprocessor/go.mod +++ b/processor/resourcedetectionprocessor/go.mod @@ -41,7 +41,7 @@ require ( github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect @@ -86,6 +86,7 @@ require ( github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/term v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect diff --git a/processor/resourcedetectionprocessor/go.sum b/processor/resourcedetectionprocessor/go.sum index b7cae4a605a7..ff2bb53bcecc 100644 --- a/processor/resourcedetectionprocessor/go.sum +++ b/processor/resourcedetectionprocessor/go.sum @@ -83,8 +83,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= 
+github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -342,6 +342,8 @@ github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374 github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= diff --git a/receiver/aerospikereceiver/go.mod b/receiver/aerospikereceiver/go.mod index 928f835bd067..c56d165270b7 100644 --- a/receiver/aerospikereceiver/go.mod +++ b/receiver/aerospikereceiver/go.mod @@ -38,7 +38,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect @@ -57,6 +57,7 @@ require ( 
github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/aerospikereceiver/go.sum b/receiver/aerospikereceiver/go.sum index 3d0d1cf7e207..50bc7cc574af 100644 --- a/receiver/aerospikereceiver/go.sum +++ b/receiver/aerospikereceiver/go.sum @@ -29,8 +29,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -87,6 +87,8 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= 
+github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/apachereceiver/go.mod b/receiver/apachereceiver/go.mod index 73523a2d1b49..68e7d6ba87cc 100644 --- a/receiver/apachereceiver/go.mod +++ b/receiver/apachereceiver/go.mod @@ -36,7 +36,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -58,6 +58,7 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/apachereceiver/go.sum b/receiver/apachereceiver/go.sum index c1e7a7b31073..611f17f3e5f0 100644 --- a/receiver/apachereceiver/go.sum +++ b/receiver/apachereceiver/go.sum @@ -27,8 +27,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod 
h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -85,6 +85,8 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/apachesparkreceiver/go.mod b/receiver/apachesparkreceiver/go.mod index b65234518a4b..701bd24a79df 100644 --- a/receiver/apachesparkreceiver/go.mod +++ b/receiver/apachesparkreceiver/go.mod @@ -35,7 +35,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + 
github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -57,6 +57,7 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/apachesparkreceiver/go.sum b/receiver/apachesparkreceiver/go.sum index 8462a1b861ac..2533ce0fc498 100644 --- a/receiver/apachesparkreceiver/go.sum +++ b/receiver/apachesparkreceiver/go.sum @@ -27,8 +27,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -85,6 +85,8 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod 
h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/awscontainerinsightreceiver/go.mod b/receiver/awscontainerinsightreceiver/go.mod index af93c72ec66f..116f6bbc4b2e 100644 --- a/receiver/awscontainerinsightreceiver/go.mod +++ b/receiver/awscontainerinsightreceiver/go.mod @@ -44,7 +44,7 @@ require ( github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect @@ -84,6 +84,7 @@ require ( github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/sys/mountinfo v0.6.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect diff --git a/receiver/awscontainerinsightreceiver/go.sum b/receiver/awscontainerinsightreceiver/go.sum index 1d9a696ab6f4..b06d63c0f535 100644 --- 
a/receiver/awscontainerinsightreceiver/go.sum +++ b/receiver/awscontainerinsightreceiver/go.sum @@ -75,8 +75,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -268,6 +268,8 @@ github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HK github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= diff --git 
a/receiver/bigipreceiver/go.mod b/receiver/bigipreceiver/go.mod index 4199c5d72eb3..3879a742292d 100644 --- a/receiver/bigipreceiver/go.mod +++ b/receiver/bigipreceiver/go.mod @@ -37,7 +37,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -59,6 +59,7 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/bigipreceiver/go.sum b/receiver/bigipreceiver/go.sum index 8462a1b861ac..2533ce0fc498 100644 --- a/receiver/bigipreceiver/go.sum +++ b/receiver/bigipreceiver/go.sum @@ -27,8 +27,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 
h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -85,6 +85,8 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/elasticsearchreceiver/go.mod b/receiver/elasticsearchreceiver/go.mod index d7f86631ff74..d03707583f9d 100644 --- a/receiver/elasticsearchreceiver/go.mod +++ b/receiver/elasticsearchreceiver/go.mod @@ -38,7 +38,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -59,6 +59,7 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // 
indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/elasticsearchreceiver/go.sum b/receiver/elasticsearchreceiver/go.sum index 8462a1b861ac..2533ce0fc498 100644 --- a/receiver/elasticsearchreceiver/go.sum +++ b/receiver/elasticsearchreceiver/go.sum @@ -27,8 +27,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -85,6 +85,8 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 
h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/filestatsreceiver/go.mod b/receiver/filestatsreceiver/go.mod index 680c8d9ec712..2d1883e6c6e4 100644 --- a/receiver/filestatsreceiver/go.mod +++ b/receiver/filestatsreceiver/go.mod @@ -34,7 +34,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -53,6 +53,7 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/filestatsreceiver/go.sum b/receiver/filestatsreceiver/go.sum index 86178a23f20d..c6bcde89db76 100644 --- a/receiver/filestatsreceiver/go.sum +++ b/receiver/filestatsreceiver/go.sum @@ -29,8 +29,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod 
h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -81,6 +81,8 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/haproxyreceiver/go.mod b/receiver/haproxyreceiver/go.mod index f9e186db409d..67b4ac4b1507 100644 --- a/receiver/haproxyreceiver/go.mod +++ b/receiver/haproxyreceiver/go.mod @@ -36,7 +36,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -58,6 +58,7 @@ 
require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/haproxyreceiver/go.sum b/receiver/haproxyreceiver/go.sum index c1e7a7b31073..611f17f3e5f0 100644 --- a/receiver/haproxyreceiver/go.sum +++ b/receiver/haproxyreceiver/go.sum @@ -27,8 +27,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -85,6 +85,8 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= 
+github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/hostmetricsreceiver/go.mod b/receiver/hostmetricsreceiver/go.mod index a42211c6e84c..42f0097b33f7 100644 --- a/receiver/hostmetricsreceiver/go.mod +++ b/receiver/hostmetricsreceiver/go.mod @@ -42,7 +42,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -66,6 +66,7 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/hostmetricsreceiver/go.sum b/receiver/hostmetricsreceiver/go.sum index d87e59cb926c..1de392a708c9 100644 --- a/receiver/hostmetricsreceiver/go.sum +++ b/receiver/hostmetricsreceiver/go.sum @@ -77,8 +77,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= 
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -222,6 +222,8 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/iisreceiver/go.mod b/receiver/iisreceiver/go.mod index 1f503d643cc9..0be69e7d510b 100644 --- a/receiver/iisreceiver/go.mod +++ b/receiver/iisreceiver/go.mod @@ -35,7 +35,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // 
indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -54,6 +54,7 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/iisreceiver/go.sum b/receiver/iisreceiver/go.sum index 124da2ac016a..e41fc73a6f47 100644 --- a/receiver/iisreceiver/go.sum +++ b/receiver/iisreceiver/go.sum @@ -27,8 +27,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -79,6 +79,8 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod 
h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/jmxreceiver/go.mod b/receiver/jmxreceiver/go.mod index a1f21ca3c5b4..cc1706c0a8e8 100644 --- a/receiver/jmxreceiver/go.mod +++ b/receiver/jmxreceiver/go.mod @@ -36,7 +36,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -58,6 +58,7 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/jmxreceiver/go.sum b/receiver/jmxreceiver/go.sum index 89b600bafae6..7f5b56115a06 100644 --- a/receiver/jmxreceiver/go.sum +++ b/receiver/jmxreceiver/go.sum @@ -27,8 +27,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew 
v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -85,6 +85,8 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/k8sclusterreceiver/go.mod b/receiver/k8sclusterreceiver/go.mod index 9d2da0b31c3b..6341f6d1a603 100644 --- a/receiver/k8sclusterreceiver/go.mod +++ b/receiver/k8sclusterreceiver/go.mod @@ -40,7 +40,7 @@ require ( 
github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect @@ -69,6 +69,7 @@ require ( github.com/mailru/easyjson v0.7.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mostynb/go-grpc-compression v1.2.2 // indirect diff --git a/receiver/k8sclusterreceiver/go.sum b/receiver/k8sclusterreceiver/go.sum index dbe75dc30e0f..1ae2c71aa112 100644 --- a/receiver/k8sclusterreceiver/go.sum +++ b/receiver/k8sclusterreceiver/go.sum @@ -57,8 +57,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= 
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -226,6 +226,8 @@ github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HK github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= diff --git a/receiver/k8sobjectsreceiver/go.mod b/receiver/k8sobjectsreceiver/go.mod index 6104edb0b154..046dc359a549 100644 --- a/receiver/k8sobjectsreceiver/go.mod +++ b/receiver/k8sobjectsreceiver/go.mod @@ -30,7 +30,7 @@ require ( github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect @@ -60,6 +60,7 @@ require ( github.com/mailru/easyjson v0.7.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mostynb/go-grpc-compression v1.2.2 // indirect diff --git 
a/receiver/k8sobjectsreceiver/go.sum b/receiver/k8sobjectsreceiver/go.sum index c4019c387335..54901919b768 100644 --- a/receiver/k8sobjectsreceiver/go.sum +++ b/receiver/k8sobjectsreceiver/go.sum @@ -57,8 +57,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -224,6 +224,8 @@ github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HK github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod 
h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= diff --git a/receiver/kubeletstatsreceiver/go.mod b/receiver/kubeletstatsreceiver/go.mod index 4474671400c7..4de9d5b49900 100644 --- a/receiver/kubeletstatsreceiver/go.mod +++ b/receiver/kubeletstatsreceiver/go.mod @@ -38,7 +38,7 @@ require ( github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect @@ -67,6 +67,7 @@ require ( github.com/mailru/easyjson v0.7.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mostynb/go-grpc-compression v1.2.2 // indirect diff --git a/receiver/kubeletstatsreceiver/go.sum b/receiver/kubeletstatsreceiver/go.sum index 4d47e312691e..c92034964ef7 100644 --- a/receiver/kubeletstatsreceiver/go.sum +++ b/receiver/kubeletstatsreceiver/go.sum @@ -57,8 +57,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible 
h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -224,6 +224,8 @@ github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HK github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= diff --git a/receiver/memcachedreceiver/go.mod b/receiver/memcachedreceiver/go.mod index 587e34957d16..c83430866753 100644 --- a/receiver/memcachedreceiver/go.mod +++ b/receiver/memcachedreceiver/go.mod @@ -35,7 +35,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -54,6 +54,7 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // 
indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/memcachedreceiver/go.sum b/receiver/memcachedreceiver/go.sum index 8f5ea02be66b..a8ce6b167de1 100644 --- a/receiver/memcachedreceiver/go.sum +++ b/receiver/memcachedreceiver/go.sum @@ -27,8 +27,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -81,6 +81,8 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec 
v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/mongodbreceiver/go.mod b/receiver/mongodbreceiver/go.mod index a1831b5b614b..ae05a2349255 100644 --- a/receiver/mongodbreceiver/go.mod +++ b/receiver/mongodbreceiver/go.mod @@ -41,7 +41,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -62,6 +62,7 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/mongodbreceiver/go.sum b/receiver/mongodbreceiver/go.sum index b94fe63ca36c..cbe706e493d9 100644 --- a/receiver/mongodbreceiver/go.sum +++ b/receiver/mongodbreceiver/go.sum @@ -27,8 +27,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible 
h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -85,6 +85,8 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/mysqlreceiver/go.mod b/receiver/mysqlreceiver/go.mod index 6fa493c202ad..3143902e3db1 100644 --- a/receiver/mysqlreceiver/go.mod +++ b/receiver/mysqlreceiver/go.mod @@ -39,7 +39,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect 
github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -59,6 +59,7 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/mysqlreceiver/go.sum b/receiver/mysqlreceiver/go.sum index 9251bda77c02..aa20de61492d 100644 --- a/receiver/mysqlreceiver/go.sum +++ b/receiver/mysqlreceiver/go.sum @@ -29,8 +29,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -85,6 +85,8 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= 
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/nginxreceiver/go.mod b/receiver/nginxreceiver/go.mod index 735432abd1bb..caa37f108f61 100644 --- a/receiver/nginxreceiver/go.mod +++ b/receiver/nginxreceiver/go.mod @@ -36,7 +36,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -58,6 +58,7 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/nginxreceiver/go.sum b/receiver/nginxreceiver/go.sum index 998829beb45d..979bfc837c8c 100644 --- a/receiver/nginxreceiver/go.sum +++ b/receiver/nginxreceiver/go.sum @@ -27,8 +27,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 
h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -85,6 +85,8 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/postgresqlreceiver/go.mod b/receiver/postgresqlreceiver/go.mod index 715749d81049..6549898c71fb 100644 --- a/receiver/postgresqlreceiver/go.mod +++ b/receiver/postgresqlreceiver/go.mod @@ -41,7 +41,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference 
v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -62,6 +62,7 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/postgresqlreceiver/go.sum b/receiver/postgresqlreceiver/go.sum index 62b174a1bed5..78a044aeec70 100644 --- a/receiver/postgresqlreceiver/go.sum +++ b/receiver/postgresqlreceiver/go.sum @@ -27,8 +27,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -85,6 +85,8 @@ github.com/mitchellh/copystructure v1.2.0 
h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/prometheusreceiver/go.mod b/receiver/prometheusreceiver/go.mod index 9fb98aa01857..a730072a1884 100644 --- a/receiver/prometheusreceiver/go.mod +++ b/receiver/prometheusreceiver/go.mod @@ -58,7 +58,7 @@ require ( github.com/dennwc/varint v1.0.0 // indirect github.com/digitalocean/godo v1.109.0 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect @@ -129,6 +129,7 @@ require ( github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect diff --git a/receiver/prometheusreceiver/go.sum b/receiver/prometheusreceiver/go.sum index 
bded91929bfc..304e2ac04a43 100644 --- a/receiver/prometheusreceiver/go.sum +++ b/receiver/prometheusreceiver/go.sum @@ -113,8 +113,8 @@ github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -415,6 +415,8 @@ github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374 github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent 
v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= diff --git a/receiver/purefareceiver/go.mod b/receiver/purefareceiver/go.mod index c4371408bc1a..340127320500 100644 --- a/receiver/purefareceiver/go.mod +++ b/receiver/purefareceiver/go.mod @@ -42,7 +42,7 @@ require ( github.com/dennwc/varint v1.0.0 // indirect github.com/digitalocean/godo v1.109.0 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect @@ -113,6 +113,7 @@ require ( github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect diff --git a/receiver/purefareceiver/go.sum b/receiver/purefareceiver/go.sum index b5dbbc48f253..bcaa43ae0973 100644 --- a/receiver/purefareceiver/go.sum +++ b/receiver/purefareceiver/go.sum @@ -112,8 +112,8 @@ github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker 
v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -414,6 +414,8 @@ github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374 github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= diff --git a/receiver/purefbreceiver/go.mod b/receiver/purefbreceiver/go.mod index 3453f644119e..d5a9d68b2fa6 100644 --- a/receiver/purefbreceiver/go.mod +++ b/receiver/purefbreceiver/go.mod @@ -42,7 +42,7 @@ require ( github.com/dennwc/varint v1.0.0 // indirect github.com/digitalocean/godo v1.109.0 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // 
indirect @@ -113,6 +113,7 @@ require ( github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect diff --git a/receiver/purefbreceiver/go.sum b/receiver/purefbreceiver/go.sum index b5dbbc48f253..bcaa43ae0973 100644 --- a/receiver/purefbreceiver/go.sum +++ b/receiver/purefbreceiver/go.sum @@ -112,8 +112,8 @@ github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -414,6 +414,8 @@ github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374 github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 
h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= diff --git a/receiver/redisreceiver/go.mod b/receiver/redisreceiver/go.mod index 50d35bb3069b..0b8c34eedc59 100644 --- a/receiver/redisreceiver/go.mod +++ b/receiver/redisreceiver/go.mod @@ -38,7 +38,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -58,6 +58,7 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/redisreceiver/go.sum b/receiver/redisreceiver/go.sum index db528d5c97d1..1bc9b6d6a664 100644 --- a/receiver/redisreceiver/go.sum +++ b/receiver/redisreceiver/go.sum @@ -33,8 +33,8 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r 
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -87,6 +87,8 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/simpleprometheusreceiver/go.mod b/receiver/simpleprometheusreceiver/go.mod index 8a25260b0a12..7bfa7abf97f0 100644 --- a/receiver/simpleprometheusreceiver/go.mod +++ 
b/receiver/simpleprometheusreceiver/go.mod @@ -40,7 +40,7 @@ require ( github.com/dennwc/varint v1.0.0 // indirect github.com/digitalocean/godo v1.109.0 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect @@ -111,6 +111,7 @@ require ( github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect diff --git a/receiver/simpleprometheusreceiver/go.sum b/receiver/simpleprometheusreceiver/go.sum index b5dbbc48f253..bcaa43ae0973 100644 --- a/receiver/simpleprometheusreceiver/go.sum +++ b/receiver/simpleprometheusreceiver/go.sum @@ -112,8 +112,8 @@ github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 
h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -414,6 +414,8 @@ github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374 github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= diff --git a/receiver/snmpreceiver/go.mod b/receiver/snmpreceiver/go.mod index 157f8cc1e635..9a1c77544b35 100644 --- a/receiver/snmpreceiver/go.mod +++ b/receiver/snmpreceiver/go.mod @@ -34,7 +34,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -57,6 +57,7 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + 
github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/snmpreceiver/go.sum b/receiver/snmpreceiver/go.sum index b6dde8cdcf2c..b89923ae025e 100644 --- a/receiver/snmpreceiver/go.sum +++ b/receiver/snmpreceiver/go.sum @@ -33,8 +33,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -116,6 +116,8 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= 
github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/splunkhecreceiver/go.mod b/receiver/splunkhecreceiver/go.mod index c302c848d819..885fc8cc44b4 100644 --- a/receiver/splunkhecreceiver/go.mod +++ b/receiver/splunkhecreceiver/go.mod @@ -33,6 +33,7 @@ require ( github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-logr/logr v1.4.1 // indirect @@ -48,6 +49,7 @@ require ( github.com/knadh/koanf/v2 v2.1.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.101.0 // indirect diff --git a/receiver/splunkhecreceiver/go.sum b/receiver/splunkhecreceiver/go.sum index 51336758e812..40e188c46cc6 100644 --- a/receiver/splunkhecreceiver/go.sum +++ b/receiver/splunkhecreceiver/go.sum @@ -28,8 +28,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker 
v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -110,6 +110,8 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/sqlqueryreceiver/go.mod b/receiver/sqlqueryreceiver/go.mod index fbd84d100a7a..8f935636dde2 100644 --- a/receiver/sqlqueryreceiver/go.mod +++ b/receiver/sqlqueryreceiver/go.mod @@ -59,7 +59,7 @@ require ( github.com/danieljoos/wincred v1.1.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dvsekhvalnov/jose2go v1.6.0 // indirect github.com/expr-lang/expr v1.16.7 // 
indirect @@ -102,6 +102,7 @@ require ( github.com/microsoft/go-mssqldb v1.7.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/sqlqueryreceiver/go.sum b/receiver/sqlqueryreceiver/go.sum index 173a2f837d57..68211060deff 100644 --- a/receiver/sqlqueryreceiver/go.sum +++ b/receiver/sqlqueryreceiver/go.sum @@ -107,8 +107,8 @@ github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -222,6 +222,8 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= 
+github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/vcenterreceiver/go.mod b/receiver/vcenterreceiver/go.mod index 4b83f4d00cc1..98e2e07c3fe4 100644 --- a/receiver/vcenterreceiver/go.mod +++ b/receiver/vcenterreceiver/go.mod @@ -40,7 +40,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -61,6 +61,7 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/vcenterreceiver/go.sum b/receiver/vcenterreceiver/go.sum index a18fe6702447..e20a4a3c208a 100644 --- a/receiver/vcenterreceiver/go.sum +++ b/receiver/vcenterreceiver/go.sum @@ -31,8 +31,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod 
h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -87,6 +87,8 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/zookeeperreceiver/go.mod b/receiver/zookeeperreceiver/go.mod index d0accb165755..614d36805ab3 100644 --- a/receiver/zookeeperreceiver/go.mod +++ b/receiver/zookeeperreceiver/go.mod @@ -36,7 +36,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + 
github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -56,6 +56,7 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/zookeeperreceiver/go.sum b/receiver/zookeeperreceiver/go.sum index bf6afee00018..7ebe0042172c 100644 --- a/receiver/zookeeperreceiver/go.sum +++ b/receiver/zookeeperreceiver/go.sum @@ -27,8 +27,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -81,6 +81,8 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod 
h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/testbed/go.mod b/testbed/go.mod index 9afa01fc0a47..efd9696ec1da 100644 --- a/testbed/go.mod +++ b/testbed/go.mod @@ -96,7 +96,7 @@ require ( github.com/dennwc/varint v1.0.0 // indirect github.com/digitalocean/godo v1.109.0 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect @@ -180,6 +180,7 @@ require ( github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mostynb/go-grpc-compression v1.2.2 // indirect diff --git a/testbed/go.sum b/testbed/go.sum index 61f765241591..4a9a0bb78659 100644 --- a/testbed/go.sum +++ b/testbed/go.sum @@ -143,8 +143,8 @@ github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK github.com/distribution/reference v0.5.0/go.mod 
h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -483,6 +483,8 @@ github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374 github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= From e0142cec86bcf63ebd0261540a14ca50cf762d60 Mon Sep 17 00:00:00 2001 From: Adam Boguszewski Date: Wed, 22 May 2024 16:20:44 +0200 Subject: [PATCH 8/8] Revert "run go mod tidy in all dirs" This reverts commit 
5b9a12384d07c42fa9e5c899412aefc0d0bdafe4. --- cmd/configschema/go.mod | 3 +-- cmd/configschema/go.sum | 6 ++--- cmd/oteltestbedcol/go.mod | 5 ++--- cmd/oteltestbedcol/go.sum | 6 ++--- connector/datadogconnector/go.sum | 6 ++--- exporter/clickhouseexporter/go.mod | 3 +-- exporter/clickhouseexporter/go.sum | 6 ++--- exporter/datadogexporter/go.mod | 3 +-- exporter/datadogexporter/go.sum | 6 ++--- .../datadogexporter/integrationtest/go.sum | 6 ++--- exporter/prometheusexporter/go.mod | 3 +-- exporter/prometheusexporter/go.sum | 6 ++--- exporter/splunkhecexporter/go.mod | 3 +-- exporter/splunkhecexporter/go.sum | 6 ++--- extension/observer/dockerobserver/go.mod | 15 ++++--------- extension/observer/dockerobserver/go.sum | 22 +++++++++---------- go.mod | 2 +- go.sum | 4 ++-- internal/coreinternal/go.mod | 3 +-- internal/coreinternal/go.sum | 6 ++--- internal/docker/go.mod | 6 ----- internal/docker/go.sum | 6 +++++ internal/k8stest/go.mod | 3 +-- internal/k8stest/go.sum | 6 ++--- internal/metadataproviders/go.mod | 3 +-- internal/metadataproviders/go.sum | 6 ++--- processor/k8sattributesprocessor/go.mod | 3 +-- processor/k8sattributesprocessor/go.sum | 6 ++--- processor/resourcedetectionprocessor/go.mod | 3 +-- processor/resourcedetectionprocessor/go.sum | 6 ++--- receiver/aerospikereceiver/go.mod | 3 +-- receiver/aerospikereceiver/go.sum | 6 ++--- receiver/apachereceiver/go.mod | 3 +-- receiver/apachereceiver/go.sum | 6 ++--- receiver/apachesparkreceiver/go.mod | 3 +-- receiver/apachesparkreceiver/go.sum | 6 ++--- receiver/awscontainerinsightreceiver/go.mod | 3 +-- receiver/awscontainerinsightreceiver/go.sum | 6 ++--- receiver/bigipreceiver/go.mod | 3 +-- receiver/bigipreceiver/go.sum | 6 ++--- receiver/elasticsearchreceiver/go.mod | 3 +-- receiver/elasticsearchreceiver/go.sum | 6 ++--- receiver/filestatsreceiver/go.mod | 3 +-- receiver/filestatsreceiver/go.sum | 6 ++--- receiver/haproxyreceiver/go.mod | 3 +-- receiver/haproxyreceiver/go.sum | 6 ++--- 
receiver/hostmetricsreceiver/go.mod | 3 +-- receiver/hostmetricsreceiver/go.sum | 6 ++--- receiver/iisreceiver/go.mod | 3 +-- receiver/iisreceiver/go.sum | 6 ++--- receiver/jmxreceiver/go.mod | 3 +-- receiver/jmxreceiver/go.sum | 6 ++--- receiver/k8sclusterreceiver/go.mod | 3 +-- receiver/k8sclusterreceiver/go.sum | 6 ++--- receiver/k8sobjectsreceiver/go.mod | 3 +-- receiver/k8sobjectsreceiver/go.sum | 6 ++--- receiver/kubeletstatsreceiver/go.mod | 3 +-- receiver/kubeletstatsreceiver/go.sum | 6 ++--- receiver/memcachedreceiver/go.mod | 3 +-- receiver/memcachedreceiver/go.sum | 6 ++--- receiver/mongodbreceiver/go.mod | 3 +-- receiver/mongodbreceiver/go.sum | 6 ++--- receiver/mysqlreceiver/go.mod | 3 +-- receiver/mysqlreceiver/go.sum | 6 ++--- receiver/nginxreceiver/go.mod | 3 +-- receiver/nginxreceiver/go.sum | 6 ++--- receiver/postgresqlreceiver/go.mod | 3 +-- receiver/postgresqlreceiver/go.sum | 6 ++--- receiver/prometheusreceiver/go.mod | 3 +-- receiver/prometheusreceiver/go.sum | 6 ++--- receiver/purefareceiver/go.mod | 3 +-- receiver/purefareceiver/go.sum | 6 ++--- receiver/purefbreceiver/go.mod | 3 +-- receiver/purefbreceiver/go.sum | 6 ++--- receiver/redisreceiver/go.mod | 3 +-- receiver/redisreceiver/go.sum | 6 ++--- receiver/simpleprometheusreceiver/go.mod | 3 +-- receiver/simpleprometheusreceiver/go.sum | 6 ++--- receiver/snmpreceiver/go.mod | 3 +-- receiver/snmpreceiver/go.sum | 6 ++--- receiver/splunkhecreceiver/go.mod | 2 -- receiver/splunkhecreceiver/go.sum | 6 ++--- receiver/sqlqueryreceiver/go.mod | 3 +-- receiver/sqlqueryreceiver/go.sum | 6 ++--- receiver/vcenterreceiver/go.mod | 3 +-- receiver/vcenterreceiver/go.sum | 6 ++--- receiver/zookeeperreceiver/go.mod | 3 +-- receiver/zookeeperreceiver/go.sum | 6 ++--- testbed/go.mod | 3 +-- testbed/go.sum | 6 ++--- 90 files changed, 150 insertions(+), 287 deletions(-) diff --git a/cmd/configschema/go.mod b/cmd/configschema/go.mod index b7ab275c71c9..d1028fe55296 100644 --- a/cmd/configschema/go.mod +++ 
b/cmd/configschema/go.mod @@ -357,7 +357,7 @@ require ( github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/digitalocean/godo v1.109.0 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v25.0.5+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect @@ -513,7 +513,6 @@ require ( github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/sys/mountinfo v0.6.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect diff --git a/cmd/configschema/go.sum b/cmd/configschema/go.sum index 0ebc53d061ed..fa810dee872d 100644 --- a/cmd/configschema/go.sum +++ b/cmd/configschema/go.sum @@ -1212,8 +1212,8 @@ github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= 
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -1926,8 +1926,6 @@ github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374 github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= diff --git a/cmd/oteltestbedcol/go.mod b/cmd/oteltestbedcol/go.mod index 2a6b3395e88f..848526a7db5c 100644 --- a/cmd/oteltestbedcol/go.mod +++ b/cmd/oteltestbedcol/go.mod @@ -4,7 +4,7 @@ module github.com/open-telemetry/opentelemetry-collector-contrib/cmd/oteltestbed go 1.21.0 -toolchain go1.21.4 +toolchain go1.21.10 require ( github.com/open-telemetry/opentelemetry-collector-contrib/exporter/carbonexporter v0.101.0 @@ -85,7 +85,7 @@ require ( github.com/dennwc/varint v1.0.0 // indirect github.com/digitalocean/godo v1.109.0 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v25.0.5+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/elastic/go-structform v0.0.10 // indirect @@ -170,7 +170,6 @@ require ( github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect 
github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mostynb/go-grpc-compression v1.2.2 // indirect diff --git a/cmd/oteltestbedcol/go.sum b/cmd/oteltestbedcol/go.sum index a5c815929fe7..5f9d9b1a213c 100644 --- a/cmd/oteltestbedcol/go.sum +++ b/cmd/oteltestbedcol/go.sum @@ -150,8 +150,8 @@ github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -486,8 +486,6 @@ github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374 github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= 
-github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/connector/datadogconnector/go.sum b/connector/datadogconnector/go.sum index f0ebbe7cc9df..e8a499434243 100644 --- a/connector/datadogconnector/go.sum +++ b/connector/datadogconnector/go.sum @@ -297,8 +297,8 @@ github.com/digitalocean/godo v1.109.0 h1:4W97RJLJSUQ3veRZDNbp1Ol3Rbn6Lmt9bKGvfqY github.com/digitalocean/godo v1.109.0/go.mod h1:R6EmmWI8CT1+fCtjWY9UCB+L5uufuZH13wk3YhxycCs= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -640,8 +640,6 @@ github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374 github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 
h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= diff --git a/exporter/clickhouseexporter/go.mod b/exporter/clickhouseexporter/go.mod index 3981aa7a602b..3e4b46362060 100644 --- a/exporter/clickhouseexporter/go.mod +++ b/exporter/clickhouseexporter/go.mod @@ -36,7 +36,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v25.0.5+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -57,7 +57,6 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/exporter/clickhouseexporter/go.sum b/exporter/clickhouseexporter/go.sum index c84c2d849e28..ad546066ce77 100644 --- a/exporter/clickhouseexporter/go.sum +++ b/exporter/clickhouseexporter/go.sum @@ -35,8 +35,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c 
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -107,8 +107,6 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/exporter/datadogexporter/go.mod b/exporter/datadogexporter/go.mod index 2a297d9f3a4d..e966f42cf9f5 100644 --- a/exporter/datadogexporter/go.mod +++ b/exporter/datadogexporter/go.mod @@ -149,7 +149,7 @@ require ( 
github.com/dennwc/varint v1.0.0 // indirect github.com/digitalocean/godo v1.109.0 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v25.0.5+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect @@ -239,7 +239,6 @@ require ( github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect diff --git a/exporter/datadogexporter/go.sum b/exporter/datadogexporter/go.sum index 509562c0aa95..892d943f8c9d 100644 --- a/exporter/datadogexporter/go.sum +++ b/exporter/datadogexporter/go.sum @@ -327,8 +327,8 @@ github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod 
h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -726,8 +726,6 @@ github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374 github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= diff --git a/exporter/datadogexporter/integrationtest/go.sum b/exporter/datadogexporter/integrationtest/go.sum index f0ebbe7cc9df..e8a499434243 100644 --- a/exporter/datadogexporter/integrationtest/go.sum +++ b/exporter/datadogexporter/integrationtest/go.sum @@ -297,8 +297,8 @@ github.com/digitalocean/godo v1.109.0 h1:4W97RJLJSUQ3veRZDNbp1Ol3Rbn6Lmt9bKGvfqY github.com/digitalocean/godo v1.109.0/go.mod h1:R6EmmWI8CT1+fCtjWY9UCB+L5uufuZH13wk3YhxycCs= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod 
h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -640,8 +640,6 @@ github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374 github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= diff --git a/exporter/prometheusexporter/go.mod b/exporter/prometheusexporter/go.mod index 9f23bd878467..47b8d76e8276 100644 --- a/exporter/prometheusexporter/go.mod +++ b/exporter/prometheusexporter/go.mod @@ -49,7 +49,7 @@ require ( github.com/dennwc/varint v1.0.0 // indirect github.com/digitalocean/godo v1.109.0 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v25.0.5+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect @@ -120,7 +120,6 @@ require ( github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect 
github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect diff --git a/exporter/prometheusexporter/go.sum b/exporter/prometheusexporter/go.sum index 365036f288d3..48fc13467986 100644 --- a/exporter/prometheusexporter/go.sum +++ b/exporter/prometheusexporter/go.sum @@ -112,8 +112,8 @@ github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -414,8 +414,6 @@ github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374 github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod 
h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= diff --git a/exporter/splunkhecexporter/go.mod b/exporter/splunkhecexporter/go.mod index b0c5772334bc..6c338dbf9bf1 100644 --- a/exporter/splunkhecexporter/go.mod +++ b/exporter/splunkhecexporter/go.mod @@ -43,7 +43,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v25.0.5+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -64,7 +64,6 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/exporter/splunkhecexporter/go.sum b/exporter/splunkhecexporter/go.sum index e30673732d9a..a2e93ed2894f 100644 --- a/exporter/splunkhecexporter/go.sum +++ b/exporter/splunkhecexporter/go.sum @@ -32,8 +32,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 
h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -115,8 +115,6 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/extension/observer/dockerobserver/go.mod b/extension/observer/dockerobserver/go.mod index acdbdab7ac13..482df419fcd9 100644 --- a/extension/observer/dockerobserver/go.mod +++ b/extension/observer/dockerobserver/go.mod @@ -3,7 +3,7 @@ module github.com/open-telemetry/opentelemetry-collector-contrib/extension/obser go 1.21.0 require ( - 
github.com/docker/docker v26.1.2+incompatible + github.com/docker/docker v25.0.5+incompatible github.com/docker/go-connections v0.5.0 github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer v0.101.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.101.0 @@ -26,7 +26,7 @@ require ( github.com/Microsoft/hcsshim v0.11.4 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect - github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/containerd/containerd v1.7.15 // indirect github.com/containerd/log v0.1.0 // indirect github.com/cpuguy83/dockercfg v0.3.1 // indirect @@ -49,7 +49,6 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect @@ -80,11 +79,11 @@ require ( go.uber.org/multierr v1.11.0 // indirect golang.org/x/crypto v0.22.0 // indirect golang.org/x/mod v0.16.0 // indirect - golang.org/x/net v0.24.0 // indirect + golang.org/x/net v0.23.0 // indirect golang.org/x/sys v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.15.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect google.golang.org/grpc v1.63.2 // indirect google.golang.org/protobuf v1.34.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect @@ -104,9 +103,3 @@ retract ( v0.76.1 v0.65.0 ) - -replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil => ../../../pkg/pdatautil - -replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest => 
../../../pkg/pdatatest - -replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden => ../../../pkg/golden diff --git a/extension/observer/dockerobserver/go.sum b/extension/observer/dockerobserver/go.sum index 903a5b83dcfa..926649ad6733 100644 --- a/extension/observer/dockerobserver/go.sum +++ b/extension/observer/dockerobserver/go.sum @@ -12,8 +12,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= -github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/containerd/containerd v1.7.15 h1:afEHXdil9iAm03BmhjzKyXnnEBtjaLJefdU7DV0IFes= github.com/containerd/containerd v1.7.15/go.mod h1:ISzRRTMF8EXNpJlTzyr2XMhN+j9K302C21/+cr3kUnY= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= @@ -27,8 +27,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.2+incompatible h1:UVX5ZOrrfTGZZYEP+ZDq3Xn9PdHNXaSYMFPDumMqG2k= -github.com/docker/docker v26.1.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible 
h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -78,8 +78,6 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= @@ -155,8 +153,8 @@ go.opentelemetry.io/otel v1.26.0 h1:LQwgL5s/1W7YiiRwxf03QGnWLb2HW4pLiAhaA5cZXBs= go.opentelemetry.io/otel v1.26.0/go.mod h1:UmLkJHUAidDval2EICqBMbnAd0/m2vmpf/dAM+fvFs4= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 
h1:Xw8U6u2f8DK2XAkGRFV7BBLENgnTGX9i4rQRxJf+/vs= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0/go.mod h1:6KW1Fm6R/s6Z3PGXwSJN2K4eT6wQB3vXX6CVnYX9NmM= go.opentelemetry.io/otel/exporters/prometheus v0.48.0 h1:sBQe3VNGUjY9IKWQC6z2lNqa5iGbDSxhs60ABwK4y0s= go.opentelemetry.io/otel/exporters/prometheus v0.48.0/go.mod h1:DtrbMzoZWwQHyrQmCfLam5DZbnmorsGbOtTbYHycU5o= go.opentelemetry.io/otel/metric v1.26.0 h1:7S39CLuY5Jgg9CrnA9HHiEjGMF/X2VHvoXGgSllRz30= @@ -188,8 +186,8 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -228,8 +226,8 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0= google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= 
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de h1:cZGRis4/ot9uVm639a+rHCUaG0JJHEsdyzSQTMX+suY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY= google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= diff --git a/go.mod b/go.mod index 54c1151bcf3a..e5c5e6d22cbd 100644 --- a/go.mod +++ b/go.mod @@ -376,7 +376,7 @@ require ( github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/digitalocean/godo v1.109.0 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v26.1.2+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect diff --git a/go.sum b/go.sum index 452d64fff48a..010f3cb09df3 100644 --- a/go.sum +++ b/go.sum @@ -1214,8 +1214,8 @@ github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= 
+github.com/docker/docker v26.1.2+incompatible h1:UVX5ZOrrfTGZZYEP+ZDq3Xn9PdHNXaSYMFPDumMqG2k= +github.com/docker/docker v26.1.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= diff --git a/internal/coreinternal/go.mod b/internal/coreinternal/go.mod index bfa152677a3f..dbcb8bd31c52 100644 --- a/internal/coreinternal/go.mod +++ b/internal/coreinternal/go.mod @@ -35,7 +35,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v25.0.5+incompatible // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-logr/logr v1.4.1 // indirect @@ -53,7 +53,6 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/internal/coreinternal/go.sum b/internal/coreinternal/go.sum index 62a80a1d2bc4..3906841dd3eb 100644 --- a/internal/coreinternal/go.sum +++ b/internal/coreinternal/go.sum @@ -27,8 +27,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod 
h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -79,8 +79,6 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/internal/docker/go.mod b/internal/docker/go.mod index db8ecc59e5cc..5a7632055568 100644 --- a/internal/docker/go.mod +++ b/internal/docker/go.mod @@ -81,9 +81,3 @@ retract ( v0.76.1 v0.65.0 ) - -replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil => ../../pkg/pdatautil - -replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest => ../../pkg/pdatatest - -replace 
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden => ../../pkg/golden diff --git a/internal/docker/go.sum b/internal/docker/go.sum index 33e2aa7f44be..dcbcc9d20bbe 100644 --- a/internal/docker/go.sum +++ b/internal/docker/go.sum @@ -73,6 +73,12 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.101.0 h1:Ohhry/Fcxh7/ysAxFhW2IJR/4hWEPaizDNtg02upYLA= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.101.0/go.mod h1:H2vPArfULuCAm4Y6GHNxuLrjFGSgO16NJgdGACxBhSM= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.101.0 h1:TCQYvGS2MKTotOTQDnHUSd4ljEzXRzHXopdv71giKWU= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.101.0/go.mod h1:Nl2d4DSK/IbaWnnBxYyhMNUW6C9sb5/4idVZrSW/5Ps= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.101.0 h1:dVINhi/nne11lG+Xnwuy9t/N4xyaH2Om2EU+5lphCA4= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.101.0/go.mod h1:kjyfpKOuBfkx3UsJQsbQ5eTJM3yQWiRYaYxs47PpxvI= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= diff --git a/internal/k8stest/go.mod b/internal/k8stest/go.mod index 97b13db12519..b3f403ca9cff 100644 --- a/internal/k8stest/go.mod +++ b/internal/k8stest/go.mod @@ -3,7 +3,7 @@ module github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8stes go 1.21.0 require ( - github.com/docker/docker 
v26.1.3+incompatible + github.com/docker/docker v25.0.5+incompatible github.com/stretchr/testify v1.9.0 k8s.io/api v0.29.3 k8s.io/apimachinery v0.29.3 @@ -33,7 +33,6 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/term v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect diff --git a/internal/k8stest/go.sum b/internal/k8stest/go.sum index 9f40bbd895d6..88e12dcff96a 100644 --- a/internal/k8stest/go.sum +++ b/internal/k8stest/go.sum @@ -12,8 +12,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -75,8 +75,6 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= 
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= diff --git a/internal/metadataproviders/go.mod b/internal/metadataproviders/go.mod index 1d888d3c49d4..6b1006536c7c 100644 --- a/internal/metadataproviders/go.mod +++ b/internal/metadataproviders/go.mod @@ -5,7 +5,7 @@ go 1.21.0 require ( github.com/Showmax/go-fqdn v1.0.0 github.com/aws/aws-sdk-go v1.53.7 - github.com/docker/docker v26.1.3+incompatible + github.com/docker/docker v25.0.5+incompatible github.com/hashicorp/consul/api v1.28.2 github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.101.0 github.com/stretchr/testify v1.9.0 @@ -58,7 +58,6 @@ require ( github.com/mattn/go-isatty v0.0.17 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect diff --git a/internal/metadataproviders/go.sum b/internal/metadataproviders/go.sum index 7e5d89d32e9a..3edb2297fde5 100644 --- a/internal/metadataproviders/go.sum +++ b/internal/metadataproviders/go.sum @@ -77,8 +77,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= @@ -313,8 +313,6 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= diff --git a/processor/k8sattributesprocessor/go.mod b/processor/k8sattributesprocessor/go.mod index 381650ef4c3f..78ec9c555e3b 100644 --- a/processor/k8sattributesprocessor/go.mod +++ b/processor/k8sattributesprocessor/go.mod @@ -36,7 +36,7 @@ require ( 
github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v25.0.5+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect @@ -66,7 +66,6 @@ require ( github.com/mailru/easyjson v0.7.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mostynb/go-grpc-compression v1.2.2 // indirect diff --git a/processor/k8sattributesprocessor/go.sum b/processor/k8sattributesprocessor/go.sum index a0fbca555e1c..5d17a5e0b9ec 100644 --- a/processor/k8sattributesprocessor/go.sum +++ b/processor/k8sattributesprocessor/go.sum @@ -824,8 +824,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod 
h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -1110,8 +1110,6 @@ github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HK github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= diff --git a/processor/resourcedetectionprocessor/go.mod b/processor/resourcedetectionprocessor/go.mod index 8d94afa4165c..77bef65424d8 100644 --- a/processor/resourcedetectionprocessor/go.mod +++ b/processor/resourcedetectionprocessor/go.mod @@ -41,7 +41,7 @@ require ( github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v25.0.5+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect @@ -86,7 +86,6 @@ require ( github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/term v0.5.0 // indirect 
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect diff --git a/processor/resourcedetectionprocessor/go.sum b/processor/resourcedetectionprocessor/go.sum index ff2bb53bcecc..b7cae4a605a7 100644 --- a/processor/resourcedetectionprocessor/go.sum +++ b/processor/resourcedetectionprocessor/go.sum @@ -83,8 +83,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -342,8 +342,6 @@ github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374 github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod 
h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= diff --git a/receiver/aerospikereceiver/go.mod b/receiver/aerospikereceiver/go.mod index c56d165270b7..928f835bd067 100644 --- a/receiver/aerospikereceiver/go.mod +++ b/receiver/aerospikereceiver/go.mod @@ -38,7 +38,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v25.0.5+incompatible // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect @@ -57,7 +57,6 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/aerospikereceiver/go.sum b/receiver/aerospikereceiver/go.sum index 50bc7cc574af..3d0d1cf7e207 100644 --- a/receiver/aerospikereceiver/go.sum +++ b/receiver/aerospikereceiver/go.sum @@ -29,8 +29,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= 
-github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -87,8 +87,6 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/apachereceiver/go.mod b/receiver/apachereceiver/go.mod index 68e7d6ba87cc..73523a2d1b49 100644 --- a/receiver/apachereceiver/go.mod +++ b/receiver/apachereceiver/go.mod @@ -36,7 +36,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v25.0.5+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect 
github.com/felixge/httpsnoop v1.0.4 // indirect @@ -58,7 +58,6 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/apachereceiver/go.sum b/receiver/apachereceiver/go.sum index 611f17f3e5f0..c1e7a7b31073 100644 --- a/receiver/apachereceiver/go.sum +++ b/receiver/apachereceiver/go.sum @@ -27,8 +27,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -85,8 +85,6 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod 
h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/apachesparkreceiver/go.mod b/receiver/apachesparkreceiver/go.mod index 701bd24a79df..b65234518a4b 100644 --- a/receiver/apachesparkreceiver/go.mod +++ b/receiver/apachesparkreceiver/go.mod @@ -35,7 +35,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v25.0.5+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -57,7 +57,6 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/apachesparkreceiver/go.sum b/receiver/apachesparkreceiver/go.sum index 2533ce0fc498..8462a1b861ac 100644 --- a/receiver/apachesparkreceiver/go.sum +++ b/receiver/apachesparkreceiver/go.sum @@ -27,8 +27,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 
h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -85,8 +85,6 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/awscontainerinsightreceiver/go.mod b/receiver/awscontainerinsightreceiver/go.mod index 116f6bbc4b2e..af93c72ec66f 100644 --- a/receiver/awscontainerinsightreceiver/go.mod +++ b/receiver/awscontainerinsightreceiver/go.mod @@ -44,7 +44,7 @@ require ( github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/davecgh/go-spew v1.1.1 // 
indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v25.0.5+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect @@ -84,7 +84,6 @@ require ( github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/sys/mountinfo v0.6.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect diff --git a/receiver/awscontainerinsightreceiver/go.sum b/receiver/awscontainerinsightreceiver/go.sum index b06d63c0f535..1d9a696ab6f4 100644 --- a/receiver/awscontainerinsightreceiver/go.sum +++ b/receiver/awscontainerinsightreceiver/go.sum @@ -75,8 +75,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= 
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -268,8 +268,6 @@ github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HK github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= diff --git a/receiver/bigipreceiver/go.mod b/receiver/bigipreceiver/go.mod index 3879a742292d..4199c5d72eb3 100644 --- a/receiver/bigipreceiver/go.mod +++ b/receiver/bigipreceiver/go.mod @@ -37,7 +37,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v25.0.5+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -59,7 +59,6 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/bigipreceiver/go.sum 
b/receiver/bigipreceiver/go.sum index 2533ce0fc498..8462a1b861ac 100644 --- a/receiver/bigipreceiver/go.sum +++ b/receiver/bigipreceiver/go.sum @@ -27,8 +27,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -85,8 +85,6 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 
h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/elasticsearchreceiver/go.mod b/receiver/elasticsearchreceiver/go.mod index d03707583f9d..d7f86631ff74 100644 --- a/receiver/elasticsearchreceiver/go.mod +++ b/receiver/elasticsearchreceiver/go.mod @@ -38,7 +38,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v25.0.5+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -59,7 +59,6 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/elasticsearchreceiver/go.sum b/receiver/elasticsearchreceiver/go.sum index 2533ce0fc498..8462a1b861ac 100644 --- a/receiver/elasticsearchreceiver/go.sum +++ b/receiver/elasticsearchreceiver/go.sum @@ -27,8 +27,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= 
+github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -85,8 +85,6 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/filestatsreceiver/go.mod b/receiver/filestatsreceiver/go.mod index 2d1883e6c6e4..680c8d9ec712 100644 --- a/receiver/filestatsreceiver/go.mod +++ b/receiver/filestatsreceiver/go.mod @@ -34,7 +34,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v25.0.5+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -53,7 +53,6 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect 
github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/filestatsreceiver/go.sum b/receiver/filestatsreceiver/go.sum index c6bcde89db76..86178a23f20d 100644 --- a/receiver/filestatsreceiver/go.sum +++ b/receiver/filestatsreceiver/go.sum @@ -29,8 +29,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -81,8 +81,6 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec 
v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/haproxyreceiver/go.mod b/receiver/haproxyreceiver/go.mod index 67b4ac4b1507..f9e186db409d 100644 --- a/receiver/haproxyreceiver/go.mod +++ b/receiver/haproxyreceiver/go.mod @@ -36,7 +36,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v25.0.5+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -58,7 +58,6 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/haproxyreceiver/go.sum b/receiver/haproxyreceiver/go.sum index 611f17f3e5f0..c1e7a7b31073 100644 --- a/receiver/haproxyreceiver/go.sum +++ b/receiver/haproxyreceiver/go.sum @@ -27,8 +27,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.3+incompatible 
h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -85,8 +85,6 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/hostmetricsreceiver/go.mod b/receiver/hostmetricsreceiver/go.mod index 42f0097b33f7..a42211c6e84c 100644 --- a/receiver/hostmetricsreceiver/go.mod +++ b/receiver/hostmetricsreceiver/go.mod @@ -42,7 +42,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v25.0.5+incompatible // indirect github.com/docker/go-connections v0.5.0 // 
indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -66,7 +66,6 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/hostmetricsreceiver/go.sum b/receiver/hostmetricsreceiver/go.sum index 1de392a708c9..d87e59cb926c 100644 --- a/receiver/hostmetricsreceiver/go.sum +++ b/receiver/hostmetricsreceiver/go.sum @@ -77,8 +77,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -222,8 +222,6 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 
h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/iisreceiver/go.mod b/receiver/iisreceiver/go.mod index 0be69e7d510b..1f503d643cc9 100644 --- a/receiver/iisreceiver/go.mod +++ b/receiver/iisreceiver/go.mod @@ -35,7 +35,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v25.0.5+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -54,7 +54,6 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/iisreceiver/go.sum b/receiver/iisreceiver/go.sum index e41fc73a6f47..124da2ac016a 100644 --- a/receiver/iisreceiver/go.sum +++ b/receiver/iisreceiver/go.sum @@ -27,8 +27,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -79,8 +79,6 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/jmxreceiver/go.mod b/receiver/jmxreceiver/go.mod index cc1706c0a8e8..a1f21ca3c5b4 100644 --- a/receiver/jmxreceiver/go.mod +++ b/receiver/jmxreceiver/go.mod @@ -36,7 +36,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect 
github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v25.0.5+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -58,7 +58,6 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/jmxreceiver/go.sum b/receiver/jmxreceiver/go.sum index 7f5b56115a06..89b600bafae6 100644 --- a/receiver/jmxreceiver/go.sum +++ b/receiver/jmxreceiver/go.sum @@ -27,8 +27,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -85,8 +85,6 @@ github.com/mitchellh/copystructure v1.2.0 
h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/k8sclusterreceiver/go.mod b/receiver/k8sclusterreceiver/go.mod index 6341f6d1a603..9d2da0b31c3b 100644 --- a/receiver/k8sclusterreceiver/go.mod +++ b/receiver/k8sclusterreceiver/go.mod @@ -40,7 +40,7 @@ require ( github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v25.0.5+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect @@ -69,7 +69,6 @@ require ( github.com/mailru/easyjson v0.7.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mostynb/go-grpc-compression v1.2.2 // indirect diff --git a/receiver/k8sclusterreceiver/go.sum b/receiver/k8sclusterreceiver/go.sum index 1ae2c71aa112..dbe75dc30e0f 100644 --- 
a/receiver/k8sclusterreceiver/go.sum +++ b/receiver/k8sclusterreceiver/go.sum @@ -57,8 +57,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -226,8 +226,6 @@ github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HK github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= diff --git a/receiver/k8sobjectsreceiver/go.mod 
b/receiver/k8sobjectsreceiver/go.mod index 046dc359a549..6104edb0b154 100644 --- a/receiver/k8sobjectsreceiver/go.mod +++ b/receiver/k8sobjectsreceiver/go.mod @@ -30,7 +30,7 @@ require ( github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v25.0.5+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect @@ -60,7 +60,6 @@ require ( github.com/mailru/easyjson v0.7.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mostynb/go-grpc-compression v1.2.2 // indirect diff --git a/receiver/k8sobjectsreceiver/go.sum b/receiver/k8sobjectsreceiver/go.sum index 54901919b768..c4019c387335 100644 --- a/receiver/k8sobjectsreceiver/go.sum +++ b/receiver/k8sobjectsreceiver/go.sum @@ -57,8 +57,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod 
h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -224,8 +224,6 @@ github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HK github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= diff --git a/receiver/kubeletstatsreceiver/go.mod b/receiver/kubeletstatsreceiver/go.mod index 4de9d5b49900..4474671400c7 100644 --- a/receiver/kubeletstatsreceiver/go.mod +++ b/receiver/kubeletstatsreceiver/go.mod @@ -38,7 +38,7 @@ require ( github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v25.0.5+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect @@ -67,7 +67,6 @@ require ( github.com/mailru/easyjson v0.7.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - 
github.com/moby/docker-image-spec v1.3.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mostynb/go-grpc-compression v1.2.2 // indirect diff --git a/receiver/kubeletstatsreceiver/go.sum b/receiver/kubeletstatsreceiver/go.sum index c92034964ef7..4d47e312691e 100644 --- a/receiver/kubeletstatsreceiver/go.sum +++ b/receiver/kubeletstatsreceiver/go.sum @@ -57,8 +57,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -224,8 +224,6 @@ github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HK github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec 
v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= diff --git a/receiver/memcachedreceiver/go.mod b/receiver/memcachedreceiver/go.mod index c83430866753..587e34957d16 100644 --- a/receiver/memcachedreceiver/go.mod +++ b/receiver/memcachedreceiver/go.mod @@ -35,7 +35,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v25.0.5+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -54,7 +54,6 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/memcachedreceiver/go.sum b/receiver/memcachedreceiver/go.sum index a8ce6b167de1..8f5ea02be66b 100644 --- a/receiver/memcachedreceiver/go.sum +++ b/receiver/memcachedreceiver/go.sum @@ -27,8 +27,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.3+incompatible 
h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -81,8 +81,6 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/mongodbreceiver/go.mod b/receiver/mongodbreceiver/go.mod index ae05a2349255..a1831b5b614b 100644 --- a/receiver/mongodbreceiver/go.mod +++ b/receiver/mongodbreceiver/go.mod @@ -41,7 +41,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v25.0.5+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect 
github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -62,7 +62,6 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/mongodbreceiver/go.sum b/receiver/mongodbreceiver/go.sum index cbe706e493d9..b94fe63ca36c 100644 --- a/receiver/mongodbreceiver/go.sum +++ b/receiver/mongodbreceiver/go.sum @@ -27,8 +27,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -85,8 +85,6 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= 
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/mysqlreceiver/go.mod b/receiver/mysqlreceiver/go.mod index 3143902e3db1..6fa493c202ad 100644 --- a/receiver/mysqlreceiver/go.mod +++ b/receiver/mysqlreceiver/go.mod @@ -39,7 +39,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v25.0.5+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -59,7 +59,6 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/mysqlreceiver/go.sum b/receiver/mysqlreceiver/go.sum index aa20de61492d..9251bda77c02 100644 --- a/receiver/mysqlreceiver/go.sum +++ b/receiver/mysqlreceiver/go.sum @@ -29,8 +29,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 
h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -85,8 +85,6 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/nginxreceiver/go.mod b/receiver/nginxreceiver/go.mod index caa37f108f61..735432abd1bb 100644 --- a/receiver/nginxreceiver/go.mod +++ b/receiver/nginxreceiver/go.mod @@ -36,7 +36,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - 
github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v25.0.5+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -58,7 +58,6 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/nginxreceiver/go.sum b/receiver/nginxreceiver/go.sum index 979bfc837c8c..998829beb45d 100644 --- a/receiver/nginxreceiver/go.sum +++ b/receiver/nginxreceiver/go.sum @@ -27,8 +27,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -85,8 +85,6 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/postgresqlreceiver/go.mod b/receiver/postgresqlreceiver/go.mod index 6549898c71fb..715749d81049 100644 --- a/receiver/postgresqlreceiver/go.mod +++ b/receiver/postgresqlreceiver/go.mod @@ -41,7 +41,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v25.0.5+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -62,7 +62,6 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/postgresqlreceiver/go.sum b/receiver/postgresqlreceiver/go.sum index 78a044aeec70..62b174a1bed5 100644 --- a/receiver/postgresqlreceiver/go.sum +++ b/receiver/postgresqlreceiver/go.sum @@ -27,8 +27,8 @@ 
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -85,8 +85,6 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/prometheusreceiver/go.mod b/receiver/prometheusreceiver/go.mod index a730072a1884..9fb98aa01857 100644 --- 
a/receiver/prometheusreceiver/go.mod +++ b/receiver/prometheusreceiver/go.mod @@ -58,7 +58,7 @@ require ( github.com/dennwc/varint v1.0.0 // indirect github.com/digitalocean/godo v1.109.0 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v25.0.5+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect @@ -129,7 +129,6 @@ require ( github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect diff --git a/receiver/prometheusreceiver/go.sum b/receiver/prometheusreceiver/go.sum index 304e2ac04a43..bded91929bfc 100644 --- a/receiver/prometheusreceiver/go.sum +++ b/receiver/prometheusreceiver/go.sum @@ -113,8 +113,8 @@ github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections 
v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -415,8 +415,6 @@ github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374 github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= diff --git a/receiver/purefareceiver/go.mod b/receiver/purefareceiver/go.mod index 340127320500..c4371408bc1a 100644 --- a/receiver/purefareceiver/go.mod +++ b/receiver/purefareceiver/go.mod @@ -42,7 +42,7 @@ require ( github.com/dennwc/varint v1.0.0 // indirect github.com/digitalocean/godo v1.109.0 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v25.0.5+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect @@ -113,7 +113,6 @@ require ( github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect 
github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect diff --git a/receiver/purefareceiver/go.sum b/receiver/purefareceiver/go.sum index bcaa43ae0973..b5dbbc48f253 100644 --- a/receiver/purefareceiver/go.sum +++ b/receiver/purefareceiver/go.sum @@ -112,8 +112,8 @@ github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -414,8 +414,6 @@ github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374 github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 
h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= diff --git a/receiver/purefbreceiver/go.mod b/receiver/purefbreceiver/go.mod index d5a9d68b2fa6..3453f644119e 100644 --- a/receiver/purefbreceiver/go.mod +++ b/receiver/purefbreceiver/go.mod @@ -42,7 +42,7 @@ require ( github.com/dennwc/varint v1.0.0 // indirect github.com/digitalocean/godo v1.109.0 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v25.0.5+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect @@ -113,7 +113,6 @@ require ( github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect diff --git a/receiver/purefbreceiver/go.sum b/receiver/purefbreceiver/go.sum index bcaa43ae0973..b5dbbc48f253 100644 --- a/receiver/purefbreceiver/go.sum +++ b/receiver/purefbreceiver/go.sum @@ -112,8 +112,8 @@ github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= 
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -414,8 +414,6 @@ github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374 github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= diff --git a/receiver/redisreceiver/go.mod b/receiver/redisreceiver/go.mod index 0b8c34eedc59..50d35bb3069b 100644 --- a/receiver/redisreceiver/go.mod +++ b/receiver/redisreceiver/go.mod @@ -38,7 +38,7 @@ require ( 
github.com/davecgh/go-spew v1.1.1 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v25.0.5+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -58,7 +58,6 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/redisreceiver/go.sum b/receiver/redisreceiver/go.sum index 1bc9b6d6a664..db528d5c97d1 100644 --- a/receiver/redisreceiver/go.sum +++ b/receiver/redisreceiver/go.sum @@ -33,8 +33,8 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod 
h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -87,8 +87,6 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/simpleprometheusreceiver/go.mod b/receiver/simpleprometheusreceiver/go.mod index 7bfa7abf97f0..8a25260b0a12 100644 --- a/receiver/simpleprometheusreceiver/go.mod +++ b/receiver/simpleprometheusreceiver/go.mod @@ -40,7 +40,7 @@ require ( github.com/dennwc/varint v1.0.0 // indirect github.com/digitalocean/godo v1.109.0 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v25.0.5+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect @@ -111,7 +111,6 @@ require ( github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 
github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect diff --git a/receiver/simpleprometheusreceiver/go.sum b/receiver/simpleprometheusreceiver/go.sum index bcaa43ae0973..b5dbbc48f253 100644 --- a/receiver/simpleprometheusreceiver/go.sum +++ b/receiver/simpleprometheusreceiver/go.sum @@ -112,8 +112,8 @@ github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -414,8 +414,6 @@ github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374 github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/term 
v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= diff --git a/receiver/snmpreceiver/go.mod b/receiver/snmpreceiver/go.mod index 9a1c77544b35..157f8cc1e635 100644 --- a/receiver/snmpreceiver/go.mod +++ b/receiver/snmpreceiver/go.mod @@ -34,7 +34,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v25.0.5+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -57,7 +57,6 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/snmpreceiver/go.sum b/receiver/snmpreceiver/go.sum index b89923ae025e..b6dde8cdcf2c 100644 --- a/receiver/snmpreceiver/go.sum +++ b/receiver/snmpreceiver/go.sum @@ -33,8 +33,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= 
-github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -116,8 +116,6 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/splunkhecreceiver/go.mod b/receiver/splunkhecreceiver/go.mod index 885fc8cc44b4..c302c848d819 100644 --- a/receiver/splunkhecreceiver/go.mod +++ b/receiver/splunkhecreceiver/go.mod @@ -33,7 +33,6 @@ require ( github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-logr/logr v1.4.1 // indirect @@ -49,7 +48,6 @@ require ( 
github.com/knadh/koanf/v2 v2.1.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.101.0 // indirect diff --git a/receiver/splunkhecreceiver/go.sum b/receiver/splunkhecreceiver/go.sum index 40e188c46cc6..51336758e812 100644 --- a/receiver/splunkhecreceiver/go.sum +++ b/receiver/splunkhecreceiver/go.sum @@ -28,8 +28,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -110,8 +110,6 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk 
v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/sqlqueryreceiver/go.mod b/receiver/sqlqueryreceiver/go.mod index 8f935636dde2..fbd84d100a7a 100644 --- a/receiver/sqlqueryreceiver/go.mod +++ b/receiver/sqlqueryreceiver/go.mod @@ -59,7 +59,7 @@ require ( github.com/danieljoos/wincred v1.1.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v25.0.5+incompatible // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dvsekhvalnov/jose2go v1.6.0 // indirect github.com/expr-lang/expr v1.16.7 // indirect @@ -102,7 +102,6 @@ require ( github.com/microsoft/go-mssqldb v1.7.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/sqlqueryreceiver/go.sum b/receiver/sqlqueryreceiver/go.sum index 68211060deff..173a2f837d57 100644 --- a/receiver/sqlqueryreceiver/go.sum +++ b/receiver/sqlqueryreceiver/go.sum @@ -107,8 +107,8 @@ github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 
h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -222,8 +222,6 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/vcenterreceiver/go.mod b/receiver/vcenterreceiver/go.mod index 98e2e07c3fe4..4b83f4d00cc1 100644 --- a/receiver/vcenterreceiver/go.mod +++ b/receiver/vcenterreceiver/go.mod @@ -40,7 +40,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - 
github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v25.0.5+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -61,7 +61,6 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/vcenterreceiver/go.sum b/receiver/vcenterreceiver/go.sum index e20a4a3c208a..a18fe6702447 100644 --- a/receiver/vcenterreceiver/go.sum +++ b/receiver/vcenterreceiver/go.sum @@ -31,8 +31,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -87,8 +87,6 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/receiver/zookeeperreceiver/go.mod b/receiver/zookeeperreceiver/go.mod index 614d36805ab3..d0accb165755 100644 --- a/receiver/zookeeperreceiver/go.mod +++ b/receiver/zookeeperreceiver/go.mod @@ -36,7 +36,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v25.0.5+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -56,7 +56,6 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect diff --git a/receiver/zookeeperreceiver/go.sum b/receiver/zookeeperreceiver/go.sum index 7ebe0042172c..bf6afee00018 100644 --- a/receiver/zookeeperreceiver/go.sum +++ b/receiver/zookeeperreceiver/go.sum @@ -27,8 +27,8 @@ 
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -81,8 +81,6 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= diff --git a/testbed/go.mod b/testbed/go.mod index efd9696ec1da..9afa01fc0a47 100644 --- a/testbed/go.mod +++ b/testbed/go.mod @@ -96,7 +96,7 @@ require ( 
github.com/dennwc/varint v1.0.0 // indirect github.com/digitalocean/godo v1.109.0 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v25.0.5+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect @@ -180,7 +180,6 @@ require ( github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mostynb/go-grpc-compression v1.2.2 // indirect diff --git a/testbed/go.sum b/testbed/go.sum index 4a9a0bb78659..61f765241591 100644 --- a/testbed/go.sum +++ b/testbed/go.sum @@ -143,8 +143,8 @@ github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 
h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -483,8 +483,6 @@ github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374 github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=