From fe59abb5412d54e93f03517601013561b074d104 Mon Sep 17 00:00:00 2001 From: Roger Coll Date: Tue, 9 Apr 2024 14:22:54 +0200 Subject: [PATCH] [podmanreceiver] Add metrics and resource metadata (#30232) **Description:** - Adds "metadata.yml" file to autogenerate metrics and resources. - [Update: not done in this PR] Fixes invalid network metrics: "rx -> input" and "tx -> output" **Link to tracking Issue:** https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/28640 **Testing:** Previous tests preserved. **Documentation:** --------- Co-authored-by: Mackenzie <63265430+mackjmr@users.noreply.github.com> --- .chloggen/add_podman_metadata.yaml | 27 + receiver/podmanreceiver/README.md | 6 + receiver/podmanreceiver/config.go | 5 + receiver/podmanreceiver/config_test.go | 10 +- receiver/podmanreceiver/documentation.md | 120 +++ receiver/podmanreceiver/factory.go | 7 +- receiver/podmanreceiver/go.mod | 2 +- receiver/podmanreceiver/go.sum | 2 - .../internal/metadata/generated_config.go | 134 +++ .../metadata/generated_config_test.go | 142 ++++ .../internal/metadata/generated_metrics.go | 775 ++++++++++++++++++ .../metadata/generated_metrics_test.go | 286 +++++++ .../internal/metadata/generated_resource.go | 57 ++ .../metadata/generated_resource_test.go | 58 ++ .../internal/metadata/testdata/config.yaml | 67 ++ receiver/podmanreceiver/metadata.yaml | 117 +++ receiver/podmanreceiver/metrics.go | 130 --- receiver/podmanreceiver/receiver.go | 69 +- receiver/podmanreceiver/receiver_test.go | 13 +- ...metrics_test.go => record_metrics_test.go} | 22 +- 20 files changed, 1881 insertions(+), 168 deletions(-) create mode 100755 .chloggen/add_podman_metadata.yaml create mode 100644 receiver/podmanreceiver/documentation.md create mode 100644 receiver/podmanreceiver/internal/metadata/generated_config.go create mode 100644 receiver/podmanreceiver/internal/metadata/generated_config_test.go create mode 100644 receiver/podmanreceiver/internal/metadata/generated_metrics.go create mode 100644 receiver/podmanreceiver/internal/metadata/generated_metrics_test.go create mode 100644 receiver/podmanreceiver/internal/metadata/generated_resource.go create mode 100644 receiver/podmanreceiver/internal/metadata/generated_resource_test.go create mode 100644 receiver/podmanreceiver/internal/metadata/testdata/config.yaml delete mode 100644 receiver/podmanreceiver/metrics.go rename receiver/podmanreceiver/{metrics_test.go => record_metrics_test.go} (86%) diff --git a/.chloggen/add_podman_metadata.yaml b/.chloggen/add_podman_metadata.yaml new file mode 100755 index 000000000000..935cfe849002 --- /dev/null +++ b/.chloggen/add_podman_metadata.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: breaking + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: podmanreceiver + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: "Adds metrics and resources metadata and sets seconds precision for cpu metrics" + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [28640] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. 
+# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/receiver/podmanreceiver/README.md b/receiver/podmanreceiver/README.md index c2a96910cfbc..7056b46a39d8 100644 --- a/receiver/podmanreceiver/README.md +++ b/receiver/podmanreceiver/README.md @@ -32,6 +32,7 @@ The following settings are optional: - `collection_interval` (default = `10s`): The interval at which to gather container stats. - `initial_delay` (default = `1s`): defines how long this receiver waits before starting. - `timeout` (default = `5s`): The maximum amount of time to wait for Podman API responses. +- `metrics` (defaults at [./documentation.md](./documentation.md)): Enables/disables individual metrics. See [./documentation.md](./documentation.md) for full detail. Example: @@ -42,6 +43,9 @@ receivers: timeout: 10s collection_interval: 10s initial_delay: 1s + metrics: + container.cpu.usage.system: + enabled: false ``` The full list of settings exposed for this receiver are documented [here](./config.go) @@ -84,6 +88,8 @@ The receiver emits the following metrics: container.cpu.percent container.cpu.usage.percpu +See [./documentation.md](./documentation.md) for full detail. + ## Building This receiver uses the official libpod Go bindings for Podman. In order to include diff --git a/receiver/podmanreceiver/config.go b/receiver/podmanreceiver/config.go index bf80db303c8c..b59df15c7aad 100644 --- a/receiver/podmanreceiver/config.go +++ b/receiver/podmanreceiver/config.go @@ -9,6 +9,8 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configopaque" "go.opentelemetry.io/collector/receiver/scraperhelper" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/podmanreceiver/internal/metadata" ) var _ component.Config = (*Config)(nil) @@ -22,6 +24,9 @@ type Config struct { APIVersion string `mapstructure:"api_version"` SSHKey string `mapstructure:"ssh_key"` SSHPassphrase configopaque.String `mapstructure:"ssh_passphrase"` + + // MetricsBuilderConfig config. Enable or disable stats by name. 
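	// The ",squash" tag on the embedded struct below flattens the generated config into
	// the receiver's own block, so individual metrics and resource attributes are toggled
	// under the `metrics` and `resource_attributes` keys; defaults are listed in documentation.md.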
+ metadata.MetricsBuilderConfig `mapstructure:",squash"` } func (config Config) Validate() error { diff --git a/receiver/podmanreceiver/config_test.go b/receiver/podmanreceiver/config_test.go index fc415e629ffc..a10307e36329 100644 --- a/receiver/podmanreceiver/config_test.go +++ b/receiver/podmanreceiver/config_test.go @@ -36,8 +36,9 @@ func TestLoadConfig(t *testing.T) { InitialDelay: time.Second, Timeout: 5 * time.Second, }, - APIVersion: defaultAPIVersion, - Endpoint: "unix:///run/podman/podman.sock", + APIVersion: defaultAPIVersion, + Endpoint: "unix:///run/podman/podman.sock", + MetricsBuilderConfig: metadata.DefaultMetricsBuilderConfig(), }, }, { @@ -48,8 +49,9 @@ func TestLoadConfig(t *testing.T) { InitialDelay: time.Second, Timeout: 20 * time.Second, }, - APIVersion: defaultAPIVersion, - Endpoint: "http://example.com/", + APIVersion: defaultAPIVersion, + Endpoint: "http://example.com/", + MetricsBuilderConfig: metadata.DefaultMetricsBuilderConfig(), }, }, } diff --git a/receiver/podmanreceiver/documentation.md b/receiver/podmanreceiver/documentation.md new file mode 100644 index 000000000000..6e5bb91e9912 --- /dev/null +++ b/receiver/podmanreceiver/documentation.md @@ -0,0 +1,120 @@ +[comment]: <> (Code generated by mdatagen. DO NOT EDIT.) + +# podman_stats + +## Default Metrics + +The following metrics are emitted by default. Each of them can be disabled by applying the following configuration: + +```yaml +metrics: + : + enabled: false +``` + +### container.blockio.io_service_bytes_recursive.read + +Number of bytes transferred from the disk by the container + +[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {operations} | Sum | Int | Cumulative | true | + +### container.blockio.io_service_bytes_recursive.write + +Number of bytes transferred to the disk by the container + +[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {operations} | Sum | Int | Cumulative | true | + +### container.cpu.percent + +Percent of CPU used by the container. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Double | + +### container.cpu.usage.percpu + +Total CPU time consumed per CPU-core. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| s | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| core | The CPU core number when utilising per-CPU metrics. | Any Str | + +### container.cpu.usage.system + +System CPU usage. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| s | Sum | Int | Cumulative | true | + +### container.cpu.usage.total + +Total CPU time consumed. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| s | Sum | Int | Cumulative | true | + +### container.memory.percent + +Percentage of memory used. 
+ +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Double | + +### container.memory.usage.limit + +Memory limit of the container. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | false | + +### container.memory.usage.total + +Memory usage of the container. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | false | + +### container.network.io.usage.rx_bytes + +Bytes received by the container. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | true | + +### container.network.io.usage.tx_bytes + +Bytes sent by the container. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | true | + +## Resource Attributes + +| Name | Description | Values | Enabled | +| ---- | ----------- | ------ | ------- | +| container.id | The ID of the container. | Any Str | true | +| container.image.name | The name of the image in use by the container. | Any Str | true | +| container.name | The name of the container. | Any Str | true | +| container.runtime | The runtime of the container. For this receiver, it will always be 'podman'. | Any Str | true | diff --git a/receiver/podmanreceiver/factory.go b/receiver/podmanreceiver/factory.go index 14dcfde7b9b5..f154e628d3ea 100644 --- a/receiver/podmanreceiver/factory.go +++ b/receiver/podmanreceiver/factory.go @@ -32,9 +32,10 @@ func createDefaultConfig() *Config { cfg.Timeout = 5 * time.Second return &Config{ - ControllerConfig: cfg, - Endpoint: "unix:///run/podman/podman.sock", - APIVersion: defaultAPIVersion, + ControllerConfig: cfg, + Endpoint: "unix:///run/podman/podman.sock", + APIVersion: defaultAPIVersion, + MetricsBuilderConfig: metadata.DefaultMetricsBuilderConfig(), } } diff --git a/receiver/podmanreceiver/go.mod b/receiver/podmanreceiver/go.mod index 593603765fa5..df50cc350bcb 100644 --- a/receiver/podmanreceiver/go.mod +++ b/receiver/podmanreceiver/go.mod @@ -3,6 +3,7 @@ module github.com/open-telemetry/opentelemetry-collector-contrib/receiver/podman go 1.21 require ( + github.com/google/go-cmp v0.6.0 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/component v0.97.1-0.20240404121116-4f1a8936d26b go.opentelemetry.io/collector/config/configopaque v1.4.1-0.20240404121116-4f1a8936d26b @@ -10,7 +11,6 @@ require ( go.opentelemetry.io/collector/consumer v0.97.1-0.20240404121116-4f1a8936d26b go.opentelemetry.io/collector/pdata v1.4.1-0.20240404121116-4f1a8936d26b go.opentelemetry.io/collector/receiver v0.97.1-0.20240404121116-4f1a8936d26b - go.opentelemetry.io/collector/semconv v0.97.1-0.20240404121116-4f1a8936d26b go.opentelemetry.io/otel/metric v1.24.0 go.opentelemetry.io/otel/trace v1.24.0 go.uber.org/goleak v1.3.0 diff --git a/receiver/podmanreceiver/go.sum b/receiver/podmanreceiver/go.sum index 8970c742ff00..dd9d40ea25b7 100644 --- a/receiver/podmanreceiver/go.sum +++ b/receiver/podmanreceiver/go.sum @@ -80,8 +80,6 @@ go.opentelemetry.io/collector/pdata v1.4.1-0.20240404121116-4f1a8936d26b h1:HQqz go.opentelemetry.io/collector/pdata v1.4.1-0.20240404121116-4f1a8936d26b/go.mod 
h1:TYj8aKRWZyT/KuKQXKyqSEvK/GV+slFaDMEI+Ke64Yw= go.opentelemetry.io/collector/receiver v0.97.1-0.20240404121116-4f1a8936d26b h1:d9xejxpSk5O46aM1X5nUb1qGQl1ToGQJy39csqnYl7c= go.opentelemetry.io/collector/receiver v0.97.1-0.20240404121116-4f1a8936d26b/go.mod h1:oj/eoc8Wf9u82gaPeRVdHmFbJ5e3m5F1v5CFTpjiVFU= -go.opentelemetry.io/collector/semconv v0.97.1-0.20240404121116-4f1a8936d26b h1:2ApIgbCJPzABy6TDKlc9b55J/zo6ixAIMPvIUC2nB9U= -go.opentelemetry.io/collector/semconv v0.97.1-0.20240404121116-4f1a8936d26b/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A= go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= go.opentelemetry.io/otel/exporters/prometheus v0.46.0 h1:I8WIFXR351FoLJYuloU4EgXbtNX2URfU/85pUPheIEQ= diff --git a/receiver/podmanreceiver/internal/metadata/generated_config.go b/receiver/podmanreceiver/internal/metadata/generated_config.go new file mode 100644 index 000000000000..7edc12b42acf --- /dev/null +++ b/receiver/podmanreceiver/internal/metadata/generated_config.go @@ -0,0 +1,134 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import "go.opentelemetry.io/collector/confmap" + +// MetricConfig provides common config for a particular metric. +type MetricConfig struct { + Enabled bool `mapstructure:"enabled"` + + enabledSetByUser bool +} + +func (ms *MetricConfig) Unmarshal(parser *confmap.Conf) error { + if parser == nil { + return nil + } + err := parser.Unmarshal(ms) + if err != nil { + return err + } + ms.enabledSetByUser = parser.IsSet("enabled") + return nil +} + +// MetricsConfig provides config for podman_stats metrics. +type MetricsConfig struct { + ContainerBlockioIoServiceBytesRecursiveRead MetricConfig `mapstructure:"container.blockio.io_service_bytes_recursive.read"` + ContainerBlockioIoServiceBytesRecursiveWrite MetricConfig `mapstructure:"container.blockio.io_service_bytes_recursive.write"` + ContainerCPUPercent MetricConfig `mapstructure:"container.cpu.percent"` + ContainerCPUUsagePercpu MetricConfig `mapstructure:"container.cpu.usage.percpu"` + ContainerCPUUsageSystem MetricConfig `mapstructure:"container.cpu.usage.system"` + ContainerCPUUsageTotal MetricConfig `mapstructure:"container.cpu.usage.total"` + ContainerMemoryPercent MetricConfig `mapstructure:"container.memory.percent"` + ContainerMemoryUsageLimit MetricConfig `mapstructure:"container.memory.usage.limit"` + ContainerMemoryUsageTotal MetricConfig `mapstructure:"container.memory.usage.total"` + ContainerNetworkIoUsageRxBytes MetricConfig `mapstructure:"container.network.io.usage.rx_bytes"` + ContainerNetworkIoUsageTxBytes MetricConfig `mapstructure:"container.network.io.usage.tx_bytes"` +} + +func DefaultMetricsConfig() MetricsConfig { + return MetricsConfig{ + ContainerBlockioIoServiceBytesRecursiveRead: MetricConfig{ + Enabled: true, + }, + ContainerBlockioIoServiceBytesRecursiveWrite: MetricConfig{ + Enabled: true, + }, + ContainerCPUPercent: MetricConfig{ + Enabled: true, + }, + ContainerCPUUsagePercpu: MetricConfig{ + Enabled: true, + }, + ContainerCPUUsageSystem: MetricConfig{ + Enabled: true, + }, + ContainerCPUUsageTotal: MetricConfig{ + Enabled: true, + }, + ContainerMemoryPercent: MetricConfig{ + Enabled: true, + }, + ContainerMemoryUsageLimit: MetricConfig{ + Enabled: true, + }, + ContainerMemoryUsageTotal: MetricConfig{ + Enabled: true, + }, + ContainerNetworkIoUsageRxBytes: MetricConfig{ + Enabled: true, + }, + ContainerNetworkIoUsageTxBytes: MetricConfig{ 
+ Enabled: true, + }, + } +} + +// ResourceAttributeConfig provides common config for a particular resource attribute. +type ResourceAttributeConfig struct { + Enabled bool `mapstructure:"enabled"` + + enabledSetByUser bool +} + +func (rac *ResourceAttributeConfig) Unmarshal(parser *confmap.Conf) error { + if parser == nil { + return nil + } + err := parser.Unmarshal(rac) + if err != nil { + return err + } + rac.enabledSetByUser = parser.IsSet("enabled") + return nil +} + +// ResourceAttributesConfig provides config for podman_stats resource attributes. +type ResourceAttributesConfig struct { + ContainerID ResourceAttributeConfig `mapstructure:"container.id"` + ContainerImageName ResourceAttributeConfig `mapstructure:"container.image.name"` + ContainerName ResourceAttributeConfig `mapstructure:"container.name"` + ContainerRuntime ResourceAttributeConfig `mapstructure:"container.runtime"` +} + +func DefaultResourceAttributesConfig() ResourceAttributesConfig { + return ResourceAttributesConfig{ + ContainerID: ResourceAttributeConfig{ + Enabled: true, + }, + ContainerImageName: ResourceAttributeConfig{ + Enabled: true, + }, + ContainerName: ResourceAttributeConfig{ + Enabled: true, + }, + ContainerRuntime: ResourceAttributeConfig{ + Enabled: true, + }, + } +} + +// MetricsBuilderConfig is a configuration for podman_stats metrics builder. +type MetricsBuilderConfig struct { + Metrics MetricsConfig `mapstructure:"metrics"` + ResourceAttributes ResourceAttributesConfig `mapstructure:"resource_attributes"` +} + +func DefaultMetricsBuilderConfig() MetricsBuilderConfig { + return MetricsBuilderConfig{ + Metrics: DefaultMetricsConfig(), + ResourceAttributes: DefaultResourceAttributesConfig(), + } +} diff --git a/receiver/podmanreceiver/internal/metadata/generated_config_test.go b/receiver/podmanreceiver/internal/metadata/generated_config_test.go new file mode 100644 index 000000000000..db4823127769 --- /dev/null +++ b/receiver/podmanreceiver/internal/metadata/generated_config_test.go @@ -0,0 +1,142 @@ +// Code generated by mdatagen. DO NOT EDIT. 
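The generated `MetricsBuilderConfig` above is the structure that the receiver's `metrics` and `resource_attributes` YAML keys unmarshal into. A minimal sketch of adjusting those defaults in code follows; the `exampleMetricsConfig` helper is hypothetical and assumed to live in the `podmanreceiver` package, which may import the internal `metadata` package:

```go
package podmanreceiver

import (
	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/podmanreceiver/internal/metadata"
)

// exampleMetricsConfig starts from the generated defaults (everything enabled)
// and disables one metric and one resource attribute, the programmatic
// equivalent of setting `enabled: false` under the receiver's `metrics:` and
// `resource_attributes:` keys.
func exampleMetricsConfig() metadata.MetricsBuilderConfig {
	cfg := metadata.DefaultMetricsBuilderConfig()
	cfg.Metrics.ContainerCPUUsageSystem.Enabled = false
	cfg.ResourceAttributes.ContainerRuntime.Enabled = false
	return cfg
}
```

This mirrors the README example above, which disables `container.cpu.usage.system` via YAML.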
+ +package metadata + +import ( + "path/filepath" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap/confmaptest" +) + +func TestMetricsBuilderConfig(t *testing.T) { + tests := []struct { + name string + want MetricsBuilderConfig + }{ + { + name: "default", + want: DefaultMetricsBuilderConfig(), + }, + { + name: "all_set", + want: MetricsBuilderConfig{ + Metrics: MetricsConfig{ + ContainerBlockioIoServiceBytesRecursiveRead: MetricConfig{Enabled: true}, + ContainerBlockioIoServiceBytesRecursiveWrite: MetricConfig{Enabled: true}, + ContainerCPUPercent: MetricConfig{Enabled: true}, + ContainerCPUUsagePercpu: MetricConfig{Enabled: true}, + ContainerCPUUsageSystem: MetricConfig{Enabled: true}, + ContainerCPUUsageTotal: MetricConfig{Enabled: true}, + ContainerMemoryPercent: MetricConfig{Enabled: true}, + ContainerMemoryUsageLimit: MetricConfig{Enabled: true}, + ContainerMemoryUsageTotal: MetricConfig{Enabled: true}, + ContainerNetworkIoUsageRxBytes: MetricConfig{Enabled: true}, + ContainerNetworkIoUsageTxBytes: MetricConfig{Enabled: true}, + }, + ResourceAttributes: ResourceAttributesConfig{ + ContainerID: ResourceAttributeConfig{Enabled: true}, + ContainerImageName: ResourceAttributeConfig{Enabled: true}, + ContainerName: ResourceAttributeConfig{Enabled: true}, + ContainerRuntime: ResourceAttributeConfig{Enabled: true}, + }, + }, + }, + { + name: "none_set", + want: MetricsBuilderConfig{ + Metrics: MetricsConfig{ + ContainerBlockioIoServiceBytesRecursiveRead: MetricConfig{Enabled: false}, + ContainerBlockioIoServiceBytesRecursiveWrite: MetricConfig{Enabled: false}, + ContainerCPUPercent: MetricConfig{Enabled: false}, + ContainerCPUUsagePercpu: MetricConfig{Enabled: false}, + ContainerCPUUsageSystem: MetricConfig{Enabled: false}, + ContainerCPUUsageTotal: MetricConfig{Enabled: false}, + ContainerMemoryPercent: MetricConfig{Enabled: false}, + ContainerMemoryUsageLimit: MetricConfig{Enabled: false}, + ContainerMemoryUsageTotal: MetricConfig{Enabled: false}, + ContainerNetworkIoUsageRxBytes: MetricConfig{Enabled: false}, + ContainerNetworkIoUsageTxBytes: MetricConfig{Enabled: false}, + }, + ResourceAttributes: ResourceAttributesConfig{ + ContainerID: ResourceAttributeConfig{Enabled: false}, + ContainerImageName: ResourceAttributeConfig{Enabled: false}, + ContainerName: ResourceAttributeConfig{Enabled: false}, + ContainerRuntime: ResourceAttributeConfig{Enabled: false}, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := loadMetricsBuilderConfig(t, tt.name) + if diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(MetricConfig{}, ResourceAttributeConfig{})); diff != "" { + t.Errorf("Config mismatch (-expected +actual):\n%s", diff) + } + }) + } +} + +func loadMetricsBuilderConfig(t *testing.T, name string) MetricsBuilderConfig { + cm, err := confmaptest.LoadConf(filepath.Join("testdata", "config.yaml")) + require.NoError(t, err) + sub, err := cm.Sub(name) + require.NoError(t, err) + cfg := DefaultMetricsBuilderConfig() + require.NoError(t, component.UnmarshalConfig(sub, &cfg)) + return cfg +} + +func TestResourceAttributesConfig(t *testing.T) { + tests := []struct { + name string + want ResourceAttributesConfig + }{ + { + name: "default", + want: DefaultResourceAttributesConfig(), + }, + { + name: "all_set", + want: ResourceAttributesConfig{ + ContainerID: ResourceAttributeConfig{Enabled: true}, 
+ ContainerImageName: ResourceAttributeConfig{Enabled: true}, + ContainerName: ResourceAttributeConfig{Enabled: true}, + ContainerRuntime: ResourceAttributeConfig{Enabled: true}, + }, + }, + { + name: "none_set", + want: ResourceAttributesConfig{ + ContainerID: ResourceAttributeConfig{Enabled: false}, + ContainerImageName: ResourceAttributeConfig{Enabled: false}, + ContainerName: ResourceAttributeConfig{Enabled: false}, + ContainerRuntime: ResourceAttributeConfig{Enabled: false}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := loadResourceAttributesConfig(t, tt.name) + if diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(ResourceAttributeConfig{})); diff != "" { + t.Errorf("Config mismatch (-expected +actual):\n%s", diff) + } + }) + } +} + +func loadResourceAttributesConfig(t *testing.T, name string) ResourceAttributesConfig { + cm, err := confmaptest.LoadConf(filepath.Join("testdata", "config.yaml")) + require.NoError(t, err) + sub, err := cm.Sub(name) + require.NoError(t, err) + sub, err = sub.Sub("resource_attributes") + require.NoError(t, err) + cfg := DefaultResourceAttributesConfig() + require.NoError(t, component.UnmarshalConfig(sub, &cfg)) + return cfg +} diff --git a/receiver/podmanreceiver/internal/metadata/generated_metrics.go b/receiver/podmanreceiver/internal/metadata/generated_metrics.go new file mode 100644 index 000000000000..b2a628971b8b --- /dev/null +++ b/receiver/podmanreceiver/internal/metadata/generated_metrics.go @@ -0,0 +1,775 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "time" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/receiver" +) + +type metricContainerBlockioIoServiceBytesRecursiveRead struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.blockio.io_service_bytes_recursive.read metric with initial data. +func (m *metricContainerBlockioIoServiceBytesRecursiveRead) init() { + m.data.SetName("container.blockio.io_service_bytes_recursive.read") + m.data.SetDescription("Number of bytes transferred from the disk by the container") + m.data.SetUnit("{operations}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerBlockioIoServiceBytesRecursiveRead) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerBlockioIoServiceBytesRecursiveRead) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerBlockioIoServiceBytesRecursiveRead) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerBlockioIoServiceBytesRecursiveRead(cfg MetricConfig) metricContainerBlockioIoServiceBytesRecursiveRead { + m := metricContainerBlockioIoServiceBytesRecursiveRead{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerBlockioIoServiceBytesRecursiveWrite struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.blockio.io_service_bytes_recursive.write metric with initial data. +func (m *metricContainerBlockioIoServiceBytesRecursiveWrite) init() { + m.data.SetName("container.blockio.io_service_bytes_recursive.write") + m.data.SetDescription("Number of bytes transferred to the disk by the container") + m.data.SetUnit("{operations}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerBlockioIoServiceBytesRecursiveWrite) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerBlockioIoServiceBytesRecursiveWrite) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricContainerBlockioIoServiceBytesRecursiveWrite) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerBlockioIoServiceBytesRecursiveWrite(cfg MetricConfig) metricContainerBlockioIoServiceBytesRecursiveWrite { + m := metricContainerBlockioIoServiceBytesRecursiveWrite{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerCPUPercent struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.cpu.percent metric with initial data. +func (m *metricContainerCPUPercent) init() { + m.data.SetName("container.cpu.percent") + m.data.SetDescription("Percent of CPU used by the container.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricContainerCPUPercent) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricContainerCPUPercent) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricContainerCPUPercent) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerCPUPercent(cfg MetricConfig) metricContainerCPUPercent { + m := metricContainerCPUPercent{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerCPUUsagePercpu struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.cpu.usage.percpu metric with initial data. +func (m *metricContainerCPUUsagePercpu) init() { + m.data.SetName("container.cpu.usage.percpu") + m.data.SetDescription("Total CPU time consumed per CPU-core.") + m.data.SetUnit("s") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricContainerCPUUsagePercpu) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, coreAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("core", coreAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerCPUUsagePercpu) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricContainerCPUUsagePercpu) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerCPUUsagePercpu(cfg MetricConfig) metricContainerCPUUsagePercpu { + m := metricContainerCPUUsagePercpu{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerCPUUsageSystem struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.cpu.usage.system metric with initial data. 
+func (m *metricContainerCPUUsageSystem) init() { + m.data.SetName("container.cpu.usage.system") + m.data.SetDescription("System CPU usage.") + m.data.SetUnit("s") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerCPUUsageSystem) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerCPUUsageSystem) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricContainerCPUUsageSystem) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerCPUUsageSystem(cfg MetricConfig) metricContainerCPUUsageSystem { + m := metricContainerCPUUsageSystem{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerCPUUsageTotal struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.cpu.usage.total metric with initial data. +func (m *metricContainerCPUUsageTotal) init() { + m.data.SetName("container.cpu.usage.total") + m.data.SetDescription("Total CPU time consumed.") + m.data.SetUnit("s") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerCPUUsageTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerCPUUsageTotal) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricContainerCPUUsageTotal) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerCPUUsageTotal(cfg MetricConfig) metricContainerCPUUsageTotal { + m := metricContainerCPUUsageTotal{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryPercent struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.percent metric with initial data. 
+func (m *metricContainerMemoryPercent) init() { + m.data.SetName("container.memory.percent") + m.data.SetDescription("Percentage of memory used.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricContainerMemoryPercent) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryPercent) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricContainerMemoryPercent) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryPercent(cfg MetricConfig) metricContainerMemoryPercent { + m := metricContainerMemoryPercent{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryUsageLimit struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.usage.limit metric with initial data. +func (m *metricContainerMemoryUsageLimit) init() { + m.data.SetName("container.memory.usage.limit") + m.data.SetDescription("Memory limit of the container.") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryUsageLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryUsageLimit) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricContainerMemoryUsageLimit) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryUsageLimit(cfg MetricConfig) metricContainerMemoryUsageLimit { + m := metricContainerMemoryUsageLimit{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerMemoryUsageTotal struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.memory.usage.total metric with initial data. 
+func (m *metricContainerMemoryUsageTotal) init() { + m.data.SetName("container.memory.usage.total") + m.data.SetDescription("Memory usage of the container.") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerMemoryUsageTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerMemoryUsageTotal) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricContainerMemoryUsageTotal) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerMemoryUsageTotal(cfg MetricConfig) metricContainerMemoryUsageTotal { + m := metricContainerMemoryUsageTotal{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerNetworkIoUsageRxBytes struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.network.io.usage.rx_bytes metric with initial data. +func (m *metricContainerNetworkIoUsageRxBytes) init() { + m.data.SetName("container.network.io.usage.rx_bytes") + m.data.SetDescription("Bytes received by the container.") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerNetworkIoUsageRxBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerNetworkIoUsageRxBytes) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricContainerNetworkIoUsageRxBytes) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerNetworkIoUsageRxBytes(cfg MetricConfig) metricContainerNetworkIoUsageRxBytes { + m := metricContainerNetworkIoUsageRxBytes{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerNetworkIoUsageTxBytes struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. 
+} + +// init fills container.network.io.usage.tx_bytes metric with initial data. +func (m *metricContainerNetworkIoUsageTxBytes) init() { + m.data.SetName("container.network.io.usage.tx_bytes") + m.data.SetDescription("Bytes sent by the container.") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerNetworkIoUsageTxBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerNetworkIoUsageTxBytes) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricContainerNetworkIoUsageTxBytes) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerNetworkIoUsageTxBytes(cfg MetricConfig) metricContainerNetworkIoUsageTxBytes { + m := metricContainerNetworkIoUsageTxBytes{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +// MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations +// required to produce metric representation defined in metadata and user config. +type MetricsBuilder struct { + config MetricsBuilderConfig // config of the metrics builder. + startTime pcommon.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. + buildInfo component.BuildInfo // contains version information. + metricContainerBlockioIoServiceBytesRecursiveRead metricContainerBlockioIoServiceBytesRecursiveRead + metricContainerBlockioIoServiceBytesRecursiveWrite metricContainerBlockioIoServiceBytesRecursiveWrite + metricContainerCPUPercent metricContainerCPUPercent + metricContainerCPUUsagePercpu metricContainerCPUUsagePercpu + metricContainerCPUUsageSystem metricContainerCPUUsageSystem + metricContainerCPUUsageTotal metricContainerCPUUsageTotal + metricContainerMemoryPercent metricContainerMemoryPercent + metricContainerMemoryUsageLimit metricContainerMemoryUsageLimit + metricContainerMemoryUsageTotal metricContainerMemoryUsageTotal + metricContainerNetworkIoUsageRxBytes metricContainerNetworkIoUsageRxBytes + metricContainerNetworkIoUsageTxBytes metricContainerNetworkIoUsageTxBytes +} + +// metricBuilderOption applies changes to default metrics builder. +type metricBuilderOption func(*MetricsBuilder) + +// WithStartTime sets startTime on the metrics builder. 
+func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { + return func(mb *MetricsBuilder) { + mb.startTime = startTime + } +} + +func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSettings, options ...metricBuilderOption) *MetricsBuilder { + mb := &MetricsBuilder{ + config: mbc, + startTime: pcommon.NewTimestampFromTime(time.Now()), + metricsBuffer: pmetric.NewMetrics(), + buildInfo: settings.BuildInfo, + metricContainerBlockioIoServiceBytesRecursiveRead: newMetricContainerBlockioIoServiceBytesRecursiveRead(mbc.Metrics.ContainerBlockioIoServiceBytesRecursiveRead), + metricContainerBlockioIoServiceBytesRecursiveWrite: newMetricContainerBlockioIoServiceBytesRecursiveWrite(mbc.Metrics.ContainerBlockioIoServiceBytesRecursiveWrite), + metricContainerCPUPercent: newMetricContainerCPUPercent(mbc.Metrics.ContainerCPUPercent), + metricContainerCPUUsagePercpu: newMetricContainerCPUUsagePercpu(mbc.Metrics.ContainerCPUUsagePercpu), + metricContainerCPUUsageSystem: newMetricContainerCPUUsageSystem(mbc.Metrics.ContainerCPUUsageSystem), + metricContainerCPUUsageTotal: newMetricContainerCPUUsageTotal(mbc.Metrics.ContainerCPUUsageTotal), + metricContainerMemoryPercent: newMetricContainerMemoryPercent(mbc.Metrics.ContainerMemoryPercent), + metricContainerMemoryUsageLimit: newMetricContainerMemoryUsageLimit(mbc.Metrics.ContainerMemoryUsageLimit), + metricContainerMemoryUsageTotal: newMetricContainerMemoryUsageTotal(mbc.Metrics.ContainerMemoryUsageTotal), + metricContainerNetworkIoUsageRxBytes: newMetricContainerNetworkIoUsageRxBytes(mbc.Metrics.ContainerNetworkIoUsageRxBytes), + metricContainerNetworkIoUsageTxBytes: newMetricContainerNetworkIoUsageTxBytes(mbc.Metrics.ContainerNetworkIoUsageTxBytes), + } + for _, op := range options { + op(mb) + } + return mb +} + +// NewResourceBuilder returns a new resource builder that should be used to build a resource associated with for the emitted metrics. +func (mb *MetricsBuilder) NewResourceBuilder() *ResourceBuilder { + return NewResourceBuilder(mb.config.ResourceAttributes) +} + +// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. +func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { + if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() { + mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len() + } +} + +// ResourceMetricsOption applies changes to provided resource metrics. +type ResourceMetricsOption func(pmetric.ResourceMetrics) + +// WithResource sets the provided resource on the emitted ResourceMetrics. +// It's recommended to use ResourceBuilder to create the resource. +func WithResource(res pcommon.Resource) ResourceMetricsOption { + return func(rm pmetric.ResourceMetrics) { + res.CopyTo(rm.Resource()) + } +} + +// WithStartTimeOverride overrides start time for all the resource metrics data points. +// This option should be only used if different start time has to be set on metrics coming from different resources. 
+func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption { + return func(rm pmetric.ResourceMetrics) { + var dps pmetric.NumberDataPointSlice + metrics := rm.ScopeMetrics().At(0).Metrics() + for i := 0; i < metrics.Len(); i++ { + switch metrics.At(i).Type() { + case pmetric.MetricTypeGauge: + dps = metrics.At(i).Gauge().DataPoints() + case pmetric.MetricTypeSum: + dps = metrics.At(i).Sum().DataPoints() + } + for j := 0; j < dps.Len(); j++ { + dps.At(j).SetStartTimestamp(start) + } + } + } +} + +// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for +// recording another set of data points as part of another resource. This function can be helpful when one scraper +// needs to emit metrics from several resources. Otherwise calling this function is not required, +// just `Emit` function can be called instead. +// Resource attributes should be provided as ResourceMetricsOption arguments. +func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { + rm := pmetric.NewResourceMetrics() + ils := rm.ScopeMetrics().AppendEmpty() + ils.Scope().SetName("otelcol/podmanreceiver") + ils.Scope().SetVersion(mb.buildInfo.Version) + ils.Metrics().EnsureCapacity(mb.metricsCapacity) + mb.metricContainerBlockioIoServiceBytesRecursiveRead.emit(ils.Metrics()) + mb.metricContainerBlockioIoServiceBytesRecursiveWrite.emit(ils.Metrics()) + mb.metricContainerCPUPercent.emit(ils.Metrics()) + mb.metricContainerCPUUsagePercpu.emit(ils.Metrics()) + mb.metricContainerCPUUsageSystem.emit(ils.Metrics()) + mb.metricContainerCPUUsageTotal.emit(ils.Metrics()) + mb.metricContainerMemoryPercent.emit(ils.Metrics()) + mb.metricContainerMemoryUsageLimit.emit(ils.Metrics()) + mb.metricContainerMemoryUsageTotal.emit(ils.Metrics()) + mb.metricContainerNetworkIoUsageRxBytes.emit(ils.Metrics()) + mb.metricContainerNetworkIoUsageTxBytes.emit(ils.Metrics()) + + for _, op := range rmo { + op(rm) + } + if ils.Metrics().Len() > 0 { + mb.updateCapacity(rm) + rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty()) + } +} + +// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for +// recording another set of metrics. This function will be responsible for applying all the transformations required to +// produce metric representation defined in metadata and user config, e.g. delta or cumulative. +func (mb *MetricsBuilder) Emit(rmo ...ResourceMetricsOption) pmetric.Metrics { + mb.EmitForResource(rmo...) + metrics := mb.metricsBuffer + mb.metricsBuffer = pmetric.NewMetrics() + return metrics +} + +// RecordContainerBlockioIoServiceBytesRecursiveReadDataPoint adds a data point to container.blockio.io_service_bytes_recursive.read metric. +func (mb *MetricsBuilder) RecordContainerBlockioIoServiceBytesRecursiveReadDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerBlockioIoServiceBytesRecursiveRead.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerBlockioIoServiceBytesRecursiveWriteDataPoint adds a data point to container.blockio.io_service_bytes_recursive.write metric. +func (mb *MetricsBuilder) RecordContainerBlockioIoServiceBytesRecursiveWriteDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerBlockioIoServiceBytesRecursiveWrite.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerCPUPercentDataPoint adds a data point to container.cpu.percent metric. 
+func (mb *MetricsBuilder) RecordContainerCPUPercentDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricContainerCPUPercent.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerCPUUsagePercpuDataPoint adds a data point to container.cpu.usage.percpu metric. +func (mb *MetricsBuilder) RecordContainerCPUUsagePercpuDataPoint(ts pcommon.Timestamp, val int64, coreAttributeValue string) { + mb.metricContainerCPUUsagePercpu.recordDataPoint(mb.startTime, ts, val, coreAttributeValue) +} + +// RecordContainerCPUUsageSystemDataPoint adds a data point to container.cpu.usage.system metric. +func (mb *MetricsBuilder) RecordContainerCPUUsageSystemDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerCPUUsageSystem.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerCPUUsageTotalDataPoint adds a data point to container.cpu.usage.total metric. +func (mb *MetricsBuilder) RecordContainerCPUUsageTotalDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerCPUUsageTotal.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerMemoryPercentDataPoint adds a data point to container.memory.percent metric. +func (mb *MetricsBuilder) RecordContainerMemoryPercentDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricContainerMemoryPercent.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerMemoryUsageLimitDataPoint adds a data point to container.memory.usage.limit metric. +func (mb *MetricsBuilder) RecordContainerMemoryUsageLimitDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerMemoryUsageLimit.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerMemoryUsageTotalDataPoint adds a data point to container.memory.usage.total metric. +func (mb *MetricsBuilder) RecordContainerMemoryUsageTotalDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerMemoryUsageTotal.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerNetworkIoUsageRxBytesDataPoint adds a data point to container.network.io.usage.rx_bytes metric. +func (mb *MetricsBuilder) RecordContainerNetworkIoUsageRxBytesDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerNetworkIoUsageRxBytes.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerNetworkIoUsageTxBytesDataPoint adds a data point to container.network.io.usage.tx_bytes metric. +func (mb *MetricsBuilder) RecordContainerNetworkIoUsageTxBytesDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerNetworkIoUsageTxBytes.recordDataPoint(mb.startTime, ts, val) +} + +// Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, +// and metrics builder should update its startTime and reset it's internal state accordingly. +func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { + mb.startTime = pcommon.NewTimestampFromTime(time.Now()) + for _, op := range options { + op(mb) + } +} diff --git a/receiver/podmanreceiver/internal/metadata/generated_metrics_test.go b/receiver/podmanreceiver/internal/metadata/generated_metrics_test.go new file mode 100644 index 000000000000..0b9a0868d03a --- /dev/null +++ b/receiver/podmanreceiver/internal/metadata/generated_metrics_test.go @@ -0,0 +1,286 @@ +// Code generated by mdatagen. DO NOT EDIT. 
+ +package metadata + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/receiver/receivertest" + "go.uber.org/zap" + "go.uber.org/zap/zaptest/observer" +) + +type testConfigCollection int + +const ( + testSetDefault testConfigCollection = iota + testSetAll + testSetNone +) + +func TestMetricsBuilder(t *testing.T) { + tests := []struct { + name string + configSet testConfigCollection + }{ + { + name: "default", + configSet: testSetDefault, + }, + { + name: "all_set", + configSet: testSetAll, + }, + { + name: "none_set", + configSet: testSetNone, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + start := pcommon.Timestamp(1_000_000_000) + ts := pcommon.Timestamp(1_000_001_000) + observedZapCore, observedLogs := observer.New(zap.WarnLevel) + settings := receivertest.NewNopCreateSettings() + settings.Logger = zap.New(observedZapCore) + mb := NewMetricsBuilder(loadMetricsBuilderConfig(t, test.name), settings, WithStartTime(start)) + + expectedWarnings := 0 + + assert.Equal(t, expectedWarnings, observedLogs.Len()) + + defaultMetricsCount := 0 + allMetricsCount := 0 + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordContainerBlockioIoServiceBytesRecursiveReadDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordContainerBlockioIoServiceBytesRecursiveWriteDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordContainerCPUPercentDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordContainerCPUUsagePercpuDataPoint(ts, 1, "core-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordContainerCPUUsageSystemDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordContainerCPUUsageTotalDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordContainerMemoryPercentDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordContainerMemoryUsageLimitDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordContainerMemoryUsageTotalDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordContainerNetworkIoUsageRxBytesDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordContainerNetworkIoUsageTxBytesDataPoint(ts, 1) + + rb := mb.NewResourceBuilder() + rb.SetContainerID("container.id-val") + rb.SetContainerImageName("container.image.name-val") + rb.SetContainerName("container.name-val") + rb.SetContainerRuntime("container.runtime-val") + res := rb.Emit() + metrics := mb.Emit(WithResource(res)) + + if test.configSet == testSetNone { + assert.Equal(t, 0, metrics.ResourceMetrics().Len()) + return + } + + assert.Equal(t, 1, metrics.ResourceMetrics().Len()) + rm := metrics.ResourceMetrics().At(0) + assert.Equal(t, res, rm.Resource()) + assert.Equal(t, 1, rm.ScopeMetrics().Len()) + ms := rm.ScopeMetrics().At(0).Metrics() + if test.configSet == testSetDefault { + assert.Equal(t, defaultMetricsCount, ms.Len()) + } + if test.configSet == testSetAll { + assert.Equal(t, allMetricsCount, ms.Len()) + } + validatedMetrics := make(map[string]bool) + for i := 0; i < ms.Len(); i++ { + switch ms.At(i).Name() { + case "container.blockio.io_service_bytes_recursive.read": + assert.False(t, validatedMetrics["container.blockio.io_service_bytes_recursive.read"], "Found a duplicate in the metrics slice: container.blockio.io_service_bytes_recursive.read") + 
validatedMetrics["container.blockio.io_service_bytes_recursive.read"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of bytes transferred from the disk by the container", ms.At(i).Description()) + assert.Equal(t, "{operations}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.blockio.io_service_bytes_recursive.write": + assert.False(t, validatedMetrics["container.blockio.io_service_bytes_recursive.write"], "Found a duplicate in the metrics slice: container.blockio.io_service_bytes_recursive.write") + validatedMetrics["container.blockio.io_service_bytes_recursive.write"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of bytes transferred to the disk by the container", ms.At(i).Description()) + assert.Equal(t, "{operations}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.cpu.percent": + assert.False(t, validatedMetrics["container.cpu.percent"], "Found a duplicate in the metrics slice: container.cpu.percent") + validatedMetrics["container.cpu.percent"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Percent of CPU used by the container.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + case "container.cpu.usage.percpu": + assert.False(t, validatedMetrics["container.cpu.usage.percpu"], "Found a duplicate in the metrics slice: container.cpu.usage.percpu") + validatedMetrics["container.cpu.usage.percpu"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Total CPU time consumed per CPU-core.", ms.At(i).Description()) + assert.Equal(t, "s", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("core") + assert.True(t, ok) + assert.EqualValues(t, "core-val", attrVal.Str()) + case "container.cpu.usage.system": + assert.False(t, validatedMetrics["container.cpu.usage.system"], "Found a duplicate in the metrics slice: 
container.cpu.usage.system") + validatedMetrics["container.cpu.usage.system"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "System CPU usage.", ms.At(i).Description()) + assert.Equal(t, "s", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.cpu.usage.total": + assert.False(t, validatedMetrics["container.cpu.usage.total"], "Found a duplicate in the metrics slice: container.cpu.usage.total") + validatedMetrics["container.cpu.usage.total"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Total CPU time consumed.", ms.At(i).Description()) + assert.Equal(t, "s", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.memory.percent": + assert.False(t, validatedMetrics["container.memory.percent"], "Found a duplicate in the metrics slice: container.memory.percent") + validatedMetrics["container.memory.percent"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Percentage of memory used.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + case "container.memory.usage.limit": + assert.False(t, validatedMetrics["container.memory.usage.limit"], "Found a duplicate in the metrics slice: container.memory.usage.limit") + validatedMetrics["container.memory.usage.limit"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Memory limit of the container.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.memory.usage.total": + assert.False(t, validatedMetrics["container.memory.usage.total"], "Found a duplicate in the metrics slice: container.memory.usage.total") + validatedMetrics["container.memory.usage.total"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Memory usage of the container.", ms.At(i).Description()) + assert.Equal(t, 
"By", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.network.io.usage.rx_bytes": + assert.False(t, validatedMetrics["container.network.io.usage.rx_bytes"], "Found a duplicate in the metrics slice: container.network.io.usage.rx_bytes") + validatedMetrics["container.network.io.usage.rx_bytes"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Bytes received by the container.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "container.network.io.usage.tx_bytes": + assert.False(t, validatedMetrics["container.network.io.usage.tx_bytes"], "Found a duplicate in the metrics slice: container.network.io.usage.tx_bytes") + validatedMetrics["container.network.io.usage.tx_bytes"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Bytes sent by the container.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + } + } + }) + } +} diff --git a/receiver/podmanreceiver/internal/metadata/generated_resource.go b/receiver/podmanreceiver/internal/metadata/generated_resource.go new file mode 100644 index 000000000000..3fb5a3cd56fd --- /dev/null +++ b/receiver/podmanreceiver/internal/metadata/generated_resource.go @@ -0,0 +1,57 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// ResourceBuilder is a helper struct to build resources predefined in metadata.yaml. +// The ResourceBuilder is not thread-safe and must not to be used in multiple goroutines. +type ResourceBuilder struct { + config ResourceAttributesConfig + res pcommon.Resource +} + +// NewResourceBuilder creates a new ResourceBuilder. This method should be called on the start of the application. +func NewResourceBuilder(rac ResourceAttributesConfig) *ResourceBuilder { + return &ResourceBuilder{ + config: rac, + res: pcommon.NewResource(), + } +} + +// SetContainerID sets provided value as "container.id" attribute. +func (rb *ResourceBuilder) SetContainerID(val string) { + if rb.config.ContainerID.Enabled { + rb.res.Attributes().PutStr("container.id", val) + } +} + +// SetContainerImageName sets provided value as "container.image.name" attribute. 
+func (rb *ResourceBuilder) SetContainerImageName(val string) { + if rb.config.ContainerImageName.Enabled { + rb.res.Attributes().PutStr("container.image.name", val) + } +} + +// SetContainerName sets provided value as "container.name" attribute. +func (rb *ResourceBuilder) SetContainerName(val string) { + if rb.config.ContainerName.Enabled { + rb.res.Attributes().PutStr("container.name", val) + } +} + +// SetContainerRuntime sets provided value as "container.runtime" attribute. +func (rb *ResourceBuilder) SetContainerRuntime(val string) { + if rb.config.ContainerRuntime.Enabled { + rb.res.Attributes().PutStr("container.runtime", val) + } +} + +// Emit returns the built resource and resets the internal builder state. +func (rb *ResourceBuilder) Emit() pcommon.Resource { + r := rb.res + rb.res = pcommon.NewResource() + return r +} diff --git a/receiver/podmanreceiver/internal/metadata/generated_resource_test.go b/receiver/podmanreceiver/internal/metadata/generated_resource_test.go new file mode 100644 index 000000000000..c1d9bafe1da0 --- /dev/null +++ b/receiver/podmanreceiver/internal/metadata/generated_resource_test.go @@ -0,0 +1,58 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestResourceBuilder(t *testing.T) { + for _, test := range []string{"default", "all_set", "none_set"} { + t.Run(test, func(t *testing.T) { + cfg := loadResourceAttributesConfig(t, test) + rb := NewResourceBuilder(cfg) + rb.SetContainerID("container.id-val") + rb.SetContainerImageName("container.image.name-val") + rb.SetContainerName("container.name-val") + rb.SetContainerRuntime("container.runtime-val") + + res := rb.Emit() + assert.Equal(t, 0, rb.Emit().Attributes().Len()) // Second call should return empty Resource + + switch test { + case "default": + assert.Equal(t, 4, res.Attributes().Len()) + case "all_set": + assert.Equal(t, 4, res.Attributes().Len()) + case "none_set": + assert.Equal(t, 0, res.Attributes().Len()) + return + default: + assert.Failf(t, "unexpected test case: %s", test) + } + + val, ok := res.Attributes().Get("container.id") + assert.True(t, ok) + if ok { + assert.EqualValues(t, "container.id-val", val.Str()) + } + val, ok = res.Attributes().Get("container.image.name") + assert.True(t, ok) + if ok { + assert.EqualValues(t, "container.image.name-val", val.Str()) + } + val, ok = res.Attributes().Get("container.name") + assert.True(t, ok) + if ok { + assert.EqualValues(t, "container.name-val", val.Str()) + } + val, ok = res.Attributes().Get("container.runtime") + assert.True(t, ok) + if ok { + assert.EqualValues(t, "container.runtime-val", val.Str()) + } + }) + } +} diff --git a/receiver/podmanreceiver/internal/metadata/testdata/config.yaml b/receiver/podmanreceiver/internal/metadata/testdata/config.yaml new file mode 100644 index 000000000000..9fafe024c230 --- /dev/null +++ b/receiver/podmanreceiver/internal/metadata/testdata/config.yaml @@ -0,0 +1,67 @@ +default: +all_set: + metrics: + container.blockio.io_service_bytes_recursive.read: + enabled: true + container.blockio.io_service_bytes_recursive.write: + enabled: true + container.cpu.percent: + enabled: true + container.cpu.usage.percpu: + enabled: true + container.cpu.usage.system: + enabled: true + container.cpu.usage.total: + enabled: true + container.memory.percent: + enabled: true + container.memory.usage.limit: + enabled: true + container.memory.usage.total: + enabled: true + container.network.io.usage.rx_bytes: + enabled: true + 
container.network.io.usage.tx_bytes: + enabled: true + resource_attributes: + container.id: + enabled: true + container.image.name: + enabled: true + container.name: + enabled: true + container.runtime: + enabled: true +none_set: + metrics: + container.blockio.io_service_bytes_recursive.read: + enabled: false + container.blockio.io_service_bytes_recursive.write: + enabled: false + container.cpu.percent: + enabled: false + container.cpu.usage.percpu: + enabled: false + container.cpu.usage.system: + enabled: false + container.cpu.usage.total: + enabled: false + container.memory.percent: + enabled: false + container.memory.usage.limit: + enabled: false + container.memory.usage.total: + enabled: false + container.network.io.usage.rx_bytes: + enabled: false + container.network.io.usage.tx_bytes: + enabled: false + resource_attributes: + container.id: + enabled: false + container.image.name: + enabled: false + container.name: + enabled: false + container.runtime: + enabled: false diff --git a/receiver/podmanreceiver/metadata.yaml b/receiver/podmanreceiver/metadata.yaml index de3c3f37fe8d..b1ce716920c3 100644 --- a/receiver/podmanreceiver/metadata.yaml +++ b/receiver/podmanreceiver/metadata.yaml @@ -10,6 +10,123 @@ status: active: [rogercoll] unsupported_platforms: [windows] +resource_attributes: + container.runtime: + description: "The runtime of the container. For this receiver, it will always be 'podman'." + type: string + enabled: true + container.id: + description: "The ID of the container." + type: string + enabled: true + container.image.name: + description: "The name of the image in use by the container." + type: string + enabled: true + container.name: + description: "The name of the container." + type: string + enabled: true + +attributes: + core: + description: "The CPU core number when utilising per-CPU metrics." + type: string + +metrics: + # CPU + container.cpu.usage.system: + enabled: true + description: "System CPU usage." + unit: s + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + container.cpu.usage.total: + enabled: true + description: "Total CPU time consumed." + unit: s + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + container.cpu.usage.percpu: + enabled: true + description: "Total CPU time consumed per CPU-core." + unit: s + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + attributes: + - core + container.cpu.percent: + enabled: true + description: "Percent of CPU used by the container." + unit: 1 + gauge: + value_type: double + # Memory + container.memory.usage.limit: + enabled: true + description: "Memory limit of the container." + unit: By + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: false + container.memory.usage.total: + enabled: true + description: "Memory usage of the container." + unit: By + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: false + container.memory.percent: + enabled: true + description: "Percentage of memory used." + unit: 1 + gauge: + value_type: double + # Network + container.network.io.usage.rx_bytes: + enabled: true + description: "Bytes received by the container." + unit: By + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + container.network.io.usage.tx_bytes: + enabled: true + description: "Bytes sent by the container." 
+ unit: By + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + # BlockIO + container.blockio.io_service_bytes_recursive.read: + enabled: true + description: "Number of bytes transferred from the disk by the container" + extended_documentation: "[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt)." + unit: "{operations}" + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + container.blockio.io_service_bytes_recursive.write: + enabled: true + description: "Number of bytes transferred to the disk by the container" + extended_documentation: "[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt)." + unit: "{operations}" + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + # TODO: Update the receiver to pass the tests tests: skip_lifecycle: true diff --git a/receiver/podmanreceiver/metrics.go b/receiver/podmanreceiver/metrics.go deleted file mode 100644 index ee0dcae29da2..000000000000 --- a/receiver/podmanreceiver/metrics.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -//go:build !windows - -package podmanreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/podmanreceiver" - -import ( - "fmt" - "time" - - "go.opentelemetry.io/collector/pdata/pcommon" - "go.opentelemetry.io/collector/pdata/pmetric" - conventions "go.opentelemetry.io/collector/semconv/v1.6.1" -) - -type point struct { - intVal uint64 - doubleVal float64 - attributes map[string]string -} - -func containerStatsToMetrics(ts time.Time, container container, stats *containerStats) pmetric.Metrics { - pbts := pcommon.NewTimestampFromTime(ts) - - md := pmetric.NewMetrics() - rs := md.ResourceMetrics().AppendEmpty() - - resourceAttr := rs.Resource().Attributes() - resourceAttr.PutStr(conventions.AttributeContainerRuntime, "podman") - resourceAttr.PutStr(conventions.AttributeContainerName, stats.Name) - resourceAttr.PutStr(conventions.AttributeContainerID, stats.ContainerID) - resourceAttr.PutStr(conventions.AttributeContainerImageName, container.Image) - - ms := rs.ScopeMetrics().AppendEmpty().Metrics() - appendIOMetrics(ms, stats, pbts) - appendCPUMetrics(ms, stats, pbts) - appendNetworkMetrics(ms, stats, pbts) - appendMemoryMetrics(ms, stats, pbts) - - return md -} - -func appendMemoryMetrics(ms pmetric.MetricSlice, stats *containerStats, ts pcommon.Timestamp) { - gaugeI(ms, "memory.usage.limit", "By", []point{{intVal: stats.MemLimit}}, ts) - gaugeI(ms, "memory.usage.total", "By", []point{{intVal: stats.MemUsage}}, ts) - gaugeF(ms, "memory.percent", "1", []point{{doubleVal: stats.MemPerc}}, ts) -} - -func appendNetworkMetrics(ms pmetric.MetricSlice, stats *containerStats, ts pcommon.Timestamp) { - sum(ms, "network.io.usage.tx_bytes", "By", []point{{intVal: stats.NetInput}}, ts) - sum(ms, "network.io.usage.rx_bytes", "By", []point{{intVal: stats.NetOutput}}, ts) -} - -func appendIOMetrics(ms pmetric.MetricSlice, stats *containerStats, ts pcommon.Timestamp) { - sum(ms, "blockio.io_service_bytes_recursive.write", "By", []point{{intVal: stats.BlockOutput}}, ts) - sum(ms, "blockio.io_service_bytes_recursive.read", "By", []point{{intVal: stats.BlockInput}}, ts) -} - -func appendCPUMetrics(ms pmetric.MetricSlice, stats *containerStats, ts pcommon.Timestamp) { - sum(ms, "cpu.usage.system", "ns", []point{{intVal: stats.CPUSystemNano}}, ts) - sum(ms, "cpu.usage.total", "ns", []point{{intVal: 
stats.CPUNano}}, ts) - gaugeF(ms, "cpu.percent", "1", []point{{doubleVal: stats.CPU}}, ts) - - points := make([]point, len(stats.PerCPU)) - for i, cpu := range stats.PerCPU { - points[i] = point{ - intVal: cpu, - attributes: map[string]string{ - "core": fmt.Sprintf("cpu%d", i), - }, - } - } - sum(ms, "cpu.usage.percpu", "ns", points, ts) -} - -func initMetric(ms pmetric.MetricSlice, name, unit string) pmetric.Metric { - m := ms.AppendEmpty() - m.SetName(fmt.Sprintf("container.%s", name)) - m.SetUnit(unit) - return m -} - -func sum(ilm pmetric.MetricSlice, metricName string, unit string, points []point, ts pcommon.Timestamp) { - metric := initMetric(ilm, metricName, unit) - sum := metric.SetEmptySum() - sum.SetIsMonotonic(true) - sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - - dataPoints := sum.DataPoints() - - for _, pt := range points { - dataPoint := dataPoints.AppendEmpty() - dataPoint.SetTimestamp(ts) - dataPoint.SetIntValue(int64(pt.intVal)) - setDataPointAttributes(dataPoint, pt.attributes) - } -} - -func gauge(ms pmetric.MetricSlice, metricName string, unit string) pmetric.NumberDataPointSlice { - metric := initMetric(ms, metricName, unit) - gauge := metric.SetEmptyGauge() - return gauge.DataPoints() -} - -func gaugeI(ms pmetric.MetricSlice, metricName string, unit string, points []point, ts pcommon.Timestamp) { - dataPoints := gauge(ms, metricName, unit) - for _, pt := range points { - dataPoint := dataPoints.AppendEmpty() - dataPoint.SetTimestamp(ts) - dataPoint.SetIntValue(int64(pt.intVal)) - setDataPointAttributes(dataPoint, pt.attributes) - } -} - -func gaugeF(ms pmetric.MetricSlice, metricName string, unit string, points []point, ts pcommon.Timestamp) { - dataPoints := gauge(ms, metricName, unit) - for _, pt := range points { - dataPoint := dataPoints.AppendEmpty() - dataPoint.SetTimestamp(ts) - dataPoint.SetDoubleValue(pt.doubleVal) - setDataPointAttributes(dataPoint, pt.attributes) - } -} - -func setDataPointAttributes(dataPoint pmetric.NumberDataPoint, attributes map[string]string) { - for k, v := range attributes { - dataPoint.Attributes().PutStr(k, v) - } -} diff --git a/receiver/podmanreceiver/receiver.go b/receiver/podmanreceiver/receiver.go index 1a748cfa7fd4..4ea3cc218468 100644 --- a/receiver/podmanreceiver/receiver.go +++ b/receiver/podmanreceiver/receiver.go @@ -13,6 +13,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/receiver/scrapererror" @@ -27,6 +28,7 @@ type metricsReceiver struct { set receiver.CreateSettings clientFactory clientFactory scraper *ContainerScraper + mb *metadata.MetricsBuilder } func newMetricsReceiver( @@ -49,6 +51,7 @@ func newMetricsReceiver( config: config, clientFactory: clientFactory, set: set, + mb: metadata.NewMetricsBuilder(config.MetricsBuilderConfig, set), } scrp, err := scraperhelper.NewScraper(metadata.Type.String(), recv.scrape, scraperhelper.WithStart(recv.start)) @@ -74,8 +77,9 @@ func (r *metricsReceiver) start(ctx context.Context, _ component.Host) error { } type result struct { - md pmetric.Metrics - err error + container container + containerStats containerStats + err error } func (r *metricsReceiver) scrape(ctx context.Context) (pmetric.Metrics, error) { @@ -88,11 +92,7 @@ func (r *metricsReceiver) scrape(ctx context.Context) (pmetric.Metrics, error) { go func(c container) { defer 
wg.Done() stats, err := r.scraper.fetchContainerStats(ctx, c) - if err != nil { - results <- result{md: pmetric.Metrics{}, err: err} - return - } - results <- result{md: containerStatsToMetrics(time.Now(), c, &stats), err: nil} + results <- result{container: c, containerStats: stats, err: err} }(c) } @@ -100,15 +100,62 @@ func (r *metricsReceiver) scrape(ctx context.Context) (pmetric.Metrics, error) { close(results) var errs error - md := pmetric.NewMetrics() + now := pcommon.NewTimestampFromTime(time.Now()) + for res := range results { if res.err != nil { // Don't know the number of failed metrics, but one container fetch is a partial error. errs = multierr.Append(errs, scrapererror.NewPartialScrapeError(res.err, 0)) - fmt.Println("No stats found!") continue } - res.md.ResourceMetrics().CopyTo(md.ResourceMetrics()) + r.recordContainerStats(now, res.container, &res.containerStats) + } + return r.mb.Emit(), errs +} + +func (r *metricsReceiver) recordContainerStats(now pcommon.Timestamp, container container, stats *containerStats) { + r.recordCPUMetrics(now, stats) + r.recordNetworkMetrics(now, stats) + r.recordMemoryMetrics(now, stats) + r.recordIOMetrics(now, stats) + + rb := r.mb.NewResourceBuilder() + rb.SetContainerRuntime("podman") + rb.SetContainerName(stats.Name) + rb.SetContainerID(stats.ContainerID) + rb.SetContainerImageName(container.Image) + + r.mb.EmitForResource(metadata.WithResource(rb.Emit())) +} + +func (r *metricsReceiver) recordCPUMetrics(now pcommon.Timestamp, stats *containerStats) { + r.mb.RecordContainerCPUUsageSystemDataPoint(now, int64(toSecondsWithNanosecondPrecision(stats.CPUSystemNano))) + r.mb.RecordContainerCPUUsageTotalDataPoint(now, int64(toSecondsWithNanosecondPrecision(stats.CPUNano))) + r.mb.RecordContainerCPUPercentDataPoint(now, stats.CPU) + + for i, cpu := range stats.PerCPU { + r.mb.RecordContainerCPUUsagePercpuDataPoint(now, int64(toSecondsWithNanosecondPrecision(cpu)), fmt.Sprintf("cpu%d", i)) } - return md, nil + +} + +func (r *metricsReceiver) recordNetworkMetrics(now pcommon.Timestamp, stats *containerStats) { + r.mb.RecordContainerNetworkIoUsageRxBytesDataPoint(now, int64(stats.NetOutput)) + r.mb.RecordContainerNetworkIoUsageTxBytesDataPoint(now, int64(stats.NetInput)) +} + +func (r *metricsReceiver) recordMemoryMetrics(now pcommon.Timestamp, stats *containerStats) { + r.mb.RecordContainerMemoryUsageTotalDataPoint(now, int64(stats.MemUsage)) + r.mb.RecordContainerMemoryUsageLimitDataPoint(now, int64(stats.MemLimit)) + r.mb.RecordContainerMemoryPercentDataPoint(now, stats.MemPerc) +} + +func (r *metricsReceiver) recordIOMetrics(now pcommon.Timestamp, stats *containerStats) { + r.mb.RecordContainerBlockioIoServiceBytesRecursiveReadDataPoint(now, int64(stats.BlockInput)) + r.mb.RecordContainerBlockioIoServiceBytesRecursiveWriteDataPoint(now, int64(stats.BlockOutput)) +} + +// nanoseconds to seconds conversion truncating the fractional part +func toSecondsWithNanosecondPrecision(nanoseconds uint64) uint64 { + return nanoseconds / 1e9 } diff --git a/receiver/podmanreceiver/receiver_test.go b/receiver/podmanreceiver/receiver_test.go index 4f568149a688..cd08f24bdfda 100644 --- a/receiver/podmanreceiver/receiver_test.go +++ b/receiver/podmanreceiver/receiver_test.go @@ -65,10 +65,11 @@ func TestScraperLoop(t *testing.T) { assert.NotNil(t, r) go func() { + sampleStats := genContainerStats() client <- containerStatsReport{ - Stats: []containerStats{{ - ContainerID: "c1", - }}, + Stats: []containerStats{ + *sampleStats, + }, Error: 
containerStatsReportError{}, } }() @@ -76,7 +77,9 @@ func TestScraperLoop(t *testing.T) { assert.NoError(t, r.Start(ctx, componenttest.NewNopHost())) md := <-consumer - assert.Equal(t, md.ResourceMetrics().Len(), 1) + assert.Equal(t, 1, md.ResourceMetrics().Len()) + + assertStatsEqualToMetrics(t, genContainerStats(), md) assert.NoError(t, r.Shutdown(ctx)) } @@ -102,7 +105,7 @@ func (c mockClient) ping(context.Context) error { type mockConsumer chan pmetric.Metrics func (c mockClient) list(context.Context, url.Values) ([]container, error) { - return []container{{ID: "c1"}}, nil + return []container{{ID: "c1", Image: "localimage"}}, nil } func (c mockClient) events(context.Context, url.Values) (<-chan event, <-chan error) { diff --git a/receiver/podmanreceiver/metrics_test.go b/receiver/podmanreceiver/record_metrics_test.go similarity index 86% rename from receiver/podmanreceiver/metrics_test.go rename to receiver/podmanreceiver/record_metrics_test.go index 3846efea9d93..a665c0be331d 100644 --- a/receiver/podmanreceiver/metrics_test.go +++ b/receiver/podmanreceiver/record_metrics_test.go @@ -8,17 +8,15 @@ package podmanreceiver import ( "fmt" "testing" - "time" "github.com/stretchr/testify/assert" "go.opentelemetry.io/collector/pdata/pmetric" ) -func TestTranslateStatsToMetrics(t *testing.T) { - ts := time.Now() - stats := genContainerStats() - md := containerStatsToMetrics(ts, container{Image: "localimage"}, stats) - assertStatsEqualToMetrics(t, stats, md) +type point struct { + intVal uint64 + doubleVal float64 + attributes map[string]string } func assertStatsEqualToMetrics(t *testing.T, podmanStats *containerStats, md pmetric.Metrics) { @@ -34,7 +32,7 @@ func assertStatsEqualToMetrics(t *testing.T, podmanStats *containerStats, md pme for k, v := range resourceAttrs { attr, exists := rsm.Resource().Attributes().Get(k) assert.True(t, exists) - assert.Equal(t, attr.Str(), v) + assert.Equal(t, v, attr.Str()) } assert.Equal(t, rsm.ScopeMetrics().Len(), 1) @@ -46,9 +44,9 @@ func assertStatsEqualToMetrics(t *testing.T, podmanStats *containerStats, md pme m := metrics.At(i) switch m.Name() { case "container.memory.usage.limit": - assertMetricEqual(t, m, pmetric.MetricTypeGauge, []point{{intVal: podmanStats.MemLimit}}) + assertMetricEqual(t, m, pmetric.MetricTypeSum, []point{{intVal: podmanStats.MemLimit}}) case "container.memory.usage.total": - assertMetricEqual(t, m, pmetric.MetricTypeGauge, []point{{intVal: podmanStats.MemUsage}}) + assertMetricEqual(t, m, pmetric.MetricTypeSum, []point{{intVal: podmanStats.MemUsage}}) case "container.memory.percent": assertMetricEqual(t, m, pmetric.MetricTypeGauge, []point{{doubleVal: podmanStats.MemPerc}}) case "container.network.io.usage.tx_bytes": @@ -62,15 +60,15 @@ func assertStatsEqualToMetrics(t *testing.T, podmanStats *containerStats, md pme assertMetricEqual(t, m, pmetric.MetricTypeSum, []point{{intVal: podmanStats.BlockInput}}) case "container.cpu.usage.system": - assertMetricEqual(t, m, pmetric.MetricTypeSum, []point{{intVal: podmanStats.CPUSystemNano}}) + assertMetricEqual(t, m, pmetric.MetricTypeSum, []point{{intVal: toSecondsWithNanosecondPrecision(podmanStats.CPUSystemNano)}}) case "container.cpu.usage.total": - assertMetricEqual(t, m, pmetric.MetricTypeSum, []point{{intVal: podmanStats.CPUNano}}) + assertMetricEqual(t, m, pmetric.MetricTypeSum, []point{{intVal: toSecondsWithNanosecondPrecision(podmanStats.CPUNano)}}) case "container.cpu.percent": assertMetricEqual(t, m, pmetric.MetricTypeGauge, []point{{doubleVal: podmanStats.CPU}}) case 
"container.cpu.usage.percpu": points := make([]point, len(podmanStats.PerCPU)) for i, v := range podmanStats.PerCPU { - points[i] = point{intVal: v, attributes: map[string]string{"core": fmt.Sprintf("cpu%d", i)}} + points[i] = point{intVal: toSecondsWithNanosecondPrecision(v), attributes: map[string]string{"core": fmt.Sprintf("cpu%d", i)}} } assertMetricEqual(t, m, pmetric.MetricTypeSum, points)