From a245d13e68ffbb5534010875a2966469c47d4849 Mon Sep 17 00:00:00 2001
From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com>
Date: Wed, 4 May 2022 16:36:05 -0600
Subject: [PATCH 01/17] Updated to use v2 generated metrics
---
receiver/kubeletstatsreceiver/config.go | 4 +
receiver/kubeletstatsreceiver/doc.go | 2 +-
.../kubeletstatsreceiver/documentation.md | 9 +-
receiver/kubeletstatsreceiver/factory.go | 2 +-
.../internal/kubelet/accumulator.go | 74 +-
.../internal/kubelet/accumulator_test.go | 3 +
.../internal/kubelet/conventions.go | 6 +
.../internal/kubelet/cpu.go | 15 +-
.../internal/kubelet/fs.go | 9 +-
.../internal/kubelet/mem.go | 15 +-
.../internal/kubelet/metrics.go | 5 +-
.../internal/kubelet/metrics_test.go | 13 +-
.../internal/kubelet/network.go | 43 +-
.../internal/kubelet/utils.go | 58 +-
.../internal/kubelet/volume.go | 36 +-
.../internal/kubelet/volume_test.go | 9 +-
.../internal/metadata/generated_metrics.go | 607 ----
.../internal/metadata/generated_metrics_v2.go | 2838 +++++++++++++++++
.../internal/metadata/metrics.go | 127 +-
receiver/kubeletstatsreceiver/scraper.go | 6 +-
receiver/kubeletstatsreceiver/scraper_test.go | 6 +
21 files changed, 3054 insertions(+), 833 deletions(-)
delete mode 100644 receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go
create mode 100644 receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_v2.go
diff --git a/receiver/kubeletstatsreceiver/config.go b/receiver/kubeletstatsreceiver/config.go
index d300a96e50a3..3acbb5e9726b 100644
--- a/receiver/kubeletstatsreceiver/config.go
+++ b/receiver/kubeletstatsreceiver/config.go
@@ -17,6 +17,7 @@ package kubeletstatsreceiver // import "github.com/open-telemetry/opentelemetry-
import (
"errors"
"fmt"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
"go.opentelemetry.io/collector/config"
"go.opentelemetry.io/collector/config/confignet"
@@ -48,6 +49,9 @@ type Config struct {
// Configuration of the Kubernetes API client.
K8sAPIConfig *k8sconfig.APIConfig `mapstructure:"k8s_api_config"`
+
+ // Metrics allows customizing scraped metrics representation.
+ Metrics metadata.MetricsSettings `mapstructure:"metrics"`
}
func (cfg *Config) Validate() error {
diff --git a/receiver/kubeletstatsreceiver/doc.go b/receiver/kubeletstatsreceiver/doc.go
index 022e323883a9..0ca68ee9cbc6 100644
--- a/receiver/kubeletstatsreceiver/doc.go
+++ b/receiver/kubeletstatsreceiver/doc.go
@@ -15,6 +15,6 @@
//go:build !windows
// +build !windows
-//go:generate mdatagen metadata.yaml
+//go:generate mdatagen --experimental-gen metadata.yaml
package kubeletstatsreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver"
diff --git a/receiver/kubeletstatsreceiver/documentation.md b/receiver/kubeletstatsreceiver/documentation.md
index 3590dbc74cf5..d77f4d9a04ed 100644
--- a/receiver/kubeletstatsreceiver/documentation.md
+++ b/receiver/kubeletstatsreceiver/documentation.md
@@ -51,7 +51,14 @@ These are the metrics available for this scraper.
| **k8s.volume.inodes.free** | The free inodes in the filesystem. | 1 | Gauge(Int) | |
| **k8s.volume.inodes.used** | The inodes used by the filesystem. This may not equal inodes - free because filesystem may share inodes with other filesystems. | 1 | Gauge(Int) | |
-**Highlighted metrics** are emitted by default.
+**Highlighted metrics** are emitted by default. Other metrics are optional and not emitted by default.
+Any metric can be enabled or disabled with the following scraper configuration:
+
+```yaml
metrics:
  <metric_name>:
    enabled: <true|false>
```
## Resource attributes
diff --git a/receiver/kubeletstatsreceiver/factory.go b/receiver/kubeletstatsreceiver/factory.go
index 21299debc87d..329f5c1da9bb 100644
--- a/receiver/kubeletstatsreceiver/factory.go
+++ b/receiver/kubeletstatsreceiver/factory.go
@@ -78,7 +78,7 @@ func createMetricsReceiver(
return nil, err
}
- scrp, err := newKubletScraper(rest, set, rOptions)
+ scrp, err := newKubletScraper(rest, set, rOptions, cfg.Metrics)
if err != nil {
return nil, err
}
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go
index 73e987f04220..57b06a741980 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go
@@ -49,33 +49,22 @@ type metricDataAccumulator struct {
logger *zap.Logger
metricGroupsToCollect map[MetricGroup]bool
time time.Time
+ mb *metadata.MetricsBuilder
}
-const (
- scopeName = "otelcol/kubeletstatsreceiver"
-)
-
func (a *metricDataAccumulator) nodeStats(s stats.NodeStats) {
if !a.metricGroupsToCollect[NodeMetricGroup] {
return
}
- md := pmetric.NewMetrics()
- rm := md.ResourceMetrics().AppendEmpty()
- fillNodeResource(rm.Resource(), s)
-
- ilm := rm.ScopeMetrics().AppendEmpty()
- ilm.Scope().SetName(scopeName)
-
- startTime := pcommon.NewTimestampFromTime(s.StartTime.Time)
currentTime := pcommon.NewTimestampFromTime(a.time)
- addCPUMetrics(ilm.Metrics(), metadata.NodeCPUMetrics, s.CPU, startTime, currentTime)
- addMemoryMetrics(ilm.Metrics(), metadata.NodeMemoryMetrics, s.Memory, currentTime)
- addFilesystemMetrics(ilm.Metrics(), metadata.NodeFilesystemMetrics, s.Fs, currentTime)
- addNetworkMetrics(ilm.Metrics(), metadata.NodeNetworkMetrics, s.Network, startTime, currentTime)
+ addCPUMetrics(a.mb, metadata.NodeCPUMetrics, s.CPU, currentTime)
+ addMemoryMetrics(a.mb, metadata.NodeMemoryMetrics, s.Memory, currentTime)
+ addFilesystemMetrics(a.mb, metadata.NodeFilesystemMetrics, s.Fs, currentTime)
+ addNetworkMetrics(a.mb, metadata.NodeNetworkMetrics, s.Network, currentTime)
// todo s.Runtime.ImageFs
- a.m = append(a.m, md)
+ a.m = append(a.m, a.mb.Emit(getNodeResourceOptions(s)...))
}
func (a *metricDataAccumulator) podStats(s stats.PodStats) {
@@ -83,21 +72,13 @@ func (a *metricDataAccumulator) podStats(s stats.PodStats) {
return
}
- md := pmetric.NewMetrics()
- rm := md.ResourceMetrics().AppendEmpty()
- fillPodResource(rm.Resource(), s)
-
- ilm := rm.ScopeMetrics().AppendEmpty()
- ilm.Scope().SetName(scopeName)
-
- startTime := pcommon.NewTimestampFromTime(s.StartTime.Time)
currentTime := pcommon.NewTimestampFromTime(a.time)
- addCPUMetrics(ilm.Metrics(), metadata.PodCPUMetrics, s.CPU, startTime, currentTime)
- addMemoryMetrics(ilm.Metrics(), metadata.PodMemoryMetrics, s.Memory, currentTime)
- addFilesystemMetrics(ilm.Metrics(), metadata.PodFilesystemMetrics, s.EphemeralStorage, currentTime)
- addNetworkMetrics(ilm.Metrics(), metadata.PodNetworkMetrics, s.Network, startTime, currentTime)
+ addCPUMetrics(a.mb, metadata.PodCPUMetrics, s.CPU, currentTime)
+ addMemoryMetrics(a.mb, metadata.PodMemoryMetrics, s.Memory, currentTime)
+ addFilesystemMetrics(a.mb, metadata.PodFilesystemMetrics, s.EphemeralStorage, currentTime)
+ addNetworkMetrics(a.mb, metadata.PodNetworkMetrics, s.Network, currentTime)
- a.m = append(a.m, md)
+ a.m = append(a.m, a.mb.Emit(getPodResourceOptions(s)...))
}
func (a *metricDataAccumulator) containerStats(sPod stats.PodStats, s stats.ContainerStats) {
@@ -105,10 +86,8 @@ func (a *metricDataAccumulator) containerStats(sPod stats.PodStats, s stats.Cont
return
}
- md := pmetric.NewMetrics()
- rm := md.ResourceMetrics().AppendEmpty()
-
- if err := fillContainerResource(rm.Resource(), sPod, s, a.metadata); err != nil {
+ ro, err := getContainerResourceOptions(sPod, s, a.metadata)
+ if err != nil {
a.logger.Warn(
"failed to fetch container metrics",
zap.String("pod", sPod.PodRef.Name),
@@ -117,15 +96,12 @@ func (a *metricDataAccumulator) containerStats(sPod stats.PodStats, s stats.Cont
return
}
- ilm := rm.ScopeMetrics().AppendEmpty()
- ilm.Scope().SetName(scopeName)
-
- startTime := pcommon.NewTimestampFromTime(s.StartTime.Time)
currentTime := pcommon.NewTimestampFromTime(a.time)
- addCPUMetrics(ilm.Metrics(), metadata.ContainerCPUMetrics, s.CPU, startTime, currentTime)
- addMemoryMetrics(ilm.Metrics(), metadata.ContainerMemoryMetrics, s.Memory, currentTime)
- addFilesystemMetrics(ilm.Metrics(), metadata.ContainerFilesystemMetrics, s.Rootfs, currentTime)
- a.m = append(a.m, md)
+ addCPUMetrics(a.mb, metadata.ContainerCPUMetrics, s.CPU, currentTime)
+ addMemoryMetrics(a.mb, metadata.ContainerMemoryMetrics, s.Memory, currentTime)
+ addFilesystemMetrics(a.mb, metadata.ContainerFilesystemMetrics, s.Rootfs, currentTime)
+
+ a.m = append(a.m, a.mb.Emit(ro...))
}
func (a *metricDataAccumulator) volumeStats(sPod stats.PodStats, s stats.VolumeStats) {
@@ -133,10 +109,8 @@ func (a *metricDataAccumulator) volumeStats(sPod stats.PodStats, s stats.VolumeS
return
}
- md := pmetric.NewMetrics()
- rm := md.ResourceMetrics().AppendEmpty()
-
- if err := fillVolumeResource(rm.Resource(), sPod, s, a.metadata); err != nil {
+ ro, err := getVolumeResourceOptions(sPod, s, a.metadata)
+ if err != nil {
a.logger.Warn(
"Failed to gather additional volume metadata. Skipping metric collection.",
zap.String("pod", sPod.PodRef.Name),
@@ -145,10 +119,8 @@ func (a *metricDataAccumulator) volumeStats(sPod stats.PodStats, s stats.VolumeS
return
}
- ilm := rm.ScopeMetrics().AppendEmpty()
- ilm.Scope().SetName(scopeName)
-
currentTime := pcommon.NewTimestampFromTime(a.time)
- addVolumeMetrics(ilm.Metrics(), metadata.K8sVolumeMetrics, s, currentTime)
- a.m = append(a.m, md)
+ addVolumeMetrics(a.mb, metadata.K8sVolumeMetrics, s, currentTime)
+
+ a.m = append(a.m, a.mb.Emit(ro...))
}
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go
index 49f9db843d4e..fcb383e2acdf 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go
@@ -16,6 +16,7 @@ package kubelet
import (
"errors"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
"testing"
"github.com/stretchr/testify/assert"
@@ -210,6 +211,7 @@ func TestMetadataErrorCases(t *testing.T) {
metadata: tt.metadata,
logger: logger,
metricGroupsToCollect: tt.metricGroupsToCollect,
+ mb: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
}
tt.testScenario(acc)
@@ -231,6 +233,7 @@ func TestNilHandling(t *testing.T) {
ContainerMetricGroup: true,
VolumeMetricGroup: true,
},
+ mb: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
}
assert.NotPanics(t, func() {
acc.nodeStats(stats.NodeStats{})
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/conventions.go b/receiver/kubeletstatsreceiver/internal/kubelet/conventions.go
index f1541e04aab5..f28b50005e57 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/conventions.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/conventions.go
@@ -30,4 +30,10 @@ const (
labelValueAWSEBSVolume = "awsElasticBlockStore"
labelValueGCEPDVolume = "gcePersistentDisk"
labelValueGlusterFSVolume = "glusterfs"
+ labelAwsVolumeId = "aws.volume.id"
+ labelFsType = "fs.type"
+ labelPartition = "partition"
+ labelGcePdName = "gce.pd.name"
+ labelGlusterfsEndpointsName = "glusterfs.endpoints.name"
+ labelGlusterfsPath = "glusterfs.path"
)
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/cpu.go b/receiver/kubeletstatsreceiver/internal/kubelet/cpu.go
index 4d8a1dfbee3a..4f9bcaefab47 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/cpu.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/cpu.go
@@ -16,32 +16,31 @@ package kubelet // import "github.com/open-telemetry/opentelemetry-collector-con
import (
"go.opentelemetry.io/collector/pdata/pcommon"
- "go.opentelemetry.io/collector/pdata/pmetric"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
-func addCPUMetrics(dest pmetric.MetricSlice, cpuMetrics metadata.CPUMetrics, s *stats.CPUStats, startTime pcommon.Timestamp, currentTime pcommon.Timestamp) {
+func addCPUMetrics(mb *metadata.MetricsBuilder, cpuMetrics metadata.CPUMetrics, s *stats.CPUStats, currentTime pcommon.Timestamp) {
if s == nil {
return
}
- addCPUUsageMetric(dest, cpuMetrics.Utilization, s, currentTime)
- addCPUTimeMetric(dest, cpuMetrics.Time, s, startTime, currentTime)
+ addCPUUsageMetric(mb, cpuMetrics.Utilization, s, currentTime)
+ addCPUTimeMetric(mb, cpuMetrics.Time, s, currentTime)
}
-func addCPUUsageMetric(dest pmetric.MetricSlice, metricInt metadata.MetricIntf, s *stats.CPUStats, currentTime pcommon.Timestamp) {
+func addCPUUsageMetric(mb *metadata.MetricsBuilder, recordDataPoint metadata.RecordDoubleDataPointFunc, s *stats.CPUStats, currentTime pcommon.Timestamp) {
if s.UsageNanoCores == nil {
return
}
value := float64(*s.UsageNanoCores) / 1_000_000_000
- fillDoubleGauge(dest.AppendEmpty(), metricInt, value, currentTime)
+ recordDataPoint(mb, currentTime, value)
}
-func addCPUTimeMetric(dest pmetric.MetricSlice, metricInt metadata.MetricIntf, s *stats.CPUStats, startTime pcommon.Timestamp, currentTime pcommon.Timestamp) {
+func addCPUTimeMetric(mb *metadata.MetricsBuilder, recordDataPoint metadata.RecordDoubleDataPointFunc, s *stats.CPUStats, currentTime pcommon.Timestamp) {
if s.UsageCoreNanoSeconds == nil {
return
}
value := float64(*s.UsageCoreNanoSeconds) / 1_000_000_000
- fillDoubleSum(dest.AppendEmpty(), metricInt, value, startTime, currentTime)
+ recordDataPoint(mb, currentTime, value)
}
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/fs.go b/receiver/kubeletstatsreceiver/internal/kubelet/fs.go
index 8d1d45a249b0..57f89aff39a8 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/fs.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/fs.go
@@ -16,18 +16,17 @@ package kubelet // import "github.com/open-telemetry/opentelemetry-collector-con
import (
"go.opentelemetry.io/collector/pdata/pcommon"
- "go.opentelemetry.io/collector/pdata/pmetric"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
-func addFilesystemMetrics(dest pmetric.MetricSlice, filesystemMetrics metadata.FilesystemMetrics, s *stats.FsStats, currentTime pcommon.Timestamp) {
+func addFilesystemMetrics(mb *metadata.MetricsBuilder, filesystemMetrics metadata.FilesystemMetrics, s *stats.FsStats, currentTime pcommon.Timestamp) {
if s == nil {
return
}
- addIntGauge(dest, filesystemMetrics.Available, s.AvailableBytes, currentTime)
- addIntGauge(dest, filesystemMetrics.Capacity, s.CapacityBytes, currentTime)
- addIntGauge(dest, filesystemMetrics.Usage, s.UsedBytes, currentTime)
+ recordIntDataPoint(mb, filesystemMetrics.Available, s.AvailableBytes, currentTime)
+ recordIntDataPoint(mb, filesystemMetrics.Capacity, s.CapacityBytes, currentTime)
+ recordIntDataPoint(mb, filesystemMetrics.Usage, s.UsedBytes, currentTime)
}
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/mem.go b/receiver/kubeletstatsreceiver/internal/kubelet/mem.go
index 17f04b424fcd..184b2b9cae8e 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/mem.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/mem.go
@@ -16,21 +16,20 @@ package kubelet // import "github.com/open-telemetry/opentelemetry-collector-con
import (
"go.opentelemetry.io/collector/pdata/pcommon"
- "go.opentelemetry.io/collector/pdata/pmetric"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
-func addMemoryMetrics(dest pmetric.MetricSlice, memoryMetrics metadata.MemoryMetrics, s *stats.MemoryStats, currentTime pcommon.Timestamp) {
+func addMemoryMetrics(mb *metadata.MetricsBuilder, memoryMetrics metadata.MemoryMetrics, s *stats.MemoryStats, currentTime pcommon.Timestamp) {
if s == nil {
return
}
- addIntGauge(dest, memoryMetrics.Available, s.AvailableBytes, currentTime)
- addIntGauge(dest, memoryMetrics.Usage, s.UsageBytes, currentTime)
- addIntGauge(dest, memoryMetrics.Rss, s.RSSBytes, currentTime)
- addIntGauge(dest, memoryMetrics.WorkingSet, s.WorkingSetBytes, currentTime)
- addIntGauge(dest, memoryMetrics.PageFaults, s.PageFaults, currentTime)
- addIntGauge(dest, memoryMetrics.MajorPageFaults, s.MajorPageFaults, currentTime)
+ recordIntDataPoint(mb, memoryMetrics.Available, s.AvailableBytes, currentTime)
+ recordIntDataPoint(mb, memoryMetrics.Usage, s.UsageBytes, currentTime)
+ recordIntDataPoint(mb, memoryMetrics.Rss, s.RSSBytes, currentTime)
+ recordIntDataPoint(mb, memoryMetrics.WorkingSet, s.WorkingSetBytes, currentTime)
+ recordIntDataPoint(mb, memoryMetrics.PageFaults, s.PageFaults, currentTime)
+ recordIntDataPoint(mb, memoryMetrics.MajorPageFaults, s.MajorPageFaults, currentTime)
}
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metrics.go b/receiver/kubeletstatsreceiver/internal/kubelet/metrics.go
index 918a65ef47ec..e5189f01b175 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/metrics.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/metrics.go
@@ -15,6 +15,7 @@
package kubelet // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/kubelet"
import (
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
"time"
"go.opentelemetry.io/collector/pdata/pmetric"
@@ -25,12 +26,14 @@ import (
func MetricsData(
logger *zap.Logger, summary *stats.Summary,
metadata Metadata,
- metricGroupsToCollect map[MetricGroup]bool) []pmetric.Metrics {
+ metricGroupsToCollect map[MetricGroup]bool,
+ mb *metadata.MetricsBuilder) []pmetric.Metrics {
acc := &metricDataAccumulator{
metadata: metadata,
logger: logger,
metricGroupsToCollect: metricGroupsToCollect,
time: time.Now(),
+ mb: mb,
}
acc.nodeStats(summary.Node)
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go
index 34b606f4978a..3736272d820a 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go
@@ -22,6 +22,8 @@ import (
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.uber.org/zap"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
type fakeRestClient struct {
@@ -41,11 +43,13 @@ func TestMetricAccumulator(t *testing.T) {
summary, _ := statsProvider.StatsSummary()
metadataProvider := NewMetadataProvider(rc)
podsMetadata, _ := metadataProvider.Pods()
- metadata := NewMetadata([]MetadataLabel{MetadataLabelContainerID}, podsMetadata, nil)
- requireMetricsOk(t, MetricsData(zap.NewNop(), summary, metadata, ValidMetricGroups))
+ k8sMetadata := NewMetadata([]MetadataLabel{MetadataLabelContainerID}, podsMetadata, nil)
+ mb := metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings())
+ requireMetricsOk(t, MetricsData(zap.NewNop(), summary, k8sMetadata, ValidMetricGroups, mb))
+ mb.Reset()
// Disable all groups
- require.Equal(t, 0, len(MetricsData(zap.NewNop(), summary, metadata, map[MetricGroup]bool{})))
+ require.Equal(t, 0, len(MetricsData(zap.NewNop(), summary, k8sMetadata, map[MetricGroup]bool{}, mb)))
}
func requireMetricsOk(t *testing.T, mds []pmetric.Metrics) {
@@ -166,5 +170,6 @@ func fakeMetrics() []pmetric.Metrics {
PodMetricGroup: true,
NodeMetricGroup: true,
}
- return MetricsData(zap.NewNop(), summary, Metadata{}, mgs)
+ mb := metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings())
+ return MetricsData(zap.NewNop(), summary, Metadata{}, mgs, mb)
}
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/network.go b/receiver/kubeletstatsreceiver/internal/kubelet/network.go
index c2e2c123d3be..f432a6bb5f25 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/network.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/network.go
@@ -13,55 +13,26 @@
// limitations under the License.
package kubelet // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/kubelet"
-
import (
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
"go.opentelemetry.io/collector/pdata/pcommon"
- "go.opentelemetry.io/collector/pdata/pmetric"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
-
- "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
-func addNetworkMetrics(dest pmetric.MetricSlice, networkMetrics metadata.NetworkMetrics, s *stats.NetworkStats, startTime pcommon.Timestamp, currentTime pcommon.Timestamp) {
+func addNetworkMetrics(mb *metadata.MetricsBuilder, networkMetrics metadata.NetworkMetrics, s *stats.NetworkStats, currentTime pcommon.Timestamp) {
if s == nil {
return
}
- addNetworkIOMetric(dest, networkMetrics.IO, s, startTime, currentTime)
- addNetworkErrorsMetric(dest, networkMetrics.Errors, s, startTime, currentTime)
-}
-func addNetworkIOMetric(dest pmetric.MetricSlice, metricInt metadata.MetricIntf, s *stats.NetworkStats, startTime pcommon.Timestamp, currentTime pcommon.Timestamp) {
- if s.RxBytes == nil && s.TxBytes == nil {
- return
- }
-
- m := dest.AppendEmpty()
- metricInt.Init(m)
-
- fillNetworkDataPoint(m.Sum().DataPoints(), s.Name, metadata.AttributeDirection.Receive, s.RxBytes, startTime, currentTime)
- fillNetworkDataPoint(m.Sum().DataPoints(), s.Name, metadata.AttributeDirection.Transmit, s.TxBytes, startTime, currentTime)
+ recordNetworkDataPoint(mb, networkMetrics.IO, s, currentTime)
+ recordNetworkDataPoint(mb, networkMetrics.Errors, s, currentTime)
}
-func addNetworkErrorsMetric(dest pmetric.MetricSlice, metricInt metadata.MetricIntf, s *stats.NetworkStats, startTime pcommon.Timestamp, currentTime pcommon.Timestamp) {
+func recordNetworkDataPoint(mb *metadata.MetricsBuilder, recordDataPoint metadata.RecordIntDataPointWithDirectionFunc, s *stats.NetworkStats, currentTime pcommon.Timestamp) {
if s.RxBytes == nil && s.TxBytes == nil {
return
}
- m := dest.AppendEmpty()
- metricInt.Init(m)
-
- fillNetworkDataPoint(m.Sum().DataPoints(), s.Name, metadata.AttributeDirection.Receive, s.RxErrors, startTime, currentTime)
- fillNetworkDataPoint(m.Sum().DataPoints(), s.Name, metadata.AttributeDirection.Transmit, s.TxErrors, startTime, currentTime)
-}
-
-func fillNetworkDataPoint(dps pmetric.NumberDataPointSlice, interfaceName string, direction string, value *uint64, startTime pcommon.Timestamp, currentTime pcommon.Timestamp) {
- if value == nil {
- return
- }
- dp := dps.AppendEmpty()
- dp.Attributes().UpsertString(metadata.A.Interface, interfaceName)
- dp.Attributes().UpsertString(metadata.A.Direction, direction)
- dp.SetIntVal(int64(*value))
- dp.SetStartTimestamp(startTime)
- dp.SetTimestamp(currentTime)
+ recordDataPoint(mb, currentTime, int64(*s.RxBytes), s.Name, metadata.AttributeDirectionReceive)
+ recordDataPoint(mb, currentTime, int64(*s.TxBytes), s.Name, metadata.AttributeDirectionTransmit)
}
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/utils.go b/receiver/kubeletstatsreceiver/internal/kubelet/utils.go
index 0ff47ebeefc4..0059289f802f 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/utils.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/utils.go
@@ -13,39 +13,43 @@
// limitations under the License.
package kubelet // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/kubelet"
-
import (
- "go.opentelemetry.io/collector/pdata/pcommon"
- "go.opentelemetry.io/collector/pdata/pmetric"
-
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
+ "go.opentelemetry.io/collector/pdata/pcommon"
)
-func fillDoubleGauge(dest pmetric.Metric, metricInt metadata.MetricIntf, value float64, currentTime pcommon.Timestamp) {
- metricInt.Init(dest)
- dp := dest.Gauge().DataPoints().AppendEmpty()
- dp.SetDoubleVal(value)
- dp.SetTimestamp(currentTime)
-}
-
-func addIntGauge(dest pmetric.MetricSlice, metricInt metadata.MetricIntf, value *uint64, currentTime pcommon.Timestamp) {
+func recordIntDataPoint(mb *metadata.MetricsBuilder, recordDataPoint metadata.RecordIntDataPointFunc, value *uint64, currentTime pcommon.Timestamp) {
if value == nil {
return
}
- fillIntGauge(dest.AppendEmpty(), metricInt, int64(*value), currentTime)
+ recordDataPoint(mb, currentTime, int64(*value))
}
-func fillIntGauge(dest pmetric.Metric, metricInt metadata.MetricIntf, value int64, currentTime pcommon.Timestamp) {
- metricInt.Init(dest)
- dp := dest.Gauge().DataPoints().AppendEmpty()
- dp.SetIntVal(value)
- dp.SetTimestamp(currentTime)
-}
-
-func fillDoubleSum(dest pmetric.Metric, metricInt metadata.MetricIntf, value float64, startTime pcommon.Timestamp, currentTime pcommon.Timestamp) {
- metricInt.Init(dest)
- dp := dest.Sum().DataPoints().AppendEmpty()
- dp.SetDoubleVal(value)
- dp.SetStartTimestamp(startTime)
- dp.SetTimestamp(currentTime)
-}
+//func fillDoubleGauge(dest pmetric.Metric, metricInt metadata.MetricIntf, value float64, currentTime pcommon.Timestamp) {
+// metricInt.Init(dest)
+// dp := dest.Gauge().DataPoints().AppendEmpty()
+// dp.SetDoubleVal(value)
+// dp.SetTimestamp(currentTime)
+//}
+//
+//func addIntGauge(dest pmetric.MetricSlice, metricInt metadata.MetricIntf, value *uint64, currentTime pcommon.Timestamp) {
+// if value == nil {
+// return
+// }
+// fillIntGauge(dest.AppendEmpty(), metricInt, int64(*value), currentTime)
+//}
+//
+//func fillIntGauge(dest pmetric.Metric, metricInt metadata.MetricIntf, value int64, currentTime pcommon.Timestamp) {
+// metricInt.Init(dest)
+// dp := dest.Gauge().DataPoints().AppendEmpty()
+// dp.SetIntVal(value)
+// dp.SetTimestamp(currentTime)
+//}
+//
+//func fillDoubleSum(dest pmetric.Metric, metricInt metadata.MetricIntf, value float64, startTime pcommon.Timestamp, currentTime pcommon.Timestamp) {
+// metricInt.Init(dest)
+// dp := dest.Sum().DataPoints().AppendEmpty()
+// dp.SetDoubleVal(value)
+// dp.SetStartTimestamp(startTime)
+// dp.SetTimestamp(currentTime)
+//}
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/volume.go b/receiver/kubeletstatsreceiver/internal/kubelet/volume.go
index d2957ad59355..5e187ac526a9 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/volume.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/volume.go
@@ -13,24 +13,20 @@
// limitations under the License.
package kubelet // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/kubelet"
-
import (
- "strconv"
-
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
"go.opentelemetry.io/collector/pdata/pcommon"
- "go.opentelemetry.io/collector/pdata/pmetric"
v1 "k8s.io/api/core/v1"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
-
- "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
+ "strconv"
)
-func addVolumeMetrics(dest pmetric.MetricSlice, volumeMetrics metadata.VolumeMetrics, s stats.VolumeStats, currentTime pcommon.Timestamp) {
- addIntGauge(dest, volumeMetrics.Available, s.AvailableBytes, currentTime)
- addIntGauge(dest, volumeMetrics.Capacity, s.CapacityBytes, currentTime)
- addIntGauge(dest, volumeMetrics.Inodes, s.Inodes, currentTime)
- addIntGauge(dest, volumeMetrics.InodesFree, s.InodesFree, currentTime)
- addIntGauge(dest, volumeMetrics.InodesUsed, s.InodesUsed, currentTime)
+func addVolumeMetrics(mb *metadata.MetricsBuilder, volumeMetrics metadata.VolumeMetrics, s stats.VolumeStats, currentTime pcommon.Timestamp) {
+ recordIntDataPoint(mb, volumeMetrics.Available, s.AvailableBytes, currentTime)
+ recordIntDataPoint(mb, volumeMetrics.Capacity, s.CapacityBytes, currentTime)
+ recordIntDataPoint(mb, volumeMetrics.Inodes, s.Inodes, currentTime)
+ recordIntDataPoint(mb, volumeMetrics.InodesFree, s.InodesFree, currentTime)
+ recordIntDataPoint(mb, volumeMetrics.InodesUsed, s.InodesUsed, currentTime)
}
func getLabelsFromVolume(volume v1.Volume, labels map[string]string) {
@@ -83,22 +79,22 @@ func GetPersistentVolumeLabels(pv v1.PersistentVolumeSource, labels map[string]s
func awsElasticBlockStoreDims(vs v1.AWSElasticBlockStoreVolumeSource, labels map[string]string) {
labels[labelVolumeType] = labelValueAWSEBSVolume
// AWS specific labels.
- labels["aws.volume.id"] = vs.VolumeID
- labels["fs.type"] = vs.FSType
- labels["partition"] = strconv.Itoa(int(vs.Partition))
+ labels[labelAwsVolumeId] = vs.VolumeID
+ labels[labelFsType] = vs.FSType
+ labels[labelPartition] = strconv.Itoa(int(vs.Partition))
}
func gcePersistentDiskDims(vs v1.GCEPersistentDiskVolumeSource, labels map[string]string) {
labels[labelVolumeType] = labelValueGCEPDVolume
// GCP specific labels.
- labels["gce.pd.name"] = vs.PDName
- labels["fs.type"] = vs.FSType
- labels["partition"] = strconv.Itoa(int(vs.Partition))
+ labels[labelGcePdName] = vs.PDName
+ labels[labelFsType] = vs.FSType
+ labels[labelPartition] = strconv.Itoa(int(vs.Partition))
}
func glusterfsDims(vs v1.GlusterfsVolumeSource, labels map[string]string) {
labels[labelVolumeType] = labelValueGlusterFSVolume
// GlusterFS specific labels.
- labels["glusterfs.endpoints.name"] = vs.EndpointsName
- labels["glusterfs.path"] = vs.Path
+ labels[labelGlusterfsEndpointsName] = vs.EndpointsName
+ labels[labelGlusterfsPath] = vs.Path
}
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/volume_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/volume_test.go
index dcb0c42577ff..7bb8e6aacf68 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/volume_test.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/volume_test.go
@@ -189,9 +189,14 @@ func TestDetailedPVCLabels(t *testing.T) {
}, nil)
metadata.DetailedPVCLabelsSetter = tt.detailedPVCLabelsSetterOverride
- volumeResource := pcommon.NewResource()
- err := fillVolumeResource(volumeResource, podStats, stats.VolumeStats{Name: tt.volumeName}, metadata)
+ ro, err := getVolumeResourceOptions(podStats, stats.VolumeStats{Name: tt.volumeName}, metadata)
require.NoError(t, err)
+
+ volumeResource := pcommon.NewResource()
+ for _, op := range ro {
+ op(volumeResource)
+ }
+
require.Equal(t, pcommon.NewMapFromRaw(tt.want).Sort(), volumeResource.Attributes().Sort())
})
}
diff --git a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go
deleted file mode 100644
index 92de0a9bf282..000000000000
--- a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go
+++ /dev/null
@@ -1,607 +0,0 @@
-// Code generated by mdatagen. DO NOT EDIT.
-
-package metadata
-
-import (
- "go.opentelemetry.io/collector/config"
- "go.opentelemetry.io/collector/pdata/pmetric"
-)
-
-// Type is the component type name.
-const Type config.Type = "kubeletstatsreceiver"
-
-// MetricIntf is an interface to generically interact with generated metric.
-type MetricIntf interface {
- Name() string
- New() pmetric.Metric
- Init(metric pmetric.Metric)
-}
-
-// Intentionally not exposing this so that it is opaque and can change freely.
-type metricImpl struct {
- name string
- initFunc func(pmetric.Metric)
-}
-
-// Name returns the metric name.
-func (m *metricImpl) Name() string {
- return m.name
-}
-
-// New creates a metric object preinitialized.
-func (m *metricImpl) New() pmetric.Metric {
- metric := pmetric.NewMetric()
- m.Init(metric)
- return metric
-}
-
-// Init initializes the provided metric object.
-func (m *metricImpl) Init(metric pmetric.Metric) {
- m.initFunc(metric)
-}
-
-type metricStruct struct {
- ContainerCPUTime MetricIntf
- ContainerCPUUtilization MetricIntf
- ContainerFilesystemAvailable MetricIntf
- ContainerFilesystemCapacity MetricIntf
- ContainerFilesystemUsage MetricIntf
- ContainerMemoryAvailable MetricIntf
- ContainerMemoryMajorPageFaults MetricIntf
- ContainerMemoryPageFaults MetricIntf
- ContainerMemoryRss MetricIntf
- ContainerMemoryUsage MetricIntf
- ContainerMemoryWorkingSet MetricIntf
- K8sNodeCPUTime MetricIntf
- K8sNodeCPUUtilization MetricIntf
- K8sNodeFilesystemAvailable MetricIntf
- K8sNodeFilesystemCapacity MetricIntf
- K8sNodeFilesystemUsage MetricIntf
- K8sNodeMemoryAvailable MetricIntf
- K8sNodeMemoryMajorPageFaults MetricIntf
- K8sNodeMemoryPageFaults MetricIntf
- K8sNodeMemoryRss MetricIntf
- K8sNodeMemoryUsage MetricIntf
- K8sNodeMemoryWorkingSet MetricIntf
- K8sNodeNetworkErrors MetricIntf
- K8sNodeNetworkIo MetricIntf
- K8sPodCPUTime MetricIntf
- K8sPodCPUUtilization MetricIntf
- K8sPodFilesystemAvailable MetricIntf
- K8sPodFilesystemCapacity MetricIntf
- K8sPodFilesystemUsage MetricIntf
- K8sPodMemoryAvailable MetricIntf
- K8sPodMemoryMajorPageFaults MetricIntf
- K8sPodMemoryPageFaults MetricIntf
- K8sPodMemoryRss MetricIntf
- K8sPodMemoryUsage MetricIntf
- K8sPodMemoryWorkingSet MetricIntf
- K8sPodNetworkErrors MetricIntf
- K8sPodNetworkIo MetricIntf
- K8sVolumeAvailable MetricIntf
- K8sVolumeCapacity MetricIntf
- K8sVolumeInodes MetricIntf
- K8sVolumeInodesFree MetricIntf
- K8sVolumeInodesUsed MetricIntf
-}
-
-// Names returns a list of all the metric name strings.
-func (m *metricStruct) Names() []string {
- return []string{
- "container.cpu.time",
- "container.cpu.utilization",
- "container.filesystem.available",
- "container.filesystem.capacity",
- "container.filesystem.usage",
- "container.memory.available",
- "container.memory.major_page_faults",
- "container.memory.page_faults",
- "container.memory.rss",
- "container.memory.usage",
- "container.memory.working_set",
- "k8s.node.cpu.time",
- "k8s.node.cpu.utilization",
- "k8s.node.filesystem.available",
- "k8s.node.filesystem.capacity",
- "k8s.node.filesystem.usage",
- "k8s.node.memory.available",
- "k8s.node.memory.major_page_faults",
- "k8s.node.memory.page_faults",
- "k8s.node.memory.rss",
- "k8s.node.memory.usage",
- "k8s.node.memory.working_set",
- "k8s.node.network.errors",
- "k8s.node.network.io",
- "k8s.pod.cpu.time",
- "k8s.pod.cpu.utilization",
- "k8s.pod.filesystem.available",
- "k8s.pod.filesystem.capacity",
- "k8s.pod.filesystem.usage",
- "k8s.pod.memory.available",
- "k8s.pod.memory.major_page_faults",
- "k8s.pod.memory.page_faults",
- "k8s.pod.memory.rss",
- "k8s.pod.memory.usage",
- "k8s.pod.memory.working_set",
- "k8s.pod.network.errors",
- "k8s.pod.network.io",
- "k8s.volume.available",
- "k8s.volume.capacity",
- "k8s.volume.inodes",
- "k8s.volume.inodes.free",
- "k8s.volume.inodes.used",
- }
-}
-
-var metricsByName = map[string]MetricIntf{
- "container.cpu.time": Metrics.ContainerCPUTime,
- "container.cpu.utilization": Metrics.ContainerCPUUtilization,
- "container.filesystem.available": Metrics.ContainerFilesystemAvailable,
- "container.filesystem.capacity": Metrics.ContainerFilesystemCapacity,
- "container.filesystem.usage": Metrics.ContainerFilesystemUsage,
- "container.memory.available": Metrics.ContainerMemoryAvailable,
- "container.memory.major_page_faults": Metrics.ContainerMemoryMajorPageFaults,
- "container.memory.page_faults": Metrics.ContainerMemoryPageFaults,
- "container.memory.rss": Metrics.ContainerMemoryRss,
- "container.memory.usage": Metrics.ContainerMemoryUsage,
- "container.memory.working_set": Metrics.ContainerMemoryWorkingSet,
- "k8s.node.cpu.time": Metrics.K8sNodeCPUTime,
- "k8s.node.cpu.utilization": Metrics.K8sNodeCPUUtilization,
- "k8s.node.filesystem.available": Metrics.K8sNodeFilesystemAvailable,
- "k8s.node.filesystem.capacity": Metrics.K8sNodeFilesystemCapacity,
- "k8s.node.filesystem.usage": Metrics.K8sNodeFilesystemUsage,
- "k8s.node.memory.available": Metrics.K8sNodeMemoryAvailable,
- "k8s.node.memory.major_page_faults": Metrics.K8sNodeMemoryMajorPageFaults,
- "k8s.node.memory.page_faults": Metrics.K8sNodeMemoryPageFaults,
- "k8s.node.memory.rss": Metrics.K8sNodeMemoryRss,
- "k8s.node.memory.usage": Metrics.K8sNodeMemoryUsage,
- "k8s.node.memory.working_set": Metrics.K8sNodeMemoryWorkingSet,
- "k8s.node.network.errors": Metrics.K8sNodeNetworkErrors,
- "k8s.node.network.io": Metrics.K8sNodeNetworkIo,
- "k8s.pod.cpu.time": Metrics.K8sPodCPUTime,
- "k8s.pod.cpu.utilization": Metrics.K8sPodCPUUtilization,
- "k8s.pod.filesystem.available": Metrics.K8sPodFilesystemAvailable,
- "k8s.pod.filesystem.capacity": Metrics.K8sPodFilesystemCapacity,
- "k8s.pod.filesystem.usage": Metrics.K8sPodFilesystemUsage,
- "k8s.pod.memory.available": Metrics.K8sPodMemoryAvailable,
- "k8s.pod.memory.major_page_faults": Metrics.K8sPodMemoryMajorPageFaults,
- "k8s.pod.memory.page_faults": Metrics.K8sPodMemoryPageFaults,
- "k8s.pod.memory.rss": Metrics.K8sPodMemoryRss,
- "k8s.pod.memory.usage": Metrics.K8sPodMemoryUsage,
- "k8s.pod.memory.working_set": Metrics.K8sPodMemoryWorkingSet,
- "k8s.pod.network.errors": Metrics.K8sPodNetworkErrors,
- "k8s.pod.network.io": Metrics.K8sPodNetworkIo,
- "k8s.volume.available": Metrics.K8sVolumeAvailable,
- "k8s.volume.capacity": Metrics.K8sVolumeCapacity,
- "k8s.volume.inodes": Metrics.K8sVolumeInodes,
- "k8s.volume.inodes.free": Metrics.K8sVolumeInodesFree,
- "k8s.volume.inodes.used": Metrics.K8sVolumeInodesUsed,
-}
-
-func (m *metricStruct) ByName(n string) MetricIntf {
- return metricsByName[n]
-}
-
-// Metrics contains a set of methods for each metric that help with
-// manipulating those metrics.
-var Metrics = &metricStruct{
- &metricImpl{
- "container.cpu.time",
- func(metric pmetric.Metric) {
- metric.SetName("container.cpu.time")
- metric.SetDescription("Container CPU time")
- metric.SetUnit("s")
- metric.SetDataType(pmetric.MetricDataTypeSum)
- metric.Sum().SetIsMonotonic(true)
- metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
- },
- },
- &metricImpl{
- "container.cpu.utilization",
- func(metric pmetric.Metric) {
- metric.SetName("container.cpu.utilization")
- metric.SetDescription("Container CPU utilization")
- metric.SetUnit("1")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "container.filesystem.available",
- func(metric pmetric.Metric) {
- metric.SetName("container.filesystem.available")
- metric.SetDescription("Container filesystem available")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "container.filesystem.capacity",
- func(metric pmetric.Metric) {
- metric.SetName("container.filesystem.capacity")
- metric.SetDescription("Container filesystem capacity")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "container.filesystem.usage",
- func(metric pmetric.Metric) {
- metric.SetName("container.filesystem.usage")
- metric.SetDescription("Container filesystem usage")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "container.memory.available",
- func(metric pmetric.Metric) {
- metric.SetName("container.memory.available")
- metric.SetDescription("Container memory available")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "container.memory.major_page_faults",
- func(metric pmetric.Metric) {
- metric.SetName("container.memory.major_page_faults")
- metric.SetDescription("Container memory major_page_faults")
- metric.SetUnit("1")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "container.memory.page_faults",
- func(metric pmetric.Metric) {
- metric.SetName("container.memory.page_faults")
- metric.SetDescription("Container memory page_faults")
- metric.SetUnit("1")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "container.memory.rss",
- func(metric pmetric.Metric) {
- metric.SetName("container.memory.rss")
- metric.SetDescription("Container memory rss")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "container.memory.usage",
- func(metric pmetric.Metric) {
- metric.SetName("container.memory.usage")
- metric.SetDescription("Container memory usage")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "container.memory.working_set",
- func(metric pmetric.Metric) {
- metric.SetName("container.memory.working_set")
- metric.SetDescription("Container memory working_set")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.node.cpu.time",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.node.cpu.time")
- metric.SetDescription("Node CPU time")
- metric.SetUnit("s")
- metric.SetDataType(pmetric.MetricDataTypeSum)
- metric.Sum().SetIsMonotonic(true)
- metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
- },
- },
- &metricImpl{
- "k8s.node.cpu.utilization",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.node.cpu.utilization")
- metric.SetDescription("Node CPU utilization")
- metric.SetUnit("1")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.node.filesystem.available",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.node.filesystem.available")
- metric.SetDescription("Node filesystem available")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.node.filesystem.capacity",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.node.filesystem.capacity")
- metric.SetDescription("Node filesystem capacity")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.node.filesystem.usage",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.node.filesystem.usage")
- metric.SetDescription("Node filesystem usage")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.node.memory.available",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.node.memory.available")
- metric.SetDescription("Node memory available")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.node.memory.major_page_faults",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.node.memory.major_page_faults")
- metric.SetDescription("Node memory major_page_faults")
- metric.SetUnit("1")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.node.memory.page_faults",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.node.memory.page_faults")
- metric.SetDescription("Node memory page_faults")
- metric.SetUnit("1")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.node.memory.rss",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.node.memory.rss")
- metric.SetDescription("Node memory rss")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.node.memory.usage",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.node.memory.usage")
- metric.SetDescription("Node memory usage")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.node.memory.working_set",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.node.memory.working_set")
- metric.SetDescription("Node memory working_set")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.node.network.errors",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.node.network.errors")
- metric.SetDescription("Node network errors")
- metric.SetUnit("1")
- metric.SetDataType(pmetric.MetricDataTypeSum)
- metric.Sum().SetIsMonotonic(true)
- metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
- },
- },
- &metricImpl{
- "k8s.node.network.io",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.node.network.io")
- metric.SetDescription("Node network IO")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeSum)
- metric.Sum().SetIsMonotonic(true)
- metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
- },
- },
- &metricImpl{
- "k8s.pod.cpu.time",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.pod.cpu.time")
- metric.SetDescription("Pod CPU time")
- metric.SetUnit("s")
- metric.SetDataType(pmetric.MetricDataTypeSum)
- metric.Sum().SetIsMonotonic(true)
- metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
- },
- },
- &metricImpl{
- "k8s.pod.cpu.utilization",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.pod.cpu.utilization")
- metric.SetDescription("Pod CPU utilization")
- metric.SetUnit("1")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.pod.filesystem.available",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.pod.filesystem.available")
- metric.SetDescription("Pod filesystem available")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.pod.filesystem.capacity",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.pod.filesystem.capacity")
- metric.SetDescription("Pod filesystem capacity")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.pod.filesystem.usage",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.pod.filesystem.usage")
- metric.SetDescription("Pod filesystem usage")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.pod.memory.available",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.pod.memory.available")
- metric.SetDescription("Pod memory available")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.pod.memory.major_page_faults",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.pod.memory.major_page_faults")
- metric.SetDescription("Pod memory major_page_faults")
- metric.SetUnit("1")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.pod.memory.page_faults",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.pod.memory.page_faults")
- metric.SetDescription("Pod memory page_faults")
- metric.SetUnit("1")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.pod.memory.rss",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.pod.memory.rss")
- metric.SetDescription("Pod memory rss")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.pod.memory.usage",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.pod.memory.usage")
- metric.SetDescription("Pod memory usage")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.pod.memory.working_set",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.pod.memory.working_set")
- metric.SetDescription("Pod memory working_set")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.pod.network.errors",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.pod.network.errors")
- metric.SetDescription("Pod network errors")
- metric.SetUnit("1")
- metric.SetDataType(pmetric.MetricDataTypeSum)
- metric.Sum().SetIsMonotonic(true)
- metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
- },
- },
- &metricImpl{
- "k8s.pod.network.io",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.pod.network.io")
- metric.SetDescription("Pod network IO")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeSum)
- metric.Sum().SetIsMonotonic(true)
- metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
- },
- },
- &metricImpl{
- "k8s.volume.available",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.volume.available")
- metric.SetDescription("The number of available bytes in the volume.")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.volume.capacity",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.volume.capacity")
- metric.SetDescription("The total capacity in bytes of the volume.")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.volume.inodes",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.volume.inodes")
- metric.SetDescription("The total inodes in the filesystem.")
- metric.SetUnit("1")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.volume.inodes.free",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.volume.inodes.free")
- metric.SetDescription("The free inodes in the filesystem.")
- metric.SetUnit("1")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.volume.inodes.used",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.volume.inodes.used")
- metric.SetDescription("The inodes used by the filesystem. This may not equal inodes - free because filesystem may share inodes with other filesystems.")
- metric.SetUnit("1")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
-}
-
-// M contains a set of methods for each metric that help with
-// manipulating those metrics. M is an alias for Metrics
-var M = Metrics
-
-// Attributes contains the possible metric attributes that can be used.
-var Attributes = struct {
- // Direction (Direction of flow of bytes/operations (receive or transmit).)
- Direction string
- // Interface (Name of the network interface.)
- Interface string
-}{
- "direction",
- "interface",
-}
-
-// A is an alias for Attributes.
-var A = Attributes
-
-// AttributeDirection are the possible values that the attribute "direction" can have.
-var AttributeDirection = struct {
- Receive string
- Transmit string
-}{
- "receive",
- "transmit",
-}
diff --git a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_v2.go b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_v2.go
new file mode 100644
index 000000000000..d36dabc48ab2
--- /dev/null
+++ b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_v2.go
@@ -0,0 +1,2838 @@
+// Code generated by mdatagen. DO NOT EDIT.
+
+package metadata
+
+import (
+ "time"
+
+ "go.opentelemetry.io/collector/pdata/pcommon"
+ "go.opentelemetry.io/collector/pdata/pmetric"
+)
+
+// MetricSettings provides common settings for a particular metric.
+type MetricSettings struct {
+ Enabled bool `mapstructure:"enabled"`
+}
+
+// MetricsSettings provides settings for kubeletstatsreceiver metrics.
+type MetricsSettings struct {
+ ContainerCPUTime MetricSettings `mapstructure:"container.cpu.time"`
+ ContainerCPUUtilization MetricSettings `mapstructure:"container.cpu.utilization"`
+ ContainerFilesystemAvailable MetricSettings `mapstructure:"container.filesystem.available"`
+ ContainerFilesystemCapacity MetricSettings `mapstructure:"container.filesystem.capacity"`
+ ContainerFilesystemUsage MetricSettings `mapstructure:"container.filesystem.usage"`
+ ContainerMemoryAvailable MetricSettings `mapstructure:"container.memory.available"`
+ ContainerMemoryMajorPageFaults MetricSettings `mapstructure:"container.memory.major_page_faults"`
+ ContainerMemoryPageFaults MetricSettings `mapstructure:"container.memory.page_faults"`
+ ContainerMemoryRss MetricSettings `mapstructure:"container.memory.rss"`
+ ContainerMemoryUsage MetricSettings `mapstructure:"container.memory.usage"`
+ ContainerMemoryWorkingSet MetricSettings `mapstructure:"container.memory.working_set"`
+ K8sNodeCPUTime MetricSettings `mapstructure:"k8s.node.cpu.time"`
+ K8sNodeCPUUtilization MetricSettings `mapstructure:"k8s.node.cpu.utilization"`
+ K8sNodeFilesystemAvailable MetricSettings `mapstructure:"k8s.node.filesystem.available"`
+ K8sNodeFilesystemCapacity MetricSettings `mapstructure:"k8s.node.filesystem.capacity"`
+ K8sNodeFilesystemUsage MetricSettings `mapstructure:"k8s.node.filesystem.usage"`
+ K8sNodeMemoryAvailable MetricSettings `mapstructure:"k8s.node.memory.available"`
+ K8sNodeMemoryMajorPageFaults MetricSettings `mapstructure:"k8s.node.memory.major_page_faults"`
+ K8sNodeMemoryPageFaults MetricSettings `mapstructure:"k8s.node.memory.page_faults"`
+ K8sNodeMemoryRss MetricSettings `mapstructure:"k8s.node.memory.rss"`
+ K8sNodeMemoryUsage MetricSettings `mapstructure:"k8s.node.memory.usage"`
+ K8sNodeMemoryWorkingSet MetricSettings `mapstructure:"k8s.node.memory.working_set"`
+ K8sNodeNetworkErrors MetricSettings `mapstructure:"k8s.node.network.errors"`
+ K8sNodeNetworkIo MetricSettings `mapstructure:"k8s.node.network.io"`
+ K8sPodCPUTime MetricSettings `mapstructure:"k8s.pod.cpu.time"`
+ K8sPodCPUUtilization MetricSettings `mapstructure:"k8s.pod.cpu.utilization"`
+ K8sPodFilesystemAvailable MetricSettings `mapstructure:"k8s.pod.filesystem.available"`
+ K8sPodFilesystemCapacity MetricSettings `mapstructure:"k8s.pod.filesystem.capacity"`
+ K8sPodFilesystemUsage MetricSettings `mapstructure:"k8s.pod.filesystem.usage"`
+ K8sPodMemoryAvailable MetricSettings `mapstructure:"k8s.pod.memory.available"`
+ K8sPodMemoryMajorPageFaults MetricSettings `mapstructure:"k8s.pod.memory.major_page_faults"`
+ K8sPodMemoryPageFaults MetricSettings `mapstructure:"k8s.pod.memory.page_faults"`
+ K8sPodMemoryRss MetricSettings `mapstructure:"k8s.pod.memory.rss"`
+ K8sPodMemoryUsage MetricSettings `mapstructure:"k8s.pod.memory.usage"`
+ K8sPodMemoryWorkingSet MetricSettings `mapstructure:"k8s.pod.memory.working_set"`
+ K8sPodNetworkErrors MetricSettings `mapstructure:"k8s.pod.network.errors"`
+ K8sPodNetworkIo MetricSettings `mapstructure:"k8s.pod.network.io"`
+ K8sVolumeAvailable MetricSettings `mapstructure:"k8s.volume.available"`
+ K8sVolumeCapacity MetricSettings `mapstructure:"k8s.volume.capacity"`
+ K8sVolumeInodes MetricSettings `mapstructure:"k8s.volume.inodes"`
+ K8sVolumeInodesFree MetricSettings `mapstructure:"k8s.volume.inodes.free"`
+ K8sVolumeInodesUsed MetricSettings `mapstructure:"k8s.volume.inodes.used"`
+}
+
+func DefaultMetricsSettings() MetricsSettings {
+ return MetricsSettings{
+ ContainerCPUTime: MetricSettings{
+ Enabled: true,
+ },
+ ContainerCPUUtilization: MetricSettings{
+ Enabled: true,
+ },
+ ContainerFilesystemAvailable: MetricSettings{
+ Enabled: true,
+ },
+ ContainerFilesystemCapacity: MetricSettings{
+ Enabled: true,
+ },
+ ContainerFilesystemUsage: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryAvailable: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryMajorPageFaults: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryPageFaults: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryRss: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryUsage: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryWorkingSet: MetricSettings{
+ Enabled: true,
+ },
+ K8sNodeCPUTime: MetricSettings{
+ Enabled: true,
+ },
+ K8sNodeCPUUtilization: MetricSettings{
+ Enabled: true,
+ },
+ K8sNodeFilesystemAvailable: MetricSettings{
+ Enabled: true,
+ },
+ K8sNodeFilesystemCapacity: MetricSettings{
+ Enabled: true,
+ },
+ K8sNodeFilesystemUsage: MetricSettings{
+ Enabled: true,
+ },
+ K8sNodeMemoryAvailable: MetricSettings{
+ Enabled: true,
+ },
+ K8sNodeMemoryMajorPageFaults: MetricSettings{
+ Enabled: true,
+ },
+ K8sNodeMemoryPageFaults: MetricSettings{
+ Enabled: true,
+ },
+ K8sNodeMemoryRss: MetricSettings{
+ Enabled: true,
+ },
+ K8sNodeMemoryUsage: MetricSettings{
+ Enabled: true,
+ },
+ K8sNodeMemoryWorkingSet: MetricSettings{
+ Enabled: true,
+ },
+ K8sNodeNetworkErrors: MetricSettings{
+ Enabled: true,
+ },
+ K8sNodeNetworkIo: MetricSettings{
+ Enabled: true,
+ },
+ K8sPodCPUTime: MetricSettings{
+ Enabled: true,
+ },
+ K8sPodCPUUtilization: MetricSettings{
+ Enabled: true,
+ },
+ K8sPodFilesystemAvailable: MetricSettings{
+ Enabled: true,
+ },
+ K8sPodFilesystemCapacity: MetricSettings{
+ Enabled: true,
+ },
+ K8sPodFilesystemUsage: MetricSettings{
+ Enabled: true,
+ },
+ K8sPodMemoryAvailable: MetricSettings{
+ Enabled: true,
+ },
+ K8sPodMemoryMajorPageFaults: MetricSettings{
+ Enabled: true,
+ },
+ K8sPodMemoryPageFaults: MetricSettings{
+ Enabled: true,
+ },
+ K8sPodMemoryRss: MetricSettings{
+ Enabled: true,
+ },
+ K8sPodMemoryUsage: MetricSettings{
+ Enabled: true,
+ },
+ K8sPodMemoryWorkingSet: MetricSettings{
+ Enabled: true,
+ },
+ K8sPodNetworkErrors: MetricSettings{
+ Enabled: true,
+ },
+ K8sPodNetworkIo: MetricSettings{
+ Enabled: true,
+ },
+ K8sVolumeAvailable: MetricSettings{
+ Enabled: true,
+ },
+ K8sVolumeCapacity: MetricSettings{
+ Enabled: true,
+ },
+ K8sVolumeInodes: MetricSettings{
+ Enabled: true,
+ },
+ K8sVolumeInodesFree: MetricSettings{
+ Enabled: true,
+ },
+ K8sVolumeInodesUsed: MetricSettings{
+ Enabled: true,
+ },
+ }
+}
+
+// AttributeDirection specifies a value for the direction attribute.
+type AttributeDirection int
+
+const (
+ _ AttributeDirection = iota
+ AttributeDirectionReceive
+ AttributeDirectionTransmit
+)
+
+// String returns the string representation of the AttributeDirection.
+func (av AttributeDirection) String() string {
+ switch av {
+ case AttributeDirectionReceive:
+ return "receive"
+ case AttributeDirectionTransmit:
+ return "transmit"
+ }
+ return ""
+}
+
+// MapAttributeDirection is a helper map of string to AttributeDirection attribute value.
+var MapAttributeDirection = map[string]AttributeDirection{
+ "receive": AttributeDirectionReceive,
+ "transmit": AttributeDirectionTransmit,
+}
+
+type metricContainerCPUTime struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.cpu.time metric with initial data.
+func (m *metricContainerCPUTime) init() {
+ m.data.SetName("container.cpu.time")
+ m.data.SetDescription("Container CPU time")
+ m.data.SetUnit("s")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+}
+
+func (m *metricContainerCPUTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetDoubleVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerCPUTime) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerCPUTime) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerCPUTime(settings MetricSettings) metricContainerCPUTime {
+ m := metricContainerCPUTime{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerCPUUtilization struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.cpu.utilization metric with initial data.
+func (m *metricContainerCPUUtilization) init() {
+ m.data.SetName("container.cpu.utilization")
+ m.data.SetDescription("Container CPU utilization")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerCPUUtilization) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetDoubleVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerCPUUtilization) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerCPUUtilization) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerCPUUtilization(settings MetricSettings) metricContainerCPUUtilization {
+ m := metricContainerCPUUtilization{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerFilesystemAvailable struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.filesystem.available metric with initial data.
+func (m *metricContainerFilesystemAvailable) init() {
+ m.data.SetName("container.filesystem.available")
+ m.data.SetDescription("Container filesystem available")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerFilesystemAvailable) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerFilesystemAvailable) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerFilesystemAvailable) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerFilesystemAvailable(settings MetricSettings) metricContainerFilesystemAvailable {
+ m := metricContainerFilesystemAvailable{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricContainerFilesystemCapacity accumulates data points for the
+// "container.filesystem.capacity" gauge (bytes).
+// NOTE(review): mdatagen-generated code — change metadata.yaml and regenerate
+// rather than editing by hand.
+type metricContainerFilesystemCapacity struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.filesystem.capacity metric with initial data.
+func (m *metricContainerFilesystemCapacity) init() {
+ m.data.SetName("container.filesystem.capacity")
+ m.data.SetDescription("Container filesystem capacity")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends one data point; no-op when the metric is disabled.
+func (m *metricContainerFilesystemCapacity) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+// NOTE(review): init() never calls EnsureCapacity for this metric, so the
+// tracked capacity is currently unused — generator-template quirk, confirm upstream.
+func (m *metricContainerFilesystemCapacity) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerFilesystemCapacity) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricContainerFilesystemCapacity builds the metric; data is allocated only when enabled.
+func newMetricContainerFilesystemCapacity(settings MetricSettings) metricContainerFilesystemCapacity {
+ m := metricContainerFilesystemCapacity{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricContainerFilesystemUsage accumulates data points for the
+// "container.filesystem.usage" gauge (bytes).
+// NOTE(review): mdatagen-generated code — change metadata.yaml and regenerate
+// rather than editing by hand.
+type metricContainerFilesystemUsage struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.filesystem.usage metric with initial data.
+func (m *metricContainerFilesystemUsage) init() {
+ m.data.SetName("container.filesystem.usage")
+ m.data.SetDescription("Container filesystem usage")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends one data point; no-op when the metric is disabled.
+func (m *metricContainerFilesystemUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+// NOTE(review): init() never calls EnsureCapacity for this metric, so the
+// tracked capacity is currently unused — generator-template quirk, confirm upstream.
+func (m *metricContainerFilesystemUsage) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerFilesystemUsage) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricContainerFilesystemUsage builds the metric; data is allocated only when enabled.
+func newMetricContainerFilesystemUsage(settings MetricSettings) metricContainerFilesystemUsage {
+ m := metricContainerFilesystemUsage{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricContainerMemoryAvailable accumulates data points for the
+// "container.memory.available" gauge (bytes).
+// NOTE(review): mdatagen-generated code — change metadata.yaml and regenerate
+// rather than editing by hand.
+type metricContainerMemoryAvailable struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.available metric with initial data.
+func (m *metricContainerMemoryAvailable) init() {
+ m.data.SetName("container.memory.available")
+ m.data.SetDescription("Container memory available")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends one data point; no-op when the metric is disabled.
+func (m *metricContainerMemoryAvailable) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+// NOTE(review): init() never calls EnsureCapacity for this metric, so the
+// tracked capacity is currently unused — generator-template quirk, confirm upstream.
+func (m *metricContainerMemoryAvailable) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryAvailable) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricContainerMemoryAvailable builds the metric; data is allocated only when enabled.
+func newMetricContainerMemoryAvailable(settings MetricSettings) metricContainerMemoryAvailable {
+ m := metricContainerMemoryAvailable{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricContainerMemoryMajorPageFaults accumulates data points for the
+// "container.memory.major_page_faults" gauge (count).
+// NOTE(review): mdatagen-generated code — change metadata.yaml and regenerate
+// rather than editing by hand.
+type metricContainerMemoryMajorPageFaults struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.major_page_faults metric with initial data.
+func (m *metricContainerMemoryMajorPageFaults) init() {
+ m.data.SetName("container.memory.major_page_faults")
+ m.data.SetDescription("Container memory major_page_faults")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends one data point; no-op when the metric is disabled.
+func (m *metricContainerMemoryMajorPageFaults) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+// NOTE(review): init() never calls EnsureCapacity for this metric, so the
+// tracked capacity is currently unused — generator-template quirk, confirm upstream.
+func (m *metricContainerMemoryMajorPageFaults) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryMajorPageFaults) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricContainerMemoryMajorPageFaults builds the metric; data is allocated only when enabled.
+func newMetricContainerMemoryMajorPageFaults(settings MetricSettings) metricContainerMemoryMajorPageFaults {
+ m := metricContainerMemoryMajorPageFaults{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricContainerMemoryPageFaults accumulates data points for the
+// "container.memory.page_faults" gauge (count).
+// NOTE(review): mdatagen-generated code — change metadata.yaml and regenerate
+// rather than editing by hand.
+type metricContainerMemoryPageFaults struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.page_faults metric with initial data.
+func (m *metricContainerMemoryPageFaults) init() {
+ m.data.SetName("container.memory.page_faults")
+ m.data.SetDescription("Container memory page_faults")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends one data point; no-op when the metric is disabled.
+func (m *metricContainerMemoryPageFaults) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+// NOTE(review): init() never calls EnsureCapacity for this metric, so the
+// tracked capacity is currently unused — generator-template quirk, confirm upstream.
+func (m *metricContainerMemoryPageFaults) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryPageFaults) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricContainerMemoryPageFaults builds the metric; data is allocated only when enabled.
+func newMetricContainerMemoryPageFaults(settings MetricSettings) metricContainerMemoryPageFaults {
+ m := metricContainerMemoryPageFaults{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricContainerMemoryRss accumulates data points for the
+// "container.memory.rss" gauge (bytes).
+// NOTE(review): mdatagen-generated code — change metadata.yaml and regenerate
+// rather than editing by hand.
+type metricContainerMemoryRss struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.rss metric with initial data.
+func (m *metricContainerMemoryRss) init() {
+ m.data.SetName("container.memory.rss")
+ m.data.SetDescription("Container memory rss")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends one data point; no-op when the metric is disabled.
+func (m *metricContainerMemoryRss) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+// NOTE(review): init() never calls EnsureCapacity for this metric, so the
+// tracked capacity is currently unused — generator-template quirk, confirm upstream.
+func (m *metricContainerMemoryRss) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryRss) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricContainerMemoryRss builds the metric; data is allocated only when enabled.
+func newMetricContainerMemoryRss(settings MetricSettings) metricContainerMemoryRss {
+ m := metricContainerMemoryRss{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricContainerMemoryUsage accumulates data points for the
+// "container.memory.usage" gauge (bytes).
+// NOTE(review): mdatagen-generated code — change metadata.yaml and regenerate
+// rather than editing by hand.
+type metricContainerMemoryUsage struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.usage metric with initial data.
+func (m *metricContainerMemoryUsage) init() {
+ m.data.SetName("container.memory.usage")
+ m.data.SetDescription("Container memory usage")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends one data point; no-op when the metric is disabled.
+func (m *metricContainerMemoryUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+// NOTE(review): init() never calls EnsureCapacity for this metric, so the
+// tracked capacity is currently unused — generator-template quirk, confirm upstream.
+func (m *metricContainerMemoryUsage) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryUsage) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricContainerMemoryUsage builds the metric; data is allocated only when enabled.
+func newMetricContainerMemoryUsage(settings MetricSettings) metricContainerMemoryUsage {
+ m := metricContainerMemoryUsage{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricContainerMemoryWorkingSet accumulates data points for the
+// "container.memory.working_set" gauge (bytes).
+// NOTE(review): mdatagen-generated code — change metadata.yaml and regenerate
+// rather than editing by hand.
+type metricContainerMemoryWorkingSet struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.working_set metric with initial data.
+func (m *metricContainerMemoryWorkingSet) init() {
+ m.data.SetName("container.memory.working_set")
+ m.data.SetDescription("Container memory working_set")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends one data point; no-op when the metric is disabled.
+func (m *metricContainerMemoryWorkingSet) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+// NOTE(review): init() never calls EnsureCapacity for this metric, so the
+// tracked capacity is currently unused — generator-template quirk, confirm upstream.
+func (m *metricContainerMemoryWorkingSet) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryWorkingSet) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricContainerMemoryWorkingSet builds the metric; data is allocated only when enabled.
+func newMetricContainerMemoryWorkingSet(settings MetricSettings) metricContainerMemoryWorkingSet {
+ m := metricContainerMemoryWorkingSet{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sNodeCPUTime accumulates data points for the "k8s.node.cpu.time"
+// monotonic cumulative sum (seconds, float values).
+// NOTE(review): mdatagen-generated code — change metadata.yaml and regenerate
+// rather than editing by hand.
+type metricK8sNodeCPUTime struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.node.cpu.time metric with initial data.
+func (m *metricK8sNodeCPUTime) init() {
+ m.data.SetName("k8s.node.cpu.time")
+ m.data.SetDescription("Node CPU time")
+ m.data.SetUnit("s")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+}
+
+// recordDataPoint appends one double data point; no-op when the metric is disabled.
+func (m *metricK8sNodeCPUTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetDoubleVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+// NOTE(review): init() never calls EnsureCapacity for this metric, so the
+// tracked capacity is currently unused — generator-template quirk, confirm upstream.
+func (m *metricK8sNodeCPUTime) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sNodeCPUTime) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sNodeCPUTime builds the metric; data is allocated only when enabled.
+func newMetricK8sNodeCPUTime(settings MetricSettings) metricK8sNodeCPUTime {
+ m := metricK8sNodeCPUTime{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sNodeCPUUtilization accumulates data points for the
+// "k8s.node.cpu.utilization" gauge (ratio, float values).
+// NOTE(review): mdatagen-generated code — change metadata.yaml and regenerate
+// rather than editing by hand.
+type metricK8sNodeCPUUtilization struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.node.cpu.utilization metric with initial data.
+func (m *metricK8sNodeCPUUtilization) init() {
+ m.data.SetName("k8s.node.cpu.utilization")
+ m.data.SetDescription("Node CPU utilization")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends one double data point; no-op when the metric is disabled.
+func (m *metricK8sNodeCPUUtilization) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetDoubleVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+// NOTE(review): init() never calls EnsureCapacity for this metric, so the
+// tracked capacity is currently unused — generator-template quirk, confirm upstream.
+func (m *metricK8sNodeCPUUtilization) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sNodeCPUUtilization) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sNodeCPUUtilization builds the metric; data is allocated only when enabled.
+func newMetricK8sNodeCPUUtilization(settings MetricSettings) metricK8sNodeCPUUtilization {
+ m := metricK8sNodeCPUUtilization{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sNodeFilesystemAvailable accumulates data points for the
+// "k8s.node.filesystem.available" gauge (bytes).
+// NOTE(review): mdatagen-generated code — change metadata.yaml and regenerate
+// rather than editing by hand.
+type metricK8sNodeFilesystemAvailable struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.node.filesystem.available metric with initial data.
+func (m *metricK8sNodeFilesystemAvailable) init() {
+ m.data.SetName("k8s.node.filesystem.available")
+ m.data.SetDescription("Node filesystem available")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends one data point; no-op when the metric is disabled.
+func (m *metricK8sNodeFilesystemAvailable) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+// NOTE(review): init() never calls EnsureCapacity for this metric, so the
+// tracked capacity is currently unused — generator-template quirk, confirm upstream.
+func (m *metricK8sNodeFilesystemAvailable) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sNodeFilesystemAvailable) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sNodeFilesystemAvailable builds the metric; data is allocated only when enabled.
+func newMetricK8sNodeFilesystemAvailable(settings MetricSettings) metricK8sNodeFilesystemAvailable {
+ m := metricK8sNodeFilesystemAvailable{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sNodeFilesystemCapacity accumulates data points for the
+// "k8s.node.filesystem.capacity" gauge (bytes).
+// NOTE(review): mdatagen-generated code — change metadata.yaml and regenerate
+// rather than editing by hand.
+type metricK8sNodeFilesystemCapacity struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.node.filesystem.capacity metric with initial data.
+func (m *metricK8sNodeFilesystemCapacity) init() {
+ m.data.SetName("k8s.node.filesystem.capacity")
+ m.data.SetDescription("Node filesystem capacity")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends one data point; no-op when the metric is disabled.
+func (m *metricK8sNodeFilesystemCapacity) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+// NOTE(review): init() never calls EnsureCapacity for this metric, so the
+// tracked capacity is currently unused — generator-template quirk, confirm upstream.
+func (m *metricK8sNodeFilesystemCapacity) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sNodeFilesystemCapacity) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sNodeFilesystemCapacity builds the metric; data is allocated only when enabled.
+func newMetricK8sNodeFilesystemCapacity(settings MetricSettings) metricK8sNodeFilesystemCapacity {
+ m := metricK8sNodeFilesystemCapacity{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sNodeFilesystemUsage accumulates data points for the
+// "k8s.node.filesystem.usage" gauge (bytes).
+// NOTE(review): mdatagen-generated code — change metadata.yaml and regenerate
+// rather than editing by hand.
+type metricK8sNodeFilesystemUsage struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.node.filesystem.usage metric with initial data.
+func (m *metricK8sNodeFilesystemUsage) init() {
+ m.data.SetName("k8s.node.filesystem.usage")
+ m.data.SetDescription("Node filesystem usage")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends one data point; no-op when the metric is disabled.
+func (m *metricK8sNodeFilesystemUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+// NOTE(review): init() never calls EnsureCapacity for this metric, so the
+// tracked capacity is currently unused — generator-template quirk, confirm upstream.
+func (m *metricK8sNodeFilesystemUsage) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sNodeFilesystemUsage) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sNodeFilesystemUsage builds the metric; data is allocated only when enabled.
+func newMetricK8sNodeFilesystemUsage(settings MetricSettings) metricK8sNodeFilesystemUsage {
+ m := metricK8sNodeFilesystemUsage{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sNodeMemoryAvailable accumulates data points for the
+// "k8s.node.memory.available" gauge (bytes).
+// NOTE(review): mdatagen-generated code — change metadata.yaml and regenerate
+// rather than editing by hand.
+type metricK8sNodeMemoryAvailable struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.node.memory.available metric with initial data.
+func (m *metricK8sNodeMemoryAvailable) init() {
+ m.data.SetName("k8s.node.memory.available")
+ m.data.SetDescription("Node memory available")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends one data point; no-op when the metric is disabled.
+func (m *metricK8sNodeMemoryAvailable) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+// NOTE(review): init() never calls EnsureCapacity for this metric, so the
+// tracked capacity is currently unused — generator-template quirk, confirm upstream.
+func (m *metricK8sNodeMemoryAvailable) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sNodeMemoryAvailable) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sNodeMemoryAvailable builds the metric; data is allocated only when enabled.
+func newMetricK8sNodeMemoryAvailable(settings MetricSettings) metricK8sNodeMemoryAvailable {
+ m := metricK8sNodeMemoryAvailable{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sNodeMemoryMajorPageFaults accumulates data points for the
+// "k8s.node.memory.major_page_faults" gauge (count).
+// NOTE(review): mdatagen-generated code — change metadata.yaml and regenerate
+// rather than editing by hand.
+type metricK8sNodeMemoryMajorPageFaults struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.node.memory.major_page_faults metric with initial data.
+func (m *metricK8sNodeMemoryMajorPageFaults) init() {
+ m.data.SetName("k8s.node.memory.major_page_faults")
+ m.data.SetDescription("Node memory major_page_faults")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends one data point; no-op when the metric is disabled.
+func (m *metricK8sNodeMemoryMajorPageFaults) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+// NOTE(review): init() never calls EnsureCapacity for this metric, so the
+// tracked capacity is currently unused — generator-template quirk, confirm upstream.
+func (m *metricK8sNodeMemoryMajorPageFaults) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sNodeMemoryMajorPageFaults) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sNodeMemoryMajorPageFaults builds the metric; data is allocated only when enabled.
+func newMetricK8sNodeMemoryMajorPageFaults(settings MetricSettings) metricK8sNodeMemoryMajorPageFaults {
+ m := metricK8sNodeMemoryMajorPageFaults{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sNodeMemoryPageFaults accumulates data points for the
+// "k8s.node.memory.page_faults" gauge (count).
+// NOTE(review): mdatagen-generated code — change metadata.yaml and regenerate
+// rather than editing by hand.
+type metricK8sNodeMemoryPageFaults struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.node.memory.page_faults metric with initial data.
+func (m *metricK8sNodeMemoryPageFaults) init() {
+ m.data.SetName("k8s.node.memory.page_faults")
+ m.data.SetDescription("Node memory page_faults")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends one data point; no-op when the metric is disabled.
+func (m *metricK8sNodeMemoryPageFaults) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+// NOTE(review): init() never calls EnsureCapacity for this metric, so the
+// tracked capacity is currently unused — generator-template quirk, confirm upstream.
+func (m *metricK8sNodeMemoryPageFaults) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sNodeMemoryPageFaults) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sNodeMemoryPageFaults builds the metric; data is allocated only when enabled.
+func newMetricK8sNodeMemoryPageFaults(settings MetricSettings) metricK8sNodeMemoryPageFaults {
+ m := metricK8sNodeMemoryPageFaults{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sNodeMemoryRss accumulates data points for the
+// "k8s.node.memory.rss" gauge (bytes).
+// NOTE(review): mdatagen-generated code — change metadata.yaml and regenerate
+// rather than editing by hand.
+type metricK8sNodeMemoryRss struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.node.memory.rss metric with initial data.
+func (m *metricK8sNodeMemoryRss) init() {
+ m.data.SetName("k8s.node.memory.rss")
+ m.data.SetDescription("Node memory rss")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends one data point; no-op when the metric is disabled.
+func (m *metricK8sNodeMemoryRss) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+// NOTE(review): init() never calls EnsureCapacity for this metric, so the
+// tracked capacity is currently unused — generator-template quirk, confirm upstream.
+func (m *metricK8sNodeMemoryRss) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sNodeMemoryRss) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sNodeMemoryRss builds the metric; data is allocated only when enabled.
+func newMetricK8sNodeMemoryRss(settings MetricSettings) metricK8sNodeMemoryRss {
+ m := metricK8sNodeMemoryRss{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sNodeMemoryUsage accumulates data points for the
+// "k8s.node.memory.usage" gauge (bytes).
+// NOTE(review): mdatagen-generated code — change metadata.yaml and regenerate
+// rather than editing by hand.
+type metricK8sNodeMemoryUsage struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.node.memory.usage metric with initial data.
+func (m *metricK8sNodeMemoryUsage) init() {
+ m.data.SetName("k8s.node.memory.usage")
+ m.data.SetDescription("Node memory usage")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends one data point; no-op when the metric is disabled.
+func (m *metricK8sNodeMemoryUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+// NOTE(review): init() never calls EnsureCapacity for this metric, so the
+// tracked capacity is currently unused — generator-template quirk, confirm upstream.
+func (m *metricK8sNodeMemoryUsage) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sNodeMemoryUsage) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sNodeMemoryUsage builds the metric; data is allocated only when enabled.
+func newMetricK8sNodeMemoryUsage(settings MetricSettings) metricK8sNodeMemoryUsage {
+ m := metricK8sNodeMemoryUsage{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sNodeMemoryWorkingSet accumulates data points for the
+// "k8s.node.memory.working_set" gauge (bytes).
+// NOTE(review): mdatagen-generated code — change metadata.yaml and regenerate
+// rather than editing by hand.
+type metricK8sNodeMemoryWorkingSet struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.node.memory.working_set metric with initial data.
+func (m *metricK8sNodeMemoryWorkingSet) init() {
+ m.data.SetName("k8s.node.memory.working_set")
+ m.data.SetDescription("Node memory working_set")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends one data point; no-op when the metric is disabled.
+func (m *metricK8sNodeMemoryWorkingSet) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+// NOTE(review): init() never calls EnsureCapacity for this metric, so the
+// tracked capacity is currently unused — generator-template quirk, confirm upstream.
+func (m *metricK8sNodeMemoryWorkingSet) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sNodeMemoryWorkingSet) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sNodeMemoryWorkingSet builds the metric; data is allocated only when enabled.
+func newMetricK8sNodeMemoryWorkingSet(settings MetricSettings) metricK8sNodeMemoryWorkingSet {
+ m := metricK8sNodeMemoryWorkingSet{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sNodeNetworkErrors accumulates data points for the
+// "k8s.node.network.errors" monotonic cumulative sum, with one point per
+// interface/direction combination.
+// NOTE(review): mdatagen-generated code — change metadata.yaml and regenerate
+// rather than editing by hand.
+type metricK8sNodeNetworkErrors struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.node.network.errors metric with initial data.
+// Unlike the attribute-less metrics in this file, init pre-sizes the data
+// point slice from the capacity observed on the previous emit cycle.
+func (m *metricK8sNodeNetworkErrors) init() {
+ m.data.SetName("k8s.node.network.errors")
+ m.data.SetDescription("Node network errors")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+// recordDataPoint appends one data point tagged with interface and direction
+// attributes; no-op when disabled. A is presumably the generated
+// attribute-name table defined elsewhere in this file — confirm.
+func (m *metricK8sNodeNetworkErrors) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string, directionAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert(A.Interface, pcommon.NewValueString(interfaceAttributeValue))
+ dp.Attributes().Insert(A.Direction, pcommon.NewValueString(directionAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sNodeNetworkErrors) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sNodeNetworkErrors) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sNodeNetworkErrors builds the metric; data is allocated only when enabled.
+func newMetricK8sNodeNetworkErrors(settings MetricSettings) metricK8sNodeNetworkErrors {
+ m := metricK8sNodeNetworkErrors{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sNodeNetworkIo accumulates data points for the
+// "k8s.node.network.io" cumulative monotonic sum until emit is called.
+type metricK8sNodeNetworkIo struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.node.network.io metric with initial data.
+func (m *metricK8sNodeNetworkIo) init() {
+ m.data.SetName("k8s.node.network.io")
+ m.data.SetDescription("Node network IO")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+// recordDataPoint appends one sum data point tagged with the interface and
+// direction attributes; it is a no-op when the metric is disabled.
+func (m *metricK8sNodeNetworkIo) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string, directionAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert(A.Interface, pcommon.NewValueString(interfaceAttributeValue))
+ dp.Attributes().Insert(A.Direction, pcommon.NewValueString(directionAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sNodeNetworkIo) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sNodeNetworkIo) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sNodeNetworkIo returns a recorder whose data buffer is
+// allocated and initialized only when the metric is enabled in settings.
+func newMetricK8sNodeNetworkIo(settings MetricSettings) metricK8sNodeNetworkIo {
+ m := metricK8sNodeNetworkIo{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sPodCPUTime accumulates data points for the "k8s.pod.cpu.time"
+// cumulative monotonic sum (double values) until emit is called.
+type metricK8sPodCPUTime struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.pod.cpu.time metric with initial data.
+func (m *metricK8sPodCPUTime) init() {
+ m.data.SetName("k8s.pod.cpu.time")
+ m.data.SetDescription("Pod CPU time")
+ m.data.SetUnit("s")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+}
+
+// recordDataPoint appends one sum data point with the given timestamps and
+// double value; it is a no-op when the metric is disabled.
+func (m *metricK8sPodCPUTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetDoubleVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sPodCPUTime) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sPodCPUTime) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sPodCPUTime returns a recorder whose data buffer is
+// allocated and initialized only when the metric is enabled in settings.
+func newMetricK8sPodCPUTime(settings MetricSettings) metricK8sPodCPUTime {
+ m := metricK8sPodCPUTime{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sPodCPUUtilization accumulates data points for the
+// "k8s.pod.cpu.utilization" gauge (double values) until emit is called.
+type metricK8sPodCPUUtilization struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.pod.cpu.utilization metric with initial data.
+func (m *metricK8sPodCPUUtilization) init() {
+ m.data.SetName("k8s.pod.cpu.utilization")
+ m.data.SetDescription("Pod CPU utilization")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends one gauge data point with the given timestamps and
+// double value; it is a no-op when the metric is disabled.
+func (m *metricK8sPodCPUUtilization) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetDoubleVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sPodCPUUtilization) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sPodCPUUtilization) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sPodCPUUtilization returns a recorder whose data buffer is
+// allocated and initialized only when the metric is enabled in settings.
+func newMetricK8sPodCPUUtilization(settings MetricSettings) metricK8sPodCPUUtilization {
+ m := metricK8sPodCPUUtilization{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sPodFilesystemAvailable accumulates data points for the
+// "k8s.pod.filesystem.available" gauge until emit is called.
+type metricK8sPodFilesystemAvailable struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.pod.filesystem.available metric with initial data.
+func (m *metricK8sPodFilesystemAvailable) init() {
+ m.data.SetName("k8s.pod.filesystem.available")
+ m.data.SetDescription("Pod filesystem available")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends one gauge data point with the given timestamps and
+// value; it is a no-op when the metric is disabled.
+func (m *metricK8sPodFilesystemAvailable) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sPodFilesystemAvailable) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sPodFilesystemAvailable) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sPodFilesystemAvailable returns a recorder whose data buffer is
+// allocated and initialized only when the metric is enabled in settings.
+func newMetricK8sPodFilesystemAvailable(settings MetricSettings) metricK8sPodFilesystemAvailable {
+ m := metricK8sPodFilesystemAvailable{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sPodFilesystemCapacity accumulates data points for the
+// "k8s.pod.filesystem.capacity" gauge until emit is called.
+type metricK8sPodFilesystemCapacity struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.pod.filesystem.capacity metric with initial data.
+func (m *metricK8sPodFilesystemCapacity) init() {
+ m.data.SetName("k8s.pod.filesystem.capacity")
+ m.data.SetDescription("Pod filesystem capacity")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends one gauge data point with the given timestamps and
+// value; it is a no-op when the metric is disabled.
+func (m *metricK8sPodFilesystemCapacity) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sPodFilesystemCapacity) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sPodFilesystemCapacity) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sPodFilesystemCapacity returns a recorder whose data buffer is
+// allocated and initialized only when the metric is enabled in settings.
+func newMetricK8sPodFilesystemCapacity(settings MetricSettings) metricK8sPodFilesystemCapacity {
+ m := metricK8sPodFilesystemCapacity{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sPodFilesystemUsage accumulates data points for the
+// "k8s.pod.filesystem.usage" gauge until emit is called.
+type metricK8sPodFilesystemUsage struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.pod.filesystem.usage metric with initial data.
+func (m *metricK8sPodFilesystemUsage) init() {
+ m.data.SetName("k8s.pod.filesystem.usage")
+ m.data.SetDescription("Pod filesystem usage")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends one gauge data point with the given timestamps and
+// value; it is a no-op when the metric is disabled.
+func (m *metricK8sPodFilesystemUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sPodFilesystemUsage) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sPodFilesystemUsage) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sPodFilesystemUsage returns a recorder whose data buffer is
+// allocated and initialized only when the metric is enabled in settings.
+func newMetricK8sPodFilesystemUsage(settings MetricSettings) metricK8sPodFilesystemUsage {
+ m := metricK8sPodFilesystemUsage{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sPodMemoryAvailable accumulates data points for the
+// "k8s.pod.memory.available" gauge until emit is called.
+type metricK8sPodMemoryAvailable struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.pod.memory.available metric with initial data.
+func (m *metricK8sPodMemoryAvailable) init() {
+ m.data.SetName("k8s.pod.memory.available")
+ m.data.SetDescription("Pod memory available")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends one gauge data point with the given timestamps and
+// value; it is a no-op when the metric is disabled.
+func (m *metricK8sPodMemoryAvailable) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sPodMemoryAvailable) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sPodMemoryAvailable) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sPodMemoryAvailable returns a recorder whose data buffer is
+// allocated and initialized only when the metric is enabled in settings.
+func newMetricK8sPodMemoryAvailable(settings MetricSettings) metricK8sPodMemoryAvailable {
+ m := metricK8sPodMemoryAvailable{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sPodMemoryMajorPageFaults accumulates data points for the
+// "k8s.pod.memory.major_page_faults" gauge until emit is called.
+type metricK8sPodMemoryMajorPageFaults struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.pod.memory.major_page_faults metric with initial data.
+func (m *metricK8sPodMemoryMajorPageFaults) init() {
+ m.data.SetName("k8s.pod.memory.major_page_faults")
+ m.data.SetDescription("Pod memory major_page_faults")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends one gauge data point with the given timestamps and
+// value; it is a no-op when the metric is disabled.
+func (m *metricK8sPodMemoryMajorPageFaults) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sPodMemoryMajorPageFaults) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sPodMemoryMajorPageFaults) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sPodMemoryMajorPageFaults returns a recorder whose data buffer is
+// allocated and initialized only when the metric is enabled in settings.
+func newMetricK8sPodMemoryMajorPageFaults(settings MetricSettings) metricK8sPodMemoryMajorPageFaults {
+ m := metricK8sPodMemoryMajorPageFaults{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sPodMemoryPageFaults accumulates data points for the
+// "k8s.pod.memory.page_faults" gauge until emit is called.
+type metricK8sPodMemoryPageFaults struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.pod.memory.page_faults metric with initial data.
+func (m *metricK8sPodMemoryPageFaults) init() {
+ m.data.SetName("k8s.pod.memory.page_faults")
+ m.data.SetDescription("Pod memory page_faults")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends one gauge data point with the given timestamps and
+// value; it is a no-op when the metric is disabled.
+func (m *metricK8sPodMemoryPageFaults) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sPodMemoryPageFaults) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sPodMemoryPageFaults) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sPodMemoryPageFaults returns a recorder whose data buffer is
+// allocated and initialized only when the metric is enabled in settings.
+func newMetricK8sPodMemoryPageFaults(settings MetricSettings) metricK8sPodMemoryPageFaults {
+ m := metricK8sPodMemoryPageFaults{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sPodMemoryRss accumulates data points for the
+// "k8s.pod.memory.rss" gauge until emit is called.
+type metricK8sPodMemoryRss struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.pod.memory.rss metric with initial data.
+func (m *metricK8sPodMemoryRss) init() {
+ m.data.SetName("k8s.pod.memory.rss")
+ m.data.SetDescription("Pod memory rss")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends one gauge data point with the given timestamps and
+// value; it is a no-op when the metric is disabled.
+func (m *metricK8sPodMemoryRss) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sPodMemoryRss) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sPodMemoryRss) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sPodMemoryRss returns a recorder whose data buffer is
+// allocated and initialized only when the metric is enabled in settings.
+func newMetricK8sPodMemoryRss(settings MetricSettings) metricK8sPodMemoryRss {
+ m := metricK8sPodMemoryRss{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sPodMemoryUsage accumulates data points for the
+// "k8s.pod.memory.usage" gauge until emit is called.
+type metricK8sPodMemoryUsage struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.pod.memory.usage metric with initial data.
+func (m *metricK8sPodMemoryUsage) init() {
+ m.data.SetName("k8s.pod.memory.usage")
+ m.data.SetDescription("Pod memory usage")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends one gauge data point with the given timestamps and
+// value; it is a no-op when the metric is disabled.
+func (m *metricK8sPodMemoryUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sPodMemoryUsage) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sPodMemoryUsage) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sPodMemoryUsage returns a recorder whose data buffer is
+// allocated and initialized only when the metric is enabled in settings.
+func newMetricK8sPodMemoryUsage(settings MetricSettings) metricK8sPodMemoryUsage {
+ m := metricK8sPodMemoryUsage{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sPodMemoryWorkingSet accumulates data points for the
+// "k8s.pod.memory.working_set" gauge until emit is called.
+type metricK8sPodMemoryWorkingSet struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.pod.memory.working_set metric with initial data.
+func (m *metricK8sPodMemoryWorkingSet) init() {
+ m.data.SetName("k8s.pod.memory.working_set")
+ m.data.SetDescription("Pod memory working_set")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends one gauge data point with the given timestamps and
+// value; it is a no-op when the metric is disabled.
+func (m *metricK8sPodMemoryWorkingSet) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sPodMemoryWorkingSet) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sPodMemoryWorkingSet) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sPodMemoryWorkingSet returns a recorder whose data buffer is
+// allocated and initialized only when the metric is enabled in settings.
+func newMetricK8sPodMemoryWorkingSet(settings MetricSettings) metricK8sPodMemoryWorkingSet {
+ m := metricK8sPodMemoryWorkingSet{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sPodNetworkErrors accumulates data points for the
+// "k8s.pod.network.errors" cumulative monotonic sum until emit is called.
+type metricK8sPodNetworkErrors struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.pod.network.errors metric with initial data.
+func (m *metricK8sPodNetworkErrors) init() {
+ m.data.SetName("k8s.pod.network.errors")
+ m.data.SetDescription("Pod network errors")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+// recordDataPoint appends one sum data point tagged with the interface and
+// direction attributes; it is a no-op when the metric is disabled.
+func (m *metricK8sPodNetworkErrors) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string, directionAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert(A.Interface, pcommon.NewValueString(interfaceAttributeValue))
+ dp.Attributes().Insert(A.Direction, pcommon.NewValueString(directionAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sPodNetworkErrors) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sPodNetworkErrors) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sPodNetworkErrors returns a recorder whose data buffer is
+// allocated and initialized only when the metric is enabled in settings.
+func newMetricK8sPodNetworkErrors(settings MetricSettings) metricK8sPodNetworkErrors {
+ m := metricK8sPodNetworkErrors{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sPodNetworkIo accumulates data points for the
+// "k8s.pod.network.io" cumulative monotonic sum until emit is called.
+type metricK8sPodNetworkIo struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.pod.network.io metric with initial data.
+func (m *metricK8sPodNetworkIo) init() {
+ m.data.SetName("k8s.pod.network.io")
+ m.data.SetDescription("Pod network IO")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+// recordDataPoint appends one sum data point tagged with the interface and
+// direction attributes; it is a no-op when the metric is disabled.
+func (m *metricK8sPodNetworkIo) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string, directionAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert(A.Interface, pcommon.NewValueString(interfaceAttributeValue))
+ dp.Attributes().Insert(A.Direction, pcommon.NewValueString(directionAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sPodNetworkIo) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sPodNetworkIo) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sPodNetworkIo returns a recorder whose data buffer is
+// allocated and initialized only when the metric is enabled in settings.
+func newMetricK8sPodNetworkIo(settings MetricSettings) metricK8sPodNetworkIo {
+ m := metricK8sPodNetworkIo{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sVolumeAvailable accumulates data points for the
+// "k8s.volume.available" gauge until emit is called.
+type metricK8sVolumeAvailable struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.volume.available metric with initial data.
+func (m *metricK8sVolumeAvailable) init() {
+ m.data.SetName("k8s.volume.available")
+ m.data.SetDescription("The number of available bytes in the volume.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends one gauge data point with the given timestamps and
+// value; it is a no-op when the metric is disabled.
+func (m *metricK8sVolumeAvailable) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sVolumeAvailable) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sVolumeAvailable) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sVolumeAvailable returns a recorder whose data buffer is
+// allocated and initialized only when the metric is enabled in settings.
+func newMetricK8sVolumeAvailable(settings MetricSettings) metricK8sVolumeAvailable {
+ m := metricK8sVolumeAvailable{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sVolumeCapacity accumulates data points for the
+// "k8s.volume.capacity" gauge until emit is called.
+type metricK8sVolumeCapacity struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.volume.capacity metric with initial data.
+func (m *metricK8sVolumeCapacity) init() {
+ m.data.SetName("k8s.volume.capacity")
+ m.data.SetDescription("The total capacity in bytes of the volume.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends one gauge data point with the given timestamps and
+// value; it is a no-op when the metric is disabled.
+func (m *metricK8sVolumeCapacity) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sVolumeCapacity) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sVolumeCapacity) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sVolumeCapacity returns a recorder whose data buffer is
+// allocated and initialized only when the metric is enabled in settings.
+func newMetricK8sVolumeCapacity(settings MetricSettings) metricK8sVolumeCapacity {
+ m := metricK8sVolumeCapacity{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sVolumeInodes accumulates data points for the
+// "k8s.volume.inodes" gauge until emit is called.
+type metricK8sVolumeInodes struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.volume.inodes metric with initial data.
+func (m *metricK8sVolumeInodes) init() {
+ m.data.SetName("k8s.volume.inodes")
+ m.data.SetDescription("The total inodes in the filesystem.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends one gauge data point with the given timestamps and
+// value; it is a no-op when the metric is disabled.
+func (m *metricK8sVolumeInodes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sVolumeInodes) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sVolumeInodes) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sVolumeInodes returns a recorder whose data buffer is
+// allocated and initialized only when the metric is enabled in settings.
+func newMetricK8sVolumeInodes(settings MetricSettings) metricK8sVolumeInodes {
+ m := metricK8sVolumeInodes{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sVolumeInodesFree accumulates data points for the
+// "k8s.volume.inodes.free" gauge until emit is called.
+type metricK8sVolumeInodesFree struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.volume.inodes.free metric with initial data.
+func (m *metricK8sVolumeInodesFree) init() {
+ m.data.SetName("k8s.volume.inodes.free")
+ m.data.SetDescription("The free inodes in the filesystem.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends one gauge data point with the given timestamps and
+// value; it is a no-op when the metric is disabled.
+func (m *metricK8sVolumeInodesFree) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sVolumeInodesFree) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sVolumeInodesFree) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sVolumeInodesFree returns a recorder whose data buffer is
+// allocated and initialized only when the metric is enabled in settings.
+func newMetricK8sVolumeInodesFree(settings MetricSettings) metricK8sVolumeInodesFree {
+ m := metricK8sVolumeInodesFree{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sVolumeInodesUsed records data points for the k8s.volume.inodes.used gauge metric.
+type metricK8sVolumeInodesUsed struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.volume.inodes.used metric with initial data.
+func (m *metricK8sVolumeInodesUsed) init() {
+ m.data.SetName("k8s.volume.inodes.used")
+ m.data.SetDescription("The inodes used by the filesystem. This may not equal inodes - free because filesystem may share inodes with other filesystems.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends a single gauge data point; it is a no-op when the metric is disabled.
+func (m *metricK8sVolumeInodesUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sVolumeInodesUsed) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sVolumeInodesUsed) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sVolumeInodesUsed creates a recorder for the k8s.volume.inodes.used metric;
+// the metric buffer is only allocated when enabled in settings.
+func newMetricK8sVolumeInodesUsed(settings MetricSettings) metricK8sVolumeInodesUsed {
+ m := metricK8sVolumeInodesUsed{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations
+// required to produce metric representation defined in metadata and user settings.
+type MetricsBuilder struct {
+ startTime pcommon.Timestamp // start time that will be applied to all recorded data points.
+ metricsCapacity int // maximum observed number of metrics per resource.
+ resourceCapacity int // maximum observed number of resource attributes.
+ metricsBuffer pmetric.Metrics // accumulates metrics data before emitting.
+ // One recorder per metric defined in metadata; each buffers its own data points until emit.
+ metricContainerCPUTime metricContainerCPUTime
+ metricContainerCPUUtilization metricContainerCPUUtilization
+ metricContainerFilesystemAvailable metricContainerFilesystemAvailable
+ metricContainerFilesystemCapacity metricContainerFilesystemCapacity
+ metricContainerFilesystemUsage metricContainerFilesystemUsage
+ metricContainerMemoryAvailable metricContainerMemoryAvailable
+ metricContainerMemoryMajorPageFaults metricContainerMemoryMajorPageFaults
+ metricContainerMemoryPageFaults metricContainerMemoryPageFaults
+ metricContainerMemoryRss metricContainerMemoryRss
+ metricContainerMemoryUsage metricContainerMemoryUsage
+ metricContainerMemoryWorkingSet metricContainerMemoryWorkingSet
+ metricK8sNodeCPUTime metricK8sNodeCPUTime
+ metricK8sNodeCPUUtilization metricK8sNodeCPUUtilization
+ metricK8sNodeFilesystemAvailable metricK8sNodeFilesystemAvailable
+ metricK8sNodeFilesystemCapacity metricK8sNodeFilesystemCapacity
+ metricK8sNodeFilesystemUsage metricK8sNodeFilesystemUsage
+ metricK8sNodeMemoryAvailable metricK8sNodeMemoryAvailable
+ metricK8sNodeMemoryMajorPageFaults metricK8sNodeMemoryMajorPageFaults
+ metricK8sNodeMemoryPageFaults metricK8sNodeMemoryPageFaults
+ metricK8sNodeMemoryRss metricK8sNodeMemoryRss
+ metricK8sNodeMemoryUsage metricK8sNodeMemoryUsage
+ metricK8sNodeMemoryWorkingSet metricK8sNodeMemoryWorkingSet
+ metricK8sNodeNetworkErrors metricK8sNodeNetworkErrors
+ metricK8sNodeNetworkIo metricK8sNodeNetworkIo
+ metricK8sPodCPUTime metricK8sPodCPUTime
+ metricK8sPodCPUUtilization metricK8sPodCPUUtilization
+ metricK8sPodFilesystemAvailable metricK8sPodFilesystemAvailable
+ metricK8sPodFilesystemCapacity metricK8sPodFilesystemCapacity
+ metricK8sPodFilesystemUsage metricK8sPodFilesystemUsage
+ metricK8sPodMemoryAvailable metricK8sPodMemoryAvailable
+ metricK8sPodMemoryMajorPageFaults metricK8sPodMemoryMajorPageFaults
+ metricK8sPodMemoryPageFaults metricK8sPodMemoryPageFaults
+ metricK8sPodMemoryRss metricK8sPodMemoryRss
+ metricK8sPodMemoryUsage metricK8sPodMemoryUsage
+ metricK8sPodMemoryWorkingSet metricK8sPodMemoryWorkingSet
+ metricK8sPodNetworkErrors metricK8sPodNetworkErrors
+ metricK8sPodNetworkIo metricK8sPodNetworkIo
+ metricK8sVolumeAvailable metricK8sVolumeAvailable
+ metricK8sVolumeCapacity metricK8sVolumeCapacity
+ metricK8sVolumeInodes metricK8sVolumeInodes
+ metricK8sVolumeInodesFree metricK8sVolumeInodesFree
+ metricK8sVolumeInodesUsed metricK8sVolumeInodesUsed
+}
+
+// metricBuilderOption applies changes to default metrics builder.
+type metricBuilderOption func(*MetricsBuilder)
+
+// WithStartTime sets startTime on the metrics builder.
+// It overrides the default start time (time.Now at construction) used for all data points.
+func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption {
+ return func(mb *MetricsBuilder) {
+ mb.startTime = startTime
+ }
+}
+
+// NewMetricsBuilder creates a MetricsBuilder with the provided per-metric settings,
+// one recorder per metric, and applies any metricBuilderOption (e.g. WithStartTime).
+// The default start time for all data points is the construction time.
+func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder {
+ mb := &MetricsBuilder{
+ startTime: pcommon.NewTimestampFromTime(time.Now()),
+ metricsBuffer: pmetric.NewMetrics(),
+ metricContainerCPUTime: newMetricContainerCPUTime(settings.ContainerCPUTime),
+ metricContainerCPUUtilization: newMetricContainerCPUUtilization(settings.ContainerCPUUtilization),
+ metricContainerFilesystemAvailable: newMetricContainerFilesystemAvailable(settings.ContainerFilesystemAvailable),
+ metricContainerFilesystemCapacity: newMetricContainerFilesystemCapacity(settings.ContainerFilesystemCapacity),
+ metricContainerFilesystemUsage: newMetricContainerFilesystemUsage(settings.ContainerFilesystemUsage),
+ metricContainerMemoryAvailable: newMetricContainerMemoryAvailable(settings.ContainerMemoryAvailable),
+ metricContainerMemoryMajorPageFaults: newMetricContainerMemoryMajorPageFaults(settings.ContainerMemoryMajorPageFaults),
+ metricContainerMemoryPageFaults: newMetricContainerMemoryPageFaults(settings.ContainerMemoryPageFaults),
+ metricContainerMemoryRss: newMetricContainerMemoryRss(settings.ContainerMemoryRss),
+ metricContainerMemoryUsage: newMetricContainerMemoryUsage(settings.ContainerMemoryUsage),
+ metricContainerMemoryWorkingSet: newMetricContainerMemoryWorkingSet(settings.ContainerMemoryWorkingSet),
+ metricK8sNodeCPUTime: newMetricK8sNodeCPUTime(settings.K8sNodeCPUTime),
+ metricK8sNodeCPUUtilization: newMetricK8sNodeCPUUtilization(settings.K8sNodeCPUUtilization),
+ metricK8sNodeFilesystemAvailable: newMetricK8sNodeFilesystemAvailable(settings.K8sNodeFilesystemAvailable),
+ metricK8sNodeFilesystemCapacity: newMetricK8sNodeFilesystemCapacity(settings.K8sNodeFilesystemCapacity),
+ metricK8sNodeFilesystemUsage: newMetricK8sNodeFilesystemUsage(settings.K8sNodeFilesystemUsage),
+ metricK8sNodeMemoryAvailable: newMetricK8sNodeMemoryAvailable(settings.K8sNodeMemoryAvailable),
+ metricK8sNodeMemoryMajorPageFaults: newMetricK8sNodeMemoryMajorPageFaults(settings.K8sNodeMemoryMajorPageFaults),
+ metricK8sNodeMemoryPageFaults: newMetricK8sNodeMemoryPageFaults(settings.K8sNodeMemoryPageFaults),
+ metricK8sNodeMemoryRss: newMetricK8sNodeMemoryRss(settings.K8sNodeMemoryRss),
+ metricK8sNodeMemoryUsage: newMetricK8sNodeMemoryUsage(settings.K8sNodeMemoryUsage),
+ metricK8sNodeMemoryWorkingSet: newMetricK8sNodeMemoryWorkingSet(settings.K8sNodeMemoryWorkingSet),
+ metricK8sNodeNetworkErrors: newMetricK8sNodeNetworkErrors(settings.K8sNodeNetworkErrors),
+ metricK8sNodeNetworkIo: newMetricK8sNodeNetworkIo(settings.K8sNodeNetworkIo),
+ metricK8sPodCPUTime: newMetricK8sPodCPUTime(settings.K8sPodCPUTime),
+ metricK8sPodCPUUtilization: newMetricK8sPodCPUUtilization(settings.K8sPodCPUUtilization),
+ metricK8sPodFilesystemAvailable: newMetricK8sPodFilesystemAvailable(settings.K8sPodFilesystemAvailable),
+ metricK8sPodFilesystemCapacity: newMetricK8sPodFilesystemCapacity(settings.K8sPodFilesystemCapacity),
+ metricK8sPodFilesystemUsage: newMetricK8sPodFilesystemUsage(settings.K8sPodFilesystemUsage),
+ metricK8sPodMemoryAvailable: newMetricK8sPodMemoryAvailable(settings.K8sPodMemoryAvailable),
+ metricK8sPodMemoryMajorPageFaults: newMetricK8sPodMemoryMajorPageFaults(settings.K8sPodMemoryMajorPageFaults),
+ metricK8sPodMemoryPageFaults: newMetricK8sPodMemoryPageFaults(settings.K8sPodMemoryPageFaults),
+ metricK8sPodMemoryRss: newMetricK8sPodMemoryRss(settings.K8sPodMemoryRss),
+ metricK8sPodMemoryUsage: newMetricK8sPodMemoryUsage(settings.K8sPodMemoryUsage),
+ metricK8sPodMemoryWorkingSet: newMetricK8sPodMemoryWorkingSet(settings.K8sPodMemoryWorkingSet),
+ metricK8sPodNetworkErrors: newMetricK8sPodNetworkErrors(settings.K8sPodNetworkErrors),
+ metricK8sPodNetworkIo: newMetricK8sPodNetworkIo(settings.K8sPodNetworkIo),
+ metricK8sVolumeAvailable: newMetricK8sVolumeAvailable(settings.K8sVolumeAvailable),
+ metricK8sVolumeCapacity: newMetricK8sVolumeCapacity(settings.K8sVolumeCapacity),
+ metricK8sVolumeInodes: newMetricK8sVolumeInodes(settings.K8sVolumeInodes),
+ metricK8sVolumeInodesFree: newMetricK8sVolumeInodesFree(settings.K8sVolumeInodesFree),
+ metricK8sVolumeInodesUsed: newMetricK8sVolumeInodesUsed(settings.K8sVolumeInodesUsed),
+ }
+ for _, op := range options {
+ op(mb)
+ }
+ return mb
+}
+
+// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity.
+func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) {
+ // EmitForResource appends exactly one ScopeMetrics per resource, so index 0 is the only scope.
+ if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() {
+ mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len()
+ }
+ if mb.resourceCapacity < rm.Resource().Attributes().Len() {
+ mb.resourceCapacity = rm.Resource().Attributes().Len()
+ }
+}
+
+// ResourceOption applies changes to provided resource.
+// Resource options are passed to EmitForResource/Emit to set resource attributes.
+type ResourceOption func(pcommon.Resource)
+
+// WithAwsVolumeID sets provided value as "aws.volume.id" attribute for current resource.
+func WithAwsVolumeID(val string) ResourceOption {
+ return func(r pcommon.Resource) {
+ r.Attributes().UpsertString("aws.volume.id", val)
+ }
+}
+
+// WithContainerID sets provided value as "container.id" attribute for current resource.
+func WithContainerID(val string) ResourceOption {
+ return func(r pcommon.Resource) {
+ r.Attributes().UpsertString("container.id", val)
+ }
+}
+
+// WithContainerName sets provided value as "container.name" attribute for current resource.
+func WithContainerName(val string) ResourceOption {
+ return func(r pcommon.Resource) {
+ r.Attributes().UpsertString("container.name", val)
+ }
+}
+
+// WithFsType sets provided value as "fs.type" attribute for current resource.
+func WithFsType(val string) ResourceOption {
+ return func(r pcommon.Resource) {
+ r.Attributes().UpsertString("fs.type", val)
+ }
+}
+
+// WithGcePdName sets provided value as "gce.pd.name" attribute for current resource.
+func WithGcePdName(val string) ResourceOption {
+ return func(r pcommon.Resource) {
+ r.Attributes().UpsertString("gce.pd.name", val)
+ }
+}
+
+// WithGlusterfsEndpointsName sets provided value as "glusterfs.endpoints.name" attribute for current resource.
+func WithGlusterfsEndpointsName(val string) ResourceOption {
+ return func(r pcommon.Resource) {
+ r.Attributes().UpsertString("glusterfs.endpoints.name", val)
+ }
+}
+
+// WithGlusterfsPath sets provided value as "glusterfs.path" attribute for current resource.
+func WithGlusterfsPath(val string) ResourceOption {
+ return func(r pcommon.Resource) {
+ r.Attributes().UpsertString("glusterfs.path", val)
+ }
+}
+
+// WithK8sNamespaceName sets provided value as "k8s.namespace.name" attribute for current resource.
+func WithK8sNamespaceName(val string) ResourceOption {
+ return func(r pcommon.Resource) {
+ r.Attributes().UpsertString("k8s.namespace.name", val)
+ }
+}
+
+// WithK8sNodeName sets provided value as "k8s.node.name" attribute for current resource.
+func WithK8sNodeName(val string) ResourceOption {
+ return func(r pcommon.Resource) {
+ r.Attributes().UpsertString("k8s.node.name", val)
+ }
+}
+
+// WithK8sPersistentvolumeclaimName sets provided value as "k8s.persistentvolumeclaim.name" attribute for current resource.
+func WithK8sPersistentvolumeclaimName(val string) ResourceOption {
+ return func(r pcommon.Resource) {
+ r.Attributes().UpsertString("k8s.persistentvolumeclaim.name", val)
+ }
+}
+
+// WithK8sPodName sets provided value as "k8s.pod.name" attribute for current resource.
+func WithK8sPodName(val string) ResourceOption {
+ return func(r pcommon.Resource) {
+ r.Attributes().UpsertString("k8s.pod.name", val)
+ }
+}
+
+// WithK8sPodUID sets provided value as "k8s.pod.uid" attribute for current resource.
+func WithK8sPodUID(val string) ResourceOption {
+ return func(r pcommon.Resource) {
+ r.Attributes().UpsertString("k8s.pod.uid", val)
+ }
+}
+
+// WithK8sVolumeName sets provided value as "k8s.volume.name" attribute for current resource.
+func WithK8sVolumeName(val string) ResourceOption {
+ return func(r pcommon.Resource) {
+ r.Attributes().UpsertString("k8s.volume.name", val)
+ }
+}
+
+// WithK8sVolumeType sets provided value as "k8s.volume.type" attribute for current resource.
+func WithK8sVolumeType(val string) ResourceOption {
+ return func(r pcommon.Resource) {
+ r.Attributes().UpsertString("k8s.volume.type", val)
+ }
+}
+
+// WithPartition sets provided value as "partition" attribute for current resource.
+func WithPartition(val string) ResourceOption {
+ return func(r pcommon.Resource) {
+ r.Attributes().UpsertString("partition", val)
+ }
+}
+
+// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for
+// recording another set of data points as part of another resource. This function can be helpful when one scraper
+// needs to emit metrics from several resources. Otherwise calling this function is not required,
+// just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments.
+func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) {
+ rm := pmetric.NewResourceMetrics()
+ rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity)
+ for _, op := range ro {
+ op(rm.Resource())
+ }
+ // A single scope carries every metric emitted for this resource.
+ ils := rm.ScopeMetrics().AppendEmpty()
+ ils.Scope().SetName("otelcol/kubeletstatsreceiver")
+ ils.Metrics().EnsureCapacity(mb.metricsCapacity)
+ mb.metricContainerCPUTime.emit(ils.Metrics())
+ mb.metricContainerCPUUtilization.emit(ils.Metrics())
+ mb.metricContainerFilesystemAvailable.emit(ils.Metrics())
+ mb.metricContainerFilesystemCapacity.emit(ils.Metrics())
+ mb.metricContainerFilesystemUsage.emit(ils.Metrics())
+ mb.metricContainerMemoryAvailable.emit(ils.Metrics())
+ mb.metricContainerMemoryMajorPageFaults.emit(ils.Metrics())
+ mb.metricContainerMemoryPageFaults.emit(ils.Metrics())
+ mb.metricContainerMemoryRss.emit(ils.Metrics())
+ mb.metricContainerMemoryUsage.emit(ils.Metrics())
+ mb.metricContainerMemoryWorkingSet.emit(ils.Metrics())
+ mb.metricK8sNodeCPUTime.emit(ils.Metrics())
+ mb.metricK8sNodeCPUUtilization.emit(ils.Metrics())
+ mb.metricK8sNodeFilesystemAvailable.emit(ils.Metrics())
+ mb.metricK8sNodeFilesystemCapacity.emit(ils.Metrics())
+ mb.metricK8sNodeFilesystemUsage.emit(ils.Metrics())
+ mb.metricK8sNodeMemoryAvailable.emit(ils.Metrics())
+ mb.metricK8sNodeMemoryMajorPageFaults.emit(ils.Metrics())
+ mb.metricK8sNodeMemoryPageFaults.emit(ils.Metrics())
+ mb.metricK8sNodeMemoryRss.emit(ils.Metrics())
+ mb.metricK8sNodeMemoryUsage.emit(ils.Metrics())
+ mb.metricK8sNodeMemoryWorkingSet.emit(ils.Metrics())
+ mb.metricK8sNodeNetworkErrors.emit(ils.Metrics())
+ mb.metricK8sNodeNetworkIo.emit(ils.Metrics())
+ mb.metricK8sPodCPUTime.emit(ils.Metrics())
+ mb.metricK8sPodCPUUtilization.emit(ils.Metrics())
+ mb.metricK8sPodFilesystemAvailable.emit(ils.Metrics())
+ mb.metricK8sPodFilesystemCapacity.emit(ils.Metrics())
+ mb.metricK8sPodFilesystemUsage.emit(ils.Metrics())
+ mb.metricK8sPodMemoryAvailable.emit(ils.Metrics())
+ mb.metricK8sPodMemoryMajorPageFaults.emit(ils.Metrics())
+ mb.metricK8sPodMemoryPageFaults.emit(ils.Metrics())
+ mb.metricK8sPodMemoryRss.emit(ils.Metrics())
+ mb.metricK8sPodMemoryUsage.emit(ils.Metrics())
+ mb.metricK8sPodMemoryWorkingSet.emit(ils.Metrics())
+ mb.metricK8sPodNetworkErrors.emit(ils.Metrics())
+ mb.metricK8sPodNetworkIo.emit(ils.Metrics())
+ mb.metricK8sVolumeAvailable.emit(ils.Metrics())
+ mb.metricK8sVolumeCapacity.emit(ils.Metrics())
+ mb.metricK8sVolumeInodes.emit(ils.Metrics())
+ mb.metricK8sVolumeInodesFree.emit(ils.Metrics())
+ mb.metricK8sVolumeInodesUsed.emit(ils.Metrics())
+ // Drop the resource entirely when no enabled metric recorded any data points.
+ if ils.Metrics().Len() > 0 {
+ mb.updateCapacity(rm)
+ rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty())
+ }
+}
+
+// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for
+// recording another set of metrics. This function will be responsible for applying all the transformations required to
+// produce metric representation defined in metadata and user settings, e.g. delta or cumulative.
+func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics {
+ mb.EmitForResource(ro...)
+ metrics := pmetric.NewMetrics()
+ // MoveTo transfers ownership and leaves metricsBuffer empty for the next cycle.
+ mb.metricsBuffer.MoveTo(metrics)
+ return metrics
+}
+
+// The Record*DataPoint methods below append one data point to the corresponding
+// metric recorder, stamping each point with the builder's startTime.
+
+// RecordContainerCPUTimeDataPoint adds a data point to container.cpu.time metric.
+func (mb *MetricsBuilder) RecordContainerCPUTimeDataPoint(ts pcommon.Timestamp, val float64) {
+ mb.metricContainerCPUTime.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerCPUUtilizationDataPoint adds a data point to container.cpu.utilization metric.
+func (mb *MetricsBuilder) RecordContainerCPUUtilizationDataPoint(ts pcommon.Timestamp, val float64) {
+ mb.metricContainerCPUUtilization.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerFilesystemAvailableDataPoint adds a data point to container.filesystem.available metric.
+func (mb *MetricsBuilder) RecordContainerFilesystemAvailableDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerFilesystemAvailable.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerFilesystemCapacityDataPoint adds a data point to container.filesystem.capacity metric.
+func (mb *MetricsBuilder) RecordContainerFilesystemCapacityDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerFilesystemCapacity.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerFilesystemUsageDataPoint adds a data point to container.filesystem.usage metric.
+func (mb *MetricsBuilder) RecordContainerFilesystemUsageDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerFilesystemUsage.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryAvailableDataPoint adds a data point to container.memory.available metric.
+func (mb *MetricsBuilder) RecordContainerMemoryAvailableDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryAvailable.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryMajorPageFaultsDataPoint adds a data point to container.memory.major_page_faults metric.
+func (mb *MetricsBuilder) RecordContainerMemoryMajorPageFaultsDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryMajorPageFaults.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryPageFaultsDataPoint adds a data point to container.memory.page_faults metric.
+func (mb *MetricsBuilder) RecordContainerMemoryPageFaultsDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryPageFaults.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryRssDataPoint adds a data point to container.memory.rss metric.
+func (mb *MetricsBuilder) RecordContainerMemoryRssDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryRss.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryUsageDataPoint adds a data point to container.memory.usage metric.
+func (mb *MetricsBuilder) RecordContainerMemoryUsageDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryUsage.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryWorkingSetDataPoint adds a data point to container.memory.working_set metric.
+func (mb *MetricsBuilder) RecordContainerMemoryWorkingSetDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryWorkingSet.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sNodeCPUTimeDataPoint adds a data point to k8s.node.cpu.time metric.
+func (mb *MetricsBuilder) RecordK8sNodeCPUTimeDataPoint(ts pcommon.Timestamp, val float64) {
+ mb.metricK8sNodeCPUTime.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sNodeCPUUtilizationDataPoint adds a data point to k8s.node.cpu.utilization metric.
+func (mb *MetricsBuilder) RecordK8sNodeCPUUtilizationDataPoint(ts pcommon.Timestamp, val float64) {
+ mb.metricK8sNodeCPUUtilization.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sNodeFilesystemAvailableDataPoint adds a data point to k8s.node.filesystem.available metric.
+func (mb *MetricsBuilder) RecordK8sNodeFilesystemAvailableDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sNodeFilesystemAvailable.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sNodeFilesystemCapacityDataPoint adds a data point to k8s.node.filesystem.capacity metric.
+func (mb *MetricsBuilder) RecordK8sNodeFilesystemCapacityDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sNodeFilesystemCapacity.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sNodeFilesystemUsageDataPoint adds a data point to k8s.node.filesystem.usage metric.
+func (mb *MetricsBuilder) RecordK8sNodeFilesystemUsageDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sNodeFilesystemUsage.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sNodeMemoryAvailableDataPoint adds a data point to k8s.node.memory.available metric.
+func (mb *MetricsBuilder) RecordK8sNodeMemoryAvailableDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sNodeMemoryAvailable.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sNodeMemoryMajorPageFaultsDataPoint adds a data point to k8s.node.memory.major_page_faults metric.
+func (mb *MetricsBuilder) RecordK8sNodeMemoryMajorPageFaultsDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sNodeMemoryMajorPageFaults.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sNodeMemoryPageFaultsDataPoint adds a data point to k8s.node.memory.page_faults metric.
+func (mb *MetricsBuilder) RecordK8sNodeMemoryPageFaultsDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sNodeMemoryPageFaults.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sNodeMemoryRssDataPoint adds a data point to k8s.node.memory.rss metric.
+func (mb *MetricsBuilder) RecordK8sNodeMemoryRssDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sNodeMemoryRss.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sNodeMemoryUsageDataPoint adds a data point to k8s.node.memory.usage metric.
+func (mb *MetricsBuilder) RecordK8sNodeMemoryUsageDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sNodeMemoryUsage.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sNodeMemoryWorkingSetDataPoint adds a data point to k8s.node.memory.working_set metric.
+func (mb *MetricsBuilder) RecordK8sNodeMemoryWorkingSetDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sNodeMemoryWorkingSet.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sNodeNetworkErrorsDataPoint adds a data point to k8s.node.network.errors metric.
+func (mb *MetricsBuilder) RecordK8sNodeNetworkErrorsDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string, directionAttributeValue AttributeDirection) {
+ mb.metricK8sNodeNetworkErrors.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue, directionAttributeValue.String())
+}
+
+// RecordK8sNodeNetworkIoDataPoint adds a data point to k8s.node.network.io metric.
+func (mb *MetricsBuilder) RecordK8sNodeNetworkIoDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string, directionAttributeValue AttributeDirection) {
+ mb.metricK8sNodeNetworkIo.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue, directionAttributeValue.String())
+}
+
+// RecordK8sPodCPUTimeDataPoint adds a data point to k8s.pod.cpu.time metric.
+func (mb *MetricsBuilder) RecordK8sPodCPUTimeDataPoint(ts pcommon.Timestamp, val float64) {
+ mb.metricK8sPodCPUTime.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sPodCPUUtilizationDataPoint adds a data point to k8s.pod.cpu.utilization metric.
+func (mb *MetricsBuilder) RecordK8sPodCPUUtilizationDataPoint(ts pcommon.Timestamp, val float64) {
+ mb.metricK8sPodCPUUtilization.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sPodFilesystemAvailableDataPoint adds a data point to k8s.pod.filesystem.available metric.
+func (mb *MetricsBuilder) RecordK8sPodFilesystemAvailableDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sPodFilesystemAvailable.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sPodFilesystemCapacityDataPoint adds a data point to k8s.pod.filesystem.capacity metric.
+func (mb *MetricsBuilder) RecordK8sPodFilesystemCapacityDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sPodFilesystemCapacity.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sPodFilesystemUsageDataPoint adds a data point to k8s.pod.filesystem.usage metric.
+func (mb *MetricsBuilder) RecordK8sPodFilesystemUsageDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sPodFilesystemUsage.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sPodMemoryAvailableDataPoint adds a data point to k8s.pod.memory.available metric.
+func (mb *MetricsBuilder) RecordK8sPodMemoryAvailableDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sPodMemoryAvailable.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sPodMemoryMajorPageFaultsDataPoint adds a data point to k8s.pod.memory.major_page_faults metric.
+func (mb *MetricsBuilder) RecordK8sPodMemoryMajorPageFaultsDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sPodMemoryMajorPageFaults.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sPodMemoryPageFaultsDataPoint adds a data point to k8s.pod.memory.page_faults metric.
+func (mb *MetricsBuilder) RecordK8sPodMemoryPageFaultsDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sPodMemoryPageFaults.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sPodMemoryRssDataPoint adds a data point to k8s.pod.memory.rss metric.
+func (mb *MetricsBuilder) RecordK8sPodMemoryRssDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sPodMemoryRss.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sPodMemoryUsageDataPoint adds a data point to k8s.pod.memory.usage metric.
+func (mb *MetricsBuilder) RecordK8sPodMemoryUsageDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sPodMemoryUsage.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sPodMemoryWorkingSetDataPoint adds a data point to k8s.pod.memory.working_set metric.
+func (mb *MetricsBuilder) RecordK8sPodMemoryWorkingSetDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sPodMemoryWorkingSet.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sPodNetworkErrorsDataPoint adds a data point to k8s.pod.network.errors metric.
+func (mb *MetricsBuilder) RecordK8sPodNetworkErrorsDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string, directionAttributeValue AttributeDirection) {
+ mb.metricK8sPodNetworkErrors.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue, directionAttributeValue.String())
+}
+
+// RecordK8sPodNetworkIoDataPoint adds a data point to k8s.pod.network.io metric.
+func (mb *MetricsBuilder) RecordK8sPodNetworkIoDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string, directionAttributeValue AttributeDirection) {
+ mb.metricK8sPodNetworkIo.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue, directionAttributeValue.String())
+}
+
+// RecordK8sVolumeAvailableDataPoint adds a data point to k8s.volume.available metric.
+func (mb *MetricsBuilder) RecordK8sVolumeAvailableDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sVolumeAvailable.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sVolumeCapacityDataPoint adds a data point to k8s.volume.capacity metric.
+func (mb *MetricsBuilder) RecordK8sVolumeCapacityDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sVolumeCapacity.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sVolumeInodesDataPoint adds a data point to k8s.volume.inodes metric.
+func (mb *MetricsBuilder) RecordK8sVolumeInodesDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sVolumeInodes.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sVolumeInodesFreeDataPoint adds a data point to k8s.volume.inodes.free metric.
+func (mb *MetricsBuilder) RecordK8sVolumeInodesFreeDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sVolumeInodesFree.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sVolumeInodesUsedDataPoint adds a data point to k8s.volume.inodes.used metric.
+func (mb *MetricsBuilder) RecordK8sVolumeInodesUsedDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sVolumeInodesUsed.recordDataPoint(mb.startTime, ts, val)
+}
+
+// Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted,
+// and metrics builder should update its startTime and reset its internal state accordingly.
+func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) {
+ mb.startTime = pcommon.NewTimestampFromTime(time.Now())
+ for _, op := range options {
+ op(mb)
+ }
+}
+
+// Attributes contains the possible metric attributes that can be used.
+// The struct maps each attribute's Go name to its wire-format attribute key.
+var Attributes = struct {
+ // Direction (Direction of flow of bytes/operations (receive or transmit).)
+ Direction string
+ // Interface (Name of the network interface.)
+ Interface string
+}{
+ "direction",
+ "interface",
+}
+
+// A is an alias for Attributes.
+var A = Attributes
diff --git a/receiver/kubeletstatsreceiver/internal/metadata/metrics.go b/receiver/kubeletstatsreceiver/internal/metadata/metrics.go
index 3ccc516674c2..de358ffd9c6a 100644
--- a/receiver/kubeletstatsreceiver/internal/metadata/metrics.go
+++ b/receiver/kubeletstatsreceiver/internal/metadata/metrics.go
@@ -13,114 +13,121 @@
// limitations under the License.
package metadata // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
+import "go.opentelemetry.io/collector/pdata/pcommon"
+
+type RecordDoubleDataPointFunc func(*MetricsBuilder, pcommon.Timestamp, float64)
+
+type RecordIntDataPointFunc func(*MetricsBuilder, pcommon.Timestamp, int64)
+
+type RecordIntDataPointWithDirectionFunc func(*MetricsBuilder, pcommon.Timestamp, int64, string, AttributeDirection)
type CPUMetrics struct {
- Time MetricIntf
- Utilization MetricIntf
+ Time RecordDoubleDataPointFunc
+ Utilization RecordDoubleDataPointFunc
}
var NodeCPUMetrics = CPUMetrics{
- Time: M.K8sNodeCPUTime,
- Utilization: M.K8sNodeCPUUtilization,
+ Time: (*MetricsBuilder).RecordK8sNodeCPUTimeDataPoint,
+ Utilization: (*MetricsBuilder).RecordK8sNodeCPUUtilizationDataPoint,
}
var PodCPUMetrics = CPUMetrics{
- Time: M.K8sPodCPUTime,
- Utilization: M.K8sPodCPUUtilization,
+ Time: (*MetricsBuilder).RecordK8sPodCPUTimeDataPoint,
+ Utilization: (*MetricsBuilder).RecordK8sPodCPUUtilizationDataPoint,
}
var ContainerCPUMetrics = CPUMetrics{
- Time: M.ContainerCPUTime,
- Utilization: M.ContainerCPUUtilization,
+ Time: (*MetricsBuilder).RecordContainerCPUTimeDataPoint,
+ Utilization: (*MetricsBuilder).RecordContainerCPUUtilizationDataPoint,
}
type MemoryMetrics struct {
- Available MetricIntf
- Usage MetricIntf
- Rss MetricIntf
- WorkingSet MetricIntf
- PageFaults MetricIntf
- MajorPageFaults MetricIntf
+ Available RecordIntDataPointFunc
+ Usage RecordIntDataPointFunc
+ Rss RecordIntDataPointFunc
+ WorkingSet RecordIntDataPointFunc
+ PageFaults RecordIntDataPointFunc
+ MajorPageFaults RecordIntDataPointFunc
}
var NodeMemoryMetrics = MemoryMetrics{
- Available: M.K8sNodeMemoryAvailable,
- Usage: M.K8sNodeMemoryUsage,
- Rss: M.K8sNodeMemoryRss,
- WorkingSet: M.K8sNodeMemoryWorkingSet,
- PageFaults: M.K8sNodeMemoryPageFaults,
- MajorPageFaults: M.K8sNodeMemoryMajorPageFaults,
+ Available: (*MetricsBuilder).RecordK8sNodeMemoryAvailableDataPoint,
+ Usage: (*MetricsBuilder).RecordK8sNodeMemoryUsageDataPoint,
+ Rss: (*MetricsBuilder).RecordK8sNodeMemoryRssDataPoint,
+ WorkingSet: (*MetricsBuilder).RecordK8sNodeMemoryWorkingSetDataPoint,
+ PageFaults: (*MetricsBuilder).RecordK8sNodeMemoryPageFaultsDataPoint,
+ MajorPageFaults: (*MetricsBuilder).RecordK8sNodeMemoryMajorPageFaultsDataPoint,
}
var PodMemoryMetrics = MemoryMetrics{
- Available: M.K8sPodMemoryAvailable,
- Usage: M.K8sPodMemoryUsage,
- Rss: M.K8sPodMemoryRss,
- WorkingSet: M.K8sPodMemoryWorkingSet,
- PageFaults: M.K8sPodMemoryPageFaults,
- MajorPageFaults: M.K8sPodMemoryMajorPageFaults,
+ Available: (*MetricsBuilder).RecordK8sPodMemoryAvailableDataPoint,
+ Usage: (*MetricsBuilder).RecordK8sPodMemoryUsageDataPoint,
+ Rss: (*MetricsBuilder).RecordK8sPodMemoryRssDataPoint,
+ WorkingSet: (*MetricsBuilder).RecordK8sPodMemoryWorkingSetDataPoint,
+ PageFaults: (*MetricsBuilder).RecordK8sPodMemoryPageFaultsDataPoint,
+ MajorPageFaults: (*MetricsBuilder).RecordK8sPodMemoryMajorPageFaultsDataPoint,
}
var ContainerMemoryMetrics = MemoryMetrics{
- Available: M.ContainerMemoryAvailable,
- Usage: M.ContainerMemoryUsage,
- Rss: M.ContainerMemoryRss,
- WorkingSet: M.ContainerMemoryWorkingSet,
- PageFaults: M.ContainerMemoryPageFaults,
- MajorPageFaults: M.ContainerMemoryMajorPageFaults,
+ Available: (*MetricsBuilder).RecordContainerMemoryAvailableDataPoint,
+ Usage: (*MetricsBuilder).RecordContainerMemoryUsageDataPoint,
+ Rss: (*MetricsBuilder).RecordContainerMemoryRssDataPoint,
+ WorkingSet: (*MetricsBuilder).RecordContainerMemoryWorkingSetDataPoint,
+ PageFaults: (*MetricsBuilder).RecordContainerMemoryPageFaultsDataPoint,
+ MajorPageFaults: (*MetricsBuilder).RecordContainerMemoryMajorPageFaultsDataPoint,
}
type FilesystemMetrics struct {
- Available MetricIntf
- Capacity MetricIntf
- Usage MetricIntf
+ Available RecordIntDataPointFunc
+ Capacity RecordIntDataPointFunc
+ Usage RecordIntDataPointFunc
}
var NodeFilesystemMetrics = FilesystemMetrics{
- Available: M.K8sNodeFilesystemAvailable,
- Capacity: M.K8sNodeFilesystemCapacity,
- Usage: M.K8sNodeFilesystemUsage,
+ Available: (*MetricsBuilder).RecordK8sNodeFilesystemAvailableDataPoint,
+ Capacity: (*MetricsBuilder).RecordK8sNodeFilesystemCapacityDataPoint,
+ Usage: (*MetricsBuilder).RecordK8sNodeFilesystemUsageDataPoint,
}
var PodFilesystemMetrics = FilesystemMetrics{
- Available: M.K8sPodFilesystemAvailable,
- Capacity: M.K8sPodFilesystemCapacity,
- Usage: M.K8sPodFilesystemUsage,
+ Available: (*MetricsBuilder).RecordK8sPodFilesystemAvailableDataPoint,
+ Capacity: (*MetricsBuilder).RecordK8sPodFilesystemCapacityDataPoint,
+ Usage: (*MetricsBuilder).RecordK8sPodFilesystemUsageDataPoint,
}
var ContainerFilesystemMetrics = FilesystemMetrics{
- Available: M.ContainerFilesystemAvailable,
- Capacity: M.ContainerFilesystemCapacity,
- Usage: M.ContainerFilesystemUsage,
+ Available: (*MetricsBuilder).RecordContainerFilesystemAvailableDataPoint,
+ Capacity: (*MetricsBuilder).RecordContainerFilesystemCapacityDataPoint,
+ Usage: (*MetricsBuilder).RecordContainerFilesystemUsageDataPoint,
}
type NetworkMetrics struct {
- IO MetricIntf
- Errors MetricIntf
+ IO RecordIntDataPointWithDirectionFunc
+ Errors RecordIntDataPointWithDirectionFunc
}
var NodeNetworkMetrics = NetworkMetrics{
- IO: M.K8sNodeNetworkIo,
- Errors: M.K8sNodeNetworkErrors,
+ IO: (*MetricsBuilder).RecordK8sNodeNetworkIoDataPoint,
+ Errors: (*MetricsBuilder).RecordK8sNodeNetworkErrorsDataPoint,
}
var PodNetworkMetrics = NetworkMetrics{
- IO: M.K8sPodNetworkIo,
- Errors: M.K8sPodNetworkErrors,
+ IO: (*MetricsBuilder).RecordK8sPodNetworkIoDataPoint,
+ Errors: (*MetricsBuilder).RecordK8sPodNetworkErrorsDataPoint,
}
type VolumeMetrics struct {
- Available MetricIntf
- Capacity MetricIntf
- Inodes MetricIntf
- InodesFree MetricIntf
- InodesUsed MetricIntf
+ Available RecordIntDataPointFunc
+ Capacity RecordIntDataPointFunc
+ Inodes RecordIntDataPointFunc
+ InodesFree RecordIntDataPointFunc
+ InodesUsed RecordIntDataPointFunc
}
var K8sVolumeMetrics = VolumeMetrics{
- Available: M.K8sVolumeAvailable,
- Capacity: M.K8sVolumeCapacity,
- Inodes: M.K8sVolumeInodes,
- InodesFree: M.K8sVolumeInodesFree,
- InodesUsed: M.K8sVolumeInodesUsed,
+ Available: (*MetricsBuilder).RecordK8sVolumeAvailableDataPoint,
+ Capacity: (*MetricsBuilder).RecordK8sVolumeCapacityDataPoint,
+ Inodes: (*MetricsBuilder).RecordK8sVolumeInodesDataPoint,
+ InodesFree: (*MetricsBuilder).RecordK8sVolumeInodesFreeDataPoint,
+ InodesUsed: (*MetricsBuilder).RecordK8sVolumeInodesUsedDataPoint,
}
diff --git a/receiver/kubeletstatsreceiver/scraper.go b/receiver/kubeletstatsreceiver/scraper.go
index cf4ab41a83d2..b9e4da01bc8a 100644
--- a/receiver/kubeletstatsreceiver/scraper.go
+++ b/receiver/kubeletstatsreceiver/scraper.go
@@ -17,6 +17,7 @@ package kubeletstatsreceiver // import "github.com/open-telemetry/opentelemetry-
import (
"context"
"fmt"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
"time"
"go.opentelemetry.io/collector/component"
@@ -47,12 +48,14 @@ type kubletScraper struct {
metricGroupsToCollect map[kubelet.MetricGroup]bool
k8sAPIClient kubernetes.Interface
cachedVolumeLabels map[string]map[string]string
+ mb *metadata.MetricsBuilder
}
func newKubletScraper(
restClient kubelet.RestClient,
set component.ReceiverCreateSettings,
rOptions *scraperOptions,
+ metricsConfig metadata.MetricsSettings,
) (scraperhelper.Scraper, error) {
ks := &kubletScraper{
statsProvider: kubelet.NewStatsProvider(restClient),
@@ -62,6 +65,7 @@ func newKubletScraper(
metricGroupsToCollect: rOptions.metricGroupsToCollect,
k8sAPIClient: rOptions.k8sAPIClient,
cachedVolumeLabels: make(map[string]map[string]string),
+ mb: metadata.NewMetricsBuilder(metricsConfig),
}
return scraperhelper.NewScraper(typeStr, ks.scrape)
}
@@ -84,7 +88,7 @@ func (r *kubletScraper) scrape(context.Context) (pmetric.Metrics, error) {
}
metadata := kubelet.NewMetadata(r.extraMetadataLabels, podsMetadata, r.detailedPVCLabelsSetter())
- mds := kubelet.MetricsData(r.logger, summary, metadata, r.metricGroupsToCollect)
+ mds := kubelet.MetricsData(r.logger, summary, metadata, r.metricGroupsToCollect, r.mb)
md := pmetric.NewMetrics()
for i := range mds {
mds[i].ResourceMetrics().MoveAndAppendTo(md.ResourceMetrics())
diff --git a/receiver/kubeletstatsreceiver/scraper_test.go b/receiver/kubeletstatsreceiver/scraper_test.go
index c4828055fae8..e69b3b179b67 100644
--- a/receiver/kubeletstatsreceiver/scraper_test.go
+++ b/receiver/kubeletstatsreceiver/scraper_test.go
@@ -17,6 +17,7 @@ package kubeletstatsreceiver
import (
"context"
"errors"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
"io/ioutil"
"strings"
"testing"
@@ -63,6 +64,7 @@ func TestScraper(t *testing.T) {
&fakeRestClient{},
componenttest.NewNopReceiverCreateSettings(),
options,
+ metadata.DefaultMetricsSettings(),
)
require.NoError(t, err)
@@ -112,6 +114,7 @@ func TestScraperWithMetadata(t *testing.T) {
&fakeRestClient{},
componenttest.NewNopReceiverCreateSettings(),
options,
+ metadata.DefaultMetricsSettings(),
)
require.NoError(t, err)
@@ -195,6 +198,7 @@ func TestScraperWithMetricGroups(t *testing.T) {
extraMetadataLabels: []kubelet.MetadataLabel{kubelet.MetadataLabelContainerID},
metricGroupsToCollect: test.metricGroups,
},
+ metadata.DefaultMetricsSettings(),
)
require.NoError(t, err)
@@ -344,6 +348,7 @@ func TestScraperWithPVCDetailedLabels(t *testing.T) {
},
k8sAPIClient: test.k8sAPIClient,
},
+ metadata.DefaultMetricsSettings(),
)
require.NoError(t, err)
@@ -456,6 +461,7 @@ func TestClientErrors(t *testing.T) {
},
settings,
options,
+ metadata.DefaultMetricsSettings(),
)
require.NoError(t, err)
From dfe1a29be00718e50686f42aa3744773137f7db1 Mon Sep 17 00:00:00 2001
From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com>
Date: Wed, 4 May 2022 16:41:49 -0600
Subject: [PATCH 02/17] Fix lint errors
---
receiver/kubeletstatsreceiver/config.go | 2 +-
.../internal/kubelet/accumulator_test.go | 3 +-
.../internal/kubelet/conventions.go | 13 ++++----
.../internal/kubelet/metrics.go | 3 +-
.../internal/kubelet/network.go | 3 +-
.../internal/kubelet/utils.go | 32 ++-----------------
.../internal/kubelet/volume.go | 8 +++--
receiver/kubeletstatsreceiver/scraper.go | 2 +-
receiver/kubeletstatsreceiver/scraper_test.go | 2 +-
9 files changed, 22 insertions(+), 46 deletions(-)
diff --git a/receiver/kubeletstatsreceiver/config.go b/receiver/kubeletstatsreceiver/config.go
index 3acbb5e9726b..7c740d4fa082 100644
--- a/receiver/kubeletstatsreceiver/config.go
+++ b/receiver/kubeletstatsreceiver/config.go
@@ -17,7 +17,6 @@ package kubeletstatsreceiver // import "github.com/open-telemetry/opentelemetry-
import (
"errors"
"fmt"
- "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
"go.opentelemetry.io/collector/config"
"go.opentelemetry.io/collector/config/confignet"
@@ -27,6 +26,7 @@ import (
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig"
kube "github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/kubelet"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
var _ config.Receiver = (*Config)(nil)
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go
index fcb383e2acdf..76d2613fe049 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go
@@ -16,7 +16,6 @@ package kubelet
import (
"errors"
- "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
"testing"
"github.com/stretchr/testify/assert"
@@ -27,6 +26,8 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
// TestMetadataErrorCases walks through the error cases of collecting
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/conventions.go b/receiver/kubeletstatsreceiver/internal/kubelet/conventions.go
index f28b50005e57..43a4c512b255 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/conventions.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/conventions.go
@@ -16,8 +16,13 @@ package kubelet // import "github.com/open-telemetry/opentelemetry-collector-con
const (
labelPersistentVolumeClaimName = "k8s.persistentvolumeclaim.name"
- labelVolumeName = "k8s.volume.name"
labelVolumeType = "k8s.volume.type"
+ labelAwsVolumeID = "aws.volume.id"
+ labelFsType = "fs.type"
+ labelPartition = "partition"
+ labelGcePdName = "gce.pd.name"
+ labelGlusterfsEndpointsName = "glusterfs.endpoints.name"
+ labelGlusterfsPath = "glusterfs.path"
// Volume types.
labelValuePersistentVolumeClaim = "persistentVolumeClaim"
@@ -30,10 +35,4 @@ const (
labelValueAWSEBSVolume = "awsElasticBlockStore"
labelValueGCEPDVolume = "gcePersistentDisk"
labelValueGlusterFSVolume = "glusterfs"
- labelAwsVolumeId = "aws.volume.id"
- labelFsType = "fs.type"
- labelPartition = "partition"
- labelGcePdName = "gce.pd.name"
- labelGlusterfsEndpointsName = "glusterfs.endpoints.name"
- labelGlusterfsPath = "glusterfs.path"
)
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metrics.go b/receiver/kubeletstatsreceiver/internal/kubelet/metrics.go
index e5189f01b175..eefc891ada67 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/metrics.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/metrics.go
@@ -15,12 +15,13 @@
package kubelet // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/kubelet"
import (
- "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
"time"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.uber.org/zap"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
func MetricsData(
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/network.go b/receiver/kubeletstatsreceiver/internal/kubelet/network.go
index f432a6bb5f25..c557b3e21937 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/network.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/network.go
@@ -14,9 +14,10 @@
package kubelet // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/kubelet"
import (
- "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
"go.opentelemetry.io/collector/pdata/pcommon"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
func addNetworkMetrics(mb *metadata.MetricsBuilder, networkMetrics metadata.NetworkMetrics, s *stats.NetworkStats, currentTime pcommon.Timestamp) {
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/utils.go b/receiver/kubeletstatsreceiver/internal/kubelet/utils.go
index 0059289f802f..25824cf04294 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/utils.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/utils.go
@@ -14,8 +14,9 @@
package kubelet // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/kubelet"
import (
- "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
"go.opentelemetry.io/collector/pdata/pcommon"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
func recordIntDataPoint(mb *metadata.MetricsBuilder, recordDataPoint metadata.RecordIntDataPointFunc, value *uint64, currentTime pcommon.Timestamp) {
@@ -24,32 +25,3 @@ func recordIntDataPoint(mb *metadata.MetricsBuilder, recordDataPoint metadata.Re
}
recordDataPoint(mb, currentTime, int64(*value))
}
-
-//func fillDoubleGauge(dest pmetric.Metric, metricInt metadata.MetricIntf, value float64, currentTime pcommon.Timestamp) {
-// metricInt.Init(dest)
-// dp := dest.Gauge().DataPoints().AppendEmpty()
-// dp.SetDoubleVal(value)
-// dp.SetTimestamp(currentTime)
-//}
-//
-//func addIntGauge(dest pmetric.MetricSlice, metricInt metadata.MetricIntf, value *uint64, currentTime pcommon.Timestamp) {
-// if value == nil {
-// return
-// }
-// fillIntGauge(dest.AppendEmpty(), metricInt, int64(*value), currentTime)
-//}
-//
-//func fillIntGauge(dest pmetric.Metric, metricInt metadata.MetricIntf, value int64, currentTime pcommon.Timestamp) {
-// metricInt.Init(dest)
-// dp := dest.Gauge().DataPoints().AppendEmpty()
-// dp.SetIntVal(value)
-// dp.SetTimestamp(currentTime)
-//}
-//
-//func fillDoubleSum(dest pmetric.Metric, metricInt metadata.MetricIntf, value float64, startTime pcommon.Timestamp, currentTime pcommon.Timestamp) {
-// metricInt.Init(dest)
-// dp := dest.Sum().DataPoints().AppendEmpty()
-// dp.SetDoubleVal(value)
-// dp.SetStartTimestamp(startTime)
-// dp.SetTimestamp(currentTime)
-//}
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/volume.go b/receiver/kubeletstatsreceiver/internal/kubelet/volume.go
index 5e187ac526a9..4cfe92002e22 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/volume.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/volume.go
@@ -14,11 +14,13 @@
package kubelet // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/kubelet"
import (
- "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
+ "strconv"
+
"go.opentelemetry.io/collector/pdata/pcommon"
v1 "k8s.io/api/core/v1"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
- "strconv"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
func addVolumeMetrics(mb *metadata.MetricsBuilder, volumeMetrics metadata.VolumeMetrics, s stats.VolumeStats, currentTime pcommon.Timestamp) {
@@ -79,7 +81,7 @@ func GetPersistentVolumeLabels(pv v1.PersistentVolumeSource, labels map[string]s
func awsElasticBlockStoreDims(vs v1.AWSElasticBlockStoreVolumeSource, labels map[string]string) {
labels[labelVolumeType] = labelValueAWSEBSVolume
// AWS specific labels.
- labels[labelAwsVolumeId] = vs.VolumeID
+ labels[labelAwsVolumeID] = vs.VolumeID
labels[labelFsType] = vs.FSType
labels[labelPartition] = strconv.Itoa(int(vs.Partition))
}
diff --git a/receiver/kubeletstatsreceiver/scraper.go b/receiver/kubeletstatsreceiver/scraper.go
index b9e4da01bc8a..c97dd1e0d316 100644
--- a/receiver/kubeletstatsreceiver/scraper.go
+++ b/receiver/kubeletstatsreceiver/scraper.go
@@ -17,7 +17,6 @@ package kubeletstatsreceiver // import "github.com/open-telemetry/opentelemetry-
import (
"context"
"fmt"
- "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
"time"
"go.opentelemetry.io/collector/component"
@@ -30,6 +29,7 @@ import (
"k8s.io/client-go/kubernetes"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/kubelet"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
type scraperOptions struct {
diff --git a/receiver/kubeletstatsreceiver/scraper_test.go b/receiver/kubeletstatsreceiver/scraper_test.go
index e69b3b179b67..617c066a3901 100644
--- a/receiver/kubeletstatsreceiver/scraper_test.go
+++ b/receiver/kubeletstatsreceiver/scraper_test.go
@@ -17,7 +17,6 @@ package kubeletstatsreceiver
import (
"context"
"errors"
- "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
"io/ioutil"
"strings"
"testing"
@@ -31,6 +30,7 @@ import (
"k8s.io/client-go/kubernetes/fake"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/kubelet"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
const (
From f5a90c74cddd20e6c89354ce5c49e0e43ac4a43b Mon Sep 17 00:00:00 2001
From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com>
Date: Thu, 5 May 2022 09:09:18 -0600
Subject: [PATCH 03/17] Update changelog
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e67a3ea9370c..a8439d7e2e40 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -36,6 +36,7 @@
- `transformprocessor`: Add new `limit` function to allow limiting the number of items in a map, such as the number of attributes in `attributes` or `resource.attributes` (#9552)
- `processor/attributes`: Support attributes set by server authenticator (#9420)
- `datadogexporter`: Experimental support for Exponential Histograms with delta aggregation temporality (#8350)
+- `kubeletstatsreceiver`: Update receiver to use new Metrics Builder. All emitted metrics remain the same. (#9744)
### 🧰 Bug fixes 🧰
From 27e9adbdf1469d4531d1934fea2b4ac23e6aaef8 Mon Sep 17 00:00:00 2001
From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com>
Date: Thu, 5 May 2022 10:23:17 -0600
Subject: [PATCH 04/17] Moved Node and Pod resources back to accumulator
---
.../kubeletstatsreceiver/internal/kubelet/accumulator.go | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go
index 57b06a741980..3e826ba5e962 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go
@@ -64,7 +64,7 @@ func (a *metricDataAccumulator) nodeStats(s stats.NodeStats) {
addNetworkMetrics(a.mb, metadata.NodeNetworkMetrics, s.Network, currentTime)
// todo s.Runtime.ImageFs
- a.m = append(a.m, a.mb.Emit(getNodeResourceOptions(s)...))
+ a.m = append(a.m, a.mb.Emit(metadata.WithK8sNodeName(s.NodeName)))
}
func (a *metricDataAccumulator) podStats(s stats.PodStats) {
@@ -78,7 +78,9 @@ func (a *metricDataAccumulator) podStats(s stats.PodStats) {
addFilesystemMetrics(a.mb, metadata.PodFilesystemMetrics, s.EphemeralStorage, currentTime)
addNetworkMetrics(a.mb, metadata.PodNetworkMetrics, s.Network, currentTime)
- a.m = append(a.m, a.mb.Emit(getPodResourceOptions(s)...))
+ a.m = append(a.m, a.mb.Emit(metadata.WithK8sPodUID(s.PodRef.UID),
+ metadata.WithK8sPodName(s.PodRef.Name),
+ metadata.WithK8sNamespaceName(s.PodRef.Namespace)))
}
func (a *metricDataAccumulator) containerStats(sPod stats.PodStats, s stats.ContainerStats) {
From d9b4910b7ce57350db1589ea45be33341e95f594 Mon Sep 17 00:00:00 2001
From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com>
Date: Wed, 11 May 2022 11:43:39 -0600
Subject: [PATCH 05/17] replaced resource labels with ResourceOptions
---
.../internal/kubelet/accumulator_test.go | 8 +-
.../internal/kubelet/metadata.go | 44 ++++++-----
.../internal/kubelet/metadata_test.go | 58 ++++++++------
.../internal/kubelet/resource.go | 63 +++++++--------
.../internal/kubelet/volume.go | 76 ++++++++++---------
.../internal/kubelet/volume_test.go | 37 ++++-----
receiver/kubeletstatsreceiver/scraper.go | 27 +++----
7 files changed, 162 insertions(+), 151 deletions(-)
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go
index 76d2613fe049..fb44453f61fb 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go
@@ -41,7 +41,7 @@ func TestMetadataErrorCases(t *testing.T) {
numMDs int
numLogs int
logMessages []string
- detailedPVCLabelsSetterOverride func(volCacheID, volumeClaim, namespace string, labels map[string]string) error
+ detailedPVCLabelsSetterOverride func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceOption, error)
}{
{
name: "Fails to get container metadata",
@@ -178,9 +178,9 @@ func TestMetadataErrorCases(t *testing.T) {
},
},
}, nil),
- detailedPVCLabelsSetterOverride: func(volCacheID, volumeClaim, namespace string, labels map[string]string) error {
+ detailedPVCLabelsSetterOverride: func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceOption, error) {
// Mock failure cases.
- return errors.New("")
+ return nil, errors.New("")
},
testScenario: func(acc metricDataAccumulator) {
podStats := stats.PodStats{
@@ -207,7 +207,7 @@ func TestMetadataErrorCases(t *testing.T) {
observedLogger, logs := observer.New(zapcore.WarnLevel)
logger := zap.New(observedLogger)
- tt.metadata.DetailedPVCLabelsSetter = tt.detailedPVCLabelsSetterOverride
+ tt.metadata.DetailedPVCResourceGetter = tt.detailedPVCLabelsSetterOverride
acc := metricDataAccumulator{
metadata: tt.metadata,
logger: logger,
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go b/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go
index 2288d5acd453..fdd74a5ec87e 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go
@@ -17,6 +17,7 @@ package kubelet // import "github.com/open-telemetry/opentelemetry-collector-con
import (
"errors"
"fmt"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
"regexp"
conventions "go.opentelemetry.io/collector/semconv/v1.6.1"
@@ -55,18 +56,18 @@ func ValidateMetadataLabelsConfig(labels []MetadataLabel) error {
}
type Metadata struct {
- Labels map[MetadataLabel]bool
- PodsMetadata *v1.PodList
- DetailedPVCLabelsSetter func(volCacheID, volumeClaim, namespace string, labels map[string]string) error
+ Labels map[MetadataLabel]bool
+ PodsMetadata *v1.PodList
+ DetailedPVCResourceGetter func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceOption, error)
}
func NewMetadata(
labels []MetadataLabel, podsMetadata *v1.PodList,
- detailedPVCLabelsSetter func(volCacheID, volumeClaim, namespace string, labels map[string]string) error) Metadata {
+ detailedPVCResourceGetter func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceOption, error)) Metadata {
return Metadata{
- Labels: getLabelsMap(labels),
- PodsMetadata: podsMetadata,
- DetailedPVCLabelsSetter: detailedPVCLabelsSetter,
+ Labels: getLabelsMap(labels),
+ PodsMetadata: podsMetadata,
+ DetailedPVCResourceGetter: detailedPVCResourceGetter,
}
}
@@ -78,45 +79,46 @@ func getLabelsMap(metadataLabels []MetadataLabel) map[MetadataLabel]bool {
return out
}
-// setExtraLabels sets extra labels in `labels` map based on provided metadata label.
-func (m *Metadata) setExtraLabels(
- labels map[string]string, podRef stats.PodReference,
- extraMetadataLabel MetadataLabel, extraMetadataFrom string) error {
+// getExtraResources gets extra resources based on provided metadata label.
+func (m *Metadata) getExtraResources(podRef stats.PodReference, extraMetadataLabel MetadataLabel,
+ extraMetadataFrom string) ([]metadata.ResourceOption, error) {
// Ensure MetadataLabel exists before proceeding.
if !m.Labels[extraMetadataLabel] || len(m.Labels) == 0 {
- return nil
+ return nil, nil
}
// Cannot proceed, if metadata is unavailable.
if m.PodsMetadata == nil {
- return errors.New("pods metadata were not fetched")
+ return nil, errors.New("pods metadata were not fetched")
}
switch extraMetadataLabel {
case MetadataLabelContainerID:
containerID, err := m.getContainerID(podRef.UID, extraMetadataFrom)
if err != nil {
- return err
+ return nil, err
}
- labels[conventions.AttributeContainerID] = containerID
+ return []metadata.ResourceOption{metadata.WithContainerID(containerID)}, nil
case MetadataLabelVolumeType:
volume, err := m.getPodVolume(podRef.UID, extraMetadataFrom)
if err != nil {
- return err
+ return nil, err
}
- getLabelsFromVolume(volume, labels)
+ ro := getResourcesFromVolume(volume)
// Get more labels from PersistentVolumeClaim volume type.
if volume.PersistentVolumeClaim != nil {
volCacheID := fmt.Sprintf("%s/%s", podRef.UID, extraMetadataFrom)
- if err := m.DetailedPVCLabelsSetter(volCacheID, labels[labelPersistentVolumeClaimName], podRef.Namespace,
- labels); err != nil {
- return fmt.Errorf("failed to set labels from volume claim: %w", err)
+ pvcResources, err := m.DetailedPVCResourceGetter(volCacheID, volume.PersistentVolumeClaim.ClaimName, podRef.Namespace)
+ if err != nil {
+ return nil, fmt.Errorf("failed to set labels from volume claim: %w", err)
}
+ ro = append(ro, pvcResources...)
}
+ return ro, nil
}
- return nil
+ return nil, nil
}
// getContainerID retrieves container id from metadata for given pod UID and container name,
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go
index 2021e6513f6a..38e415e0c3c7 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go
@@ -16,6 +16,7 @@
package kubelet
import (
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
"testing"
"github.com/stretchr/testify/assert"
@@ -23,6 +24,8 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
+
+ "go.opentelemetry.io/collector/pdata/pcommon"
)
func TestValidateMetadataLabelsConfig(t *testing.T) {
@@ -75,13 +78,13 @@ func TestSetExtraLabels(t *testing.T) {
metadata Metadata
args []string
wantError string
- want map[string]string
+ want map[string]interface{}
}{
{
name: "no_labels",
metadata: NewMetadata([]MetadataLabel{}, nil, nil),
args: []string{"uid", "container.id", "container"},
- want: map[string]string{},
+ want: map[string]interface{}{},
},
{
name: "set_container_id_valid",
@@ -103,7 +106,7 @@ func TestSetExtraLabels(t *testing.T) {
},
}, nil),
args: []string{"uid-1234", "container.id", "container1"},
- want: map[string]string{
+ want: map[string]interface{}{
string(MetadataLabelContainerID): "test-container",
},
},
@@ -166,11 +169,17 @@ func TestSetExtraLabels(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- fields := map[string]string{}
- err := tt.metadata.setExtraLabels(fields, stats.PodReference{UID: tt.args[0]}, MetadataLabel(tt.args[1]), tt.args[2])
+ ro, err := tt.metadata.getExtraResources(stats.PodReference{UID: tt.args[0]}, MetadataLabel(tt.args[1]), tt.args[2])
+
+ r := pcommon.NewResource()
+ for _, op := range ro {
+ op(r)
+ }
+
if tt.wantError == "" {
require.NoError(t, err)
- assert.EqualValues(t, tt.want, fields)
+ temp := r.Attributes().AsRaw()
+ assert.EqualValues(t, tt.want, temp)
} else {
assert.Equal(t, tt.wantError, err.Error())
}
@@ -184,7 +193,7 @@ func TestSetExtraLabelsForVolumeTypes(t *testing.T) {
name string
vs v1.VolumeSource
args []string
- want map[string]string
+ want map[string]interface{}
}{
{
name: "hostPath",
@@ -192,7 +201,7 @@ func TestSetExtraLabelsForVolumeTypes(t *testing.T) {
HostPath: &v1.HostPathVolumeSource{},
},
args: []string{"uid-1234", "k8s.volume.type"},
- want: map[string]string{
+ want: map[string]interface{}{
"k8s.volume.type": "hostPath",
},
},
@@ -202,7 +211,7 @@ func TestSetExtraLabelsForVolumeTypes(t *testing.T) {
ConfigMap: &v1.ConfigMapVolumeSource{},
},
args: []string{"uid-1234", "k8s.volume.type"},
- want: map[string]string{
+ want: map[string]interface{}{
"k8s.volume.type": "configMap",
},
},
@@ -212,7 +221,7 @@ func TestSetExtraLabelsForVolumeTypes(t *testing.T) {
EmptyDir: &v1.EmptyDirVolumeSource{},
},
args: []string{"uid-1234", "k8s.volume.type"},
- want: map[string]string{
+ want: map[string]interface{}{
"k8s.volume.type": "emptyDir",
},
},
@@ -222,7 +231,7 @@ func TestSetExtraLabelsForVolumeTypes(t *testing.T) {
Secret: &v1.SecretVolumeSource{},
},
args: []string{"uid-1234", "k8s.volume.type"},
- want: map[string]string{
+ want: map[string]interface{}{
"k8s.volume.type": "secret",
},
},
@@ -232,7 +241,7 @@ func TestSetExtraLabelsForVolumeTypes(t *testing.T) {
DownwardAPI: &v1.DownwardAPIVolumeSource{},
},
args: []string{"uid-1234", "k8s.volume.type"},
- want: map[string]string{
+ want: map[string]interface{}{
"k8s.volume.type": "downwardAPI",
},
},
@@ -244,7 +253,7 @@ func TestSetExtraLabelsForVolumeTypes(t *testing.T) {
},
},
args: []string{"uid-1234", "k8s.volume.type"},
- want: map[string]string{
+ want: map[string]interface{}{
"k8s.volume.type": "persistentVolumeClaim",
"k8s.persistentvolumeclaim.name": "claim-name",
},
@@ -259,7 +268,7 @@ func TestSetExtraLabelsForVolumeTypes(t *testing.T) {
},
},
args: []string{"uid-1234", "k8s.volume.type"},
- want: map[string]string{
+ want: map[string]interface{}{
"k8s.volume.type": "awsElasticBlockStore",
"aws.volume.id": "volume_id",
"fs.type": "fs_type",
@@ -276,7 +285,7 @@ func TestSetExtraLabelsForVolumeTypes(t *testing.T) {
},
},
args: []string{"uid-1234", "k8s.volume.type"},
- want: map[string]string{
+ want: map[string]interface{}{
"k8s.volume.type": "gcePersistentDisk",
"gce.pd.name": "pd_name",
"fs.type": "fs_type",
@@ -292,7 +301,7 @@ func TestSetExtraLabelsForVolumeTypes(t *testing.T) {
},
},
args: []string{"uid-1234", "k8s.volume.type"},
- want: map[string]string{
+ want: map[string]interface{}{
"k8s.volume.type": "glusterfs",
"glusterfs.endpoints.name": "endspoints_name",
"glusterfs.path": "path",
@@ -302,12 +311,11 @@ func TestSetExtraLabelsForVolumeTypes(t *testing.T) {
name: "unsupported type",
vs: v1.VolumeSource{},
args: []string{"uid-1234", "k8s.volume.type"},
- want: map[string]string{},
+ want: map[string]interface{}{},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- fields := map[string]string{}
volName := "volume0"
metadata := NewMetadata([]MetadataLabel{MetadataLabelVolumeType}, &v1.PodList{
Items: []v1.Pod{
@@ -325,11 +333,17 @@ func TestSetExtraLabelsForVolumeTypes(t *testing.T) {
},
},
},
- }, func(volCacheID, volumeClaim, namespace string, labels map[string]string) error {
- return nil
+ }, func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceOption, error) {
+ return nil, nil
})
- metadata.setExtraLabels(fields, stats.PodReference{UID: tt.args[0]}, MetadataLabel(tt.args[1]), volName)
- assert.Equal(t, tt.want, fields)
+ ro, _ := metadata.getExtraResources(stats.PodReference{UID: tt.args[0]}, MetadataLabel(tt.args[1]), volName)
+
+ r := pcommon.NewResource()
+ for _, op := range ro {
+ op(r)
+ }
+
+ assert.Equal(t, tt.want, r.Attributes().AsRaw())
})
}
}
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/resource.go b/receiver/kubeletstatsreceiver/internal/kubelet/resource.go
index 83de0be16adc..53c7d57458a3 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/resource.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/resource.go
@@ -16,52 +16,43 @@ package kubelet // import "github.com/open-telemetry/opentelemetry-collector-con
import (
"fmt"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
- "go.opentelemetry.io/collector/pdata/pcommon"
- conventions "go.opentelemetry.io/collector/semconv/v1.6.1"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
)
-func fillNodeResource(dest pcommon.Resource, s stats.NodeStats) {
- dest.Attributes().UpsertString(conventions.AttributeK8SNodeName, s.NodeName)
-}
-
-func fillPodResource(dest pcommon.Resource, s stats.PodStats) {
- dest.Attributes().UpsertString(conventions.AttributeK8SPodUID, s.PodRef.UID)
- dest.Attributes().UpsertString(conventions.AttributeK8SPodName, s.PodRef.Name)
- dest.Attributes().UpsertString(conventions.AttributeK8SNamespaceName, s.PodRef.Namespace)
-}
-
-func fillContainerResource(dest pcommon.Resource, sPod stats.PodStats, sContainer stats.ContainerStats, metadata Metadata) error {
- labels := map[string]string{
- conventions.AttributeK8SPodUID: sPod.PodRef.UID,
- conventions.AttributeK8SPodName: sPod.PodRef.Name,
- conventions.AttributeK8SNamespaceName: sPod.PodRef.Namespace,
- conventions.AttributeK8SContainerName: sContainer.Name,
- }
- if err := metadata.setExtraLabels(labels, sPod.PodRef, MetadataLabelContainerID, sContainer.Name); err != nil {
- return fmt.Errorf("failed to set extra labels from metadata: %w", err)
+func getContainerResourceOptions(sPod stats.PodStats, sContainer stats.ContainerStats, k8sMetadata Metadata) ([]metadata.ResourceOption, error) {
+ ro := []metadata.ResourceOption{
+ metadata.WithK8sPodUID(sPod.PodRef.UID),
+ metadata.WithK8sPodName(sPod.PodRef.Name),
+ metadata.WithK8sNamespaceName(sPod.PodRef.Namespace),
+ metadata.WithContainerName(sContainer.Name),
}
- for k, v := range labels {
- dest.Attributes().UpsertString(k, v)
+
+ extraResources, err := k8sMetadata.getExtraResources(sPod.PodRef, MetadataLabelContainerID, sContainer.Name)
+ if err != nil {
+ return nil, fmt.Errorf("failed to set extra labels from metadata: %w", err)
}
- return nil
+
+ ro = append(ro, extraResources...)
+
+ return ro, nil
}
-func fillVolumeResource(dest pcommon.Resource, sPod stats.PodStats, vs stats.VolumeStats, metadata Metadata) error {
- labels := map[string]string{
- conventions.AttributeK8SPodUID: sPod.PodRef.UID,
- conventions.AttributeK8SPodName: sPod.PodRef.Name,
- conventions.AttributeK8SNamespaceName: sPod.PodRef.Namespace,
- labelVolumeName: vs.Name,
+func getVolumeResourceOptions(sPod stats.PodStats, vs stats.VolumeStats, k8sMetadata Metadata) ([]metadata.ResourceOption, error) {
+ ro := []metadata.ResourceOption{
+ metadata.WithK8sPodUID(sPod.PodRef.UID),
+ metadata.WithK8sPodName(sPod.PodRef.Name),
+ metadata.WithK8sNamespaceName(sPod.PodRef.Namespace),
+ metadata.WithK8sVolumeName(vs.Name),
}
- if err := metadata.setExtraLabels(labels, sPod.PodRef, MetadataLabelVolumeType, vs.Name); err != nil {
- return fmt.Errorf("failed to set extra labels from metadata: %w", err)
+ extraResources, err := k8sMetadata.getExtraResources(sPod.PodRef, MetadataLabelVolumeType, vs.Name)
+ if err != nil {
+ return nil, fmt.Errorf("failed to set extra labels from metadata: %w", err)
}
- for k, v := range labels {
- dest.Attributes().UpsertString(k, v)
- }
- return nil
+ ro = append(ro, extraResources...)
+
+ return ro, nil
}
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/volume.go b/receiver/kubeletstatsreceiver/internal/kubelet/volume.go
index 4cfe92002e22..a5bf353dc5e5 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/volume.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/volume.go
@@ -31,72 +31,80 @@ func addVolumeMetrics(mb *metadata.MetricsBuilder, volumeMetrics metadata.Volume
recordIntDataPoint(mb, volumeMetrics.InodesUsed, s.InodesUsed, currentTime)
}
-func getLabelsFromVolume(volume v1.Volume, labels map[string]string) {
+func getResourcesFromVolume(volume v1.Volume) []metadata.ResourceOption {
switch {
// TODO: Support more types
case volume.ConfigMap != nil:
- labels[labelVolumeType] = labelValueConfigMapVolume
+ return []metadata.ResourceOption{metadata.WithK8sVolumeType(labelValueConfigMapVolume)}
case volume.DownwardAPI != nil:
- labels[labelVolumeType] = labelValueDownwardAPIVolume
+ return []metadata.ResourceOption{metadata.WithK8sVolumeType(labelValueDownwardAPIVolume)}
case volume.EmptyDir != nil:
- labels[labelVolumeType] = labelValueEmptyDirVolume
+ return []metadata.ResourceOption{metadata.WithK8sVolumeType(labelValueEmptyDirVolume)}
case volume.Secret != nil:
- labels[labelVolumeType] = labelValueSecretVolume
+ return []metadata.ResourceOption{metadata.WithK8sVolumeType(labelValueSecretVolume)}
case volume.PersistentVolumeClaim != nil:
- labels[labelVolumeType] = labelValuePersistentVolumeClaim
- labels[labelPersistentVolumeClaimName] = volume.PersistentVolumeClaim.ClaimName
+ return []metadata.ResourceOption{metadata.WithK8sVolumeType(labelValuePersistentVolumeClaim),
+ metadata.WithK8sPersistentvolumeclaimName(volume.PersistentVolumeClaim.ClaimName)}
case volume.HostPath != nil:
- labels[labelVolumeType] = labelValueHostPathVolume
+ return []metadata.ResourceOption{metadata.WithK8sVolumeType(labelValueHostPathVolume)}
case volume.AWSElasticBlockStore != nil:
- awsElasticBlockStoreDims(*volume.AWSElasticBlockStore, labels)
+ return awsElasticBlockStoreDims(*volume.AWSElasticBlockStore)
case volume.GCEPersistentDisk != nil:
- gcePersistentDiskDims(*volume.GCEPersistentDisk, labels)
+ return gcePersistentDiskDims(*volume.GCEPersistentDisk)
case volume.Glusterfs != nil:
- glusterfsDims(*volume.Glusterfs, labels)
+ return glusterfsDims(*volume.Glusterfs)
}
+ return nil
}
-func GetPersistentVolumeLabels(pv v1.PersistentVolumeSource, labels map[string]string) {
+func GetPersistentVolumeLabels(pv v1.PersistentVolumeSource) []metadata.ResourceOption {
// TODO: Support more types
switch {
case pv.Local != nil:
- labels[labelVolumeType] = labelValueLocalVolume
+ return []metadata.ResourceOption{metadata.WithK8sVolumeType(labelValueLocalVolume)}
case pv.AWSElasticBlockStore != nil:
- awsElasticBlockStoreDims(*pv.AWSElasticBlockStore, labels)
+ return awsElasticBlockStoreDims(*pv.AWSElasticBlockStore)
case pv.GCEPersistentDisk != nil:
- gcePersistentDiskDims(*pv.GCEPersistentDisk, labels)
+ return gcePersistentDiskDims(*pv.GCEPersistentDisk)
case pv.Glusterfs != nil:
// pv.Glusterfs is a GlusterfsPersistentVolumeSource instead of GlusterfsVolumeSource,
// convert to GlusterfsVolumeSource so a single method can handle both structs. This
// can be broken out into separate methods if one is interested in different sets
// of labels from the two structs in the future.
- glusterfsDims(v1.GlusterfsVolumeSource{
+ return glusterfsDims(v1.GlusterfsVolumeSource{
EndpointsName: pv.Glusterfs.EndpointsName,
Path: pv.Glusterfs.Path,
ReadOnly: pv.Glusterfs.ReadOnly,
- }, labels)
+ })
}
+ return nil
}
-func awsElasticBlockStoreDims(vs v1.AWSElasticBlockStoreVolumeSource, labels map[string]string) {
- labels[labelVolumeType] = labelValueAWSEBSVolume
- // AWS specific labels.
- labels[labelAwsVolumeID] = vs.VolumeID
- labels[labelFsType] = vs.FSType
- labels[labelPartition] = strconv.Itoa(int(vs.Partition))
+func awsElasticBlockStoreDims(vs v1.AWSElasticBlockStoreVolumeSource) []metadata.ResourceOption {
+ return []metadata.ResourceOption{
+ metadata.WithK8sVolumeType(labelValueAWSEBSVolume),
+ // AWS specific labels.
+ metadata.WithAwsVolumeID(vs.VolumeID),
+ metadata.WithFsType(vs.FSType),
+ metadata.WithPartition(strconv.Itoa(int(vs.Partition))),
+ }
}
-func gcePersistentDiskDims(vs v1.GCEPersistentDiskVolumeSource, labels map[string]string) {
- labels[labelVolumeType] = labelValueGCEPDVolume
- // GCP specific labels.
- labels[labelGcePdName] = vs.PDName
- labels[labelFsType] = vs.FSType
- labels[labelPartition] = strconv.Itoa(int(vs.Partition))
+func gcePersistentDiskDims(vs v1.GCEPersistentDiskVolumeSource) []metadata.ResourceOption {
+ return []metadata.ResourceOption{
+ metadata.WithK8sVolumeType(labelValueGCEPDVolume),
+ // GCP specific labels.
+ metadata.WithGcePdName(vs.PDName),
+ metadata.WithFsType(vs.FSType),
+ metadata.WithPartition(strconv.Itoa(int(vs.Partition))),
+ }
}
-func glusterfsDims(vs v1.GlusterfsVolumeSource, labels map[string]string) {
- labels[labelVolumeType] = labelValueGlusterFSVolume
- // GlusterFS specific labels.
- labels[labelGlusterfsEndpointsName] = vs.EndpointsName
- labels[labelGlusterfsPath] = vs.Path
+func glusterfsDims(vs v1.GlusterfsVolumeSource) []metadata.ResourceOption {
+ return []metadata.ResourceOption{
+ metadata.WithK8sVolumeType(labelValueGlusterFSVolume),
+ // GlusterFS specific labels.
+ metadata.WithGlusterfsEndpointsName(vs.EndpointsName),
+ metadata.WithGlusterfsPath(vs.Path),
+ }
}
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/volume_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/volume_test.go
index 7bb8e6aacf68..64a038b29a4d 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/volume_test.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/volume_test.go
@@ -15,6 +15,7 @@
package kubelet
import (
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
"testing"
"github.com/stretchr/testify/require"
@@ -38,7 +39,7 @@ func TestDetailedPVCLabels(t *testing.T) {
volumeName string
volumeSource v1.VolumeSource
pod pod
- detailedPVCLabelsSetterOverride func(volCacheID, volumeClaim, namespace string, labels map[string]string) error
+ detailedPVCLabelsSetterOverride func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceOption, error)
want map[string]interface{}
}{
{
@@ -50,15 +51,15 @@ func TestDetailedPVCLabels(t *testing.T) {
},
},
pod: pod{uid: "uid-1234", name: "pod-name", namespace: "pod-namespace"},
- detailedPVCLabelsSetterOverride: func(volCacheID, volumeClaim, namespace string, labels map[string]string) error {
- GetPersistentVolumeLabels(v1.PersistentVolumeSource{
+ detailedPVCLabelsSetterOverride: func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceOption, error) {
+ ro := GetPersistentVolumeLabels(v1.PersistentVolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: "volume_id",
FSType: "fs_type",
Partition: 10,
},
- }, labels)
- return nil
+ })
+ return ro, nil
},
want: map[string]interface{}{
"k8s.volume.name": "volume0",
@@ -81,15 +82,15 @@ func TestDetailedPVCLabels(t *testing.T) {
},
},
pod: pod{uid: "uid-1234", name: "pod-name", namespace: "pod-namespace"},
- detailedPVCLabelsSetterOverride: func(volCacheID, volumeClaim, namespace string, labels map[string]string) error {
- GetPersistentVolumeLabels(v1.PersistentVolumeSource{
+ detailedPVCLabelsSetterOverride: func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceOption, error) {
+ ro := GetPersistentVolumeLabels(v1.PersistentVolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: "pd_name",
FSType: "fs_type",
Partition: 10,
},
- }, labels)
- return nil
+ })
+ return ro, nil
},
want: map[string]interface{}{
"k8s.volume.name": "volume0",
@@ -112,14 +113,14 @@ func TestDetailedPVCLabels(t *testing.T) {
},
},
pod: pod{uid: "uid-1234", name: "pod-name", namespace: "pod-namespace"},
- detailedPVCLabelsSetterOverride: func(volCacheID, volumeClaim, namespace string, labels map[string]string) error {
- GetPersistentVolumeLabels(v1.PersistentVolumeSource{
+ detailedPVCLabelsSetterOverride: func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceOption, error) {
+ ro := GetPersistentVolumeLabels(v1.PersistentVolumeSource{
Glusterfs: &v1.GlusterfsPersistentVolumeSource{
EndpointsName: "endpoints_name",
Path: "path",
},
- }, labels)
- return nil
+ })
+ return ro, nil
},
want: map[string]interface{}{
"k8s.volume.name": "volume0",
@@ -141,13 +142,13 @@ func TestDetailedPVCLabels(t *testing.T) {
},
},
pod: pod{uid: "uid-1234", name: "pod-name", namespace: "pod-namespace"},
- detailedPVCLabelsSetterOverride: func(volCacheID, volumeClaim, namespace string, labels map[string]string) error {
- GetPersistentVolumeLabels(v1.PersistentVolumeSource{
+ detailedPVCLabelsSetterOverride: func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceOption, error) {
+ ro := GetPersistentVolumeLabels(v1.PersistentVolumeSource{
Local: &v1.LocalVolumeSource{
Path: "path",
},
- }, labels)
- return nil
+ })
+ return ro, nil
},
want: map[string]interface{}{
"k8s.volume.name": "volume0",
@@ -187,7 +188,7 @@ func TestDetailedPVCLabels(t *testing.T) {
},
},
}, nil)
- metadata.DetailedPVCLabelsSetter = tt.detailedPVCLabelsSetterOverride
+ metadata.DetailedPVCResourceGetter = tt.detailedPVCLabelsSetterOverride
ro, err := getVolumeResourceOptions(podStats, stats.VolumeStats{Name: tt.volumeName}, metadata)
require.NoError(t, err)
diff --git a/receiver/kubeletstatsreceiver/scraper.go b/receiver/kubeletstatsreceiver/scraper.go
index c97dd1e0d316..16ad22e4b5a3 100644
--- a/receiver/kubeletstatsreceiver/scraper.go
+++ b/receiver/kubeletstatsreceiver/scraper.go
@@ -47,7 +47,7 @@ type kubletScraper struct {
extraMetadataLabels []kubelet.MetadataLabel
metricGroupsToCollect map[kubelet.MetricGroup]bool
k8sAPIClient kubernetes.Interface
- cachedVolumeLabels map[string]map[string]string
+ cachedVolumeLabels map[string][]metadata.ResourceOption
mb *metadata.MetricsBuilder
}
@@ -64,7 +64,7 @@ func newKubletScraper(
extraMetadataLabels: rOptions.extraMetadataLabels,
metricGroupsToCollect: rOptions.metricGroupsToCollect,
k8sAPIClient: rOptions.k8sAPIClient,
- cachedVolumeLabels: make(map[string]map[string]string),
+ cachedVolumeLabels: make(map[string][]metadata.ResourceOption),
mb: metadata.NewMetricsBuilder(metricsConfig),
}
return scraperhelper.NewScraper(typeStr, ks.scrape)
@@ -96,39 +96,34 @@ func (r *kubletScraper) scrape(context.Context) (pmetric.Metrics, error) {
return md, nil
}
-func (r *kubletScraper) detailedPVCLabelsSetter() func(volCacheID, volumeClaim, namespace string, labels map[string]string) error {
- return func(volCacheID, volumeClaim, namespace string, labels map[string]string) error {
+func (r *kubletScraper) detailedPVCLabelsSetter() func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceOption, error) {
+ return func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceOption, error) {
if r.k8sAPIClient == nil {
- return nil
+ return nil, nil
}
if r.cachedVolumeLabels[volCacheID] == nil {
ctx := context.Background()
pvc, err := r.k8sAPIClient.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, volumeClaim, metav1.GetOptions{})
if err != nil {
- return err
+ return nil, err
}
volName := pvc.Spec.VolumeName
if volName == "" {
- return fmt.Errorf("PersistentVolumeClaim %s does not have a volume name", pvc.Name)
+ return nil, fmt.Errorf("PersistentVolumeClaim %s does not have a volume name", pvc.Name)
}
pv, err := r.k8sAPIClient.CoreV1().PersistentVolumes().Get(ctx, volName, metav1.GetOptions{})
if err != nil {
- return err
+ return nil, err
}
- labelsToCache := make(map[string]string)
- kubelet.GetPersistentVolumeLabels(pv.Spec.PersistentVolumeSource, labelsToCache)
+ ro := kubelet.GetPersistentVolumeLabels(pv.Spec.PersistentVolumeSource)
// Cache collected labels.
- r.cachedVolumeLabels[volCacheID] = labelsToCache
+ r.cachedVolumeLabels[volCacheID] = ro
}
-
- for k, v := range r.cachedVolumeLabels[volCacheID] {
- labels[k] = v
- }
- return nil
+ return r.cachedVolumeLabels[volCacheID], nil
}
}
From 329abad6cefc30e2ebe7c7f6e6cce6f5088b83f4 Mon Sep 17 00:00:00 2001
From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com>
Date: Wed, 11 May 2022 11:50:40 -0600
Subject: [PATCH 06/17] Fix lint
---
.../kubeletstatsreceiver/internal/kubelet/conventions.go | 9 +--------
.../kubeletstatsreceiver/internal/kubelet/metadata.go | 3 ++-
.../internal/kubelet/metadata_test.go | 4 ++--
.../kubeletstatsreceiver/internal/kubelet/resource.go | 3 ++-
.../kubeletstatsreceiver/internal/kubelet/volume_test.go | 3 ++-
5 files changed, 9 insertions(+), 13 deletions(-)
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/conventions.go b/receiver/kubeletstatsreceiver/internal/kubelet/conventions.go
index 43a4c512b255..a693876ebeb2 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/conventions.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/conventions.go
@@ -15,14 +15,7 @@
package kubelet // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/kubelet"
const (
- labelPersistentVolumeClaimName = "k8s.persistentvolumeclaim.name"
- labelVolumeType = "k8s.volume.type"
- labelAwsVolumeID = "aws.volume.id"
- labelFsType = "fs.type"
- labelPartition = "partition"
- labelGcePdName = "gce.pd.name"
- labelGlusterfsEndpointsName = "glusterfs.endpoints.name"
- labelGlusterfsPath = "glusterfs.path"
+ labelVolumeType = "k8s.volume.type"
// Volume types.
labelValuePersistentVolumeClaim = "persistentVolumeClaim"
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go b/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go
index fdd74a5ec87e..2ce738ab8821 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go
@@ -17,13 +17,14 @@ package kubelet // import "github.com/open-telemetry/opentelemetry-collector-con
import (
"errors"
"fmt"
- "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
"regexp"
conventions "go.opentelemetry.io/collector/semconv/v1.6.1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
type MetadataLabel string
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go
index 38e415e0c3c7..b64cbc9e79f8 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go
@@ -16,16 +16,16 @@
package kubelet
import (
- "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "go.opentelemetry.io/collector/pdata/pcommon"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
- "go.opentelemetry.io/collector/pdata/pcommon"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
func TestValidateMetadataLabelsConfig(t *testing.T) {
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/resource.go b/receiver/kubeletstatsreceiver/internal/kubelet/resource.go
index 53c7d57458a3..a1a258f0410a 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/resource.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/resource.go
@@ -16,9 +16,10 @@ package kubelet // import "github.com/open-telemetry/opentelemetry-collector-con
import (
"fmt"
- "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
func getContainerResourceOptions(sPod stats.PodStats, sContainer stats.ContainerStats, k8sMetadata Metadata) ([]metadata.ResourceOption, error) {
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/volume_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/volume_test.go
index 64a038b29a4d..49b69125faf4 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/volume_test.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/volume_test.go
@@ -15,7 +15,6 @@
package kubelet
import (
- "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
"testing"
"github.com/stretchr/testify/require"
@@ -24,6 +23,8 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
type pod struct {
From 6b9ae6e4bef16126511e58b571d7edebcf1519aa Mon Sep 17 00:00:00 2001
From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com>
Date: Wed, 11 May 2022 12:53:26 -0600
Subject: [PATCH 07/17] Fix import spacing
---
receiver/kubeletstatsreceiver/internal/kubelet/network.go | 1 +
receiver/kubeletstatsreceiver/internal/kubelet/utils.go | 1 +
receiver/kubeletstatsreceiver/internal/kubelet/volume.go | 1 +
3 files changed, 3 insertions(+)
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/network.go b/receiver/kubeletstatsreceiver/internal/kubelet/network.go
index c557b3e21937..5d494ce08e85 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/network.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/network.go
@@ -13,6 +13,7 @@
// limitations under the License.
package kubelet // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/kubelet"
+
import (
"go.opentelemetry.io/collector/pdata/pcommon"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/utils.go b/receiver/kubeletstatsreceiver/internal/kubelet/utils.go
index 25824cf04294..bc12e5a0c5fa 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/utils.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/utils.go
@@ -13,6 +13,7 @@
// limitations under the License.
package kubelet // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/kubelet"
+
import (
"go.opentelemetry.io/collector/pdata/pcommon"
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/volume.go b/receiver/kubeletstatsreceiver/internal/kubelet/volume.go
index a5bf353dc5e5..f94d24613b77 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/volume.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/volume.go
@@ -13,6 +13,7 @@
// limitations under the License.
package kubelet // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/kubelet"
+
import (
"strconv"
From ab14507879088fa9e0e348d63d21325fbf2ff070 Mon Sep 17 00:00:00 2001
From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com>
Date: Wed, 11 May 2022 12:54:01 -0600
Subject: [PATCH 08/17] Update
receiver/kubeletstatsreceiver/internal/metadata/metrics.go
Co-authored-by: Dmitrii Anoshin
---
receiver/kubeletstatsreceiver/internal/metadata/metrics.go | 1 +
1 file changed, 1 insertion(+)
diff --git a/receiver/kubeletstatsreceiver/internal/metadata/metrics.go b/receiver/kubeletstatsreceiver/internal/metadata/metrics.go
index de358ffd9c6a..e0944109a716 100644
--- a/receiver/kubeletstatsreceiver/internal/metadata/metrics.go
+++ b/receiver/kubeletstatsreceiver/internal/metadata/metrics.go
@@ -13,6 +13,7 @@
// limitations under the License.
package metadata // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
+
import "go.opentelemetry.io/collector/pdata/pcommon"
type RecordDoubleDataPointFunc func(*MetricsBuilder, pcommon.Timestamp, float64)
From ad435e8749c8f5e5cc035b4078bc968112c03973 Mon Sep 17 00:00:00 2001
From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com>
Date: Wed, 11 May 2022 14:27:26 -0600
Subject: [PATCH 09/17] Refactored scraper to emit using metrics builder
---
.../internal/kubelet/accumulator.go | 12 ++--
.../internal/kubelet/accumulator_test.go | 2 +-
.../internal/kubelet/metrics.go | 6 +-
.../internal/kubelet/metrics_test.go | 57 ++++++++++---------
receiver/kubeletstatsreceiver/scraper.go | 8 +--
5 files changed, 39 insertions(+), 46 deletions(-)
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go
index 3e826ba5e962..210989903d0a 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go
@@ -18,7 +18,6 @@ import (
"time"
"go.opentelemetry.io/collector/pdata/pcommon"
- "go.opentelemetry.io/collector/pdata/pmetric"
"go.uber.org/zap"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
@@ -44,7 +43,6 @@ var ValidMetricGroups = map[MetricGroup]bool{
}
type metricDataAccumulator struct {
- m []pmetric.Metrics
metadata Metadata
logger *zap.Logger
metricGroupsToCollect map[MetricGroup]bool
@@ -64,7 +62,7 @@ func (a *metricDataAccumulator) nodeStats(s stats.NodeStats) {
addNetworkMetrics(a.mb, metadata.NodeNetworkMetrics, s.Network, currentTime)
// todo s.Runtime.ImageFs
- a.m = append(a.m, a.mb.Emit(metadata.WithK8sNodeName(s.NodeName)))
+ a.mb.EmitForResource(metadata.WithK8sNodeName(s.NodeName))
}
func (a *metricDataAccumulator) podStats(s stats.PodStats) {
@@ -78,9 +76,9 @@ func (a *metricDataAccumulator) podStats(s stats.PodStats) {
addFilesystemMetrics(a.mb, metadata.PodFilesystemMetrics, s.EphemeralStorage, currentTime)
addNetworkMetrics(a.mb, metadata.PodNetworkMetrics, s.Network, currentTime)
- a.m = append(a.m, a.mb.Emit(metadata.WithK8sPodUID(s.PodRef.UID),
+ a.mb.EmitForResource(metadata.WithK8sPodUID(s.PodRef.UID),
metadata.WithK8sPodName(s.PodRef.Name),
- metadata.WithK8sNamespaceName(s.PodRef.Namespace)))
+ metadata.WithK8sNamespaceName(s.PodRef.Namespace))
}
func (a *metricDataAccumulator) containerStats(sPod stats.PodStats, s stats.ContainerStats) {
@@ -103,7 +101,7 @@ func (a *metricDataAccumulator) containerStats(sPod stats.PodStats, s stats.Cont
addMemoryMetrics(a.mb, metadata.ContainerMemoryMetrics, s.Memory, currentTime)
addFilesystemMetrics(a.mb, metadata.ContainerFilesystemMetrics, s.Rootfs, currentTime)
- a.m = append(a.m, a.mb.Emit(ro...))
+ a.mb.EmitForResource(ro...)
}
func (a *metricDataAccumulator) volumeStats(sPod stats.PodStats, s stats.VolumeStats) {
@@ -124,5 +122,5 @@ func (a *metricDataAccumulator) volumeStats(sPod stats.PodStats, s stats.VolumeS
currentTime := pcommon.NewTimestampFromTime(a.time)
addVolumeMetrics(a.mb, metadata.K8sVolumeMetrics, s, currentTime)
- a.m = append(a.m, a.mb.Emit(ro...))
+ a.mb.EmitForResource(ro...)
}
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go
index fb44453f61fb..7a17e7752404 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go
@@ -217,7 +217,7 @@ func TestMetadataErrorCases(t *testing.T) {
tt.testScenario(acc)
- assert.Equal(t, tt.numMDs, len(acc.m))
+ assert.Equal(t, tt.numMDs, acc.mb.Emit().MetricCount())
require.Equal(t, tt.numLogs, logs.Len())
for i := 0; i < tt.numLogs; i++ {
assert.Equal(t, tt.logMessages[i], logs.All()[i].Message)
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metrics.go b/receiver/kubeletstatsreceiver/internal/kubelet/metrics.go
index eefc891ada67..bcad71538474 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/metrics.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/metrics.go
@@ -17,18 +17,17 @@ package kubelet // import "github.com/open-telemetry/opentelemetry-collector-con
import (
"time"
- "go.opentelemetry.io/collector/pdata/pmetric"
"go.uber.org/zap"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
-func MetricsData(
+func PrepareMetricsData(
logger *zap.Logger, summary *stats.Summary,
metadata Metadata,
metricGroupsToCollect map[MetricGroup]bool,
- mb *metadata.MetricsBuilder) []pmetric.Metrics {
+ mb *metadata.MetricsBuilder) {
acc := &metricDataAccumulator{
metadata: metadata,
logger: logger,
@@ -50,5 +49,4 @@ func MetricsData(
acc.volumeStats(podStats, volumeStats)
}
}
- return acc.m
}
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go
index 3736272d820a..4798283afcf3 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go
@@ -45,27 +45,28 @@ func TestMetricAccumulator(t *testing.T) {
podsMetadata, _ := metadataProvider.Pods()
k8sMetadata := NewMetadata([]MetadataLabel{MetadataLabelContainerID}, podsMetadata, nil)
mb := metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings())
- requireMetricsOk(t, MetricsData(zap.NewNop(), summary, k8sMetadata, ValidMetricGroups, mb))
+ PrepareMetricsData(zap.NewNop(), summary, k8sMetadata, ValidMetricGroups, mb)
+ requireMetricsOk(t, mb.Emit())
mb.Reset()
+ PrepareMetricsData(zap.NewNop(), summary, k8sMetadata, map[MetricGroup]bool{}, mb)
// Disable all groups
- require.Equal(t, 0, len(MetricsData(zap.NewNop(), summary, k8sMetadata, map[MetricGroup]bool{}, mb)))
+ require.Equal(t, 0, mb.Emit().MetricCount())
}
-func requireMetricsOk(t *testing.T, mds []pmetric.Metrics) {
- for _, md := range mds {
- for i := 0; i < md.ResourceMetrics().Len(); i++ {
- rm := md.ResourceMetrics().At(i)
- requireResourceOk(t, rm.Resource())
- for j := 0; j < rm.ScopeMetrics().Len(); j++ {
- ilm := rm.ScopeMetrics().At(j)
- require.Equal(t, "otelcol/kubeletstatsreceiver", ilm.Scope().Name())
- for k := 0; k < ilm.Metrics().Len(); k++ {
- requireMetricOk(t, ilm.Metrics().At(k))
- }
+func requireMetricsOk(t *testing.T, md pmetric.Metrics) {
+ for i := 0; i < md.ResourceMetrics().Len(); i++ {
+ rm := md.ResourceMetrics().At(i)
+ requireResourceOk(t, rm.Resource())
+ for j := 0; j < rm.ScopeMetrics().Len(); j++ {
+ ilm := rm.ScopeMetrics().At(j)
+ require.Equal(t, "otelcol/kubeletstatsreceiver", ilm.Scope().Name())
+ for k := 0; k < ilm.Metrics().Len(); k++ {
+ requireMetricOk(t, ilm.Metrics().At(k))
}
}
}
+
}
func requireMetricOk(t *testing.T, m pmetric.Metric) {
@@ -141,27 +142,26 @@ func requireContains(t *testing.T, metrics map[string][]pmetric.Metric, metricNa
}
func indexedFakeMetrics() map[string][]pmetric.Metric {
- mds := fakeMetrics()
+ md := fakeMetrics()
metrics := make(map[string][]pmetric.Metric)
- for _, md := range mds {
- for i := 0; i < md.ResourceMetrics().Len(); i++ {
- rm := md.ResourceMetrics().At(i)
- for j := 0; j < rm.ScopeMetrics().Len(); j++ {
- ilm := rm.ScopeMetrics().At(j)
- for k := 0; k < ilm.Metrics().Len(); k++ {
- m := ilm.Metrics().At(k)
- metricName := m.Name()
- list := metrics[metricName]
- list = append(list, m)
- metrics[metricName] = list
- }
+
+ for i := 0; i < md.ResourceMetrics().Len(); i++ {
+ rm := md.ResourceMetrics().At(i)
+ for j := 0; j < rm.ScopeMetrics().Len(); j++ {
+ ilm := rm.ScopeMetrics().At(j)
+ for k := 0; k < ilm.Metrics().Len(); k++ {
+ m := ilm.Metrics().At(k)
+ metricName := m.Name()
+ list := metrics[metricName]
+ list = append(list, m)
+ metrics[metricName] = list
}
}
}
return metrics
}
-func fakeMetrics() []pmetric.Metrics {
+func fakeMetrics() pmetric.Metrics {
rc := &fakeRestClient{}
statsProvider := NewStatsProvider(rc)
summary, _ := statsProvider.StatsSummary()
@@ -171,5 +171,6 @@ func fakeMetrics() []pmetric.Metrics {
NodeMetricGroup: true,
}
mb := metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings())
- return MetricsData(zap.NewNop(), summary, Metadata{}, mgs, mb)
+ PrepareMetricsData(zap.NewNop(), summary, Metadata{}, mgs, mb)
+ return mb.Emit()
}
diff --git a/receiver/kubeletstatsreceiver/scraper.go b/receiver/kubeletstatsreceiver/scraper.go
index 16ad22e4b5a3..f047a49eb300 100644
--- a/receiver/kubeletstatsreceiver/scraper.go
+++ b/receiver/kubeletstatsreceiver/scraper.go
@@ -88,12 +88,8 @@ func (r *kubletScraper) scrape(context.Context) (pmetric.Metrics, error) {
}
metadata := kubelet.NewMetadata(r.extraMetadataLabels, podsMetadata, r.detailedPVCLabelsSetter())
- mds := kubelet.MetricsData(r.logger, summary, metadata, r.metricGroupsToCollect, r.mb)
- md := pmetric.NewMetrics()
- for i := range mds {
- mds[i].ResourceMetrics().MoveAndAppendTo(md.ResourceMetrics())
- }
- return md, nil
+ kubelet.PrepareMetricsData(r.logger, summary, metadata, r.metricGroupsToCollect, r.mb)
+ return r.mb.Emit(), nil
}
func (r *kubletScraper) detailedPVCLabelsSetter() func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceOption, error) {
From 2d2167c6a96cfe740d1e2125eed4493c0a7dda31 Mon Sep 17 00:00:00 2001
From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com>
Date: Thu, 12 May 2022 08:54:39 -0600
Subject: [PATCH 10/17] Add default metrics
---
receiver/kubeletstatsreceiver/factory.go | 2 ++
1 file changed, 2 insertions(+)
diff --git a/receiver/kubeletstatsreceiver/factory.go b/receiver/kubeletstatsreceiver/factory.go
index 329f5c1da9bb..66a3f1af404a 100644
--- a/receiver/kubeletstatsreceiver/factory.go
+++ b/receiver/kubeletstatsreceiver/factory.go
@@ -16,6 +16,7 @@ package kubeletstatsreceiver // import "github.com/open-telemetry/opentelemetry-
import (
"context"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
"time"
"go.opentelemetry.io/collector/component"
@@ -59,6 +60,7 @@ func createDefaultConfig() config.Receiver {
AuthType: k8sconfig.AuthTypeTLS,
},
},
+ Metrics: metadata.DefaultMetricsSettings(),
}
}
From 9dcc91ad00bf4f034862448cb575f147bd034acd Mon Sep 17 00:00:00 2001
From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com>
Date: Thu, 12 May 2022 09:35:10 -0600
Subject: [PATCH 11/17] Added multiple metrics builders
---
receiver/kubeletstatsreceiver/config_test.go | 7 ++
.../internal/kubelet/accumulator.go | 48 ++++++------
.../internal/kubelet/accumulator_test.go | 14 +++-
.../internal/kubelet/metrics.go | 9 ++-
.../internal/kubelet/metrics_test.go | 75 ++++++++++---------
.../internal/metadata/metrics.go | 6 ++
receiver/kubeletstatsreceiver/scraper.go | 16 +++-
7 files changed, 109 insertions(+), 66 deletions(-)
diff --git a/receiver/kubeletstatsreceiver/config_test.go b/receiver/kubeletstatsreceiver/config_test.go
index 9d3b86643c37..e30e163cf22a 100644
--- a/receiver/kubeletstatsreceiver/config_test.go
+++ b/receiver/kubeletstatsreceiver/config_test.go
@@ -15,6 +15,7 @@
package kubeletstatsreceiver
import (
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
"path/filepath"
"reflect"
"testing"
@@ -59,6 +60,7 @@ func TestLoadConfig(t *testing.T) {
kubelet.PodMetricGroup,
kubelet.NodeMetricGroup,
},
+ Metrics: metadata.DefaultMetricsSettings(),
}, defaultCfg)
tlsCfg := cfg.Receivers[config.NewComponentIDWithName(typeStr, "tls")].(*Config)
@@ -86,6 +88,7 @@ func TestLoadConfig(t *testing.T) {
kubelet.PodMetricGroup,
kubelet.NodeMetricGroup,
},
+ Metrics: metadata.DefaultMetricsSettings(),
}, tlsCfg)
saCfg := cfg.Receivers[config.NewComponentIDWithName(typeStr, "sa")].(*Config)
@@ -105,6 +108,7 @@ func TestLoadConfig(t *testing.T) {
kubelet.PodMetricGroup,
kubelet.NodeMetricGroup,
},
+ Metrics: metadata.DefaultMetricsSettings(),
}, saCfg)
metadataCfg := cfg.Receivers[config.NewComponentIDWithName(typeStr, "metadata")].(*Config)
@@ -127,6 +131,7 @@ func TestLoadConfig(t *testing.T) {
kubelet.PodMetricGroup,
kubelet.NodeMetricGroup,
},
+ Metrics: metadata.DefaultMetricsSettings(),
}, metadataCfg)
metricGroupsCfg := cfg.Receivers[config.NewComponentIDWithName(typeStr, "metric_groups")].(*Config)
@@ -145,6 +150,7 @@ func TestLoadConfig(t *testing.T) {
kubelet.NodeMetricGroup,
kubelet.VolumeMetricGroup,
},
+ Metrics: metadata.DefaultMetricsSettings(),
}, metricGroupsCfg)
metadataWithK8sAPICfg := cfg.Receivers[config.NewComponentIDWithName(typeStr, "metadata_with_k8s_api")].(*Config)
@@ -167,6 +173,7 @@ func TestLoadConfig(t *testing.T) {
kubelet.NodeMetricGroup,
},
K8sAPIConfig: &k8sconfig.APIConfig{AuthType: k8sconfig.AuthTypeKubeConfig},
+ Metrics: metadata.DefaultMetricsSettings(),
}, metadataWithK8sAPICfg)
}
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go
index 210989903d0a..4251f0a562d9 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go
@@ -15,6 +15,7 @@
package kubelet // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/kubelet"
import (
+ "go.opentelemetry.io/collector/pdata/pmetric"
"time"
"go.opentelemetry.io/collector/pdata/pcommon"
@@ -43,26 +44,28 @@ var ValidMetricGroups = map[MetricGroup]bool{
}
type metricDataAccumulator struct {
+ m []pmetric.Metrics
metadata Metadata
logger *zap.Logger
metricGroupsToCollect map[MetricGroup]bool
time time.Time
- mb *metadata.MetricsBuilder
+ mbs *metadata.MetricsBuilders
}
func (a *metricDataAccumulator) nodeStats(s stats.NodeStats) {
if !a.metricGroupsToCollect[NodeMetricGroup] {
return
}
-
+ a.mbs.WithNodeStartTime.Reset(metadata.WithStartTime(pcommon.NewTimestampFromTime(s.StartTime.Time)))
currentTime := pcommon.NewTimestampFromTime(a.time)
- addCPUMetrics(a.mb, metadata.NodeCPUMetrics, s.CPU, currentTime)
- addMemoryMetrics(a.mb, metadata.NodeMemoryMetrics, s.Memory, currentTime)
- addFilesystemMetrics(a.mb, metadata.NodeFilesystemMetrics, s.Fs, currentTime)
- addNetworkMetrics(a.mb, metadata.NodeNetworkMetrics, s.Network, currentTime)
+
+ addCPUMetrics(a.mbs.WithNodeStartTime, metadata.NodeCPUMetrics, s.CPU, currentTime)
+ addMemoryMetrics(a.mbs.WithNodeStartTime, metadata.NodeMemoryMetrics, s.Memory, currentTime)
+ addFilesystemMetrics(a.mbs.WithNodeStartTime, metadata.NodeFilesystemMetrics, s.Fs, currentTime)
+ addNetworkMetrics(a.mbs.WithNodeStartTime, metadata.NodeNetworkMetrics, s.Network, currentTime)
// todo s.Runtime.ImageFs
- a.mb.EmitForResource(metadata.WithK8sNodeName(s.NodeName))
+ a.m = append(a.m, a.mbs.WithNodeStartTime.Emit(metadata.WithK8sNodeName(s.NodeName)))
}
func (a *metricDataAccumulator) podStats(s stats.PodStats) {
@@ -70,15 +73,16 @@ func (a *metricDataAccumulator) podStats(s stats.PodStats) {
return
}
+ a.mbs.WithPodStartTime.Reset(metadata.WithStartTime(pcommon.NewTimestampFromTime(s.StartTime.Time)))
currentTime := pcommon.NewTimestampFromTime(a.time)
- addCPUMetrics(a.mb, metadata.PodCPUMetrics, s.CPU, currentTime)
- addMemoryMetrics(a.mb, metadata.PodMemoryMetrics, s.Memory, currentTime)
- addFilesystemMetrics(a.mb, metadata.PodFilesystemMetrics, s.EphemeralStorage, currentTime)
- addNetworkMetrics(a.mb, metadata.PodNetworkMetrics, s.Network, currentTime)
-
- a.mb.EmitForResource(metadata.WithK8sPodUID(s.PodRef.UID),
- metadata.WithK8sPodName(s.PodRef.Name),
- metadata.WithK8sNamespaceName(s.PodRef.Namespace))
+
+ addCPUMetrics(a.mbs.WithPodStartTime, metadata.PodCPUMetrics, s.CPU, currentTime)
+ addMemoryMetrics(a.mbs.WithPodStartTime, metadata.PodMemoryMetrics, s.Memory, currentTime)
+ addFilesystemMetrics(a.mbs.WithPodStartTime, metadata.PodFilesystemMetrics, s.EphemeralStorage, currentTime)
+ addNetworkMetrics(a.mbs.WithPodStartTime, metadata.PodNetworkMetrics, s.Network, currentTime)
+
+ a.m = append(a.m, a.mbs.WithPodStartTime.Emit(metadata.WithK8sPodUID(s.PodRef.UID),
+ metadata.WithK8sPodName(s.PodRef.Name), metadata.WithK8sNamespaceName(s.PodRef.Namespace)))
}
func (a *metricDataAccumulator) containerStats(sPod stats.PodStats, s stats.ContainerStats) {
@@ -96,12 +100,14 @@ func (a *metricDataAccumulator) containerStats(sPod stats.PodStats, s stats.Cont
return
}
+ a.mbs.WithPodStartTime.Reset(metadata.WithStartTime(pcommon.NewTimestampFromTime(s.StartTime.Time)))
currentTime := pcommon.NewTimestampFromTime(a.time)
- addCPUMetrics(a.mb, metadata.ContainerCPUMetrics, s.CPU, currentTime)
- addMemoryMetrics(a.mb, metadata.ContainerMemoryMetrics, s.Memory, currentTime)
- addFilesystemMetrics(a.mb, metadata.ContainerFilesystemMetrics, s.Rootfs, currentTime)
- a.mb.EmitForResource(ro...)
+ addCPUMetrics(a.mbs.WithPodStartTime, metadata.ContainerCPUMetrics, s.CPU, currentTime)
+ addMemoryMetrics(a.mbs.WithPodStartTime, metadata.ContainerMemoryMetrics, s.Memory, currentTime)
+ addFilesystemMetrics(a.mbs.WithPodStartTime, metadata.ContainerFilesystemMetrics, s.Rootfs, currentTime)
+
+ a.m = append(a.m, a.mbs.WithPodStartTime.Emit(ro...))
}
func (a *metricDataAccumulator) volumeStats(sPod stats.PodStats, s stats.VolumeStats) {
@@ -120,7 +126,7 @@ func (a *metricDataAccumulator) volumeStats(sPod stats.PodStats, s stats.VolumeS
}
currentTime := pcommon.NewTimestampFromTime(a.time)
- addVolumeMetrics(a.mb, metadata.K8sVolumeMetrics, s, currentTime)
+ addVolumeMetrics(a.mbs.WithDefaultStartTime, metadata.K8sVolumeMetrics, s, currentTime)
- a.mb.EmitForResource(ro...)
+ a.m = append(a.m, a.mbs.WithDefaultStartTime.Emit(ro...))
}
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go
index 7a17e7752404..041f18019050 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go
@@ -212,12 +212,16 @@ func TestMetadataErrorCases(t *testing.T) {
metadata: tt.metadata,
logger: logger,
metricGroupsToCollect: tt.metricGroupsToCollect,
- mb: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ mbs: &metadata.MetricsBuilders{
+ WithNodeStartTime: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ WithPodStartTime: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ WithDefaultStartTime: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ },
}
tt.testScenario(acc)
- assert.Equal(t, tt.numMDs, acc.mb.Emit().MetricCount())
+ assert.Equal(t, tt.numMDs, len(acc.m))
require.Equal(t, tt.numLogs, logs.Len())
for i := 0; i < tt.numLogs; i++ {
assert.Equal(t, tt.logMessages[i], logs.All()[i].Message)
@@ -234,7 +238,11 @@ func TestNilHandling(t *testing.T) {
ContainerMetricGroup: true,
VolumeMetricGroup: true,
},
- mb: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ mbs: &metadata.MetricsBuilders{
+ WithNodeStartTime: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ WithPodStartTime: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ WithDefaultStartTime: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ },
}
assert.NotPanics(t, func() {
acc.nodeStats(stats.NodeStats{})
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metrics.go b/receiver/kubeletstatsreceiver/internal/kubelet/metrics.go
index bcad71538474..dfdfa5aebcce 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/metrics.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/metrics.go
@@ -15,6 +15,7 @@
package kubelet // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/kubelet"
import (
+ "go.opentelemetry.io/collector/pdata/pmetric"
"time"
"go.uber.org/zap"
@@ -23,19 +24,18 @@ import (
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
-func PrepareMetricsData(
+func MetricsData(
logger *zap.Logger, summary *stats.Summary,
metadata Metadata,
metricGroupsToCollect map[MetricGroup]bool,
- mb *metadata.MetricsBuilder) {
+ mbs *metadata.MetricsBuilders) []pmetric.Metrics {
acc := &metricDataAccumulator{
metadata: metadata,
logger: logger,
metricGroupsToCollect: metricGroupsToCollect,
time: time.Now(),
- mb: mb,
+ mbs: mbs,
}
-
acc.nodeStats(summary.Node)
for _, podStats := range summary.Pods {
acc.podStats(podStats)
@@ -49,4 +49,5 @@ func PrepareMetricsData(
acc.volumeStats(podStats, volumeStats)
}
}
+ return acc.m
}
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go
index 4798283afcf3..d2e73c4b84ec 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go
@@ -44,35 +44,38 @@ func TestMetricAccumulator(t *testing.T) {
metadataProvider := NewMetadataProvider(rc)
podsMetadata, _ := metadataProvider.Pods()
k8sMetadata := NewMetadata([]MetadataLabel{MetadataLabelContainerID}, podsMetadata, nil)
- mb := metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings())
- PrepareMetricsData(zap.NewNop(), summary, k8sMetadata, ValidMetricGroups, mb)
- requireMetricsOk(t, mb.Emit())
-
- mb.Reset()
- PrepareMetricsData(zap.NewNop(), summary, k8sMetadata, map[MetricGroup]bool{}, mb)
+ mbs := &metadata.MetricsBuilders{
+ WithNodeStartTime: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ WithPodStartTime: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ WithDefaultStartTime: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ }
+ requireMetricsOk(t, MetricsData(zap.NewNop(), summary, k8sMetadata, ValidMetricGroups, mbs))
// Disable all groups
- require.Equal(t, 0, mb.Emit().MetricCount())
+ mbs.WithNodeStartTime.Reset()
+ mbs.WithPodStartTime.Reset()
+ mbs.WithDefaultStartTime.Reset()
+ require.Equal(t, 0, len(MetricsData(zap.NewNop(), summary, k8sMetadata, map[MetricGroup]bool{}, mbs)))
}
-func requireMetricsOk(t *testing.T, md pmetric.Metrics) {
- for i := 0; i < md.ResourceMetrics().Len(); i++ {
- rm := md.ResourceMetrics().At(i)
- requireResourceOk(t, rm.Resource())
- for j := 0; j < rm.ScopeMetrics().Len(); j++ {
- ilm := rm.ScopeMetrics().At(j)
- require.Equal(t, "otelcol/kubeletstatsreceiver", ilm.Scope().Name())
- for k := 0; k < ilm.Metrics().Len(); k++ {
- requireMetricOk(t, ilm.Metrics().At(k))
+func requireMetricsOk(t *testing.T, mds []pmetric.Metrics) {
+ for _, md := range mds {
+ for i := 0; i < md.ResourceMetrics().Len(); i++ {
+ rm := md.ResourceMetrics().At(i)
+ requireResourceOk(t, rm.Resource())
+ for j := 0; j < rm.ScopeMetrics().Len(); j++ {
+ ilm := rm.ScopeMetrics().At(j)
+ require.Equal(t, "otelcol/kubeletstatsreceiver", ilm.Scope().Name())
+ for k := 0; k < ilm.Metrics().Len(); k++ {
+ requireMetricOk(t, ilm.Metrics().At(k))
+ }
}
}
}
-
}
func requireMetricOk(t *testing.T, m pmetric.Metric) {
require.NotZero(t, m.Name())
require.NotEqual(t, pmetric.MetricDataTypeNone, m.DataType())
-
switch m.DataType() {
case pmetric.MetricDataTypeGauge:
gauge := m.Gauge()
@@ -142,26 +145,27 @@ func requireContains(t *testing.T, metrics map[string][]pmetric.Metric, metricNa
}
func indexedFakeMetrics() map[string][]pmetric.Metric {
- md := fakeMetrics()
+ mds := fakeMetrics()
metrics := make(map[string][]pmetric.Metric)
-
- for i := 0; i < md.ResourceMetrics().Len(); i++ {
- rm := md.ResourceMetrics().At(i)
- for j := 0; j < rm.ScopeMetrics().Len(); j++ {
- ilm := rm.ScopeMetrics().At(j)
- for k := 0; k < ilm.Metrics().Len(); k++ {
- m := ilm.Metrics().At(k)
- metricName := m.Name()
- list := metrics[metricName]
- list = append(list, m)
- metrics[metricName] = list
+ for _, md := range mds {
+ for i := 0; i < md.ResourceMetrics().Len(); i++ {
+ rm := md.ResourceMetrics().At(i)
+ for j := 0; j < rm.ScopeMetrics().Len(); j++ {
+ ilm := rm.ScopeMetrics().At(j)
+ for k := 0; k < ilm.Metrics().Len(); k++ {
+ m := ilm.Metrics().At(k)
+ metricName := m.Name()
+ list := metrics[metricName]
+ list = append(list, m)
+ metrics[metricName] = list
+ }
}
}
}
return metrics
}
-func fakeMetrics() pmetric.Metrics {
+func fakeMetrics() []pmetric.Metrics {
rc := &fakeRestClient{}
statsProvider := NewStatsProvider(rc)
summary, _ := statsProvider.StatsSummary()
@@ -170,7 +174,10 @@ func fakeMetrics() pmetric.Metrics {
PodMetricGroup: true,
NodeMetricGroup: true,
}
- mb := metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings())
- PrepareMetricsData(zap.NewNop(), summary, Metadata{}, mgs, mb)
- return mb.Emit()
+ mbs := &metadata.MetricsBuilders{
+ WithNodeStartTime: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ WithPodStartTime: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ WithDefaultStartTime: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ }
+ return MetricsData(zap.NewNop(), summary, Metadata{}, mgs, mbs)
}
diff --git a/receiver/kubeletstatsreceiver/internal/metadata/metrics.go b/receiver/kubeletstatsreceiver/internal/metadata/metrics.go
index e0944109a716..a5c441aa9368 100644
--- a/receiver/kubeletstatsreceiver/internal/metadata/metrics.go
+++ b/receiver/kubeletstatsreceiver/internal/metadata/metrics.go
@@ -22,6 +22,12 @@ type RecordIntDataPointFunc func(*MetricsBuilder, pcommon.Timestamp, int64)
type RecordIntDataPointWithDirectionFunc func(*MetricsBuilder, pcommon.Timestamp, int64, string, AttributeDirection)
+type MetricsBuilders struct {
+ WithNodeStartTime *MetricsBuilder
+ WithPodStartTime *MetricsBuilder
+ WithDefaultStartTime *MetricsBuilder
+}
+
type CPUMetrics struct {
Time RecordDoubleDataPointFunc
Utilization RecordDoubleDataPointFunc
diff --git a/receiver/kubeletstatsreceiver/scraper.go b/receiver/kubeletstatsreceiver/scraper.go
index f047a49eb300..62a41f544415 100644
--- a/receiver/kubeletstatsreceiver/scraper.go
+++ b/receiver/kubeletstatsreceiver/scraper.go
@@ -48,7 +48,7 @@ type kubletScraper struct {
metricGroupsToCollect map[kubelet.MetricGroup]bool
k8sAPIClient kubernetes.Interface
cachedVolumeLabels map[string][]metadata.ResourceOption
- mb *metadata.MetricsBuilder
+ mbs *metadata.MetricsBuilders
}
func newKubletScraper(
@@ -65,7 +65,11 @@ func newKubletScraper(
metricGroupsToCollect: rOptions.metricGroupsToCollect,
k8sAPIClient: rOptions.k8sAPIClient,
cachedVolumeLabels: make(map[string][]metadata.ResourceOption),
- mb: metadata.NewMetricsBuilder(metricsConfig),
+ mbs: &metadata.MetricsBuilders{
+ WithNodeStartTime: metadata.NewMetricsBuilder(metricsConfig),
+ WithPodStartTime: metadata.NewMetricsBuilder(metricsConfig),
+ WithDefaultStartTime: metadata.NewMetricsBuilder(metricsConfig),
+ },
}
return scraperhelper.NewScraper(typeStr, ks.scrape)
}
@@ -88,8 +92,12 @@ func (r *kubletScraper) scrape(context.Context) (pmetric.Metrics, error) {
}
metadata := kubelet.NewMetadata(r.extraMetadataLabels, podsMetadata, r.detailedPVCLabelsSetter())
- kubelet.PrepareMetricsData(r.logger, summary, metadata, r.metricGroupsToCollect, r.mb)
- return r.mb.Emit(), nil
+ mds := kubelet.MetricsData(r.logger, summary, metadata, r.metricGroupsToCollect, r.mbs)
+ md := pmetric.NewMetrics()
+ for i := range mds {
+ mds[i].ResourceMetrics().MoveAndAppendTo(md.ResourceMetrics())
+ }
+ return md, nil
}
func (r *kubletScraper) detailedPVCLabelsSetter() func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceOption, error) {
From 7f71110571824dea422816618d2d1b128c90c5d1 Mon Sep 17 00:00:00 2001
From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com>
Date: Thu, 12 May 2022 09:49:34 -0600
Subject: [PATCH 12/17] Fix lint
---
receiver/kubeletstatsreceiver/config_test.go | 2 +-
receiver/kubeletstatsreceiver/factory.go | 2 +-
receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go | 2 +-
receiver/kubeletstatsreceiver/internal/kubelet/metrics.go | 2 +-
4 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/receiver/kubeletstatsreceiver/config_test.go b/receiver/kubeletstatsreceiver/config_test.go
index e30e163cf22a..9ac9b5f0062d 100644
--- a/receiver/kubeletstatsreceiver/config_test.go
+++ b/receiver/kubeletstatsreceiver/config_test.go
@@ -15,7 +15,6 @@
package kubeletstatsreceiver
import (
- "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
"path/filepath"
"reflect"
"testing"
@@ -32,6 +31,7 @@ import (
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig"
kube "github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/kubelet"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
func TestLoadConfig(t *testing.T) {
diff --git a/receiver/kubeletstatsreceiver/factory.go b/receiver/kubeletstatsreceiver/factory.go
index 66a3f1af404a..a5af38686f75 100644
--- a/receiver/kubeletstatsreceiver/factory.go
+++ b/receiver/kubeletstatsreceiver/factory.go
@@ -16,7 +16,6 @@ package kubeletstatsreceiver // import "github.com/open-telemetry/opentelemetry-
import (
"context"
- "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
"time"
"go.opentelemetry.io/collector/component"
@@ -28,6 +27,7 @@ import (
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig"
kube "github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/kubelet"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
const (
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go
index 4251f0a562d9..fda6f0824836 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go
@@ -15,10 +15,10 @@
package kubelet // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/kubelet"
import (
- "go.opentelemetry.io/collector/pdata/pmetric"
"time"
"go.opentelemetry.io/collector/pdata/pcommon"
+ "go.opentelemetry.io/collector/pdata/pmetric"
"go.uber.org/zap"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metrics.go b/receiver/kubeletstatsreceiver/internal/kubelet/metrics.go
index dfdfa5aebcce..f01b598db4b3 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/metrics.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/metrics.go
@@ -15,9 +15,9 @@
package kubelet // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/kubelet"
import (
- "go.opentelemetry.io/collector/pdata/pmetric"
"time"
+ "go.opentelemetry.io/collector/pdata/pmetric"
"go.uber.org/zap"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
From d81e77a9c0ccdd92214a2059272f51238cb7728f Mon Sep 17 00:00:00 2001
From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com>
Date: Thu, 12 May 2022 10:10:38 -0600
Subject: [PATCH 13/17] Regenerated v2 metrics
---
.../internal/metadata/generated_metrics_v2.go | 30 +++++--------------
1 file changed, 8 insertions(+), 22 deletions(-)
diff --git a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_v2.go b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_v2.go
index d36dabc48ab2..49fb0f1ca595 100644
--- a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_v2.go
+++ b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_v2.go
@@ -1324,8 +1324,8 @@ func (m *metricK8sNodeNetworkErrors) recordDataPoint(start pcommon.Timestamp, ts
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntVal(val)
- dp.Attributes().Insert(A.Interface, pcommon.NewValueString(interfaceAttributeValue))
- dp.Attributes().Insert(A.Direction, pcommon.NewValueString(directionAttributeValue))
+ dp.Attributes().Insert("interface", pcommon.NewValueString(interfaceAttributeValue))
+ dp.Attributes().Insert("direction", pcommon.NewValueString(directionAttributeValue))
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1378,8 +1378,8 @@ func (m *metricK8sNodeNetworkIo) recordDataPoint(start pcommon.Timestamp, ts pco
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntVal(val)
- dp.Attributes().Insert(A.Interface, pcommon.NewValueString(interfaceAttributeValue))
- dp.Attributes().Insert(A.Direction, pcommon.NewValueString(directionAttributeValue))
+ dp.Attributes().Insert("interface", pcommon.NewValueString(interfaceAttributeValue))
+ dp.Attributes().Insert("direction", pcommon.NewValueString(directionAttributeValue))
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1973,8 +1973,8 @@ func (m *metricK8sPodNetworkErrors) recordDataPoint(start pcommon.Timestamp, ts
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntVal(val)
- dp.Attributes().Insert(A.Interface, pcommon.NewValueString(interfaceAttributeValue))
- dp.Attributes().Insert(A.Direction, pcommon.NewValueString(directionAttributeValue))
+ dp.Attributes().Insert("interface", pcommon.NewValueString(interfaceAttributeValue))
+ dp.Attributes().Insert("direction", pcommon.NewValueString(directionAttributeValue))
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2027,8 +2027,8 @@ func (m *metricK8sPodNetworkIo) recordDataPoint(start pcommon.Timestamp, ts pcom
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntVal(val)
- dp.Attributes().Insert(A.Interface, pcommon.NewValueString(interfaceAttributeValue))
- dp.Attributes().Insert(A.Direction, pcommon.NewValueString(directionAttributeValue))
+ dp.Attributes().Insert("interface", pcommon.NewValueString(interfaceAttributeValue))
+ dp.Attributes().Insert("direction", pcommon.NewValueString(directionAttributeValue))
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2822,17 +2822,3 @@ func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) {
op(mb)
}
}
-
-// Attributes contains the possible metric attributes that can be used.
-var Attributes = struct {
- // Direction (Direction of flow of bytes/operations (receive or transmit).)
- Direction string
- // Interface (Name of the network interface.)
- Interface string
-}{
- "direction",
- "interface",
-}
-
-// A is an alias for Attributes.
-var A = Attributes
From 5cf3f2ddddeab5545733ea932171808b49c7cd75 Mon Sep 17 00:00:00 2001
From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com>
Date: Thu, 12 May 2022 15:58:45 -0600
Subject: [PATCH 14/17] Rename metric builders
---
.../internal/kubelet/accumulator.go | 38 +++++++++----------
.../internal/kubelet/accumulator_test.go | 12 +++---
.../internal/kubelet/metrics_test.go | 18 ++++-----
.../internal/metadata/metrics.go | 6 +--
receiver/kubeletstatsreceiver/scraper.go | 6 +--
5 files changed, 40 insertions(+), 40 deletions(-)
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go
index fda6f0824836..5ea5281f37a5 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go
@@ -56,16 +56,16 @@ func (a *metricDataAccumulator) nodeStats(s stats.NodeStats) {
if !a.metricGroupsToCollect[NodeMetricGroup] {
return
}
- a.mbs.WithNodeStartTime.Reset(metadata.WithStartTime(pcommon.NewTimestampFromTime(s.StartTime.Time)))
+ a.mbs.NodeMetricsBuilder.Reset(metadata.WithStartTime(pcommon.NewTimestampFromTime(s.StartTime.Time)))
currentTime := pcommon.NewTimestampFromTime(a.time)
- addCPUMetrics(a.mbs.WithNodeStartTime, metadata.NodeCPUMetrics, s.CPU, currentTime)
- addMemoryMetrics(a.mbs.WithNodeStartTime, metadata.NodeMemoryMetrics, s.Memory, currentTime)
- addFilesystemMetrics(a.mbs.WithNodeStartTime, metadata.NodeFilesystemMetrics, s.Fs, currentTime)
- addNetworkMetrics(a.mbs.WithNodeStartTime, metadata.NodeNetworkMetrics, s.Network, currentTime)
+ addCPUMetrics(a.mbs.NodeMetricsBuilder, metadata.NodeCPUMetrics, s.CPU, currentTime)
+ addMemoryMetrics(a.mbs.NodeMetricsBuilder, metadata.NodeMemoryMetrics, s.Memory, currentTime)
+ addFilesystemMetrics(a.mbs.NodeMetricsBuilder, metadata.NodeFilesystemMetrics, s.Fs, currentTime)
+ addNetworkMetrics(a.mbs.NodeMetricsBuilder, metadata.NodeNetworkMetrics, s.Network, currentTime)
// todo s.Runtime.ImageFs
- a.m = append(a.m, a.mbs.WithNodeStartTime.Emit(metadata.WithK8sNodeName(s.NodeName)))
+ a.m = append(a.m, a.mbs.NodeMetricsBuilder.Emit(metadata.WithK8sNodeName(s.NodeName)))
}
func (a *metricDataAccumulator) podStats(s stats.PodStats) {
@@ -73,15 +73,15 @@ func (a *metricDataAccumulator) podStats(s stats.PodStats) {
return
}
- a.mbs.WithPodStartTime.Reset(metadata.WithStartTime(pcommon.NewTimestampFromTime(s.StartTime.Time)))
+ a.mbs.PodMetricsBuilder.Reset(metadata.WithStartTime(pcommon.NewTimestampFromTime(s.StartTime.Time)))
currentTime := pcommon.NewTimestampFromTime(a.time)
- addCPUMetrics(a.mbs.WithPodStartTime, metadata.PodCPUMetrics, s.CPU, currentTime)
- addMemoryMetrics(a.mbs.WithPodStartTime, metadata.PodMemoryMetrics, s.Memory, currentTime)
- addFilesystemMetrics(a.mbs.WithPodStartTime, metadata.PodFilesystemMetrics, s.EphemeralStorage, currentTime)
- addNetworkMetrics(a.mbs.WithPodStartTime, metadata.PodNetworkMetrics, s.Network, currentTime)
+ addCPUMetrics(a.mbs.PodMetricsBuilder, metadata.PodCPUMetrics, s.CPU, currentTime)
+ addMemoryMetrics(a.mbs.PodMetricsBuilder, metadata.PodMemoryMetrics, s.Memory, currentTime)
+ addFilesystemMetrics(a.mbs.PodMetricsBuilder, metadata.PodFilesystemMetrics, s.EphemeralStorage, currentTime)
+ addNetworkMetrics(a.mbs.PodMetricsBuilder, metadata.PodNetworkMetrics, s.Network, currentTime)
- a.m = append(a.m, a.mbs.WithPodStartTime.Emit(metadata.WithK8sPodUID(s.PodRef.UID),
+ a.m = append(a.m, a.mbs.PodMetricsBuilder.Emit(metadata.WithK8sPodUID(s.PodRef.UID),
metadata.WithK8sPodName(s.PodRef.Name), metadata.WithK8sNamespaceName(s.PodRef.Namespace)))
}
@@ -100,14 +100,14 @@ func (a *metricDataAccumulator) containerStats(sPod stats.PodStats, s stats.Cont
return
}
- a.mbs.WithPodStartTime.Reset(metadata.WithStartTime(pcommon.NewTimestampFromTime(s.StartTime.Time)))
+ a.mbs.PodMetricsBuilder.Reset(metadata.WithStartTime(pcommon.NewTimestampFromTime(s.StartTime.Time)))
currentTime := pcommon.NewTimestampFromTime(a.time)
- addCPUMetrics(a.mbs.WithPodStartTime, metadata.ContainerCPUMetrics, s.CPU, currentTime)
- addMemoryMetrics(a.mbs.WithPodStartTime, metadata.ContainerMemoryMetrics, s.Memory, currentTime)
- addFilesystemMetrics(a.mbs.WithPodStartTime, metadata.ContainerFilesystemMetrics, s.Rootfs, currentTime)
+ addCPUMetrics(a.mbs.PodMetricsBuilder, metadata.ContainerCPUMetrics, s.CPU, currentTime)
+ addMemoryMetrics(a.mbs.PodMetricsBuilder, metadata.ContainerMemoryMetrics, s.Memory, currentTime)
+ addFilesystemMetrics(a.mbs.PodMetricsBuilder, metadata.ContainerFilesystemMetrics, s.Rootfs, currentTime)
- a.m = append(a.m, a.mbs.WithPodStartTime.Emit(ro...))
+ a.m = append(a.m, a.mbs.PodMetricsBuilder.Emit(ro...))
}
func (a *metricDataAccumulator) volumeStats(sPod stats.PodStats, s stats.VolumeStats) {
@@ -126,7 +126,7 @@ func (a *metricDataAccumulator) volumeStats(sPod stats.PodStats, s stats.VolumeS
}
currentTime := pcommon.NewTimestampFromTime(a.time)
- addVolumeMetrics(a.mbs.WithDefaultStartTime, metadata.K8sVolumeMetrics, s, currentTime)
+ addVolumeMetrics(a.mbs.OtherMetricsBuilder, metadata.K8sVolumeMetrics, s, currentTime)
- a.m = append(a.m, a.mbs.WithDefaultStartTime.Emit(ro...))
+ a.m = append(a.m, a.mbs.OtherMetricsBuilder.Emit(ro...))
}
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go
index 041f18019050..bf00e7fc50f2 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go
@@ -213,9 +213,9 @@ func TestMetadataErrorCases(t *testing.T) {
logger: logger,
metricGroupsToCollect: tt.metricGroupsToCollect,
mbs: &metadata.MetricsBuilders{
- WithNodeStartTime: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
- WithPodStartTime: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
- WithDefaultStartTime: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ NodeMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ PodMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ OtherMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
},
}
@@ -239,9 +239,9 @@ func TestNilHandling(t *testing.T) {
VolumeMetricGroup: true,
},
mbs: &metadata.MetricsBuilders{
- WithNodeStartTime: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
- WithPodStartTime: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
- WithDefaultStartTime: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ NodeMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ PodMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ OtherMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
},
}
assert.NotPanics(t, func() {
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go
index d2e73c4b84ec..ed94c17a06f4 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go
@@ -45,15 +45,15 @@ func TestMetricAccumulator(t *testing.T) {
podsMetadata, _ := metadataProvider.Pods()
k8sMetadata := NewMetadata([]MetadataLabel{MetadataLabelContainerID}, podsMetadata, nil)
mbs := &metadata.MetricsBuilders{
- WithNodeStartTime: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
- WithPodStartTime: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
- WithDefaultStartTime: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ NodeMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ PodMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ OtherMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
}
requireMetricsOk(t, MetricsData(zap.NewNop(), summary, k8sMetadata, ValidMetricGroups, mbs))
// Disable all groups
- mbs.WithNodeStartTime.Reset()
- mbs.WithPodStartTime.Reset()
- mbs.WithDefaultStartTime.Reset()
+ mbs.NodeMetricsBuilder.Reset()
+ mbs.PodMetricsBuilder.Reset()
+ mbs.OtherMetricsBuilder.Reset()
require.Equal(t, 0, len(MetricsData(zap.NewNop(), summary, k8sMetadata, map[MetricGroup]bool{}, mbs)))
}
@@ -175,9 +175,9 @@ func fakeMetrics() []pmetric.Metrics {
NodeMetricGroup: true,
}
mbs := &metadata.MetricsBuilders{
- WithNodeStartTime: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
- WithPodStartTime: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
- WithDefaultStartTime: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ NodeMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ PodMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ OtherMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
}
return MetricsData(zap.NewNop(), summary, Metadata{}, mgs, mbs)
}
diff --git a/receiver/kubeletstatsreceiver/internal/metadata/metrics.go b/receiver/kubeletstatsreceiver/internal/metadata/metrics.go
index a5c441aa9368..4eb4aceef7c4 100644
--- a/receiver/kubeletstatsreceiver/internal/metadata/metrics.go
+++ b/receiver/kubeletstatsreceiver/internal/metadata/metrics.go
@@ -23,9 +23,9 @@ type RecordIntDataPointFunc func(*MetricsBuilder, pcommon.Timestamp, int64)
type RecordIntDataPointWithDirectionFunc func(*MetricsBuilder, pcommon.Timestamp, int64, string, AttributeDirection)
type MetricsBuilders struct {
- WithNodeStartTime *MetricsBuilder
- WithPodStartTime *MetricsBuilder
- WithDefaultStartTime *MetricsBuilder
+ NodeMetricsBuilder *MetricsBuilder
+ PodMetricsBuilder *MetricsBuilder
+ OtherMetricsBuilder *MetricsBuilder
}
type CPUMetrics struct {
diff --git a/receiver/kubeletstatsreceiver/scraper.go b/receiver/kubeletstatsreceiver/scraper.go
index 62a41f544415..6d33468f73b6 100644
--- a/receiver/kubeletstatsreceiver/scraper.go
+++ b/receiver/kubeletstatsreceiver/scraper.go
@@ -66,9 +66,9 @@ func newKubletScraper(
k8sAPIClient: rOptions.k8sAPIClient,
cachedVolumeLabels: make(map[string][]metadata.ResourceOption),
mbs: &metadata.MetricsBuilders{
- WithNodeStartTime: metadata.NewMetricsBuilder(metricsConfig),
- WithPodStartTime: metadata.NewMetricsBuilder(metricsConfig),
- WithDefaultStartTime: metadata.NewMetricsBuilder(metricsConfig),
+ NodeMetricsBuilder: metadata.NewMetricsBuilder(metricsConfig),
+ PodMetricsBuilder: metadata.NewMetricsBuilder(metricsConfig),
+ OtherMetricsBuilder: metadata.NewMetricsBuilder(metricsConfig),
},
}
return scraperhelper.NewScraper(typeStr, ks.scrape)
From 9cc2c6e7737c1d46b248c0d9ac39fa706af6dcee Mon Sep 17 00:00:00 2001
From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com>
Date: Fri, 13 May 2022 09:25:23 -0600
Subject: [PATCH 15/17] Updated to set new start time.
---
.../internal/kubelet/accumulator.go | 28 ++--
.../internal/kubelet/accumulator_test.go | 11 +-
.../internal/kubelet/metadata.go | 8 +-
.../internal/kubelet/metadata_test.go | 14 +-
.../internal/kubelet/metrics_test.go | 14 +-
.../internal/kubelet/resource.go | 10 +-
.../internal/kubelet/volume.go | 30 ++--
.../internal/kubelet/volume_test.go | 17 +--
.../internal/metadata/generated_metrics_v2.go | 129 ++++++++++--------
.../internal/metadata/metrics.go | 7 +-
receiver/kubeletstatsreceiver/scraper.go | 15 +-
11 files changed, 157 insertions(+), 126 deletions(-)
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go
index 5ea5281f37a5..b82e14e5564e 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go
@@ -56,16 +56,18 @@ func (a *metricDataAccumulator) nodeStats(s stats.NodeStats) {
if !a.metricGroupsToCollect[NodeMetricGroup] {
return
}
- a.mbs.NodeMetricsBuilder.Reset(metadata.WithStartTime(pcommon.NewTimestampFromTime(s.StartTime.Time)))
- currentTime := pcommon.NewTimestampFromTime(a.time)
+ currentTime := pcommon.NewTimestampFromTime(a.time)
addCPUMetrics(a.mbs.NodeMetricsBuilder, metadata.NodeCPUMetrics, s.CPU, currentTime)
addMemoryMetrics(a.mbs.NodeMetricsBuilder, metadata.NodeMemoryMetrics, s.Memory, currentTime)
addFilesystemMetrics(a.mbs.NodeMetricsBuilder, metadata.NodeFilesystemMetrics, s.Fs, currentTime)
addNetworkMetrics(a.mbs.NodeMetricsBuilder, metadata.NodeNetworkMetrics, s.Network, currentTime)
// todo s.Runtime.ImageFs
- a.m = append(a.m, a.mbs.NodeMetricsBuilder.Emit(metadata.WithK8sNodeName(s.NodeName)))
+ a.m = append(a.m, a.mbs.NodeMetricsBuilder.Emit(
+ metadata.WithStartTimeOverride(pcommon.NewTimestampFromTime(s.StartTime.Time)),
+ metadata.WithK8sNodeName(s.NodeName),
+ ))
}
func (a *metricDataAccumulator) podStats(s stats.PodStats) {
@@ -73,16 +75,18 @@ func (a *metricDataAccumulator) podStats(s stats.PodStats) {
return
}
- a.mbs.PodMetricsBuilder.Reset(metadata.WithStartTime(pcommon.NewTimestampFromTime(s.StartTime.Time)))
currentTime := pcommon.NewTimestampFromTime(a.time)
-
addCPUMetrics(a.mbs.PodMetricsBuilder, metadata.PodCPUMetrics, s.CPU, currentTime)
addMemoryMetrics(a.mbs.PodMetricsBuilder, metadata.PodMemoryMetrics, s.Memory, currentTime)
addFilesystemMetrics(a.mbs.PodMetricsBuilder, metadata.PodFilesystemMetrics, s.EphemeralStorage, currentTime)
addNetworkMetrics(a.mbs.PodMetricsBuilder, metadata.PodNetworkMetrics, s.Network, currentTime)
- a.m = append(a.m, a.mbs.PodMetricsBuilder.Emit(metadata.WithK8sPodUID(s.PodRef.UID),
- metadata.WithK8sPodName(s.PodRef.Name), metadata.WithK8sNamespaceName(s.PodRef.Namespace)))
+ a.m = append(a.m, a.mbs.PodMetricsBuilder.Emit(
+ metadata.WithStartTimeOverride(pcommon.NewTimestampFromTime(s.StartTime.Time)),
+ metadata.WithK8sPodUID(s.PodRef.UID),
+ metadata.WithK8sPodName(s.PodRef.Name),
+ metadata.WithK8sNamespaceName(s.PodRef.Namespace),
+ ))
}
func (a *metricDataAccumulator) containerStats(sPod stats.PodStats, s stats.ContainerStats) {
@@ -100,14 +104,12 @@ func (a *metricDataAccumulator) containerStats(sPod stats.PodStats, s stats.Cont
return
}
- a.mbs.PodMetricsBuilder.Reset(metadata.WithStartTime(pcommon.NewTimestampFromTime(s.StartTime.Time)))
currentTime := pcommon.NewTimestampFromTime(a.time)
+ addCPUMetrics(a.mbs.ContainerMetricsBuilder, metadata.ContainerCPUMetrics, s.CPU, currentTime)
+ addMemoryMetrics(a.mbs.ContainerMetricsBuilder, metadata.ContainerMemoryMetrics, s.Memory, currentTime)
+ addFilesystemMetrics(a.mbs.ContainerMetricsBuilder, metadata.ContainerFilesystemMetrics, s.Rootfs, currentTime)
- addCPUMetrics(a.mbs.PodMetricsBuilder, metadata.ContainerCPUMetrics, s.CPU, currentTime)
- addMemoryMetrics(a.mbs.PodMetricsBuilder, metadata.ContainerMemoryMetrics, s.Memory, currentTime)
- addFilesystemMetrics(a.mbs.PodMetricsBuilder, metadata.ContainerFilesystemMetrics, s.Rootfs, currentTime)
-
- a.m = append(a.m, a.mbs.PodMetricsBuilder.Emit(ro...))
+ a.m = append(a.m, a.mbs.ContainerMetricsBuilder.Emit(ro...))
}
func (a *metricDataAccumulator) volumeStats(sPod stats.PodStats, s stats.VolumeStats) {
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go
index bf00e7fc50f2..b05428d1ced2 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go
@@ -41,7 +41,7 @@ func TestMetadataErrorCases(t *testing.T) {
numMDs int
numLogs int
logMessages []string
- detailedPVCLabelsSetterOverride func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceOption, error)
+ detailedPVCLabelsSetterOverride func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceMetricsOption, error)
}{
{
name: "Fails to get container metadata",
@@ -178,7 +178,7 @@ func TestMetadataErrorCases(t *testing.T) {
},
},
}, nil),
- detailedPVCLabelsSetterOverride: func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceOption, error) {
+ detailedPVCLabelsSetterOverride: func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceMetricsOption, error) {
// Mock failure cases.
return nil, errors.New("")
},
@@ -239,9 +239,10 @@ func TestNilHandling(t *testing.T) {
VolumeMetricGroup: true,
},
mbs: &metadata.MetricsBuilders{
- NodeMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
- PodMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
- OtherMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ NodeMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ PodMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ ContainerMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ OtherMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
},
}
assert.NotPanics(t, func() {
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go b/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go
index 2ce738ab8821..bdb16b804c0f 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go
@@ -59,12 +59,12 @@ func ValidateMetadataLabelsConfig(labels []MetadataLabel) error {
type Metadata struct {
Labels map[MetadataLabel]bool
PodsMetadata *v1.PodList
- DetailedPVCResourceGetter func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceOption, error)
+ DetailedPVCResourceGetter func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceMetricsOption, error)
}
func NewMetadata(
labels []MetadataLabel, podsMetadata *v1.PodList,
- detailedPVCResourceGetter func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceOption, error)) Metadata {
+ detailedPVCResourceGetter func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceMetricsOption, error)) Metadata {
return Metadata{
Labels: getLabelsMap(labels),
PodsMetadata: podsMetadata,
@@ -82,7 +82,7 @@ func getLabelsMap(metadataLabels []MetadataLabel) map[MetadataLabel]bool {
// getExtraResources gets extra resources based on provided metadata label.
func (m *Metadata) getExtraResources(podRef stats.PodReference, extraMetadataLabel MetadataLabel,
- extraMetadataFrom string) ([]metadata.ResourceOption, error) {
+ extraMetadataFrom string) ([]metadata.ResourceMetricsOption, error) {
// Ensure MetadataLabel exists before proceeding.
if !m.Labels[extraMetadataLabel] || len(m.Labels) == 0 {
return nil, nil
@@ -99,7 +99,7 @@ func (m *Metadata) getExtraResources(podRef stats.PodReference, extraMetadataLab
if err != nil {
return nil, err
}
- return []metadata.ResourceOption{metadata.WithContainerID(containerID)}, nil
+ return []metadata.ResourceMetricsOption{metadata.WithContainerID(containerID)}, nil
case MetadataLabelVolumeType:
volume, err := m.getPodVolume(podRef.UID, extraMetadataFrom)
if err != nil {
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go
index b64cbc9e79f8..adfa0b2e6795 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go
@@ -20,7 +20,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "go.opentelemetry.io/collector/pdata/pcommon"
+ "go.opentelemetry.io/collector/pdata/pmetric"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
@@ -171,14 +171,14 @@ func TestSetExtraLabels(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
ro, err := tt.metadata.getExtraResources(stats.PodReference{UID: tt.args[0]}, MetadataLabel(tt.args[1]), tt.args[2])
- r := pcommon.NewResource()
+ r := pmetric.NewResourceMetrics()
for _, op := range ro {
op(r)
}
if tt.wantError == "" {
require.NoError(t, err)
- temp := r.Attributes().AsRaw()
+ temp := r.Resource().Attributes().AsRaw()
assert.EqualValues(t, tt.want, temp)
} else {
assert.Equal(t, tt.wantError, err.Error())
@@ -333,17 +333,17 @@ func TestSetExtraLabelsForVolumeTypes(t *testing.T) {
},
},
},
- }, func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceOption, error) {
+ }, func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceMetricsOption, error) {
return nil, nil
})
ro, _ := metadata.getExtraResources(stats.PodReference{UID: tt.args[0]}, MetadataLabel(tt.args[1]), volName)
- r := pcommon.NewResource()
+ rm := pmetric.NewResourceMetrics()
for _, op := range ro {
- op(r)
+ op(rm)
}
- assert.Equal(t, tt.want, r.Attributes().AsRaw())
+ assert.Equal(t, tt.want, rm.Resource().Attributes().AsRaw())
})
}
}
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go
index ed94c17a06f4..43f3a9e7b0d0 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go
@@ -45,9 +45,10 @@ func TestMetricAccumulator(t *testing.T) {
podsMetadata, _ := metadataProvider.Pods()
k8sMetadata := NewMetadata([]MetadataLabel{MetadataLabelContainerID}, podsMetadata, nil)
mbs := &metadata.MetricsBuilders{
- NodeMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
- PodMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
- OtherMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ NodeMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ PodMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ ContainerMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ OtherMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
}
requireMetricsOk(t, MetricsData(zap.NewNop(), summary, k8sMetadata, ValidMetricGroups, mbs))
// Disable all groups
@@ -175,9 +176,10 @@ func fakeMetrics() []pmetric.Metrics {
NodeMetricGroup: true,
}
mbs := &metadata.MetricsBuilders{
- NodeMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
- PodMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
- OtherMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ NodeMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ PodMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ ContainerMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ OtherMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
}
return MetricsData(zap.NewNop(), summary, Metadata{}, mgs, mbs)
}
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/resource.go b/receiver/kubeletstatsreceiver/internal/kubelet/resource.go
index a1a258f0410a..5dcc152b9fec 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/resource.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/resource.go
@@ -17,13 +17,15 @@ package kubelet // import "github.com/open-telemetry/opentelemetry-collector-con
import (
"fmt"
+ "go.opentelemetry.io/collector/pdata/pcommon"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
-func getContainerResourceOptions(sPod stats.PodStats, sContainer stats.ContainerStats, k8sMetadata Metadata) ([]metadata.ResourceOption, error) {
- ro := []metadata.ResourceOption{
+func getContainerResourceOptions(sPod stats.PodStats, sContainer stats.ContainerStats, k8sMetadata Metadata) ([]metadata.ResourceMetricsOption, error) {
+ ro := []metadata.ResourceMetricsOption{
+ metadata.WithStartTimeOverride(pcommon.NewTimestampFromTime(sContainer.StartTime.Time)),
metadata.WithK8sPodUID(sPod.PodRef.UID),
metadata.WithK8sPodName(sPod.PodRef.Name),
metadata.WithK8sNamespaceName(sPod.PodRef.Namespace),
@@ -40,8 +42,8 @@ func getContainerResourceOptions(sPod stats.PodStats, sContainer stats.Container
return ro, nil
}
-func getVolumeResourceOptions(sPod stats.PodStats, vs stats.VolumeStats, k8sMetadata Metadata) ([]metadata.ResourceOption, error) {
- ro := []metadata.ResourceOption{
+func getVolumeResourceOptions(sPod stats.PodStats, vs stats.VolumeStats, k8sMetadata Metadata) ([]metadata.ResourceMetricsOption, error) {
+ ro := []metadata.ResourceMetricsOption{
metadata.WithK8sPodUID(sPod.PodRef.UID),
metadata.WithK8sPodName(sPod.PodRef.Name),
metadata.WithK8sNamespaceName(sPod.PodRef.Namespace),
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/volume.go b/receiver/kubeletstatsreceiver/internal/kubelet/volume.go
index f94d24613b77..914a311ae762 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/volume.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/volume.go
@@ -32,22 +32,22 @@ func addVolumeMetrics(mb *metadata.MetricsBuilder, volumeMetrics metadata.Volume
recordIntDataPoint(mb, volumeMetrics.InodesUsed, s.InodesUsed, currentTime)
}
-func getResourcesFromVolume(volume v1.Volume) []metadata.ResourceOption {
+func getResourcesFromVolume(volume v1.Volume) []metadata.ResourceMetricsOption {
switch {
// TODO: Support more types
case volume.ConfigMap != nil:
- return []metadata.ResourceOption{metadata.WithK8sVolumeType(labelValueConfigMapVolume)}
+ return []metadata.ResourceMetricsOption{metadata.WithK8sVolumeType(labelValueConfigMapVolume)}
case volume.DownwardAPI != nil:
- return []metadata.ResourceOption{metadata.WithK8sVolumeType(labelValueDownwardAPIVolume)}
+ return []metadata.ResourceMetricsOption{metadata.WithK8sVolumeType(labelValueDownwardAPIVolume)}
case volume.EmptyDir != nil:
- return []metadata.ResourceOption{metadata.WithK8sVolumeType(labelValueEmptyDirVolume)}
+ return []metadata.ResourceMetricsOption{metadata.WithK8sVolumeType(labelValueEmptyDirVolume)}
case volume.Secret != nil:
- return []metadata.ResourceOption{metadata.WithK8sVolumeType(labelValueSecretVolume)}
+ return []metadata.ResourceMetricsOption{metadata.WithK8sVolumeType(labelValueSecretVolume)}
case volume.PersistentVolumeClaim != nil:
- return []metadata.ResourceOption{metadata.WithK8sVolumeType(labelValuePersistentVolumeClaim),
+ return []metadata.ResourceMetricsOption{metadata.WithK8sVolumeType(labelValuePersistentVolumeClaim),
metadata.WithK8sPersistentvolumeclaimName(volume.PersistentVolumeClaim.ClaimName)}
case volume.HostPath != nil:
- return []metadata.ResourceOption{metadata.WithK8sVolumeType(labelValueHostPathVolume)}
+ return []metadata.ResourceMetricsOption{metadata.WithK8sVolumeType(labelValueHostPathVolume)}
case volume.AWSElasticBlockStore != nil:
return awsElasticBlockStoreDims(*volume.AWSElasticBlockStore)
case volume.GCEPersistentDisk != nil:
@@ -58,11 +58,11 @@ func getResourcesFromVolume(volume v1.Volume) []metadata.ResourceOption {
return nil
}
-func GetPersistentVolumeLabels(pv v1.PersistentVolumeSource) []metadata.ResourceOption {
+func GetPersistentVolumeLabels(pv v1.PersistentVolumeSource) []metadata.ResourceMetricsOption {
// TODO: Support more types
switch {
case pv.Local != nil:
- return []metadata.ResourceOption{metadata.WithK8sVolumeType(labelValueLocalVolume)}
+ return []metadata.ResourceMetricsOption{metadata.WithK8sVolumeType(labelValueLocalVolume)}
case pv.AWSElasticBlockStore != nil:
return awsElasticBlockStoreDims(*pv.AWSElasticBlockStore)
case pv.GCEPersistentDisk != nil:
@@ -81,8 +81,8 @@ func GetPersistentVolumeLabels(pv v1.PersistentVolumeSource) []metadata.Resource
return nil
}
-func awsElasticBlockStoreDims(vs v1.AWSElasticBlockStoreVolumeSource) []metadata.ResourceOption {
- return []metadata.ResourceOption{
+func awsElasticBlockStoreDims(vs v1.AWSElasticBlockStoreVolumeSource) []metadata.ResourceMetricsOption {
+ return []metadata.ResourceMetricsOption{
metadata.WithK8sVolumeType(labelValueAWSEBSVolume),
// AWS specific labels.
metadata.WithAwsVolumeID(vs.VolumeID),
@@ -91,8 +91,8 @@ func awsElasticBlockStoreDims(vs v1.AWSElasticBlockStoreVolumeSource) []metadata
}
}
-func gcePersistentDiskDims(vs v1.GCEPersistentDiskVolumeSource) []metadata.ResourceOption {
- return []metadata.ResourceOption{
+func gcePersistentDiskDims(vs v1.GCEPersistentDiskVolumeSource) []metadata.ResourceMetricsOption {
+ return []metadata.ResourceMetricsOption{
metadata.WithK8sVolumeType(labelValueGCEPDVolume),
// GCP specific labels.
metadata.WithGcePdName(vs.PDName),
@@ -101,8 +101,8 @@ func gcePersistentDiskDims(vs v1.GCEPersistentDiskVolumeSource) []metadata.Resou
}
}
-func glusterfsDims(vs v1.GlusterfsVolumeSource) []metadata.ResourceOption {
- return []metadata.ResourceOption{
+func glusterfsDims(vs v1.GlusterfsVolumeSource) []metadata.ResourceMetricsOption {
+ return []metadata.ResourceMetricsOption{
metadata.WithK8sVolumeType(labelValueGlusterFSVolume),
// GlusterFS specific labels.
metadata.WithGlusterfsEndpointsName(vs.EndpointsName),
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/volume_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/volume_test.go
index 49b69125faf4..a4c713528a8a 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/volume_test.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/volume_test.go
@@ -19,6 +19,7 @@ import (
"github.com/stretchr/testify/require"
"go.opentelemetry.io/collector/pdata/pcommon"
+ "go.opentelemetry.io/collector/pdata/pmetric"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
@@ -40,7 +41,7 @@ func TestDetailedPVCLabels(t *testing.T) {
volumeName string
volumeSource v1.VolumeSource
pod pod
- detailedPVCLabelsSetterOverride func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceOption, error)
+ detailedPVCLabelsSetterOverride func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceMetricsOption, error)
want map[string]interface{}
}{
{
@@ -52,7 +53,7 @@ func TestDetailedPVCLabels(t *testing.T) {
},
},
pod: pod{uid: "uid-1234", name: "pod-name", namespace: "pod-namespace"},
- detailedPVCLabelsSetterOverride: func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceOption, error) {
+ detailedPVCLabelsSetterOverride: func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceMetricsOption, error) {
ro := GetPersistentVolumeLabels(v1.PersistentVolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: "volume_id",
@@ -83,7 +84,7 @@ func TestDetailedPVCLabels(t *testing.T) {
},
},
pod: pod{uid: "uid-1234", name: "pod-name", namespace: "pod-namespace"},
- detailedPVCLabelsSetterOverride: func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceOption, error) {
+ detailedPVCLabelsSetterOverride: func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceMetricsOption, error) {
ro := GetPersistentVolumeLabels(v1.PersistentVolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: "pd_name",
@@ -114,7 +115,7 @@ func TestDetailedPVCLabels(t *testing.T) {
},
},
pod: pod{uid: "uid-1234", name: "pod-name", namespace: "pod-namespace"},
- detailedPVCLabelsSetterOverride: func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceOption, error) {
+ detailedPVCLabelsSetterOverride: func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceMetricsOption, error) {
ro := GetPersistentVolumeLabels(v1.PersistentVolumeSource{
Glusterfs: &v1.GlusterfsPersistentVolumeSource{
EndpointsName: "endpoints_name",
@@ -143,7 +144,7 @@ func TestDetailedPVCLabels(t *testing.T) {
},
},
pod: pod{uid: "uid-1234", name: "pod-name", namespace: "pod-namespace"},
- detailedPVCLabelsSetterOverride: func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceOption, error) {
+ detailedPVCLabelsSetterOverride: func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceMetricsOption, error) {
ro := GetPersistentVolumeLabels(v1.PersistentVolumeSource{
Local: &v1.LocalVolumeSource{
Path: "path",
@@ -194,12 +195,12 @@ func TestDetailedPVCLabels(t *testing.T) {
ro, err := getVolumeResourceOptions(podStats, stats.VolumeStats{Name: tt.volumeName}, metadata)
require.NoError(t, err)
- volumeResource := pcommon.NewResource()
+ volumeResourceMetrics := pmetric.NewResourceMetrics()
for _, op := range ro {
- op(volumeResource)
+ op(volumeResourceMetrics)
}
- require.Equal(t, pcommon.NewMapFromRaw(tt.want).Sort(), volumeResource.Attributes().Sort())
+ require.Equal(t, pcommon.NewMapFromRaw(tt.want).Sort(), volumeResourceMetrics.Resource().Attributes().Sort())
})
}
}
diff --git a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_v2.go b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_v2.go
index 49fb0f1ca595..e92dd632cb5c 100644
--- a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_v2.go
+++ b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_v2.go
@@ -2425,124 +2425,142 @@ func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) {
}
}
-// ResourceOption applies changes to provided resource.
-type ResourceOption func(pcommon.Resource)
+// ResourceMetricsOption applies changes to provided resource metrics.
+type ResourceMetricsOption func(pmetric.ResourceMetrics)
// WithAwsVolumeID sets provided value as "aws.volume.id" attribute for current resource.
-func WithAwsVolumeID(val string) ResourceOption {
- return func(r pcommon.Resource) {
- r.Attributes().UpsertString("aws.volume.id", val)
+func WithAwsVolumeID(val string) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ rm.Resource().Attributes().UpsertString("aws.volume.id", val)
}
}
// WithContainerID sets provided value as "container.id" attribute for current resource.
-func WithContainerID(val string) ResourceOption {
- return func(r pcommon.Resource) {
- r.Attributes().UpsertString("container.id", val)
+func WithContainerID(val string) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ rm.Resource().Attributes().UpsertString("container.id", val)
}
}
// WithContainerName sets provided value as "container.name" attribute for current resource.
-func WithContainerName(val string) ResourceOption {
- return func(r pcommon.Resource) {
- r.Attributes().UpsertString("container.name", val)
+func WithContainerName(val string) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ rm.Resource().Attributes().UpsertString("container.name", val)
}
}
// WithFsType sets provided value as "fs.type" attribute for current resource.
-func WithFsType(val string) ResourceOption {
- return func(r pcommon.Resource) {
- r.Attributes().UpsertString("fs.type", val)
+func WithFsType(val string) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ rm.Resource().Attributes().UpsertString("fs.type", val)
}
}
// WithGcePdName sets provided value as "gce.pd.name" attribute for current resource.
-func WithGcePdName(val string) ResourceOption {
- return func(r pcommon.Resource) {
- r.Attributes().UpsertString("gce.pd.name", val)
+func WithGcePdName(val string) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ rm.Resource().Attributes().UpsertString("gce.pd.name", val)
}
}
// WithGlusterfsEndpointsName sets provided value as "glusterfs.endpoints.name" attribute for current resource.
-func WithGlusterfsEndpointsName(val string) ResourceOption {
- return func(r pcommon.Resource) {
- r.Attributes().UpsertString("glusterfs.endpoints.name", val)
+func WithGlusterfsEndpointsName(val string) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ rm.Resource().Attributes().UpsertString("glusterfs.endpoints.name", val)
}
}
// WithGlusterfsPath sets provided value as "glusterfs.path" attribute for current resource.
-func WithGlusterfsPath(val string) ResourceOption {
- return func(r pcommon.Resource) {
- r.Attributes().UpsertString("glusterfs.path", val)
+func WithGlusterfsPath(val string) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ rm.Resource().Attributes().UpsertString("glusterfs.path", val)
}
}
// WithK8sNamespaceName sets provided value as "k8s.namespace.name" attribute for current resource.
-func WithK8sNamespaceName(val string) ResourceOption {
- return func(r pcommon.Resource) {
- r.Attributes().UpsertString("k8s.namespace.name", val)
+func WithK8sNamespaceName(val string) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ rm.Resource().Attributes().UpsertString("k8s.namespace.name", val)
}
}
// WithK8sNodeName sets provided value as "k8s.node.name" attribute for current resource.
-func WithK8sNodeName(val string) ResourceOption {
- return func(r pcommon.Resource) {
- r.Attributes().UpsertString("k8s.node.name", val)
+func WithK8sNodeName(val string) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ rm.Resource().Attributes().UpsertString("k8s.node.name", val)
}
}
// WithK8sPersistentvolumeclaimName sets provided value as "k8s.persistentvolumeclaim.name" attribute for current resource.
-func WithK8sPersistentvolumeclaimName(val string) ResourceOption {
- return func(r pcommon.Resource) {
- r.Attributes().UpsertString("k8s.persistentvolumeclaim.name", val)
+func WithK8sPersistentvolumeclaimName(val string) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ rm.Resource().Attributes().UpsertString("k8s.persistentvolumeclaim.name", val)
}
}
// WithK8sPodName sets provided value as "k8s.pod.name" attribute for current resource.
-func WithK8sPodName(val string) ResourceOption {
- return func(r pcommon.Resource) {
- r.Attributes().UpsertString("k8s.pod.name", val)
+func WithK8sPodName(val string) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ rm.Resource().Attributes().UpsertString("k8s.pod.name", val)
}
}
// WithK8sPodUID sets provided value as "k8s.pod.uid" attribute for current resource.
-func WithK8sPodUID(val string) ResourceOption {
- return func(r pcommon.Resource) {
- r.Attributes().UpsertString("k8s.pod.uid", val)
+func WithK8sPodUID(val string) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ rm.Resource().Attributes().UpsertString("k8s.pod.uid", val)
}
}
// WithK8sVolumeName sets provided value as "k8s.volume.name" attribute for current resource.
-func WithK8sVolumeName(val string) ResourceOption {
- return func(r pcommon.Resource) {
- r.Attributes().UpsertString("k8s.volume.name", val)
+func WithK8sVolumeName(val string) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ rm.Resource().Attributes().UpsertString("k8s.volume.name", val)
}
}
// WithK8sVolumeType sets provided value as "k8s.volume.type" attribute for current resource.
-func WithK8sVolumeType(val string) ResourceOption {
- return func(r pcommon.Resource) {
- r.Attributes().UpsertString("k8s.volume.type", val)
+func WithK8sVolumeType(val string) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ rm.Resource().Attributes().UpsertString("k8s.volume.type", val)
}
}
// WithPartition sets provided value as "partition" attribute for current resource.
-func WithPartition(val string) ResourceOption {
- return func(r pcommon.Resource) {
- r.Attributes().UpsertString("partition", val)
+func WithPartition(val string) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ rm.Resource().Attributes().UpsertString("partition", val)
+ }
+}
+
+// WithStartTimeOverride overrides start time for all the resource metrics data points.
+// This option should be only used if different start time has to be set on metrics coming from different resources.
+func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ metrics := rm.ScopeMetrics().At(0).Metrics()
+ for i := 0; i < metrics.Len(); i++ {
+ dps := pmetric.NewNumberDataPointSlice()
+ switch metrics.At(i).DataType() {
+ case pmetric.MetricDataTypeGauge:
+ dps = metrics.At(i).Gauge().DataPoints()
+ case pmetric.MetricDataTypeSum:
+ dps = metrics.At(i).Sum().DataPoints()
+ }
+ for j := 0; j < dps.Len(); j++ {
+ dps.At(j).SetStartTimestamp(start)
+ }
+ }
}
}
// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for
// recording another set of data points as part of another resource. This function can be helpful when one scraper
// needs to emit metrics from several resources. Otherwise calling this function is not required,
-// just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments.
-func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) {
+// just `Emit` function can be called instead.
+// Resource attributes should be provided as ResourceMetricsOption arguments.
+func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) {
rm := pmetric.NewResourceMetrics()
rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity)
- for _, op := range ro {
- op(rm.Resource())
- }
ils := rm.ScopeMetrics().AppendEmpty()
ils.Scope().SetName("otelcol/kubeletstatsreceiver")
ils.Metrics().EnsureCapacity(mb.metricsCapacity)
@@ -2588,6 +2606,9 @@ func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) {
mb.metricK8sVolumeInodes.emit(ils.Metrics())
mb.metricK8sVolumeInodesFree.emit(ils.Metrics())
mb.metricK8sVolumeInodesUsed.emit(ils.Metrics())
+ for _, op := range rmo {
+ op(rm)
+ }
if ils.Metrics().Len() > 0 {
mb.updateCapacity(rm)
rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty())
@@ -2597,8 +2618,8 @@ func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) {
// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for
// recording another set of metrics. This function will be responsible for applying all the transformations required to
// produce metric representation defined in metadata and user settings, e.g. delta or cumulative.
-func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics {
- mb.EmitForResource(ro...)
+func (mb *MetricsBuilder) Emit(rmo ...ResourceMetricsOption) pmetric.Metrics {
+ mb.EmitForResource(rmo...)
metrics := pmetric.NewMetrics()
mb.metricsBuffer.MoveTo(metrics)
return metrics
diff --git a/receiver/kubeletstatsreceiver/internal/metadata/metrics.go b/receiver/kubeletstatsreceiver/internal/metadata/metrics.go
index 4eb4aceef7c4..590e1cf44c56 100644
--- a/receiver/kubeletstatsreceiver/internal/metadata/metrics.go
+++ b/receiver/kubeletstatsreceiver/internal/metadata/metrics.go
@@ -23,9 +23,10 @@ type RecordIntDataPointFunc func(*MetricsBuilder, pcommon.Timestamp, int64)
type RecordIntDataPointWithDirectionFunc func(*MetricsBuilder, pcommon.Timestamp, int64, string, AttributeDirection)
type MetricsBuilders struct {
- NodeMetricsBuilder *MetricsBuilder
- PodMetricsBuilder *MetricsBuilder
- OtherMetricsBuilder *MetricsBuilder
+ NodeMetricsBuilder *MetricsBuilder
+ PodMetricsBuilder *MetricsBuilder
+ ContainerMetricsBuilder *MetricsBuilder
+ OtherMetricsBuilder *MetricsBuilder
}
type CPUMetrics struct {
diff --git a/receiver/kubeletstatsreceiver/scraper.go b/receiver/kubeletstatsreceiver/scraper.go
index 6d33468f73b6..5d2d6ebac362 100644
--- a/receiver/kubeletstatsreceiver/scraper.go
+++ b/receiver/kubeletstatsreceiver/scraper.go
@@ -47,7 +47,7 @@ type kubletScraper struct {
extraMetadataLabels []kubelet.MetadataLabel
metricGroupsToCollect map[kubelet.MetricGroup]bool
k8sAPIClient kubernetes.Interface
- cachedVolumeLabels map[string][]metadata.ResourceOption
+ cachedVolumeLabels map[string][]metadata.ResourceMetricsOption
mbs *metadata.MetricsBuilders
}
@@ -64,11 +64,12 @@ func newKubletScraper(
extraMetadataLabels: rOptions.extraMetadataLabels,
metricGroupsToCollect: rOptions.metricGroupsToCollect,
k8sAPIClient: rOptions.k8sAPIClient,
- cachedVolumeLabels: make(map[string][]metadata.ResourceOption),
+ cachedVolumeLabels: make(map[string][]metadata.ResourceMetricsOption),
mbs: &metadata.MetricsBuilders{
- NodeMetricsBuilder: metadata.NewMetricsBuilder(metricsConfig),
- PodMetricsBuilder: metadata.NewMetricsBuilder(metricsConfig),
- OtherMetricsBuilder: metadata.NewMetricsBuilder(metricsConfig),
+ NodeMetricsBuilder: metadata.NewMetricsBuilder(metricsConfig),
+ PodMetricsBuilder: metadata.NewMetricsBuilder(metricsConfig),
+ ContainerMetricsBuilder: metadata.NewMetricsBuilder(metricsConfig),
+ OtherMetricsBuilder: metadata.NewMetricsBuilder(metricsConfig),
},
}
return scraperhelper.NewScraper(typeStr, ks.scrape)
@@ -100,8 +101,8 @@ func (r *kubletScraper) scrape(context.Context) (pmetric.Metrics, error) {
return md, nil
}
-func (r *kubletScraper) detailedPVCLabelsSetter() func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceOption, error) {
- return func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceOption, error) {
+func (r *kubletScraper) detailedPVCLabelsSetter() func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceMetricsOption, error) {
+ return func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceMetricsOption, error) {
if r.k8sAPIClient == nil {
return nil, nil
}
From 4d2254346ca546578c441119aa917dc69ef47ded Mon Sep 17 00:00:00 2001
From: Dmitrii Anoshin
Date: Fri, 13 May 2022 11:14:23 -0700
Subject: [PATCH 16/17] Update CHANGELOG.md
---
CHANGELOG.md | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 187772686e6f..79d2de6a5733 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -12,6 +12,8 @@
### 💡 Enhancements 💡
+- `kubeletstatsreceiver` Update receiver to use new Metrics Builder. All emitted metrics remain the same. (#9744)
+
### 🧰 Bug fixes 🧰
## v0.51.0
@@ -52,7 +54,6 @@
- `transformprocessor`: Add new `limit` function to allow limiting the number of items in a map, such as the number of attributes in `attributes` or `resource.attributes` (#9552)
- `processor/attributes`: Support attributes set by server authenticator (#9420)
- `datadogexporter`: Experimental support for Exponential Histograms with delta aggregation temporality (#8350)
-- `kubeletstatsreceiver` Update receiver to use new Metrics Builder. All emitted metrics remain the same. (#9744)
### 🧰 Bug fixes 🧰
From 6b894b7e7a496548cff7c82421d467074ffb2933 Mon Sep 17 00:00:00 2001
From: Dmitrii Anoshin
Date: Fri, 13 May 2022 11:14:41 -0700
Subject: [PATCH 17/17] Update CHANGELOG.md
---
CHANGELOG.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 79d2de6a5733..1de9f417c82b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -12,7 +12,7 @@
### 💡 Enhancements 💡
-- `kubeletstatsreceiver` Update receiver to use new Metrics Builder. All emitted metrics remain the same. (#9744)
+- `kubeletstatsreceiver`: Update receiver to use new Metrics Builder. All emitted metrics remain the same. (#9744)
### 🧰 Bug fixes 🧰