diff --git a/.chloggen/feat_scrape-config-configurable.yaml b/.chloggen/feat_scrape-config-configurable.yaml
new file mode 100755
index 0000000000..8c60b8655c
--- /dev/null
+++ b/.chloggen/feat_scrape-config-configurable.yaml
@@ -0,0 +1,16 @@
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: enhancement
+
+# The name of the component, or a single word describing the area of concern, (e.g. operator, target allocator, github action)
+component: target allocator
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Make the Target Allocator default scrape interval for Prometheus CRs configurable
+
+# One or more tracking issues related to the change
+issues: [1925]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext: Note that this only works for Prometheus CRs; raw Prometheus configuration from the receiver uses its own settings.
diff --git a/apis/v1alpha1/opentelemetrycollector_types.go b/apis/v1alpha1/opentelemetrycollector_types.go
index eb9aa6cd29..7c14e54b0e 100644
--- a/apis/v1alpha1/opentelemetrycollector_types.go
+++ b/apis/v1alpha1/opentelemetrycollector_types.go
@@ -251,6 +251,12 @@ type OpenTelemetryTargetAllocatorPrometheusCR struct {
 	// Enabled indicates whether to use a PrometheusOperator custom resources as targets or not.
 	// +optional
 	Enabled bool `json:"enabled,omitempty"`
+	// Interval between consecutive scrapes. Equivalent to the same setting on the Prometheus CRD.
+	//
+	// Default: "30s"
+	// +kubebuilder:default:="30s"
+	// +kubebuilder:validation:Format:=duration
+	ScrapeInterval *metav1.Duration `json:"scrapeInterval,omitempty"`
 	// PodMonitors to be selected for target discovery.
 	// This is a map of {key,value} pairs. Each {key,value} in the map is going to exactly match a label in a
 	// PodMonitor's meta labels. The requirements are ANDed.
diff --git a/apis/v1alpha1/zz_generated.deepcopy.go b/apis/v1alpha1/zz_generated.deepcopy.go
index 6fb7da24ec..433a1b516d 100644
--- a/apis/v1alpha1/zz_generated.deepcopy.go
+++ b/apis/v1alpha1/zz_generated.deepcopy.go
@@ -23,6 +23,7 @@ import (
 	"k8s.io/api/autoscaling/v2"
 	"k8s.io/api/core/v1"
 	networkingv1 "k8s.io/api/networking/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 )
@@ -699,6 +700,11 @@ func (in *OpenTelemetryTargetAllocator) DeepCopy() *OpenTelemetryTargetAllocator
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *OpenTelemetryTargetAllocatorPrometheusCR) DeepCopyInto(out *OpenTelemetryTargetAllocatorPrometheusCR) {
 	*out = *in
+	if in.ScrapeInterval != nil {
+		in, out := &in.ScrapeInterval, &out.ScrapeInterval
+		*out = new(metav1.Duration)
+		**out = **in
+	}
 	if in.PodMonitorSelector != nil {
 		in, out := &in.PodMonitorSelector, &out.PodMonitorSelector
 		*out = make(map[string]string, len(*in))
diff --git a/bundle/manifests/opentelemetry.io_opentelemetrycollectors.yaml b/bundle/manifests/opentelemetry.io_opentelemetrycollectors.yaml
index 55b4cff927..a7673696f2 100644
--- a/bundle/manifests/opentelemetry.io_opentelemetrycollectors.yaml
+++ b/bundle/manifests/opentelemetry.io_opentelemetrycollectors.yaml
@@ -3578,6 +3578,12 @@ spec:
                       the map is going to exactly match a label in a PodMonitor's
                       meta labels. The requirements are ANDed.
                     type: object
+                  scrapeInterval:
+                    default: 30s
+                    description: "Interval between consecutive scrapes. Equivalent
+                      to the same setting on the Prometheus CRD. \n Default: \"30s\""
+                    format: duration
+                    type: string
                   serviceMonitorSelector:
                     additionalProperties:
                       type: string
diff --git a/cmd/otel-allocator/config/config.go b/cmd/otel-allocator/config/config.go
index 569eac8510..4828722bc1 100644
--- a/cmd/otel-allocator/config/config.go
+++ b/cmd/otel-allocator/config/config.go
@@ -24,6 +24,7 @@ import (
 	"time"
 
 	"github.com/go-logr/logr"
+	"github.com/prometheus/common/model"
 	promconfig "github.com/prometheus/prometheus/config"
 	_ "github.com/prometheus/prometheus/discovery/install"
 	"github.com/spf13/pflag"
@@ -38,16 +39,22 @@ import (
 
 const DefaultResyncTime = 5 * time.Minute
 const DefaultConfigFilePath string = "/conf/targetallocator.yaml"
+const DefaultCRScrapeInterval model.Duration = model.Duration(time.Second * 30)
 
 type Config struct {
 	LabelSelector          map[string]string  `yaml:"label_selector,omitempty"`
 	Config                 *promconfig.Config `yaml:"config"`
 	AllocationStrategy     *string            `yaml:"allocation_strategy,omitempty"`
 	FilterStrategy         *string            `yaml:"filter_strategy,omitempty"`
+	PrometheusCR           PrometheusCRConfig `yaml:"prometheus_cr,omitempty"`
 	PodMonitorSelector     map[string]string  `yaml:"pod_monitor_selector,omitempty"`
 	ServiceMonitorSelector map[string]string  `yaml:"service_monitor_selector,omitempty"`
 }
 
+type PrometheusCRConfig struct {
+	ScrapeInterval model.Duration `yaml:"scrape_interval,omitempty"`
+}
+
 func (c Config) GetAllocationStrategy() string {
 	if c.AllocationStrategy != nil {
 		return *c.AllocationStrategy
@@ -77,7 +84,7 @@ type CLIConfig struct {
 }
 
 func Load(file string) (Config, error) {
-	var cfg Config
+	cfg := createDefaultConfig()
 	if err := unmarshal(&cfg, file); err != nil {
 		return Config{}, err
 	}
@@ -96,6 +103,14 @@ func unmarshal(cfg *Config, configFile string) error {
 	return nil
 }
 
+func createDefaultConfig() Config {
+	return Config{
+		PrometheusCR: PrometheusCRConfig{
+			ScrapeInterval: DefaultCRScrapeInterval,
+		},
+	}
+}
+
 func ParseCLI() (CLIConfig, error) {
 	opts := zap.Options{}
 	opts.BindFlags(flag.CommandLine)
diff --git a/cmd/otel-allocator/config/config_test.go b/cmd/otel-allocator/config/config_test.go
index 65e28aeefd..91f0d63b70 100644
--- a/cmd/otel-allocator/config/config_test.go
+++ b/cmd/otel-allocator/config/config_test.go
@@ -48,6 +48,9 @@ func TestLoad(t *testing.T) {
 					"app.kubernetes.io/instance":   "default.test",
 					"app.kubernetes.io/managed-by": "opentelemetry-operator",
 				},
+				PrometheusCR: PrometheusCRConfig{
+					ScrapeInterval: model.Duration(time.Second * 60),
+				},
 				Config: &promconfig.Config{
 					GlobalConfig: promconfig.GlobalConfig{
 						ScrapeInterval: model.Duration(60 * time.Second),
@@ -96,7 +99,7 @@ func TestLoad(t *testing.T) {
 			args: args{
 				file: "./testdata/no_config.yaml",
 			},
-			want:    Config{},
+			want:    createDefaultConfig(),
 			wantErr: assert.NoError,
 		},
 		{
@@ -109,6 +112,9 @@ func TestLoad(t *testing.T) {
 					"app.kubernetes.io/instance":   "default.test",
 					"app.kubernetes.io/managed-by": "opentelemetry-operator",
 				},
+				PrometheusCR: PrometheusCRConfig{
+					ScrapeInterval: DefaultCRScrapeInterval,
+				},
 				Config: &promconfig.Config{
 					GlobalConfig: promconfig.GlobalConfig{
 						ScrapeInterval: model.Duration(60 * time.Second),
diff --git a/cmd/otel-allocator/config/testdata/config_test.yaml b/cmd/otel-allocator/config/testdata/config_test.yaml
index 67670f26f7..efdc27bc39 100644
--- a/cmd/otel-allocator/config/testdata/config_test.yaml
+++ b/cmd/otel-allocator/config/testdata/config_test.yaml
@@ -1,6 +1,8 @@
 label_selector:
   app.kubernetes.io/instance: default.test
   app.kubernetes.io/managed-by: opentelemetry-operator
+prometheus_cr:
+  scrape_interval: 60s
 config:
   scrape_configs:
   - job_name: prometheus
@@ -12,4 +14,4 @@ config:
     static_configs:
     - targets: ["prom.domain:9001", "prom.domain:9002", "prom.domain:9003"]
       labels:
-        my: label
\ No newline at end of file
+        my: label
diff --git a/cmd/otel-allocator/watcher/promOperator.go b/cmd/otel-allocator/watcher/promOperator.go
index 9f26a9923e..f5e05aea7a 100644
--- a/cmd/otel-allocator/watcher/promOperator.go
+++ b/cmd/otel-allocator/watcher/promOperator.go
@@ -59,7 +59,7 @@ func NewPrometheusCRWatcher(logger logr.Logger, cfg allocatorconfig.Config, cliC
 	prom := &monitoringv1.Prometheus{
 		Spec: monitoringv1.PrometheusSpec{
 			CommonPrometheusFields: monitoringv1.CommonPrometheusFields{
-				ScrapeInterval: monitoringv1.Duration("30s"),
+				ScrapeInterval: monitoringv1.Duration(cfg.PrometheusCR.ScrapeInterval.String()),
 			},
 		},
 	}
diff --git a/config/crd/bases/opentelemetry.io_opentelemetrycollectors.yaml b/config/crd/bases/opentelemetry.io_opentelemetrycollectors.yaml
index 39950402f4..98fd85fd0c 100644
--- a/config/crd/bases/opentelemetry.io_opentelemetrycollectors.yaml
+++ b/config/crd/bases/opentelemetry.io_opentelemetrycollectors.yaml
@@ -3575,6 +3575,12 @@ spec:
                       the map is going to exactly match a label in a PodMonitor's
                       meta labels. The requirements are ANDed.
                     type: object
+                  scrapeInterval:
+                    default: 30s
+                    description: "Interval between consecutive scrapes. Equivalent
+                      to the same setting on the Prometheus CRD. \n Default: \"30s\""
+                    format: duration
+                    type: string
                   serviceMonitorSelector:
                     additionalProperties:
                       type: string
diff --git a/docs/api.md b/docs/api.md
index b41afc35c8..71f4048161 100644
--- a/docs/api.md
+++ b/docs/api.md
@@ -10029,6 +10029,17 @@ PrometheusCR defines the configuration for the retrieval of PrometheusOperator C
           PodMonitors to be selected for target discovery. This is a map of {key,value} pairs. Each {key,value} in the map is going to exactly match a label in a PodMonitor's meta labels. The requirements are ANDed.<br/>
         </td>
         <td>false</td>
       </tr><tr>
+        <td><b>scrapeInterval</b></td>
+        <td>string</td>
+        <td>
+          Interval between consecutive scrapes. Equivalent to the same setting on the Prometheus CRD.
 Default: "30s"<br/>
+          <br/>
+            <i>Format</i>: duration<br/>
+            <i>Default</i>: 30s<br/>
+        </td>
+        <td>false</td>
+      </tr><tr>
         <td><b>serviceMonitorSelector</b></td>
         <td>map[string]string</td>
diff --git a/pkg/targetallocator/configmap.go b/pkg/targetallocator/configmap.go
index cc5ef855af..210bd1c8be 100644
--- a/pkg/targetallocator/configmap.go
+++ b/pkg/targetallocator/configmap.go
@@ -49,6 +49,7 @@ func ConfigMap(instance v1alpha1.OpenTelemetryCollector) (corev1.ConfigMap, erro
 	}
 
 	taConfig := make(map[interface{}]interface{})
+	prometheusCRConfig := make(map[interface{}]interface{})
 	taConfig["label_selector"] = collector.SelectorLabels(instance)
 	// We only take the "config" from the returned object, if it's present
 	if prometheusConfig, ok := prometheusReceiverConfig["config"]; ok {
@@ -65,6 +66,10 @@ func ConfigMap(instance v1alpha1.OpenTelemetryCollector) (corev1.ConfigMap, erro
 		taConfig["filter_strategy"] = instance.Spec.TargetAllocator.FilterStrategy
 	}
 
+	if instance.Spec.TargetAllocator.PrometheusCR.ScrapeInterval.Size() > 0 {
+		prometheusCRConfig["scrape_interval"] = instance.Spec.TargetAllocator.PrometheusCR.ScrapeInterval.Duration
+	}
+
 	if instance.Spec.TargetAllocator.PrometheusCR.ServiceMonitorSelector != nil {
 		taConfig["service_monitor_selector"] = &instance.Spec.TargetAllocator.PrometheusCR.ServiceMonitorSelector
 	}
@@ -73,6 +78,10 @@ func ConfigMap(instance v1alpha1.OpenTelemetryCollector) (corev1.ConfigMap, erro
 		taConfig["pod_monitor_selector"] = &instance.Spec.TargetAllocator.PrometheusCR.PodMonitorSelector
 	}
 
+	if len(prometheusCRConfig) > 0 {
+		taConfig["prometheus_cr"] = prometheusCRConfig
+	}
+
 	taConfigYAML, err := yaml.Marshal(taConfig)
 	if err != nil {
 		return corev1.ConfigMap{}, err
diff --git a/pkg/targetallocator/configmap_test.go b/pkg/targetallocator/configmap_test.go
index 2d2821d842..5e2d239c70 100644
--- a/pkg/targetallocator/configmap_test.go
+++ b/pkg/targetallocator/configmap_test.go
@@ -16,8 +16,10 @@ package targetallocator
 
 import (
 	"testing"
+	"time"
 
 	"github.com/stretchr/testify/assert"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 func TestDesiredConfigMap(t *testing.T) {
@@ -98,5 +100,39 @@ service_monitor_selector:
 
 		assert.Equal(t, expectedData, actual.Data)
 	})
+
+	t.Run("should return expected target allocator config map with scrape interval set", func(t *testing.T) {
+		expectedLables["app.kubernetes.io/component"] = "opentelemetry-targetallocator"
+		expectedLables["app.kubernetes.io/name"] = "my-instance-targetallocator"
+
+		expectedData := map[string]string{
+			"targetallocator.yaml": `allocation_strategy: least-weighted
+config:
+  scrape_configs:
+  - job_name: otel-collector
+    scrape_interval: 10s
+    static_configs:
+    - targets:
+      - 0.0.0.0:8888
+      - 0.0.0.0:9999
+label_selector:
+  app.kubernetes.io/component: opentelemetry-collector
+  app.kubernetes.io/instance: default.my-instance
+  app.kubernetes.io/managed-by: opentelemetry-operator
+  app.kubernetes.io/part-of: opentelemetry
+prometheus_cr:
+  scrape_interval: 30s
+`,
+		}
+
+		collector := collectorInstance()
+		collector.Spec.TargetAllocator.PrometheusCR.ScrapeInterval = &metav1.Duration{Duration: time.Second * 30}
+		actual, err := ConfigMap(collector)
+		assert.NoError(t, err)
+
+		assert.Equal(t, "my-instance-targetallocator", actual.Name)
+		assert.Equal(t, expectedLables, actual.Labels)
+		assert.Equal(t, expectedData, actual.Data)
+
+	})
 }
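
For anyone trying the change out, below is a minimal sketch of an OpenTelemetryCollector manifest that sets the new field. The instance name is hypothetical; the field path follows the json tags added in opentelemetrycollector_types.go above.

apiVersion: opentelemetry.io/v1alpha1
kind: OpenTelemetryCollector
metadata:
  name: example            # hypothetical instance name
spec:
  targetAllocator:
    enabled: true
    prometheusCR:
      enabled: true
      # Overrides the 30s default applied to targets discovered
      # through ServiceMonitor/PodMonitor CRs.
      scrapeInterval: 60s

With this set, ConfigMap in pkg/targetallocator/configmap.go renders a prometheus_cr.scrape_interval entry into targetallocator.yaml, Load in cmd/otel-allocator/config/config.go reads it into PrometheusCRConfig, and NewPrometheusCRWatcher passes it to the synthetic Prometheus CR used for scrape-config generation. Intervals in raw receiver scrape_configs (for example the 10s job in the test fixture) are left untouched, as the changelog subtext notes.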