From d29f7d861a9421dd82910cc0adbf40d25cf473c7 Mon Sep 17 00:00:00 2001 From: Lukasz Marchewka Date: Thu, 23 May 2024 08:20:23 +0200 Subject: [PATCH 1/2] STAC-20729 Cloned the newest changes from the original github repo, these changes supports custom table engines (also replicated) --- exporter/clickhousestsexporter/README.md | 17 ++ exporter/clickhousestsexporter/config.go | 38 +++- exporter/clickhousestsexporter/config_test.go | 78 ++++++++ .../clickhousestsexporter/exporter_logs.go | 10 +- .../exporter_logs_test.go | 28 ++- .../clickhousestsexporter/exporter_metrics.go | 2 +- .../exporter_metrics_test.go | 82 +++++--- .../exporter_resources.go | 2 +- .../exporter_sql_test.go | 186 ++++++++++++++++++ .../clickhousestsexporter/exporter_traces.go | 27 +-- .../exporter_traces_test.go | 45 ++--- exporter/clickhousestsexporter/factory.go | 6 +- exporter/clickhousestsexporter/go.mod | 16 +- .../internal/exponential_histogram_metrics.go | 19 +- .../internal/gauge_metrics.go | 17 +- .../internal/histogram_metrics.go | 17 +- .../internal/metadata/generated_status.go | 10 - .../internal/metadata/generated_telemetry.go | 17 ++ .../metadata/generated_telemetry_test.go | 63 ++++++ .../internal/metrics_model.go | 4 +- .../internal/sum_metrics.go | 17 +- .../internal/summary_metrics.go | 17 +- .../testdata/config.yaml | 16 ++ go.work.sum | 103 ++++++++++ 24 files changed, 699 insertions(+), 138 deletions(-) create mode 100644 exporter/clickhousestsexporter/exporter_sql_test.go create mode 100644 exporter/clickhousestsexporter/internal/metadata/generated_telemetry.go create mode 100644 exporter/clickhousestsexporter/internal/metadata/generated_telemetry_test.go diff --git a/exporter/clickhousestsexporter/README.md b/exporter/clickhousestsexporter/README.md index 5961ac7..11cbd19 100644 --- a/exporter/clickhousestsexporter/README.md +++ b/exporter/clickhousestsexporter/README.md @@ -291,6 +291,19 @@ ClickHouse tables: - `metrics_table_name` (default = otel_metrics): The table name for metrics. - `create_traces_table` (default = true): Create the traces table on startup +Cluster definition: + +- `cluster_name` (default = ): Optional. If present, will include `ON CLUSTER cluster_name` when creating tables. + +Table engine: + +- `table_engine` + - `name` (default = MergeTree) + - `params` (default = ) + +Modifies `ENGINE` definition when table is created. If not set then `ENGINE` defaults to `MergeTree()`. +Can be combined with `cluster_name` to enable [replication for fault tolerance](https://clickhouse.com/docs/en/architecture/replication). + Processing: - `timeout` (default = 5s): The timeout for every attempt to send data to the backend. @@ -337,6 +350,10 @@ exporters: initial_interval: 5s max_interval: 30s max_elapsed_time: 300s + # cluster_name: my_cluster + # table_engine: + # name: ReplicatedMergeTree + # params: service: pipelines: logs: diff --git a/exporter/clickhousestsexporter/config.go b/exporter/clickhousestsexporter/config.go index 050656f..a4b59de 100644 --- a/exporter/clickhousestsexporter/config.go +++ b/exporter/clickhousestsexporter/config.go @@ -26,7 +26,7 @@ type Config struct { Endpoint string `mapstructure:"endpoint"` // Username is the authentication username. Username string `mapstructure:"username"` - // Username is the authentication password. + // Password is the authentication password. Password configopaque.String `mapstructure:"password"` // Database is the database name to export. 
Database string `mapstructure:"database"` @@ -34,7 +34,7 @@ type Config struct { ConnectionParams map[string]string `mapstructure:"connection_params"` // LogsTableName is the table name for logs. default is `otel_logs`. LogsTableName string `mapstructure:"logs_table_name"` - // TracesTableName is the table name for logs. default is `otel_traces`. + // TracesTableName is the table name for traces. default is `otel_traces`. TracesTableName string `mapstructure:"traces_table_name"` // MetricsTableName is the table name for metrics. default is `otel_metrics`. MetricsTableName string `mapstructure:"metrics_table_name"` @@ -45,13 +45,24 @@ type Config struct { TTLDays uint `mapstructure:"ttl_days"` // TTL is The data time-to-live example 30m, 48h. 0 means no ttl. TTL time.Duration `mapstructure:"ttl"` + // TableEngine is the table engine to use. default is `MergeTree()`. + TableEngine TableEngine `mapstructure:"table_engine"` + // ClusterName if set will append `ON CLUSTER` with the provided name when creating tables. + ClusterName string `mapstructure:"cluster_name"` // Create the traces table on startup CreateTracesTable bool `mapstructure:"create_traces_table"` // Create the resources table on startup CreateResourcesTable bool `mapstructure:"create_resources_table"` } +// TableEngine defines the ENGINE string value when creating the table. +type TableEngine struct { + Name string `mapstructure:"name"` + Params string `mapstructure:"params"` +} + const defaultDatabase = "default" +const defaultTableEngineName = "MergeTree" var ( errConfigNoEndpoint = errors.New("endpoint must be specified") @@ -59,7 +70,7 @@ var ( errConfigTTL = errors.New("both 'ttl_days' and 'ttl' can not be provided. 'ttl_days' is deprecated, use 'ttl' instead") ) -// Validate the clickhouse server configuration. +// Validate the ClickHouse server configuration. func (cfg *Config) Validate() (err error) { if cfg.Endpoint == "" { err = errors.Join(err, errConfigNoEndpoint) @@ -140,5 +151,26 @@ func (cfg *Config) buildDB(database string) (*sql.DB, error) { } return conn, nil +} + +// TableEngineString generates the ENGINE string. +func (cfg *Config) TableEngineString() string { + engine := cfg.TableEngine.Name + params := cfg.TableEngine.Params + + if cfg.TableEngine.Name == "" { + engine = defaultTableEngineName + params = "" + } + + return fmt.Sprintf("%s(%s)", engine, params) +} + +// ClusterString generates the ON CLUSTER string. Returns empty string if not set. 
+func (cfg *Config) ClusterString() string { + if cfg.ClusterName == "" { + return "" + } + return fmt.Sprintf("ON CLUSTER %s", cfg.ClusterName) } diff --git a/exporter/clickhousestsexporter/config_test.go b/exporter/clickhousestsexporter/config_test.go index 94f9460..bdd0c6e 100644 --- a/exporter/clickhousestsexporter/config_test.go +++ b/exporter/clickhousestsexporter/config_test.go @@ -4,6 +4,7 @@ package clickhousestsexporter import ( + "fmt" "path/filepath" "testing" "time" @@ -73,6 +74,8 @@ func TestLoadConfig(t *testing.T) { QueueSize: 100, StorageID: &storageID, }, + CreateResourcesTable: true, + CreateTracesTable: true, }, }, } @@ -274,3 +277,78 @@ func TestConfig_buildDSN(t *testing.T) { }) } } + +func TestTableEngineConfigParsing(t *testing.T) { + t.Parallel() + cm, err := confmaptest.LoadConf(filepath.Join("testdata", "config.yaml")) + require.NoError(t, err) + + tests := []struct { + id component.ID + expected string + }{ + { + id: component.NewIDWithName(metadata.Type, "table-engine-empty"), + expected: "MergeTree()", + }, + { + id: component.NewIDWithName(metadata.Type, "table-engine-name-only"), + expected: "ReplicatedReplacingMergeTree()", + }, + { + id: component.NewIDWithName(metadata.Type, "table-engine-full"), + expected: "ReplicatedReplacingMergeTree('/clickhouse/tables/{shard}/table_name', '{replica}', ver)", + }, + { + id: component.NewIDWithName(metadata.Type, "table-engine-params-only"), + expected: "MergeTree()", + }, + } + + for _, tt := range tests { + t.Run(tt.id.String(), func(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + sub, err := cm.Sub(tt.id.String()) + require.NoError(t, err) + require.NoError(t, component.UnmarshalConfig(sub, cfg)) + + assert.NoError(t, component.ValidateConfig(cfg)) + assert.Equal(t, tt.expected, cfg.(*Config).TableEngineString()) + }) + } +} + +func TestClusterString(t *testing.T) { + t.Parallel() + + tests := []struct { + input string + expected string + }{ + { + input: "", + expected: "", + }, + { + input: "cluster_a_b", + expected: "ON CLUSTER cluster_a_b", + }, + { + input: "cluster a b", + expected: "ON CLUSTER cluster a b", + }, + } + + for i, tt := range tests { + t.Run(fmt.Sprintf("ClusterString case %d", i), func(t *testing.T) { + cfg := createDefaultConfig() + cfg.(*Config).Endpoint = defaultEndpoint + cfg.(*Config).ClusterName = tt.input + + assert.NoError(t, component.ValidateConfig(cfg)) + assert.Equal(t, tt.expected, cfg.(*Config).ClusterString()) + }) + } +} diff --git a/exporter/clickhousestsexporter/exporter_logs.go b/exporter/clickhousestsexporter/exporter_logs.go index 65c2dd3..443d56e 100644 --- a/exporter/clickhousestsexporter/exporter_logs.go +++ b/exporter/clickhousestsexporter/exporter_logs.go @@ -7,10 +7,10 @@ import ( "context" "database/sql" "fmt" + "github.com/stackvista/sts-opentelemetry-collector/exporter/clickhousestsexporter/internal" "time" _ "github.com/ClickHouse/clickhouse-go/v2" // For register database driver. 
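(Not part of the patch: a minimal, self-contained sketch of what the two helpers added in config.go render, assuming the same logic as `TableEngineString()` and `ClusterString()` above; `ReplicatedMergeTree` and `my_cluster` are illustrative values, not defaults.)

```go
package main

import "fmt"

// Stand-ins for the exporter config fields used by the two helpers.
type TableEngine struct{ Name, Params string }

type Config struct {
	ClusterName string
	TableEngine TableEngine
}

// Mirrors Config.TableEngineString from the patch: an empty name falls back to MergeTree().
func (cfg *Config) TableEngineString() string {
	engine, params := cfg.TableEngine.Name, cfg.TableEngine.Params
	if engine == "" {
		engine, params = "MergeTree", ""
	}
	return fmt.Sprintf("%s(%s)", engine, params)
}

// Mirrors Config.ClusterString from the patch: an empty cluster name yields no ON CLUSTER clause.
func (cfg *Config) ClusterString() string {
	if cfg.ClusterName == "" {
		return ""
	}
	return fmt.Sprintf("ON CLUSTER %s", cfg.ClusterName)
}

func main() {
	cfg := &Config{
		ClusterName: "my_cluster",
		TableEngine: TableEngine{Name: "ReplicatedMergeTree"},
	}
	// Output: ON CLUSTER my_cluster / ReplicatedMergeTree()
	fmt.Println(cfg.ClusterString(), "/", cfg.TableEngineString())
}
```

These two strings are then substituted into the `CREATE TABLE IF NOT EXISTS %s %s (...) ENGINE = %s %s` templates used by the logs, traces, and metrics table DDL below.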
- "github.com/stackvista/sts-opentelemetry-collector/exporter/clickhousestsexporter/internal" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/plog" @@ -127,7 +127,7 @@ func attributesToMap(attributes pcommon.Map) map[string]string { const ( // language=ClickHouse SQL createLogsTableSQL = ` -CREATE TABLE IF NOT EXISTS %s ( +CREATE TABLE IF NOT EXISTS %s %s ( Timestamp DateTime64(9) CODEC(Delta, ZSTD(1)), TraceId String CODEC(ZSTD(1)), SpanId String CODEC(ZSTD(1)), @@ -151,7 +151,7 @@ CREATE TABLE IF NOT EXISTS %s ( INDEX idx_log_attr_key mapKeys(LogAttributes) TYPE bloom_filter(0.01) GRANULARITY 1, INDEX idx_log_attr_value mapValues(LogAttributes) TYPE bloom_filter(0.01) GRANULARITY 1, INDEX idx_body Body TYPE tokenbf_v1(32768, 3, 0) GRANULARITY 1 -) ENGINE MergeTree() +) ENGINE = %s %s PARTITION BY toDate(Timestamp) ORDER BY (ServiceName, SeverityText, toUnixTimestamp(Timestamp), TraceId) @@ -217,7 +217,7 @@ func createDatabase(ctx context.Context, cfg *Config) error { defer func() { _ = db.Close() }() - query := fmt.Sprintf("CREATE DATABASE IF NOT EXISTS %s", cfg.Database) + query := fmt.Sprintf("CREATE DATABASE IF NOT EXISTS %s %s", cfg.Database, cfg.ClusterString()) _, err = db.ExecContext(ctx, query) if err != nil { return fmt.Errorf("create database:%w", err) @@ -234,7 +234,7 @@ func createLogsTable(ctx context.Context, cfg *Config, db *sql.DB) error { func renderCreateLogsTableSQL(cfg *Config) string { ttlExpr := internal.GenerateTTLExpr(cfg.TTLDays, cfg.TTL, "Timestamp") - return fmt.Sprintf(createLogsTableSQL, cfg.LogsTableName, ttlExpr) + return fmt.Sprintf(createLogsTableSQL, cfg.LogsTableName, cfg.ClusterString(), cfg.TableEngineString(), ttlExpr) } func renderInsertLogsSQL(cfg *Config) string { diff --git a/exporter/clickhousestsexporter/exporter_logs_test.go b/exporter/clickhousestsexporter/exporter_logs_test.go index a0c5a7e..33adf21 100644 --- a/exporter/clickhousestsexporter/exporter_logs_test.go +++ b/exporter/clickhousestsexporter/exporter_logs_test.go @@ -39,7 +39,7 @@ func TestLogsExporter_New(t *testing.T) { } failWithMsg := func(msg string) validate { - return func(t *testing.T, exporter *logsExporter, err error) { + return func(t *testing.T, _ *logsExporter, err error) { require.Error(t, err) require.Contains(t, err.Error(), msg) } @@ -121,6 +121,20 @@ func TestExporter_pushLogsData(t *testing.T) { }) } +func TestLogsClusterConfig(t *testing.T) { + testClusterConfig(t, func(t *testing.T, dsn string, clusterTest clusterTestConfig, fns ...func(*Config)) { + exporter := newTestLogsExporter(t, dsn, fns...) + clusterTest.verifyConfig(t, exporter.cfg) + }) +} + +func TestLogsTableEngineConfig(t *testing.T) { + testTableEngineConfig(t, func(t *testing.T, dsn string, engineTest tableEngineTestConfig, fns ...func(*Config)) { + exporter := newTestLogsExporter(t, dsn, fns...) 
+ engineTest.verifyConfig(t, exporter.cfg.TableEngine) + }) +} + func newTestLogsExporter(t *testing.T, dsn string, fns ...func(*Config)) *logsExporter { exporter, err := newLogsExporter(zaptest.NewLogger(t), withTestExporterConfig(fns...)(dsn)) require.NoError(t, err) @@ -151,10 +165,18 @@ func simpleLogs(count int) plog.Logs { sl.Scope().SetName("io.opentelemetry.contrib.clickhouse") sl.Scope().SetVersion("1.0.0") sl.Scope().Attributes().PutStr("lib", "clickhouse") + timestamp := time.Unix(1703498029, 0) for i := 0; i < count; i++ { r := sl.LogRecords().AppendEmpty() - r.SetTimestamp(pcommon.NewTimestampFromTime(time.Now())) - r.Attributes().PutStr(conventions.AttributeServiceName, "v") + r.SetTimestamp(pcommon.NewTimestampFromTime(timestamp)) + r.SetObservedTimestamp(pcommon.NewTimestampFromTime(timestamp)) + r.SetSeverityNumber(plog.SeverityNumberError2) + r.SetSeverityText("error") + r.Body().SetStr("error message") + r.Attributes().PutStr(conventions.AttributeServiceNamespace, "default") + r.SetFlags(plog.DefaultLogRecordFlags) + r.SetTraceID([16]byte{1, 2, 3, byte(i)}) + r.SetSpanID([8]byte{1, 2, 3, byte(i)}) } return logs } diff --git a/exporter/clickhousestsexporter/exporter_metrics.go b/exporter/clickhousestsexporter/exporter_metrics.go index a07fc15..3a99837 100644 --- a/exporter/clickhousestsexporter/exporter_metrics.go +++ b/exporter/clickhousestsexporter/exporter_metrics.go @@ -44,7 +44,7 @@ func (e *metricsExporter) start(ctx context.Context, _ component.Host) error { internal.SetLogger(e.logger) ttlExpr := internal.GenerateTTLExpr(e.cfg.TTLDays, e.cfg.TTL, "TimeUnix") - return internal.NewMetricsTable(ctx, e.cfg.MetricsTableName, ttlExpr, e.client) + return internal.NewMetricsTable(ctx, e.cfg.MetricsTableName, e.cfg.ClusterString(), e.cfg.TableEngineString(), ttlExpr, e.client) } // shutdown will shut down the exporter. diff --git a/exporter/clickhousestsexporter/exporter_metrics_test.go b/exporter/clickhousestsexporter/exporter_metrics_test.go index 1b90049..04f26c4 100644 --- a/exporter/clickhousestsexporter/exporter_metrics_test.go +++ b/exporter/clickhousestsexporter/exporter_metrics_test.go @@ -19,29 +19,43 @@ import ( "go.uber.org/zap/zaptest" ) +func TestMetricsClusterConfig(t *testing.T) { + testClusterConfig(t, func(t *testing.T, dsn string, clusterTest clusterTestConfig, fns ...func(*Config)) { + exporter := newTestMetricsExporter(t, dsn, fns...) + clusterTest.verifyConfig(t, exporter.cfg) + }) +} + +func TestMetricsTableEngineConfig(t *testing.T) { + testTableEngineConfig(t, func(t *testing.T, dsn string, engineTest tableEngineTestConfig, fns ...func(*Config)) { + exporter := newTestMetricsExporter(t, dsn, fns...) 
+ engineTest.verifyConfig(t, exporter.cfg.TableEngine) + }) +} + func TestExporter_pushMetricsData(t *testing.T) { t.Parallel() t.Run("push success", func(t *testing.T) { items := &atomic.Int32{} - initClickhouseTestServer(t, func(query string, values []driver.Value) error { + initClickhouseTestServer(t, func(query string, _ []driver.Value) error { if strings.HasPrefix(query, "INSERT") { items.Add(1) } return nil }) - exporter := newTestMetricsExporter(t) + exporter := newTestMetricsExporter(t, defaultEndpoint) mustPushMetricsData(t, exporter, simpleMetrics(1)) require.Equal(t, int32(15), items.Load()) }) t.Run("push failure", func(t *testing.T) { - initClickhouseTestServer(t, func(query string, values []driver.Value) error { + initClickhouseTestServer(t, func(query string, _ []driver.Value) error { if strings.HasPrefix(query, "INSERT") { return fmt.Errorf("mock insert error") } return nil }) - exporter := newTestMetricsExporter(t) + exporter := newTestMetricsExporter(t, defaultEndpoint) err := exporter.pushMetricsData(context.TODO(), simpleMetrics(2)) require.Error(t, err) }) @@ -93,7 +107,7 @@ func TestExporter_pushMetricsData(t *testing.T) { } return nil }) - exporter := newTestMetricsExporter(t) + exporter := newTestMetricsExporter(t, defaultEndpoint) mustPushMetricsData(t, exporter, simpleMetrics(1)) require.Equal(t, int32(15), items.Load()) @@ -101,31 +115,31 @@ func TestExporter_pushMetricsData(t *testing.T) { t.Run("check traceID and spanID", func(t *testing.T) { initClickhouseTestServer(t, func(query string, values []driver.Value) error { if strings.HasPrefix(query, "INSERT INTO otel_metrics_gauge") { - require.Equal(t, clickhouse.ArraySet{"0102030000000000"}, values[18]) - require.Equal(t, clickhouse.ArraySet{"01020300000000000000000000000000"}, values[19]) + require.Equal(t, clickhouse.ArraySet{"0102030000000000"}, values[19]) + require.Equal(t, clickhouse.ArraySet{"01020300000000000000000000000000"}, values[20]) } if strings.HasPrefix(query, "INSERT INTO otel_metrics_histogram") { - require.Equal(t, clickhouse.ArraySet{"0102030000000000"}, values[20]) - require.Equal(t, clickhouse.ArraySet{"01020300000000000000000000000000"}, values[21]) + require.Equal(t, clickhouse.ArraySet{"0102030000000000"}, values[21]) + require.Equal(t, clickhouse.ArraySet{"01020300000000000000000000000000"}, values[22]) } if strings.HasPrefix(query, "INSERT INTO otel_metrics_sum ") { - require.Equal(t, clickhouse.ArraySet{"0102030000000000"}, values[18]) - require.Equal(t, clickhouse.ArraySet{"01020300000000000000000000000000"}, values[19]) + require.Equal(t, clickhouse.ArraySet{"0102030000000000"}, values[19]) + require.Equal(t, clickhouse.ArraySet{"01020300000000000000000000000000"}, values[20]) } if strings.HasPrefix(query, "INSERT INTO otel_metrics_exponential_histogram") { - require.Equal(t, clickhouse.ArraySet{"0102030000000000"}, values[24]) - require.Equal(t, clickhouse.ArraySet{"01020300000000000000000000000000"}, values[25]) + require.Equal(t, clickhouse.ArraySet{"0102030000000000"}, values[25]) + require.Equal(t, clickhouse.ArraySet{"01020300000000000000000000000000"}, values[26]) } return nil }) - exporter := newTestMetricsExporter(t) + exporter := newTestMetricsExporter(t, defaultEndpoint) mustPushMetricsData(t, exporter, simpleMetrics(1)) }) } func Benchmark_pushMetricsData(b *testing.B) { pm := simpleMetrics(1) - exporter := newTestMetricsExporter(&testing.T{}) + exporter := newTestMetricsExporter(&testing.T{}, defaultEndpoint) b.ReportAllocs() b.ResetTimer() for n := 0; n < b.N; n++ { @@ 
-148,6 +162,7 @@ func simpleMetrics(count int) pmetric.Metrics { sm.Scope().SetDroppedAttributesCount(10) sm.Scope().SetName("Scope name 1") sm.Scope().SetVersion("Scope version 1") + timestamp := time.Unix(1703498029, 0) for i := 0; i < count; i++ { // gauge m := sm.Metrics().AppendEmpty() @@ -156,10 +171,12 @@ func simpleMetrics(count int) pmetric.Metrics { m.SetDescription("This is a gauge metrics") dp := m.SetEmptyGauge().DataPoints().AppendEmpty() dp.SetIntValue(int64(i)) + dp.SetFlags(pmetric.DefaultDataPointFlags) dp.Attributes().PutStr("gauge_label_1", "1") - dp.SetStartTimestamp(pcommon.NewTimestampFromTime(time.Now())) - dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Now())) + dp.SetStartTimestamp(pcommon.NewTimestampFromTime(timestamp)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(timestamp)) exemplars := dp.Exemplars().AppendEmpty() + exemplars.SetTimestamp(pcommon.NewTimestampFromTime(timestamp)) exemplars.SetIntValue(54) exemplars.FilteredAttributes().PutStr("key", "value") exemplars.FilteredAttributes().PutStr("key2", "value2") @@ -173,10 +190,12 @@ func simpleMetrics(count int) pmetric.Metrics { m.SetDescription("This is a sum metrics") dp = m.SetEmptySum().DataPoints().AppendEmpty() dp.SetDoubleValue(11.234) + dp.SetFlags(pmetric.DefaultDataPointFlags) dp.Attributes().PutStr("sum_label_1", "1") - dp.SetStartTimestamp(pcommon.NewTimestampFromTime(time.Now())) - dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Now())) + dp.SetStartTimestamp(pcommon.NewTimestampFromTime(timestamp)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(timestamp)) exemplars = dp.Exemplars().AppendEmpty() + exemplars.SetTimestamp(pcommon.NewTimestampFromTime(timestamp)) exemplars.SetIntValue(54) exemplars.FilteredAttributes().PutStr("key", "value") exemplars.FilteredAttributes().PutStr("key2", "value2") @@ -189,17 +208,18 @@ func simpleMetrics(count int) pmetric.Metrics { m.SetUnit("ms") m.SetDescription("This is a histogram metrics") dpHisto := m.SetEmptyHistogram().DataPoints().AppendEmpty() - dpHisto.SetStartTimestamp(pcommon.NewTimestampFromTime(time.Now())) - dpHisto.SetTimestamp(pcommon.NewTimestampFromTime(time.Now())) + dpHisto.SetStartTimestamp(pcommon.NewTimestampFromTime(timestamp)) + dpHisto.SetTimestamp(pcommon.NewTimestampFromTime(timestamp)) dpHisto.SetCount(1) dpHisto.SetSum(1) dpHisto.Attributes().PutStr("key", "value") - dpHisto.Attributes().PutStr("key", "value") + dpHisto.Attributes().PutStr("key2", "value") dpHisto.ExplicitBounds().FromRaw([]float64{0, 0, 0, 0, 0}) dpHisto.BucketCounts().FromRaw([]uint64{0, 0, 0, 1, 0}) dpHisto.SetMin(0) dpHisto.SetMax(1) exemplars = dpHisto.Exemplars().AppendEmpty() + exemplars.SetTimestamp(pcommon.NewTimestampFromTime(timestamp)) exemplars.SetDoubleValue(55.22) exemplars.FilteredAttributes().PutStr("key", "value") exemplars.FilteredAttributes().PutStr("key2", "value2") @@ -212,21 +232,22 @@ func simpleMetrics(count int) pmetric.Metrics { m.SetUnit("ms") m.SetDescription("This is a exp histogram metrics") dpExpHisto := m.SetEmptyExponentialHistogram().DataPoints().AppendEmpty() - dpExpHisto.SetStartTimestamp(pcommon.NewTimestampFromTime(time.Now())) - dpExpHisto.SetTimestamp(pcommon.NewTimestampFromTime(time.Now())) + dpExpHisto.SetStartTimestamp(pcommon.NewTimestampFromTime(timestamp)) + dpExpHisto.SetTimestamp(pcommon.NewTimestampFromTime(timestamp)) dpExpHisto.SetSum(1) dpExpHisto.SetMin(0) dpExpHisto.SetMax(1) dpExpHisto.SetZeroCount(0) dpExpHisto.SetCount(1) dpExpHisto.Attributes().PutStr("key", "value") - 
dpExpHisto.Attributes().PutStr("key", "value") + dpExpHisto.Attributes().PutStr("key2", "value") dpExpHisto.Negative().SetOffset(1) dpExpHisto.Negative().BucketCounts().FromRaw([]uint64{0, 0, 0, 1, 0}) dpExpHisto.Positive().SetOffset(1) dpExpHisto.Positive().BucketCounts().FromRaw([]uint64{0, 0, 0, 1, 0}) exemplars = dpExpHisto.Exemplars().AppendEmpty() + exemplars.SetTimestamp(pcommon.NewTimestampFromTime(timestamp)) exemplars.SetIntValue(54) exemplars.FilteredAttributes().PutStr("key", "value") exemplars.FilteredAttributes().PutStr("key2", "value2") @@ -239,10 +260,10 @@ func simpleMetrics(count int) pmetric.Metrics { m.SetUnit("ms") m.SetDescription("This is a summary metrics") summary := m.SetEmptySummary().DataPoints().AppendEmpty() - summary.SetStartTimestamp(pcommon.NewTimestampFromTime(time.Now())) - summary.SetTimestamp(pcommon.NewTimestampFromTime(time.Now())) + summary.SetStartTimestamp(pcommon.NewTimestampFromTime(timestamp)) + summary.SetTimestamp(pcommon.NewTimestampFromTime(timestamp)) summary.Attributes().PutStr("key", "value") - summary.Attributes().PutStr("key2", "value2") + summary.Attributes().PutStr("key2", "value") summary.SetCount(1) summary.SetSum(1) quantileValues := summary.QuantileValues().AppendEmpty() @@ -475,8 +496,9 @@ func mustPushMetricsData(t *testing.T, exporter *metricsExporter, md pmetric.Met require.NoError(t, err) } -func newTestMetricsExporter(t *testing.T) *metricsExporter { - exporter, err := newMetricsExporter(zaptest.NewLogger(t), withTestExporterConfig()(defaultEndpoint)) +// nolint:unparam // not need to check this func +func newTestMetricsExporter(t *testing.T, dsn string, fns ...func(*Config)) *metricsExporter { + exporter, err := newMetricsExporter(zaptest.NewLogger(t), withTestExporterConfig(fns...)(dsn)) require.NoError(t, err) require.NoError(t, exporter.start(context.TODO(), nil)) diff --git a/exporter/clickhousestsexporter/exporter_resources.go b/exporter/clickhousestsexporter/exporter_resources.go index 17e2e52..433a2bd 100644 --- a/exporter/clickhousestsexporter/exporter_resources.go +++ b/exporter/clickhousestsexporter/exporter_resources.go @@ -114,7 +114,7 @@ const ( createResourcesTableSQL = ` CREATE TABLE IF NOT EXISTS %s ( Timestamp DateTime64(9) CODEC(Delta, ZSTD(1)), - ResourceRef UUID, + ResourceRef UUID, ResourceAttributes Map(LowCardinality(String), String) CODEC(ZSTD(1)), ) ENGINE = ReplacingMergeTree %s diff --git a/exporter/clickhousestsexporter/exporter_sql_test.go b/exporter/clickhousestsexporter/exporter_sql_test.go new file mode 100644 index 0000000..9552f8b --- /dev/null +++ b/exporter/clickhousestsexporter/exporter_sql_test.go @@ -0,0 +1,186 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package clickhousestsexporter + +import ( + "database/sql/driver" + "fmt" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +type clusterTestCompletion func(t *testing.T, dsn string, clusterTest clusterTestConfig, fns ...func(*Config)) +type clusterTestConfig struct { + name string + cluster string + shouldPass bool +} + +func (test clusterTestConfig) verifyConfig(t *testing.T, cfg *Config) { + if test.cluster == "" { + require.Empty(t, cfg.ClusterString()) + } else { + require.NotEmpty(t, cfg.ClusterString()) + } +} + +func getQueryFirstLine(query string) string { + trimmed := strings.Trim(query, "\n") + line := strings.Split(trimmed, "\n")[0] + return strings.Trim(line, " (") +} + +func checkClusterQueryDefinition(query string, clusterName string) error { + line 
:= getQueryFirstLine(query) + lowercasedLine := strings.ToLower(line) + suffix := fmt.Sprintf("ON CLUSTER %s", clusterName) + prefixes := []string{"create database", "create table", "create materialized view"} + for _, prefix := range prefixes { + if strings.HasPrefix(lowercasedLine, prefix) { + if strings.HasSuffix(line, suffix) { + return nil + } + } + } + + return fmt.Errorf("query does not contain cluster clause: %s", line) +} + +func testClusterConfig(t *testing.T, completion clusterTestCompletion) { + tests := []clusterTestConfig{ + { + name: "on", + cluster: "cluster_a_b", + shouldPass: true, + }, + { + name: "off", + cluster: "", + shouldPass: false, + }, + } + + for _, tt := range tests { + t.Run("test cluster config "+tt.name, func(t *testing.T) { + initClickhouseTestServer(t, func(query string, _ []driver.Value) error { + if tt.shouldPass { + require.NoError(t, checkClusterQueryDefinition(query, tt.cluster)) + } else { + require.Error(t, checkClusterQueryDefinition(query, tt.cluster)) + } + return nil + }) + + var configMods []func(*Config) + configMods = append(configMods, func(cfg *Config) { + cfg.ClusterName = tt.cluster + cfg.Database = "test_db_" + time.Now().Format("20060102150405") + }) + + completion(t, defaultEndpoint, tt, configMods...) + }) + } +} + +type tableEngineTestCompletion func(t *testing.T, dsn string, engineTest tableEngineTestConfig, fns ...func(*Config)) +type tableEngineTestConfig struct { + name string + engineName string + engineParams string + expectedTableName string + shouldPass bool +} + +func (engineTest tableEngineTestConfig) verifyConfig(t *testing.T, te TableEngine) { + if engineTest.engineName == "" { + require.Empty(t, te.Name) + } else { + require.NotEmpty(t, te.Name) + } +} + +func checkTableEngineQueryDefinition(query string, expectedEngineName string) error { + lines := strings.Split(query, "\n") + for _, line := range lines { + if strings.Contains(strings.ToLower(line), "engine = ") { + engine := strings.Split(line, " = ")[1] + engine = strings.Trim(engine, " ") + if engine == expectedEngineName { + return nil + } + + return fmt.Errorf("wrong engine name in query: %s, expected: %s", engine, expectedEngineName) + } + } + + return fmt.Errorf("query does not contain engine definition: %s", query) +} + +func testTableEngineConfig(t *testing.T, completion tableEngineTestCompletion) { + tests := []tableEngineTestConfig{ + { + name: "no params", + engineName: "CustomEngine", + engineParams: "", + expectedTableName: "CustomEngine", + shouldPass: true, + }, + { + name: "with params", + engineName: "CustomEngine", + engineParams: "'/x/y/z', 'some_param', another_param, last_param", + expectedTableName: "CustomEngine", + shouldPass: true, + }, + { + name: "with empty name", + engineName: "", + engineParams: "", + expectedTableName: defaultTableEngineName, + shouldPass: true, + }, + { + name: "fail", + engineName: "CustomEngine", + engineParams: "", + expectedTableName: defaultTableEngineName, + shouldPass: false, + }, + } + + for _, tt := range tests { + te := TableEngine{Name: tt.engineName, Params: tt.engineParams} + expectedEngineValue := fmt.Sprintf("%s(%s)", tt.expectedTableName, tt.engineParams) + + t.Run("test table engine config "+tt.name, func(t *testing.T) { + initClickhouseTestServer(t, func(query string, _ []driver.Value) error { + firstLine := getQueryFirstLine(query) + if !strings.HasPrefix(strings.ToLower(firstLine), "create table") { + return nil + } + + check := checkTableEngineQueryDefinition(query, expectedEngineValue) + if 
tt.shouldPass { + require.NoError(t, check) + } else { + require.Error(t, check) + } + + return nil + }) + + var configMods []func(*Config) + if te.Name != "" { + configMods = append(configMods, func(cfg *Config) { + cfg.TableEngine = te + }) + } + + completion(t, defaultEndpoint, tt, configMods...) + }) + } +} diff --git a/exporter/clickhousestsexporter/exporter_traces.go b/exporter/clickhousestsexporter/exporter_traces.go index 4854aae..ba19667 100644 --- a/exporter/clickhousestsexporter/exporter_traces.go +++ b/exporter/clickhousestsexporter/exporter_traces.go @@ -72,20 +72,6 @@ func (e *tracesExporter) shutdown(ctx context.Context) error { return nil } -func getSpanParentType(r ptrace.Span) string { - if r.ParentSpanID().IsEmpty() { - return "SPAN_PARENT_TYPE_ROOT" - } - switch r.Kind() { - case ptrace.SpanKindServer: - return "SPAN_PARENT_TYPE_EXTERNAL" - case ptrace.SpanKindConsumer: - return "SPAN_PARENT_TYPE_EXTERNAL" - default: - return "SPAN_PARENT_TYPE_INTERNAL" - } -} - func (e *tracesExporter) pushTraceData(ctx context.Context, td ptrace.Traces) error { start := time.Now() resources := []*resourceModel{} @@ -120,7 +106,6 @@ func (e *tracesExporter) pushTraceData(ctx context.Context, td ptrace.Traces) er status := r.Status() eventTimes, eventNames, eventAttrs := convertEvents(r.Events()) linksTraceIDs, linksSpanIDs, linksTraceStates, linksAttrs := convertLinks(r.Links()) - spanParentType := getSpanParentType(r) _, err = traceStatement.ExecContext(ctx, r.StartTimestamp().AsTime(), res.resourceRef, @@ -137,7 +122,6 @@ func (e *tracesExporter) pushTraceData(ctx context.Context, td ptrace.Traces) er r.EndTimestamp().AsTime().Sub(r.StartTimestamp().AsTime()).Nanoseconds(), StatusCodeStr(status.Code()), status.Message(), - spanParentType, eventTimes, eventNames, eventAttrs, @@ -196,7 +180,7 @@ func convertLinks(links ptrace.SpanLinkSlice) ([]string, []string, []string, []m const ( // language=ClickHouse SQL createTracesTableSQL = ` -CREATE TABLE IF NOT EXISTS %s ( +CREATE TABLE IF NOT EXISTS %s %s ( Timestamp DateTime64(9) CODEC(Delta, ZSTD(1)), ResourceRef UUID, TraceId String CODEC(ZSTD(1)), @@ -212,7 +196,6 @@ CREATE TABLE IF NOT EXISTS %s ( Duration Int64 CODEC(ZSTD(1)), StatusCode LowCardinality(String) CODEC(ZSTD(1)), StatusMessage String CODEC(ZSTD(1)), - SpanParentType String CODEC(ZSTD(1)), Events Nested ( Timestamp DateTime64(9), Name LowCardinality(String), @@ -224,7 +207,7 @@ CREATE TABLE IF NOT EXISTS %s ( TraceState String, Attributes Map(LowCardinality(String), String) ) CODEC(ZSTD(1)), -) ENGINE MergeTree() +) ENGINE = %s %s PARTITION BY toDate(Timestamp) ORDER BY (ServiceName, ResourceRef, SpanName, toUnixTimestamp(Timestamp), TraceId) @@ -233,7 +216,7 @@ SETTINGS index_granularity=8192, ttl_only_drop_parts = 1; // language=ClickHouse SQL insertTracesSQLTemplate = `INSERT INTO %s ( Timestamp, - ResourceRef, + ResourceRef, TraceId, SpanId, ParentSpanId, @@ -247,7 +230,6 @@ SETTINGS index_granularity=8192, ttl_only_drop_parts = 1; Duration, StatusCode, StatusMessage, - SpanParentType, Events.Timestamp, Events.Name, Events.Attributes, @@ -277,7 +259,6 @@ SETTINGS index_granularity=8192, ttl_only_drop_parts = 1; ?, ?, ?, - ?, ? 
)` ) @@ -295,5 +276,5 @@ func renderInsertTracesSQL(cfg *Config) string { func renderCreateTracesTableSQL(cfg *Config) string { ttlExpr := internal.GenerateTTLExpr(cfg.TTLDays, cfg.TTL, "Timestamp") - return fmt.Sprintf(createTracesTableSQL, cfg.TracesTableName, ttlExpr) + return fmt.Sprintf(createTracesTableSQL, cfg.TracesTableName, cfg.ClusterString(), cfg.TableEngineString(), ttlExpr) } diff --git a/exporter/clickhousestsexporter/exporter_traces_test.go b/exporter/clickhousestsexporter/exporter_traces_test.go index 03c5760..9f9a105 100644 --- a/exporter/clickhousestsexporter/exporter_traces_test.go +++ b/exporter/clickhousestsexporter/exporter_traces_test.go @@ -44,8 +44,7 @@ func TestExporter_pushTracesData(t *testing.T) { t.Run("check insert resources with service name and attributes", func(t *testing.T) { initClickhouseTestServer(t, func(query string, values []driver.Value) error { if strings.HasPrefix(query, "INSERT") && strings.Contains(query, "otel_resources") { - require.Equal(t, "test-service", values[2]) - require.Equal(t, map[string]string{"service.name": "test-service"}, values[3]) + require.Equal(t, map[string]string{"service.name": "test-service"}, values[2]) } return nil }) @@ -66,22 +65,6 @@ func TestExporter_pushTracesData(t *testing.T) { exporter := newTestTracesExporter(t, defaultEndpoint) mustPushTracesData(t, exporter, simpleTraces(1)) }) - - t.Run("check insert parentSpanType", func(t *testing.T) { - var parentTypes []string - initClickhouseTestServer(t, func(query string, values []driver.Value) error { - if strings.HasPrefix(query, "INSERT") && strings.Contains(query, "otel_traces") { - if str, ok := values[15].(string); ok { - parentTypes = append(parentTypes, str) - } - } - return nil - }) - - exporter := newTestTracesExporter(t, defaultEndpoint) - mustPushTracesData(t, exporter, simpleTraces(2)) - require.Equal(t, parentTypes, []string{"SPAN_PARENT_TYPE_ROOT", "SPAN_PARENT_TYPE_INTERNAL"}) - }) } func newTestTracesExporter(t *testing.T, dsn string, fns ...func(*Config)) *tracesExporter { @@ -105,22 +88,28 @@ func simpleTraces(count int) ptrace.Traces { ss.SetSchemaUrl("https://opentelemetry.io/schemas/1.7.0") ss.Scope().SetDroppedAttributesCount(20) ss.Scope().Attributes().PutStr("lib", "clickhouse") - var firstSpan ptrace.Span + timestamp := time.Unix(1703498029, 0) for i := 0; i < count; i++ { s := ss.Spans().AppendEmpty() - s.SetSpanID([8]byte{byte(i + 1)}) - s.SetStartTimestamp(pcommon.NewTimestampFromTime(time.Now())) - s.SetEndTimestamp(pcommon.NewTimestampFromTime(time.Now())) + s.SetTraceID([16]byte{1, 2, 3, byte(i)}) + s.SetSpanID([8]byte{1, 2, 3, byte(i)}) + s.TraceState().FromRaw("trace state") + s.SetParentSpanID([8]byte{1, 2, 4, byte(i)}) + s.SetName("call db") + s.SetKind(ptrace.SpanKindInternal) + s.SetStartTimestamp(pcommon.NewTimestampFromTime(timestamp)) + s.SetEndTimestamp(pcommon.NewTimestampFromTime(timestamp.Add(time.Minute))) s.Attributes().PutStr(conventions.AttributeServiceName, "v") - if i == 0 { - firstSpan = s - } else { - s.SetParentSpanID(firstSpan.SpanID()) - } + s.Status().SetMessage("error") + s.Status().SetCode(ptrace.StatusCodeError) event := s.Events().AppendEmpty() event.SetName("event1") - event.SetTimestamp(pcommon.NewTimestampFromTime(time.Now())) + event.SetTimestamp(pcommon.NewTimestampFromTime(timestamp)) + event.Attributes().PutStr("level", "info") link := s.Links().AppendEmpty() + link.SetTraceID([16]byte{1, 2, 5, byte(i)}) + link.SetSpanID([8]byte{1, 2, 5, byte(i)}) + link.TraceState().FromRaw("error") 
link.Attributes().PutStr("k", "v") } return traces diff --git a/exporter/clickhousestsexporter/factory.go b/exporter/clickhousestsexporter/factory.go index 56f0887..965bc42 100644 --- a/exporter/clickhousestsexporter/factory.go +++ b/exporter/clickhousestsexporter/factory.go @@ -17,7 +17,7 @@ import ( "github.com/stackvista/sts-opentelemetry-collector/exporter/clickhousestsexporter/internal/metadata" ) -// NewFactory creates a factory for Elastic exporter. +// NewFactory creates a factory for ClickHouse exporter. func NewFactory() exporter.Factory { return exporter.NewFactory( metadata.Type, @@ -49,7 +49,7 @@ func createDefaultConfig() component.Config { } // createLogsExporter creates a new exporter for logs. -// Logs are directly insert into clickhouse. +// Logs are directly inserted into ClickHouse. func createLogsExporter( ctx context.Context, set exporter.CreateSettings, @@ -75,7 +75,7 @@ func createLogsExporter( } // createTracesExporter creates a new exporter for traces. -// Traces are directly insert into clickhouse. +// Traces are directly inserted into ClickHouse. func createTracesExporter( ctx context.Context, set exporter.CreateSettings, diff --git a/exporter/clickhousestsexporter/go.mod b/exporter/clickhousestsexporter/go.mod index 7618e53..c126935 100644 --- a/exporter/clickhousestsexporter/go.mod +++ b/exporter/clickhousestsexporter/go.mod @@ -6,15 +6,15 @@ require ( github.com/ClickHouse/clickhouse-go/v2 v2.24.0 github.com/cenkalti/backoff/v4 v4.3.0 github.com/google/uuid v1.6.0 - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.100.0 + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.101.0 github.com/stretchr/testify v1.9.0 - go.opentelemetry.io/collector/component v0.100.0 - go.opentelemetry.io/collector/config/configopaque v1.7.0 - go.opentelemetry.io/collector/config/configretry v0.100.0 - go.opentelemetry.io/collector/confmap v0.100.0 - go.opentelemetry.io/collector/exporter v0.100.0 - go.opentelemetry.io/collector/pdata v1.7.0 - go.opentelemetry.io/collector/semconv v0.100.0 + go.opentelemetry.io/collector/component v0.101.0 + go.opentelemetry.io/collector/config/configopaque v1.8.0 + go.opentelemetry.io/collector/config/configretry v0.101.0 + go.opentelemetry.io/collector/confmap v0.101.0 + go.opentelemetry.io/collector/exporter v0.101.0 + go.opentelemetry.io/collector/pdata v1.8.0 + go.opentelemetry.io/collector/semconv v0.101.0 go.opentelemetry.io/otel/metric v1.26.0 go.opentelemetry.io/otel/trace v1.26.0 go.uber.org/goleak v1.3.0 diff --git a/exporter/clickhousestsexporter/internal/exponential_histogram_metrics.go b/exporter/clickhousestsexporter/internal/exponential_histogram_metrics.go index e4efbdc..b987fe4 100644 --- a/exporter/clickhousestsexporter/internal/exponential_histogram_metrics.go +++ b/exporter/clickhousestsexporter/internal/exponential_histogram_metrics.go @@ -11,13 +11,14 @@ import ( "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" + conventions "go.opentelemetry.io/collector/semconv/v1.18.0" "go.uber.org/zap" ) const ( // language=ClickHouse SQL createExpHistogramTableSQL = ` -CREATE TABLE IF NOT EXISTS %s_exponential_histogram ( +CREATE TABLE IF NOT EXISTS %s_exponential_histogram %s ( ResourceAttributes Map(LowCardinality(String), String) CODEC(ZSTD(1)), ResourceSchemaUrl String CODEC(ZSTD(1)), ScopeName String CODEC(ZSTD(1)), @@ -25,6 +26,7 @@ CREATE TABLE IF NOT EXISTS %s_exponential_histogram ( ScopeAttributes Map(LowCardinality(String), String) 
CODEC(ZSTD(1)), ScopeDroppedAttrCount UInt32 CODEC(ZSTD(1)), ScopeSchemaUrl String CODEC(ZSTD(1)), + ServiceName LowCardinality(String) CODEC(ZSTD(1)), MetricName String CODEC(ZSTD(1)), MetricDescription String CODEC(ZSTD(1)), MetricUnit String CODEC(ZSTD(1)), @@ -55,10 +57,10 @@ CREATE TABLE IF NOT EXISTS %s_exponential_histogram ( INDEX idx_scope_attr_value mapValues(ScopeAttributes) TYPE bloom_filter(0.01) GRANULARITY 1, INDEX idx_attr_key mapKeys(Attributes) TYPE bloom_filter(0.01) GRANULARITY 1, INDEX idx_attr_value mapValues(Attributes) TYPE bloom_filter(0.01) GRANULARITY 1 -) ENGINE MergeTree() +) ENGINE = %s %s PARTITION BY toDate(TimeUnix) -ORDER BY (MetricName, Attributes, toUnixTimestamp64Nano(TimeUnix)) +ORDER BY (ServiceName, MetricName, Attributes, toUnixTimestamp64Nano(TimeUnix)) SETTINGS index_granularity=8192, ttl_only_drop_parts = 1; ` // language=ClickHouse SQL @@ -70,6 +72,7 @@ SETTINGS index_granularity=8192, ttl_only_drop_parts = 1; ScopeAttributes, ScopeDroppedAttrCount, ScopeSchemaUrl, + ServiceName, MetricName, MetricDescription, MetricUnit, @@ -77,7 +80,7 @@ SETTINGS index_granularity=8192, ttl_only_drop_parts = 1; StartTimeUnix, TimeUnix, Count, - Sum, + Sum, Scale, ZeroCount, PositiveOffset, @@ -91,7 +94,7 @@ SETTINGS index_granularity=8192, ttl_only_drop_parts = 1; Exemplars.TraceId, Flags, Min, - Max) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)` + Max) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)` ) type expHistogramModel struct { @@ -125,6 +128,11 @@ func (e *expHistogramMetrics) insert(ctx context.Context, db *sql.DB) error { }() for _, model := range e.expHistogramModels { + var serviceName string + if v, ok := model.metadata.ResAttr[conventions.AttributeServiceName]; ok { + serviceName = v + } + for i := 0; i < model.expHistogram.DataPoints().Len(); i++ { dp := model.expHistogram.DataPoints().At(i) @@ -137,6 +145,7 @@ func (e *expHistogramMetrics) insert(ctx context.Context, db *sql.DB) error { attributesToMap(model.metadata.ScopeInstr.Attributes()), model.metadata.ScopeInstr.DroppedAttributesCount(), model.metadata.ScopeURL, + serviceName, model.metricName, model.metricDescription, model.metricUnit, diff --git a/exporter/clickhousestsexporter/internal/gauge_metrics.go b/exporter/clickhousestsexporter/internal/gauge_metrics.go index 6e4156f..8eb6eb8 100644 --- a/exporter/clickhousestsexporter/internal/gauge_metrics.go +++ b/exporter/clickhousestsexporter/internal/gauge_metrics.go @@ -11,13 +11,14 @@ import ( "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" + conventions "go.opentelemetry.io/collector/semconv/v1.18.0" "go.uber.org/zap" ) const ( // language=ClickHouse SQL createGaugeTableSQL = ` -CREATE TABLE IF NOT EXISTS %s_gauge ( +CREATE TABLE IF NOT EXISTS %s_gauge %s ( ResourceAttributes Map(LowCardinality(String), String) CODEC(ZSTD(1)), ResourceSchemaUrl String CODEC(ZSTD(1)), ScopeName String CODEC(ZSTD(1)), @@ -25,6 +26,7 @@ CREATE TABLE IF NOT EXISTS %s_gauge ( ScopeAttributes Map(LowCardinality(String), String) CODEC(ZSTD(1)), ScopeDroppedAttrCount UInt32 CODEC(ZSTD(1)), ScopeSchemaUrl String CODEC(ZSTD(1)), + ServiceName LowCardinality(String) CODEC(ZSTD(1)), MetricName String CODEC(ZSTD(1)), MetricDescription String CODEC(ZSTD(1)), MetricUnit String CODEC(ZSTD(1)), @@ -46,10 +48,10 @@ CREATE TABLE IF NOT EXISTS %s_gauge ( INDEX idx_scope_attr_value mapValues(ScopeAttributes) TYPE bloom_filter(0.01) GRANULARITY 1, INDEX idx_attr_key mapKeys(Attributes) 
TYPE bloom_filter(0.01) GRANULARITY 1, INDEX idx_attr_value mapValues(Attributes) TYPE bloom_filter(0.01) GRANULARITY 1 -) ENGINE MergeTree() +) ENGINE = %s %s PARTITION BY toDate(TimeUnix) -ORDER BY (MetricName, Attributes, toUnixTimestamp64Nano(TimeUnix)) +ORDER BY (ServiceName, MetricName, Attributes, toUnixTimestamp64Nano(TimeUnix)) SETTINGS index_granularity=8192, ttl_only_drop_parts = 1; ` // language=ClickHouse SQL @@ -61,6 +63,7 @@ SETTINGS index_granularity=8192, ttl_only_drop_parts = 1; ScopeAttributes, ScopeDroppedAttrCount, ScopeSchemaUrl, + ServiceName, MetricName, MetricDescription, MetricUnit, @@ -73,7 +76,7 @@ SETTINGS index_granularity=8192, ttl_only_drop_parts = 1; Exemplars.TimeUnix, Exemplars.Value, Exemplars.SpanId, - Exemplars.TraceId) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)` + Exemplars.TraceId) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)` ) type gaugeModel struct { @@ -106,6 +109,11 @@ func (g *gaugeMetrics) insert(ctx context.Context, db *sql.DB) error { }() for _, model := range g.gaugeModels { + var serviceName string + if v, ok := model.metadata.ResAttr[conventions.AttributeServiceName]; ok { + serviceName = v + } + for i := 0; i < model.gauge.DataPoints().Len(); i++ { dp := model.gauge.DataPoints().At(i) attrs, times, values, traceIDs, spanIDs := convertExemplars(dp.Exemplars()) @@ -117,6 +125,7 @@ func (g *gaugeMetrics) insert(ctx context.Context, db *sql.DB) error { attributesToMap(model.metadata.ScopeInstr.Attributes()), model.metadata.ScopeInstr.DroppedAttributesCount(), model.metadata.ScopeURL, + serviceName, model.metricName, model.metricDescription, model.metricUnit, diff --git a/exporter/clickhousestsexporter/internal/histogram_metrics.go b/exporter/clickhousestsexporter/internal/histogram_metrics.go index e730589..227c29c 100644 --- a/exporter/clickhousestsexporter/internal/histogram_metrics.go +++ b/exporter/clickhousestsexporter/internal/histogram_metrics.go @@ -11,13 +11,14 @@ import ( "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" + conventions "go.opentelemetry.io/collector/semconv/v1.18.0" "go.uber.org/zap" ) const ( // language=ClickHouse SQL createHistogramTableSQL = ` -CREATE TABLE IF NOT EXISTS %s_histogram ( +CREATE TABLE IF NOT EXISTS %s_histogram %s ( ResourceAttributes Map(LowCardinality(String), String) CODEC(ZSTD(1)), ResourceSchemaUrl String CODEC(ZSTD(1)), ScopeName String CODEC(ZSTD(1)), @@ -25,6 +26,7 @@ CREATE TABLE IF NOT EXISTS %s_histogram ( ScopeAttributes Map(LowCardinality(String), String) CODEC(ZSTD(1)), ScopeDroppedAttrCount UInt32 CODEC(ZSTD(1)), ScopeSchemaUrl String CODEC(ZSTD(1)), + ServiceName LowCardinality(String) CODEC(ZSTD(1)), MetricName String CODEC(ZSTD(1)), MetricDescription String CODEC(ZSTD(1)), MetricUnit String CODEC(ZSTD(1)), @@ -51,10 +53,10 @@ CREATE TABLE IF NOT EXISTS %s_histogram ( INDEX idx_scope_attr_value mapValues(ScopeAttributes) TYPE bloom_filter(0.01) GRANULARITY 1, INDEX idx_attr_key mapKeys(Attributes) TYPE bloom_filter(0.01) GRANULARITY 1, INDEX idx_attr_value mapValues(Attributes) TYPE bloom_filter(0.01) GRANULARITY 1 -) ENGINE MergeTree() +) ENGINE = %s %s PARTITION BY toDate(TimeUnix) -ORDER BY (MetricName, Attributes, toUnixTimestamp64Nano(TimeUnix)) +ORDER BY (ServiceName, MetricName, Attributes, toUnixTimestamp64Nano(TimeUnix)) SETTINGS index_granularity=8192, ttl_only_drop_parts = 1; ` // language=ClickHouse SQL @@ -66,6 +68,7 @@ SETTINGS index_granularity=8192, ttl_only_drop_parts = 1; ScopeAttributes, 
ScopeDroppedAttrCount, ScopeSchemaUrl, + ServiceName, MetricName, MetricDescription, MetricUnit, @@ -83,7 +86,7 @@ SETTINGS index_granularity=8192, ttl_only_drop_parts = 1; Exemplars.TraceId, Flags, Min, - Max) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)` + Max) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)` ) type histogramModel struct { @@ -116,6 +119,11 @@ func (h *histogramMetrics) insert(ctx context.Context, db *sql.DB) error { }() for _, model := range h.histogramModel { + var serviceName string + if v, ok := model.metadata.ResAttr[conventions.AttributeServiceName]; ok { + serviceName = v + } + for i := 0; i < model.histogram.DataPoints().Len(); i++ { dp := model.histogram.DataPoints().At(i) attrs, times, values, traceIDs, spanIDs := convertExemplars(dp.Exemplars()) @@ -127,6 +135,7 @@ func (h *histogramMetrics) insert(ctx context.Context, db *sql.DB) error { attributesToMap(model.metadata.ScopeInstr.Attributes()), model.metadata.ScopeInstr.DroppedAttributesCount(), model.metadata.ScopeURL, + serviceName, model.metricName, model.metricDescription, model.metricUnit, diff --git a/exporter/clickhousestsexporter/internal/metadata/generated_status.go b/exporter/clickhousestsexporter/internal/metadata/generated_status.go index 65ab674..54e82d5 100644 --- a/exporter/clickhousestsexporter/internal/metadata/generated_status.go +++ b/exporter/clickhousestsexporter/internal/metadata/generated_status.go @@ -4,8 +4,6 @@ package metadata import ( "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/trace" ) var ( @@ -17,11 +15,3 @@ const ( MetricsStability = component.StabilityLevelAlpha LogsStability = component.StabilityLevelAlpha ) - -func Meter(settings component.TelemetrySettings) metric.Meter { - return settings.MeterProvider.Meter("otelcol/clickhousests") -} - -func Tracer(settings component.TelemetrySettings) trace.Tracer { - return settings.TracerProvider.Tracer("otelcol/clickhousests") -} diff --git a/exporter/clickhousestsexporter/internal/metadata/generated_telemetry.go b/exporter/clickhousestsexporter/internal/metadata/generated_telemetry.go new file mode 100644 index 0000000..6095373 --- /dev/null +++ b/exporter/clickhousestsexporter/internal/metadata/generated_telemetry.go @@ -0,0 +1,17 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/trace" +) + +func Meter(settings component.TelemetrySettings) metric.Meter { + return settings.MeterProvider.Meter("otelcol/clickhousests") +} + +func Tracer(settings component.TelemetrySettings) trace.Tracer { + return settings.TracerProvider.Tracer("otelcol/clickhousests") +} diff --git a/exporter/clickhousestsexporter/internal/metadata/generated_telemetry_test.go b/exporter/clickhousestsexporter/internal/metadata/generated_telemetry_test.go new file mode 100644 index 0000000..d56dc03 --- /dev/null +++ b/exporter/clickhousestsexporter/internal/metadata/generated_telemetry_test.go @@ -0,0 +1,63 @@ +// Code generated by mdatagen. DO NOT EDIT. 
+ +package metadata + +import ( + "testing" + + "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/metric" + embeddedmetric "go.opentelemetry.io/otel/metric/embedded" + noopmetric "go.opentelemetry.io/otel/metric/noop" + "go.opentelemetry.io/otel/trace" + embeddedtrace "go.opentelemetry.io/otel/trace/embedded" + nooptrace "go.opentelemetry.io/otel/trace/noop" + + "go.opentelemetry.io/collector/component" +) + +type mockMeter struct { + noopmetric.Meter + name string +} +type mockMeterProvider struct { + embeddedmetric.MeterProvider +} + +func (m mockMeterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter { + return mockMeter{name: name} +} + +type mockTracer struct { + nooptrace.Tracer + name string +} + +type mockTracerProvider struct { + embeddedtrace.TracerProvider +} + +func (m mockTracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + return mockTracer{name: name} +} + +func TestProviders(t *testing.T) { + set := component.TelemetrySettings{ + MeterProvider: mockMeterProvider{}, + TracerProvider: mockTracerProvider{}, + } + + meter := Meter(set) + if m, ok := meter.(mockMeter); ok { + require.Equal(t, "otelcol/clickhouse", m.name) + } else { + require.Fail(t, "returned Meter not mockMeter") + } + + tracer := Tracer(set) + if m, ok := tracer.(mockTracer); ok { + require.Equal(t, "otelcol/clickhouse", m.name) + } else { + require.Fail(t, "returned Meter not mockTracer") + } +} diff --git a/exporter/clickhousestsexporter/internal/metrics_model.go b/exporter/clickhousestsexporter/internal/metrics_model.go index 75aa0eb..a1008d5 100644 --- a/exporter/clickhousestsexporter/internal/metrics_model.go +++ b/exporter/clickhousestsexporter/internal/metrics_model.go @@ -51,9 +51,9 @@ func SetLogger(l *zap.Logger) { } // NewMetricsTable create metric tables with an expiry time to storage metric telemetry data -func NewMetricsTable(ctx context.Context, tableName string, ttlExpr string, db *sql.DB) error { +func NewMetricsTable(ctx context.Context, tableName, cluster, engine, ttlExpr string, db *sql.DB) error { for table := range supportedMetricTypes { - query := fmt.Sprintf(table, tableName, ttlExpr) + query := fmt.Sprintf(table, tableName, cluster, engine, ttlExpr) if _, err := db.ExecContext(ctx, query); err != nil { return fmt.Errorf("exec create metrics table sql: %w", err) } diff --git a/exporter/clickhousestsexporter/internal/sum_metrics.go b/exporter/clickhousestsexporter/internal/sum_metrics.go index 8fcfc93..e11c935 100644 --- a/exporter/clickhousestsexporter/internal/sum_metrics.go +++ b/exporter/clickhousestsexporter/internal/sum_metrics.go @@ -11,13 +11,14 @@ import ( "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" + conventions "go.opentelemetry.io/collector/semconv/v1.18.0" "go.uber.org/zap" ) const ( // language=ClickHouse SQL createSumTableSQL = ` -CREATE TABLE IF NOT EXISTS %s_sum ( +CREATE TABLE IF NOT EXISTS %s_sum %s ( ResourceAttributes Map(LowCardinality(String), String) CODEC(ZSTD(1)), ResourceSchemaUrl String CODEC(ZSTD(1)), ScopeName String CODEC(ZSTD(1)), @@ -25,6 +26,7 @@ CREATE TABLE IF NOT EXISTS %s_sum ( ScopeAttributes Map(LowCardinality(String), String) CODEC(ZSTD(1)), ScopeDroppedAttrCount UInt32 CODEC(ZSTD(1)), ScopeSchemaUrl String CODEC(ZSTD(1)), + ServiceName LowCardinality(String) CODEC(ZSTD(1)), MetricName String CODEC(ZSTD(1)), MetricDescription String CODEC(ZSTD(1)), MetricUnit String CODEC(ZSTD(1)), @@ -48,10 +50,10 @@ CREATE TABLE IF NOT EXISTS %s_sum ( 
INDEX idx_scope_attr_value mapValues(ScopeAttributes) TYPE bloom_filter(0.01) GRANULARITY 1, INDEX idx_attr_key mapKeys(Attributes) TYPE bloom_filter(0.01) GRANULARITY 1, INDEX idx_attr_value mapValues(Attributes) TYPE bloom_filter(0.01) GRANULARITY 1 -) ENGINE MergeTree() +) ENGINE = %s %s PARTITION BY toDate(TimeUnix) -ORDER BY (MetricName, Attributes, toUnixTimestamp64Nano(TimeUnix)) +ORDER BY (ServiceName, MetricName, Attributes, toUnixTimestamp64Nano(TimeUnix)) SETTINGS index_granularity=8192, ttl_only_drop_parts = 1; ` // language=ClickHouse SQL @@ -63,6 +65,7 @@ SETTINGS index_granularity=8192, ttl_only_drop_parts = 1; ScopeAttributes, ScopeDroppedAttrCount, ScopeSchemaUrl, + ServiceName, MetricName, MetricDescription, MetricUnit, @@ -77,7 +80,7 @@ SETTINGS index_granularity=8192, ttl_only_drop_parts = 1; Exemplars.SpanId, Exemplars.TraceId, AggTemp, - IsMonotonic) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)` + IsMonotonic) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)` ) type sumModel struct { @@ -110,6 +113,11 @@ func (s *sumMetrics) insert(ctx context.Context, db *sql.DB) error { }() for _, model := range s.sumModel { + var serviceName string + if v, ok := model.metadata.ResAttr[conventions.AttributeServiceName]; ok { + serviceName = v + } + for i := 0; i < model.sum.DataPoints().Len(); i++ { dp := model.sum.DataPoints().At(i) attrs, times, values, traceIDs, spanIDs := convertExemplars(dp.Exemplars()) @@ -121,6 +129,7 @@ func (s *sumMetrics) insert(ctx context.Context, db *sql.DB) error { attributesToMap(model.metadata.ScopeInstr.Attributes()), model.metadata.ScopeInstr.DroppedAttributesCount(), model.metadata.ScopeURL, + serviceName, model.metricName, model.metricDescription, model.metricUnit, diff --git a/exporter/clickhousestsexporter/internal/summary_metrics.go b/exporter/clickhousestsexporter/internal/summary_metrics.go index f307fd9..804ff9b 100644 --- a/exporter/clickhousestsexporter/internal/summary_metrics.go +++ b/exporter/clickhousestsexporter/internal/summary_metrics.go @@ -11,13 +11,14 @@ import ( "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" + conventions "go.opentelemetry.io/collector/semconv/v1.18.0" "go.uber.org/zap" ) const ( // language=ClickHouse SQL createSummaryTableSQL = ` -CREATE TABLE IF NOT EXISTS %s_summary ( +CREATE TABLE IF NOT EXISTS %s_summary %s ( ResourceAttributes Map(LowCardinality(String), String) CODEC(ZSTD(1)), ResourceSchemaUrl String CODEC(ZSTD(1)), ScopeName String CODEC(ZSTD(1)), @@ -25,6 +26,7 @@ CREATE TABLE IF NOT EXISTS %s_summary ( ScopeAttributes Map(LowCardinality(String), String) CODEC(ZSTD(1)), ScopeDroppedAttrCount UInt32 CODEC(ZSTD(1)), ScopeSchemaUrl String CODEC(ZSTD(1)), + ServiceName LowCardinality(String) CODEC(ZSTD(1)), MetricName String CODEC(ZSTD(1)), MetricDescription String CODEC(ZSTD(1)), MetricUnit String CODEC(ZSTD(1)), @@ -44,10 +46,10 @@ CREATE TABLE IF NOT EXISTS %s_summary ( INDEX idx_scope_attr_value mapValues(ScopeAttributes) TYPE bloom_filter(0.01) GRANULARITY 1, INDEX idx_attr_key mapKeys(Attributes) TYPE bloom_filter(0.01) GRANULARITY 1, INDEX idx_attr_value mapValues(Attributes) TYPE bloom_filter(0.01) GRANULARITY 1 -) ENGINE MergeTree() +) ENGINE = %s %s PARTITION BY toDate(TimeUnix) -ORDER BY (MetricName, Attributes, toUnixTimestamp64Nano(TimeUnix)) +ORDER BY (ServiceName, MetricName, Attributes, toUnixTimestamp64Nano(TimeUnix)) SETTINGS index_granularity=8192, ttl_only_drop_parts = 1; ` // language=ClickHouse SQL @@ -59,6 +61,7 @@ SETTINGS 
index_granularity=8192, ttl_only_drop_parts = 1; ScopeAttributes, ScopeDroppedAttrCount, ScopeSchemaUrl, + ServiceName, MetricName, MetricDescription, MetricUnit, @@ -69,7 +72,7 @@ SETTINGS index_granularity=8192, ttl_only_drop_parts = 1; Sum, ValueAtQuantiles.Quantile, ValueAtQuantiles.Value, - Flags) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)` + Flags) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)` ) type summaryModel struct { @@ -100,6 +103,11 @@ func (s *summaryMetrics) insert(ctx context.Context, db *sql.DB) error { _ = statement.Close() }() for _, model := range s.summaryModel { + var serviceName string + if v, ok := model.metadata.ResAttr[conventions.AttributeServiceName]; ok { + serviceName = v + } + for i := 0; i < model.summary.DataPoints().Len(); i++ { dp := model.summary.DataPoints().At(i) quantiles, values := convertValueAtQuantile(dp.QuantileValues()) @@ -112,6 +120,7 @@ func (s *summaryMetrics) insert(ctx context.Context, db *sql.DB) error { attributesToMap(model.metadata.ScopeInstr.Attributes()), model.metadata.ScopeInstr.DroppedAttributesCount(), model.metadata.ScopeURL, + serviceName, model.metricName, model.metricDescription, model.metricUnit, diff --git a/exporter/clickhousestsexporter/testdata/config.yaml b/exporter/clickhousestsexporter/testdata/config.yaml index 76f128c..836cf28 100644 --- a/exporter/clickhousestsexporter/testdata/config.yaml +++ b/exporter/clickhousestsexporter/testdata/config.yaml @@ -22,3 +22,19 @@ clickhousests/full: storage: file_storage/clickhouse clickhousests/invalid-endpoint: endpoint: 127.0.0.1:9000 + +clickhousests/table-engine-empty: + endpoint: clickhouse://127.0.0.1:9000 +clickhousests/table-engine-name-only: + endpoint: clickhouse://127.0.0.1:9000 + table_engine: + name: ReplicatedReplacingMergeTree +clickhousests/table-engine-full: + endpoint: clickhouse://127.0.0.1:9000 + table_engine: + name: ReplicatedReplacingMergeTree + params: "'/clickhouse/tables/{shard}/table_name', '{replica}', ver" +clickhousests/table-engine-params-only: + endpoint: clickhouse://127.0.0.1:9000 + table_engine: + params: "whatever" diff --git a/go.work.sum b/go.work.sum index 4a90ffa..77934cc 100644 --- a/go.work.sum +++ b/go.work.sum @@ -110,6 +110,7 @@ cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdi cloud.google.com/go/compute v1.23.1/go.mod h1:CqB3xpmPKKt3OJpW2ndFIXnA9A4xAy/F3Xp1ixncW78= cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= +cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg= cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= @@ -427,6 +428,7 @@ contrib.go.opencensus.io/exporter/prometheus v0.4.2/go.mod h1:dvEHbiKmgvbr5pjaF9 dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +git.sr.ht/~sbinet/gg v0.5.0 h1:6V43j30HM623V329xA9Ntq+WJrMjDxRjuAB1LFWF5m8= git.sr.ht/~sbinet/gg v0.5.0/go.mod h1:G2C0eRESqlKhS7ErsNey6HHrqU1PwsnCQlekFi9Q2Oo= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 
h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= @@ -440,6 +442,7 @@ github.com/Microsoft/hcsshim v0.11.1 h1:hJ3s7GbWlGK4YVV92sO88BQSyF4ZLVy7/awqOlPx github.com/Microsoft/hcsshim v0.11.1/go.mod h1:nFJmaO4Zr5Y7eADdFOpYswDDlNVbvcIJJNJLECr5JQg= github.com/Microsoft/hcsshim v0.11.4 h1:68vKo2VN8DE9AdN4tnkWnmdhqdbpUFM8OF3Airm7fz8= github.com/Microsoft/hcsshim v0.11.4/go.mod h1:smjE4dvqPX9Zldna+t5FG3rnoHhaB7QYxPRqGcpAD9w= +github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b h1:slYM766cy2nI3BwyRiyQj/Ud48djTMtMebDqepE95rw= github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM= github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= @@ -452,6 +455,7 @@ github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAu github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY= github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= +github.com/antihax/optional v1.0.0 h1:xK2lYat7ZLaVVcIuj82J8kIro4V6kDe0AUDFboUCwcg= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/arrow/go/v12 v12.0.0 h1:xtZE63VWl7qLdB0JObIXvvhGjoVNrQ9ciIHG2OK5cmc= github.com/apache/arrow/go/v12 v12.0.0/go.mod h1:d+tV/eHZZ7Dz7RPrFKtPK02tpr+c9/PEd/zm8mDS9Vg= @@ -461,6 +465,7 @@ github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/campoy/embedmd v1.0.0 h1:V4kI2qTJJLf4J29RzI/MAt2c3Bl4dQSYPuflzwFH2hY= github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX3MzVl8= github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= @@ -473,6 +478,7 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 h1:F1EaeKL/ta07PY/k9Os/UFtwERei2/XzGemhpGnBKNg= github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f h1:WBZRG4aNOuI15bLRrCgN8fCq8E5Xuty6jGbmSNEvSsU= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe h1:QQ3GSy+MqSHxm/d8nCtnAiZdYFd45cYZPs8vOOIYKfk= github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= @@ -491,12 +497,14 @@ github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/cpuguy83/dockercfg 
v0.3.1 h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoYe9E= github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= +github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= github.com/creack/pty v1.1.9 h1:uDmaGzcdjhF4i/plgjmEsriH11Y0o7RKapEf/LDaM3w= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dmarkham/enumer v1.5.8 h1:fIF11F9l5jyD++YYvxcSH5WgHfeaSGPaN/T4kOQ4qEM= github.com/dmarkham/enumer v1.5.8/go.mod h1:d10o8R3t/gROm2p3BXqTkMt2+HMuxEmWCXzorAruYak= +github.com/dmarkham/enumer v1.5.9 h1:NM/1ma/AUNieHZg74w67GkHFBNB15muOt3sj486QVZk= github.com/dmarkham/enumer v1.5.9/go.mod h1:e4VILe2b1nYK3JKJpRmNdl5xbDQvELc6tQ8b+GsGk6E= github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= @@ -505,6 +513,7 @@ github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bc github.com/docker/docker v25.0.3+incompatible h1:D5fy/lYmY7bvZa0XTZ5/UJPljor41F+vdyJG5luQLfQ= github.com/docker/docker v25.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v26.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.1+incompatible h1:oI+4kkAgIwwb54b9OC7Xc3hSgu1RlJA/Lln/DF72djQ= github.com/docker/docker v26.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= @@ -526,6 +535,7 @@ github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZ github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/go-fonts/liberation v0.3.2 h1:XuwG0vGHFBPRRI8Qwbi5tIvR3cku9LUfZGq/Ar16wlQ= github.com/go-fonts/liberation v0.3.2/go.mod h1:N0QsDLVUQPy3UYg9XAc3Uh3UDMp2Z7M1o4+X98dXkmI= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -536,6 +546,7 @@ github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vb github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-latex/latex v0.0.0-20231108140139-5c1ce85aa4ea h1:DfZQkvEbdmOe+JK2TMtBM+0I9GSdzE2y/L1/AmD8xKc= github.com/go-latex/latex v0.0.0-20231108140139-5c1ce85aa4ea/go.mod h1:Y7Vld91/HRbTBm7JwoI7HejdDB0u+e9AUBO9MB7yuZk= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= @@ -544,12 +555,15 @@ github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNV 
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-pdf/fpdf v0.9.0 h1:PPvSaUuo1iMi9KkaAn90NuKi+P4gwMedWPHhj8YlJQw= github.com/go-pdf/fpdf v0.9.0/go.mod h1:oO8N111TkmKb9D7VvWGLvLJlaZUQVPM+6V42pp3iV4Y= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/goccmack/gocc v0.0.0-20230228185258-2292f9e40198 h1:FSii2UQeSLngl3jFoR4tUKZLprO7qUlh/TKKticc0BM= github.com/goccmack/gocc v0.0.0-20230228185258-2292f9e40198/go.mod h1:DTh/Y2+NbnOVVoypCCQrovMPDKUGp4yZpSbWg5D0XIM= github.com/goccy/go-json v0.9.11 h1:/pAaQDLHEoCq/5FFmSKBswWmK6H0e8g4159Kc/X/nqk= github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= @@ -571,46 +585,117 @@ github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgj github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= +github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= +github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kisielk/errcheck v1.5.0 h1:e8esj/e4R+SAOwFwN+n3zr0nYeCyeweozKfO23MvHzY= +github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod 
h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mkevac/debugcharts v0.0.0-20191222103121-ae1c48aa8615 h1:/mD+ABZyXD39BzJI2XyRJlqdZG11gXFo0SSynL+OFeU= github.com/mkevac/debugcharts v0.0.0-20191222103121-ae1c48aa8615/go.mod h1:Ad7oeElCZqA1Ufj0U9/liOF4BtVepxRcTvr2ey7zTvM= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= +github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg= github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe h1:iruDEfMl2E6fbMZ9s0scYfZQ84/6SPL6zC8ACM2oIL0= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.101.0 h1:dVINhi/nne11lG+Xnwuy9t/N4xyaH2Om2EU+5lphCA4= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.101.0/go.mod h1:kjyfpKOuBfkx3UsJQsbQ5eTJM3yQWiRYaYxs47PpxvI= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= +github.com/pascaldekloe/name v1.0.1 h1:9lnXOHeqeHHnWLbKfH6X98+4+ETVqFqxN09UXSjcMb0= github.com/pascaldekloe/name v1.0.1/go.mod h1:Z//MfYJnH4jVpQ9wkclwu2I2MkHmXTlT9wR5UZScttM= +github.com/paulmach/protoscan v0.2.1 h1:rM0FpcTjUMvPUNk2BhPJrreDKetq43ChnL+x1sRg8O8= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= 
+github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil/v3 v3.24.3/go.mod h1:JpND7O217xa72ewWz9zN2eIIkPWsDN/3pl0H8Qt0uwg= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/testcontainers/testcontainers-go v0.28.0 h1:1HLm9qm+J5VikzFDYhOd+Zw12NtOl+8drH2E8nTY1r8= github.com/testcontainers/testcontainers-go v0.28.0/go.mod h1:COlDpUXbwW3owtpMkEB1zo9gwb1CoKVKlyrVPejF4AU= +github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= +github.com/xdg-go/scram v1.1.1 h1:VOMT+81stJgXW3CpHyqHN3AXDYIMsx56mEFrB37Mb/E= +github.com/xdg-go/stringprep v1.0.3 h1:kdwGpVNwPFtjs98xCGkHjQtGKh86rDcRZN17QEMCOIs= +github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA= +github.com/yuin/goldmark v1.2.1 h1:ruQGxdhGHe7FWOJPT0mKs5+pD2Xs1Bm/kdGlHO04FmM= +github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.mongodb.org/mongo-driver v1.11.4 h1:4ayjakA013OdpGyL2K3ZqylTac/rMjrJOMZ1EHizXas= +go.opentelemetry.io/collector v0.101.0 h1:jnCI/JZgpEYONWy4LCvif4CjMM7cPS4XvGHp3OrZpYo= +go.opentelemetry.io/collector v0.101.0/go.mod h1:N0xja/N3NUDIC55SjjNzyyIoxE6YoCEZC3aXQ39yIVs= +go.opentelemetry.io/collector/component v0.101.0 h1:2sILYgE8cZJj0Vseh6LUjS9iXPyqDPTx/R8yf8IPu+4= +go.opentelemetry.io/collector/component v0.101.0/go.mod h1:OB1uBpQZ2Ba6wVui/sthh6j+CPxVQIy2ou5rzZPINQQ= +go.opentelemetry.io/collector/config/configopaque v1.8.0 h1:MXNJDG/yNmEX/tkf4EJ+aSucM92l4KfqtCAhBjMVMg8= +go.opentelemetry.io/collector/config/configopaque v1.8.0/go.mod h1:VUBsRa6pi8z1GaR9CCELMOnIZQRdZQ1GGi0W3UTk7x0= +go.opentelemetry.io/collector/config/configretry v0.101.0 h1:5QggLq/lZiZXry1Ut52IOTbrdz1RbGoL29Io/wWdE4g= +go.opentelemetry.io/collector/config/configretry v0.101.0/go.mod h1:uRdmPeCkrW9Zsadh2WEbQ1AGXGYJ02vCfmmT+0g69nY= +go.opentelemetry.io/collector/config/configtelemetry v0.101.0 h1:G9RerNdBUm6rYW6wrJoKzleBiDsCGaCjtQx5UYr0hzw= +go.opentelemetry.io/collector/config/configtelemetry v0.101.0/go.mod h1:YV5PaOdtnU1xRomPcYqoHmyCr48tnaAREeGO96EZw8o= +go.opentelemetry.io/collector/confmap v0.101.0 h1:pGXZRBKnZqys1HgNECGSi8Pec5RBGa9vVCfrpcvW+kA= +go.opentelemetry.io/collector/confmap v0.101.0/go.mod h1:BWKPIpYeUzSG6ZgCJMjF7xsLvyrvJCfYURl57E5vhiQ= +go.opentelemetry.io/collector/consumer v0.101.0 h1:9tDxaeHe1+Uovf3fhdx7T4pV5mo/Dc0hniH7O5H3RBA= +go.opentelemetry.io/collector/consumer v0.101.0/go.mod h1:ud5k64on9m7hHTrhjEeLhWbLkd8+Gp06rDt3p86TKNs= 
+go.opentelemetry.io/collector/exporter v0.101.0 h1:zAxQBfaWO+PEHL3nDglgMGaWsqLsj1lJHPaBnO8PeDo= +go.opentelemetry.io/collector/exporter v0.101.0/go.mod h1:ZFwUWCmnM2ZbEty71Q13qME9QhvIKMgyYrS3s8vJPM8= +go.opentelemetry.io/collector/extension v0.101.0 h1:A4hq/aci9+/Pxi8sJfyYgbeHjSIL7JFZR81IlSOTla4= +go.opentelemetry.io/collector/extension v0.101.0/go.mod h1:14gQMuybTcppfTTM9AwqeoFrNCLv/ds/c0A4Z0hWuLI= go.opentelemetry.io/collector/featuregate v1.6.0/go.mod h1:w7nUODKxEi3FLf1HslCiE6YWtMtOOrMnSwsDam8Mg9w= +go.opentelemetry.io/collector/featuregate v1.8.0 h1:p/bAuk5LiSfdYS88yFl/Jzao9bHEYqCh7YvZJ+L+IZg= +go.opentelemetry.io/collector/featuregate v1.8.0/go.mod h1:w7nUODKxEi3FLf1HslCiE6YWtMtOOrMnSwsDam8Mg9w= +go.opentelemetry.io/collector/pdata v1.8.0 h1:d/QQgZxB4Y+d3mqLVh2ozvzujUhloD3P/fk7X+In764= +go.opentelemetry.io/collector/pdata v1.8.0/go.mod h1:/W7clu0wFC4WSRp94Ucn6Vm36Wkrt+tmtlDb1aiNZCY= +go.opentelemetry.io/collector/pdata/testdata v0.101.0 h1:JzeUtg5RN1iIFgY8DakGlqBkGxOTJlkaYlLausnEGKY= +go.opentelemetry.io/collector/pdata/testdata v0.101.0/go.mod h1:ZGobfCus4fWo5RduZ7ENI0+HD9BewgKuO6qU2rBVnUg= +go.opentelemetry.io/collector/receiver v0.101.0 h1:+YJQvcAw5Es15Ub8hYqqZumKbe7D0SMU8XCgGRxc25M= +go.opentelemetry.io/collector/receiver v0.101.0/go.mod h1:JFVHAkIIz9uOk85u9pHsYRcyFj1ZAUpw59ahNZ28+ko= +go.opentelemetry.io/collector/semconv v0.101.0 h1:tOe9iTe9dDCnvz/bqgfNRr4w80kXG8505tQJ5h5v08Q= +go.opentelemetry.io/collector/semconv v0.101.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A= go.opentelemetry.io/contrib/config v0.5.0/go.mod h1:MY6YLx0DzoiCu0ZjnbNiCk+19yN2P0Zj5SkAdEo3Nz8= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw= go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI= @@ -621,16 +706,21 @@ go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.25.0/go.mod h1:e7iXx3Hj go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY= go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= +golang.org/x/image v0.14.0 h1:tNgSxAFe3jC4uYqvZdTr84SZoM1KfwdC9SKIFrLjFn4= golang.org/x/image v0.14.0/go.mod h1:HUYqC05R2ZcZ3ejNQsIHQDQiwWM4JBqmm6MKANTp4LE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod 
v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= @@ -643,10 +733,12 @@ golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= +golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -655,20 +747,27 @@ golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2 h1:IRJeR9r1pYWsHKTRe/IInb7lYvbBVIqOgsX/u0mbOWY= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= +golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw= golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +gonum.org/v1/plot v0.14.0 h1:+LBDVFYwFe4LHhdP8coW6296MBEY4nQ+Y4vuUpJopcE= gonum.org/v1/plot v0.14.0/go.mod 
h1:MLdR9424SJed+5VqC6MsouEpig9pZX2VZ57H9ko2bXU= google.golang.org/api v0.162.0/go.mod h1:6SulDkfoBIg4NFmCuZ39XeeAgSHCPecfSUuDyYlAHs0= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s= @@ -677,6 +776,7 @@ google.golang.org/genproto/googleapis/api v0.0.0-20240213162025-012b6fc9bca9/go. google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:YUWgXUFRPfoYK1IHMuxH5K6nPEXSCzIMljnQ59lLRCk= google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= google.golang.org/grpc v1.61.1/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= @@ -684,5 +784,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs= +rsc.io/pdf v0.1.1 h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= From 96a35aa0c31922af5107f3e77d0e062c293ccd5c Mon Sep 17 00:00:00 2001 From: Lukasz Marchewka Date: Thu, 23 May 2024 13:57:01 +0200 Subject: [PATCH 2/2] STAC-20729 Configure ReplacingMergeTree to run with replication --- exporter/clickhousestsexporter/config.go | 16 ++++++++++++++++ .../clickhousestsexporter/exporter_resources.go | 16 ++++++++-------- 2 files changed, 24 insertions(+), 8 deletions(-) diff --git a/exporter/clickhousestsexporter/config.go b/exporter/clickhousestsexporter/config.go index a4b59de..9f6116a 100644 --- a/exporter/clickhousestsexporter/config.go +++ b/exporter/clickhousestsexporter/config.go @@ -47,6 +47,8 @@ type Config struct { TTL time.Duration `mapstructure:"ttl"` // TableEngine is the table engine to use. default is `MergeTree()`. TableEngine TableEngine `mapstructure:"table_engine"` + // DeduplicatingTableEngine is the table engine to use that it removes duplicates entries with the same sorting key . default is `ReplacingMergeTree()`. + DeduplicatingTableEngine TableEngine `mapstructure:"deduplicating_table_engine"` // ClusterName if set will append `ON CLUSTER` with the provided name when creating tables. 
 	ClusterName string `mapstructure:"cluster_name"`
 	// Create the traces table on startup
@@ -63,6 +65,7 @@ type TableEngine struct {
 
 const defaultDatabase = "default"
 const defaultTableEngineName = "MergeTree"
+const defaultDeduplicatingTableEngineName = "ReplacingMergeTree"
 
 var (
 	errConfigNoEndpoint = errors.New("endpoint must be specified")
@@ -166,6 +169,19 @@ func (cfg *Config) TableEngineString() string {
 
 	return fmt.Sprintf("%s(%s)", engine, params)
 }
 
+// DeduplicatingTableEngineString generates the ENGINE string for a table engine that removes duplicate entries with the same sorting key. Defaults to ReplacingMergeTree().
+func (cfg *Config) DeduplicatingTableEngineString() string {
+	engine := cfg.DeduplicatingTableEngine.Name
+	params := cfg.DeduplicatingTableEngine.Params
+
+	if cfg.DeduplicatingTableEngine.Name == "" {
+		engine = defaultDeduplicatingTableEngineName
+		params = ""
+	}
+
+	return fmt.Sprintf("%s(%s)", engine, params)
+}
+
 // ClusterString generates the ON CLUSTER string. Returns empty string if not set.
 func (cfg *Config) ClusterString() string {
 	if cfg.ClusterName == "" {
diff --git a/exporter/clickhousestsexporter/exporter_resources.go b/exporter/clickhousestsexporter/exporter_resources.go
index 433a2bd..07ed0a3 100644
--- a/exporter/clickhousestsexporter/exporter_resources.go
+++ b/exporter/clickhousestsexporter/exporter_resources.go
@@ -74,7 +74,7 @@ func (e *resourcesExporter) start(ctx context.Context, _ component.Host) error {
 		return err
 	}
 
-	return createResourcesTable(ctx, e.cfg.TTLDays, e.cfg.TTL, e.cfg.ResourcesTableName, e.client)
+	return createResourcesTable(ctx, e.cfg, e.client)
 }
 
 func (e *resourcesExporter) InsertResources(ctx context.Context, resources []*resourceModel) error {
@@ -112,11 +112,11 @@ func (e *resourcesExporter) InsertResources(ctx context.Context, resources []*re
 const (
 	// language=ClickHouse SQL
 	createResourcesTableSQL = `
-CREATE TABLE IF NOT EXISTS %s (
+CREATE TABLE IF NOT EXISTS %s %s (
     Timestamp DateTime64(9) CODEC(Delta, ZSTD(1)),
     ResourceRef UUID,
     ResourceAttributes Map(LowCardinality(String), String) CODEC(ZSTD(1)),
-) ENGINE = ReplacingMergeTree
+) ENGINE = %s
 %s
 ORDER BY (ResourceRef, toUnixTimestamp(Timestamp))
 SETTINGS index_granularity=512, ttl_only_drop_parts = 1;
@@ -125,9 +125,8 @@ SETTINGS index_granularity=512, ttl_only_drop_parts = 1;
 	insertResourcesSQLTemplate = `INSERT INTO %s (Timestamp, ResourceRef, ResourceAttributes) VALUES (?, ?, ?)`
 )
 
-func createResourcesTable(ctx context.Context, ttlDays uint, ttl time.Duration, tableName string, db *sql.DB) error {
-	ttlExpr := internal.GenerateTTLExpr(ttlDays, ttl, "Timestamp")
-	if _, err := db.ExecContext(ctx, renderCreateResourcesTableSQL(ttlExpr, tableName)); err != nil {
+func createResourcesTable(ctx context.Context, cfg *Config, db *sql.DB) error {
+	if _, err := db.ExecContext(ctx, renderCreateResourcesTableSQL(cfg)); err != nil {
 		return fmt.Errorf("exec create resources table sql: %w", err)
 	}
 	return nil
@@ -137,6 +136,7 @@ func renderInsertResourcesSQL(tableName string) string {
 	return fmt.Sprintf(strings.ReplaceAll(insertResourcesSQLTemplate, "'", "`"), tableName)
 }
 
-func renderCreateResourcesTableSQL(ttlExpr string, tableName string) string {
-	return fmt.Sprintf(createResourcesTableSQL, tableName, ttlExpr)
+func renderCreateResourcesTableSQL(cfg *Config) string {
+	ttlExpr := internal.GenerateTTLExpr(cfg.TTLDays, cfg.TTL, "Timestamp")
+	return fmt.Sprintf(createResourcesTableSQL, cfg.ResourcesTableName, cfg.ClusterString(), cfg.DeduplicatingTableEngineString(), ttlExpr)
 }
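
The ServiceName columns added in the first commit are filled from the resource attributes: the sum and summary insert loops look up the service name and fall back to an empty string when the attribute is missing. Below is a minimal, self-contained Go sketch of that lookup, assuming only that conventions.AttributeServiceName resolves to the standard "service.name" key; the package layout, function name, and map literals are illustrative and not part of the patch.

package main

import "fmt"

// attributeServiceName mirrors conventions.AttributeServiceName from
// go.opentelemetry.io/collector/semconv/v1.18.0, i.e. the "service.name"
// resource attribute key.
const attributeServiceName = "service.name"

// serviceNameFromResource mirrors the lookup added in the insert loops:
// use the resource attribute when present, otherwise leave the ServiceName
// column value empty.
func serviceNameFromResource(resAttr map[string]string) string {
	var serviceName string
	if v, ok := resAttr[attributeServiceName]; ok {
		serviceName = v
	}
	return serviceName
}

func main() {
	withName := map[string]string{attributeServiceName: "checkout-service"}
	withoutName := map[string]string{"host.name": "node-1"}

	fmt.Printf("%q\n", serviceNameFromResource(withName))    // "checkout-service"
	fmt.Printf("%q\n", serviceNameFromResource(withoutName)) // ""
}

Since ServiceName now leads the ORDER BY clause of the metric tables, rows for the same service sort together in the primary key, and rows without a service.name attribute collapse into the empty-string range.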
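
For the table-engine plumbing from the second commit, the following standalone sketch shows how the engine and cluster fragments compose into the resources DDL. The fallback defaults and the Sprintf argument order (table name, ON CLUSTER clause, engine, TTL expression) follow the hunks above; the plain "ON CLUSTER <name>" formatting, the example TTL clause, the cluster name, and the ZooKeeper path are assumptions for illustration, since the bodies of ClusterString and GenerateTTLExpr are not shown in this patch.

package main

import "fmt"

// TableEngine mirrors the mapstructure struct added in config.go.
type TableEngine struct {
	Name   string
	Params string
}

// engineString mirrors TableEngineString / DeduplicatingTableEngineString:
// when no engine name is configured, fall back to the given default with
// empty params.
func engineString(e TableEngine, defaultName string) string {
	engine, params := e.Name, e.Params
	if engine == "" {
		engine, params = defaultName, ""
	}
	return fmt.Sprintf("%s(%s)", engine, params)
}

// clusterClause assumes a plain "ON CLUSTER <name>" rendering and an empty
// string when cluster_name is not set.
func clusterClause(clusterName string) string {
	if clusterName == "" {
		return ""
	}
	return fmt.Sprintf("ON CLUSTER %s", clusterName)
}

func main() {
	// Defaults: the resources table keeps ReplacingMergeTree() and no cluster clause.
	fmt.Println(engineString(TableEngine{}, "ReplacingMergeTree")) // ReplacingMergeTree()

	// Replicated setup; cluster name and ZooKeeper path are illustrative only.
	dedup := TableEngine{
		Name:   "ReplicatedReplacingMergeTree",
		Params: "'/clickhouse/tables/{shard}/otel_resources', '{replica}'",
	}
	ttl := "TTL toDateTime(Timestamp) + toIntervalDay(3)" // example TTL clause only
	fmt.Printf("CREATE TABLE IF NOT EXISTS %s %s (...) ENGINE = %s\n%s\n...\n",
		"otel_resources", clusterClause("my_cluster"),
		engineString(dedup, "ReplacingMergeTree"), ttl)
}

With nothing configured, the rendered DDL matches the pre-patch behaviour: MergeTree() for the metric tables, ReplacingMergeTree() for the resources table, and no ON CLUSTER clause.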