From 420fc042772086c3fc1771ad257bc6c59620b77c Mon Sep 17 00:00:00 2001 From: Christian Simon Date: Wed, 6 Apr 2022 14:41:43 +0100 Subject: [PATCH] Format proto definitions --- pkg/ingester/checkpoint.proto | 28 ++- pkg/ingester/client/ingester.proto | 64 +++---- pkg/logproto/logproto.proto | 158 ++++++++-------- pkg/logproto/metrics.proto | 33 ++-- pkg/logqlmodel/stats/stats.proto | 71 ++++---- .../frontend/v1/frontendv1pb/frontend.proto | 14 +- .../frontend/v2/frontendv2pb/frontend.proto | 17 +- pkg/querier/queryrange/queryrange.proto | 81 ++++++--- .../queryrangebase/queryrange.proto | 63 ++++--- pkg/querier/stats/stats.proto | 3 +- pkg/ruler/base/ruler.proto | 26 +-- pkg/ruler/rulespb/rules.proto | 8 +- pkg/scheduler/schedulerpb/scheduler.proto | 70 ++++---- pkg/storage/chunk/grpc/grpc.proto | 169 ++++++++---------- .../chunk/storage/caching_index_client.proto | 20 ++- .../indexgateway/indexgatewaypb/gateway.proto | 29 ++- 16 files changed, 455 insertions(+), 399 deletions(-) diff --git a/pkg/ingester/checkpoint.proto b/pkg/ingester/checkpoint.proto index b1f530dd1a9e8..f54a54fe9985e 100644 --- a/pkg/ingester/checkpoint.proto +++ b/pkg/ingester/checkpoint.proto @@ -11,10 +11,14 @@ import "pkg/logproto/logproto.proto"; // Chunk is a {de,}serializable intermediate type for chunkDesc which allows // efficient loading/unloading to disk during WAL checkpoint recovery. 
message Chunk { - google.protobuf.Timestamp from = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; - google.protobuf.Timestamp to = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; - google.protobuf.Timestamp flushedAt = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; - google.protobuf.Timestamp lastUpdated = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp from = 1 + [ (gogoproto.stdtime) = true, (gogoproto.nullable) = false ]; + google.protobuf.Timestamp to = 2 + [ (gogoproto.stdtime) = true, (gogoproto.nullable) = false ]; + google.protobuf.Timestamp flushedAt = 3 + [ (gogoproto.stdtime) = true, (gogoproto.nullable) = false ]; + google.protobuf.Timestamp lastUpdated = 4 + [ (gogoproto.stdtime) = true, (gogoproto.nullable) = false ]; bool closed = 5; bool synced = 6; // data to be unmarshaled into a MemChunk @@ -26,17 +30,23 @@ message Chunk { // Series is a {de,}serializable intermediate type for Series. message Series { string userID = 1; - // post mapped fingerprint is necessary because subsequent wal writes will reference it. + // post mapped fingerprint is necessary because subsequent wal writes will + // reference it. uint64 fingerprint = 2; - repeated logproto.LegacyLabelPair labels = 3 [(gogoproto.nullable) = false, (gogoproto.customtype) = "github.com/grafana/loki/pkg/logproto.LabelAdapter"]; - repeated Chunk chunks = 4 [(gogoproto.nullable) = false]; + repeated logproto.LegacyLabelPair labels = 3 [ + (gogoproto.nullable) = false, + (gogoproto.customtype) = "github.com/grafana/loki/pkg/logproto.LabelAdapter" + ]; + repeated Chunk chunks = 4 [ (gogoproto.nullable) = false ]; // most recently pushed timestamp. - google.protobuf.Timestamp to = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp to = 5 + [ (gogoproto.stdtime) = true, (gogoproto.nullable) = false ]; // most recently pushed line. 
string lastLine = 6; // highest counter value for pushes to this stream. // Used to skip already applied entries during WAL replay. int64 entryCt = 7; // highest timestamp pushed to this stream. - google.protobuf.Timestamp highestTs = 8 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp highestTs = 8 + [ (gogoproto.stdtime) = true, (gogoproto.nullable) = false ]; } diff --git a/pkg/ingester/client/ingester.proto b/pkg/ingester/client/ingester.proto index c24cfc67637f1..c492907f3c197 100644 --- a/pkg/ingester/client/ingester.proto +++ b/pkg/ingester/client/ingester.proto @@ -21,20 +21,20 @@ service Ingester { rpc LabelNames(LabelNamesRequest) returns (LabelNamesResponse) {}; rpc UserStats(UserStatsRequest) returns (UserStatsResponse) {}; rpc AllUserStats(UserStatsRequest) returns (UsersStatsResponse) {}; - rpc MetricsForLabelMatchers(MetricsForLabelMatchersRequest) returns (MetricsForLabelMatchersResponse) {}; - rpc MetricsMetadata(MetricsMetadataRequest) returns (MetricsMetadataResponse) {}; + rpc MetricsForLabelMatchers(MetricsForLabelMatchersRequest) + returns (MetricsForLabelMatchersResponse) {}; + rpc MetricsMetadata(MetricsMetadataRequest) + returns (MetricsMetadataResponse) {}; - // TransferChunks allows leaving ingester (client) to stream chunks directly to joining ingesters (server). - rpc TransferChunks(stream TimeSeriesChunk) returns (TransferChunksResponse) {}; + // TransferChunks allows leaving ingester (client) to stream chunks directly + // to joining ingesters (server). 
+ rpc TransferChunks(stream TimeSeriesChunk) returns (TransferChunksResponse) { + }; } -message ReadRequest { - repeated QueryRequest queries = 1; -} +message ReadRequest { repeated QueryRequest queries = 1; } -message ReadResponse { - repeated QueryResponse results = 1; -} +message ReadResponse { repeated QueryResponse results = 1; } message QueryRequest { int64 start_timestamp_ms = 1; @@ -49,17 +49,18 @@ message ExemplarQueryRequest { } message QueryResponse { - repeated logproto.TimeSeries timeseries = 1 [(gogoproto.nullable) = false]; + repeated logproto.TimeSeries timeseries = 1 [ (gogoproto.nullable) = false ]; } -// QueryStreamResponse contains a batch of timeseries chunks or timeseries. Only one of these series will be populated. +// QueryStreamResponse contains a batch of timeseries chunks or timeseries. Only +// one of these series will be populated. message QueryStreamResponse { - repeated TimeSeriesChunk chunkseries = 1 [(gogoproto.nullable) = false]; - repeated logproto.TimeSeries timeseries = 2 [(gogoproto.nullable) = false]; + repeated TimeSeriesChunk chunkseries = 1 [ (gogoproto.nullable) = false ]; + repeated logproto.TimeSeries timeseries = 2 [ (gogoproto.nullable) = false ]; } message ExemplarQueryResponse { - repeated logproto.TimeSeries timeseries = 1 [(gogoproto.nullable) = false]; + repeated logproto.TimeSeries timeseries = 1 [ (gogoproto.nullable) = false ]; } message LabelValuesRequest { @@ -69,18 +70,14 @@ message LabelValuesRequest { LabelMatchers matchers = 4; } -message LabelValuesResponse { - repeated string label_values = 1; -} +message LabelValuesResponse { repeated string label_values = 1; } message LabelNamesRequest { int64 start_timestamp_ms = 1; int64 end_timestamp_ms = 2; } -message LabelNamesResponse { - repeated string label_names = 1; -} +message LabelNamesResponse { repeated string label_names = 1; } message UserStatsRequest {} @@ -96,9 +93,7 @@ message UserIDStatsResponse { UserStatsResponse data = 2; } -message 
UsersStatsResponse { - repeated UserIDStatsResponse stats = 1; -} +message UsersStatsResponse { repeated UserIDStatsResponse stats = 1; } message MetricsForLabelMatchersRequest { int64 start_timestamp_ms = 1; @@ -106,12 +101,9 @@ message MetricsForLabelMatchersRequest { repeated LabelMatchers matchers_set = 3; } -message MetricsForLabelMatchersResponse { - repeated logproto.Metric metric = 1; -} +message MetricsForLabelMatchersResponse { repeated logproto.Metric metric = 1; } -message MetricsMetadataRequest { -} +message MetricsMetadataRequest {} message MetricsMetadataResponse { repeated logproto.MetricMetadata metadata = 1; @@ -120,8 +112,11 @@ message MetricsMetadataResponse { message TimeSeriesChunk { string from_ingester_id = 1; string user_id = 2; - repeated logproto.LegacyLabelPair labels = 3 [(gogoproto.nullable) = false, (gogoproto.customtype) = "github.com/grafana/loki/pkg/logproto.LabelAdapter"]; - repeated Chunk chunks = 4 [(gogoproto.nullable) = false]; + repeated logproto.LegacyLabelPair labels = 3 [ + (gogoproto.nullable) = false, + (gogoproto.customtype) = "github.com/grafana/loki/pkg/logproto.LabelAdapter" + ]; + repeated Chunk chunks = 4 [ (gogoproto.nullable) = false ]; } message Chunk { @@ -131,12 +126,9 @@ message Chunk { bytes data = 4; } -message TransferChunksResponse { -} +message TransferChunksResponse {} -message LabelMatchers { - repeated LabelMatcher matchers = 1; -} +message LabelMatchers { repeated LabelMatcher matchers = 1; } enum MatchType { EQUAL = 0; diff --git a/pkg/logproto/logproto.proto b/pkg/logproto/logproto.proto index 0c454ad91442e..5e10092960f35 100644 --- a/pkg/logproto/logproto.proto +++ b/pkg/logproto/logproto.proto @@ -19,36 +19,43 @@ service Querier { rpc Tail(TailRequest) returns (stream TailResponse) {}; rpc Series(SeriesRequest) returns (SeriesResponse) {}; rpc TailersCount(TailersCountRequest) returns (TailersCountResponse) {}; - rpc GetChunkIDs(GetChunkIDsRequest) returns (GetChunkIDsResponse) {}; // GetChunkIDs 
returns ChunkIDs from the index store holding logs for given selectors and time-range. + rpc GetChunkIDs(GetChunkIDsRequest) returns (GetChunkIDsResponse) { + }; // GetChunkIDs returns ChunkIDs from the index store holding logs for given + // selectors and time-range. } service Ingester { - rpc TransferChunks(stream TimeSeriesChunk) returns (TransferChunksResponse) {}; + rpc TransferChunks(stream TimeSeriesChunk) returns (TransferChunksResponse) { + }; } message PushRequest { - repeated StreamAdapter streams = 1 [(gogoproto.jsontag) = "streams", (gogoproto.customtype) = "Stream"]; + repeated StreamAdapter streams = 1 + [ (gogoproto.jsontag) = "streams", (gogoproto.customtype) = "Stream" ]; } -message PushResponse { -} +message PushResponse {} message QueryRequest { string selector = 1; uint32 limit = 2; - google.protobuf.Timestamp start = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; - google.protobuf.Timestamp end = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp start = 3 + [ (gogoproto.stdtime) = true, (gogoproto.nullable) = false ]; + google.protobuf.Timestamp end = 4 + [ (gogoproto.stdtime) = true, (gogoproto.nullable) = false ]; Direction direction = 5; reserved 6; - repeated string shards = 7 [(gogoproto.jsontag) = "shards,omitempty"]; + repeated string shards = 7 [ (gogoproto.jsontag) = "shards,omitempty" ]; repeated Delete deletes = 8; } message SampleQueryRequest { string selector = 1; - google.protobuf.Timestamp start = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; - google.protobuf.Timestamp end = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; - repeated string shards = 4 [(gogoproto.jsontag) = "shards,omitempty"]; + google.protobuf.Timestamp start = 2 + [ (gogoproto.stdtime) = true, (gogoproto.nullable) = false ]; + google.protobuf.Timestamp end = 3 + [ (gogoproto.stdtime) = true, (gogoproto.nullable) = false ]; + repeated string shards = 4 [ (gogoproto.jsontag) = 
"shards,omitempty" ]; repeated Delete deletes = 5; } @@ -59,62 +66,68 @@ message Delete { } message QueryResponse { - repeated StreamAdapter streams = 1 [(gogoproto.customtype) = "Stream", (gogoproto.nullable) = true]; - stats.Ingester stats = 2 [(gogoproto.nullable) = false]; + repeated StreamAdapter streams = 1 + [ (gogoproto.customtype) = "Stream", (gogoproto.nullable) = true ]; + stats.Ingester stats = 2 [ (gogoproto.nullable) = false ]; } message SampleQueryResponse { - repeated Series series = 1 [(gogoproto.customtype) = "Series", (gogoproto.nullable) = true]; - stats.Ingester stats = 2 [(gogoproto.nullable) = false]; + repeated Series series = 1 + [ (gogoproto.customtype) = "Series", (gogoproto.nullable) = true ]; + stats.Ingester stats = 2 [ (gogoproto.nullable) = false ]; } - enum Direction { FORWARD = 0; BACKWARD = 1; } - - message LabelRequest { string name = 1; bool values = 2; // True to fetch label values, false for fetch labels names. - google.protobuf.Timestamp start = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = true]; - google.protobuf.Timestamp end = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = true]; + google.protobuf.Timestamp start = 3 + [ (gogoproto.stdtime) = true, (gogoproto.nullable) = true ]; + google.protobuf.Timestamp end = 4 + [ (gogoproto.stdtime) = true, (gogoproto.nullable) = true ]; } -message LabelResponse { - repeated string values = 1; -} +message LabelResponse { repeated string values = 1; } message StreamAdapter { - string labels = 1 [(gogoproto.jsontag) = "labels"]; - repeated EntryAdapter entries = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "entries"]; + string labels = 1 [ (gogoproto.jsontag) = "labels" ]; + repeated EntryAdapter entries = 2 + [ (gogoproto.nullable) = false, (gogoproto.jsontag) = "entries" ]; // hash contains the original hash of the stream. 
- uint64 hash = 3 [(gogoproto.jsontag) = "-"]; + uint64 hash = 3 [ (gogoproto.jsontag) = "-" ]; } message EntryAdapter { - google.protobuf.Timestamp timestamp = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false, (gogoproto.jsontag) = "ts"]; - string line = 2 [(gogoproto.jsontag) = "line"]; + google.protobuf.Timestamp timestamp = 1 [ + (gogoproto.stdtime) = true, + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "ts" + ]; + string line = 2 [ (gogoproto.jsontag) = "line" ]; } message Sample { - int64 timestamp = 1 [(gogoproto.jsontag) = "ts"]; - double value = 2 [(gogoproto.jsontag) = "value"]; - uint64 hash = 3 [(gogoproto.jsontag) = "hash"]; + int64 timestamp = 1 [ (gogoproto.jsontag) = "ts" ]; + double value = 2 [ (gogoproto.jsontag) = "value" ]; + uint64 hash = 3 [ (gogoproto.jsontag) = "hash" ]; } -// LegacySample exists for backwards compatibility reasons and is deprecated. Do not use. +// LegacySample exists for backwards compatibility reasons and is deprecated. Do +// not use. 
message LegacySample { - double value = 1; + double value = 1; int64 timestamp_ms = 2; } message Series { - string labels = 1 [(gogoproto.jsontag) = "labels"]; - repeated Sample samples = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "samples"]; - uint64 streamHash = 3 [(gogoproto.jsontag) = "streamHash"]; + string labels = 1 [ (gogoproto.jsontag) = "labels" ]; + repeated Sample samples = 2 + [ (gogoproto.nullable) = false, (gogoproto.jsontag) = "samples" ]; + uint64 streamHash = 3 [ (gogoproto.jsontag) = "streamHash" ]; } message TailRequest { @@ -122,32 +135,35 @@ message TailRequest { reserved 2; uint32 delayFor = 3; uint32 limit = 4; - google.protobuf.Timestamp start = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp start = 5 + [ (gogoproto.stdtime) = true, (gogoproto.nullable) = false ]; } message TailResponse { - StreamAdapter stream = 1 [(gogoproto.customtype) = "Stream"]; + StreamAdapter stream = 1 [ (gogoproto.customtype) = "Stream" ]; repeated DroppedStream droppedStreams = 2; } message SeriesRequest { - google.protobuf.Timestamp start = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; - google.protobuf.Timestamp end = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp start = 1 + [ (gogoproto.stdtime) = true, (gogoproto.nullable) = false ]; + google.protobuf.Timestamp end = 2 + [ (gogoproto.stdtime) = true, (gogoproto.nullable) = false ]; repeated string groups = 3; - repeated string shards = 4 [(gogoproto.jsontag) = "shards,omitempty"]; + repeated string shards = 4 [ (gogoproto.jsontag) = "shards,omitempty" ]; } message SeriesResponse { - repeated SeriesIdentifier series = 1 [(gogoproto.nullable) = false]; + repeated SeriesIdentifier series = 1 [ (gogoproto.nullable) = false ]; } -message SeriesIdentifier { - map labels = 1; -} +message SeriesIdentifier { map labels = 1; } message DroppedStream { - google.protobuf.Timestamp from = 1 
[(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; - google.protobuf.Timestamp to = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp from = 1 + [ (gogoproto.stdtime) = true, (gogoproto.nullable) = false ]; + google.protobuf.Timestamp to = 2 + [ (gogoproto.stdtime) = true, (gogoproto.nullable) = false ]; string labels = 3; } @@ -163,49 +179,51 @@ message LabelPair { string value = 2; } -// LegacyLabelPair exists for backwards compatibility reasons and is deprecated. Do not use. +// LegacyLabelPair exists for backwards compatibility reasons and is deprecated. +// Do not use. message LegacyLabelPair { bytes name = 1; bytes value = 2; } -message Chunk { - bytes data = 1; -} +message Chunk { bytes data = 1; } -message TransferChunksResponse { +message TransferChunksResponse {} -} - -message TailersCountRequest { - -} +message TailersCountRequest {} -message TailersCountResponse { - uint32 count = 1; -} +message TailersCountResponse { uint32 count = 1; } message GetChunkIDsRequest { string matchers = 1; - google.protobuf.Timestamp start = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; - google.protobuf.Timestamp end = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp start = 2 + [ (gogoproto.stdtime) = true, (gogoproto.nullable) = false ]; + google.protobuf.Timestamp end = 3 + [ (gogoproto.stdtime) = true, (gogoproto.nullable) = false ]; } -message GetChunkIDsResponse { - repeated string chunkIDs = 1; -} +message GetChunkIDsResponse { repeated string chunkIDs = 1; } // ChunkRef contains the metadata to reference a Chunk. // It is embedded by the Chunk type itself and used to generate the Chunk // checksum. So it is imported to take care of the JSON representation of the // resulting Go struct. 
message ChunkRef { - uint64 fingerprint = 1 [(gogoproto.jsontag) = "fingerprint"]; - string user_id = 2 [(gogoproto.customname) = "UserID", (gogoproto.jsontag) = "userID"]; - int64 from = 3 [(gogoproto.jsontag) = "from", (gogoproto.customtype) = "github.com/prometheus/common/model.Time", (gogoproto.nullable) = false]; - int64 through = 4 [(gogoproto.jsontag) = "through", (gogoproto.customtype) = "github.com/prometheus/common/model.Time", (gogoproto.nullable) = false]; + uint64 fingerprint = 1 [ (gogoproto.jsontag) = "fingerprint" ]; + string user_id = 2 + [ (gogoproto.customname) = "UserID", (gogoproto.jsontag) = "userID" ]; + int64 from = 3 [ + (gogoproto.jsontag) = "from", + (gogoproto.customtype) = "github.com/prometheus/common/model.Time", + (gogoproto.nullable) = false + ]; + int64 through = 4 [ + (gogoproto.jsontag) = "through", + (gogoproto.customtype) = "github.com/prometheus/common/model.Time", + (gogoproto.nullable) = false + ]; // The checksum is not written to the external storage. We use crc32, // Castagnoli table. See http://www.evanjones.ca/crc32c.html. 
- uint32 checksum = 5 [(gogoproto.jsontag) = "-"]; + uint32 checksum = 5 [ (gogoproto.jsontag) = "-" ]; } diff --git a/pkg/logproto/metrics.proto b/pkg/logproto/metrics.proto index 97e85ce5c761a..d5daefd28163c 100644 --- a/pkg/logproto/metrics.proto +++ b/pkg/logproto/metrics.proto @@ -11,36 +11,42 @@ option (gogoproto.marshaler_all) = true; option (gogoproto.unmarshaler_all) = true; message WriteRequest { - repeated TimeSeries timeseries = 1 [(gogoproto.nullable) = false, (gogoproto.customtype) = "PreallocTimeseries"]; + repeated TimeSeries timeseries = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customtype) = "PreallocTimeseries" + ]; enum SourceEnum { API = 0; RULE = 1; } SourceEnum Source = 2; - repeated MetricMetadata metadata = 3 [(gogoproto.nullable) = true]; + repeated MetricMetadata metadata = 3 [ (gogoproto.nullable) = true ]; - bool skip_label_name_validation = 1000; //set intentionally high to keep WriteRequest compatible with upstream Prometheus + bool skip_label_name_validation = + 1000; // set intentionally high to keep WriteRequest compatible with + // upstream Prometheus } message WriteResponse {} message TimeSeries { - repeated LegacyLabelPair labels = 1 [(gogoproto.nullable) = false, (gogoproto.customtype) = "LabelAdapter"]; + repeated LegacyLabelPair labels = 1 + [ (gogoproto.nullable) = false, (gogoproto.customtype) = "LabelAdapter" ]; // Sorted by time, oldest sample first. 
- repeated LegacySample samples = 2 [(gogoproto.nullable) = false]; + repeated LegacySample samples = 2 [ (gogoproto.nullable) = false ]; // repeated Exemplar exemplars = 3 [(gogoproto.nullable) = false]; } message MetricMetadata { enum MetricType { - UNKNOWN = 0; - COUNTER = 1; - GAUGE = 2; - HISTOGRAM = 3; + UNKNOWN = 0; + COUNTER = 1; + GAUGE = 2; + HISTOGRAM = 3; GAUGEHISTOGRAM = 4; - SUMMARY = 5; - INFO = 6; - STATESET = 7; + SUMMARY = 5; + INFO = 6; + STATESET = 7; } MetricType type = 1; @@ -50,5 +56,6 @@ message MetricMetadata { } message Metric { - repeated LegacyLabelPair labels = 1 [(gogoproto.nullable) = false, (gogoproto.customtype) = "LabelAdapter"]; + repeated LegacyLabelPair labels = 1 + [ (gogoproto.nullable) = false, (gogoproto.customtype) = "LabelAdapter" ]; } \ No newline at end of file diff --git a/pkg/logqlmodel/stats/stats.proto b/pkg/logqlmodel/stats/stats.proto index fdf3f14d2d0fe..ba80b5fe2ea6b 100644 --- a/pkg/logqlmodel/stats/stats.proto +++ b/pkg/logqlmodel/stats/stats.proto @@ -8,76 +8,83 @@ option (gogoproto.marshaler_all) = true; option (gogoproto.unmarshaler_all) = true; option go_package = "github.com/grafana/loki/pkg/logqlmodel/stats"; - // Result contains LogQL query statistics. message Result { - Summary summary = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "summary"]; - Querier querier = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "querier"]; - Ingester ingester = 3 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "ingester"]; + Summary summary = 1 + [ (gogoproto.nullable) = false, (gogoproto.jsontag) = "summary" ]; + Querier querier = 2 + [ (gogoproto.nullable) = false, (gogoproto.jsontag) = "querier" ]; + Ingester ingester = 3 + [ (gogoproto.nullable) = false, (gogoproto.jsontag) = "ingester" ]; } // Summary is the summary of a query statistics. message Summary { - // Total bytes processed per second. 
- int64 bytesProcessedPerSecond = 1 [(gogoproto.jsontag) = "bytesProcessedPerSecond"]; + // Total bytes processed per second. + int64 bytesProcessedPerSecond = 1 + [ (gogoproto.jsontag) = "bytesProcessedPerSecond" ]; // Total lines processed per second. - int64 linesProcessedPerSecond = 2 [(gogoproto.jsontag) = "linesProcessedPerSecond"]; + int64 linesProcessedPerSecond = 2 + [ (gogoproto.jsontag) = "linesProcessedPerSecond" ]; // Total bytes processed. - int64 totalBytesProcessed = 3 [(gogoproto.jsontag) = "totalBytesProcessed"]; + int64 totalBytesProcessed = 3 [ (gogoproto.jsontag) = "totalBytesProcessed" ]; // Total lines processed. - int64 totalLinesProcessed = 4 [(gogoproto.jsontag) = "totalLinesProcessed"]; + int64 totalLinesProcessed = 4 [ (gogoproto.jsontag) = "totalLinesProcessed" ]; // Execution time in seconds. // In addition to internal calculations this is also returned by the HTTP API. // Grafana expects time values to be returned in seconds as float. - double execTime = 5 [(gogoproto.jsontag) = "execTime"]; + double execTime = 5 [ (gogoproto.jsontag) = "execTime" ]; // Queue time in seconds. // In addition to internal calculations this is also returned by the HTTP API. // Grafana expects time values to be returned in seconds as float. - double queueTime = 6 [(gogoproto.jsontag) = "queueTime"]; + double queueTime = 6 [ (gogoproto.jsontag) = "queueTime" ]; // Total of subqueries created to fulfill this query. - int64 subqueries = 7 [(gogoproto.jsontag) = "subqueries"]; + int64 subqueries = 7 [ (gogoproto.jsontag) = "subqueries" ]; } message Querier { - Store store = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "store"]; + Store store = 1 + [ (gogoproto.nullable) = false, (gogoproto.jsontag) = "store" ]; } message Ingester { // Total ingester reached for this query. 
- int32 totalReached = 1 [(gogoproto.jsontag) = "totalReached"]; + int32 totalReached = 1 [ (gogoproto.jsontag) = "totalReached" ]; // Total of chunks matched by the query from ingesters - int64 totalChunksMatched = 2 [(gogoproto.jsontag) = "totalChunksMatched"]; + int64 totalChunksMatched = 2 [ (gogoproto.jsontag) = "totalChunksMatched" ]; // Total of batches sent from ingesters. - int64 totalBatches = 3 [(gogoproto.jsontag) = "totalBatches"]; + int64 totalBatches = 3 [ (gogoproto.jsontag) = "totalBatches" ]; // Total lines sent by ingesters. - int64 totalLinesSent = 4 [(gogoproto.jsontag) = "totalLinesSent"]; - - Store store = 5 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "store"]; + int64 totalLinesSent = 4 [ (gogoproto.jsontag) = "totalLinesSent" ]; + Store store = 5 + [ (gogoproto.nullable) = false, (gogoproto.jsontag) = "store" ]; } message Store { - // The total of chunk reference fetched from index. - int64 totalChunksRef = 1 [(gogoproto.jsontag) = "totalChunksRef"]; - // Total number of chunks fetched. - int64 totalChunksDownloaded = 2 [(gogoproto.jsontag) = "totalChunksDownloaded"]; - // Time spent fetching chunks in nanoseconds. - int64 chunksDownloadTime = 3 [(gogoproto.jsontag) = "chunksDownloadTime"]; + // The total of chunk reference fetched from index. + int64 totalChunksRef = 1 [ (gogoproto.jsontag) = "totalChunksRef" ]; + // Total number of chunks fetched. + int64 totalChunksDownloaded = 2 + [ (gogoproto.jsontag) = "totalChunksDownloaded" ]; + // Time spent fetching chunks in nanoseconds. + int64 chunksDownloadTime = 3 [ (gogoproto.jsontag) = "chunksDownloadTime" ]; - Chunk chunk = 4 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "chunk"]; + Chunk chunk = 4 + [ (gogoproto.nullable) = false, (gogoproto.jsontag) = "chunk" ]; } message Chunk { // Total bytes processed but was already in memory. 
(found in the headchunk) - int64 headChunkBytes = 4 [(gogoproto.jsontag) = "headChunkBytes"]; + int64 headChunkBytes = 4 [ (gogoproto.jsontag) = "headChunkBytes" ]; // Total lines processed but was already in memory. (found in the headchunk) - int64 headChunkLines = 5 [(gogoproto.jsontag) = "headChunkLines"]; + int64 headChunkLines = 5 [ (gogoproto.jsontag) = "headChunkLines" ]; // Total bytes decompressed and processed from chunks. - int64 decompressedBytes = 6 [(gogoproto.jsontag) = "decompressedBytes"]; + int64 decompressedBytes = 6 [ (gogoproto.jsontag) = "decompressedBytes" ]; // Total lines decompressed and processed from chunks. - int64 decompressedLines = 7 [(gogoproto.jsontag) = "decompressedLines"]; + int64 decompressedLines = 7 [ (gogoproto.jsontag) = "decompressedLines" ]; // Total bytes of compressed chunks (blocks) processed. - int64 compressedBytes = 8 [(gogoproto.jsontag) = "compressedBytes"]; + int64 compressedBytes = 8 [ (gogoproto.jsontag) = "compressedBytes" ]; // Total duplicates found while processing. - int64 totalDuplicates = 9 [(gogoproto.jsontag) = "totalDuplicates"]; + int64 totalDuplicates = 9 [ (gogoproto.jsontag) = "totalDuplicates" ]; } diff --git a/pkg/lokifrontend/frontend/v1/frontendv1pb/frontend.proto b/pkg/lokifrontend/frontend/v1/frontendv1pb/frontend.proto index f2ebd5d683c40..0fd316e512b42 100644 --- a/pkg/lokifrontend/frontend/v1/frontendv1pb/frontend.proto +++ b/pkg/lokifrontend/frontend/v1/frontendv1pb/frontend.proto @@ -15,11 +15,13 @@ option (gogoproto.unmarshaler_all) = true; service Frontend { // After calling this method, client enters a loop, in which it waits for - // a "FrontendToClient" message and replies with single "ClientToFrontend" message. + // a "FrontendToClient" message and replies with single "ClientToFrontend" + // message. rpc Process(stream ClientToFrontend) returns (stream FrontendToClient) {}; // The client notifies the query-frontend that it started a graceful shutdown. 
- rpc NotifyClientShutdown(NotifyClientShutdownRequest) returns (NotifyClientShutdownResponse); + rpc NotifyClientShutdown(NotifyClientShutdownRequest) + returns (NotifyClientShutdownResponse); } enum Type { @@ -31,8 +33,8 @@ message FrontendToClient { httpgrpc.HTTPRequest httpRequest = 1; Type type = 2; - // Whether query statistics tracking should be enabled. The response will include - // statistics only when this option is enabled. + // Whether query statistics tracking should be enabled. The response will + // include statistics only when this option is enabled. bool statsEnabled = 3; } @@ -42,8 +44,6 @@ message ClientToFrontend { stats.Stats stats = 3; } -message NotifyClientShutdownRequest { - string clientID = 1; -} +message NotifyClientShutdownRequest { string clientID = 1; } message NotifyClientShutdownResponse {} diff --git a/pkg/lokifrontend/frontend/v2/frontendv2pb/frontend.proto b/pkg/lokifrontend/frontend/v2/frontendv2pb/frontend.proto index 861849e4b5fef..d72ada9b1ebdb 100644 --- a/pkg/lokifrontend/frontend/v2/frontendv2pb/frontend.proto +++ b/pkg/lokifrontend/frontend/v2/frontendv2pb/frontend.proto @@ -11,18 +11,19 @@ import "pkg/querier/stats/stats.proto"; option (gogoproto.marshaler_all) = true; option (gogoproto.unmarshaler_all) = true; -// Frontend interface exposed to Queriers. Used by queriers to report back the result of the query. +// Frontend interface exposed to Queriers. Used by queriers to report back the +// result of the query. 
service FrontendForQuerier { - rpc QueryResult (QueryResultRequest) returns (QueryResultResponse) { }; + rpc QueryResult(QueryResultRequest) returns (QueryResultResponse) {}; } message QueryResultRequest { - uint64 queryID = 1; - httpgrpc.HTTPResponse httpResponse = 2; - stats.Stats stats = 3; + uint64 queryID = 1; + httpgrpc.HTTPResponse httpResponse = 2; + stats.Stats stats = 3; - // There is no userID field here, because Querier puts userID into the context when - // calling QueryResult, and that is where Frontend expects to find it. + // There is no userID field here, because Querier puts userID into the context + // when calling QueryResult, and that is where Frontend expects to find it. } -message QueryResultResponse { } +message QueryResultResponse {} diff --git a/pkg/querier/queryrange/queryrange.proto b/pkg/querier/queryrange/queryrange.proto index 21d47641a28ca..610011d0ff6b3 100644 --- a/pkg/querier/queryrange/queryrange.proto +++ b/pkg/querier/queryrange/queryrange.proto @@ -16,71 +16,96 @@ message LokiRequest { uint32 limit = 2; int64 step = 3; int64 interval = 9; - google.protobuf.Timestamp startTs = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; - google.protobuf.Timestamp endTs = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp startTs = 4 + [ (gogoproto.stdtime) = true, (gogoproto.nullable) = false ]; + google.protobuf.Timestamp endTs = 5 + [ (gogoproto.stdtime) = true, (gogoproto.nullable) = false ]; logproto.Direction direction = 6; string path = 7; - repeated string shards = 8 [(gogoproto.jsontag) = "shards"]; + repeated string shards = 8 [ (gogoproto.jsontag) = "shards" ]; } message LokiInstantRequest { string query = 1; uint32 limit = 2; - google.protobuf.Timestamp timeTs = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp timeTs = 3 + [ (gogoproto.stdtime) = true, (gogoproto.nullable) = false ]; logproto.Direction direction = 4; string path = 5; - 
repeated string shards = 6 [(gogoproto.jsontag) = "shards"]; + repeated string shards = 6 [ (gogoproto.jsontag) = "shards" ]; } message LokiResponse { - string Status = 1 [(gogoproto.jsontag) = "status"]; - LokiData Data = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "data,omitempty"]; - string ErrorType = 3 [(gogoproto.jsontag) = "errorType,omitempty"]; - string Error = 4 [(gogoproto.jsontag) = "error,omitempty"]; + string Status = 1 [ (gogoproto.jsontag) = "status" ]; + LokiData Data = 2 + [ (gogoproto.nullable) = false, (gogoproto.jsontag) = "data,omitempty" ]; + string ErrorType = 3 [ (gogoproto.jsontag) = "errorType,omitempty" ]; + string Error = 4 [ (gogoproto.jsontag) = "error,omitempty" ]; logproto.Direction direction = 5; uint32 limit = 6; uint32 version = 7; - stats.Result statistics = 8 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "statistics"]; - repeated queryrangebase.PrometheusResponseHeader Headers = 9 [(gogoproto.jsontag) = "-", (gogoproto.customtype) = "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase.PrometheusResponseHeader"]; + stats.Result statistics = 8 + [ (gogoproto.nullable) = false, (gogoproto.jsontag) = "statistics" ]; + repeated queryrangebase.PrometheusResponseHeader Headers = 9 [ + (gogoproto.jsontag) = "-", + (gogoproto.customtype) = "github.com/grafana/loki/pkg/querier/queryrange/" + "queryrangebase.PrometheusResponseHeader" + ]; } message LokiSeriesRequest { repeated string match = 1; - google.protobuf.Timestamp startTs = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; - google.protobuf.Timestamp endTs = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp startTs = 2 + [ (gogoproto.stdtime) = true, (gogoproto.nullable) = false ]; + google.protobuf.Timestamp endTs = 3 + [ (gogoproto.stdtime) = true, (gogoproto.nullable) = false ]; string path = 4; - repeated string shards = 5 [(gogoproto.jsontag) = "shards"]; - + repeated string shards = 5 [ 
(gogoproto.jsontag) = "shards" ]; } message LokiSeriesResponse { - string Status = 1 [(gogoproto.jsontag) = "status"]; - repeated logproto.SeriesIdentifier Data = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "data,omitempty"]; + string Status = 1 [ (gogoproto.jsontag) = "status" ]; + repeated logproto.SeriesIdentifier Data = 2 + [ (gogoproto.nullable) = false, (gogoproto.jsontag) = "data,omitempty" ]; uint32 version = 3; - repeated queryrangebase.PrometheusResponseHeader Headers = 4 [(gogoproto.jsontag) = "-", (gogoproto.customtype) = "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase.PrometheusResponseHeader"]; + repeated queryrangebase.PrometheusResponseHeader Headers = 4 [ + (gogoproto.jsontag) = "-", + (gogoproto.customtype) = "github.com/grafana/loki/pkg/querier/queryrange/" + "queryrangebase.PrometheusResponseHeader" + ]; } message LokiLabelNamesRequest { - google.protobuf.Timestamp startTs = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; - google.protobuf.Timestamp endTs = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp startTs = 1 + [ (gogoproto.stdtime) = true, (gogoproto.nullable) = false ]; + google.protobuf.Timestamp endTs = 2 + [ (gogoproto.stdtime) = true, (gogoproto.nullable) = false ]; string path = 3; } message LokiLabelNamesResponse { - string Status = 1 [(gogoproto.jsontag) = "status"]; - repeated string Data = 2 [(gogoproto.jsontag) = "data,omitempty"]; + string Status = 1 [ (gogoproto.jsontag) = "status" ]; + repeated string Data = 2 [ (gogoproto.jsontag) = "data,omitempty" ]; uint32 version = 3; - repeated queryrangebase.PrometheusResponseHeader Headers = 4 [(gogoproto.jsontag) = "-", (gogoproto.customtype) = "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase.PrometheusResponseHeader"]; + repeated queryrangebase.PrometheusResponseHeader Headers = 4 [ + (gogoproto.jsontag) = "-", + (gogoproto.customtype) = "github.com/grafana/loki/pkg/querier/queryrange/" 
+ "queryrangebase.PrometheusResponseHeader" + ]; } message LokiData { - string ResultType = 1 [(gogoproto.jsontag) = "resultType"]; - repeated logproto.StreamAdapter Result = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "result", (gogoproto.customtype) = "github.com/grafana/loki/pkg/logproto.Stream"]; + string ResultType = 1 [ (gogoproto.jsontag) = "resultType" ]; + repeated logproto.StreamAdapter Result = 2 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "result", + (gogoproto.customtype) = "github.com/grafana/loki/pkg/logproto.Stream" + ]; } - // LokiPromResponse wraps a Prometheus response with statistics. message LokiPromResponse { - queryrangebase.PrometheusResponse response = 1 [(gogoproto.nullable) = true]; - stats.Result statistics = 2 [(gogoproto.nullable) = false]; + queryrangebase.PrometheusResponse response = 1 + [ (gogoproto.nullable) = true ]; + stats.Result statistics = 2 [ (gogoproto.nullable) = false ]; } diff --git a/pkg/querier/queryrange/queryrangebase/queryrange.proto b/pkg/querier/queryrange/queryrangebase/queryrange.proto index f49d3e0a60dc7..e87164bb0a2a7 100644 --- a/pkg/querier/queryrange/queryrangebase/queryrange.proto +++ b/pkg/querier/queryrange/queryrangebase/queryrange.proto @@ -13,59 +13,66 @@ option (gogoproto.marshaler_all) = true; option (gogoproto.unmarshaler_all) = true; message PrometheusRequestHeader { - string Name = 1 [(gogoproto.jsontag) = "-"]; - repeated string Values = 2 [(gogoproto.jsontag) = "-"]; + string Name = 1 [ (gogoproto.jsontag) = "-" ]; + repeated string Values = 2 [ (gogoproto.jsontag) = "-" ]; } message PrometheusRequest { string path = 1; int64 start = 2; int64 end = 3; int64 step = 4; - google.protobuf.Duration timeout = 5 [(gogoproto.stdduration) = true, (gogoproto.nullable) = false]; + google.protobuf.Duration timeout = 5 + [ (gogoproto.stdduration) = true, (gogoproto.nullable) = false ]; string query = 6; - CachingOptions cachingOptions = 7 [(gogoproto.nullable) = false]; - repeated 
PrometheusRequestHeader Headers = 8 [(gogoproto.jsontag) = "-"]; + CachingOptions cachingOptions = 7 [ (gogoproto.nullable) = false ]; + repeated PrometheusRequestHeader Headers = 8 [ (gogoproto.jsontag) = "-" ]; } message PrometheusResponseHeader { - string Name = 1 [(gogoproto.jsontag) = "-"]; - repeated string Values = 2 [(gogoproto.jsontag) = "-"]; + string Name = 1 [ (gogoproto.jsontag) = "-" ]; + repeated string Values = 2 [ (gogoproto.jsontag) = "-" ]; } message PrometheusResponse { - string Status = 1 [(gogoproto.jsontag) = "status"]; - PrometheusData Data = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "data,omitempty"]; - string ErrorType = 3 [(gogoproto.jsontag) = "errorType,omitempty"]; - string Error = 4 [(gogoproto.jsontag) = "error,omitempty"]; - repeated PrometheusResponseHeader Headers = 5 [(gogoproto.jsontag) = "-"]; + string Status = 1 [ (gogoproto.jsontag) = "status" ]; + PrometheusData Data = 2 + [ (gogoproto.nullable) = false, (gogoproto.jsontag) = "data,omitempty" ]; + string ErrorType = 3 [ (gogoproto.jsontag) = "errorType,omitempty" ]; + string Error = 4 [ (gogoproto.jsontag) = "error,omitempty" ]; + repeated PrometheusResponseHeader Headers = 5 [ (gogoproto.jsontag) = "-" ]; } message PrometheusData { - string ResultType = 1 [(gogoproto.jsontag) = "resultType"]; - repeated SampleStream Result = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "result"]; + string ResultType = 1 [ (gogoproto.jsontag) = "resultType" ]; + repeated SampleStream Result = 2 + [ (gogoproto.nullable) = false, (gogoproto.jsontag) = "result" ]; } message SampleStream { - repeated logproto.LegacyLabelPair labels = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "metric", (gogoproto.customtype) = "github.com/grafana/loki/pkg/logproto.LabelAdapter"]; - repeated logproto.LegacySample samples = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "values"]; + repeated logproto.LegacyLabelPair labels = 1 [ + (gogoproto.nullable) = false, + 
(gogoproto.jsontag) = "metric", + (gogoproto.customtype) = "github.com/grafana/loki/pkg/logproto.LabelAdapter" + ]; + repeated logproto.LegacySample samples = 2 + [ (gogoproto.nullable) = false, (gogoproto.jsontag) = "values" ]; } -message CachedResponse { - string key = 1 [(gogoproto.jsontag) = "key"]; +message CachedResponse { + string key = 1 [ (gogoproto.jsontag) = "key" ]; - // List of cached responses; non-overlapping and in order. - repeated Extent extents = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "extents"]; + // List of cached responses; non-overlapping and in order. + repeated Extent extents = 2 + [ (gogoproto.nullable) = false, (gogoproto.jsontag) = "extents" ]; } -message Extent { - int64 start = 1 [(gogoproto.jsontag) = "start"]; - int64 end = 2 [(gogoproto.jsontag) = "end"]; +message Extent { + int64 start = 1 [ (gogoproto.jsontag) = "start" ]; + int64 end = 2 [ (gogoproto.jsontag) = "end" ]; // reserved the previous key to ensure cache transition reserved 3; - string trace_id = 4 [(gogoproto.jsontag) = "-"]; - google.protobuf.Any response = 5 [(gogoproto.jsontag) = "response"]; + string trace_id = 4 [ (gogoproto.jsontag) = "-" ]; + google.protobuf.Any response = 5 [ (gogoproto.jsontag) = "response" ]; } -message CachingOptions { - bool disabled = 1; -} +message CachingOptions { bool disabled = 1; } diff --git a/pkg/querier/stats/stats.proto b/pkg/querier/stats/stats.proto index 79f74334d1683..796840273ea1c 100644 --- a/pkg/querier/stats/stats.proto +++ b/pkg/querier/stats/stats.proto @@ -11,7 +11,8 @@ option (gogoproto.unmarshaler_all) = true; message Stats { // The sum of all wall time spent in the querier to execute the query. 
- google.protobuf.Duration wall_time = 1 [(gogoproto.stdduration) = true, (gogoproto.nullable) = false]; + google.protobuf.Duration wall_time = 1 + [ (gogoproto.stdduration) = true, (gogoproto.nullable) = false ]; // The number of series fetched for the query uint64 fetched_series_count = 2; // The number of bytes of the chunks fetched for the query diff --git a/pkg/ruler/base/ruler.proto b/pkg/ruler/base/ruler.proto index 782dd5002b6a0..5c581e2e2c1e4 100644 --- a/pkg/ruler/base/ruler.proto +++ b/pkg/ruler/base/ruler.proto @@ -19,16 +19,16 @@ service Ruler { message RulesRequest {} -message RulesResponse { - repeated GroupStateDesc groups = 1; -} +message RulesResponse { repeated GroupStateDesc groups = 1; } // GroupStateDesc is a proto representation of a rule group message GroupStateDesc { rules.RuleGroupDesc group = 1; repeated RuleStateDesc active_rules = 2; - google.protobuf.Timestamp evaluationTimestamp = 3 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; - google.protobuf.Duration evaluationDuration = 4 [(gogoproto.nullable) = false,(gogoproto.stdduration) = true]; + google.protobuf.Timestamp evaluationTimestamp = 3 + [ (gogoproto.nullable) = false, (gogoproto.stdtime) = true ]; + google.protobuf.Duration evaluationDuration = 4 + [ (gogoproto.nullable) = false, (gogoproto.stdduration) = true ]; } // RuleStateDesc is a proto representation of a Prometheus Rule @@ -38,8 +38,10 @@ message RuleStateDesc { string health = 3; string lastError = 4; repeated AlertStateDesc alerts = 5; - google.protobuf.Timestamp evaluationTimestamp = 6 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; - google.protobuf.Duration evaluationDuration = 7 [(gogoproto.nullable) = false,(gogoproto.stdduration) = true]; + google.protobuf.Timestamp evaluationTimestamp = 6 + [ (gogoproto.nullable) = false, (gogoproto.stdtime) = true ]; + google.protobuf.Duration evaluationDuration = 7 + [ (gogoproto.nullable) = false, (gogoproto.stdduration) = true ]; } message 
AlertStateDesc { @@ -54,13 +56,13 @@ message AlertStateDesc { ]; double value = 4; google.protobuf.Timestamp active_at = 5 - [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + [ (gogoproto.nullable) = false, (gogoproto.stdtime) = true ]; google.protobuf.Timestamp fired_at = 6 - [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + [ (gogoproto.nullable) = false, (gogoproto.stdtime) = true ]; google.protobuf.Timestamp resolved_at = 7 - [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + [ (gogoproto.nullable) = false, (gogoproto.stdtime) = true ]; google.protobuf.Timestamp last_sent_at = 8 - [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + [ (gogoproto.nullable) = false, (gogoproto.stdtime) = true ]; google.protobuf.Timestamp valid_until = 9 - [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + [ (gogoproto.nullable) = false, (gogoproto.stdtime) = true ]; } \ No newline at end of file diff --git a/pkg/ruler/rulespb/rules.proto b/pkg/ruler/rulespb/rules.proto index cc10170810264..c59995c46e2ea 100644 --- a/pkg/ruler/rulespb/rules.proto +++ b/pkg/ruler/rulespb/rules.proto @@ -19,13 +19,13 @@ message RuleGroupDesc { string name = 1; string namespace = 2; google.protobuf.Duration interval = 3 - [(gogoproto.nullable) = false, (gogoproto.stdduration) = true]; + [ (gogoproto.nullable) = false, (gogoproto.stdduration) = true ]; repeated RuleDesc rules = 4; string user = 6; // The options field can be used to extend Ruler functionality without - // having to repeatedly redefine the proto description. It can also be leveraged - // to create custom `ManagerOpts` based on rule configs which can then be passed - // to the Prometheus Manager. + // having to repeatedly redefine the proto description. It can also be + // leveraged to create custom `ManagerOpts` based on rule configs which can + // then be passed to the Prometheus Manager. 
repeated google.protobuf.Any options = 9; } diff --git a/pkg/scheduler/schedulerpb/scheduler.proto b/pkg/scheduler/schedulerpb/scheduler.proto index 3ae6437567a47..22afaf306d54a 100644 --- a/pkg/scheduler/schedulerpb/scheduler.proto +++ b/pkg/scheduler/schedulerpb/scheduler.proto @@ -12,50 +12,61 @@ option (gogoproto.unmarshaler_all) = true; // Scheduler interface exposed to Queriers. service SchedulerForQuerier { - // After calling this method, both Querier and Scheduler enter a loop, in which querier waits for - // "SchedulerToQuerier" messages containing HTTP requests and processes them. After processing the request, - // querier signals that it is ready to accept another one by sending empty QuerierToScheduler message. + // After calling this method, both Querier and Scheduler enter a loop, in + // which querier waits for "SchedulerToQuerier" messages containing HTTP + // requests and processes them. After processing the request, querier signals + // that it is ready to accept another one by sending empty QuerierToScheduler + // message. // - // Long-running loop is used to detect broken connection between scheduler and querier. This is important - // for scheduler to keep a list of connected queriers up-to-date. - rpc QuerierLoop(stream QuerierToScheduler) returns (stream SchedulerToQuerier) { }; - - // The querier notifies the query-scheduler that it started a graceful shutdown. - rpc NotifyQuerierShutdown(NotifyQuerierShutdownRequest) returns (NotifyQuerierShutdownResponse); + // Long-running loop is used to detect broken connection between scheduler and + // querier. This is important for scheduler to keep a list of connected + // queriers up-to-date. + rpc QuerierLoop(stream QuerierToScheduler) + returns (stream SchedulerToQuerier) {}; + + // The querier notifies the query-scheduler that it started a graceful + // shutdown. 
+ rpc NotifyQuerierShutdown(NotifyQuerierShutdownRequest) + returns (NotifyQuerierShutdownResponse); } -// Querier reports its own clientID when it connects, so that scheduler knows how many *different* queriers are connected. -// To signal that querier is ready to accept another request, querier sends empty message. -message QuerierToScheduler { - string querierID = 1; -} +// Querier reports its own clientID when it connects, so that scheduler knows +// how many *different* queriers are connected. To signal that querier is ready +// to accept another request, querier sends empty message. +message QuerierToScheduler { string querierID = 1; } message SchedulerToQuerier { - // Query ID as reported by frontend. When querier sends the response back to frontend (using frontendAddress), - // it identifies the query by using this ID. + // Query ID as reported by frontend. When querier sends the response back to + // frontend (using frontendAddress), it identifies the query by using this ID. uint64 queryID = 1; httpgrpc.HTTPRequest httpRequest = 2; - // Where should querier send HTTP Response to (using FrontendForQuerier interface). + // Where should querier send HTTP Response to (using FrontendForQuerier + // interface). string frontendAddress = 3; // User who initiated the request. Needed to send reply back to frontend. string userID = 4; - // Whether query statistics tracking should be enabled. The response will include - // statistics only when this option is enabled. + // Whether query statistics tracking should be enabled. The response will + // include statistics only when this option is enabled. bool statsEnabled = 5; } -// Scheduler interface exposed to Frontend. Frontend can enqueue and cancel requests. +// Scheduler interface exposed to Frontend. Frontend can enqueue and cancel +// requests. service SchedulerForFrontend { - // After calling this method, both Frontend and Scheduler enter a loop. 
Frontend will keep sending ENQUEUE and - // CANCEL requests, and scheduler is expected to process them. Scheduler returns one response for each request. + // After calling this method, both Frontend and Scheduler enter a loop. + // Frontend will keep sending ENQUEUE and CANCEL requests, and scheduler is + // expected to process them. Scheduler returns one response for each request. // - // Long-running loop is used to detect broken connection between frontend and scheduler. This is important for both - // parties... if connection breaks, frontend can cancel (and possibly retry on different scheduler) all pending - // requests sent to this scheduler, while scheduler can cancel queued requests from given frontend. - rpc FrontendLoop(stream FrontendToScheduler) returns (stream SchedulerToFrontend) { }; + // Long-running loop is used to detect broken connection between frontend and + // scheduler. This is important for both parties... if connection breaks, + // frontend can cancel (and possibly retry on different scheduler) all pending + // requests sent to this scheduler, while scheduler can cancel queued requests + // from given frontend. + rpc FrontendLoop(stream FrontendToScheduler) + returns (stream SchedulerToFrontend) {}; } enum FrontendToSchedulerType { @@ -71,7 +82,8 @@ message FrontendToScheduler { string frontendAddress = 2; // Used by ENQUEUE and CANCEL. - // Each frontend manages its own queryIDs. Different frontends may use same set of query IDs. + // Each frontend manages its own queryIDs. Different frontends may use same + // set of query IDs. uint64 queryID = 3; // Following are used by ENQUEUE only. 
@@ -92,8 +104,6 @@ message SchedulerToFrontend { string error = 2; } -message NotifyQuerierShutdownRequest { - string querierID = 1; -} +message NotifyQuerierShutdownRequest { string querierID = 1; } message NotifyQuerierShutdownResponse {} diff --git a/pkg/storage/chunk/grpc/grpc.proto b/pkg/storage/chunk/grpc/grpc.proto index 3eecd35847b8d..07d70fbbc8638 100644 --- a/pkg/storage/chunk/grpc/grpc.proto +++ b/pkg/storage/chunk/grpc/grpc.proto @@ -5,138 +5,115 @@ package grpc; import "google/protobuf/empty.proto"; service grpc_store { - /// index-client - - /// WriteIndex writes batch of indexes to the index tables. - rpc WriteIndex(WriteIndexRequest) returns (google.protobuf.Empty); - /// QueryIndex reads the indexes required for given query & sends back the batch of rows - /// in rpc streams - rpc QueryIndex(QueryIndexRequest) returns (stream QueryIndexResponse); - /// DeleteIndex deletes the batch of index entries from the index tables - rpc DeleteIndex(DeleteIndexRequest) returns (google.protobuf.Empty); - - /// storage-client - - /// PutChunks saves the batch of chunks into the chunk tables. - rpc PutChunks(PutChunksRequest) returns (google.protobuf.Empty); - /// GetChunks requests for batch of chunks and the batch of chunks are sent back in rpc streams - /// batching needs to be performed at server level as per requirement instead of sending single chunk per stream. - /// In GetChunks rpc request send buf as nil - rpc GetChunks(GetChunksRequest) returns (stream GetChunksResponse); - /// DeleteChunks deletes the chunks based on chunkID. - rpc DeleteChunks(ChunkID) returns (google.protobuf.Empty); - - /// table-client - - /// Lists all the tables that exists in the database. - rpc ListTables(google.protobuf.Empty) returns (ListTablesResponse); - /// Creates a table with provided name & attributes. - rpc CreateTable(CreateTableRequest) returns (google.protobuf.Empty); - // Deletes a table using table name provided. 
- rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty); - // Describes a table information for the provided table. - rpc DescribeTable(DescribeTableRequest) returns (DescribeTableResponse); - // Update a table with newly provided table information. - rpc UpdateTable(UpdateTableRequest) returns (google.protobuf.Empty); -} + /// index-client -message PutChunksRequest { - repeated Chunk chunks = 1; -} + /// WriteIndex writes batch of indexes to the index tables. + rpc WriteIndex(WriteIndexRequest) returns (google.protobuf.Empty); + /// QueryIndex reads the indexes required for given query & sends back the + /// batch of rows in rpc streams + rpc QueryIndex(QueryIndexRequest) returns (stream QueryIndexResponse); + /// DeleteIndex deletes the batch of index entries from the index tables + rpc DeleteIndex(DeleteIndexRequest) returns (google.protobuf.Empty); -message GetChunksRequest { - repeated Chunk chunks = 1; -} + /// storage-client + + /// PutChunks saves the batch of chunks into the chunk tables. + rpc PutChunks(PutChunksRequest) returns (google.protobuf.Empty); + /// GetChunks requests for batch of chunks and the batch of chunks are sent + /// back in rpc streams batching needs to be performed at server level as per + /// requirement instead of sending single chunk per stream. In GetChunks rpc + /// request send buf as nil + rpc GetChunks(GetChunksRequest) returns (stream GetChunksResponse); + /// DeleteChunks deletes the chunks based on chunkID. + rpc DeleteChunks(ChunkID) returns (google.protobuf.Empty); + + /// table-client -message GetChunksResponse { - repeated Chunk chunks = 1; + /// Lists all the tables that exists in the database. + rpc ListTables(google.protobuf.Empty) returns (ListTablesResponse); + /// Creates a table with provided name & attributes. + rpc CreateTable(CreateTableRequest) returns (google.protobuf.Empty); + // Deletes a table using table name provided. 
+ rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty); + // Describes a table information for the provided table. + rpc DescribeTable(DescribeTableRequest) returns (DescribeTableResponse); + // Update a table with newly provided table information. + rpc UpdateTable(UpdateTableRequest) returns (google.protobuf.Empty); } +message PutChunksRequest { repeated Chunk chunks = 1; } + +message GetChunksRequest { repeated Chunk chunks = 1; } + +message GetChunksResponse { repeated Chunk chunks = 1; } + message Chunk { - bytes encoded = 1; - string key = 2; - string tableName = 3; + bytes encoded = 1; + string key = 2; + string tableName = 3; } -message ChunkID { - string chunkID = 1; -} +message ChunkID { string chunkID = 1; } -message DeleteTableRequest { - string tableName = 1; -} +message DeleteTableRequest { string tableName = 1; } -message DescribeTableRequest { - string tableName = 1; -} +message DescribeTableRequest { string tableName = 1; } message WriteBatch { - repeated IndexEntry writes = 1; - repeated IndexEntry deletes = 2; + repeated IndexEntry writes = 1; + repeated IndexEntry deletes = 2; } -message WriteIndexRequest { - repeated IndexEntry writes = 1; -} +message WriteIndexRequest { repeated IndexEntry writes = 1; } -message DeleteIndexRequest { - repeated IndexEntry deletes = 1; -} +message DeleteIndexRequest { repeated IndexEntry deletes = 1; } -message QueryIndexResponse { - repeated Row rows = 1; -} +message QueryIndexResponse { repeated Row rows = 1; } message Row { - bytes rangeValue = 1; - bytes value = 2; + bytes rangeValue = 1; + bytes value = 2; } message IndexEntry { - string tableName = 1; - string hashValue = 2; - bytes rangeValue = 3; - bytes value = 4; + string tableName = 1; + string hashValue = 2; + bytes rangeValue = 3; + bytes value = 4; } message QueryIndexRequest { - string tableName = 1; - string hashValue = 2; - bytes rangeValuePrefix = 3; - bytes rangeValueStart = 4; - bytes valueEqual = 5; - bool immutable = 6; + 
string tableName = 1; + string hashValue = 2; + bytes rangeValuePrefix = 3; + bytes rangeValueStart = 4; + bytes valueEqual = 5; + bool immutable = 6; } message UpdateTableRequest { - TableDesc current = 1; - TableDesc expected = 2; + TableDesc current = 1; + TableDesc expected = 2; } message DescribeTableResponse { - TableDesc desc = 1; - bool isActive = 2; + TableDesc desc = 1; + bool isActive = 2; } -message CreateTableRequest { - TableDesc desc = 1; -} +message CreateTableRequest { TableDesc desc = 1; } message TableDesc { - string name = 1; - bool useOnDemandIOMode = 2; - int64 provisionedRead = 3; - int64 provisionedWrite = 4; - map tags = 5; + string name = 1; + bool useOnDemandIOMode = 2; + int64 provisionedRead = 3; + int64 provisionedWrite = 4; + map tags = 5; } -message ListTablesResponse { - repeated string tableNames = 1; -} +message ListTablesResponse { repeated string tableNames = 1; } message Labels { - string name = 1; - string value = 2; + string name = 1; + string value = 2; } - - diff --git a/pkg/storage/chunk/storage/caching_index_client.proto b/pkg/storage/chunk/storage/caching_index_client.proto index 22a9d01ffaff4..eb0091c03bf88 100644 --- a/pkg/storage/chunk/storage/caching_index_client.proto +++ b/pkg/storage/chunk/storage/caching_index_client.proto @@ -8,18 +8,20 @@ option (gogoproto.marshaler_all) = true; option (gogoproto.unmarshaler_all) = true; message Entry { - bytes Column = 1 [(gogoproto.customtype) = "Bytes", (gogoproto.nullable) = false]; - bytes Value = 2 [(gogoproto.customtype) = "Bytes", (gogoproto.nullable) = false]; + bytes Column = 1 + [ (gogoproto.customtype) = "Bytes", (gogoproto.nullable) = false ]; + bytes Value = 2 + [ (gogoproto.customtype) = "Bytes", (gogoproto.nullable) = false ]; } message ReadBatch { - repeated Entry entries = 1 [(gogoproto.nullable) = false]; - string key = 2; + repeated Entry entries = 1 [ (gogoproto.nullable) = false ]; + string key = 2; - // The time at which the key expires. 
- int64 expiry = 3; + // The time at which the key expires. + int64 expiry = 3; - // The number of entries; used for cardinality limiting. - // entries will be empty when this is set. - int32 cardinality = 4; + // The number of entries; used for cardinality limiting. + // entries will be empty when this is set. + int32 cardinality = 4; } diff --git a/pkg/storage/stores/shipper/indexgateway/indexgatewaypb/gateway.proto b/pkg/storage/stores/shipper/indexgateway/indexgatewaypb/gateway.proto index e4b3d6d0f4ef2..ce2f5b0110755 100644 --- a/pkg/storage/stores/shipper/indexgateway/indexgatewaypb/gateway.proto +++ b/pkg/storage/stores/shipper/indexgateway/indexgatewaypb/gateway.proto @@ -3,30 +3,27 @@ syntax = "proto3"; package indexgatewaypb; service IndexGateway { - /// QueryIndex reads the indexes required for given query & sends back the batch of rows - /// in rpc streams - rpc QueryIndex(QueryIndexRequest) returns (stream QueryIndexResponse); + /// QueryIndex reads the indexes required for given query & sends back the + /// batch of rows in rpc streams + rpc QueryIndex(QueryIndexRequest) returns (stream QueryIndexResponse); } message QueryIndexResponse { - string QueryKey = 1; - repeated Row rows = 2; + string QueryKey = 1; + repeated Row rows = 2; } message Row { - bytes rangeValue = 1; - bytes value = 2; + bytes rangeValue = 1; + bytes value = 2; } -message QueryIndexRequest { - repeated IndexQuery Queries = 1; -} +message QueryIndexRequest { repeated IndexQuery Queries = 1; } message IndexQuery { - string tableName = 1; - string hashValue = 2; - bytes rangeValuePrefix = 3; - bytes rangeValueStart = 4; - bytes valueEqual = 5; + string tableName = 1; + string hashValue = 2; + bytes rangeValuePrefix = 3; + bytes rangeValueStart = 4; + bytes valueEqual = 5; } -