From 043227d4ea94b6370a0c6d4130a3a5db9473e6dc Mon Sep 17 00:00:00 2001 From: Cyril Tovena Date: Thu, 25 Apr 2019 16:49:06 -0400 Subject: [PATCH 1/4] Query label values and names are now fetched from the store. A time range is now required by the /api/prom/label with a sane default (6 hours from now). --- Gopkg.lock | 5 + pkg/logproto/logproto.pb.go | 153 ++++++++++++++++-- pkg/logproto/logproto.proto | 2 + pkg/querier/querier.go | 26 +++ .../cortex/pkg/chunk/chunk_store.go | 85 ++++++++++ .../cortex/pkg/chunk/chunk_store_utils.go | 16 ++ .../cortex/pkg/chunk/composite_store.go | 15 ++ .../cortex/pkg/chunk/series_store.go | 62 +++++++ .../cortex/pkg/ingester/client/fnv.go | 3 +- 9 files changed, 357 insertions(+), 10 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 1b1866764884a..2e04555ce5b7a 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -248,7 +248,12 @@ "pkg/util/validation", ] pruneopts = "UT" +<<<<<<< HEAD revision = "e1ab5495e8a846891e3b6b8e757e63201b886bec" +======= + revision = "823897b6a214f4db92d611fcd76331ff68b1791f" + source = "https://github.com/grafana/cortex" +>>>>>>> Query label values and names are now fetched from the store. [[projects]] digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" diff --git a/pkg/logproto/logproto.pb.go b/pkg/logproto/logproto.pb.go index d2c0e3ed96c25..92000a12a1728 100644 --- a/pkg/logproto/logproto.pb.go +++ b/pkg/logproto/logproto.pb.go @@ -6,17 +6,18 @@ package logproto import ( context "context" fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - _ "github.com/gogo/protobuf/types" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - grpc "google.golang.org/grpc" io "io" math "math" reflect "reflect" strconv "strconv" strings "strings" time "time" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + _ "github.com/gogo/protobuf/types" + github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. 
@@ -257,8 +258,10 @@ func (m *QueryResponse) GetStreams() []*Stream { } type LabelRequest struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Values bool `protobuf:"varint,2,opt,name=values,proto3" json:"values,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Values bool `protobuf:"varint,2,opt,name=values,proto3" json:"values,omitempty"` + Start *time.Time `protobuf:"bytes,3,opt,name=start,proto3,stdtime" json:"start,omitempty"` + End *time.Time `protobuf:"bytes,4,opt,name=end,proto3,stdtime" json:"end,omitempty"` } func (m *LabelRequest) Reset() { *m = LabelRequest{} } @@ -307,6 +310,20 @@ func (m *LabelRequest) GetValues() bool { return false } +func (m *LabelRequest) GetStart() *time.Time { + if m != nil { + return m.Start + } + return nil +} + +func (m *LabelRequest) GetEnd() *time.Time { + if m != nil { + return m.End + } + return nil +} + type LabelResponse struct { Values []string `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` } @@ -838,6 +855,20 @@ func (this *LabelRequest) Equal(that interface{}) bool { if this.Values != that1.Values { return false } + if that1.Start == nil { + if this.Start != nil { + return false + } + } else if !this.Start.Equal(*that1.Start) { + return false + } + if that1.End == nil { + if this.End != nil { + return false + } + } else if !this.End.Equal(*that1.End) { + return false + } return true } func (this *LabelResponse) Equal(that interface{}) bool { @@ -1072,10 +1103,12 @@ func (this *LabelRequest) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 6) + s := make([]string, 0, 8) s = append(s, "&logproto.LabelRequest{") s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") s = append(s, "Values: "+fmt.Sprintf("%#v", this.Values)+",\n") + s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n") + s = append(s, "End: "+fmt.Sprintf("%#v", this.End)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -1587,6 +1620,26 @@ func (m *LabelRequest) MarshalTo(dAtA []byte) (int, error) { } i++ } + if m.Start != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(*m.Start))) + n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Start, dAtA[i:]) + if err3 != nil { + return 0, err3 + } + i += n3 + } + if m.End != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(*m.End))) + n4, err4 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.End, dAtA[i:]) + if err4 != nil { + return 0, err4 + } + i += n4 + } return i, nil } @@ -1681,7 +1734,7 @@ func (m *Entry) MarshalTo(dAtA []byte) (int, error) { if err3 != nil { return 0, err3 } - i += n3 + i += n5 if len(m.Line) > 0 { dAtA[i] = 0x12 i++ @@ -1894,6 +1947,14 @@ func (m *LabelRequest) Size() (n int) { if m.Values { n += 2 } + if m.Start != nil { + l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.Start) + n += 1 + l + sovLogproto(uint64(l)) + } + if m.End != nil { + l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.End) + n += 1 + l + sovLogproto(uint64(l)) + } return n } @@ -2076,6 +2137,8 @@ func (this *LabelRequest) String() string { s := strings.Join([]string{`&LabelRequest{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `Start:` + strings.Replace(fmt.Sprintf("%v", this.Start), "Timestamp", "types.Timestamp", 1) + `,`, + `End:` + strings.Replace(fmt.Sprintf("%v", this.End), 
"Timestamp", "types.Timestamp", 1) + `,`, `}`, }, "") return s @@ -2694,6 +2757,78 @@ func (m *LabelRequest) Unmarshal(dAtA []byte) error { } } m.Values = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Start == nil { + m.Start = new(time.Time) + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.Start, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.End == nil { + m.End = new(time.Time) + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.End, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipLogproto(dAtA[iNdEx:]) diff --git a/pkg/logproto/logproto.proto b/pkg/logproto/logproto.proto index d0d8388babb0c..d97dabcc70db2 100644 --- a/pkg/logproto/logproto.proto +++ b/pkg/logproto/logproto.proto @@ -43,6 +43,8 @@ message QueryResponse { message LabelRequest { string name = 1; bool values = 2; // True to fetch label values, false for fetch labels names. 
+ google.protobuf.Timestamp start = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = true]; + google.protobuf.Timestamp end = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = true]; } message LabelResponse { diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index 5a2a5e2ff18bd..a42ee8ea8e7cb 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -148,10 +148,36 @@ func (q *Querier) Label(ctx context.Context, req *logproto.LabelRequest) (*logpr return nil, err } + if req.End == nil { + now := time.Now() + req.End = &now + } + + if req.Start == nil { + // by default will look for the last 6 hours + start := req.End.Add(-6 * time.Hour) + req.Start = &start + } + + from, through := model.TimeFromUnixNano(req.Start.UnixNano()), model.TimeFromUnixNano(req.End.UnixNano()) + var storeValues []string + if req.Values { + storeValues, err = q.store.LabelValuesForMetricName(ctx, from, through, "logs", req.Name) + if err != nil { + return nil, err + } + } else { + storeValues, err = q.store.LabelNamesForMetricName(ctx, from, through, "logs") + if err != nil { + return nil, err + } + } + results := make([][]string, 0, len(resps)) for _, resp := range resps { results = append(results, resp.response.(*logproto.LabelResponse).Values) } + results = append(results, storeValues) return &logproto.LabelResponse{ Values: mergeLists(results...), diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go index 7ae86fc3de429..6fdcb716a8865 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go @@ -188,6 +188,91 @@ func (c *store) Get(ctx context.Context, from, through model.Time, allMatchers . return c.getMetricNameChunks(ctx, from, through, matchers, metricName) } +// LabelNamesForMetricName retrieves all label names for a metric name. +func (c *store) LabelNamesForMetricName(ctx context.Context, from, through model.Time, metricName string) ([]string, error) { + log, ctx := spanlogger.New(ctx, "ChunkStore.LabelNamesForMetricName") + defer log.Span.Finish() + level.Debug(log).Log("from", from, "through", through, "metricName", metricName) + + shortcut, err := c.validateQueryTimeRange(ctx, from, &through) + if err != nil { + return nil, err + } else if shortcut { + return nil, nil + } + + chunks, err := c.lookupChunksByMetricName(ctx, from, through, nil, metricName) + if err != nil { + return nil, err + } + level.Debug(log).Log("Chunks in index", len(chunks)) + + // Filter out chunks that are not in the selected time range and keep a single chunk per fingerprint + filtered := filterChunksByTime(from, through, chunks) + filtered, keys := filterChunksByUniqueFingerPrint(filtered) + level.Debug(log).Log("Chunks post filtering", len(chunks)) + + // Now fetch the actual chunk data from Memcache / S3 + allChunks, err := c.FetchChunks(ctx, filtered, keys) + if err != nil { + level.Error(log).Log("msg", "FetchChunks", "err", err) + return nil, err + } + var result []string + for _, c := range allChunks { + for labelName := range c.Metric { + if labelName != model.MetricNameLabel { + result = append(result, string(labelName)) + } + } + } + sort.Strings(result) + result = uniqueStrings(result) + return result, nil +} + +// LabelValuesForMetricName retrieves all label values for a single label name and metric name. 
+func (c *store) LabelValuesForMetricName(ctx context.Context, from, through model.Time, metricName, labelName string) ([]string, error) { + log, ctx := spanlogger.New(ctx, "ChunkStore.LabelValues") + defer log.Span.Finish() + level.Debug(log).Log("from", from, "through", through, "metricName", metricName, "labelName", labelName) + + userID, err := user.ExtractOrgID(ctx) + if err != nil { + return nil, err + } + + shortcut, err := c.validateQueryTimeRange(ctx, from, &through) + if err != nil { + return nil, err + } else if shortcut { + return nil, nil + } + + queries, err := c.schema.GetReadQueriesForMetricLabel(from, through, userID, model.LabelValue(metricName), model.LabelName(labelName)) + if err != nil { + return nil, err + } + + entries, err := c.lookupEntriesByQueries(ctx, queries) + if err != nil { + return nil, err + } + + var result []string + for _, entry := range entries { + _, labelValue, _, _, err := parseChunkTimeRangeValue(entry.RangeValue, entry.Value) + if err != nil { + return nil, err + } + result = append(result, string(labelValue)) + } + + sort.Strings(result) + result = uniqueStrings(result) + return result, nil +} + func (c *store) GetChunkRefs(ctx context.Context, from, through model.Time, allMatchers ...*labels.Matcher) ([][]Chunk, []*Fetcher, error) { return nil, nil, errors.New("not implemented") } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go index c5e7cade69b63..71ce2d8a71303 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go @@ -36,6 +36,22 @@ func keysFromChunks(chunks []Chunk) []string { return keys } +func filterChunksByUniqueFingerPrint(chunks []Chunk) ([]Chunk, []string) { + filtered := make([]Chunk, 0, len(chunks)) + keys := make([]string, 0, len(chunks)) + uniqueFp := map[model.Fingerprint]struct{}{} + + for _, chunk := range chunks { + if _, ok := uniqueFp[chunk.Fingerprint]; ok { + continue + } + filtered = append(filtered, chunk) + keys = append(keys, chunk.ExternalKey()) + uniqueFp[chunk.Fingerprint] = struct{}{} + } + return filtered, keys +} + func filterChunksByMatchers(chunks []Chunk, filters []*labels.Matcher) []Chunk { filteredChunks := make([]Chunk, 0, len(chunks)) outer: diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/composite_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/composite_store.go index 762aa424e3625..5814ec7083d42 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/composite_store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/composite_store.go @@ -18,6 +18,7 @@ type Store interface { // GetChunkRefs returns the un-loaded chunks and the fetchers to be used to load them. You can load each slice of chunks ([]Chunk), // using the corresponding Fetcher (fetchers[i].FetchChunks(ctx, chunks[i], ...) GetChunkRefs(ctx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([][]Chunk, []*Fetcher, error) + LabelNamesForMetricName(ctx context.Context, from, through model.Time, metricName string) ([]string, error) LabelValuesForMetricName(ctx context.Context, from, through model.Time, metricName string, labelName string) ([]string, error) Stop() } @@ -92,6 +93,20 @@ func (c compositeStore) Get(ctx context.Context, from, through model.Time, match return results, err } +// LabelNamesForMetricName retrieves all label names for a metric name. 
+func (c compositeStore) LabelNamesForMetricName(ctx context.Context, from, through model.Time, metricName string) ([]string, error) { + var result []string + err := c.forStores(from, through, func(from, through model.Time, store Store) error { + labelNames, err := store.LabelNamesForMetricName(ctx, from, through, metricName) + if err != nil { + return err + } + result = append(result, labelNames...) + return nil + }) + return result, err +} + // LabelValuesForMetricName retrieves all label values for a single label name and metric name. func (c compositeStore) LabelValuesForMetricName(ctx context.Context, from, through model.Time, metricName string, labelName string) ([]string, error) { var result []string diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go index 5462a0fc38fcf..e2c29bc166cc8 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "net/http" + "sort" "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" @@ -142,6 +143,67 @@ func (c *seriesStore) Get(ctx context.Context, from, through model.Time, allMatc return filteredChunks, nil } +// LabelNamesForMetricName retrieves all label names for a metric name. +func (c *seriesStore) LabelNamesForMetricName(ctx context.Context, from, through model.Time, metricName string) ([]string, error) { + log, ctx := spanlogger.New(ctx, "SeriesStore.LabelNamesForMetricName") + defer log.Span.Finish() + + shortcut, err := c.validateQueryTimeRange(ctx, from, &through) + if err != nil { + return nil, err + } else if shortcut { + return nil, nil + } + level.Debug(log).Log("metric", metricName) + + // Fetch the series IDs from the index + seriesIDs, err := c.lookupSeriesByMetricNameMatchers(ctx, from, through, metricName, nil) + if err != nil { + return nil, err + } + level.Debug(log).Log("series-ids", len(seriesIDs)) + + // Lookup the series in the index to get the chunks. 
+ chunkIDs, err := c.lookupChunksBySeries(ctx, from, through, seriesIDs) + if err != nil { + level.Error(log).Log("msg", "lookupChunksBySeries", "err", err) + return nil, err + } + level.Debug(log).Log("chunk-ids", len(chunkIDs)) + + chunks, err := c.convertChunkIDsToChunks(ctx, chunkIDs) + if err != nil { + level.Error(log).Log("err", "convertChunkIDsToChunks", "err", err) + return nil, err + } + + // Filter out chunks that are not in the selected time range and keep a single chunk per fingerprint + filtered, _ := filterChunksByTime(from, through, chunks) + filtered, keys := filterChunksByUniqueFingerPrint(filtered) + level.Debug(log).Log("Chunks post filtering", len(chunks)) + + chunksPerQuery.Observe(float64(len(filtered))) + + // Now fetch the actual chunk data from Memcache / S3 + allChunks, err := c.FetchChunks(ctx, filtered, keys) + if err != nil { + level.Error(log).Log("msg", "FetchChunks", "err", err) + return nil, err + } + + var result []string + for _, c := range allChunks { + for labelName := range c.Metric { + if labelName != model.MetricNameLabel { + result = append(result, string(labelName)) + } + } + } + sort.Strings(result) + result = uniqueStrings(result) + return result, nil +} + func (c *seriesStore) GetChunkRefs(ctx context.Context, from, through model.Time, allMatchers ...*labels.Matcher) ([][]Chunk, []*Fetcher, error) { log, ctx := spanlogger.New(ctx, "SeriesStore.GetChunkRefs") defer log.Span.Finish() diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/fnv.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/fnv.go index ab7980b9b2c26..09fc79d11920b 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/fnv.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/fnv.go @@ -63,8 +63,9 @@ func HashNew32() uint32 { // Note this is the same algorithm as Go stdlib `sum32.Write()` func HashAdd32(h uint32, s string) uint32 { for i := 0; i < len(s); i++ { - h *= prime32 h ^= uint32(s[i]) + h *= prime32 + } return h } From 6de6d708a4327dd554e8e2235b5b155b6884c779 Mon Sep 17 00:00:00 2001 From: Cyril Tovena Date: Wed, 15 May 2019 11:00:49 -0400 Subject: [PATCH 2/4] fix http querystring and update doc --- docs/api.md | 17 ++++-- pkg/logproto/logproto.pb.go | 60 ++++++++++++++++--- pkg/querier/http.go | 17 ++++++ pkg/querier/querier.go | 11 ---- .../cortex/pkg/chunk/series_store.go | 9 ++- 5 files changed, 87 insertions(+), 27 deletions(-) diff --git a/docs/api.md b/docs/api.md index 14b9b40ffcc1c..51e1544f0a72d 100644 --- a/docs/api.md +++ b/docs/api.md @@ -39,7 +39,7 @@ The Loki server has the following API endpoints (_Note:_ Authentication is out o Responses looks like this: - ``` + ```json { "streams": [ { @@ -59,11 +59,14 @@ The Loki server has the following API endpoints (_Note:_ Authentication is out o - `GET /api/prom/label` - For retrieving the names of the labels one can query on. + For doing label name queries, accepts the following parameters in the query-string: + + - `start`: the start time for the query, as a nanosecond Unix epoch (nanoseconds since 1970). Default is always 6 hour ago. + - `end`: the end time for the query, as a nanosecond Unix epoch (nanoseconds since 1970). Default is current time. Responses looks like this: - ``` + ```json { "values": [ "instance", @@ -74,11 +77,15 @@ The Loki server has the following API endpoints (_Note:_ Authentication is out o ``` - `GET /api/prom/label//values` - For retrieving the label values one can query on. 
+ + For doing label values queries, accepts the following parameters in the query-string: + + - `start`: the start time for the query, as a nanosecond Unix epoch (nanoseconds since 1970). Default is always 6 hour ago. + - `end`: the end time for the query, as a nanosecond Unix epoch (nanoseconds since 1970). Default is current time. Responses looks like this: - ``` + ```json { "values": [ "default", diff --git a/pkg/logproto/logproto.pb.go b/pkg/logproto/logproto.pb.go index 92000a12a1728..b3130f276be8c 100644 --- a/pkg/logproto/logproto.pb.go +++ b/pkg/logproto/logproto.pb.go @@ -6,18 +6,17 @@ package logproto import ( context "context" fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + _ "github.com/gogo/protobuf/types" + github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + grpc "google.golang.org/grpc" io "io" math "math" reflect "reflect" strconv "strconv" strings "strings" time "time" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - _ "github.com/gogo/protobuf/types" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. @@ -656,6 +655,7 @@ func init() { func init() { proto.RegisterFile("logproto.proto", fileDescriptor_7a8976f235a02f79) } var fileDescriptor_7a8976f235a02f79 = []byte{ +<<<<<<< HEAD // 728 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x4d, 0x4f, 0x13, 0x4f, 0x18, 0xdf, 0xe9, 0x7b, 0x9f, 0xbe, 0x40, 0xe6, 0xff, 0x17, 0x9a, 0xc6, 0x6c, 0x9b, 0x3d, 0x68, @@ -703,6 +703,48 @@ var fileDescriptor_7a8976f235a02f79 = []byte{ 0x26, 0xfa, 0x19, 0x9a, 0xe8, 0x57, 0x68, 0x1a, 0xe7, 0xa1, 0x89, 0x0e, 0x4e, 0x4d, 0xe3, 0xe8, 0xd4, 0x34, 0x8e, 0x4f, 0x4d, 0xa3, 0x97, 0x93, 0xb7, 0x3d, 0xfc, 0x13, 0x00, 0x00, 0xff, 0xff, 0x37, 0x0c, 0xea, 0x1a, 0x0f, 0x06, 0x00, 0x00, +======= + // 613 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0x4d, 0x6f, 0xd3, 0x40, + 0x10, 0xf5, 0xb6, 0x89, 0x13, 0x4f, 0xd2, 0xb4, 0x5a, 0xa0, 0x58, 0x11, 0x5a, 0x47, 0x3e, 0x40, + 0x54, 0x09, 0x17, 0x02, 0xa2, 0x52, 0xe1, 0x52, 0x53, 0x2a, 0x24, 0x90, 0x80, 0x05, 0x89, 0xb3, + 0xd3, 0x2e, 0xae, 0x25, 0x7f, 0xb4, 0xf6, 0x1a, 0xd1, 0x1b, 0x12, 0x7f, 0xa0, 0x3f, 0x83, 0x13, + 0xbf, 0xa3, 0xc7, 0x1e, 0x7b, 0x0a, 0xd4, 0xb9, 0xa0, 0x9c, 0x7a, 0xe3, 0x8a, 0x76, 0x6d, 0xc7, + 0x06, 0x24, 0x50, 0xb9, 0x38, 0xf3, 0x76, 0xdf, 0x4c, 0xe6, 0xbd, 0x9d, 0x81, 0x9e, 0x1f, 0xb9, + 0x07, 0x71, 0xc4, 0x23, 0x4b, 0x7e, 0x71, 0xbb, 0xc4, 0x7d, 0xc3, 0x8d, 0x22, 0xd7, 0x67, 0xeb, + 0x12, 0x8d, 0xd3, 0x77, 0xeb, 0xdc, 0x0b, 0x58, 0xc2, 0x9d, 0xe0, 0x20, 0xa7, 0xf6, 0x6f, 0xbb, + 0x1e, 0xdf, 0x4f, 0xc7, 0xd6, 0x6e, 0x14, 0xac, 0xbb, 0x91, 0x1b, 0x55, 0x4c, 0x81, 0x24, 0x90, + 0x51, 0x4e, 0x37, 0x77, 0xa0, 0xf3, 0x32, 0x4d, 0xf6, 0x29, 0x3b, 0x4c, 0x59, 0xc2, 0xf1, 0x06, + 0xb4, 0x12, 0x1e, 0x33, 0x27, 0x48, 0x74, 0x34, 0x58, 0x1c, 0x76, 0x46, 0x2b, 0xd6, 0xbc, 0x95, + 0xd7, 0xf2, 0xc2, 0xee, 0xcc, 0x26, 0x46, 0x49, 0xa2, 0x65, 0x60, 0xf6, 0xa0, 0x9b, 0xd7, 0x49, + 0x0e, 0xa2, 0x30, 0x61, 0xe6, 0x0f, 0x04, 0xdd, 0x57, 0x29, 0x8b, 0x8f, 0xca, 0xca, 0x57, 0xa1, + 0x79, 0x28, 0xb0, 0x8e, 0x06, 0x68, 0xa8, 0xd1, 0x1c, 0x88, 0x53, 0xdf, 0x0b, 0x3c, 0xae, 0x2f, + 0x0c, 0xd0, 0x70, 0x89, 0xe6, 0x00, 0x6f, 0x42, 0x33, 0xe1, 0x4e, 0xcc, 0xf5, 0xc5, 0x01, 0x1a, + 0x76, 0x46, 0x7d, 0x2b, 0x17, 0x6d, 0x95, 0x52, 
0xac, 0x37, 0xa5, 0x68, 0xbb, 0x7d, 0x32, 0x31, + 0x94, 0xe3, 0xaf, 0x06, 0xa2, 0x79, 0x0a, 0x7e, 0x00, 0x8b, 0x2c, 0xdc, 0xd3, 0x1b, 0x97, 0xc8, + 0x14, 0x09, 0xf8, 0x2e, 0x68, 0x7b, 0x5e, 0xcc, 0x76, 0xb9, 0x17, 0x85, 0x7a, 0x73, 0x80, 0x86, + 0xbd, 0xd1, 0x95, 0x4a, 0xfb, 0x76, 0x79, 0x45, 0x2b, 0x96, 0x68, 0x3e, 0x66, 0x2e, 0xfb, 0xa0, + 0xab, 0xb9, 0x24, 0x09, 0xcc, 0x87, 0xb0, 0x54, 0x08, 0xcf, 0xad, 0xc0, 0x6b, 0xff, 0xf4, 0xb4, + 0xb2, 0xf1, 0x0b, 0x82, 0xee, 0x73, 0x67, 0xcc, 0xfc, 0xd2, 0x36, 0x0c, 0x8d, 0xd0, 0x09, 0x58, + 0xe1, 0x9a, 0x8c, 0xf1, 0x2a, 0xa8, 0xef, 0x1d, 0x3f, 0x65, 0x89, 0x74, 0xad, 0x4d, 0x0b, 0x74, + 0x59, 0xdb, 0xd0, 0x7f, 0xdb, 0x86, 0xe6, 0xb6, 0x99, 0xb7, 0x60, 0xa9, 0xe8, 0xb7, 0x50, 0x5b, + 0x35, 0x27, 0xc4, 0x6a, 0x65, 0x73, 0xe6, 0x3e, 0xa8, 0xb9, 0x58, 0x6c, 0x82, 0xea, 0x8b, 0x94, + 0x24, 0x17, 0x65, 0xc3, 0x6c, 0x62, 0x14, 0x27, 0xb4, 0xf8, 0xc5, 0x9b, 0xd0, 0x62, 0x21, 0x8f, + 0x3d, 0xa9, 0x51, 0x78, 0xb6, 0x5c, 0x79, 0xf6, 0x24, 0xe4, 0xf1, 0x91, 0xbd, 0x2c, 0x9e, 0x4f, + 0x8c, 0x62, 0xc1, 0xa3, 0x65, 0x60, 0x46, 0xd0, 0x94, 0x14, 0xfc, 0x14, 0xb4, 0xf9, 0x76, 0xc8, + 0xff, 0xfa, 0xbb, 0xb2, 0x5e, 0x51, 0x71, 0x81, 0x27, 0x52, 0x5f, 0x95, 0x8c, 0x6f, 0x40, 0xc3, + 0xf7, 0x42, 0x26, 0xfd, 0xd6, 0xec, 0xf6, 0x6c, 0x62, 0x48, 0x4c, 0xe5, 0x77, 0xed, 0x26, 0x68, + 0xf3, 0xf9, 0xc0, 0x1d, 0x68, 0xed, 0xbc, 0xa0, 0x6f, 0xb7, 0xe8, 0xf6, 0x8a, 0x82, 0xbb, 0xd0, + 0xb6, 0xb7, 0x1e, 0x3f, 0x93, 0x08, 0x8d, 0xb6, 0x40, 0x15, 0x3b, 0xc2, 0x62, 0xbc, 0x01, 0x0d, + 0x11, 0xe1, 0x6b, 0x95, 0xaa, 0xda, 0x16, 0xf6, 0x57, 0x7f, 0x3f, 0x2e, 0x96, 0x4a, 0x19, 0x7d, + 0x42, 0xd0, 0x12, 0xd3, 0xe5, 0xb1, 0x18, 0x3f, 0x82, 0xa6, 0x1c, 0x34, 0x5c, 0xa3, 0xd7, 0x57, + 0xae, 0x7f, 0xfd, 0x8f, 0xf3, 0xb2, 0xce, 0x1d, 0x24, 0x86, 0x45, 0x3e, 0x5c, 0x3d, 0xbb, 0x3e, + 0x79, 0xf5, 0xec, 0x5f, 0x5e, 0xd8, 0x54, 0xec, 0xfb, 0xa7, 0xe7, 0x44, 0x39, 0x3b, 0x27, 0xca, + 0xc5, 0x39, 0x41, 0x1f, 0x33, 0x82, 0x3e, 0x67, 0x04, 0x9d, 0x64, 0x04, 0x9d, 0x66, 0x04, 0x7d, + 0xcb, 0x08, 0xfa, 0x9e, 0x11, 0xe5, 0x22, 0x23, 0xe8, 0x78, 0x4a, 0x94, 0xd3, 0x29, 0x51, 0xce, + 0xa6, 0x44, 0x19, 0xab, 0xb2, 0xd8, 0xbd, 0x9f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x61, 0x32, 0x5b, + 0xc6, 0xdd, 0x04, 0x00, 0x00, +>>>>>>> fix http querystring and update doc } func (x Direction) String() string { @@ -1730,9 +1772,9 @@ func (m *Entry) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp))) - n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i:]) - if err3 != nil { - return 0, err3 + n5, err5 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i:]) + if err5 != nil { + return 0, err5 } i += n5 if len(m.Line) > 0 { diff --git a/pkg/querier/http.go b/pkg/querier/http.go index 4d1cbedeba2e3..f1df00b1ec393 100644 --- a/pkg/querier/http.go +++ b/pkg/querier/http.go @@ -143,10 +143,27 @@ func (q *Querier) QueryHandler(w http.ResponseWriter, r *http.Request) { // LabelHandler is a http.HandlerFunc for handling label queries. 
func (q *Querier) LabelHandler(w http.ResponseWriter, r *http.Request) { name, ok := mux.Vars(r)["name"] + params := r.URL.Query() + now := time.Now() req := &logproto.LabelRequest{ Values: ok, Name: name, } + + end, err := unixNanoTimeParam(params, "end", now) + if err != nil { + http.Error(w, httpgrpc.Errorf(http.StatusBadRequest, err.Error()).Error(), http.StatusBadRequest) + return + } + req.End = &end + + start, err := unixNanoTimeParam(params, "start", end.Add(-6*time.Hour)) + if err != nil { + http.Error(w, httpgrpc.Errorf(http.StatusBadRequest, err.Error()).Error(), http.StatusBadRequest) + return + } + req.Start = &start + resp, err := q.Label(r.Context(), req) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index a42ee8ea8e7cb..d0dbbf1cdcf41 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -148,17 +148,6 @@ func (q *Querier) Label(ctx context.Context, req *logproto.LabelRequest) (*logpr return nil, err } - if req.End == nil { - now := time.Now() - req.End = &now - } - - if req.Start == nil { - // by default will look for the last 6 hours - start := req.End.Add(-6 * time.Hour) - req.Start = &start - } - from, through := model.TimeFromUnixNano(req.Start.UnixNano()), model.TimeFromUnixNano(req.End.UnixNano()) var storeValues []string if req.Values { diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go index e2c29bc166cc8..f65774b2f1f39 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go @@ -148,6 +148,11 @@ func (c *seriesStore) LabelNamesForMetricName(ctx context.Context, from, through log, ctx := spanlogger.New(ctx, "SeriesStore.LabelNamesForMetricName") defer log.Span.Finish() + userID, err := user.ExtractOrgID(ctx) + if err != nil { + return nil, err + } + shortcut, err := c.validateQueryTimeRange(ctx, from, &through) if err != nil { return nil, err @@ -171,14 +176,14 @@ func (c *seriesStore) LabelNamesForMetricName(ctx context.Context, from, through } level.Debug(log).Log("chunk-ids", len(chunkIDs)) - chunks, err := c.convertChunkIDsToChunks(ctx, chunkIDs) + chunks, err := c.convertChunkIDsToChunks(ctx, userID, chunkIDs) if err != nil { level.Error(log).Log("err", "convertChunkIDsToChunks", "err", err) return nil, err } // Filter out chunks that are not in the selected time range and keep a single chunk per fingerprint - filtered, _ := filterChunksByTime(from, through, chunks) + filtered := filterChunksByTime(from, through, chunks) filtered, keys := filterChunksByUniqueFingerPrint(filtered) level.Debug(log).Log("Chunks post filtering", len(chunks)) From 87bc0a5ce2ae57a7117996a23a3798d7f2c9716c Mon Sep 17 00:00:00 2001 From: Cyril Tovena Date: Mon, 8 Jul 2019 10:48:20 -0400 Subject: [PATCH 3/4] update vendor --- Gopkg.lock | 4 + .../cortex/pkg/chunk/chunk_store.go | 93 +++++++++-------- .../cortex/pkg/chunk/chunk_store_utils.go | 40 ++++++-- .../cortex/pkg/chunk/composite_store.go | 18 ++-- .../cortex/pkg/chunk/series_store.go | 99 +++++++++---------- .../cortex/pkg/ingester/client/fnv.go | 3 +- 6 files changed, 137 insertions(+), 120 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 2e04555ce5b7a..c76b09091e8d9 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -248,12 +248,16 @@ "pkg/util/validation", ] pruneopts = "UT" +<<<<<<< HEAD <<<<<<< HEAD revision = 
"e1ab5495e8a846891e3b6b8e757e63201b886bec" ======= revision = "823897b6a214f4db92d611fcd76331ff68b1791f" source = "https://github.com/grafana/cortex" >>>>>>> Query label values and names are now fetched from the store. +======= + revision = "ef492f6bbafb185bbe61ae7a6955b7a4af5f3d9a" +>>>>>>> update vendor [[projects]] digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go index 6fdcb716a8865..679b35324281f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go @@ -64,11 +64,18 @@ type StoreConfig struct { // Limits query start time to be greater than now() - MaxLookBackPeriod, if set. MaxLookBackPeriod time.Duration `yaml:"max_look_back_period"` +<<<<<<< HEAD +======= + + // Not visible in yaml because the setting shouldn't be common between ingesters and queriers + chunkCacheStubs bool // don't write the full chunk to cache, just a stub entry +>>>>>>> update vendor } // RegisterFlags adds the flags required to config this to the given FlagSet func (cfg *StoreConfig) RegisterFlags(f *flag.FlagSet) { cfg.ChunkCacheConfig.RegisterFlagsWithPrefix("", "Cache config for chunks. ", f) + f.BoolVar(&cfg.chunkCacheStubs, "store.chunk-cache-stubs", false, "If true, don't write the full chunk to cache, just a stub entry.") cfg.WriteDedupeCacheConfig.RegisterFlagsWithPrefix("store.index-cache-write.", "Cache config for index entry writing. ", f) f.DurationVar(&cfg.MinChunkAge, "store.min-chunk-age", 0, "Minimum time between chunk update and being saved to the store.") @@ -92,7 +99,7 @@ type store struct { } func newStore(cfg StoreConfig, schema Schema, index IndexClient, chunks ObjectClient, limits *validation.Overrides) (Store, error) { - fetcher, err := NewChunkFetcher(cfg.ChunkCacheConfig, chunks) + fetcher, err := NewChunkFetcher(cfg.ChunkCacheConfig, cfg.chunkCacheStubs, chunks) if err != nil { return nil, err } @@ -188,47 +195,8 @@ func (c *store) Get(ctx context.Context, from, through model.Time, allMatchers . return c.getMetricNameChunks(ctx, from, through, matchers, metricName) } -// LabelNamesForMetricName retrieves all label names for a metric name. 
-func (c *store) LabelNamesForMetricName(ctx context.Context, from, through model.Time, metricName string) ([]string, error) { - log, ctx := spanlogger.New(ctx, "ChunkStore.LabelNamesForMetricName") - defer log.Span.Finish() - level.Debug(log).Log("from", from, "through", through, "metricName", metricName) - - shortcut, err := c.validateQueryTimeRange(ctx, from, &through) - if err != nil { - return nil, err - } else if shortcut { - return nil, nil - } - - chunks, err := c.lookupChunksByMetricName(ctx, from, through, nil, metricName) - if err != nil { - return nil, err - } - level.Debug(log).Log("Chunks in index", len(chunks)) - - // Filter out chunks that are not in the selected time range and keep a single chunk per fingerprint - filtered := filterChunksByTime(from, through, chunks) - filtered, keys := filterChunksByUniqueFingerPrint(filtered) - level.Debug(log).Log("Chunks post filtering", len(chunks)) - - // Now fetch the actual chunk data from Memcache / S3 - allChunks, err := c.FetchChunks(ctx, filtered, keys) - if err != nil { - level.Error(log).Log("msg", "FetchChunks", "err", err) - return nil, err - } - var result []string - for _, c := range allChunks { - for labelName := range c.Metric { - if labelName != model.MetricNameLabel { - result = append(result, string(labelName)) - } - } - } - sort.Strings(result) - result = uniqueStrings(result) - return result, nil +func (c *store) GetChunkRefs(ctx context.Context, from, through model.Time, allMatchers ...*labels.Matcher) ([][]Chunk, []*Fetcher, error) { + return nil, nil, errors.New("not implemented") } // LabelValuesForMetricName retrieves all label values for a single label name and metric name. @@ -242,14 +210,14 @@ func (c *store) LabelValuesForMetricName(ctx context.Context, from, through mode return nil, err } - shortcut, err := c.validateQueryTimeRange(ctx, from, &through) + shortcut, err := c.validateQueryTimeRange(ctx, &from, &through) if err != nil { return nil, err } else if shortcut { return nil, nil } - queries, err := c.schema.GetReadQueriesForMetricLabel(from, through, userID, model.LabelValue(metricName), model.LabelName(labelName)) + queries, err := c.schema.GetReadQueriesForMetricLabel(from, through, userID, metricName, labelName) if err != nil { return nil, err } @@ -267,16 +235,45 @@ func (c *store) LabelValuesForMetricName(ctx context.Context, from, through mode } result = append(result, string(labelValue)) } - sort.Strings(result) result = uniqueStrings(result) return result, nil } -func (c *store) GetChunkRefs(ctx context.Context, from, through model.Time, allMatchers ...*labels.Matcher) ([][]Chunk, []*Fetcher, error) { - return nil, nil, errors.New("not implemented") +// LabelNamesForMetricName retrieves all label names for a metric name. 
+func (c *store) LabelNamesForMetricName(ctx context.Context, from, through model.Time, metricName string) ([]string, error) { + log, ctx := spanlogger.New(ctx, "ChunkStore.LabelNamesForMetricName") + defer log.Span.Finish() + level.Debug(log).Log("from", from, "through", through, "metricName", metricName) + + shortcut, err := c.validateQueryTimeRange(ctx, &from, &through) + if err != nil { + return nil, err + } else if shortcut { + return nil, nil + } + + chunks, err := c.lookupChunksByMetricName(ctx, from, through, nil, metricName) + if err != nil { + return nil, err + } + level.Debug(log).Log("msg", "Chunks in index", "chunks", len(chunks)) + + // Filter out chunks that are not in the selected time range and keep a single chunk per fingerprint + filtered := filterChunksByTime(from, through, chunks) + filtered, keys := filterChunksByUniqueFingerprint(filtered) + level.Debug(log).Log("msg", "Chunks post filtering", "chunks", len(chunks)) + + // Now fetch the actual chunk data from Memcache / S3 + allChunks, err := c.FetchChunks(ctx, filtered, keys) + if err != nil { + level.Error(log).Log("msg", "FetchChunks", "err", err) + return nil, err + } + return labelNamesFromChunks(allChunks), nil } +<<<<<<< HEAD // LabelValuesForMetricName retrieves all label values for a single label name and metric name. func (c *store) LabelValuesForMetricName(ctx context.Context, from, through model.Time, metricName, labelName string) ([]string, error) { log, ctx := spanlogger.New(ctx, "ChunkStore.LabelValues") @@ -319,6 +316,8 @@ func (c *store) LabelValuesForMetricName(ctx context.Context, from, through mode return result, nil } +======= +>>>>>>> update vendor func (c *store) validateQueryTimeRange(ctx context.Context, from *model.Time, through *model.Time) (bool, error) { log, ctx := spanlogger.New(ctx, "store.validateQueryTimeRange") defer log.Span.Finish() diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go index 71ce2d8a71303..49506be76a382 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go @@ -2,6 +2,7 @@ package chunk import ( "context" + "sort" "sync" "github.com/go-kit/kit/log/level" @@ -36,7 +37,24 @@ func keysFromChunks(chunks []Chunk) []string { return keys } -func filterChunksByUniqueFingerPrint(chunks []Chunk) ([]Chunk, []string) { +func labelNamesFromChunks(chunks []Chunk) []string { + keys := map[string]struct{}{} + var result []string + for _, c := range chunks { + for _, l := range c.Metric { + if l.Name != model.MetricNameLabel { + if _, ok := keys[string(l.Name)]; !ok { + keys[string(l.Name)] = struct{}{} + result = append(result, string(l.Name)) + } + } + } + } + sort.Strings(result) + return result +} + +func filterChunksByUniqueFingerprint(chunks []Chunk) ([]Chunk, []string) { filtered := make([]Chunk, 0, len(chunks)) keys := make([]string, 0, len(chunks)) uniqueFp := map[model.Fingerprint]struct{}{} @@ -70,8 +88,9 @@ outer: // and writing back any misses to the cache. Also responsible for decoding // chunks from the cache, in parallel. type Fetcher struct { - storage ObjectClient - cache cache.Cache + storage ObjectClient + cache cache.Cache + cacheStubs bool wait sync.WaitGroup decodeRequests chan decodeRequest @@ -88,7 +107,7 @@ type decodeResponse struct { } // NewChunkFetcher makes a new ChunkFetcher. 
-func NewChunkFetcher(cfg cache.Config, storage ObjectClient) (*Fetcher, error) { +func NewChunkFetcher(cfg cache.Config, cacheStubs bool, storage ObjectClient) (*Fetcher, error) { cache, err := cache.New(cfg) if err != nil { return nil, err @@ -97,6 +116,7 @@ func NewChunkFetcher(cfg cache.Config, storage ObjectClient) (*Fetcher, error) { c := &Fetcher{ storage: storage, cache: cache, + cacheStubs: cacheStubs, decodeRequests: make(chan decodeRequest), } @@ -165,10 +185,14 @@ func (c *Fetcher) writeBackCache(ctx context.Context, chunks []Chunk) error { keys := make([]string, 0, len(chunks)) bufs := make([][]byte, 0, len(chunks)) for i := range chunks { - encoded, err := chunks[i].Encoded() - // TODO don't fail, just log and conitnue? - if err != nil { - return err + var encoded []byte + var err error + if !c.cacheStubs { + encoded, err = chunks[i].Encoded() + // TODO don't fail, just log and conitnue? + if err != nil { + return err + } } keys = append(keys, chunks[i].ExternalKey()) diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/composite_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/composite_store.go index 5814ec7083d42..c00c3da8dba46 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/composite_store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/composite_store.go @@ -18,8 +18,8 @@ type Store interface { // GetChunkRefs returns the un-loaded chunks and the fetchers to be used to load them. You can load each slice of chunks ([]Chunk), // using the corresponding Fetcher (fetchers[i].FetchChunks(ctx, chunks[i], ...) GetChunkRefs(ctx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([][]Chunk, []*Fetcher, error) - LabelNamesForMetricName(ctx context.Context, from, through model.Time, metricName string) ([]string, error) LabelValuesForMetricName(ctx context.Context, from, through model.Time, metricName string, labelName string) ([]string, error) + LabelNamesForMetricName(ctx context.Context, from, through model.Time, metricName string) ([]string, error) Stop() } @@ -93,29 +93,29 @@ func (c compositeStore) Get(ctx context.Context, from, through model.Time, match return results, err } -// LabelNamesForMetricName retrieves all label names for a metric name. -func (c compositeStore) LabelNamesForMetricName(ctx context.Context, from, through model.Time, metricName string) ([]string, error) { +// LabelValuesForMetricName retrieves all label values for a single label name and metric name. +func (c compositeStore) LabelValuesForMetricName(ctx context.Context, from, through model.Time, metricName string, labelName string) ([]string, error) { var result []string err := c.forStores(from, through, func(from, through model.Time, store Store) error { - labelNames, err := store.LabelNamesForMetricName(ctx, from, through, metricName) + labelValues, err := store.LabelValuesForMetricName(ctx, from, through, metricName, labelName) if err != nil { return err } - result = append(result, labelNames...) + result = append(result, labelValues...) return nil }) return result, err } -// LabelValuesForMetricName retrieves all label values for a single label name and metric name. -func (c compositeStore) LabelValuesForMetricName(ctx context.Context, from, through model.Time, metricName string, labelName string) ([]string, error) { +// LabelNamesForMetricName retrieves all label names for a metric name. 
+func (c compositeStore) LabelNamesForMetricName(ctx context.Context, from, through model.Time, metricName string) ([]string, error) { var result []string err := c.forStores(from, through, func(from, through model.Time, store Store) error { - labelValues, err := store.LabelValuesForMetricName(ctx, from, through, metricName, labelName) + labelNames, err := store.LabelNamesForMetricName(ctx, from, through, metricName) if err != nil { return err } - result = append(result, labelValues...) + result = append(result, labelNames...) return nil }) return result, err diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go index f65774b2f1f39..7698848063bdc 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "net/http" - "sort" "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" @@ -69,7 +68,7 @@ type seriesStore struct { } func newSeriesStore(cfg StoreConfig, schema Schema, index IndexClient, chunks ObjectClient, limits *validation.Overrides) (Store, error) { - fetcher, err := NewChunkFetcher(cfg.ChunkCacheConfig, chunks) + fetcher, err := NewChunkFetcher(cfg.ChunkCacheConfig, cfg.chunkCacheStubs, chunks) if err != nil { return nil, err } @@ -143,89 +142,72 @@ func (c *seriesStore) Get(ctx context.Context, from, through model.Time, allMatc return filteredChunks, nil } -// LabelNamesForMetricName retrieves all label names for a metric name. -func (c *seriesStore) LabelNamesForMetricName(ctx context.Context, from, through model.Time, metricName string) ([]string, error) { - log, ctx := spanlogger.New(ctx, "SeriesStore.LabelNamesForMetricName") +func (c *seriesStore) GetChunkRefs(ctx context.Context, from, through model.Time, allMatchers ...*labels.Matcher) ([][]Chunk, []*Fetcher, error) { + log, ctx := spanlogger.New(ctx, "SeriesStore.GetChunkRefs") defer log.Span.Finish() userID, err := user.ExtractOrgID(ctx) if err != nil { - return nil, err + return nil, nil, err } - shortcut, err := c.validateQueryTimeRange(ctx, from, &through) + // Validate the query is within reasonable bounds. + metricName, matchers, shortcut, err := c.validateQuery(ctx, &from, &through, allMatchers) if err != nil { - return nil, err + return nil, nil, err } else if shortcut { - return nil, nil + return nil, nil, nil } + level.Debug(log).Log("metric", metricName) - // Fetch the series IDs from the index - seriesIDs, err := c.lookupSeriesByMetricNameMatchers(ctx, from, through, metricName, nil) + // Fetch the series IDs from the index, based on non-empty matchers from + // the query. + _, matchers = util.SplitFiltersAndMatchers(matchers) + seriesIDs, err := c.lookupSeriesByMetricNameMatchers(ctx, from, through, userID, metricName, matchers) if err != nil { - return nil, err + return nil, nil, err } level.Debug(log).Log("series-ids", len(seriesIDs)) // Lookup the series in the index to get the chunks. 
- chunkIDs, err := c.lookupChunksBySeries(ctx, from, through, seriesIDs) + chunkIDs, err := c.lookupChunksBySeries(ctx, from, through, userID, seriesIDs) if err != nil { level.Error(log).Log("msg", "lookupChunksBySeries", "err", err) - return nil, err + return nil, nil, err } level.Debug(log).Log("chunk-ids", len(chunkIDs)) chunks, err := c.convertChunkIDsToChunks(ctx, userID, chunkIDs) if err != nil { - level.Error(log).Log("err", "convertChunkIDsToChunks", "err", err) - return nil, err + level.Error(log).Log("op", "convertChunkIDsToChunks", "err", err) + return nil, nil, err } - // Filter out chunks that are not in the selected time range and keep a single chunk per fingerprint - filtered := filterChunksByTime(from, through, chunks) - filtered, keys := filterChunksByUniqueFingerPrint(filtered) - level.Debug(log).Log("Chunks post filtering", len(chunks)) - - chunksPerQuery.Observe(float64(len(filtered))) - - // Now fetch the actual chunk data from Memcache / S3 - allChunks, err := c.FetchChunks(ctx, filtered, keys) - if err != nil { - level.Error(log).Log("msg", "FetchChunks", "err", err) - return nil, err - } + chunks = filterChunksByTime(from, through, chunks) + level.Debug(log).Log("chunks-post-filtering", len(chunks)) + chunksPerQuery.Observe(float64(len(chunks))) - var result []string - for _, c := range allChunks { - for labelName := range c.Metric { - if labelName != model.MetricNameLabel { - result = append(result, string(labelName)) - } - } - } - sort.Strings(result) - result = uniqueStrings(result) - return result, nil + return [][]Chunk{chunks}, []*Fetcher{c.store.Fetcher}, nil } -func (c *seriesStore) GetChunkRefs(ctx context.Context, from, through model.Time, allMatchers ...*labels.Matcher) ([][]Chunk, []*Fetcher, error) { - log, ctx := spanlogger.New(ctx, "SeriesStore.GetChunkRefs") +// LabelNamesForMetricName retrieves all label names for a metric name. +func (c *seriesStore) LabelNamesForMetricName(ctx context.Context, from, through model.Time, metricName string) ([]string, error) { + log, ctx := spanlogger.New(ctx, "SeriesStore.LabelNamesForMetricName") defer log.Span.Finish() userID, err := user.ExtractOrgID(ctx) if err != nil { - return nil, nil, err + return nil, err } // Validate the query is within reasonable bounds. 
metricName, matchers, shortcut, err := c.validateQuery(ctx, &from, &through, allMatchers) if err != nil { - return nil, nil, err + return nil, err } else if shortcut { - return nil, nil, nil + return nil, nil } - level.Debug(log).Log("metric", metricName) // Fetch the series IDs from the index, based on non-empty matchers from @@ -233,7 +215,7 @@ func (c *seriesStore) GetChunkRefs(ctx context.Context, from, through model.Time _, matchers = util.SplitFiltersAndMatchers(matchers) seriesIDs, err := c.lookupSeriesByMetricNameMatchers(ctx, from, through, userID, metricName, matchers) if err != nil { - return nil, nil, err + return nil, err } level.Debug(log).Log("series-ids", len(seriesIDs)) @@ -241,21 +223,30 @@ func (c *seriesStore) GetChunkRefs(ctx context.Context, from, through model.Time chunkIDs, err := c.lookupChunksBySeries(ctx, from, through, userID, seriesIDs) if err != nil { level.Error(log).Log("msg", "lookupChunksBySeries", "err", err) - return nil, nil, err + return nil, err } level.Debug(log).Log("chunk-ids", len(chunkIDs)) chunks, err := c.convertChunkIDsToChunks(ctx, userID, chunkIDs) if err != nil { - level.Error(log).Log("op", "convertChunkIDsToChunks", "err", err) - return nil, nil, err + level.Error(log).Log("err", "convertChunkIDsToChunks", "err", err) + return nil, err } - chunks = filterChunksByTime(from, through, chunks) - level.Debug(log).Log("chunks-post-filtering", len(chunks)) - chunksPerQuery.Observe(float64(len(chunks))) + // Filter out chunks that are not in the selected time range and keep a single chunk per fingerprint + filtered := filterChunksByTime(from, through, chunks) + filtered, keys := filterChunksByUniqueFingerprint(filtered) + level.Debug(log).Log("Chunks post filtering", len(chunks)) - return [][]Chunk{chunks}, []*Fetcher{c.store.Fetcher}, nil + chunksPerQuery.Observe(float64(len(filtered))) + + // Now fetch the actual chunk data from Memcache / S3 + allChunks, err := c.FetchChunks(ctx, filtered, keys) + if err != nil { + level.Error(log).Log("msg", "FetchChunks", "err", err) + return nil, err + } + return labelNamesFromChunks(allChunks), nil } func (c *seriesStore) lookupSeriesByMetricNameMatchers(ctx context.Context, from, through model.Time, userID, metricName string, matchers []*labels.Matcher) ([]string, error) { diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/fnv.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/fnv.go index 09fc79d11920b..ab7980b9b2c26 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/fnv.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/fnv.go @@ -63,9 +63,8 @@ func HashNew32() uint32 { // Note this is the same algorithm as Go stdlib `sum32.Write()` func HashAdd32(h uint32, s string) uint32 { for i := 0; i < len(s); i++ { - h ^= uint32(s[i]) h *= prime32 - + h ^= uint32(s[i]) } return h } From 51ba819b680ea15b62a5224b1bd8b2e00f29b228 Mon Sep 17 00:00:00 2001 From: Cyril Tovena Date: Mon, 8 Jul 2019 13:07:40 -0400 Subject: [PATCH 4/4] rebased --- Gopkg.lock | 68 +------------------ pkg/logproto/logproto.pb.go | 54 ++------------- pkg/querier/querier.go | 1 + .../cortex/pkg/chunk/chunk_store.go | 48 ------------- .../cortex/pkg/chunk/series_store.go | 9 +-- 5 files changed, 11 insertions(+), 169 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index c76b09091e8d9..35077a3a2d6bb 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -222,7 +222,7 @@ [[projects]] branch = "master" - digest = "1:2f0846dd85df3365a80c32ff994eb1fcee5eec2c51a812ceec182398f3ef85f4" + 
digest = "1:5a07b5363e4c2aa127a3afd1e8e323d3a288ba1d90d37793d2e14843f5b5b82e" name = "github.com/cortexproject/cortex" packages = [ "pkg/chunk", @@ -248,16 +248,7 @@ "pkg/util/validation", ] pruneopts = "UT" -<<<<<<< HEAD -<<<<<<< HEAD - revision = "e1ab5495e8a846891e3b6b8e757e63201b886bec" -======= - revision = "823897b6a214f4db92d611fcd76331ff68b1791f" - source = "https://github.com/grafana/cortex" ->>>>>>> Query label values and names are now fetched from the store. -======= revision = "ef492f6bbafb185bbe61ae7a6955b7a4af5f3d9a" ->>>>>>> update vendor [[projects]] digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" @@ -569,39 +560,6 @@ revision = "66b9c49e59c6c48f0ffce28c2d8b8a5678502c6d" version = "v1.4.0" -[[projects]] - branch = "master" - digest = "1:1f4181cfeacebef71babf22e99d727c1667e1f620982787c7035653d6e887dbb" - name = "github.com/grafana/loki" - packages = [ - "pkg/chunkenc", - "pkg/distributor", - "pkg/helpers", - "pkg/ingester", - "pkg/ingester/client", - "pkg/iter", - "pkg/logentry/metric", - "pkg/logentry/stages", - "pkg/logproto", - "pkg/logql", - "pkg/loki", - "pkg/promtail", - "pkg/promtail/api", - "pkg/promtail/client", - "pkg/promtail/client/fake", - "pkg/promtail/config", - "pkg/promtail/positions", - "pkg/promtail/scrape", - "pkg/promtail/server", - "pkg/promtail/server/ui", - "pkg/promtail/targets", - "pkg/querier", - "pkg/util", - "pkg/util/flagext", - ] - pruneopts = "UT" - revision = "4c7138231f77997909564616efc5d0cdbcb1ead8" - [[projects]] digest = "1:1168584a5881d371e96cb0e66ef6db71d7cef0856cc7f311490bc856627f8328" name = "github.com/grpc-ecosystem/go-grpc-middleware" @@ -1628,30 +1586,6 @@ "github.com/golang/snappy", "github.com/gorilla/mux", "github.com/gorilla/websocket", - "github.com/grafana/loki/pkg/chunkenc", - "github.com/grafana/loki/pkg/distributor", - "github.com/grafana/loki/pkg/helpers", - "github.com/grafana/loki/pkg/ingester", - "github.com/grafana/loki/pkg/ingester/client", - "github.com/grafana/loki/pkg/iter", - "github.com/grafana/loki/pkg/logentry/metric", - "github.com/grafana/loki/pkg/logentry/stages", - "github.com/grafana/loki/pkg/logproto", - "github.com/grafana/loki/pkg/logql", - "github.com/grafana/loki/pkg/loki", - "github.com/grafana/loki/pkg/promtail", - "github.com/grafana/loki/pkg/promtail/api", - "github.com/grafana/loki/pkg/promtail/client", - "github.com/grafana/loki/pkg/promtail/client/fake", - "github.com/grafana/loki/pkg/promtail/config", - "github.com/grafana/loki/pkg/promtail/positions", - "github.com/grafana/loki/pkg/promtail/scrape", - "github.com/grafana/loki/pkg/promtail/server", - "github.com/grafana/loki/pkg/promtail/server/ui", - "github.com/grafana/loki/pkg/promtail/targets", - "github.com/grafana/loki/pkg/querier", - "github.com/grafana/loki/pkg/util", - "github.com/grafana/loki/pkg/util/flagext", "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc", "github.com/hpcloud/tail", "github.com/jmespath/go-jmespath", diff --git a/pkg/logproto/logproto.pb.go b/pkg/logproto/logproto.pb.go index b3130f276be8c..e63820f2282b2 100644 --- a/pkg/logproto/logproto.pb.go +++ b/pkg/logproto/logproto.pb.go @@ -6,17 +6,18 @@ package logproto import ( context "context" fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - _ "github.com/gogo/protobuf/types" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - grpc "google.golang.org/grpc" io "io" math "math" reflect "reflect" strconv "strconv" strings "strings" time "time" + + _ 
"github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + _ "github.com/gogo/protobuf/types" + github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. @@ -655,7 +656,6 @@ func init() { func init() { proto.RegisterFile("logproto.proto", fileDescriptor_7a8976f235a02f79) } var fileDescriptor_7a8976f235a02f79 = []byte{ -<<<<<<< HEAD // 728 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x4d, 0x4f, 0x13, 0x4f, 0x18, 0xdf, 0xe9, 0x7b, 0x9f, 0xbe, 0x40, 0xe6, 0xff, 0x17, 0x9a, 0xc6, 0x6c, 0x9b, 0x3d, 0x68, @@ -703,48 +703,6 @@ var fileDescriptor_7a8976f235a02f79 = []byte{ 0x26, 0xfa, 0x19, 0x9a, 0xe8, 0x57, 0x68, 0x1a, 0xe7, 0xa1, 0x89, 0x0e, 0x4e, 0x4d, 0xe3, 0xe8, 0xd4, 0x34, 0x8e, 0x4f, 0x4d, 0xa3, 0x97, 0x93, 0xb7, 0x3d, 0xfc, 0x13, 0x00, 0x00, 0xff, 0xff, 0x37, 0x0c, 0xea, 0x1a, 0x0f, 0x06, 0x00, 0x00, -======= - // 613 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0x4d, 0x6f, 0xd3, 0x40, - 0x10, 0xf5, 0xb6, 0x89, 0x13, 0x4f, 0xd2, 0xb4, 0x5a, 0xa0, 0x58, 0x11, 0x5a, 0x47, 0x3e, 0x40, - 0x54, 0x09, 0x17, 0x02, 0xa2, 0x52, 0xe1, 0x52, 0x53, 0x2a, 0x24, 0x90, 0x80, 0x05, 0x89, 0xb3, - 0xd3, 0x2e, 0xae, 0x25, 0x7f, 0xb4, 0xf6, 0x1a, 0xd1, 0x1b, 0x12, 0x7f, 0xa0, 0x3f, 0x83, 0x13, - 0xbf, 0xa3, 0xc7, 0x1e, 0x7b, 0x0a, 0xd4, 0xb9, 0xa0, 0x9c, 0x7a, 0xe3, 0x8a, 0x76, 0x6d, 0xc7, - 0x06, 0x24, 0x50, 0xb9, 0x38, 0xf3, 0x76, 0xdf, 0x4c, 0xe6, 0xbd, 0x9d, 0x81, 0x9e, 0x1f, 0xb9, - 0x07, 0x71, 0xc4, 0x23, 0x4b, 0x7e, 0x71, 0xbb, 0xc4, 0x7d, 0xc3, 0x8d, 0x22, 0xd7, 0x67, 0xeb, - 0x12, 0x8d, 0xd3, 0x77, 0xeb, 0xdc, 0x0b, 0x58, 0xc2, 0x9d, 0xe0, 0x20, 0xa7, 0xf6, 0x6f, 0xbb, - 0x1e, 0xdf, 0x4f, 0xc7, 0xd6, 0x6e, 0x14, 0xac, 0xbb, 0x91, 0x1b, 0x55, 0x4c, 0x81, 0x24, 0x90, - 0x51, 0x4e, 0x37, 0x77, 0xa0, 0xf3, 0x32, 0x4d, 0xf6, 0x29, 0x3b, 0x4c, 0x59, 0xc2, 0xf1, 0x06, - 0xb4, 0x12, 0x1e, 0x33, 0x27, 0x48, 0x74, 0x34, 0x58, 0x1c, 0x76, 0x46, 0x2b, 0xd6, 0xbc, 0x95, - 0xd7, 0xf2, 0xc2, 0xee, 0xcc, 0x26, 0x46, 0x49, 0xa2, 0x65, 0x60, 0xf6, 0xa0, 0x9b, 0xd7, 0x49, - 0x0e, 0xa2, 0x30, 0x61, 0xe6, 0x0f, 0x04, 0xdd, 0x57, 0x29, 0x8b, 0x8f, 0xca, 0xca, 0x57, 0xa1, - 0x79, 0x28, 0xb0, 0x8e, 0x06, 0x68, 0xa8, 0xd1, 0x1c, 0x88, 0x53, 0xdf, 0x0b, 0x3c, 0xae, 0x2f, - 0x0c, 0xd0, 0x70, 0x89, 0xe6, 0x00, 0x6f, 0x42, 0x33, 0xe1, 0x4e, 0xcc, 0xf5, 0xc5, 0x01, 0x1a, - 0x76, 0x46, 0x7d, 0x2b, 0x17, 0x6d, 0x95, 0x52, 0xac, 0x37, 0xa5, 0x68, 0xbb, 0x7d, 0x32, 0x31, - 0x94, 0xe3, 0xaf, 0x06, 0xa2, 0x79, 0x0a, 0x7e, 0x00, 0x8b, 0x2c, 0xdc, 0xd3, 0x1b, 0x97, 0xc8, - 0x14, 0x09, 0xf8, 0x2e, 0x68, 0x7b, 0x5e, 0xcc, 0x76, 0xb9, 0x17, 0x85, 0x7a, 0x73, 0x80, 0x86, - 0xbd, 0xd1, 0x95, 0x4a, 0xfb, 0x76, 0x79, 0x45, 0x2b, 0x96, 0x68, 0x3e, 0x66, 0x2e, 0xfb, 0xa0, - 0xab, 0xb9, 0x24, 0x09, 0xcc, 0x87, 0xb0, 0x54, 0x08, 0xcf, 0xad, 0xc0, 0x6b, 0xff, 0xf4, 0xb4, - 0xb2, 0xf1, 0x0b, 0x82, 0xee, 0x73, 0x67, 0xcc, 0xfc, 0xd2, 0x36, 0x0c, 0x8d, 0xd0, 0x09, 0x58, - 0xe1, 0x9a, 0x8c, 0xf1, 0x2a, 0xa8, 0xef, 0x1d, 0x3f, 0x65, 0x89, 0x74, 0xad, 0x4d, 0x0b, 0x74, - 0x59, 0xdb, 0xd0, 0x7f, 0xdb, 0x86, 0xe6, 0xb6, 0x99, 0xb7, 0x60, 0xa9, 0xe8, 0xb7, 0x50, 0x5b, - 0x35, 0x27, 0xc4, 0x6a, 0x65, 0x73, 0xe6, 0x3e, 0xa8, 0xb9, 0x58, 0x6c, 0x82, 0xea, 0x8b, 0x94, - 0x24, 0x17, 0x65, 0xc3, 0x6c, 0x62, 0x14, 0x27, 0xb4, 0xf8, 0xc5, 0x9b, 0xd0, 0x62, 0x21, 0x8f, - 0x3d, 0xa9, 0x51, 0x78, 0xb6, 
0x5c, 0x79, 0xf6, 0x24, 0xe4, 0xf1, 0x91, 0xbd, 0x2c, 0x9e, 0x4f, - 0x8c, 0x62, 0xc1, 0xa3, 0x65, 0x60, 0x46, 0xd0, 0x94, 0x14, 0xfc, 0x14, 0xb4, 0xf9, 0x76, 0xc8, - 0xff, 0xfa, 0xbb, 0xb2, 0x5e, 0x51, 0x71, 0x81, 0x27, 0x52, 0x5f, 0x95, 0x8c, 0x6f, 0x40, 0xc3, - 0xf7, 0x42, 0x26, 0xfd, 0xd6, 0xec, 0xf6, 0x6c, 0x62, 0x48, 0x4c, 0xe5, 0x77, 0xed, 0x26, 0x68, - 0xf3, 0xf9, 0xc0, 0x1d, 0x68, 0xed, 0xbc, 0xa0, 0x6f, 0xb7, 0xe8, 0xf6, 0x8a, 0x82, 0xbb, 0xd0, - 0xb6, 0xb7, 0x1e, 0x3f, 0x93, 0x08, 0x8d, 0xb6, 0x40, 0x15, 0x3b, 0xc2, 0x62, 0xbc, 0x01, 0x0d, - 0x11, 0xe1, 0x6b, 0x95, 0xaa, 0xda, 0x16, 0xf6, 0x57, 0x7f, 0x3f, 0x2e, 0x96, 0x4a, 0x19, 0x7d, - 0x42, 0xd0, 0x12, 0xd3, 0xe5, 0xb1, 0x18, 0x3f, 0x82, 0xa6, 0x1c, 0x34, 0x5c, 0xa3, 0xd7, 0x57, - 0xae, 0x7f, 0xfd, 0x8f, 0xf3, 0xb2, 0xce, 0x1d, 0x24, 0x86, 0x45, 0x3e, 0x5c, 0x3d, 0xbb, 0x3e, - 0x79, 0xf5, 0xec, 0x5f, 0x5e, 0xd8, 0x54, 0xec, 0xfb, 0xa7, 0xe7, 0x44, 0x39, 0x3b, 0x27, 0xca, - 0xc5, 0x39, 0x41, 0x1f, 0x33, 0x82, 0x3e, 0x67, 0x04, 0x9d, 0x64, 0x04, 0x9d, 0x66, 0x04, 0x7d, - 0xcb, 0x08, 0xfa, 0x9e, 0x11, 0xe5, 0x22, 0x23, 0xe8, 0x78, 0x4a, 0x94, 0xd3, 0x29, 0x51, 0xce, - 0xa6, 0x44, 0x19, 0xab, 0xb2, 0xd8, 0xbd, 0x9f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x61, 0x32, 0x5b, - 0xc6, 0xdd, 0x04, 0x00, 0x00, ->>>>>>> fix http querystring and update doc } func (x Direction) String() string { diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index d0dbbf1cdcf41..eacf22fb0edd6 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -10,6 +10,7 @@ import ( "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/util" token_util "github.com/grafana/loki/pkg/util" + "github.com/prometheus/common/model" "github.com/weaveworks/common/user" "google.golang.org/grpc/health/grpc_health_v1" diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go index 679b35324281f..f93472c09cfcf 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go @@ -64,12 +64,9 @@ type StoreConfig struct { // Limits query start time to be greater than now() - MaxLookBackPeriod, if set. MaxLookBackPeriod time.Duration `yaml:"max_look_back_period"` -<<<<<<< HEAD -======= // Not visible in yaml because the setting shouldn't be common between ingesters and queriers chunkCacheStubs bool // don't write the full chunk to cache, just a stub entry ->>>>>>> update vendor } // RegisterFlags adds the flags required to config this to the given FlagSet @@ -273,51 +270,6 @@ func (c *store) LabelNamesForMetricName(ctx context.Context, from, through model return labelNamesFromChunks(allChunks), nil } -<<<<<<< HEAD -// LabelValuesForMetricName retrieves all label values for a single label name and metric name. 
-func (c *store) LabelValuesForMetricName(ctx context.Context, from, through model.Time, metricName, labelName string) ([]string, error) {
-	log, ctx := spanlogger.New(ctx, "ChunkStore.LabelValues")
-	defer log.Span.Finish()
-	level.Debug(log).Log("from", from, "through", through, "metricName", metricName, "labelName", labelName)
-
-	userID, err := user.ExtractOrgID(ctx)
-	if err != nil {
-		return nil, err
-	}
-
-	shortcut, err := c.validateQueryTimeRange(ctx, &from, &through)
-	if err != nil {
-		return nil, err
-	} else if shortcut {
-		return nil, nil
-	}
-
-	queries, err := c.schema.GetReadQueriesForMetricLabel(from, through, userID, metricName, labelName)
-	if err != nil {
-		return nil, err
-	}
-
-	entries, err := c.lookupEntriesByQueries(ctx, queries)
-	if err != nil {
-		return nil, err
-	}
-
-	var result []string
-	for _, entry := range entries {
-		_, labelValue, _, _, err := parseChunkTimeRangeValue(entry.RangeValue, entry.Value)
-		if err != nil {
-			return nil, err
-		}
-		result = append(result, string(labelValue))
-	}
-
-	sort.Strings(result)
-	result = uniqueStrings(result)
-	return result, nil
-}
-
-=======
->>>>>>> update vendor
 func (c *store) validateQueryTimeRange(ctx context.Context, from *model.Time, through *model.Time) (bool, error) {
 	log, ctx := spanlogger.New(ctx, "store.validateQueryTimeRange")
 	defer log.Span.Finish()
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go
index 7698848063bdc..38bd313a51cf3 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go
@@ -201,8 +201,7 @@ func (c *seriesStore) LabelNamesForMetricName(ctx context.Context, from, through
 		return nil, err
 	}
 
-	// Validate the query is within reasonable bounds.
-	metricName, matchers, shortcut, err := c.validateQuery(ctx, &from, &through, allMatchers)
+	shortcut, err := c.validateQueryTimeRange(ctx, &from, &through)
 	if err != nil {
 		return nil, err
 	} else if shortcut {
@@ -210,10 +209,8 @@ func (c *seriesStore) LabelNamesForMetricName(ctx context.Context, from, through
 	}
 	level.Debug(log).Log("metric", metricName)
 
-	// Fetch the series IDs from the index, based on non-empty matchers from
-	// the query.
-	_, matchers = util.SplitFiltersAndMatchers(matchers)
-	seriesIDs, err := c.lookupSeriesByMetricNameMatchers(ctx, from, through, userID, metricName, matchers)
+	// Fetch the series IDs from the index
+	seriesIDs, err := c.lookupSeriesByMetricNameMatchers(ctx, from, through, userID, metricName, nil)
 	if err != nil {
 		return nil, err
 	}
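The series_store.go hunks above make LabelNamesForMetricName validate only the query time range and look up series IDs with nil matchers, while the querier gains an import of "github.com/prometheus/common/model". The sketch below illustrates, under stated assumptions, how a caller could turn an optional start/end pair into the model.Time bounds such a store method expects; the Store interface, defaultLookback constant, and LabelNames helper are hypothetical names used only for illustration and are not part of this patch, and the six-hour lookback is an assumed default rather than a value taken from the code.

package labelquery

import (
	"context"
	"time"

	"github.com/prometheus/common/model"
)

// Store captures only the method exercised in this sketch; the real chunk
// store exposes more.
type Store interface {
	LabelNamesForMetricName(ctx context.Context, from, through model.Time, metricName string) ([]string, error)
}

// defaultLookback is an assumed default window applied when no start time is supplied.
const defaultLookback = 6 * time.Hour

// LabelNames fills in missing bounds and converts them to model.Time before
// delegating to the store.
func LabelNames(ctx context.Context, store Store, metricName string, start, end *time.Time) ([]string, error) {
	through := time.Now()
	if end != nil {
		through = *end
	}
	from := through.Add(-defaultLookback)
	if start != nil {
		from = *start
	}
	return store.LabelNamesForMetricName(ctx,
		model.TimeFromUnixNano(from.UnixNano()),
		model.TimeFromUnixNano(through.UnixNano()),
		metricName)
}

Passing nil matchers in the new lookupSeriesByMetricNameMatchers call means label names are gathered from every series of the metric within the requested range rather than from a matcher-filtered subset.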