diff --git a/pkg/storage/stores/series/index/caching_index_client.go b/pkg/storage/stores/series/index/caching_index_client.go
index bde219465df4e..79461659995ee 100644
--- a/pkg/storage/stores/series/index/caching_index_client.go
+++ b/pkg/storage/stores/series/index/caching_index_client.go
@@ -16,7 +16,6 @@ import (
 	"github.com/grafana/loki/pkg/storage/chunk/cache"
 	util_log "github.com/grafana/loki/pkg/util/log"
-	"github.com/grafana/loki/pkg/util/spanlogger"
 )
 
 var (
@@ -322,10 +321,6 @@ func (s *cachingIndexClient) cacheStore(ctx context.Context, keys []string, batc
 }
 
 func (s *cachingIndexClient) cacheFetch(ctx context.Context, keys []string) (batches []ReadBatch, missed []string) {
-	spanLogger := spanlogger.FromContext(ctx)
-	logger := util_log.WithContext(ctx, s.logger)
-	level.Debug(spanLogger).Log("requested", len(keys))
-
 	cacheGets.Add(float64(len(keys)))
 
 	// Build a map from hash -> key; NB there can be collisions here; we'll fetch
@@ -354,7 +349,7 @@ func (s *cachingIndexClient) cacheFetch(ctx context.Context, keys []string) (bat
 		var readBatch ReadBatch
 
 		if err := proto.Unmarshal(bufs[j], &readBatch); err != nil {
-			level.Warn(spanLogger).Log("msg", "error unmarshalling index entry from cache", "err", err)
+			level.Warn(util_log.Logger).Log("msg", "error unmarshalling index entry from cache", "err", err)
 			cacheCorruptErrs.Inc()
 			continue
 		}
@@ -362,7 +357,7 @@ func (s *cachingIndexClient) cacheFetch(ctx context.Context, keys []string) (bat
 		// Make sure the hash(key) is not a collision in the cache by looking at the
 		// key in the value.
 		if key != readBatch.Key {
-			level.Debug(spanLogger).Log("msg", "dropping index cache entry due to key collision", "key", key, "readBatch.Key", readBatch.Key, "expiry")
+			level.Debug(util_log.Logger).Log("msg", "dropping index cache entry due to key collision", "key", key, "readBatch.Key", readBatch.Key, "expiry")
 			continue
 		}
 
@@ -370,11 +365,6 @@
 			continue
 		}
 
-		if len(readBatch.Entries) != 0 {
-			// not using spanLogger to avoid over-inflating traces since the query count can go much higher
-			level.Debug(logger).Log("msg", "found index cache entries", "key", key, "count", len(readBatch.Entries))
-		}
-
 		cacheHits.Inc()
 		batches = append(batches, readBatch)
 	}
@@ -392,6 +382,5 @@
 		missed = append(missed, miss)
 	}
 
-	level.Debug(spanLogger).Log("hits", len(batches), "misses", len(misses))
 	return batches, missed
 }
diff --git a/pkg/storage/stores/series/index/schema_util.go b/pkg/storage/stores/series/index/schema_util.go
index 0a04e5c616584..460c1adb193ea 100644
--- a/pkg/storage/stores/series/index/schema_util.go
+++ b/pkg/storage/stores/series/index/schema_util.go
@@ -195,8 +195,8 @@ func ParseChunkTimeRangeValue(rangeValue []byte, value []byte) (
 	// v1 & v2 schema had three components - label name, label value and chunk ID.
 	// No version number.
 	case len(components) == 3:
-		chunkID = string(components[2])
-		labelValue = model.LabelValue(components[1])
+		chunkID = yoloString(components[2])
+		labelValue = model.LabelValue(yoloString(components[1]))
 		return
 
 	case len(components[3]) == 1:
@@ -205,42 +205,42 @@
 		// "version" is 1 and label value is base64 encoded.
 		// (older code wrote "version" as 1, not '1')
 		case chunkTimeRangeKeyV1a, chunkTimeRangeKeyV1:
-			chunkID = string(components[2])
+			chunkID = yoloString(components[2])
 			labelValue, err = decodeBase64Value(components[1])
 			return
 
 		// v4 schema wrote v3 range keys and a new range key - version 2,
 		// with four components - <empty>, <empty>, chunk ID and version.
 		case chunkTimeRangeKeyV2:
-			chunkID = string(components[2])
+			chunkID = yoloString(components[2])
 			return
 
 		// v5 schema version 3 range key is chunk end time, <empty>, chunk ID, version
 		case chunkTimeRangeKeyV3:
-			chunkID = string(components[2])
+			chunkID = yoloString(components[2])
 			return
 
 		// v5 schema version 4 range key is chunk end time, label value, chunk ID, version
 		case chunkTimeRangeKeyV4:
-			chunkID = string(components[2])
+			chunkID = yoloString(components[2])
 			labelValue, err = decodeBase64Value(components[1])
 			return
 
 		// v6 schema added version 5 range keys, which have the label value written in
 		// to the value, not the range key. So they are [chunk end time, <empty>, chunk ID, version].
 		case chunkTimeRangeKeyV5:
-			chunkID = string(components[2])
-			labelValue = model.LabelValue(value)
+			chunkID = yoloString(components[2])
+			labelValue = model.LabelValue(yoloString(value))
 			return
 
 		// v9 schema actually return series IDs
 		case seriesRangeKeyV1:
-			chunkID = string(components[0])
+			chunkID = yoloString(components[0])
 			return
 
 		case labelSeriesRangeKeyV1:
-			chunkID = string(components[1])
-			labelValue = model.LabelValue(value)
+			chunkID = yoloString(components[1])
+			labelValue = model.LabelValue(yoloString(value))
 			return
 		}
 	}
diff --git a/pkg/storage/stores/series/series_index_store.go b/pkg/storage/stores/series/series_index_store.go
index c4fc5b36b56f7..e9bf5bce79fd7 100644
--- a/pkg/storage/stores/series/series_index_store.go
+++ b/pkg/storage/stores/series/series_index_store.go
@@ -251,6 +251,8 @@ func (c *IndexStore) LabelValuesForMetricName(ctx context.Context, userID string
 	if err != nil {
 		return nil, err
 	}
+	// nolint:staticcheck
+	defer entriesPool.Put(entries)
 
 	var result util.UniqueStrings
 	for _, entry := range entries {
@@ -289,6 +291,8 @@ func (c *IndexStore) labelValuesForMetricNameWithMatchers(ctx context.Context, u
 	if err != nil {
 		return nil, err
 	}
+	// nolint:staticcheck
+	defer entriesPool.Put(entries)
 
 	result := util.NewUniqueStrings(len(entries))
 	for _, entry := range entries {
@@ -409,7 +413,6 @@ func (c *IndexStore) lookupIdsByMetricNameMatcher(ctx context.Context, from, thr
 	if err != nil {
 		return nil, err
 	}
-	unfilteredQueries := len(queries)
 
 	if filter != nil {
 		queries = filter(queries)
@@ -423,20 +426,13 @@ func (c *IndexStore) lookupIdsByMetricNameMatcher(ctx context.Context, from, thr
 	} else if err != nil {
 		return nil, err
 	}
+	// nolint:staticcheck
+	defer entriesPool.Put(entries)
 
 	ids, err := parseIndexEntries(ctx, entries, matcher)
 	if err != nil {
 		return nil, err
 	}
-	level.Debug(util_log.WithContext(ctx, util_log.Logger)).
-		Log(
-			"msg", "Store.lookupIdsByMetricNameMatcher",
-			"matcher", formatMatcher(matcher),
-			"queries", unfilteredQueries,
-			"filteredQueries", len(queries),
-			"entries", len(entries),
-			"ids", len(ids),
-		)
 
 	return ids, nil
 }
@@ -486,6 +482,12 @@ func parseIndexEntries(_ context.Context, entries []index.Entry, matcher *labels
 	return result, nil
 }
 
+var entriesPool = sync.Pool{
+	New: func() interface{} {
+		return make([]index.Entry, 0, 1024)
+	},
+}
+
 func (c *IndexStore) lookupEntriesByQueries(ctx context.Context, queries []index.Query) ([]index.Entry, error) {
 	// Nothing to do if there are no queries.
 	if len(queries) == 0 {
@@ -493,7 +495,7 @@
 	}
 
 	var lock sync.Mutex
-	var entries []index.Entry
+	entries := entriesPool.Get().([]index.Entry)[:0]
 	err := c.index.QueryPages(ctx, queries, func(query index.Query, resp index.ReadBatchResult) bool {
 		iter := resp.Iterator()
 		lock.Lock()
@@ -532,6 +534,9 @@ func (c *IndexStore) lookupLabelNamesBySeries(ctx context.Context, from, through
 	if err != nil {
 		return nil, err
 	}
+	// nolint:staticcheck
+	defer entriesPool.Put(entries)
+
 	level.Debug(log).Log("entries", len(entries))
 
 	var result util.UniqueStrings
@@ -595,11 +600,8 @@ func (c *IndexStore) lookupChunksBySeries(ctx context.Context, from, through mod
 	if err != nil {
 		return nil, err
 	}
-	level.Debug(util_log.WithContext(ctx, util_log.Logger)).Log(
-		"msg", "SeriesStore.lookupChunksBySeries",
-		"seriesIDs", len(seriesIDs),
-		"queries", len(queries),
-		"entries", len(entries))
+	// nolint:staticcheck
+	defer entriesPool.Put(entries)
 
 	result, err := parseIndexEntries(ctx, entries, nil)
 	return result, err
diff --git a/pkg/storage/stores/series/series_store_utils.go b/pkg/storage/stores/series/series_store_utils.go
index a0cbaf307a2de..317e0547bdd0d 100644
--- a/pkg/storage/stores/series/series_store_utils.go
+++ b/pkg/storage/stores/series/series_store_utils.go
@@ -5,7 +5,6 @@ import (
 	"unicode/utf8"
 
 	"github.com/prometheus/common/model"
-	"github.com/prometheus/prometheus/model/labels"
 
 	"github.com/grafana/loki/pkg/logproto"
 	"github.com/grafana/loki/pkg/storage/chunk"
@@ -171,13 +170,3 @@ func FindSetMatches(pattern string) []string {
 	}
 	return matches
 }
-
-// Using this function avoids logging of nil matcher, which works, but indirectly via panic and recover.
-// That confuses attached debugger, which wants to breakpoint on each panic.
-// Using simple check is also faster.
-func formatMatcher(matcher *labels.Matcher) string {
-	if matcher == nil {
-		return "nil"
-	}
-	return matcher.String()
-}
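Note on the schema_util.go hunks above: `ParseChunkTimeRangeValue` now converts range-value components with `yoloString` instead of `string(...)`, so building each chunk ID and label value no longer copies the underlying bytes. The helper itself is not added by this patch, so its exact definition is not shown here; the conventional definition used elsewhere in Prometheus and Loki is an unsafe zero-copy conversion along the lines of this sketch (reproduced from memory, not from this diff):

```go
package main

import (
	"fmt"
	"unsafe"
)

// yoloString reinterprets a byte slice as a string without copying it.
// The returned string aliases b's backing array, so it is only safe to use
// while that slice is neither modified nor reused.
func yoloString(b []byte) string {
	return *((*string)(unsafe.Pointer(&b)))
}

func main() {
	rangeValue := []byte("chunk-id-1234") // hypothetical range-value component
	chunkID := yoloString(rangeValue)     // no allocation, no copy
	fmt.Println(chunkID)
}
```

That aliasing is the trade-off: `string(b)` is always safe because it copies, while `yoloString(b)` avoids the copy and makes the caller responsible for the buffer's lifetime.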
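Note on the entriesPool change in series_index_store.go: `lookupEntriesByQueries` now takes its `[]index.Entry` slice from a package-level `sync.Pool`, and every caller returns it with `defer entriesPool.Put(entries)` once the entries have been consumed. The `// nolint:staticcheck` comments most likely silence SA6002, which warns that putting a non-pointer value (here a slice header) into a pool forces an allocation on each Put. A minimal, self-contained sketch of the same Get/reset/Put pattern, using a hypothetical `Entry` type and `lookupEntries` helper in place of the real ones:

```go
package main

import (
	"fmt"
	"sync"
)

// Entry is a hypothetical stand-in for index.Entry.
type Entry struct {
	TableName  string
	RangeValue []byte
}

// entriesPool mirrors the pool added in the diff: it hands out reusable
// slices so repeated index lookups do not reallocate large []Entry buffers.
var entriesPool = sync.Pool{
	New: func() interface{} {
		return make([]Entry, 0, 1024)
	},
}

// lookupEntries sketches the callee side: take a pooled slice, truncate it
// to length zero, and append results into the reused backing array.
func lookupEntries(n int) []Entry {
	entries := entriesPool.Get().([]Entry)[:0]
	for i := 0; i < n; i++ {
		entries = append(entries, Entry{TableName: "index_1", RangeValue: []byte(fmt.Sprint(i))})
	}
	return entries
}

func main() {
	entries := lookupEntries(3)
	// Caller side: give the slice back once it has been fully consumed.
	// nolint:staticcheck // SA6002: storing a slice (non-pointer) boxes its header into an interface
	defer entriesPool.Put(entries)

	for _, e := range entries {
		fmt.Println(e.TableName, string(e.RangeValue))
	}
}
```

The invariant that matters, and that the deferred Put at each call site in the diff preserves, is that nothing reads the slice after it has gone back into the pool.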