Skip to content

Commit

Permalink
Making changes in the test cases
Browse files Browse the repository at this point in the history
Added a few more test cases to check that the data being downsampled gets converted to aggregated chunks, and also a test case to check that non-empty XOR chunks can be iterated through.

Signed-off-by: Kartik-Garg <kartik.garg@infracloud.io>
  • Loading branch information
Kartik-Garg committed Feb 20, 2023
1 parent bd3cdc4 commit e2f16a9
Show file tree
Hide file tree
Showing 3 changed files with 27 additions and 15 deletions.
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re
- [#6098](https://github.com/thanos-io/thanos/pull/6098) Cache/Redis: upgrade `rueidis` to v0.0.93 to fix potential panic when the client-side caching is disabled.
- [#6103](https://github.com/thanos-io/thanos/pull/6103) Mixins(Rule): Fix query for long rule evaluations.
- [#6121](https://github.com/thanos-io/thanos/pull/6121) Receive: Deduplicate metamonitoring queries.
- [#6137](https://github.com/thanos-io/thanos/pull/6137) Compact: Repair of non-empty XOR chunks during 1h downsampling.

### Changed

Expand Down
3 changes: 1 addition & 2 deletions pkg/compact/downsample/downsample.go
Original file line number Diff line number Diff line change
Expand Up @@ -175,8 +175,7 @@ func Downsample(
for _, cn := range aggrDataChunks {
ac, ok = cn.Chunk.(*AggrChunk)
if !ok {
level.Warn(logger).Log("Not able to convert non-empty XOR chunks into 5m downsampled Aggregated chunks")
continue
return id, errors.New("Not able to convert non-empty XOR chunks to 5m downsampled aggregated chunks.")
}
}
}
Expand Down
38 changes: 25 additions & 13 deletions pkg/compact/downsample/downsample_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -538,26 +538,27 @@ func TestDownsampleAggrAndEmptyXORChunks(t *testing.T) {
}

func TestDownsampleAggrAndNonEmptyXORChunks(t *testing.T) {
var (
reuseIt chunkenc.Iterator
all []sample
)

logger := log.NewLogfmtLogger(os.Stderr)
dir := t.TempDir()

ser := &series{lset: labels.FromStrings("__name__", "a")}
aggr := map[AggrType][]sample{
AggrCount: {{t: 1587690299999, v: 20}},
AggrSum: {{t: 1587693590791, v: 255746}},
AggrMin: {{t: 1587690299999, v: 461968}},
AggrMax: {{t: 1587690299999, v: 465870}},
AggrCounter: {{t: 1587690005791, v: 461968}},
AggrCount: {{t: 1587690299999, v: 20}, {t: 1587690599999, v: 20}, {t: 1587690899999, v: 20}, {t: 1587691199999, v: 20}, {t: 1587691499999, v: 20}, {t: 1587691799999, v: 20}, {t: 1587692099999, v: 20}, {t: 1587692399999, v: 20}, {t: 1587692699999, v: 16}, {t: 1587692999999, v: 20}, {t: 1587693299999, v: 20}, {t: 1587693590791, v: 20}},
AggrSum: {{t: 1587690299999, v: 9.276972e+06}, {t: 1587690599999, v: 9.359861e+06}, {t: 1587690899999, v: 9.447457e+06}, {t: 1587691199999, v: 9.542732e+06}, {t: 1587691499999, v: 9.630379e+06}, {t: 1587691799999, v: 9.715631e+06}, {t: 1587692099999, v: 9.799808e+06}, {t: 1587692399999, v: 9.888117e+06}, {t: 1587692699999, v: 2.98928e+06}, {t: 1587692999999, v: 81592}, {t: 1587693299999, v: 163711}, {t: 1587693590791, v: 255746}},
AggrMin: {{t: 1587690299999, v: 461968}, {t: 1587690599999, v: 466070}, {t: 1587690899999, v: 470131}, {t: 1587691199999, v: 474913}, {t: 1587691499999, v: 479625}, {t: 1587691799999, v: 483709}, {t: 1587692099999, v: 488036}, {t: 1587692399999, v: 492223}, {t: 1587692699999, v: 75}, {t: 1587692999999, v: 2261}, {t: 1587693299999, v: 6210}, {t: 1587693590791, v: 10464}},
AggrMax: {{t: 1587690299999, v: 465870}, {t: 1587690599999, v: 469951}, {t: 1587690899999, v: 474726}, {t: 1587691199999, v: 479368}, {t: 1587691499999, v: 483566}, {t: 1587691799999, v: 487787}, {t: 1587692099999, v: 492065}, {t: 1587692399999, v: 496245}, {t: 1587692699999, v: 496544}, {t: 1587692999999, v: 6010}, {t: 1587693299999, v: 10242}, {t: 1587693590791, v: 14956}},
AggrCounter: {{t: 1587690005791, v: 461968}, {t: 1587690299999, v: 465870}, {t: 1587690599999, v: 469951}, {t: 1587690899999, v: 474726}, {t: 1587691199999, v: 479368}, {t: 1587691499999, v: 483566}, {t: 1587691799999, v: 487787}, {t: 1587692099999, v: 492065}, {t: 1587692399999, v: 496245}, {t: 1587692699999, v: 498647}, {t: 1587692999999, v: 502554}, {t: 1587693299999, v: 506786}, {t: 1587693590791, v: 511500}, {t: 1587693590791, v: 14956}},
}
raw := chunkenc.NewXORChunk()
app, err := raw.Appender()
testutil.Ok(t, err)
// This comes in !ok and passes through our newly created functionality.

app.Append(1587690005794, 42.5)
//app.Append(1587690005795, 42.6)
// app.Append(1587690005796, 42.7)
// app.Append(1587690005797, 42.8)
// app.Append(1587690005798, 42.9)

ser.chunks = append(ser.chunks, encodeTestAggrSeries(aggr), chunks.Meta{
MinTime: math.MaxInt64,
MaxTime: math.MinInt64,
Expand All @@ -567,10 +568,21 @@ func TestDownsampleAggrAndNonEmptyXORChunks(t *testing.T) {
mb := newMemBlock()
mb.addSeries(ser)

// Checking if able to iterate through the non-empty XOR chunks.
for _, c := range ser.chunks {
err := expandChunkIterator(c.Chunk.Iterator(reuseIt), &all)
testutil.Ok(t, err)
}

// Checking if XOR chunks get downsampled to 5m aggregated chunks.
aggregatedData := DownsampleRaw(all, 300_000)
for _, cn := range aggregatedData {
_, ok := cn.Chunk.(*AggrChunk)
testutil.Equals(t, true, ok, "Not able to convert XOR chunks to 5m donwsampled aggreagted chunks.")
}

fakeMeta := &metadata.Meta{}
// target
fakeMeta.Thanos.Downsample.Resolution = 300_000
// already existing resolution
id, err := Downsample(logger, fakeMeta, mb, dir, 3_600_000)
_ = id
testutil.Ok(t, err)
Expand Down

0 comments on commit e2f16a9

Please sign in to comment.