diff --git a/ast/ast.go b/ast/ast.go
index 18fe2d7efc..e3fa7985a0 100644
--- a/ast/ast.go
+++ b/ast/ast.go
@@ -1117,7 +1117,8 @@ func (e *FunctionExpression) Copy() Node {
 // Result of evaluating an equality operator is always of type Boolean based on whether the
 // comparison is true
 // Arithmetic operators take numerical values (either literals or variables) as their operands
-//   and return a single numerical value.
+//
+//	and return a single numerical value.
 type OperatorKind int
 
 const (
@@ -1263,7 +1264,7 @@ func (o *LogicalOperatorKind) UnmarshalText(data []byte) error {
 
 // LogicalExpression represent the rule conditions that collectively evaluate to either true or false.
 // `or` expressions compute the disjunction of two boolean expressions and return boolean values.
-// `and`` expressions compute the conjunction of two boolean expressions and return boolean values.
+// `and` expressions compute the conjunction of two boolean expressions and return boolean values.
 type LogicalExpression struct {
 	BaseNode
 	Operator LogicalOperatorKind `json:"operator"`
diff --git a/ast/edit/option_editor.go b/ast/edit/option_editor.go
index fe1fb77e38..dd42684bb1 100644
--- a/ast/edit/option_editor.go
+++ b/ast/edit/option_editor.go
@@ -76,9 +76,9 @@ func OptionObjectFn(keyMap map[string]ast.Expression) OptionFn {
 	}
 }
 
-//Finds the `OptionStatement` with the specified `identifier` and updates its value.
-//There shouldn't be more then one option statement with the same identifier
-//in a valid query.
+// Finds the `OptionStatement` with the specified `identifier` and updates its value.
+// There shouldn't be more than one option statement with the same identifier
+// in a valid query.
 type optionEditor struct {
 	identifier string
 	optionFn   OptionFn
diff --git a/ast/testcase/testcase.go b/ast/testcase/testcase.go
index b14e0d7c4f..21c395130e 100644
--- a/ast/testcase/testcase.go
+++ b/ast/testcase/testcase.go
@@ -18,17 +18,17 @@ import (
 //
 // A testcase is defined with the testcase statement such as below.
 //
-//   import "testing/assert"
-//   myVar = 4
-//   testcase addition {
-//       assert.equal(want: 2 + 2, got: myVar)
-//   }
+//	import "testing/assert"
+//	myVar = 4
+//	testcase addition {
+//	    assert.equal(want: 2 + 2, got: myVar)
+//	}
 //
 // This gets transformed into a package that looks like this:
 //
-//   import "testing/assert"
-//   myVar = 4
-//   assert.equal(want: 2 + 2, got: myVar)
+//	import "testing/assert"
+//	myVar = 4
+//	assert.equal(want: 2 + 2, got: myVar)
 //
 // It is allowed to include options within the testcase block as they will be extracted
 // to the top level.
@@ -37,23 +37,22 @@ import (
 // This will transform the the extended testcase in a slightly different way.
 // The syntax for extending is as such:
 //
-//   import "math"
-//   testcase addition_v2 extends "math_test.addition" {
-//       option math.enable_v2 = true
-//       super()
-//   }
+//	import "math"
+//	testcase addition_v2 extends "math_test.addition" {
+//	    option math.enable_v2 = true
+//	    super()
+//	}
 //
 // The extending test case is then transformed into a single file combining both the parent
 // statements and the current statements.
 //
-//   import "testing/assert"
-//   import "math"
+//	import "testing/assert"
+//	import "math"
 //
-//   option math.enable_v2 = true
-//
-//   myVar = 4
-//   assert.equal(want: 2 + 2, got: myVar)
+//	option math.enable_v2 = true
 //
+//	myVar = 4
+//	assert.equal(want: 2 + 2, got: myVar)
 //
 // The call to `super()` is replaced with the body of the parent test case.
 //
@@ -62,7 +61,7 @@ import (
 // It is allowed for an imported testcase to have an option, but no attempt is made
 // to remove duplicate options. If there is a duplicate option, this will likely
 // cause an error when the test is actually run.
-func Transform(ctx context.Context, pkg *ast.Package, modules TestModules) ([]string, []*ast.Package, error) {
+func Transform(ctx context.Context, pkg *ast.Package, modules TestModules) ([]*ast.Identifier, []*ast.Package, error) {
 	if len(pkg.Files) != 1 {
 		return nil, nil, errors.Newf(codes.FailedPrecondition, "unsupported number of files in test case package, got %d", len(pkg.Files))
 	}
@@ -81,7 +80,7 @@ func Transform(ctx context.Context, pkg *ast.Package, modules TestModules) ([]st
 	}
 
 	var (
-		names = make([]string, 0, n)
+		idens = make([]*ast.Identifier, 0, n)
 		pkgs  = make([]*ast.Package, 0, n)
 	)
 	for _, item := range file.Body {
@@ -94,11 +93,11 @@ func Transform(ctx context.Context, pkg *ast.Package, modules TestModules) ([]st
 		if err != nil {
 			return nil, nil, err
 		}
-		names = append(names, testcase.ID.Name)
+		idens = append(idens, testcase.ID)
 		pkgs = append(pkgs, testpkg)
 	}
 
-	return names, pkgs, nil
+	return idens, pkgs, nil
 }
 
 func newTestPackage(ctx context.Context, basePkg *ast.Package, preamble []ast.Statement, tc *ast.TestCaseStatement, modules TestModules) (*ast.Package, error) {
diff --git a/ast/testcase/testcase_test.go b/ast/testcase/testcase_test.go
index 58795827ff..9cd12598af 100644
--- a/ast/testcase/testcase_test.go
+++ b/ast/testcase/testcase_test.go
@@ -51,12 +51,16 @@ testcase test_subtraction {
 
 	d := parser.ParseSource(testFile)
 
-	names, transformed, err := testcase.Transform(context.Background(), d, nil)
+	idens, transformed, err := testcase.Transform(context.Background(), d, nil)
 	if err != nil {
 		t.Fatal(err)
 	}
+	testNames := make([]string, len(idens))
+	for i := range idens {
+		testNames[i] = idens[i].Name
+	}
 
-	if want, got := []string{"test_addition", "test_subtraction"}, names; !cmp.Equal(want, got) {
+	if want, got := []string{"test_addition", "test_subtraction"}, testNames; !cmp.Equal(want, got) {
 		t.Errorf("unexpected test names: -want/+got:\n%s", cmp.Diff(want, got))
 	}
 	if !cmp.Equal(expected, transformed, asttest.IgnoreBaseNodeOptions...) {
@@ -114,7 +118,7 @@ testcase b extends "flux/a/a_test.a" {
 	}
 	pkg.Files[0].Name = "b/b_test.flux"
 
-	names, pkgs, err := testcase.Transform(ctx, pkg, testcase.TestModules{
+	idens, pkgs, err := testcase.Transform(ctx, pkg, testcase.TestModules{
 		"flux": testcase.TestModule{
 			Service: fs,
 		},
@@ -122,8 +126,11 @@ testcase b extends "flux/a/a_test.a" {
 	if err != nil {
 		t.Fatalf("unexpected error: %s", err)
 	}
-
-	if want, got := []string{"b"}, names; !cmp.Equal(want, got) {
+	testNames := make([]string, len(idens))
+	for i := range idens {
+		testNames[i] = idens[i].Name
+	}
+	if want, got := []string{"b"}, testNames; !cmp.Equal(want, got) {
 		t.Fatalf("unexpected testcase names -want/+got:\n%s", cmp.Diff(want, got))
 	}
 
diff --git a/cmd/flux/cmd/test.go b/cmd/flux/cmd/test.go
index 5ee35a1d43..f9776b5dd0 100644
--- a/cmd/flux/cmd/test.go
+++ b/cmd/flux/cmd/test.go
@@ -165,17 +165,20 @@ type Test struct {
 	ast *ast.Package
 	// set of tags specified for the test case
 	tags []string
+	// package name for the test case
+	pkg string
 	// indicates if the test should be skipped
 	skip bool
 
 	err error
 }
 
 // NewTest creates a new Test instance from an ast.Package.
-func NewTest(name string, ast *ast.Package, tags []string) Test {
+func NewTest(name string, ast *ast.Package, tags []string, pkg string) Test {
 	return Test{
 		name: name,
 		ast:  ast,
 		tags: tags,
+		pkg:  pkg,
 	}
 }
 
@@ -188,6 +191,11 @@ func (t *Test) Name() string {
 	return t.name
 }
 
+// PackageName returns the name of the package the test case belongs to.
+func (t *Test) PackageName() string {
+	return t.pkg
+}
+
 // Get the error from the test, if one exists.
 func (t *Test) Error() error {
 	return t.err
@@ -260,6 +267,35 @@ func contains(names []string, name string) bool {
 	return false
}
 
+// containsWithPkgName reports whether names contains the test, matching
+// either the bare testcase name or the package-qualified name.
+func containsWithPkgName(names []string, test *Test) bool {
+	var aTest, tTestName string
+	for _, name := range names {
+		fTest := splitAny(name, ".")
+		// handle the package.TestName case
+		if len(fTest) > 1 {
+			aTest = strings.Join(fTest, ".")
+			tTestName = test.PackageName() + "." + test.Name()
+		} else {
+			aTest = name
+			tTestName = test.Name()
+		}
+		if aTest == tTestName {
+			return true
+		}
+	}
+	return false
+}
+
+// splitAny splits a string at any rune contained in seps
+func splitAny(s string, seps string) []string {
+	splitter := func(r rune) bool {
+		return strings.ContainsRune(seps, r)
+	}
+	return strings.FieldsFunc(s, splitter)
+}
+
 // TestRunner gathers and runs all tests.
 type TestRunner struct {
 	tests []*Test
@@ -317,6 +351,11 @@ func (t *TestRunner) Gather(roots []string) error {
 		}
 		ctx := filesystem.Inject(context.Background(), fs)
+		// check for duplicate testcase names
+		type testcaseLoc struct {
+			loc *ast.SourceLocation
+		}
+		seen := make(map[string]testcaseLoc)
 
 		for _, file := range files {
 			q, err := filesystem.ReadFile(ctx, file.path)
 			if err != nil {
@@ -326,20 +365,26 @@ func (t *TestRunner) Gather(roots []string) error {
 			if len(baseAST.Files) > 0 {
 				baseAST.Files[0].Name = file.path
 			}
-			tcnames, asts, err := testcase.Transform(ctx, baseAST, modules)
+			tcidens, asts, err := testcase.Transform(ctx, baseAST, modules)
 			if err != nil {
 				return err
 			}
+			pkg := strings.TrimSuffix(baseAST.Package, "_test")
 			for i, astf := range asts {
 				tags, err := readTags(astf)
 				if err != nil {
 					return err
 				}
 				if invalid := invalidTags(tags, mods.Tags(file.module)); len(invalid) != 0 {
-					return errors.Newf(codes.Invalid, "testcase %q, contains invalid tags %v, valid tags are: %v", tcnames[i], invalid, mods.Tags(file.module))
+					return errors.Newf(codes.Invalid, "testcase %q contains invalid tags %v, valid tags are: %v", tcidens[i].Name, invalid, mods.Tags(file.module))
+				}
+				pkgTest := pkg + "." + tcidens[i].Name
+				if _, ok := seen[pkgTest]; ok {
+					return errors.Newf(codes.AlreadyExists, "duplicate testcase name %q, found in package %q, at locations %v and %v", tcidens[i].Name, pkg, seen[pkgTest].loc.String(), tcidens[i].Loc.String())
 				}
-				test := NewTest(tcnames[i], astf, tags)
+				test := NewTest(tcidens[i].Name, astf, tags, pkg)
 				t.tests = append(t.tests, &test)
+				seen[pkgTest] = testcaseLoc{tcidens[i].Loc}
 			}
 		}
 	}
@@ -370,26 +415,21 @@ func union(a, b []string) []string {
 // MarkSkipped checks the provided filters and marks each test case as skipped as needed.
 //
 // Skip rules:
-// - When testNames is not empty any test in the list will be run, all others skipped.
-// - When a test name is in skips, the test is skipped.
-// - When a test contains any tags all tags must be specified for the test to run.
-// - When skipUntagged is true, any test that does not have any tags is skipped.
+//   - When testNames is not empty any test in the list will be run, all others skipped.
+//   - When a test name is in skips, the test is skipped.
+//   - When a test contains any tags all tags must be specified for the test to run.
+//   - When skipUntagged is true, any test that does not have any tags is skipped.
 //
 // The list of tests takes precedence over all other parameters.
 func (t *TestRunner) MarkSkipped(testNames, skips, tags []string, skipUntagged bool) {
-	skipMap := make(map[string]bool)
-	for _, n := range skips {
-		skipMap[n] = true
-	}
-
 	for i := range t.tests {
 		// If testNames is not empty then check only that list
 		if len(testNames) > 0 {
-			t.tests[i].skip = !contains(testNames, t.tests[i].Name())
+			t.tests[i].skip = !containsWithPkgName(testNames, t.tests[i])
 			continue
 		}
 
 		// Now we assume the test is not skipped and check the rest of the rules
-		skip := false
+		skipBecauseTags := false
 		if len(t.tests[i].tags) > 0 {
 			// Tags must be present for all test tags
@@ -398,10 +438,16 @@ func (t *TestRunner) MarkSkipped(testNames, skips, tags []string, skipUntagged b
 				isMatch = isMatch && contains(tags, tag)
 			}
 			if !isMatch {
-				skip = true
+				skipBecauseTags = true
 			}
 		}
-		t.tests[i].skip = skip || skipMap[t.tests[i].Name()] || (skipUntagged && len(t.tests[i].tags) == 0)
+		// skip tests that appear in the skip list
+		skipBecauseSkipList := false
+		if !skipBecauseTags && len(skips) > 0 {
+			skipBecauseSkipList = containsWithPkgName(skips, t.tests[i])
+		}
+
+		t.tests[i].skip = skipBecauseTags || skipBecauseSkipList || (skipUntagged && len(t.tests[i].tags) == 0)
 	}
 }
 
diff --git a/cmd/flux/test_test.go b/cmd/flux/test_test.go
index ddc7e12e0c..20f7d827f5 100644
--- a/cmd/flux/test_test.go
+++ b/cmd/flux/test_test.go
@@ -214,8 +214,8 @@ func runForPath(t *testing.T, path string, wantErr error, args ...string) Summar
 
 func Test_TestCmd(t *testing.T) {
 	want := Summary{
-		Found:   7,
-		Passed:  1,
+		Found:   9,
+		Passed:  3,
 		Failed:  0,
 		Skipped: 6,
 	}
@@ -229,10 +229,10 @@ func Test_TestCmd(t *testing.T) {
 
 func Test_TestCmd_TestName(t *testing.T) {
 	want := Summary{
-		Found:   7,
+		Found:   9,
 		Passed:  1,
 		Failed:  0,
-		Skipped: 6,
+		Skipped: 8,
 	}
 	got := runAll(t, nil, "--test", "a")
 	for name, got := range got {
@@ -241,10 +241,41 @@ func Test_TestCmd_TestName(t *testing.T) {
 		}
 	}
 }
-func Test_TestCmd_Fails(t *testing.T) {
+
+func Test_TestCmd_TestName_DuplicateWithPackage(t *testing.T) {
 	want := Summary{
-		Found:   7,
+		Found:   9,
 		Passed:  1,
+		Failed:  0,
+		Skipped: 8,
+	}
+	got := runAll(t, nil, "--test", "pkgb.duplicate")
+	for name, got := range got {
+		if want != got {
+			t.Errorf("%s: unexpected summary got %+v want %+v", name, got, want)
+		}
+	}
+}
+
+func Test_TestCmd_TestName_Duplicate(t *testing.T) {
+	want := Summary{
+		Found:   9,
+		Passed:  2,
+		Failed:  0,
+		Skipped: 7,
+	}
+	got := runAll(t, nil, "--test", "duplicate")
+	for name, got := range got {
+		if want != got {
+			t.Errorf("%s: unexpected summary got %+v want %+v", name, got, want)
+		}
+	}
+}
+
+func Test_TestCmd_Fails(t *testing.T) {
+	want := Summary{
+		Found:   9,
+		Passed:  3,
 		Failed:  1,
 		Skipped: 5,
 	}
@@ -268,56 +299,56 @@ func Test_TestCmd_Tags(t *testing.T) {
 		{
 			tags: []string{"a"},
 			want: Summary{
-				Found:   7,
-				Passed:  2,
+				Found:   9,
+				Passed:  4,
 				Skipped: 5,
 			},
 		},
 		{
 			tags: []string{"a", "b"},
 			want: Summary{
-				Found:   7,
-				Passed:  3,
+				Found:   9,
+				Passed:  5,
 				Skipped: 4,
 			},
 		},
 		{
 			tags: []string{"a", "b", "c"},
 			want: Summary{
-				Found:   7,
-				Passed:  4,
+				Found:   9,
+				Passed:  6,
 				Skipped: 3,
 			},
 		},
 		{
 			tags: []string{"b", "c"},
 			want: Summary{
-				Found:   7,
-				Passed:  1,
+				Found:   9,
+				Passed:  3,
 				Skipped: 6,
 			},
 		},
 		{
 			tags: []string{"c"},
 			want: Summary{
-				Found:   7,
-				Passed:  1,
+				Found:   9,
+				Passed:  3,
 				Skipped: 6,
 			},
 		},
 		{
 			tags: []string{"b"},
 			want: Summary{
-				Found:   7,
-				Passed:  1,
+				Found:   9,
+				Passed:  3,
 				Skipped: 6,
 			},
 		},
 		{
 			tags: []string{"foo"},
 			want: Summary{
-				Found:   7,
-				Passed:  3,
+				Found:   9,
+				Passed:  5,
 				Skipped: 4,
 			},
 		},
@@ -337,8 +368,8 @@ func Test_TestCmd_Tags(t *testing.T) {
 
 func Test_TestCmd_Skip(t *testing.T) {
 	want := Summary{
-		Found:   7,
-		Passed:  0,
+		Found:   9,
+		Passed:  2,
 		Skipped: 7,
 	}
 	got := runAll(t, nil, "--skip", "untagged")
@@ -349,11 +380,44 @@ func Test_TestCmd_Skip(t *testing.T) {
 	}
 }
 
+func Test_TestCmd_Skip_DuplicateWithPackage(t *testing.T) {
+	want := Summary{
+		Found:   9,
+		Passed:  2,
+		Skipped: 7,
+	}
+	got := runAll(t, nil, "--skip", "pkga.duplicate")
+	for name, got := range got {
+		if want != got {
+			t.Errorf("%s: unexpected summary got %+v want %+v", name, got, want)
+		}
+	}
+}
+
+func Test_TestCmd_Skip_Duplicate(t *testing.T) {
+	want := Summary{
+		Found:   9,
+		Passed:  1,
+		Skipped: 8,
+	}
+	got := runAll(t, nil, "--skip", "duplicate")
+	for name, got := range got {
+		if want != got {
+			t.Errorf("%s: unexpected summary got %+v want %+v", name, got, want)
+		}
+	}
+}
+
+func Test_TestCmd_Error_Duplicate(t *testing.T) {
+	wantErr := errors.New("duplicate testcase name \"duplicate\", found in package \"test\", at locations testdataduplicate/test_test.flux|7:10-7:19 and testdataduplicate/test_test.flux|14:10-14:19")
+	runForPath(t, "./testdataduplicate", wantErr, "--test", "duplicate")
+}
+
 func Test_TestCmd_SkipUntagged(t *testing.T) {
 	want := Summary{
-		Found:   7,
+		Found:   9,
 		Passed:  0,
-		Skipped: 7,
+		Skipped: 9,
 	}
 	got := runAll(t, nil, "--skip-untagged")
 	for name, got := range got {
diff --git a/cmd/flux/testdata/pkga/pkga_test.flux b/cmd/flux/testdata/pkga/pkga_test.flux
index 996957e78d..684ff5563a 100644
--- a/cmd/flux/testdata/pkga/pkga_test.flux
+++ b/cmd/flux/testdata/pkga/pkga_test.flux
@@ -4,14 +4,21 @@ package pkga_test
 import "testing"
 import "array"
 
-option testing.tags = ["foo"]
-
 testcase bar {
+    option testing.tags = ["foo"]
     array.from(rows: [{}])
 }
 
 testcase untagged_extends extends "testdata/test_test.untagged" {
+    option testing.tags = ["foo"]
-    // Note this test is tagged with foo because of the package level
-    // option statement
+    // Note this test is tagged with foo because of the testcase-level
+    // option statement
     super()
 }
+
+testcase duplicate {
+    want = array.from(rows: [{_value: 1}])
+    got = array.from(rows: [{_value: 1}])
+
+    testing.diff(want, got)
+}
diff --git a/cmd/flux/testdata/pkgb/pkgb_test.flux b/cmd/flux/testdata/pkgb/pkgb_test.flux
new file mode 100644
index 0000000000..39c38a1b75
--- /dev/null
+++ b/cmd/flux/testdata/pkgb/pkgb_test.flux
@@ -0,0 +1,12 @@
+package pkgb_test
+
+
+import "testing"
+import "array"
+
+testcase duplicate {
+    want = array.from(rows: [{_value: 1}])
+    got = array.from(rows: [{_value: 1}])
+
+    testing.diff(want, got)
+}
diff --git a/cmd/flux/testdata/test_test.flux b/cmd/flux/testdata/test_test.flux
index fc6dd827ec..3ce3868af8 100644
--- a/cmd/flux/testdata/test_test.flux
+++ b/cmd/flux/testdata/test_test.flux
@@ -30,4 +30,4 @@ testcase fails {
 }
 testcase untagged {
     array.from(rows: [{}])
-}
+}
\ No newline at end of file
diff --git a/cmd/flux/testdataduplicate/test_test.flux b/cmd/flux/testdataduplicate/test_test.flux
new file mode 100644
index 0000000000..3473ab1d43
--- /dev/null
+++ b/cmd/flux/testdataduplicate/test_test.flux
@@ -0,0 +1,19 @@
+package test_test
+
+
+import "testing"
+import "array"
+
+testcase duplicate {
+    want = array.from(rows: [{_value: 1}])
+    got = array.from(rows: [{_value: 1}])
+
+    testing.diff(want, got)
+}
+
+testcase duplicate {
+    want = array.from(rows: [{_value: 1}])
+    got = array.from(rows: [{_value: 1}])
+
+    testing.diff(want, got)
+}
diff --git a/compiler/runtime_test.go b/compiler/runtime_test.go
index 4de45ab9da..042156bf3b 100644
--- a/compiler/runtime_test.go
+++ b/compiler/runtime_test.go
@@ -88,7 +88,8 @@ func TestFunctionValue_Resolve(t *testing.T) {
 }
 
 // XXX: sort of confusing but this error message is actually coming
-//   from runtime.Eval but can also be seen in interpreter...
+//
+//	from runtime.Eval but can also be seen in interpreter...
 func TestIndexExpr_TableObjectIsError(t *testing.T) {
 	src := `
 		import "array"
diff --git a/compiler/vectorized_test.go b/compiler/vectorized_test.go
index 80daa80c77..91345ae83e 100644
--- a/compiler/vectorized_test.go
+++ b/compiler/vectorized_test.go
@@ -34,14 +34,14 @@ func vectorizedObjectFromMap(mp map[string]interface{}, mem memory.Allocator) va
 }
 
 // Check that:
-// 1. Vectorized inputs yield vectorized outputs when compiled and evaluated
-// 2. The number of bytes allocated is 0 once evaluation is complete
-//    and values are released
-// 3. Only certain function expressions are vectorized when invoking the
-//    analyzer from go code. The criteria for supported expressions may
-//    change in the future, but right now we only support trivial identity
-//    functions (i.e., those in the form of `(r) => ({a: r.a})`, or something
-//    similar)
+//  1. Vectorized inputs yield vectorized outputs when compiled and evaluated
+//  2. The number of bytes allocated is 0 once evaluation is complete
+//     and values are released
+//  3. Only certain function expressions are vectorized when invoking the
+//     analyzer from go code. The criteria for supported expressions may
+//     change in the future, but right now we only support trivial identity
+//     functions (i.e., those in the form of `(r) => ({a: r.a})`, or something
+//     similar)
 func TestVectorizedFns(t *testing.T) {
 	type TestCase struct {
 		name string
diff --git a/dependencies/url/validator.go b/dependencies/url/validator.go
index d1ce7b4d9c..7e6f28387a 100644
--- a/dependencies/url/validator.go
+++ b/dependencies/url/validator.go
@@ -80,7 +80,7 @@ func init() {
 	}
 }
 
-// isPrivateIP reports whether an IP exists in a known private IP space. 
+// isPrivateIP reports whether an IP exists in a known private IP space.
 func isPrivateIP(ip net.IP) bool {
 	for _, block := range privateIPBlocks {
 		if block.Contains(ip) {
diff --git a/execute/bounds.go b/execute/bounds.go
index 53bcc1e11e..9be0c6df3c 100644
--- a/execute/bounds.go
+++ b/execute/bounds.go
@@ -46,7 +46,8 @@ func (b Bounds) Overlaps(o Bounds) bool {
 
 // Intersect returns the intersection of two bounds.
 // It returns empty bounds if one of the input bounds are empty.
 // TODO: there are several places that implement bounds and related utilities.
-//   consider a central place for them?
+//
+//	consider a central place for them?
func (b *Bounds) Intersect(o Bounds) Bounds { if b.IsEmpty() || o.IsEmpty() || !b.Overlaps(o) { return Bounds{ diff --git a/execute/format.go b/execute/format.go index 957536b349..de527297b5 100644 --- a/execute/format.go +++ b/execute/format.go @@ -267,7 +267,6 @@ func (f *Formatter) valueBuf(i, j int, typ flux.ColType, cr flux.ColReader) []by // * common tags sorted by label // * other tags sorted by label // * value -// type orderedCols struct { indexMap []int cols []flux.ColMeta diff --git a/execute/table.go b/execute/table.go index fc17984adf..82a0886981 100644 --- a/execute/table.go +++ b/execute/table.go @@ -359,9 +359,11 @@ func AppendKeyValues(key flux.GroupKey, builder TableBuilder) error { // AppendKeyValuesN runs AppendKeyValues `n` times. // This is different from // ``` -// for i := 0; i < n; i++ { -// AppendKeyValues(key, builder) -// } +// +// for i := 0; i < n; i++ { +// AppendKeyValues(key, builder) +// } +// // ``` // Because it saves the overhead of calculating the column mapping `n` times. func AppendKeyValuesN(key flux.GroupKey, builder TableBuilder, n int) error { diff --git a/internal/pkg/feature/cmd/feature/strings.go b/internal/pkg/feature/cmd/feature/strings.go index b7c98aa840..cad6567548 100644 --- a/internal/pkg/feature/cmd/feature/strings.go +++ b/internal/pkg/feature/cmd/feature/strings.go @@ -83,11 +83,12 @@ func replace(old, new, src string) string { // camelcase is to convert words separated by space, underscore and hyphen to camel case. // // Some samples. -// "some_words" => "SomeWords" -// "http_server" => "HttpServer" -// "no_https" => "NoHttps" -// "_complex__case_" => "_Complex_Case_" -// "some words" => "SomeWords" +// +// "some_words" => "SomeWords" +// "http_server" => "HttpServer" +// "no_https" => "NoHttps" +// "_complex__case_" => "_Complex_Case_" +// "some words" => "SomeWords" func camelcase(str string) string { if len(str) == 0 { return "" diff --git a/internal/pkg/feature/doc.go b/internal/pkg/feature/doc.go index f7913ba611..4504d45ab0 100644 --- a/internal/pkg/feature/doc.go +++ b/internal/pkg/feature/doc.go @@ -19,9 +19,9 @@ // For example, to check a boolean flag that was generated by the `feature` command, // you might do it this way: // -// if feature.MyFeature().Enabled(ctx) { -// // my feature is enabled -// } else { -// // my feature is disabled -// } +// if feature.MyFeature().Enabled(ctx) { +// // my feature is enabled +// } else { +// // my feature is disabled +// } package feature diff --git a/internal/zoneinfo/zoneinfo.go b/internal/zoneinfo/zoneinfo.go index fc47a000cc..5d5c89f0a7 100644 --- a/internal/zoneinfo/zoneinfo.go +++ b/internal/zoneinfo/zoneinfo.go @@ -181,14 +181,14 @@ func (l *Location) lookup(sec int64) (name string, offset int, start, end int64, // The reference implementation in localtime.c from // https://www.iana.org/time-zones/repository/releases/tzcode2013g.tar.gz // implements the following algorithm for these cases: -// 1) If the first zone is unused by the transitions, use it. -// 2) Otherwise, if there are transition times, and the first -// transition is to a zone in daylight time, find the first -// non-daylight-time zone before and closest to the first transition -// zone. -// 3) Otherwise, use the first zone that is not daylight time, if -// there is one. -// 4) Otherwise, use the first zone. +// 1. If the first zone is unused by the transitions, use it. +// 2. 
Otherwise, if there are transition times, and the first +// transition is to a zone in daylight time, find the first +// non-daylight-time zone before and closest to the first transition +// zone. +// 3. Otherwise, use the first zone that is not daylight time, if +// there is one. +// 4. Otherwise, use the first zone. func (l *Location) lookupFirstZone() int { // Case 1. if !l.firstZoneUsed() { diff --git a/interval/bounds.go b/interval/bounds.go index 924d0f0b74..25a74799f5 100644 --- a/interval/bounds.go +++ b/interval/bounds.go @@ -74,7 +74,8 @@ func (b Bounds) Length() values.Duration { // Intersect returns the intersection of two bounds. // It returns empty bounds if one of the input bounds are empty. // TODO: there are several places that implement bounds and related utilities. -// consider a central place for them? +// +// consider a central place for them? func (b Bounds) Intersect(o Bounds) Bounds { if b.IsEmpty() || o.IsEmpty() || !b.Overlaps(o) { return Bounds{ diff --git a/libflux/go/libflux/buildinfo.gen.go b/libflux/go/libflux/buildinfo.gen.go index 7a61e601fb..76213cc7aa 100644 --- a/libflux/go/libflux/buildinfo.gen.go +++ b/libflux/go/libflux/buildinfo.gen.go @@ -103,7 +103,7 @@ var sourceHashes = map[string]string{ "stdlib/contrib/sranka/webexteams/webexteams.flux": "e4c37a318d580139cc17982868f021e6ae28d26b70353dbf55b63387419c5258", "stdlib/contrib/tomhollingworth/events/duration.flux": "611336375816b73cdb7c9c711501c9dbd2e653720b7725f6e658974e3bd6181c", "stdlib/contrib/tomhollingworth/events/duration_test.flux": "41500a9a391b5e6aa3fb7aa4db6db5e414947be5251736d91ae498d06d1f851e", - "stdlib/contrib/tomhollingworth/events/duration_with_stop_test.flux": "d776dcbc86133bc58cf7eeee79619d2b9797a280cbbeeebff28976a99cfa7acb", + "stdlib/contrib/tomhollingworth/events/duration_with_stop_test.flux": "dbecebfe7aaeb988efad6d2160e8f5df28f6816ec6d61d1ccbe9bee99af2f144", "stdlib/csv/csv.flux": "94a1d8dd59c0e092617e9c974bad4edaf1a2c6ebb20fd185524b58cb657329b0", "stdlib/csv/csv_test.flux": "3840dd74e86252b6f91faa6ae758b064efb1b5ae9af4cd270607a5b18efcbe4c", "stdlib/date/boundaries/boundaries.flux": "1217fd01e02edcffc872416f2a4168e7fa024231a9a2be5a53e31789eb5d8231", @@ -122,7 +122,7 @@ var sourceHashes = map[string]string{ "stdlib/date/month_day_time_test.flux": "6eaf4a9cc84681fa4537a9fc96435655cec74742e8138a142338b1cf1f0fc7a3", "stdlib/date/month_duration_test.flux": "df850206739f85d933c79da09b9c63daade80e39fbbdf1fafabb316584317482", "stdlib/date/month_time_test.flux": "a234e52a04990e79a455a76ca9958a93ac491de3c839495f6e34e0edc292877f", - "stdlib/date/nanosecond_duration_test.flux": "e8e33307895abadf813cc06039a9267976f21f5f7fd078f6736eb33375ac63da", + "stdlib/date/nanosecond_duration_test.flux": "4f72e9668a196c7b685aa9f3da26113147b90aa8fb4382576f2182bcef99161f", "stdlib/date/nanosecond_time_test.flux": "21ab88ac71fcfe7b7a9cbd4d88217eda149c5784e7c8988fe12e045b89149937", "stdlib/date/quarter_duration_test.flux": "97cc4ac954901bc266920784ea0b2b08a0658dca4e811f5982df1d2c947a82ee", "stdlib/date/quarter_time_test.flux": "e0ca82e1c28abeea1606471867de61f5d1a42be45aca4da947396e64a1fe6f60", @@ -140,24 +140,24 @@ var sourceHashes = map[string]string{ "stdlib/date/year_duration_test.flux": "09a0610557b23767041cf80bd1715c86e46608032b5b37d65f081b94ce0205d7", "stdlib/date/year_time_test.flux": "5aa74f7ac4d7a97a8457ee7708796f1f64b1fc67ad2a01921afe2a1e89330fe2", "stdlib/dict/dict.flux": "3b87675dd4c0cd4f02a9df02461b7a13c40da0eef782ca9a89807395a3b4f06a", - "stdlib/dict/dict_insert_remove_test.flux": 
"4eeaebb515ceebed6ddcfe7e93bab73652a1c878425fb026334384d83d14342d", - "stdlib/dict/dict_lit_lambda_test.flux": "7574306b568c10a66f3f12c9062301e495ba45ca06dd69cfb1ff4825809cdaf4", - "stdlib/dict/dict_lit_test.flux": "418bc93bffbb1a61d857d9109cc2438a5c1f5390f4463883cab80f6f237fd6ab", + "stdlib/dict/dict_insert_remove_test.flux": "26ee0ac6b12e799dc59c7831f482a7e40f2688339386c5a8ef0e1a29b1cb732e", + "stdlib/dict/dict_lit_lambda_test.flux": "c58285ed9431d43d10081e36e0169f1051b581409790e2fee66a75f9e0f0b40c", + "stdlib/dict/dict_lit_test.flux": "fa161c2ae4bb6bbf689217f691e5ceaec13ce7ad383630853eba60b6abc9f92f", "stdlib/dict/dict_test.flux": "c71ad5c2d5dfdd141f23ee44d50d4cef81e24fe77b2fe7b76f78a9e8f79a010d", - "stdlib/dict/empty_dict_lambda_test.flux": "3e66aefb2d476b7ddd15bf3cfda019326d1a191acefd82ad2a0e3956da2f3610", - "stdlib/dict/empty_dict_lit_test.flux": "e8205b5330bc603d3d558de53f6ca0a51ef9ff1a75d47d7fc161d06af4543ca0", + "stdlib/dict/empty_dict_lambda_test.flux": "7fc81c62f3f80b569f73035a21f0f5f39877551ce69b6a57112da4de67e09f8d", + "stdlib/dict/empty_dict_lit_test.flux": "bf54d986ea3d9d31b6c043f31f747db0d02b2897c5bcdd1791cb61e7b2c5e60b", "stdlib/experimental/aggregate/aggregate.flux": "baed6a789f4db9739ad7699367039973121bbbe694a11cd5bebbe3007b7a4712", "stdlib/experimental/aggregate/aggregate_test.flux": "dfebbe6d046de66d167c0a6ea97b30436fb6019f27fd68fd4106089cbe9b31fd", - "stdlib/experimental/alignTime_test.flux": "036a84e36bdc319486e5c550193d30a544ebd01bf14769018f9d1369136f1b68", + "stdlib/experimental/alignTime_test.flux": "22e8a03a7683bbef0d63e0862c1445df91fded700cafacf60acf20268e88007a", "stdlib/experimental/array/array.flux": "0c1eb0792d175937d31f24ed7b6241900dda5fc8c9553bc9211ac5d6f38cf17a", - "stdlib/experimental/array/array_test.flux": "225b514823b213b25ad163d5b073c7c569599e20aa494a442b55f61e23588f14", + "stdlib/experimental/array/array_test.flux": "7416663639ef971f5e2e7160bdeace1ede47dc187369eaf76640718405e7903f", "stdlib/experimental/bigtable/bigtable.flux": "1ff47e5e0aec0268fab0f12573f6a0f3bc9df2607d3244b60dad244c898aeb73", "stdlib/experimental/bitwise/bitwise.flux": "ec1f4e358301b06905a4af3d8c5f5f98530bf31a2476f5c3875746f0b22fe6d9", - "stdlib/experimental/bitwise/bitwise_test.flux": "2ca1f5aa9323b12d54deefe4778f1b89934474087d90fedcdcb2cd668ded3804", + "stdlib/experimental/bitwise/bitwise_test.flux": "9d51ec0db32684d8c002ed493cbbc47ab36af44eab27a0a35481739526e33e4d", "stdlib/experimental/count_test.flux": "120a145eb287266e35638cc5220f5cecf0eee5bf2eec1bb1e9852e7d382050c7", "stdlib/experimental/csv/csv.flux": "7f033d93ed2e456d7ee181f0afa88f8c4c81f3b1a0984e56d53d3e146db48d20", "stdlib/experimental/date/boundaries/boundaries.flux": "977d79939b0f87feb7b6d6d784b267f14647054662173d4398066e48be421cf4", - "stdlib/experimental/date/boundaries/boundaries_test.flux": "d78f243174f6a24b3186d6b20b4eee6d1dcfd9dcc0a9206e62db5f7f8532124b", + "stdlib/experimental/date/boundaries/boundaries_test.flux": "35f4198aae27017b38227a20d16ecf5c6ec81382643a36aafadda844b8d36e2b", "stdlib/experimental/diff_test.flux": "2d0d11128fef3f36307a90ed5e6acc46e6a979637916f2b0bcebb8511372093d", "stdlib/experimental/distinct_test.flux": "c7358d31972d0931aef6735ea94d901827c13fbaaeb9b02ff255391b5f95ea30", "stdlib/experimental/experimental.flux": "6158f8ecf5c7a7da52fa960fc060e2444216bb72766e2c4cfeed3a4f2cb3a251", @@ -169,7 +169,7 @@ var sourceHashes = map[string]string{ "stdlib/experimental/geo/filterRowsPivoted_test.flux": "25d56c93b61818606810c0ec7279a03deec9712a44a81eeb86957067d7bb2ee0", 
"stdlib/experimental/geo/filterRowsStrict_test.flux": "397c4ff9001976d268876574160c256563a1d5983101d54f4ec7fb9b91c45c5f", "stdlib/experimental/geo/geo.flux": "fed7f77c7d3ec8f0a76aa886917ef794eb6da869628fe82a0d0b9aee45a17aad", - "stdlib/experimental/geo/gridFilterLevel_test.flux": "b09251cc97e6085fa6d0a6702571ec0f828a56d3ce6907dd3c49a834c90f0a3c", + "stdlib/experimental/geo/gridFilterLevel_test.flux": "f9d7c9b79a91cfcff7b4cd29a3b511fc397cb8666481a1ac40f0d8f28d94e146", "stdlib/experimental/geo/gridFilter_test.flux": "6de30bc0d5672a18d4d4e0351efabb9d24b783b71945faa6f67302ac4409b225", "stdlib/experimental/geo/groupByArea_test.flux": "cce3a07b36b7b30a51b1f55ff495cc86ef4f673d95928e8b18641ba3a8b2db30", "stdlib/experimental/geo/shapeDataWithFilter_test.flux": "f6b49a1e9c676e0bc254f8343fac91989980457b486338e0963f7071036529d4", @@ -219,7 +219,7 @@ var sourceHashes = map[string]string{ "stdlib/experimental/spread_test.flux": "2651d6a5dd37bf8081c281e4e3e7f7fc8816334394a6e205b216041f3fb93b16", "stdlib/experimental/stddev_test.flux": "7599e6ac73721abbc5f4f54e2de926dc56fff0255cf880e9106bc4cdd0db7d71", "stdlib/experimental/sum_test.flux": "66504d439f767a16ad0b4d7d9a7e053d32269ff3663b96ca5a40369971771858", - "stdlib/experimental/table/fill_test.flux": "e7c377cf56f3dbe00237dfe8b3386a86e886401c5e95696dd8eea5f80baeb423", + "stdlib/experimental/table/fill_test.flux": "8715fd0badf1b1abeae4c86d42a8e8030abbbde379954428285dbcd860393cfa", "stdlib/experimental/table/table.flux": "ec101a99eb148c5112217c622395d5915fb055219cb4b814a1e18df602c25524", "stdlib/experimental/unique_test.flux": "01a47cbabac1307c8e3360f227054362c55e97e25929c5bf8e67051bcdf2478e", "stdlib/experimental/universe/universe.flux": "a4a9debf27ab2f7febd79541f3898de991f05591eaeb14162179cbe1df1121e1", @@ -243,7 +243,7 @@ var sourceHashes = map[string]string{ "stdlib/influxdata/influxdb/monitor/state_changes_big_ok_to_info_test.flux": "12de3fdf934e54431867ab476cbd55ff4866ac6047388b8f09f270c1be5fe661", "stdlib/influxdata/influxdb/monitor/state_changes_custom_any_to_any_test.flux": "742c1fec94b2a77b7ed6cb28999ec15dd229efe44d4d4c3f2312dd705e770aac", "stdlib/influxdata/influxdb/monitor/state_changes_info_to_any_test.flux": "ea7d48c795fc1089dfa1d9accdfa36ba609ecf184a4a7929b3f530a885d2cf50", - "stdlib/influxdata/influxdb/monitor/state_changes_invalid_any_to_any_test.flux": "d6d5c32bf7c2e0889a68bb107f00c3aa017079f041554b4298a4a28e2799a70c", + "stdlib/influxdata/influxdb/monitor/state_changes_invalid_any_to_any_test.flux": "53168bb5421486d86b13f1c602913d2fc80499e9461cc274888488babac0d281", "stdlib/influxdata/influxdb/monitor/state_changes_test.flux": "7118f50ecf2e949a1a0048fc1a55a246c00d20d750d6b3e86aa1e22f3d5b3edc", "stdlib/influxdata/influxdb/sample/alignToNow_test.flux": "3ae4ba6cbec062c3c41ba5a9eafb5bcf9bf6b02a9aae322028d86bfdb09264ea", "stdlib/influxdata/influxdb/sample/list_test.flux": "5f993c8f349194d8dd34987d769e74b8305aa62e253c1eb38e4aa2bc67a037b8", @@ -254,10 +254,10 @@ var sourceHashes = map[string]string{ "stdlib/influxdata/influxdb/schema/schema_test.flux": "b1e66a39f1544c155f34c073357a1270f9bf8aa52225b144afaa079515cbe2b1", "stdlib/influxdata/influxdb/secrets/secrets.flux": "9adb9658f7e6bbf6c2a8fa9acb5f447cd9ec4c08094cb041e7203eb9db315588", "stdlib/influxdata/influxdb/secrets/secrets_test.flux": "f0c496e043047bbb9b102d8cecac0410fbeb35846bdb34228561d9853aa82c3a", - "stdlib/influxdata/influxdb/tasks/last_success_duration_no_option_test.flux": "6267c43bacf0bee793728b03ad57f85c20ef2333522899a52fafc96531ffecec", - 
"stdlib/influxdata/influxdb/tasks/last_success_duration_option_test.flux": "604d2086a4e2840fa4ca6111e8cb14f0dfd4c8423ff0db6fd7e37f639ba37f17", - "stdlib/influxdata/influxdb/tasks/last_success_with_option_test.flux": "308421561e8a5c784e3f673502e0443cbfd87b9a6b19c0c613d275e7642a78be", - "stdlib/influxdata/influxdb/tasks/last_success_without_option_test.flux": "50f9abf06f66838a46d4e3b8883ed4e1a895ea40a67ad83568af2c01627adad7", + "stdlib/influxdata/influxdb/tasks/last_success_duration_no_option_test.flux": "a96e9e5a5d98f9ea3b8dca5ba8f1cd1d8f638e175765301f5bdcf2f4a51365a4", + "stdlib/influxdata/influxdb/tasks/last_success_duration_option_test.flux": "0341a21b4fce8a63b039f4d6f89df08aae03fe3e69416e78b61a1f6320339384", + "stdlib/influxdata/influxdb/tasks/last_success_with_option_test.flux": "ca89eb78908567e574ac36b30b07dfcb740133544b62a2c11fda719468af3e4f", + "stdlib/influxdata/influxdb/tasks/last_success_without_option_test.flux": "5425eeb07d2822f8615ce86ca23af4ae04d85e53c026245197d448e9ab9dbb77", "stdlib/influxdata/influxdb/tasks/tasks.flux": "24caada214fad78ec414693891ec69961eb52ba96d3a56630aae77f692aeaf09", "stdlib/influxdata/influxdb/to_test.flux": "fcf5253c42f7668988e6b951d4ec11c5ba754ee19ba1fcc67ec3e0b3c0d3d579", "stdlib/influxdata/influxdb/v1/v1.flux": "86ffb049a2b5d7568a8c8791ad7ccc0a9b62b564ce9009384267cdbc77f1a275", @@ -297,7 +297,7 @@ var sourceHashes = map[string]string{ "stdlib/planner/bare_sum_eval_test.flux": "a023264107222bac949f81b76c7a34a33123b5db642fe5d0f8a0124045509d7c", "stdlib/planner/bare_sum_push_test.flux": "b5f8dfb9ce7b2a8beb0c636a0f7b44d704f3d830e0d2a9952e4b1e5a15fbe345", "stdlib/planner/group_agg_test.flux": "45866e2aff83ad34439ea335dbeadb42533b4f9eefdd3cea663fabb5f5675079", - "stdlib/planner/group_agg_uneven_keys_test.flux": "aafa761dca11490c2dade712ea673678d59e41e17e325847eb96b23d9403799f", + "stdlib/planner/group_agg_uneven_keys_test.flux": "3f1f0d73d4eff61acdf878b6db2f6d72eaebc216ca1073e14240de93024b0ac0", "stdlib/planner/group_count_eval_test.flux": "c59954e31734b7a9c0cb6562f7ba7b23795898065653f4f43ad5a7d5fe98c6da", "stdlib/planner/group_count_push_test.flux": "5ad7203a545821eb213dd0ae13829ebf23e5eec5644ef0220a26e8534e379548", "stdlib/planner/group_first_last_test.flux": "9d6fad804d35eb6ed9e78d067bf2a7a99dedb2045620ca9f1a3522a8d13ae097", @@ -309,22 +309,22 @@ var sourceHashes = map[string]string{ "stdlib/planner/group_min_push_test.flux": "32878aa1dfc411a652511c8b77e4c1b586f260d4f3453393a756072fe7f70367", "stdlib/planner/group_min_test.flux": "f2fd6a3f37a36fa351ca7c7178e42e814e4ca42ed412a5b756b0f3e37416e623", "stdlib/planner/group_sum_eval_test.flux": "1123cfa595dbb3d8e7605bced637b1be6ac9a23980c52ff3fda4ce686a66d819", - "stdlib/planner/group_sum_push_test.flux": "58acc5344bb261dba1c29a71730031a5ff13b37944562323ede814a31b46cd73", + "stdlib/planner/group_sum_push_test.flux": "caea4e056b5862f1b380f6d14d21d6c6034496b957eaf880e7deef2c3e5435f6", "stdlib/planner/planner.flux": "bb64ba6af236f78b7c62c5a478b2afebb6e196860a3e13417aa95126e3c4f5a5", "stdlib/planner/window_count_eval_test.flux": "bb57f0e477857097e66bcb0f305fc05017ebdf49d635d03b459ba9c11f3ff48b", "stdlib/planner/window_count_push_test.flux": "faecf4cff7686e0d6b86a61b322a6c887705846dec34c10b74e2284f7d0e6fd2", "stdlib/planner/window_eval_test.flux": "74a25c13ea157420ce755ee307e6e0f9827fe8897ce64eb0f31cbb3815531292", - "stdlib/planner/window_group_agg_eval_test.flux": "6054c3020967e914cdb9f233511e9e82ed5b60868e269a32e0f9f4cb6cbf527f", + "stdlib/planner/window_group_agg_eval_test.flux": 
"4172b48ad1db5f5e2675666272f64f6e8c64a53159dfc44f9a4885ce9b7176d3", "stdlib/planner/window_group_agg_push_test.flux": "13128373ae7733aeea74b6930923ed10975b80268c230d4bcd5b5f9a11f58373", "stdlib/planner/window_max_eval_test.flux": "b636de04316a52596e0542d6bb4917f8cbbdbaeddf2f482460fd2e360cdd7f2d", "stdlib/planner/window_max_push_test.flux": "9a76a7141dff134794e123c0bada42de2a00eecc0c056b82f7fe604826950470", "stdlib/planner/window_mean_eval_test.flux": "a14a35351fec426b45291d85b9c7d137903efaf992d6a1c69c91e2326055712e", - "stdlib/planner/window_mean_push_test.flux": "036069bb4b1a10732cb6a8a092756908f568f64b98bfb78f110e18d74e4ee3ad", + "stdlib/planner/window_mean_push_test.flux": "cafb5d693010c4e00d29b26676b8813fe711bee0a4b7db75ef2baadaba743fde", "stdlib/planner/window_min_eval_test.flux": "64e7d5d6c2a7d83e7ecd75b1470d7bb491b22e804c4e80030881fa17867fe08f", "stdlib/planner/window_min_push_test.flux": "fa9745677b107c682fb0195bb8230268de667c20b09bbc44dfdef5d15f5cfb26", "stdlib/planner/window_push_test.flux": "779f83caca79fec5e4129cb6d1b05b2fbcbb45b479acab69e075ad2ad62d9df2", "stdlib/planner/window_sum_eval_test.flux": "460d45de75419da033d9bdf0b9a8c0d1bbdb57ed0d02f44d5f55d4d4b45086f1", - "stdlib/planner/window_sum_push_test.flux": "eb8a9bb757503a40da2f397acce7fda67cd71419f1eae420fb3c171d2f01d185", + "stdlib/planner/window_sum_push_test.flux": "7ae2d85c889e95134d3976daf01bccacef9b350ef7657400c2c70213fe0c419f", "stdlib/profiler/profiler.flux": "8e5c0ca87f0c5207c05fa1d0748f008767cf2e7e5451f6bd34a4eb3432573f7a", "stdlib/pushbullet/pushbullet.flux": "91d0025516d02f53f0e8a973fc1273926c18fb71bfaa09c6b7a9712bb023a6d8", "stdlib/regexp/regexp.flux": "5d9d67f486f4496bc367a0d64fe5e2f96d760f039a7c22b5da9cb9186f58c188", @@ -363,17 +363,17 @@ var sourceHashes = map[string]string{ "stdlib/testing/pandas/capitalize_strings_toUpper_test.flux": "4d44c037d52de9aba668aaa04859ad3722ff52b9e4b4c0a513616bd505ff6f00", "stdlib/testing/pandas/cat_strings_joinStr_test.flux": "17c57a02f21f89925dcac633cdd3babf4aef6ac9601b7243976619c32be70097", "stdlib/testing/pandas/center_strings_joinStr_test.flux": "225d8217e02a1ced8e19ce8b3890c70eba1f09120c77067ad5079bb7db35d762", - "stdlib/testing/pandas/contains_filter_by_regex_match_test.flux": "e0769490dbd07ba06912451b6a86e0090b8fc2b65c7f4366a97e2b7e0cc9951b", + "stdlib/testing/pandas/contains_filter_by_regex_match_test.flux": "539a6c10e2277df22597da24e60fb7e1a5dfeba6524f7bfa6f35e1fed34f1a87", "stdlib/testing/pandas/count_string_countStr_test.flux": "9ca66de5bb0a3560bbf5773ae993d55903893db6d69d250c143ff8a0fabfcf26", "stdlib/testing/pandas/endswith_strings_regexp_hasSuffix_test.flux": "95051604ec3de0884da97157abcfb44d57006dc08e8ea036b70882098a76db7d", - "stdlib/testing/pandas/extract_regexp_findStringIndex_test.flux": "a52f196e4548bc07b223c19ae509a6b6a73b8b5c85e6c7ed6c519f1ead2e588e", + "stdlib/testing/pandas/extract_regexp_findStringIndex_test.flux": "654653fc99456d9b6ae36bcb74acf9c8f9783828c33e41d2366fa247d0a38465", "stdlib/testing/pandas/extract_regexp_findString_test.flux": "94bda73911f0812f9b0831c629308358750effbbe20f5c227f7758c75dbb72de", "stdlib/testing/pandas/get_strings_substring_test.flux": "08123d7cb882c04c27687c03189ec2d5620052e3dfe6ea02ff5381d0001bf790", "stdlib/testing/pandas/ljust_string_joinStr_test.flux": "a947a25e4bf653b806fb997c91eba1363961afbefe30db427fbb2505a0f382fc", "stdlib/testing/pandas/lower_strings_toLower_test.flux": "84251b07e918c10f56e07d5ecdf93d032326787e0f4c5157e933f262a99ecc70", "stdlib/testing/pandas/lstrip_string_trimLeft_test.flux": 
"7b38b14fc5795690919c5cf89c145ebbbe21b4f431092fab4ad706d6630569ea", - "stdlib/testing/pandas/partition_strings_splitN_test.flux": "8dc317ddabc14ee79be2bdf3ef923acf90f13b3de5e700efa22890decf33221b", - "stdlib/testing/pandas/repeat_strings_repeat_test.flux": "4a4212df3f6799c49f6fcbce8312ee130855964c65cb63369b112a3cdf3723c7", + "stdlib/testing/pandas/partition_strings_splitN_test.flux": "2abaca00a50d4d411f8e28ac0684d4eefa238f848563e0b29ca5b8148dc88b7f", + "stdlib/testing/pandas/repeat_strings_repeat_test.flux": "a450763650cc96af9664c8220f826699ef3873f54887451ebe5bc235954ae714", "stdlib/testing/pandas/replace_strings_replace_test.flux": "3ac2a23b0ec2ac3de2f93f8b7b8a12bea3601e40e5ef44d96c897e070e2e128d", "stdlib/testing/pandas/rstrip_string_trimRight_test.flux": "adc98153ddfeef2ebcae09f876f26d25cc04ba938b5269069d754a0edcf15e5d", "stdlib/testing/pandas/startswith_strings_regexp_hasPrefix_test.flux": "6ef29300a7d1989c6066db9d6aa5e02568d9eab41892ebef8c7bd48c1815b7d3", @@ -392,19 +392,19 @@ var sourceHashes = map[string]string{ "stdlib/testing/promql/holtWinters_test.flux": "ca13c04572dee253d231081b09b42a580c1585d2668d6109722973f729df702b", "stdlib/testing/promql/hour_test.flux": "e48337b36d713af708fb0e5e2a292834634c7129843638c5615f571cc2715013", "stdlib/testing/promql/instantRate_test.flux": "a83dac2f8313a17dd238eeba964553bf612785e803c85663d86f8590616e8e53", - "stdlib/testing/promql/labelReplace_empty_dst_test.flux": "d0b70802615dc57776b0c6d2e2829b9edc91f4f4858b827e12eabc16c19fde27", - "stdlib/testing/promql/labelReplace_full_string_match_test.flux": "c15e6182b0ceaf3b452f2fe77353f292a6278550f7dcd4d8f7b03a9d1267e07f", - "stdlib/testing/promql/labelReplace_multiple_groups_test.flux": "b56456a3d2e0b0eebed9f4dfab3d60a35b59a5d8d41fc656d18cd643250c096e", - "stdlib/testing/promql/labelReplace_src_empty_test.flux": "bde63aa51f280c3f8f190b97d32473b8e26aac86e4473f734c2e540307942a1b", - "stdlib/testing/promql/labelReplace_src_nonexistent_test.flux": "85769f1bcc6e212ac505d4f2ceebef4af731cf17fe5843ee95a26b82c76f08fb", - "stdlib/testing/promql/labelReplace_src_not_matched_test.flux": "4d2000338992ed2ef0a60a1db6a4097c67cccfa55bd77ddfd9b1aee74afe5bb5", - "stdlib/testing/promql/labelReplace_sub_string_match_test.flux": "b43a5f441dcdf500071e4c780323f20f532b72a76abe85607cd180383cfad255", + "stdlib/testing/promql/labelReplace_empty_dst_test.flux": "d0cc40bfb5231a68acaad672fa5b650152c7f45b3837bc2473a5a79d12311c78", + "stdlib/testing/promql/labelReplace_full_string_match_test.flux": "6660b8146eca521aafe44c083a40654825d4da4ad120d185b0a0638e712f3fcb", + "stdlib/testing/promql/labelReplace_multiple_groups_test.flux": "8cfdac553045a54c33541456de6d18fa44ff02b843ce97030824ab9e0d374330", + "stdlib/testing/promql/labelReplace_src_empty_test.flux": "dd686b85bd1ff59bea1f73b853c44849356d81172f7e2d033c6dc39eeaba78f2", + "stdlib/testing/promql/labelReplace_src_nonexistent_test.flux": "f5f2836556ea5885e895cbbbd45a72d3064f6dcab2bba616792610c7723e70a7", + "stdlib/testing/promql/labelReplace_src_not_matched_test.flux": "af4540cda23dcc79095262209fdc3d8b4fbe0ceed3f9fc9d6b0963a1ef7889a0", + "stdlib/testing/promql/labelReplace_sub_string_match_test.flux": "8d8089842249ab8604ec7bbead3551a88a93ae34a8332e4122e2a427858df05b", "stdlib/testing/promql/linearRegression_nopredict_test.flux": "6df38e2bc21e90b36ec2d6fc5e465cb3cbdbb59d00ccf121501e354c05001a9a", "stdlib/testing/promql/linearRegression_predict_test.flux": "d585efb1ba21c86faaa9d7fdb50d534208c1596ab8bf83a7607849dba21af786", "stdlib/testing/promql/minute_test.flux": 
"2371bd27c1c248a8e1fc9418dbdd196d5ba423743502c5ccd4d1257c5c8e7abe", "stdlib/testing/promql/month_test.flux": "bf91fe720e113a83a78bc52d8b9fa74ac71e32f9601b9430f7e2bb4a08250e78", - "stdlib/testing/promql/quantile_neg_test.flux": "eed19ce5df1167a7953f27ef6c5459ad1405fdafaf66c20b7a9a27eb4bcc1946", - "stdlib/testing/promql/quantile_pos_test.flux": "9d017abef1f7d4d331cbcd8587ece787a99acf092097b671b5650b0c5e3f33d7", + "stdlib/testing/promql/quantile_neg_test.flux": "700afb73f9599499ae1e4e56dee74f4abbe6443162fdfdfdc567e595d55e105d", + "stdlib/testing/promql/quantile_pos_test.flux": "fc6ad73e576e8472c45a2c418a49689c053506735d45846f862b3833c9939c0e", "stdlib/testing/promql/quantile_test.flux": "c1bd4c51ad33ca5402b8b81b38152428cc0ed684e2c9c65738aa5640acb90e4b", "stdlib/testing/promql/resets_test.flux": "e01bb24f576f8a2cb69fa1f5d9061cc852431970792ae2c59c3753580058926d", "stdlib/testing/promql/timestamp_test.flux": "350dc1161f222d2bfc327aee399ed9873e198c9858e845c2ca0b9ca0e2e38ef8", @@ -428,8 +428,8 @@ var sourceHashes = map[string]string{ "stdlib/universe/aggregate_empty_window_sum_test.flux": "877ec8c125286941a35d11690b8ff0cef7d9e9f2c2d22e78b9df55975a6ce2d0", "stdlib/universe/aggregate_fill_window_test.flux": "22da0718386b1209d082f2538ff9a52b794c7854571dd6307f462c1c20dbf565", "stdlib/universe/aggregate_window__offset_test.flux": "dca4027b2492a5819096a34b35e6c1af779ddbed4124671f4d75b30a068cf389", - "stdlib/universe/aggregate_window_max_test.flux": "8ee5d927ef375e7ac3687ea1ea00a0e8add969c31357362d0032ee079c2ac906", - "stdlib/universe/aggregate_window_mean_test.flux": "ba33848419748489bc330c67dfaa344fac757f8af782879c71e08b345f0f652f", + "stdlib/universe/aggregate_window_max_test.flux": "0dcea6df3e39c7ed4adad7b2a61cbe089c6d79ee870a39a69f59a58a92fef3d4", + "stdlib/universe/aggregate_window_mean_test.flux": "2b65e510517e5510fa42eaadc9e6205280dd47bd7b6d8752f61371018bcd3388", "stdlib/universe/aggregate_window_median_test.flux": "2791ed98310aae23deab7cb79836e60539cd29810c67910c30e4b891356a2698", "stdlib/universe/aggregate_window_test.flux": "d415e0613744096fd7f624bdef2faf1fdae480b83feee2298b672385de14a8c8", "stdlib/universe/cmo_test.flux": "3793f5cf21ae42879d6789cdc36bbcf29ddb99bf16af8a96775a9415fd8d1c09", @@ -445,13 +445,13 @@ var sourceHashes = map[string]string{ "stdlib/universe/cumulative_sum_test.flux": "056f988fb722ce92a2451fcdaa0dac3b3b8da5d12da4bbca23b8cd4f0a8da73a", "stdlib/universe/derivative_test.flux": "f8fc030eb41be54ea3f71001ed2dbc002a3ae2d57834c8b28a7448bc509226ef", "stdlib/universe/difference_columns_test.flux": "43850b253e586bdf46bef8233570ef2fe456c31c22bc7e4c102fc80bcd670501", - "stdlib/universe/difference_keepfirst_test.flux": "96caab413b04ea2d33d1c2288decc02d8aa9fb5a873c06e729e27298fe933e1c", + "stdlib/universe/difference_keepfirst_test.flux": "13b48d160a21f623718e1899ec086102e971761ecf5b786daebc2a97176e736e", "stdlib/universe/difference_nonnegative_test.flux": "3989644024670641375711decd324682e3b12d9dfd17f28380849c91fb000487", "stdlib/universe/difference_one_value_test.flux": "9ab9f9d6f6ad114cce19caa1357269183e6794bffaca886148ecb5af4627bcbb", "stdlib/universe/difference_panic_test.flux": "7b566a30fbe4a291e6ccde8d769f49638a9853614cd7afddda8a6a3008512822", "stdlib/universe/difference_test.flux": "505ff42d178c77bf58d0b031ac8aaa1836d54d85f7cc3b744c7378b8aade37b7", "stdlib/universe/display_test.flux": "433de3b8730c0ba0a5be1109ef1b223e7f92bd380d6dc0f9efdd35ee8d11edd1", - "stdlib/universe/distinct_test.flux": "ee1dfdff9fd21b0c19f08f1e7ccb8f6a792cd2dc844121af78c15677e0517490", + 
"stdlib/universe/distinct_test.flux": "8791b2601e28999fde3a2930a612d7724f8108736b060f088305b49e369c18d8", "stdlib/universe/double_exponential_moving_average_test.flux": "704952061eaf2ff0e886b2fe0a0b9864a1fc49c3feea0a4667d62113332dbd15", "stdlib/universe/drop_after_rename_test.flux": "e85edeca357240e7108447bcec5123ab8895f237dc87171bfff295204feabebd", "stdlib/universe/drop_before_rename_test.flux": "0eebd7a82a3fbbbc58e86dbd1ce75855fd30396abd219d1f5a23568bf471b018", @@ -461,38 +461,38 @@ var sourceHashes = map[string]string{ "stdlib/universe/drop_non_existent_test.flux": "36eeac7f90a4ac35d48b7c2d4fc7d5b14cdd8e3abb5a549f715b38fe5e07c6da", "stdlib/universe/drop_referenced_test.flux": "6fb13a4d184c9e31d1e21d820bd2cdadee8c5234ed1b2e7595e40c34fd57ce8b", "stdlib/universe/drop_unused_test.flux": "52259c94e834c1a084b297d74e5453103ce714668512a3def0bd917d8d842844", - "stdlib/universe/duplicate_overwrite_test.flux": "9cf7bb6ce8a973cfc0296cdeb5ca4ab46a9c0f75346b55fb543dd75ecd2a05fa", + "stdlib/universe/duplicate_overwrite_test.flux": "a6e772609ecf6393b59b89225cf2e1caa2fd220de62f02f5b0c608529a5255d2", "stdlib/universe/duplicate_test.flux": "cba5a0b21e770ec70b374532331e5521671908241fa4581bc6ca9bd749d89b12", "stdlib/universe/dynamic_query_test.flux": "64ce037df555b4750e9ed1b190e37fe1a1706ee739cc47248d511ef0b3c3b68c", "stdlib/universe/elapsed_median_test.flux": "9872bd757b6ce5d9c658ae14052faa7fede4fffef1d56228357d162932aa805b", "stdlib/universe/elapsed_test.flux": "0db5ca1ab33d9df6719cf3801c3d848af9ec58df6909daf8a783fdc3359b4601", "stdlib/universe/exponential_moving_average_test.flux": "e52f7e3e0d56afa315bca3d5def4692cb6fc95449616d084ac28605e6d506215", - "stdlib/universe/fill_bool_test.flux": "4e0b7ddb835ea2c790097dfc8c7618fe04be45320320466de751134df47e2831", - "stdlib/universe/fill_float_test.flux": "4488f9b48853ee8283665cee2df93c81edcf4e598c020f24b83258f348eef95e", - "stdlib/universe/fill_int_test.flux": "20cd5cbee8b9a491b65ae6d35b056df2d8f867e29302f413d2baabcc3efea81c", - "stdlib/universe/fill_previous_test.flux": "257306e18cddf9851d1a9e3954614295887b1c5cd8fe583b4bffb4529b83a634", - "stdlib/universe/fill_string_test.flux": "a4675034697b7e84a97536f02ce5681cac4055cb3a8227e053ffb7c4e76ff042", - "stdlib/universe/fill_time_test.flux": "78ab38f84daf4fc139d044f409f7274f312a5902fac49549954ec033773d7c27", - "stdlib/universe/fill_uint_test.flux": "0d5af3c85816e052842d7103064bfa2f84d197410cd9ca4b485afee406b2b676", - "stdlib/universe/filter_by_regex_compile_test.flux": "2d890d62545b3ad5253241ffc99c0caa74f09ebec8e4591b841f5feb8d8c5ef2", + "stdlib/universe/fill_bool_test.flux": "dd545c25df4e474109b951541375c7a354cc6408de6d9511b6ffa29377c92074", + "stdlib/universe/fill_float_test.flux": "39cc86b92593342000943211c2fb26dfea6dd2eae417c1e043bc3726833f1130", + "stdlib/universe/fill_int_test.flux": "f08accd9bd151476f40288602cc055e292018a2f6d3ec9b170304874be844ec9", + "stdlib/universe/fill_previous_test.flux": "e58959212270267d7021036fe6b9dba05a5ef9c733d7b1b6f3fb80344ad23430", + "stdlib/universe/fill_string_test.flux": "f9acadebe70728dd695fc5dd602238ca3ad7816a187357fa774740a2772b8a84", + "stdlib/universe/fill_time_test.flux": "339c87d634430b7fc038e51766ab692313823d36e63295657ee7159dd6933197", + "stdlib/universe/fill_uint_test.flux": "e9fced351fe4959bc4dc2ed35fa3803bb39aa6aeb25a9c47a37e77a63b6a22a5", + "stdlib/universe/filter_by_regex_compile_test.flux": "9cf4aa647d745a565a7f4050fdc008879fe728ccdbfbc6eaac63174ed23304c7", "stdlib/universe/filter_by_regex_function_test.flux": 
"e4f118697db9d0978a5aa106d0790c1b4d57abeac936bbf2eb6418b819a473c6", "stdlib/universe/filter_by_regex_test.flux": "1df07191ee0e12f878442e5af3fe46e16fb6d1a4ca9906a2656fcf54fbce916a", "stdlib/universe/filter_by_tags_test.flux": "513c19a9d41f1a1b241b89050ab9bd5618240cf1f6ff6aff649253cbb75f981e", "stdlib/universe/filter_drop_empty_test.flux": "c8dce2c83fc6e572d2b65264c156993b37f1196165db5b060246dc8efc215278", "stdlib/universe/filter_keep_empty_test.flux": "1041ff242740e2d56dbdd9671da6fbf10e94bc1c68acd47c455dd24d5ffeab82", "stdlib/universe/filter_mixed_empty_test.flux": "f16aaf277cb71b04ad6962120def1e5090466ca0d1af2cd478b30cef424786a3", - "stdlib/universe/filter_partial_pushdown_test.flux": "496b6aad41fec207a8182adc604ec9da94c5a413b013f68ceb05b8b2dbea623b", + "stdlib/universe/filter_partial_pushdown_test.flux": "80cab6317455b1d859541064a27a3ae485a43eda712ea5f36b41b91a856a6783", "stdlib/universe/first_test.flux": "fb7014a3317446f546bdfe747f254509c219f18f587e45ed25a68b349f65fbbb", "stdlib/universe/group_by_field_test.flux": "761e11864ae51afdb8ba3869f082a8b28923ad40745749fb48f52bdac33766d1", "stdlib/universe/group_by_irregular_test.flux": "98a4852cf3aa62c56408623ff81fe1d45c0c09cee801c3af9d0f5432a3fb7ea0", "stdlib/universe/group_except_test.flux": "475900e9cb86141775c4c6b673f1decae2afc91daec206bc7d6ab581577ac4e8", - "stdlib/universe/group_nulls_test.flux": "5b05da41c81f8cfaa9108d654f055940d92542f1a3832d8241eef355a75b89d6", + "stdlib/universe/group_nulls_test.flux": "65e7fd3ddea10fcae60ae8892b017094e999bc4dfe0f33cef978dc30514de89a", "stdlib/universe/group_test.flux": "ebd142cd94c3036959dc60e37a092342dc4c6a0d7145be50768262f9a199e645", "stdlib/universe/group_ungroup_test.flux": "a13d8e3b8784e58f8382f76fbbad04955768c4d786a200e77c2c44133f4a7c10", "stdlib/universe/highestAverage_test.flux": "65744d16b7c5d8ac2f4e3c47621195de1840ad72b12bfbb692d4f645e71a00a8", "stdlib/universe/highestCurrent_test.flux": "c285ff40a7d8789d2c20bcf90d8063b710499ddc24621d627f6693c30351ebe5", "stdlib/universe/highestMax_test.flux": "fff9f21422d2607afb9ff2786d67d173c7cd797418eb7b3521f8cf7e49443b88", - "stdlib/universe/histogram_normalize_test.flux": "745eb384a8c87580196b8928801afef3779f00687747a031d99303ea4e06530a", + "stdlib/universe/histogram_normalize_test.flux": "1e72019710cfb231d43150d941ac9e7e4fe703ea8d5d82bcd310fca854d4d272", "stdlib/universe/histogram_quantile_minvalue_test.flux": "70b762305a5207556c9189ac7a49c4b3099cc1e92a1e5f96fb320b7d79cdb0f2", "stdlib/universe/histogram_quantile_test.flux": "d3f1c99c505dddb7bf9745d3123459e2d8efa610eb3851a137299f6862054dfd", "stdlib/universe/histogram_test.flux": "5d0f3bba9802868dbacda21f3943237dcdc71c046ebb1d55d321f0f4607b921f", @@ -501,8 +501,8 @@ var sourceHashes = map[string]string{ "stdlib/universe/hour_selection_test.flux": "af919d0f515984e1155194a05dbe92d533c212bef1eb23e014e862ffb85810a5", "stdlib/universe/increase_test.flux": "3e94fe06b5b71aadc5ac8b6f070278bba663a9633b4d4b3ec58903792bfe0bc8", "stdlib/universe/integral_test.flux": "7206d881f059f0e6009fe3f0cfeb18b5fcec9ba192b607f63d375541851fcb85", - "stdlib/universe/join_across_measurements_test.flux": "5d04e9054bba2f3a56f17426d142ed907dfecb7d94fe6da37da109aca3aafca4", - "stdlib/universe/join_agg_test.flux": "f5027a3a51f55191265a46c45c85b2b2ef19a8aeb04bc2795ef644132dc1dcc4", + "stdlib/universe/join_across_measurements_test.flux": "4362913c852740d1e55dc13d2b9ac798ffde4263a4b78ca0c96bd5a1333278ad", + "stdlib/universe/join_agg_test.flux": "94c4de77895501644c9fd100dda93d85a76e35b39e97bd801f59f3e6179b337c", 
"stdlib/universe/join_mismatched_schema_test.flux": "4a50ae64612961f8fc9decc0ad87305eb937931d9608f8a9796cedad35699ff8", "stdlib/universe/join_missing_on_col_test.flux": "477ddf1b38200ff1d3ff63b020e5a89c6b0c9adb4f468d69f56176669c4363e3", "stdlib/universe/join_panic_test.flux": "79f4f6c1ca92b211633458b80cb7ced2103d4a8ea47bfbae64421718c01ec54f", @@ -511,9 +511,9 @@ var sourceHashes = map[string]string{ "stdlib/universe/join_unsorted_columns_test.flux": "1e67d4c1aef04040f9ce31cc6a7394ed84c066d286c9ba4d93f5e0db78308632", "stdlib/universe/join_use_previous_test.flux": "6d7ed740ab6ea95da6830285b15d0912971e917624e58c382b29b0c1e809a328", "stdlib/universe/kama_test.flux": "a4c04e049a217333131267aee134f6a6aa3c22493b99bab8cf62ee8c702ed861", - "stdlib/universe/kama_v2_test.flux": "192f79570ad58c6041bda25e807bfb03541aed477d527b75ff9de3c1ec8b6d33", + "stdlib/universe/kama_v2_test.flux": "1c74e4ee5266d78f851041d3264f72436e573dc176cf2653d3ec31958b0dcff9", "stdlib/universe/keep_fn_test.flux": "197e9d0d112d40c468ad73166fb6f2a2e27913ba6f5d145b22cc427020958fdf", - "stdlib/universe/keep_non_existent_only_test.flux": "954a53c22e9563427f47171dc4f0575a13c399157bbaa2821222c1e55a8d9653", + "stdlib/universe/keep_non_existent_only_test.flux": "a7cd89a7d5df8144ccf5471398300feef8bb474d55f088149456459927b9f13e", "stdlib/universe/keep_non_existent_test.flux": "84e94a864b51c1e03915a54440313d3d37e97c742829b9fca7e6f0e39eed951b", "stdlib/universe/keep_test.flux": "aac1c7266e0a6d62fec94e05efab5f1979d5a925380ce131c3a700d6212a6530", "stdlib/universe/ker_test.flux": "8e86291c8cc0cd24692df2128d8b227ca37d62d1161360a17064a0fbfec32858", @@ -526,29 +526,29 @@ var sourceHashes = map[string]string{ "stdlib/universe/lowestAverage_test.flux": "79681d441fc0340d4f76e238f10ccc50dd2b67be62f6c5061157a4e11f634055", "stdlib/universe/lowestCurrent_test.flux": "0110e5d56e9feb926cba8097d418b08fce0a70d194aeb0bf93254272d3f4136b", "stdlib/universe/lowestMin_test.flux": "823a9d836aaf31f1692eadffb46822ab12ac2d5051eafaf4ec4ffc67fb15d2d6", - "stdlib/universe/map_test.flux": "94f5b544780c77ff483941ac580fce4644de943565330bd36ed9b86205b81489", + "stdlib/universe/map_test.flux": "7ecbad2c73db6375056233db770624371af6bd45e836c0b8ddc9768b89e09c77", "stdlib/universe/map_vectorize_conditionals_test.flux": "d03d1e2fd0559188e18f768a908d469c055439bdd34cb72993bd85c0dd77be68", "stdlib/universe/map_vectorize_const_test.flux": "417f2dbb048324eaefd40dddc848685c553730d78c00433166e3bbc0b0e224de", "stdlib/universe/math_fn_constant_test.flux": "8eca1c46dbf36f5f502446545316bbbbe53afedfc7cf4e3b260bcb2e91568830", "stdlib/universe/math_m_max_test.flux": "b55994b73bb128a6818e78fa125fa58d7356bd933360e83cb75f6775281d4245", "stdlib/universe/max_test.flux": "9caaadbcf19a8131c1799e13a5f5f357e983187a0a918a762b6e464d7cc1f7c9", - "stdlib/universe/max_time_test.flux": "5bc244b489db9e2ac893f0db6b918ca7292bf228b1f7f2d83d5c777752ee30fe", + "stdlib/universe/max_time_test.flux": "982b946f7d894fd1e343e42576b0a5d89c3c45cbd2724ddf0dc70afb82bc0e0d", "stdlib/universe/mean_test.flux": "116a45a54db05ef35bed0ab0488d3c8eabf8c596acd475ef8c4a9d52c368a44d", "stdlib/universe/median_column_test.flux": "ad7652f01d22316ff91522431ebc6bd23c8e2a31a6a9702865f57d2953571810", - "stdlib/universe/median_compression_test.flux": "b0e5cd365e7a5998b79fc19962c653e6620052a94e44121b82a9de4768be1346", - "stdlib/universe/median_method_test.flux": "3609bad64384e5664bc87f587d09b975a2b2b11f1bf93cfd4076fbd0338dff32", + "stdlib/universe/median_compression_test.flux": 
"6525711997648a681eff96c03610022abf38b8212d43137190a40160a52df938", + "stdlib/universe/median_method_test.flux": "6480f27d4c36f86d30825970eb0247ca896c6352b52bafb4098bed96a62826ec", "stdlib/universe/median_test.flux": "76b7d0ceb69c6c02a3c68c22757ace595d243be6d8badcf20661a8f66dc179c4", - "stdlib/universe/merge_filter_flag_off_test.flux": "e3bbace624a29e2abd596f67148abd9ca8fc792705c98036310bd4a1b3c0ffe2", - "stdlib/universe/merge_filter_flag_on_test.flux": "718f92af1959470ad1beaf7045fdbbff485e9f2ee395a0216795c3ec99744ff3", - "stdlib/universe/merge_filter_test.flux": "b2b8f95eef0b90b039a779a1f01c69ef9aed2aa3f61e364e426901c505ed93c3", + "stdlib/universe/merge_filter_flag_off_test.flux": "87605fcd4f2bee847cc391812f31f69ddc2398b28e7302b0e748a21def082363", + "stdlib/universe/merge_filter_flag_on_test.flux": "e069a23875c6f774b28ce9e6a916a1b84e46081b17e0a48d32f3f882d01e3d08", + "stdlib/universe/merge_filter_test.flux": "d91ace5f1801ec226cf2ba03296d37d58788e68a6f1fefd80e14cfd02a32d0e2", "stdlib/universe/meta_query_keys_test.flux": "bd8034a73e891ce9d857ade752a49d4f38a0b413f3c8d94e2f8c80452f900970", "stdlib/universe/min_test.flux": "e90e953011e9cf3dc8be498aef3bb4a7d5f087c91df93f3715c1eab91979b012", - "stdlib/universe/mode_string_test.flux": "657a8ce153411e70103460d8d8ecf18e3198450c0bc6eefa33bb1684fa025cd3", + "stdlib/universe/mode_string_test.flux": "8324f06cc3057aeca31c7967d95ebe857ef23c69dbdbb09e6626f1167f3cc885", "stdlib/universe/mode_test.flux": "90221f7e5565fdf9c3f07c7efa2922f786c49674a1b3f8a543219a069968ede3", - "stdlib/universe/moving_average_test.flux": "25d3a25bfbded2cfcfcdeca47e355be705a09cd640447e744cded0f0fe47849d", + "stdlib/universe/moving_average_test.flux": "a30c745f6a9bc155a14204006dc03b82918452f24f6c7b2f43cce4c3f8f0cd29", "stdlib/universe/multiple_range_test.flux": "d01a115a15408039f7eb36fec1f8bfcb0b1a96a6b203ba03e1b0fc42838a7c71", "stdlib/universe/parse_regex_test.flux": "992b3a1e2885dc844d0b7d3e3040aa61252b1d000cd16ab90659890fdb4765e1", - "stdlib/universe/pivot_col_order_test.flux": "98c7fb481ba4eafca961a40220d5e1a3025a8b6982dc17be5f016c48f2124a2f", + "stdlib/universe/pivot_col_order_test.flux": "0340b45a48279f18d67a2a40ba58f4e5d9c19bffec56dabd99d6a7161a0760bd", "stdlib/universe/pivot_fields_test.flux": "1a1517c9f7dd624600f3f3c1c9d01073ed24967975245833a18a20f0e506c0ad", "stdlib/universe/pivot_mean_test.flux": "ecab0e8a5567172b6afefe62ed5900746e60c96d6ac78ab10c3c2cd6b48f1f48", "stdlib/universe/pivot_table_test.flux": "52947b9aa9e499e9d750b6a5e41c5b67e2a0e7f3307b49103651cae525bb0072", @@ -556,13 +556,13 @@ var sourceHashes = map[string]string{ "stdlib/universe/pivot_test.flux": "e976a5124292d2d7d8e9022d085466deb0ad6639d422723bb1d00d698448e573", "stdlib/universe/quantile_agg_test.flux": "184c61f758aa9e1bc4ad42e07a1a43bcafe1061e821f2e644ae695da817375d4", "stdlib/universe/quantile_aggregate_test.flux": "962cdb34f399b3ad901fbdf9375e084abf0a1c260049aa25ead9f72e776ab401", - "stdlib/universe/quantile_defaults_test.flux": "a11cc56cc2f6590f949c38b306f36b3845b116e1a04b4d809ad42ae46bee822d", + "stdlib/universe/quantile_defaults_test.flux": "d3a2a1b42e54af3224390e318b4617800e7a249c0d26f1ea8ccd082ccf1644b8", "stdlib/universe/quantile_tdigest_test.flux": "5c0b1dcd0ed278c1eb5075bad21c026ac184205a04433f0ec3c81784f9e3614e", "stdlib/universe/quantile_test.flux": "fac8116215c48526740805d4d614b84f4a28c54e69a69a1347f3a189ddff690a", "stdlib/universe/range_nsecs_test.flux": "c4f3da9d621e1ab250e13c76dd7d59c44058802570844a9ae6e9e9c5ce8aedcf", - "stdlib/universe/range_stop_test.flux": 
"32d38b0e520c76254388d1439274a54a074ed297ce04aedf9b597f093413ce8f", + "stdlib/universe/range_stop_test.flux": "602201c695c0300e5333a4709b968115f79465f75548c6b017d9bfddd77aff3b", "stdlib/universe/range_test.flux": "c1360537c44a305e8c8c4aef81fbbd5a8f3fcf3eba96544e2ee46938d055bdfe", - "stdlib/universe/reduce_noref_test.flux": "1cb9fcea0ee00b65006efcdf046244edc49954c9e4fe6480b5d9b535e7e5fefc", + "stdlib/universe/reduce_noref_test.flux": "6541839dfebb48bc490a06473d0b11f22475942294a0aa6ff0c13d1a54686f94", "stdlib/universe/reduce_test.flux": "6c3519c948a048506a11676ebc56b5986f12091667966849a1640a4504313f5c", "stdlib/universe/relative_strength_index_test.flux": "fb3f3d935a79e5c7ceedb042622780e671223a69d76b7eae60261ed88596b937", "stdlib/universe/rename_fn_test.flux": "7f2dbdb47f1e0c90b46c89975f20f5c2d74a79645bada2d3238404855bd3b1fb", @@ -592,7 +592,7 @@ var sourceHashes = map[string]string{ "stdlib/universe/sum_test.flux": "63c5078f09cdba7f1c55914431895f1066ffd890d73cb3e9ad20e19375e733cc", "stdlib/universe/table_fns_findcolumn_map_test.flux": "a48985e14997948f9ecd10b94c6efae67f51f2d71508325eb07c77a92eb79748", "stdlib/universe/table_fns_findrecord_map_test.flux": "824b7055f62090954ce31fe33d60e01001e6276045351ae17d813e1bf4803bdf", - "stdlib/universe/table_fns_test.flux": "7cafa2949e715a0fcdc2ffbea1d5a377e74bc9ee0307b76f29b4936d2e3b119b", + "stdlib/universe/table_fns_test.flux": "eb79c996fa3dd9d1e875ff57fcad266a7afeba0b96f9c054a9f92248e52d400d", "stdlib/universe/tail_offset_test.flux": "838ed2329f35f91803707a26161bb5463ca2c3c5a2ff9225630d05ffdca67dee", "stdlib/universe/tail_test.flux": "755f2cb7f03f8589e7392f0c55bef9c59d8f086c4b29232c5c8f2bf034f340f2", "stdlib/universe/task_per_line_test.flux": "e01e162708d1b9ce0e4af9f63cff4435ef6ed9d60851a99eb61cae00a1e36401", @@ -608,9 +608,9 @@ var sourceHashes = map[string]string{ "stdlib/universe/unique_test.flux": "4341d11d277edb94ab41dc98861ff9a97e34b53831e6b4aaeeac9ad26a1e707b", "stdlib/universe/universe.flux": "022e5b6573225e8143330e8c9210100d1307bcb83337eb05f927bbfc02edc3b6", "stdlib/universe/universe_truncateTimeColumn_test.flux": "8acb700c612e9eba87c0525b33fd1f0528e6139cc912ed844932caef25d37b56", - "stdlib/universe/window_aggregate_test.flux": "cd0a1a7e788a50fa04289aa6e8b557f6c960eaf6ae95f9d8c0ff3044a48b4beb", + "stdlib/universe/window_aggregate_test.flux": "c8f66f7ee188bb2e979e5a8b526057b653922197ae441658f7c7f11251c96576", "stdlib/universe/window_default_start_align_test.flux": "0aaf612796fbb5ac421579151ad32a8861f4494a314ea615d0ccedd18067b980", - "stdlib/universe/window_default_test.flux": "6ab858f39c067c8cfe6de82f937109fff92544a57256760f53a191e642f2a902", + "stdlib/universe/window_default_test.flux": "69bfd28f52d5755cb7d751fde06a7120dcb4f9fd6179363425d0e4dbd7f314bb", "stdlib/universe/window_generate_empty_test.flux": "969d2ef16a9e7c17a59d240520fd41efce35a5e7afcdfce45d183ec8d05df472", "stdlib/universe/window_group_mean_ungroup_test.flux": "8a8c4b09eed96ab6b2f0f203c57ae200be97bcde54e491a3582a16406e99fc32", "stdlib/universe/window_location_test.flux": "72180499ed57c8e4c9a68652cd6ceee0f145c4975b838d629b935f43a41a3a90", diff --git a/mock/query.go b/mock/query.go index aa643b6a21..30bf8da97b 100644 --- a/mock/query.go +++ b/mock/query.go @@ -58,6 +58,7 @@ func (q *Query) ProfilerResults() (flux.ResultIterator, error) { // ProduceResults lets the user provide a function to produce results on the channel returned by `Results`. // `resultProvider` should check if `canceled` has been closed before sending results. 
E.g.: // ``` +// // func (results chan<- flux.Result, canceled <-chan struct{}) { // for _, r := range resultsSlice { // select { @@ -68,6 +69,7 @@ func (q *Query) ProfilerResults() (flux.ResultIterator, error) { // } // } // } +// // ``` // `resultProvider` is run in a separate goroutine and Results() is closed after function completion. // ProduceResults can be called only once per Query. diff --git a/mock/source.go b/mock/source.go index 1d8ef82285..ca0e5757e5 100644 --- a/mock/source.go +++ b/mock/source.go @@ -28,7 +28,8 @@ func (s *Source) Run(ctx context.Context) { // CreateMockFromSource will register a mock "from" source. Use it like this in the init() // of your test: -// execute.RegisterSource(influxdb.FromKind, mock.CreateMockFromSource) +// +// execute.RegisterSource(influxdb.FromKind, mock.CreateMockFromSource) func CreateMockFromSource(spec plan.ProcedureSpec, id execute.DatasetID, ctx execute.Administration) (execute.Source, error) { return &Source{}, nil } diff --git a/plan/attributes.go b/plan/attributes.go index b035117c09..37af2023fc 100644 --- a/plan/attributes.go +++ b/plan/attributes.go @@ -113,7 +113,9 @@ func CheckRequiredAttributes(node *PhysicalPlanNode) error { // GetOutputAttribute will return the attribute with the given key // provided by the given plan node, traversing backwards through predecessors // as needed for attributes that may pass through. E.g., -// sort |> filter +// +// sort |> filter +// // The "filter" node will still provide the collation attribute, even though it's // the "sort" node that actually does the collating. func GetOutputAttribute(node Node, attrKey string) PhysicalAttr { diff --git a/plan/format.go b/plan/format.go index b3c8a25abd..f5da41a734 100644 --- a/plan/format.go +++ b/plan/format.go @@ -10,7 +10,8 @@ type FormatOption func(*formatter) // Formatted accepts a plan.Spec and options, and returns a Formatter // that can be used with the standard fmt package, e.g., -// fmt.Println(Formatted(plan, WithDetails()) +// +// fmt.Println(Formatted(plan, WithDetails())) func Formatted(p *Spec, opts ...FormatOption) fmt.Formatter { f := formatter{ p: p, diff --git a/plan/pattern.go b/plan/pattern.go index cd88f3a5c6..e60932d5e1 100644 --- a/plan/pattern.go +++ b/plan/pattern.go @@ -20,10 +20,11 @@ type Pattern interface { // // For example, to construct a pattern that matches a sort node that succeeds a join: // -// sort -// | -// join <=> join(A, B) |> sum() <=> MultiSuccessor(SortKind, SingleSuccessor(JoinKind, AnyMultiSuccessor(), AnyMultiSuccessor())) -// / \ +// sort +// | +// join <=> join(A, B) |> sum() <=> MultiSuccessor(SortKind, SingleSuccessor(JoinKind, AnyMultiSuccessor(), AnyMultiSuccessor())) +// / \ +// // A B func SingleSuccessor(kind ProcedureKind, predecessors ...Pattern) Pattern { return SingleSuccessorOneOf([]ProcedureKind{kind}, predecessors...) @@ -86,9 +87,9 @@ func (p PhysicalOneKindPattern) Match(node Node) bool { // For example, UnionKindPattern( { Proc1Kind, Proc2Kind }, { Pat1, Pat2 } ) // will match either Proc1Kind { Pat1, Pat2 } or Proc2Kind { Pat1, Pat2 } // -// [ ProcedureKind ] -// / | ... \ -// pattern1 pattern2 ... patternK +// [ ProcedureKind ] +// / | ... \ +// pattern1 pattern2 ... 
patternK type UnionKindPattern struct { kinds []ProcedureKind predecessors []Pattern diff --git a/plan/types.go b/plan/types.go index 0bb8b3b053..d4e6b865ab 100644 --- a/plan/types.go +++ b/plan/types.go @@ -80,27 +80,26 @@ func (plan *Spec) Replace(root, with Node) { } // CheckIntegrity checks the integrity of the plan, i.e.: -// - node A is predecessor of B iff B is successor of A; -// - there is no cycle. +// - node A is predecessor of B iff B is successor of A; +// - there is no cycle. // // This check only detects this problem (N2 is predecessor of R, but not vice versa): // -// N1 <----> R -// | -// N2 <------- +// N1 <----> R +// | +// N2 <------- // // And this one (R is successor of N2, but not vice versa): // -// N1 <------- -// | |--> R -// N2 -------- +// N1 <------- +// | |--> R +// N2 -------- // // But not this one, because N2 is not reachable from R (root): // -// N1 <------- -// |--> R -// N2 -------- -// +// N1 <------- +// |--> R +// N2 -------- func (plan *Spec) CheckIntegrity() error { sinks := make([]Node, 0, len(plan.Roots)) for root := range plan.Roots { @@ -217,13 +216,13 @@ func (e *edges) shallowCopy() edges { // MergeToLogicalNode merges top and bottom plan nodes into a new plan node, with the // given procedure spec. // -// V1 V2 V1 V2 <-- successors -// \ / -// top mergedNode -// | ==> | -// bottom W -// | -// W +// V1 V2 V1 V2 <-- successors +// \ / +// top mergedNode +// | ==> | +// bottom W +// | +// W // // The returned node will have its predecessors set to the predecessors // of "bottom"; however, its successors will not be set---it will be the responsibility of @@ -280,13 +279,13 @@ func mergePlanNodes(top, bottom, merged Node) (Node, error) { // SwapPlanNodes swaps two plan nodes and returns an equivalent sub-plan with the nodes swapped. // -// V1 V2 V1 V2 -// \ / -// A B -// | ==> | -// B copy of A -// | | -// W W +// V1 V2 V1 V2 +// \ / +// A B +// | ==> | +// B copy of A +// | | +// W W // // Note that successors of the original top node will not be updated, and the returned // plan node will have no successors. It will be the responsibility of the plan to @@ -319,11 +318,11 @@ func SwapPlanNodes(top, bottom Node) (Node, error) { // ReplaceNode accepts two nodes and attaches // all the predecessors of the old node to the new node. // -// S1 S2 S1 S2 -// \ / -// oldNode => newNode -// / \ / \ -// P1 P2 P1 P2 +// S1 S2 S1 S2 +// \ / +// oldNode => newNode +// / \ / \ +// P1 P2 P1 P2 // // As is convention, newNode will not have any successors attached. // The planner will take care of this. diff --git a/semantic/doc.go b/semantic/doc.go index 276946e1fc..a37de3664e 100644 --- a/semantic/doc.go +++ b/semantic/doc.go @@ -7,6 +7,5 @@ The semantic structures are to be designed to facilitate the interpretation and For example, since Flux uses the JavaScript AST structures, arguments to a function are represented as a single positional argument that is always an object expression. The semantic graph validates that the AST correctly follows these semantics, and uses structures that are strongly typed for this expectation. - */ package semantic diff --git a/semantic/utils.go b/semantic/utils.go index 3156fadec9..dfe54eeda4 100644 --- a/semantic/utils.go +++ b/semantic/utils.go @@ -5,12 +5,11 @@ import "github.com/influxdata/flux/ast" // ConjunctionsToExprSlice finds all children of AndOperators that are not themselves AndOperators, // and returns them in a slice. If the root node of expr is not an AndOperator, just returns expr. 
// -// AND -// / \ -// AND r => {p, q, r} -// / \ -// p q -// +// AND +// / \ +// AND r => {p, q, r} +// / \ +// p q func ConjunctionsToExprSlice(expr Expression) []Expression { if e, ok := expr.(*LogicalExpression); ok && e.Operator == ast.AndOperator { exprSlice := make([]Expression, 0, 2) @@ -25,12 +24,11 @@ func ConjunctionsToExprSlice(expr Expression) []Expression { // ExprsToConjunction accepts a variable number of expressions and ANDs them // together into a single expression. // -// AND -// / \ -// {p, q, r} => AND r -// / \ -// p q -// +// AND +// / \ +// {p, q, r} => AND r +// / \ +// p q func ExprsToConjunction(exprs ...Expression) Expression { if len(exprs) == 0 { return nil @@ -56,12 +54,11 @@ func ExprsToConjunction(exprs ...Expression) Expression { // // Suppose partitionFn returns true for p and r, and false for q: // -// AND passExpr failExpr -// / \ -// AND r => AND q -// / \ / \ -// p q p r -// +// AND passExpr failExpr +// / \ +// AND r => AND q +// / \ / \ +// p q p r func PartitionPredicates(expr Expression, partitionFn func(expression Expression) (bool, error)) (passExpr, failExpr Expression, err error) { exprSlice := ConjunctionsToExprSlice(expr) var passSlice, failSlice []Expression diff --git a/stdlib/contrib/tomhollingworth/events/duration_with_stop_test.flux b/stdlib/contrib/tomhollingworth/events/duration_with_stop_test.flux index eec974e195..977fc50055 100644 --- a/stdlib/contrib/tomhollingworth/events/duration_with_stop_test.flux +++ b/stdlib/contrib/tomhollingworth/events/duration_with_stop_test.flux @@ -46,7 +46,7 @@ outData = ,,1,2018-05-22T19:53:26Z,2018-05-22T19:54:36Z,2018-05-22T19:54:16Z,34.982252364543626,used_percent,disk,disk1s2,apfs,host.local,/,30 " -testcase duration { +testcase duration_with_stop { got = csv.from(csv: inData) |> testing.load() diff --git a/stdlib/date/nanosecond_duration_test.flux b/stdlib/date/nanosecond_duration_test.flux index d337f19ece..3e3fdab529 100644 --- a/stdlib/date/nanosecond_duration_test.flux +++ b/stdlib/date/nanosecond_duration_test.flux @@ -34,7 +34,7 @@ outData = ,,0,2018-01-01T00:00:00Z,2030-01-01T00:00:00Z,FF,_m,2018-05-22T19:06:00.982342357Z,2 " -testcase time_nanosecond { +testcase duration_nanosecond { got = csv.from(csv: inData) |> testing.load() diff --git a/stdlib/dict/dict_insert_remove_test.flux b/stdlib/dict/dict_insert_remove_test.flux index 601598e301..f85aab8ee9 100644 --- a/stdlib/dict/dict_insert_remove_test.flux +++ b/stdlib/dict/dict_insert_remove_test.flux @@ -41,7 +41,7 @@ outData = ,,3,2018-05-22T19:53:36Z,requests,error,unknown,network error,3,-1,-1 " -testcase dict { +testcase dict_insert_remove { got = csv.from(csv: inData) |> testing.load() diff --git a/stdlib/dict/dict_lit_lambda_test.flux b/stdlib/dict/dict_lit_lambda_test.flux index da8c28f47d..aea6164c60 100644 --- a/stdlib/dict/dict_lit_lambda_test.flux +++ b/stdlib/dict/dict_lit_lambda_test.flux @@ -26,7 +26,7 @@ outData = ,,0,2018-05-22T19:53:46Z,_m,_f,2,c " -testcase dict { +testcase dict_lit_lambda { got = csv.from(csv: inData) |> testing.load() diff --git a/stdlib/dict/dict_lit_test.flux b/stdlib/dict/dict_lit_test.flux index 1a5d1a0230..e850d2ba3d 100644 --- a/stdlib/dict/dict_lit_test.flux +++ b/stdlib/dict/dict_lit_test.flux @@ -27,7 +27,7 @@ outData = ,,0,2018-05-22T19:53:46Z,_m,_f,2,c " -testcase dict { +testcase dict_lit { got = csv.from(csv: inData) |> testing.load() diff --git a/stdlib/dict/empty_dict_lambda_test.flux b/stdlib/dict/empty_dict_lambda_test.flux index 5018ca8694..c1e355adcc 100644 --- 
a/stdlib/dict/empty_dict_lambda_test.flux +++ b/stdlib/dict/empty_dict_lambda_test.flux @@ -24,7 +24,7 @@ outData = ,,0,2018-05-22T19:53:36Z,_m,_f,0,2 " -testcase dict { +testcase dict_empty_lambda { got = csv.from(csv: inData) |> testing.load() diff --git a/stdlib/dict/empty_dict_lit_test.flux b/stdlib/dict/empty_dict_lit_test.flux index bd5b6c0e40..1a21f11c72 100644 --- a/stdlib/dict/empty_dict_lit_test.flux +++ b/stdlib/dict/empty_dict_lit_test.flux @@ -25,7 +25,7 @@ outData = ,,0,2018-05-22T19:53:36Z,_m,_f,0,2 " -testcase dict { +testcase dict_empty_lit { got = csv.from(csv: inData) |> testing.load() diff --git a/stdlib/doc.go b/stdlib/doc.go index 12444a0f36..18f00b0a67 100644 --- a/stdlib/doc.go +++ b/stdlib/doc.go @@ -31,13 +31,14 @@ The following registrations are typically executed in the function's init() for Note that to register a function value with a package, the value passed into flux.RegisterPackageValue is computed using the following function: - flux.FunctionValue(name string, c CreateOperationSpec, sig semantic.FunctionPolySignature) + flux.FunctionValue(name string, c CreateOperationSpec, sig semantic.FunctionPolySignature) In the plan phase, an operation spec must be converted to a plan.ProcedureSpec. A query plan must know what operations to carry out, including the function names and parameters. In the trivial case, the OperationSpec and ProcedureSpec have identical fields and the operation spec may be encapsulated as part of the procedure spec. The base interface for a plan.ProcedureSpec requires a Kind() function, as well as a Copy() function which should perform a deep copy of the object. Refer to the following interfaces for more information about designing a procedure spec: + plan.ProcedureSpec plan.PushDownProcedureSpec plan.BoundedProcedureSpec @@ -46,6 +47,7 @@ of the object. Refer to the following interfaces for more information about des plan.ParentAwareProcedureSpec Once you have determined the interface(s) that must be implemented for your function, you register them with + plan.RegisterProcedureSpec(k ProcedureKind, c CreateProcedureSpec, qks ...flux.OperationKind) The registration in this phase creates two lookups. First, it creates a named lookup in a similar fashion as for OperationSpecs @@ -66,6 +68,7 @@ the rules and methods for executing a pushdown operation. A Rewrite rule is used to modify one or more ProcedureSpecs in cases where redundant or complementary operations can be combined to get a simpler result. Similar to a pushdown operation, the rewrite is triggered whenever certain rules apply. Rewrite rules are implemented differently and require a separate registration: + plan.RegisterRewriteRule(r RewriteRule) This in turn requires an implementation of plan.RewriteRule. @@ -74,12 +77,14 @@ Finally, the execute phase is tasked with executing the specific data processing implementation registers an implementation of the execute.Transformation interface that implements functions that control how the execution engine will take an input table, apply the function, and produce an output table. A transformation implementation is registered via: + execute.RegisterTransformation(k plan.ProcedureKind, c execute.CreateTransformation) The registration will record a mapping of the procedure's kind to the given transformation type. 
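As a sketch of how these registration calls fit together (the Example kind and the constructor names below are hypothetical; only the registration functions and their signatures are taken from this documentation), an init() might read:

	func init() {
		// Plan phase: map the operation kind to a procedure spec constructor.
		plan.RegisterProcedureSpec(ExampleKind, newExampleProcedureSpec, ExampleOpKind)
		// Execute phase: map the procedure kind to a transformation constructor.
		execute.RegisterTransformation(ExampleKind, createExampleTransformation)
	}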
In addition to implementing the transformation type, a number of helper types and functions are provided that facilitate the transformation process: + execute.Administration execute.Dataset execute.TableBuilderCache @@ -103,6 +108,7 @@ Finally, there is a special class of functions do not receive an input table fro In other words, these transformations do not have a parent process that supplies them with table data. These transformation functions are referred to as sources, and naturally implement a connection to a data source (e.g. influxdb, prometheus, csvFile, etc.). They are registered using: + execute.RegisterSource(k plan.ProcedureKind, c execute.CreateSource) The substantial part of a source implementation is its Run method, which should connect to the data source, diff --git a/stdlib/experimental/alignTime_test.flux b/stdlib/experimental/alignTime_test.flux index 0190db6d67..d5192816c8 100644 --- a/stdlib/experimental/alignTime_test.flux +++ b/stdlib/experimental/alignTime_test.flux @@ -182,7 +182,7 @@ outData = ,,2,total_cases,covid-19,2020-02-22T00:00:00Z,2020-03-22T00:00:00Z,2020-01-19T00:00:00Z,19624,United States " -testcase set { +testcase align_time { option testing.tags = ["skip"] got = diff --git a/stdlib/experimental/array/array_test.flux b/stdlib/experimental/array/array_test.flux index c2b3424354..d4af28774c 100644 --- a/stdlib/experimental/array/array_test.flux +++ b/stdlib/experimental/array/array_test.flux @@ -4,7 +4,7 @@ package array_test import "experimental/array" import "testing" -testcase array_concat { +testcase array_concat_exp { got = array.from( rows: @@ -27,7 +27,7 @@ testcase array_concat { testing.diff(got, want) } -testcase array_concat_to_empty { +testcase array_concat_to_empty_exp { got = array.from( rows: @@ -40,7 +40,7 @@ testcase array_concat_to_empty { testing.diff(got, want) } -testcase array_map { +testcase array_map_exp { got = array.from( rows: @@ -52,7 +52,7 @@ testcase array_map { testing.diff(want: want, got: got) } -testcase array_filter { +testcase array_filter_exp { got = array.from( rows: diff --git a/stdlib/experimental/bitwise/bitwise_test.flux b/stdlib/experimental/bitwise/bitwise_test.flux index d1c2ae8a38..ceb22735ac 100644 --- a/stdlib/experimental/bitwise/bitwise_test.flux +++ b/stdlib/experimental/bitwise/bitwise_test.flux @@ -6,7 +6,7 @@ import "math" import "experimental/bitwise" import "testing" -testcase uand { +testcase uand_exp { cases = array.from(rows: [{a: 1, b: 1, want: 1}, {a: 1, b: 0, want: 0}, {a: 5, b: 1, want: 1}, {a: 5, b: 4, want: 4}]) |> map(fn: (r) => ({a: uint(v: r.a), b: uint(v: r.b), want: uint(v: r.want)})) @@ -21,7 +21,7 @@ testcase uand { testing.diff(want: want, got: got) } -testcase uor { +testcase uor_exp { cases = array.from(rows: [{a: 1, b: 1, want: 1}, {a: 1, b: 0, want: 1}, {a: 5, b: 1, want: 5}, {a: 5, b: 4, want: 5}]) |> map(fn: (r) => ({a: uint(v: r.a), b: uint(v: r.b), want: uint(v: r.want)})) @@ -37,7 +37,7 @@ testcase uor { testing.diff(want: want, got: got) } -testcase unot { +testcase unot_exp { cases = array.from(rows: [{a: uint(v: 1), want: math.maxuint - uint(v: 1)}, {a: math.maxuint, want: uint(v: 0)}]) got = @@ -51,7 +51,7 @@ testcase unot { testing.diff(want: want, got: got) } -testcase uclear { +testcase uclear_exp { cases = array.from(rows: [{a: 1, b: 1, want: 0}, {a: 1, b: 0, want: 1}, {a: 5, b: 1, want: 4}, {a: 5, b: 4, want: 1}]) |> map(fn: (r) => ({a: uint(v: r.a), b: uint(v: r.b), want: uint(v: r.want)})) @@ -66,7 +66,7 @@ testcase uclear { testing.diff(want: want, got: got) } 
-testcase ulshift { +testcase ulshift_exp { cases = array.from(rows: [{a: 1, b: 1, want: 2}, {a: 1, b: 0, want: 1}, {a: 5, b: 1, want: 10}, {a: 5, b: 4, want: 80}]) |> map(fn: (r) => ({a: uint(v: r.a), b: uint(v: r.b), want: uint(v: r.want)})) @@ -81,7 +81,7 @@ testcase ulshift { testing.diff(want: want, got: got) } -testcase urshift { +testcase urshift_exp { cases = array.from(rows: [{a: 2, b: 1, want: 1}, {a: 1, b: 0, want: 1}, {a: 10, b: 1, want: 5}, {a: 80, b: 4, want: 5}]) |> map(fn: (r) => ({a: uint(v: r.a), b: uint(v: r.b), want: uint(v: r.want)})) @@ -97,7 +97,7 @@ testcase urshift { testing.diff(want: want, got: got) } -testcase sand { +testcase sand_exp { cases = array.from( rows: [ @@ -124,7 +124,7 @@ testcase sand { testing.diff(want: want, got: got) } -testcase sor { +testcase sor_exp { cases = array.from(rows: [{a: 1, b: 1, want: 1}, {a: 1, b: 0, want: 1}, {a: 5, b: 1, want: 5}, {a: 5, b: 4, want: 5}]) @@ -139,7 +139,7 @@ testcase sor { testing.diff(want: want, got: got) } -testcase snot { +testcase snot_exp { cases = array.from(rows: [{a: 1, want: -2}, {a: math.maxint, want: math.minint}]) got = @@ -153,7 +153,7 @@ testcase snot { testing.diff(want: want, got: got) } -testcase sclear { +testcase sclear_exp { cases = array.from(rows: [{a: 1, b: 1, want: 0}, {a: 1, b: 0, want: 1}, {a: 5, b: 1, want: 4}, {a: 5, b: 4, want: 1}]) @@ -167,7 +167,7 @@ testcase sclear { testing.diff(want: want, got: got) } -testcase slshift { +testcase slshift_exp { cases = array.from(rows: [{a: 1, b: 1, want: 2}, {a: 1, b: 0, want: 1}, {a: 5, b: 1, want: 10}, {a: 5, b: 4, want: 80}]) @@ -181,7 +181,7 @@ testcase slshift { testing.diff(want: want, got: got) } -testcase srshift { +testcase srshift_exp { cases = array.from(rows: [{a: 2, b: 1, want: 1}, {a: 1, b: 0, want: 1}, {a: 10, b: 1, want: 5}, {a: 80, b: 4, want: 5}]) diff --git a/stdlib/experimental/date/boundaries/boundaries_test.flux b/stdlib/experimental/date/boundaries/boundaries_test.flux index 1cc190833c..2f79f000ae 100644 --- a/stdlib/experimental/date/boundaries/boundaries_test.flux +++ b/stdlib/experimental/date/boundaries/boundaries_test.flux @@ -17,7 +17,7 @@ testcase yesterday_test { testing.diff(want: want, got: got) } -testcase yesterday_test { +testcase yesterday_test_t { option now = () => 2018-10-12T14:20:11Z ret = boundaries.yesterday() @@ -125,7 +125,7 @@ testcase month_start_two_offset_test { testing.diff(want: want, got: got) } -testcase month_start_two_offset_test { +testcase month_start_two_offset_test_t { option now = () => 2020-12-10T22:10:00Z ret = boundaries.month(month_offset: -2) @@ -194,7 +194,7 @@ testcase tuesday_test_two_timeable { testing.diff(want: want, got: got) } -testcase tuesday_test_two_timeable { +testcase tuesday_test_two_timeable_t { option now = () => 2021-12-30T00:40:44Z option location = timezone.fixed(offset: 6h) diff --git a/stdlib/experimental/geo/gridFilterLevel_test.flux b/stdlib/experimental/geo/gridFilterLevel_test.flux index 08aed08325..23549602a1 100644 --- a/stdlib/experimental/geo/gridFilterLevel_test.flux +++ b/stdlib/experimental/geo/gridFilterLevel_test.flux @@ -213,7 +213,7 @@ outData = ,,3,2019-11-01T00:17:38.287113937Z,89c2664,taxi,start,1572567458287113937,-73.776665,40.645245 " -testcase gridFilter { +testcase grid_filter_level { got = csv.from(csv: inData) |> testing.load() diff --git a/stdlib/experimental/group.go b/stdlib/experimental/group.go index e91076c605..53de76acca 100644 --- a/stdlib/experimental/group.go +++ b/stdlib/experimental/group.go @@ -23,7 +23,9 @@ const ( // 
GroupOpSpec in package experimental defines a special group() function // that has just one mode called "extend", which adds additional columns to the group key. // This is a workaround until schema introspection is implemented: -// https://github.com/influxdata/flux/issues/27 +// +// https://github.com/influxdata/flux/issues/27 +// // Most of this code has simply been copied from stdlib/universe/group.go type GroupOpSpec struct { Mode string `json:"mode"` diff --git a/stdlib/experimental/table/fill_test.flux b/stdlib/experimental/table/fill_test.flux index ddcaa0fe73..967d257e2d 100644 --- a/stdlib/experimental/table/fill_test.flux +++ b/stdlib/experimental/table/fill_test.flux @@ -103,7 +103,7 @@ loadData = () => |> testing.load() |> range(start: 2021-04-13T09:00:00Z, stop: 2021-04-13T10:00:00Z) -testcase window { +testcase window_fill { want = csv.from( csv: diff --git a/stdlib/influxdata/influxdb/monitor/state_changes_invalid_any_to_any_test.flux b/stdlib/influxdata/influxdb/monitor/state_changes_invalid_any_to_any_test.flux index ca6ad1a75d..ee374891c8 100644 --- a/stdlib/influxdata/influxdb/monitor/state_changes_invalid_any_to_any_test.flux +++ b/stdlib/influxdata/influxdb/monitor/state_changes_invalid_any_to_any_test.flux @@ -47,7 +47,7 @@ outData = ,,2,000000000000000a,cpu threshold check,ok,statuses,whoa!,cpu,1527018860000000000,2018-05-22T19:54:22Z,threshold,vaaa,vbbb,cpu-total,host.local,7.05 " -testcase state_changes_any_to_any { +testcase state_changes_invalid_any_to_any { got = csv.from(csv: inData) |> testing.load() diff --git a/stdlib/influxdata/influxdb/tasks/last_success_duration_no_option_test.flux b/stdlib/influxdata/influxdb/tasks/last_success_duration_no_option_test.flux index c48026fdbf..917fa492df 100644 --- a/stdlib/influxdata/influxdb/tasks/last_success_duration_no_option_test.flux +++ b/stdlib/influxdata/influxdb/tasks/last_success_duration_no_option_test.flux @@ -18,7 +18,7 @@ outData = " t_last_success = () => array.from(rows: [{_time: tasks.lastSuccess(orTime: -1d)}]) -testcase last_success { +testcase last_success_duration_no_option { tables = t_last_success() got = tables want = csv.from(csv: outData) diff --git a/stdlib/influxdata/influxdb/tasks/last_success_duration_option_test.flux b/stdlib/influxdata/influxdb/tasks/last_success_duration_option_test.flux index 40e5dae670..83ed5b937f 100644 --- a/stdlib/influxdata/influxdb/tasks/last_success_duration_option_test.flux +++ b/stdlib/influxdata/influxdb/tasks/last_success_duration_option_test.flux @@ -19,7 +19,7 @@ outData = " t_last_success = () => array.from(rows: [{_time: tasks.lastSuccess(orTime: -1d)}]) -testcase last_success { +testcase last_success_duration_option { tables = t_last_success() got = tables want = csv.from(csv: outData) diff --git a/stdlib/influxdata/influxdb/tasks/last_success_with_option_test.flux b/stdlib/influxdata/influxdb/tasks/last_success_with_option_test.flux index 7e5bf91c2b..f5679d0259 100644 --- a/stdlib/influxdata/influxdb/tasks/last_success_with_option_test.flux +++ b/stdlib/influxdata/influxdb/tasks/last_success_with_option_test.flux @@ -19,7 +19,7 @@ outData = " t_last_success = () => array.from(rows: [{_time: tasks.lastSuccess(orTime: now())}]) -testcase last_success { +testcase last_success_with_option { tables = t_last_success() got = tables want = csv.from(csv: outData) diff --git a/stdlib/influxdata/influxdb/tasks/last_success_without_option_test.flux b/stdlib/influxdata/influxdb/tasks/last_success_without_option_test.flux index 390b29e750..42e4139f7c 
100644 --- a/stdlib/influxdata/influxdb/tasks/last_success_without_option_test.flux +++ b/stdlib/influxdata/influxdb/tasks/last_success_without_option_test.flux @@ -18,7 +18,7 @@ outData = " t_last_success = () => array.from(rows: [{_time: tasks.lastSuccess(orTime: now())}]) -testcase last_success { +testcase last_success_without_option { tables = t_last_success() got = tables want = csv.from(csv: outData) diff --git a/stdlib/internal/promql/histogram_quantile.go b/stdlib/internal/promql/histogram_quantile.go index 187b30ba2c..955cb3ec4f 100644 --- a/stdlib/internal/promql/histogram_quantile.go +++ b/stdlib/internal/promql/histogram_quantile.go @@ -346,10 +346,10 @@ func coalesceBuckets(buckets []bucket) []bucket { // The assumption that bucket counts increase monotonically with increasing // upperBound may be violated during: // -// * Recording rule evaluation of histogram_quantile, especially when rate() -// has been applied to the underlying bucket timeseries. -// * Evaluation of histogram_quantile computed over federated bucket -// timeseries, especially when rate() has been applied. +// - Recording rule evaluation of histogram_quantile, especially when rate() +// has been applied to the underlying bucket timeseries. +// - Evaluation of histogram_quantile computed over federated bucket +// timeseries, especially when rate() has been applied. // // This is because scraped data is not made available to rule evaluation or // federation atomically, so some buckets are computed with data from the diff --git a/stdlib/join/merge_join.go b/stdlib/join/merge_join.go index 5a1e77ff32..05fb06d389 100644 --- a/stdlib/join/merge_join.go +++ b/stdlib/join/merge_join.go @@ -184,27 +184,27 @@ func (t *MergeJoinTransformation) processChunk(chunk table.Chunk, state interfac // mergeJoin takes a table chunk, and attempts to produce joined output from it // if possible. It will follow these steps: // -// 1. Scan the rows in `chunk` for a complete join key. If it finds the end -// of a join key, it returns that key, along with all of the rows with that -// join key. If it can't find a complete join key (which means it has reached -// the end of the chunk), it will store the rows with that join key in the -// `chunks` field of `sideState`, and return a nil joinKey, which is the signal -// to break the mergeJoin loop. +// 1. Scan the rows in `chunk` for a complete join key. If it finds the end +// of a join key, it returns that key, along with all of the rows with that +// join key. If it can't find a complete join key (which means it has reached +// the end of the chunk), it will store the rows with that join key in the +// `chunks` field of `sideState`, and return a nil joinKey, which is the signal +// to break the mergeJoin loop. // -// 2. If scanKey finds a complete join key, we pass it and the returned joinRows -// into `insert()`, which will attempt to find the appropriate place for it -// in the joinState's `products` field (while maintaining sort order). If -// `insert()` detects that some subset of the stored products can be joined, -// it will return `true` and a position. The position is the index into -// s.products up to which it is safe to join. +// 2. If scanKey finds a complete join key, we pass it and the returned joinRows +// into `insert()`, which will attempt to find the appropriate place for it +// in the joinState's `products` field (while maintaining sort order). If +// `insert()` detects that some subset of the stored products can be joined, +// it will return `true` and a position. 
The position is the index into +// s.products up to which it is safe to join. // -// 3. For every product in s.products[:i] (inclusive on both ends), `join()` -// will attempt to produce a set of table chunks that contains the joined -// cross-product of the rows on each side of the joinProduct. +// 3. For every product in s.products[:i] (inclusive on both ends), `join()` +// will attempt to produce a set of table chunks that contains the joined +// cross-product of the rows on each side of the joinProduct. // -// 4. Pass each joined chunk onto the next node in the transformation. +// 4. Pass each joined chunk onto the next node in the transformation. // -// 5. Repeat each of the previous steps until every row in `chunk` has been scanned. +// 5. Repeat each of the previous steps until every row in `chunk` has been scanned. func (t *MergeJoinTransformation) mergeJoin(chunk table.Chunk, s *joinState, isLeft bool) error { for { key, rows, err := s.scanKey(chunk, isLeft, t.on) @@ -315,9 +315,10 @@ func getJoinKeyCols(on []ColumnPair, isLeft bool) []string { // the returned position. // // Returns true under 2 circumstances: -// (1) Inserting `rows` completes the left and right pair for a given product -// (2) `rows` was inserted at an index greater than 0, and all of the products that -// come before it only have entries on the opposite side. +// +// (1) Inserting `rows` completes the left and right pair for a given product +// (2) `rows` was inserted at an index greater than 0, and all of the products that +// come before it only have entries on the opposite side. // // If condition 1 is true, we can join everything up to and including the index where // `rows` was inserted. diff --git a/stdlib/planner/group_agg_uneven_keys_test.flux b/stdlib/planner/group_agg_uneven_keys_test.flux index bed5f873f8..aaf4110b8a 100644 --- a/stdlib/planner/group_agg_uneven_keys_test.flux +++ b/stdlib/planner/group_agg_uneven_keys_test.flux @@ -56,7 +56,7 @@ output = ,,2,2018-05-22T19:00:00Z,2030-01-01T00:00:00Z,hostC,o1,load5,1.95 " -testcase group_max_pushdown { +testcase group_agg_uneven_keys_pushdown { got = csv.from(csv: input) |> testing.load() diff --git a/stdlib/planner/group_sum_push_test.flux b/stdlib/planner/group_sum_push_test.flux index d7bf332a62..b2a490c3e3 100644 --- a/stdlib/planner/group_sum_push_test.flux +++ b/stdlib/planner/group_sum_push_test.flux @@ -45,7 +45,7 @@ output = ,,2,2018-05-22T19:00:00Z,2030-01-01T00:00:00Z,hostC,11.52 " -testcase group_count_pushdown { +testcase group_sum_pushdown { got = csv.from(csv: input) |> testing.load() diff --git a/stdlib/planner/window_group_agg_eval_test.flux b/stdlib/planner/window_group_agg_eval_test.flux index 8c81cb873a..601a30fe9c 100644 --- a/stdlib/planner/window_group_agg_eval_test.flux +++ b/stdlib/planner/window_group_agg_eval_test.flux @@ -51,7 +51,7 @@ output = ,,8,2018-05-22T19:54:00Z,2018-05-22T19:54:20Z,hostC,1 " -testcase group_window_agg_pushdown { +testcase group_window_agg_eval_pushdown { got = csv.from(csv: input) |> testing.load() diff --git a/stdlib/planner/window_mean_push_test.flux b/stdlib/planner/window_mean_push_test.flux index 0afa7f8e4d..bbf1b5467d 100644 --- a/stdlib/planner/window_mean_push_test.flux +++ b/stdlib/planner/window_mean_push_test.flux @@ -51,7 +51,7 @@ output = ,,8,2018-05-22T19:54:00Z,2018-05-22T19:54:20Z,system,host.local,load5,2.75 " -testcase window_mean_evaluate { +testcase window_mean_push { got = csv.from(csv: input) |> testing.load() diff --git a/stdlib/planner/window_sum_push_test.flux 
b/stdlib/planner/window_sum_push_test.flux index 62720dbc0f..223c9bf42f 100644 --- a/stdlib/planner/window_sum_push_test.flux +++ b/stdlib/planner/window_sum_push_test.flux @@ -49,7 +49,7 @@ output = ,,8,2018-05-22T19:54:00Z,2018-05-22T19:54:20Z,system,host.local,load5,1.93 " -testcase window_sum_evaluate { +testcase window_sum_push { got = csv.from(csv: input) |> testing.load() diff --git a/stdlib/testing.go b/stdlib/testing.go index 96623fcaf5..06ec9829bc 100644 --- a/stdlib/testing.go +++ b/stdlib/testing.go @@ -73,7 +73,7 @@ func (v testStmtVisitor) Visit(node ast.Node) ast.Visitor { func (v testStmtVisitor) Done(node ast.Node) {} -/// Scans `rootDir` for all packages that contain `testcase` statements and returns them +// Scans `rootDir` for all packages that contain `testcase` statements and returns them func FindTestPackages(rootDir string) ([]*ast.Package, error) { var testPackages []*ast.Package pkgName := "github.com/influxdata/flux/stdlib" diff --git a/stdlib/testing/pandas/contains_filter_by_regex_match_test.flux b/stdlib/testing/pandas/contains_filter_by_regex_match_test.flux index f0d8bac529..c8aa6b0e8b 100644 --- a/stdlib/testing/pandas/contains_filter_by_regex_match_test.flux +++ b/stdlib/testing/pandas/contains_filter_by_regex_match_test.flux @@ -41,7 +41,7 @@ outData = " re = regexp.compile(v: ".*0") -testcase filter_by_regex { +testcase filter_by_regex_match { got = csv.from(csv: inData) |> testing.load() diff --git a/stdlib/testing/pandas/extract_regexp_findStringIndex_test.flux b/stdlib/testing/pandas/extract_regexp_findStringIndex_test.flux index b3b2b2a375..3be5668e93 100644 --- a/stdlib/testing/pandas/extract_regexp_findStringIndex_test.flux +++ b/stdlib/testing/pandas/extract_regexp_findStringIndex_test.flux @@ -36,7 +36,7 @@ outData = " re = regexp.compile(v: "[[:alpha:]]{1}") -testcase string_extract { +testcase string_extract_index { got = csv.from(csv: inData) |> testing.load() diff --git a/stdlib/testing/pandas/partition_strings_splitN_test.flux b/stdlib/testing/pandas/partition_strings_splitN_test.flux index 100162a5db..6d700808d5 100644 --- a/stdlib/testing/pandas/partition_strings_splitN_test.flux +++ b/stdlib/testing/pandas/partition_strings_splitN_test.flux @@ -34,7 +34,7 @@ outData = ,,0,2018-05-22T19:53:26Z,2030-01-01T00:00:00Z,2018-05-22T19:54:16Z,13F2,used_percent,disk,disk1,apfs,host.local,/ " -testcase string_partition { +testcase string_partition_split_n { got = csv.from(csv: inData) |> testing.load() diff --git a/stdlib/testing/pandas/repeat_strings_repeat_test.flux b/stdlib/testing/pandas/repeat_strings_repeat_test.flux index e7be490bd5..a9c86f7e66 100644 --- a/stdlib/testing/pandas/repeat_strings_repeat_test.flux +++ b/stdlib/testing/pandas/repeat_strings_repeat_test.flux @@ -34,7 +34,7 @@ outData = ,,0,2018-05-22T19:53:26Z,2030-01-01T00:00:00Z,2018-05-22T19:54:16Z,13F213F2,used_percent,disk,disk1,apfs,host.local,/ " -testcase string_partition { +testcase string_partition_repeat { got = csv.from(csv: inData) |> testing.load() diff --git a/stdlib/testing/promql/labelReplace_empty_dst_test.flux b/stdlib/testing/promql/labelReplace_empty_dst_test.flux index 4a911b3d3f..12f0077057 100644 --- a/stdlib/testing/promql/labelReplace_empty_dst_test.flux +++ b/stdlib/testing/promql/labelReplace_empty_dst_test.flux @@ -30,7 +30,7 @@ outData = ,,1,metric_name,source-value-20,4,prometheus " -testcase labelReplace { +testcase labelReplace_empty_dst { got = csv.from(csv: inData) |> testing.load() diff --git 
a/stdlib/testing/promql/labelReplace_full_string_match_test.flux b/stdlib/testing/promql/labelReplace_full_string_match_test.flux index 8ca2a6aec1..6557d91610 100644 --- a/stdlib/testing/promql/labelReplace_full_string_match_test.flux +++ b/stdlib/testing/promql/labelReplace_full_string_match_test.flux @@ -30,7 +30,7 @@ outData = ,,1,metric_name,source-value-20,destination-value-20,4,prometheus " -testcase labelReplace { +testcase labelReplace_full_string_match { got = csv.from(csv: inData) |> testing.load() diff --git a/stdlib/testing/promql/labelReplace_multiple_groups_test.flux b/stdlib/testing/promql/labelReplace_multiple_groups_test.flux index b09fa64ae8..f5e246da9f 100644 --- a/stdlib/testing/promql/labelReplace_multiple_groups_test.flux +++ b/stdlib/testing/promql/labelReplace_multiple_groups_test.flux @@ -30,7 +30,7 @@ outData = ,,1,metric_name,source-value-20,source-value-20,4,prometheus " -testcase labelReplace { +testcase labelReplace_multiple_groups { got = csv.from(csv: inData) |> testing.load() diff --git a/stdlib/testing/promql/labelReplace_src_empty_test.flux b/stdlib/testing/promql/labelReplace_src_empty_test.flux index 37a3f1a61e..59c24f16de 100644 --- a/stdlib/testing/promql/labelReplace_src_empty_test.flux +++ b/stdlib/testing/promql/labelReplace_src_empty_test.flux @@ -30,7 +30,7 @@ outData = ,,1,metric_name,source-value-20,value-,4,prometheus " -testcase labelReplace { +testcase labelReplace_src_empty { got = csv.from(csv: inData) |> testing.load() diff --git a/stdlib/testing/promql/labelReplace_src_nonexistent_test.flux b/stdlib/testing/promql/labelReplace_src_nonexistent_test.flux index 31235c7d92..c383d1862e 100644 --- a/stdlib/testing/promql/labelReplace_src_nonexistent_test.flux +++ b/stdlib/testing/promql/labelReplace_src_nonexistent_test.flux @@ -30,7 +30,7 @@ outData = ,,1,metric_name,source-value-20,original-destination-value,4,prometheus " -testcase labelReplace { +testcase labelReplace_src_nonexistent { got = csv.from(csv: inData) |> testing.load() diff --git a/stdlib/testing/promql/labelReplace_src_not_matched_test.flux b/stdlib/testing/promql/labelReplace_src_not_matched_test.flux index 6d0b986ccb..f4ca0a8996 100644 --- a/stdlib/testing/promql/labelReplace_src_not_matched_test.flux +++ b/stdlib/testing/promql/labelReplace_src_not_matched_test.flux @@ -30,7 +30,7 @@ outData = ,,1,metric_name,source-value-20,original-destination-value,4,prometheus " -testcase labelReplace { +testcase labelReplace_src_not_matched { got = csv.from(csv: inData) |> testing.load() diff --git a/stdlib/testing/promql/labelReplace_sub_string_match_test.flux b/stdlib/testing/promql/labelReplace_sub_string_match_test.flux index f9f19520e9..3e4b4f8d1c 100644 --- a/stdlib/testing/promql/labelReplace_sub_string_match_test.flux +++ b/stdlib/testing/promql/labelReplace_sub_string_match_test.flux @@ -30,7 +30,7 @@ outData = ,,1,metric_name,source-value-20,original-destination-value,4,prometheus " -testcase labelReplace { +testcase labelReplace_sub_string_match { got = csv.from(csv: inData) |> testing.load() diff --git a/stdlib/testing/promql/quantile_neg_test.flux b/stdlib/testing/promql/quantile_neg_test.flux index 37de7bffbf..728fdfdb2d 100644 --- a/stdlib/testing/promql/quantile_neg_test.flux +++ b/stdlib/testing/promql/quantile_neg_test.flux @@ -36,7 +36,7 @@ outData = ,,3,2019-01-01T00:00:00Z,2030-01-01T00:00:00Z,Reiva,OAOJWe7,qCnJDC,-Inf " -testcase quantile { +testcase quantile_neg { got = csv.from(csv: inData) |> testing.load() diff --git 
a/stdlib/testing/promql/quantile_pos_test.flux b/stdlib/testing/promql/quantile_pos_test.flux index 3ef5f0afc5..c53c79937d 100644 --- a/stdlib/testing/promql/quantile_pos_test.flux +++ b/stdlib/testing/promql/quantile_pos_test.flux @@ -36,7 +36,7 @@ outData = ,,3,2019-01-01T00:00:00Z,2030-01-01T00:00:00Z,Reiva,OAOJWe7,qCnJDC,+Inf " -testcase quantile { +testcase quantile_pos { got = csv.from(csv: inData) |> testing.load() diff --git a/stdlib/universe/aggregate_window_max_test.flux b/stdlib/universe/aggregate_window_max_test.flux index 97b80990e7..95c8fed89a 100644 --- a/stdlib/universe/aggregate_window_max_test.flux +++ b/stdlib/universe/aggregate_window_max_test.flux @@ -37,7 +37,7 @@ outData = ,,1,2018-05-22T00:00:00Z,2018-05-22T00:01:00Z,2018-05-22T00:01:00Z,used_percent,disk,disk1s1,apfs,host.local,/tmp,45 " -testcase aggregate_window { +testcase aggregate_window_max { got = csv.from(csv: inData) |> testing.load() diff --git a/stdlib/universe/aggregate_window_mean_test.flux b/stdlib/universe/aggregate_window_mean_test.flux index 5be41cf9ae..8e67fc2e63 100644 --- a/stdlib/universe/aggregate_window_mean_test.flux +++ b/stdlib/universe/aggregate_window_mean_test.flux @@ -37,7 +37,7 @@ outData = ,,1,2018-05-22T00:00:00Z,2018-05-22T00:01:00Z,2018-05-22T00:01:00Z,used_percent,disk,disk1s1,apfs,host.local,/tmp,45 " -testcase aggregate_window { +testcase aggregate_window_mean { got = csv.from(csv: inData) |> testing.load() diff --git a/stdlib/universe/difference_keepfirst_test.flux b/stdlib/universe/difference_keepfirst_test.flux index 91883cecc2..276011fb93 100644 --- a/stdlib/universe/difference_keepfirst_test.flux +++ b/stdlib/universe/difference_keepfirst_test.flux @@ -45,7 +45,7 @@ outData = ,,1,2018-05-22T19:53:26Z,2030-01-01T00:00:00Z,2018-05-22T19:54:16Z,0.00021082472700584276,used_percent,disk,disk1s2,apfs,host.local,/ " -testcase difference { +testcase difference_keepfirst { got = csv.from(csv: inData) |> testing.load() diff --git a/stdlib/universe/distinct_test.flux b/stdlib/universe/distinct_test.flux index 6db39717e4..91ed7c4659 100644 --- a/stdlib/universe/distinct_test.flux +++ b/stdlib/universe/distinct_test.flux @@ -56,7 +56,7 @@ testcase normal { testing.diff(got, want) |> yield() } -testcase nulls { +testcase nulls_distinct { got = csv.from( csv: diff --git a/stdlib/universe/duplicate_overwrite_test.flux b/stdlib/universe/duplicate_overwrite_test.flux index d43e1f1203..bd6a1c5816 100644 --- a/stdlib/universe/duplicate_overwrite_test.flux +++ b/stdlib/universe/duplicate_overwrite_test.flux @@ -45,7 +45,7 @@ outData = ,,1,2018-05-22T19:53:26Z,2030-01-01T00:00:00Z,2018-05-22T19:54:16Z,0,usage_guest_nice,cpu,host.local,host.local " -testcase duplicate { +testcase duplicate_overwrite { got = csv.from(csv: inData) |> testing.load() diff --git a/stdlib/universe/fill_bool_test.flux b/stdlib/universe/fill_bool_test.flux index d319977e05..7bfbd56ff6 100644 --- a/stdlib/universe/fill_bool_test.flux +++ b/stdlib/universe/fill_bool_test.flux @@ -45,7 +45,7 @@ outData = ,,1,2018-12-15T00:00:00Z,2030-01-01T00:00:00Z,m1,f1,server02,2018-12-19T22:14:20Z,false " -testcase fill { +testcase fill_bool { option testing.tags = ["skip"] got = diff --git a/stdlib/universe/fill_float_test.flux b/stdlib/universe/fill_float_test.flux index 8eea6364c3..ea473dfc5a 100644 --- a/stdlib/universe/fill_float_test.flux +++ b/stdlib/universe/fill_float_test.flux @@ -45,7 +45,7 @@ outData = ,,1,2018-12-15T00:00:00Z,2030-01-01T00:00:00Z,m1,f1,server02,2018-12-19T22:14:20Z,41.91029522104053 " -testcase fill { 
+testcase fill_float { option testing.tags = ["skip"] got = diff --git a/stdlib/universe/fill_int_test.flux b/stdlib/universe/fill_int_test.flux index 8381f59087..3eba78ab82 100644 --- a/stdlib/universe/fill_int_test.flux +++ b/stdlib/universe/fill_int_test.flux @@ -45,7 +45,7 @@ outData = ,,1,2018-12-15T00:00:00Z,2030-01-01T00:00:00Z,m1,f1,server02,2018-12-19T22:14:20Z,99 " -testcase fill { +testcase fill_int { option testing.tags = ["skip"] got = diff --git a/stdlib/universe/fill_previous_test.flux b/stdlib/universe/fill_previous_test.flux index cab34facc9..8c1b282b65 100644 --- a/stdlib/universe/fill_previous_test.flux +++ b/stdlib/universe/fill_previous_test.flux @@ -45,7 +45,7 @@ outData = ,,1,2018-12-15T00:00:00Z,2030-01-01T00:00:00Z,m1,f1,server02,2018-12-19T22:14:20Z,99 " -testcase fill { +testcase fill_previous { got = csv.from(csv: inData) |> range(start: 2018-12-15T00:00:00Z) diff --git a/stdlib/universe/fill_string_test.flux b/stdlib/universe/fill_string_test.flux index 710f91c8f6..35233798d9 100644 --- a/stdlib/universe/fill_string_test.flux +++ b/stdlib/universe/fill_string_test.flux @@ -45,7 +45,7 @@ outData = ,,1,2018-12-15T00:00:00Z,2030-01-01T00:00:00Z,m1,f1,server02,2018-12-19T22:14:20Z,A " -testcase fill { +testcase fill_string { option testing.tags = ["skip"] got = diff --git a/stdlib/universe/fill_time_test.flux b/stdlib/universe/fill_time_test.flux index c12dd82409..5ef8ebc89a 100644 --- a/stdlib/universe/fill_time_test.flux +++ b/stdlib/universe/fill_time_test.flux @@ -47,7 +47,7 @@ outData = ,,1,2018-12-19T22:13:30Z,2018-12-19T22:14:20Z,m1,f1,server02,2018-12-19T22:14:20Z,A " -testcase fill { +testcase fill_time { option testing.tags = ["skip"] got = diff --git a/stdlib/universe/fill_uint_test.flux b/stdlib/universe/fill_uint_test.flux index 603f7c787f..6ae84cdd37 100644 --- a/stdlib/universe/fill_uint_test.flux +++ b/stdlib/universe/fill_uint_test.flux @@ -45,7 +45,7 @@ outData = ,,1,2018-12-15T00:00:00Z,2030-01-01T00:00:00Z,m1,f1,server02,2018-12-19T22:14:20Z,0 " -testcase fill { +testcase fill_uint { option testing.tags = ["skip"] got = diff --git a/stdlib/universe/filter_by_regex_compile_test.flux b/stdlib/universe/filter_by_regex_compile_test.flux index 45ab064c00..a889d3ac30 100644 --- a/stdlib/universe/filter_by_regex_compile_test.flux +++ b/stdlib/universe/filter_by_regex_compile_test.flux @@ -40,7 +40,7 @@ outData = ,,0,2018-05-20T19:53:26Z,2030-01-01T00:00:00Z,2018-05-22T19:54:16Z,15205755,io_time,diskio,host.local,disk0 " -testcase filter_by_regex { +testcase filter_by_regex_compile { got = csv.from(csv: inData) |> testing.load() diff --git a/stdlib/universe/filter_partial_pushdown_test.flux b/stdlib/universe/filter_partial_pushdown_test.flux index 9b1c4bab4e..03f47deff5 100644 --- a/stdlib/universe/filter_partial_pushdown_test.flux +++ b/stdlib/universe/filter_partial_pushdown_test.flux @@ -39,7 +39,7 @@ outData = ,,0,2018-05-22T19:53:26Z,2030-01-01T00:00:00Z,diskio,io_time,host.local,disk0,2018-05-22T19:54:16Z,15205755 " -testcase filter_by_tags { +testcase filter_partial_pushdown { got = csv.from(csv: inData) |> testing.load() diff --git a/stdlib/universe/group_nulls_test.flux b/stdlib/universe/group_nulls_test.flux index ccf68fa4f4..2a472c9aed 100644 --- a/stdlib/universe/group_nulls_test.flux +++ b/stdlib/universe/group_nulls_test.flux @@ -99,7 +99,7 @@ outData = ,,2,2018-05-22T19:54:16Z,,io_time,diskio,host2 " -testcase group { +testcase group_nulls { option testing.tags = ["skip"] got = diff --git a/stdlib/universe/histogram_normalize_test.flux 
b/stdlib/universe/histogram_normalize_test.flux index 6c6768ad48..75d3d9a814 100644 --- a/stdlib/universe/histogram_normalize_test.flux +++ b/stdlib/universe/histogram_normalize_test.flux @@ -34,7 +34,7 @@ outData = ,,1,2018-05-22T19:53:00Z,y_duration_seconds,2,1,m0 " -testcase histogram { +testcase histogram_normalize { option testing.tags = ["skip"] got = diff --git a/stdlib/universe/holt_winters.go b/stdlib/universe/holt_winters.go index f764569ba1..4233ce74ad 100644 --- a/stdlib/universe/holt_winters.go +++ b/stdlib/universe/holt_winters.go @@ -289,9 +289,10 @@ func (hwt *holtWintersTransformation) Process(id execute.DatasetID, tbl flux.Tab // Rows that have a null timestamp get discarded. // Rows that have a null value are considered invalid, but used by the algorithm. // HoltWinters is supposed to work with evenly spaced values in time, so: -// - the Interval passed to the transformation is used to divide the data in time buckets; -// - if many values are in the same bucket, the first one is selected, the others are skipped; -// - if no value is present for a bucket, that is considered as an invalid value (treated like null values). +// - the Interval passed to the transformation is used to divide the data in time buckets; +// - if many values are in the same bucket, the first one is selected, the others are skipped; +// - if no value is present for a bucket, that is considered as an invalid value (treated like null values). +// // HoltWinters will only be provided with the values returned. // Timestamps can be deduced by adding the interval to the first/last valid timestamp. func (hwt *holtWintersTransformation) getCleanData(tbl flux.Table, colIdx, timeIdx int) (*array.Float, values.Time, values.Time, error) { diff --git a/stdlib/universe/holt_winters/holt_winters.go b/stdlib/universe/holt_winters/holt_winters.go index b6062b791a..71a8a3caf3 100644 --- a/stdlib/universe/holt_winters/holt_winters.go +++ b/stdlib/universe/holt_winters/holt_winters.go @@ -12,8 +12,8 @@ import ( // HoltWinters forecasts a series into the future. // This is done using the Holt-Winters damped method. -// 1. The initial values are calculated using a SSE. -// 2. The series is forecast into the future using the iterative relations. +// 1. The initial values are calculated using a SSE. +// 2. The series is forecast into the future using the iterative relations. type HoltWinters struct { n int s int diff --git a/stdlib/universe/holt_winters_test.go b/stdlib/universe/holt_winters_test.go index 655fe8be36..7a96f78304 100644 --- a/stdlib/universe/holt_winters_test.go +++ b/stdlib/universe/holt_winters_test.go @@ -166,15 +166,19 @@ func TestHoltWinters_PassThrough(t *testing.T) { // The initial data used in tests is obtained from the original (large) dataset with this query: // ``` // SELECT FIRST("water_level") into "first"."autogen"."data" -// FROM "water"."autogen"."h2o_feet" -// WHERE "location"='santa_monica' and time >= '2015-08-22 22:12:00' and time <= '2015-08-28 03:00:00' -// GROUP BY time(379m,348m) +// +// FROM "water"."autogen"."h2o_feet" +// WHERE "location"='santa_monica' and time >= '2015-08-22 22:12:00' and time <= '2015-08-28 03:00:00' +// GROUP BY time(379m,348m) +// // ``` // HoltWinters is then calculated on the database "first": // ``` // SELECT holt_winters(max("first"), 10, 4) -// from "first"."autogen"."data" -// GROUP BY time(379m,348m) +// +// from "first"."autogen"."data" +// GROUP BY time(379m,348m) +// // ``` // We followed a similar procedure for other tests with missing values. 
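A minimal sketch of the bucketing rule described in the getCleanData comment above; the function name and types here are illustrative assumptions, not the actual implementation:

	// bucketFirst keeps the first value seen in each interval-sized bucket;
	// a nil entry marks a bucket with no value, which the algorithm treats as invalid.
	func bucketFirst(times []int64, vals []float64, start, interval int64, n int) []*float64 {
		out := make([]*float64, n)
		for i, t := range times {
			b := int((t - start) / interval)
			if b >= 0 && b < n && out[b] == nil {
				v := vals[i] // later values landing in the same bucket are skipped
				out[b] = &v
			}
		}
		return out
	}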
func TestHoltWinters_Process(t *testing.T) { diff --git a/stdlib/universe/join.go b/stdlib/universe/join.go index 11416b71da..e45eea61ee 100644 --- a/stdlib/universe/join.go +++ b/stdlib/universe/join.go @@ -364,17 +364,20 @@ func (t *mergeJoinTransformation) Finish(id execute.DatasetID, err error) { // buffers: Buffers to hold the tables for each incoming stream. // // postJoinKeys: The post-join group keys for all joined tables. -// These group keys are constructed and stored as soon -// as a table is consumed by the join operator, but prior -// to actually joining the data. +// +// These group keys are constructed and stored as soon +// as a table is consumed by the join operator, but prior +// to actually joining the data. // // reverseLookup: Each output group key that is stored is mapped to its -// corresponding pre-join group keys. These pre-join group -// keys are then used to retrieve their corresponding -// tables from the buffers. +// +// corresponding pre-join group keys. These pre-join group +// keys are then used to retrieve their corresponding +// tables from the buffers. // // tables: All output tables are materialized and stored in this -// map before being sent to downstream operators. +// +// map before being sent to downstream operators. type MergeJoinCache struct { leftID execute.DatasetID rightID execute.DatasetID diff --git a/stdlib/universe/join_across_measurements_test.flux b/stdlib/universe/join_across_measurements_test.flux index d4a03a4223..32f98e4e5a 100644 --- a/stdlib/universe/join_across_measurements_test.flux +++ b/stdlib/universe/join_across_measurements_test.flux @@ -185,7 +185,7 @@ outData = ,,0,used,total,mem,processes,2018-05-22T19:53:00Z,2018-05-22T19:55:00Z,2018-05-22T19:54:16Z,10731827200,417,host.local " -testcase join { +testcase join_across_measurements { table = csv.from(csv: inData) |> testing.load() diff --git a/stdlib/universe/join_agg_test.flux b/stdlib/universe/join_agg_test.flux index 5d7cc9049c..0786c43fce 100644 --- a/stdlib/universe/join_agg_test.flux +++ b/stdlib/universe/join_agg_test.flux @@ -31,7 +31,7 @@ outData = ,,1,disk2,782,0 " -testcase join { +testcase join_agg { table = csv.from(csv: inData) |> testing.load() diff --git a/stdlib/universe/kama_v2_test.flux b/stdlib/universe/kama_v2_test.flux index 9eeb043678..c7cf83c29d 100644 --- a/stdlib/universe/kama_v2_test.flux +++ b/stdlib/universe/kama_v2_test.flux @@ -70,7 +70,7 @@ outData = ,,0,2018-05-22T00:04:40Z,108.42,used_percent,disk " -testcase kama { +testcase kama_v2 { got = csv.from(csv: inData) |> testing.load() diff --git a/stdlib/universe/keep_non_existent_only_test.flux b/stdlib/universe/keep_non_existent_only_test.flux index fec4288886..5a99e87855 100644 --- a/stdlib/universe/keep_non_existent_only_test.flux +++ b/stdlib/universe/keep_non_existent_only_test.flux @@ -38,7 +38,7 @@ outData = " ,result,table " -testcase keep_non_existent { +testcase keep_non_existent_only { got = csv.from(csv: inData) |> testing.load() diff --git a/stdlib/universe/map_test.flux b/stdlib/universe/map_test.flux index 186c3cfd5b..a68a253620 100644 --- a/stdlib/universe/map_test.flux +++ b/stdlib/universe/map_test.flux @@ -39,7 +39,7 @@ testcase basic { testing.diff(want: want, got: got) |> yield() } -testcase nulls { +testcase nulls_map { inData = " #datatype,string,long,dateTime:RFC3339,string,long,string diff --git a/stdlib/universe/max_time_test.flux b/stdlib/universe/max_time_test.flux index 8acabc971d..e30f40867d 100644 --- a/stdlib/universe/max_time_test.flux +++ 
diff --git a/stdlib/universe/join_across_measurements_test.flux b/stdlib/universe/join_across_measurements_test.flux
index d4a03a4223..32f98e4e5a 100644
--- a/stdlib/universe/join_across_measurements_test.flux
+++ b/stdlib/universe/join_across_measurements_test.flux
@@ -185,7 +185,7 @@ outData =
 ,,0,used,total,mem,processes,2018-05-22T19:53:00Z,2018-05-22T19:55:00Z,2018-05-22T19:54:16Z,10731827200,417,host.local
 "
 
-testcase join {
+testcase join_across_measurements {
     table =
         csv.from(csv: inData)
             |> testing.load()
diff --git a/stdlib/universe/join_agg_test.flux b/stdlib/universe/join_agg_test.flux
index 5d7cc9049c..0786c43fce 100644
--- a/stdlib/universe/join_agg_test.flux
+++ b/stdlib/universe/join_agg_test.flux
@@ -31,7 +31,7 @@ outData =
 ,,1,disk2,782,0
 "
 
-testcase join {
+testcase join_agg {
     table =
         csv.from(csv: inData)
             |> testing.load()
diff --git a/stdlib/universe/kama_v2_test.flux b/stdlib/universe/kama_v2_test.flux
index 9eeb043678..c7cf83c29d 100644
--- a/stdlib/universe/kama_v2_test.flux
+++ b/stdlib/universe/kama_v2_test.flux
@@ -70,7 +70,7 @@ outData =
 ,,0,2018-05-22T00:04:40Z,108.42,used_percent,disk
 "
 
-testcase kama {
+testcase kama_v2 {
     got =
         csv.from(csv: inData)
             |> testing.load()
diff --git a/stdlib/universe/keep_non_existent_only_test.flux b/stdlib/universe/keep_non_existent_only_test.flux
index fec4288886..5a99e87855 100644
--- a/stdlib/universe/keep_non_existent_only_test.flux
+++ b/stdlib/universe/keep_non_existent_only_test.flux
@@ -38,7 +38,7 @@ outData =
 "
 ,result,table
 "
-testcase keep_non_existent {
+testcase keep_non_existent_only {
     got =
         csv.from(csv: inData)
             |> testing.load()
diff --git a/stdlib/universe/map_test.flux b/stdlib/universe/map_test.flux
index 186c3cfd5b..a68a253620 100644
--- a/stdlib/universe/map_test.flux
+++ b/stdlib/universe/map_test.flux
@@ -39,7 +39,7 @@ testcase basic {
     testing.diff(want: want, got: got) |> yield()
 }
 
-testcase nulls {
+testcase nulls_map {
     inData =
         "
 #datatype,string,long,dateTime:RFC3339,string,long,string
diff --git a/stdlib/universe/max_time_test.flux b/stdlib/universe/max_time_test.flux
index 8acabc971d..e30f40867d 100644
--- a/stdlib/universe/max_time_test.flux
+++ b/stdlib/universe/max_time_test.flux
@@ -43,7 +43,7 @@ outData =
 ,,3,2018-05-22T19:52:26Z,2018-05-22T19:55:16Z,2018-05-22T19:53:46Z,82.598876953125,used_percent,swap,host.local
 "
 
-testcase max {
+testcase max_time {
     got =
         csv.from(csv: inData)
             |> testing.load()
diff --git a/stdlib/universe/median_compression_test.flux b/stdlib/universe/median_compression_test.flux
index 8873d7749b..bee6b7f540 100644
--- a/stdlib/universe/median_compression_test.flux
+++ b/stdlib/universe/median_compression_test.flux
@@ -28,7 +28,7 @@ outData =
 ,,0,2018-12-01T00:00:00Z,2030-01-01T00:00:00Z,SOYcRk,NC7N,41.666666666666664
 "
 
-testcase median {
+testcase median_compression {
     got =
         csv.from(csv: inData)
             |> testing.load()
diff --git a/stdlib/universe/median_method_test.flux b/stdlib/universe/median_method_test.flux
index 0ec9c74a6f..d6add6f8d6 100644
--- a/stdlib/universe/median_method_test.flux
+++ b/stdlib/universe/median_method_test.flux
@@ -28,7 +28,7 @@ outData =
 ,,0,2018-12-01T00:00:00Z,2030-01-01T00:00:00Z,SOYcRk,NC7N,2018-12-18T21:13:05Z,25
 "
 
-testcase median {
+testcase median_method {
     got =
         csv.from(csv: inData)
             |> testing.load()
diff --git a/stdlib/universe/merge_filter_flag_off_test.flux b/stdlib/universe/merge_filter_flag_off_test.flux
index e975b8c6c1..c332e4933d 100644
--- a/stdlib/universe/merge_filter_flag_off_test.flux
+++ b/stdlib/universe/merge_filter_flag_off_test.flux
@@ -31,7 +31,7 @@ output =
 ,,0,2018-05-22T19:53:26Z,2030-01-01T00:00:00Z,2018-05-22T19:53:46Z,system,host.local,load4,1.77
 "
 
-testcase merge_filter_evaluate {
+testcase merge_filter_flag_off {
     got =
         csv.from(csv: input)
             |> testing.load()
diff --git a/stdlib/universe/merge_filter_flag_on_test.flux b/stdlib/universe/merge_filter_flag_on_test.flux
index 9f52ebd342..8d15ffcaf8 100644
--- a/stdlib/universe/merge_filter_flag_on_test.flux
+++ b/stdlib/universe/merge_filter_flag_on_test.flux
@@ -29,7 +29,7 @@ output =
 ,,0,2018-05-22T19:53:26Z,2030-01-01T00:00:00Z,2018-05-22T19:53:46Z,system,host.local,load4,1.77
 "
 
-testcase merge_filter_evaluate {
+testcase merge_filter_flag_on {
     got =
         csv.from(csv: input)
             |> testing.load()
diff --git a/stdlib/universe/merge_filter_test.flux b/stdlib/universe/merge_filter_test.flux
index c816f106b5..3c29e6067c 100644
--- a/stdlib/universe/merge_filter_test.flux
+++ b/stdlib/universe/merge_filter_test.flux
@@ -43,7 +43,7 @@ merge_filter_fn = () =>
 //        result = merge_filter_fn()
 //        testing.diff(got: result, want: csv.from(csv: output))
 //}
-testcase merge_filter_flag_off {
+testcase merge_filter {
     option planner.disableLogicalRules = ["MergeFiltersRule"]
 
     expect.planner(rules: ["MergeFiltersRule": 0])
diff --git a/stdlib/universe/mode_string_test.flux b/stdlib/universe/mode_string_test.flux
index 23301d5e1f..f81d149546 100644
--- a/stdlib/universe/mode_string_test.flux
+++ b/stdlib/universe/mode_string_test.flux
@@ -28,7 +28,7 @@ outData =
 ,,0,2018-12-01T00:00:00Z,2030-01-01T00:00:00Z,Sgf,DlXwgrw,glass
 "
 
-testcase mode {
+testcase mode_string {
     got =
         csv.from(csv: inData)
             |> testing.load()
diff --git a/stdlib/universe/moving_average_test.flux b/stdlib/universe/moving_average_test.flux
index 88b310c106..eabf2ac0c1 100644
--- a/stdlib/universe/moving_average_test.flux
+++ b/stdlib/universe/moving_average_test.flux
@@ -26,7 +26,7 @@ runTest = (n) =>
         |> debug.slurp()
         |> drop(columns: ["_start", "_stop"])
 
-testcase normal {
+testcase normal_average {
     got = runTest(n: 6)
 
     want = array.from(
diff --git a/stdlib/universe/pivot_col_order_test.flux b/stdlib/universe/pivot_col_order_test.flux
index 31d6efe190..d4c9dc4c07 100644
--- a/stdlib/universe/pivot_col_order_test.flux
+++ b/stdlib/universe/pivot_col_order_test.flux
@@ -52,7 +52,7 @@ outData =
 ,,0,2018-05-22T19:53:26Z,2030-01-01T00:00:00Z,2018-05-22T19:54:16Z,host.local,1.84,1.97,1.93,82.6416015625
 "
 
-testcase pivot {
+testcase pivot_col_order {
     got =
         csv.from(csv: inData)
             |> testing.load()
diff --git a/stdlib/universe/quantile_defaults_test.flux b/stdlib/universe/quantile_defaults_test.flux
index 1a9b0521d8..f8b568f284 100644
--- a/stdlib/universe/quantile_defaults_test.flux
+++ b/stdlib/universe/quantile_defaults_test.flux
@@ -28,7 +28,7 @@ outData =
 ,,0,2018-01-01T00:00:00Z,2030-01-01T00:00:00Z,SOYcRk,NC7N,29.50336437998469
 "
 
-testcase quantile_tdigest {
+testcase quantile_defaults {
     got =
         csv.from(csv: inData)
             |> testing.load()
diff --git a/stdlib/universe/range_stop_test.flux b/stdlib/universe/range_stop_test.flux
index 59395e2cfd..475bb1d6b1 100644
--- a/stdlib/universe/range_stop_test.flux
+++ b/stdlib/universe/range_stop_test.flux
@@ -41,7 +41,7 @@ outData =
 ,,1,2018-05-22T19:53:36Z,2018-05-22T19:54:16Z,2018-05-22T19:54:06Z,0,usage_guest_nice,cpu,cpu-total,host.local
 "
 
-testcase range {
+testcase range_stop {
     got =
         csv.from(csv: inData)
             |> testing.load()
diff --git a/stdlib/universe/reduce_noref_test.flux b/stdlib/universe/reduce_noref_test.flux
index f565dca2f3..86d4fc5b17 100644
--- a/stdlib/universe/reduce_noref_test.flux
+++ b/stdlib/universe/reduce_noref_test.flux
@@ -32,7 +32,7 @@ outData =
 ,,1,2018-05-21T13:09:22.885021542Z,2030-01-01T00:00:00Z,used_percent,swap,host.local2,1.0,3
 "
 
-testcase reduce {
+testcase reduce_noref {
     got =
         csv.from(csv: inData)
             |> testing.load()
diff --git a/stdlib/universe/table_fns_test.flux b/stdlib/universe/table_fns_test.flux
index a098e09e97..7614ab1696 100644
--- a/stdlib/universe/table_fns_test.flux
+++ b/stdlib/universe/table_fns_test.flux
@@ -29,7 +29,7 @@ col =
         |> tableFind(fn: (key) => true)
         |> getColumn(column: "_value")
 
-testcase sum {
+testcase table_fns {
     got =
         data
             |> filter(fn: (r) => contains(value: r._value, set: col))
diff --git a/stdlib/universe/window_aggregate_test.flux b/stdlib/universe/window_aggregate_test.flux
index ca975b3c49..0999dce52c 100644
--- a/stdlib/universe/window_aggregate_test.flux
+++ b/stdlib/universe/window_aggregate_test.flux
@@ -39,7 +39,7 @@ outData =
 ,,3,1970-01-01T00:00:12Z,1970-01-01T00:00:15Z,3,f,m0,k0
 "
 
-testcase window {
+testcase window_aggregate {
     got =
         csv.from(csv: inData)
             |> testing.load()
diff --git a/stdlib/universe/window_default_test.flux b/stdlib/universe/window_default_test.flux
index 0465ab8c86..e42fc9c18f 100644
--- a/stdlib/universe/window_default_test.flux
+++ b/stdlib/universe/window_default_test.flux
@@ -39,7 +39,7 @@ outData =
 ,,0,2018-05-22T19:53:00Z,2018-05-22T19:55:00Z,diskio,2018-05-22T19:54:16Z,7603201.5
 "
 
-testcase window {
+testcase window_default {
     got =
         csv.from(csv: inData)
             |> testing.load()