diff --git a/hack/make-rules/test.sh b/hack/make-rules/test.sh
index 75fa127c7e8a9..afddf8df9825d 100755
--- a/hack/make-rules/test.sh
+++ b/hack/make-rules/test.sh
@@ -81,6 +81,8 @@ fi
 # Set to 'y' to keep the verbose stdout from tests when KUBE_JUNIT_REPORT_DIR is
 # set.
 KUBE_KEEP_VERBOSE_TEST_OUTPUT=${KUBE_KEEP_VERBOSE_TEST_OUTPUT:-n}
+# Set to 'false' to disable reduction of the JUnit file to only the top level tests.
+KUBE_PRUNE_JUNIT_TESTS=${KUBE_PRUNE_JUNIT_TESTS:-true}

 kube::test::usage() {
   kube::log::usage_from_stdin <<EOF
[...]
@@ -1115,6 +1116,32 @@ func featureGatesMerge(src map[featuregate.Feature]bool, overrides map[featurega
 	return result
 }

+// fixJSONOutput works around Go not emitting a "pass" action for
+// sub-benchmarks
+// (https://github.com/golang/go/issues/66825#issuecomment-2343229005), which
+// causes gotestsum to report a successful benchmark run as failed
+// (https://github.com/gotestyourself/gotestsum/issues/413#issuecomment-2343206787).
+//
+// It does this by printing the missing "PASS" output line that test2json
+// then converts into the "pass" action.
+func fixJSONOutput(b *testing.B) {
+	if !slices.Contains(os.Args, "-test.v=test2json") {
+		// Not printing JSON.
+		return
+	}
+
+	start := time.Now()
+	b.Cleanup(func() {
+		if b.Failed() {
+			// Really has failed, do nothing.
+			return
+		}
+		// SYN gets injected when using -test.v=test2json, see
+		// https://cs.opensource.google/go/go/+/refs/tags/go1.23.3:src/testing/testing.go;drc=87ec2c959c73e62bfae230ef7efca11ec2a90804;l=527
+		fmt.Fprintf(os.Stderr, "%c--- PASS: %s (%.2fs)\n", 22 /* SYN */, b.Name(), time.Since(start).Seconds())
+	})
+}
+
 // RunBenchmarkPerfScheduling runs the scheduler performance benchmark tests.
 //
 // You can pass your own scheduler plugins via outOfTreePluginRegistry.
@@ -1128,11 +1155,12 @@ func RunBenchmarkPerfScheduling(b *testing.B, configFile string, topicName strin
 	if err = validateTestCases(testCases); err != nil {
 		b.Fatal(err)
 	}
+	fixJSONOutput(b)

 	if testing.Short() {
-		*PerfSchedulingLabelFilter += ",+short"
+		PerfSchedulingLabelFilter += ",+short"
 	}
-	testcaseLabelSelectors := strings.Split(*PerfSchedulingLabelFilter, ",")
+	testcaseLabelSelectors := strings.Split(PerfSchedulingLabelFilter, ",")

 	output := initTestOutput(b)
@@ -1147,11 +1175,13 @@ func RunBenchmarkPerfScheduling(b *testing.B, configFile string, topicName strin
 	dataItems := DataItems{Version: "v1"}
 	for _, tc := range testCases {
 		b.Run(tc.Name, func(b *testing.B) {
+			fixJSONOutput(b)
 			for _, w := range tc.Workloads {
 				b.Run(w.Name, func(b *testing.B) {
 					if !enabled(testcaseLabelSelectors, append(tc.Labels, w.Labels...)...) {
-						b.Skipf("disabled by label filter %v", PerfSchedulingLabelFilter)
+						b.Skipf("disabled by label filter %q", PerfSchedulingLabelFilter)
 					}
+					fixJSONOutput(b)
 					featureGates := featureGatesMerge(tc.FeatureGates, w.FeatureGates)
 					informerFactory, tCtx := setupTestCase(b, tc, featureGates, output, outOfTreePluginRegistry)
@@ -1244,16 +1274,16 @@ func RunIntegrationPerfScheduling(t *testing.T, configFile string) {
 	}

 	if testing.Short() {
-		*TestSchedulingLabelFilter += ",+short"
+		TestSchedulingLabelFilter += ",+short"
 	}
-	testcaseLabelSelectors := strings.Split(*TestSchedulingLabelFilter, ",")
+	testcaseLabelSelectors := strings.Split(TestSchedulingLabelFilter, ",")

 	for _, tc := range testCases {
 		t.Run(tc.Name, func(t *testing.T) {
 			for _, w := range tc.Workloads {
 				t.Run(w.Name, func(t *testing.T) {
 					if !enabled(testcaseLabelSelectors, append(tc.Labels, w.Labels...)...) {
-						t.Skipf("disabled by label filter %q", *TestSchedulingLabelFilter)
+						t.Skipf("disabled by label filter %q", TestSchedulingLabelFilter)
 					}
 					featureGates := featureGatesMerge(tc.FeatureGates, w.FeatureGates)
 					informerFactory, tCtx := setupTestCase(t, tc, featureGates, nil, nil)
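
Two notes on the Go side of the patch. The label-filter hunks drop the pointer dereference (*PerfSchedulingLabelFilter becomes PerfSchedulingLabelFilter), which implies the exported filter variables are now plain strings rather than flag pointers; switching the benchmark skip message from %v to %q also fixes the old output, which formatted a *string and therefore printed a pointer address instead of the filter value.

fixJSONOutput addresses golang/go#66825: the testing package never prints a "--- PASS" line for sub-benchmarks, so test2json emits no "pass" action for them and gotestsum reports an otherwise successful run as failed. The sketch below is a minimal standalone application of the same workaround to an ordinary benchmark suite; the helper name emitPassEvent and BenchmarkOuter are illustrative, not part of this patch, and it assumes (as the patch does) that `go test -json` injects -test.v=test2json into os.Args.

	package demo_test

	import (
		"fmt"
		"os"
		"slices"
		"testing"
		"time"
	)

	// emitPassEvent prints the "--- PASS" framing line that Go omits for
	// sub-benchmarks, but only when the binary runs under test2json,
	// i.e. when `go test -json` put -test.v=test2json into the arguments.
	func emitPassEvent(b *testing.B) {
		if !slices.Contains(os.Args, "-test.v=test2json") {
			return // plain-text mode, nothing to fix
		}
		start := time.Now()
		b.Cleanup(func() {
			if b.Failed() {
				return // genuine failure, leave the output alone
			}
			// 22 is SYN, the marker test2json expects in front of framing lines.
			fmt.Fprintf(os.Stderr, "%c--- PASS: %s (%.2fs)\n", 22, b.Name(), time.Since(start).Seconds())
		})
	}

	func BenchmarkOuter(b *testing.B) {
		emitPassEvent(b)
		b.Run("Inner", func(b *testing.B) {
			emitPassEvent(b)
			for i := 0; i < b.N; i++ {
				_ = i * i
			}
		})
	}

Run through gotestsum (which drives `go test -json`), both BenchmarkOuter and BenchmarkOuter/Inner should then be reported as passed; without the helper, gotestsum flags the run as failed even though every benchmark succeeded, per the issues linked in the patch.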