diff --git a/test/integration/functional_test.go b/test/integration/functional_test.go
index 00379f245b06..100017d42f35 100644
--- a/test/integration/functional_test.go
+++ b/test/integration/functional_test.go
@@ -111,7 +111,9 @@ func validateStartWithProxy(ctx context.Context, t *testing.T, profile string) {
 	if err != nil {
 		t.Fatalf("Failed to set up the test proxy: %s", err)
 	}
-	startArgs := append([]string{"start", "-p", profile, "--wait=false"}, StartArgs()...)
+
+	// Use more memory so that we may reliably fit MySQL and nginx
+	startArgs := append([]string{"start", "-p", profile, "--wait=false", "--memory", "2500MB"}, StartArgs()...)
 	c := exec.CommandContext(ctx, Target(), startArgs...)
 	env := os.Environ()
 	env = append(env, fmt.Sprintf("HTTP_PROXY=%s", srv.Addr))
@@ -159,8 +161,8 @@ func validateKubectlGetPods(ctx context.Context, t *testing.T, profile string) {
 // validateAddonManager asserts that the kube-addon-manager pod is deployed properly
 func validateAddonManager(ctx context.Context, t *testing.T, profile string) {
 	// If --wait=false, this may take a couple of minutes
-	if _, err := PodWait(ctx, t, profile, "kube-system", "component=kube-addon-manager", 3*time.Minute); err != nil {
-		t.Errorf("wait: %v", err)
+	if _, err := PodWait(ctx, t, profile, "kube-system", "component=kube-addon-manager", 5*time.Minute); err != nil {
+		t.Fatalf("wait: %v", err)
 	}
 }
 
@@ -272,7 +274,7 @@ func validateDNS(ctx context.Context, t *testing.T, profile string) {
 		t.Fatalf("%s failed: %v", rr.Args, err)
 	}
 
-	names, err := PodWait(ctx, t, profile, "default", "integration-test=busybox", 3*time.Minute)
+	names, err := PodWait(ctx, t, profile, "default", "integration-test=busybox", 5*time.Minute)
 	if err != nil {
 		t.Fatalf("wait: %v", err)
 	}
@@ -298,7 +300,7 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) {
 	if NoneDriver() {
 		t.Skipf("skipping: cache unsupported by none")
 	}
-	for _, img := range []string{"busybox", "busybox:1.28.4-glibc", "mysql:5.6"} {
+	for _, img := range []string{"busybox", "busybox:1.28.4-glibc", "mysql:5.6", "gcr.io/hello-minikube-zero-install/hello-node"} {
 		rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "cache", "add", img))
 		if err != nil {
 			t.Errorf("%s failed: %v", rr.Args, err)
@@ -407,7 +409,7 @@ func validateServiceCmd(ctx context.Context, t *testing.T, profile string) {
 		t.Logf("%s failed: %v (may not be an error)", rr.Args, err)
 	}
 
-	if _, err := PodWait(ctx, t, profile, "default", "app=hello-node", 4*time.Minute); err != nil {
+	if _, err := PodWait(ctx, t, profile, "default", "app=hello-node", 5*time.Minute); err != nil {
 		t.Fatalf("wait: %v", err)
 	}
 
@@ -550,16 +552,17 @@ func validateMySQL(ctx context.Context, t *testing.T, profile string) {
 		t.Fatalf("%s failed: %v", rr.Args, err)
 	}
 
+	names, err := PodWait(ctx, t, profile, "default", "app=mysql", 5*time.Minute)
+	if err != nil {
+		t.Fatalf("podwait: %v", err)
+	}
+
 	// Retry, as mysqld first comes up without users configured. Scan for names in case of a reschedule.
 	mysql := func() error {
-		names, err := PodWait(ctx, t, profile, "default", "app=mysql", 5*time.Second)
-		if err != nil {
-			return err
-		}
 		rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "exec", names[0], "--", "mysql", "-ppassword", "-e", "show databases;"))
 		return err
 	}
-	if err = retry.Expo(mysql, 1*time.Second, 2*time.Minute); err != nil {
+	if err = retry.Expo(mysql, 5*time.Second, 180*time.Second); err != nil {
 		t.Errorf("mysql failing: %v", err)
 	}
 }
diff --git a/test/integration/helpers.go b/test/integration/helpers.go
index 505d1aeea1d6..1539170f7720 100644
--- a/test/integration/helpers.go
+++ b/test/integration/helpers.go
@@ -177,14 +177,35 @@ func Cleanup(t *testing.T, profile string, cancel context.CancelFunc) {
 // CleanupWithLogs cleans up after a test run, fetching logs and deleting the profile
 func CleanupWithLogs(t *testing.T, profile string, cancel context.CancelFunc) {
 	t.Helper()
-	if t.Failed() && *postMortemLogs {
-		t.Logf("%s failed, collecting logs ...", t.Name())
-		rr, err := Run(t, exec.Command(Target(), "-p", profile, "logs", "--problems"))
+	if !t.Failed() {
+		Cleanup(t, profile, cancel)
+		return
+	}
+
+	t.Logf("*** %s FAILED at %s", t.Name(), time.Now())
+
+	if *postMortemLogs {
+		t.Logf(">>> %s FAILED: start of post-mortem logs >>>", t.Name())
+
+		rr, rerr := Run(t, exec.Command("kubectl", "--context", profile, "get", "po", "-A", "--show-labels"))
+		if rerr != nil {
+			t.Logf("%s: %v", rr.Command(), rerr)
+		}
+		t.Logf("(dbg) %s:\n%s", rr.Command(), rr.Stdout)
+
+		rr, err := Run(t, exec.Command("kubectl", "--context", profile, "describe", "node"))
+		if err != nil {
+			t.Logf("%s: %v", rr.Command(), err)
+		} else {
+			t.Logf("(dbg) %s:\n%s", rr.Command(), rr.Stdout)
+		}
+
+		rr, err = Run(t, exec.Command(Target(), "-p", profile, "logs", "--problems"))
 		if err != nil {
 			t.Logf("failed logs error: %v", err)
 		}
 		t.Logf("%s logs: %s", t.Name(), rr.Stdout)
-		t.Logf("Sorry that %s failed :(", t.Name())
+		t.Logf("<<< %s FAILED: end of post-mortem logs <<<", t.Name())
 	}
 	Cleanup(t, profile, cancel)
 }
@@ -225,11 +246,11 @@ func PodWait(ctx context.Context, t *testing.T, profile string, ns string, selec
 	lastMsg := ""
 
 	start := time.Now()
-	t.Logf("(dbg) waiting for pods with labels %q in namespace %q ...", selector, ns)
+	t.Logf("(dbg) %s: waiting %s for pods matching %q in namespace %q ...", t.Name(), timeout, selector, ns)
 	f := func() (bool, error) {
 		pods, err := client.CoreV1().Pods(ns).List(listOpts)
 		if err != nil {
-			t.Logf("WARNING: pod list for %q %q returned: %v", ns, selector, err)
+			t.Logf("%s: WARNING: pod list for %q %q returned: %v", t.Name(), ns, selector, err)
 			// Don't return the error upwards so that this is retried, in case the apiserver is rescheduled
 			podStart = time.Time{}
 			return false, nil
@@ -254,7 +275,7 @@ func PodWait(ctx context.Context, t *testing.T, profile string, ns string, selec
 		// Long-running process state
 		if pod.Status.Phase != core.PodRunning {
 			if !podStart.IsZero() {
-				t.Logf("WARNING: %s was running %s ago - may be unstable", selector, time.Since(podStart))
+				t.Logf("%s: WARNING: %s was running %s ago - may be unstable", t.Name(), selector, time.Since(podStart))
 			}
 			podStart = time.Time{}
 			return false, nil
@@ -271,31 +292,25 @@ func PodWait(ctx context.Context, t *testing.T, profile string, ns string, selec
 		return false, nil
 	}
 
-	err = wait.PollImmediate(500*time.Millisecond, timeout, f)
+	err = wait.PollImmediate(1*time.Second, timeout, f)
 	names := []string{}
 	for n := range foundNames {
 		names = append(names, n)
 	}
 
 	if err == nil {
-		t.Logf("(dbg) pods %s up and healthy within %s", selector, time.Since(start))
+		t.Logf("(dbg) %s: %s healthy within %s", t.Name(), selector, time.Since(start))
 		return names, nil
 	}
 
-	t.Logf("pod %q failed to start: %v", selector, err)
+	t.Logf("***** %s: pod %q failed to start within %s: %v ****", t.Name(), selector, timeout, err)
 	showPodLogs(ctx, t, profile, ns, names)
 	return names, fmt.Errorf("%s: %v", fmt.Sprintf("%s within %s", selector, timeout), err)
 }
 
 // showPodLogs logs debug info for pods
 func showPodLogs(ctx context.Context, t *testing.T, profile string, ns string, names []string) {
-	rr, rerr := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "po", "-A", "--show-labels"))
-	if rerr != nil {
-		t.Logf("%s: %v", rr.Command(), rerr)
-		// return now, because kubectl is hosed
-		return
-	}
-	t.Logf("(dbg) %s:\n%s", rr.Command(), rr.Stdout)
+	t.Logf("%s: showing logs for failed pods as of %s", t.Name(), time.Now())
 
 	for _, name := range names {
 		rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "describe", "po", name, "-n", ns))
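
The validateMySQL hunk above raises retry.Expo's initial interval from 1s to 5s and hoists PodWait out of the retried closure, so the pod lookup happens once (failing hard on timeout) and only the legitimately flaky mysql exec is retried with backoff. retry.Expo is minikube's exponential-backoff helper (pkg/util/retry); below is a minimal, self-contained sketch of the same pattern using only the standard library. The expoRetry helper and the probe closure are illustrative stand-ins, not minikube's API, and the real helper's backoff policy (capping, jitter) may differ:

package main

import (
	"errors"
	"fmt"
	"time"
)

// expoRetry calls fn until it succeeds, doubling the sleep between attempts
// and giving up once the next attempt would run past maxTime. It mirrors the
// shape of retry.Expo(mysql, 5*time.Second, 180*time.Second) in the diff.
func expoRetry(fn func() error, initial, maxTime time.Duration) error {
	start := time.Now()
	delay := initial
	for {
		err := fn()
		if err == nil {
			return nil
		}
		if time.Since(start)+delay > maxTime {
			return fmt.Errorf("timed out after %s: %w", time.Since(start), err)
		}
		time.Sleep(delay)
		delay *= 2
	}
}

func main() {
	attempts := 0
	// Stand-in for the retried kubectl exec ("mysql -ppassword -e 'show
	// databases;'"): mysqld accepts connections before its users are
	// configured, so the first few attempts are expected to fail.
	probe := func() error {
		attempts++
		if attempts < 3 {
			return errors.New("ERROR 1045 (28000): Access denied")
		}
		return nil
	}
	// Shorter intervals than the test's 5s/180s, just to keep the demo quick.
	if err := expoRetry(probe, 1*time.Second, 30*time.Second); err != nil {
		fmt.Println("mysql failing:", err)
		return
	}
	fmt.Printf("mysql reachable after %d attempts\n", attempts)
}

The same division of labor shows up in PodWait: transient apiserver errors inside the poll function return (false, nil) so polling continues, while only a final timeout surfaces as an error to the caller.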