From 4f57a44acf1c808d9297cebfb20bf125429d7c43 Mon Sep 17 00:00:00 2001
From: Jason Hall
Date: Wed, 29 Jan 2020 10:30:05 -0500
Subject: [PATCH] Add e2e test to cover TaskRun retries

This test creates a PipelineRun with a single always-failing TaskRun
configured to retry N times. It checks that the PipelineRun fails (due
to retry exhaustion), that the single TaskRun created fails, and that
it reports the correct number of retry statuses.

It also checks that the correct number of Pods is created, and it turns
out it isn't! :o We should fix the retry pod count bug, but for now
having a test that covers it is an improvement, and it gives us an easy
repro case for when we do fix it.

After writing this test, I realized TestPipelineRunFailedAndRetry
intended to test the same thing, but was (I think) harder to read, and
ended up only testing aspects of retries that happen to work (namely,
that there are as many retriesStatuses as there should be, not that
there are the correct number of Pods). So I've also deleted that test.
---
 test/artifact_bucket_test.go |   2 +-
 test/retry_test.go           | 139 +++++++++++++++++++++++++++++++++++
 test/timeout_test.go         |  52 -------------
 3 files changed, 140 insertions(+), 53 deletions(-)
 create mode 100644 test/retry_test.go

diff --git a/test/artifact_bucket_test.go b/test/artifact_bucket_test.go
index 0b93af0b35e..0fc1a3338b1 100644
--- a/test/artifact_bucket_test.go
+++ b/test/artifact_bucket_test.go
@@ -61,7 +61,7 @@ func TestStorageBucketPipelineRun(t *testing.T) {
 
 	t.Logf("Creating Secret %s", bucketSecretName)
 	if _, err := c.KubeClient.Kube.CoreV1().Secrets(namespace).Create(getBucketSecret(t, configFilePath, namespace)); err != nil {
-		t.Fatalf("Failed to create Secret `%s`: %s", bucketSecretName, err)
+		t.Fatalf("Failed to create Secret %q: %v", bucketSecretName, err)
 	}
 	defer deleteBucketSecret(c, t, namespace)
 
diff --git a/test/retry_test.go b/test/retry_test.go
new file mode 100644
index 00000000000..1f3d10d0bd0
--- /dev/null
+++ b/test/retry_test.go
@@ -0,0 +1,139 @@
+// +build e2e
+
+/*
+Copyright 2019 The Tekton Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package test
+
+import (
+	"testing"
+	"time"
+
+	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"knative.dev/pkg/apis"
+	duckv1beta1 "knative.dev/pkg/apis/duck/v1beta1"
+	knativetest "knative.dev/pkg/test"
+)
+
+// TestTaskRunRetry tests that retries behave as expected by creating multiple
+// Pods for the same TaskRun each time it fails, up to the configured max.
+func TestTaskRunRetry(t *testing.T) {
+	c, namespace := setup(t)
+	knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf)
+	defer tearDown(t, c, namespace)
+
+	// Create a PipelineRun with a single TaskRun that can only fail,
+	// configured to retry 5 times.
+	pipelineRunName := "retry-pipeline"
+	numRetries := 5
+	if _, err := c.PipelineRunClient.Create(&v1alpha1.PipelineRun{
+		ObjectMeta: metav1.ObjectMeta{Name: pipelineRunName},
+		Spec: v1alpha1.PipelineRunSpec{
+			PipelineSpec: &v1alpha1.PipelineSpec{
+				Tasks: []v1alpha1.PipelineTask{{
+					Name: "retry-me",
+					TaskSpec: &v1alpha1.TaskSpec{
+						Steps: []v1alpha1.Step{{
+							Container: corev1.Container{Image: "busybox"},
+							Script:    "exit 1",
+						}},
+					},
+					Retries: numRetries,
+				}},
+			},
+		},
+	}); err != nil {
+		t.Fatalf("Failed to create PipelineRun %q: %v", pipelineRunName, err)
+	}
+
+	// Wait for the PipelineRun to fail, which happens when retries are exhausted.
+	if err := WaitForPipelineRunState(c, pipelineRunName, time.Minute, PipelineRunFailed(pipelineRunName), "PipelineRunFailed"); err != nil {
+		t.Fatalf("Waiting for PipelineRun to fail: %v", err)
+	}
+
+	// Get the status of the PipelineRun.
+	pr, err := c.PipelineRunClient.Get(pipelineRunName, metav1.GetOptions{})
+	if err != nil {
+		t.Fatalf("Failed to get PipelineRun %q: %v", pipelineRunName, err)
+	}
+
+	// PipelineRunStatus should have exactly 1 TaskRun status, and it should be failed.
+	if len(pr.Status.TaskRuns) != 1 {
+		t.Errorf("Got %d TaskRun statuses, want 1", len(pr.Status.TaskRuns))
+	}
+	for taskRunName, trs := range pr.Status.TaskRuns {
+		if !isFailed(t, taskRunName, trs.Status.Conditions) {
+			t.Errorf("TaskRun status %q is not failed", taskRunName)
+		}
+	}
+
+	// There should only be one TaskRun created.
+	trs, err := c.TaskRunClient.List(metav1.ListOptions{})
+	if err != nil {
+		t.Fatalf("Failed to list TaskRuns: %v", err)
+	} else if len(trs.Items) != 1 {
+		t.Fatalf("Found %d TaskRuns, want 1", len(trs.Items))
+	}
+
+	// The TaskRun status should have N retriesStatuses, all failures.
+	tr := trs.Items[0]
+	podNames := map[string]struct{}{}
+	for idx, r := range tr.Status.RetriesStatus {
+		if !isFailed(t, tr.Name, r.Conditions) {
+			t.Errorf("TaskRun %q retry status %d is not failed", tr.Name, idx)
+		}
+		podNames[r.PodName] = struct{}{}
+	}
+	if len(tr.Status.RetriesStatus) != numRetries {
+		t.Errorf("TaskRun %q had %d retriesStatuses, want %d", tr.Name, len(tr.Status.RetriesStatus), numRetries)
+	}
+
+	// There should be N Pods created, all failed, all owned by the TaskRun.
+	pods, err := c.KubeClient.Kube.CoreV1().Pods(namespace).List(metav1.ListOptions{})
+	if err != nil {
+		t.Fatalf("Failed to list Pods: %v", err)
+	} else if len(pods.Items) != numRetries {
+		// TODO: Make this an error.
+		t.Logf("BUG: Found %d Pods, want %d", len(pods.Items), numRetries)
+	}
+	for _, p := range pods.Items {
+		if _, found := podNames[p.Name]; !found {
+			// TODO: Make this an error.
+			t.Logf("BUG: TaskRunStatus.RetriesStatus did not report pod name %q", p.Name)
+		}
+		if p.Status.Phase != corev1.PodFailed {
+			// TODO: Make this an error.
+			t.Logf("BUG: Pod %q is not failed: %v", p.Name, p.Status.Phase)
+		}
+	}
+}
+
+// isFailed is needed because PipelineRunTaskRunStatus and TaskRunStatus
+// don't have an IsFailed method.
+func isFailed(t *testing.T, taskRunName string, conds duckv1beta1.Conditions) bool {
+	for _, c := range conds {
+		if c.Type == apis.ConditionSucceeded {
+			if c.Status != corev1.ConditionFalse {
+				t.Errorf("TaskRun status %q is not failed, got %q", taskRunName, c.Status)
+			}
+			return true
+		}
+	}
+	t.Errorf("TaskRun status %q had no Succeeded condition", taskRunName)
+	return false
+}
diff --git a/test/timeout_test.go b/test/timeout_test.go
index 292cad7d98a..4efcf46d07d 100644
--- a/test/timeout_test.go
+++ b/test/timeout_test.go
@@ -181,58 +181,6 @@ func TestPipelineRunTimeout(t *testing.T) {
 	}
 }
 
-func TestPipelineRunFailedAndRetry(t *testing.T) {
-	numberOfRetries := 2
-	c, namespace := setup(t)
-	t.Parallel()
-
-	knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf)
-	defer tearDown(t, c, namespace)
-
-	t.Logf("Creating Task in namespace %s", namespace)
-	task := tb.Task("banana", namespace, tb.TaskSpec(
-		tb.Step("busybox", tb.StepCommand("/bin/sh"), tb.StepArgs("-c", "exit 1")),
-	))
-	if _, err := c.TaskClient.Create(task); err != nil {
-		t.Fatalf("Failed to create Task `%s`: %s", "banana", err)
-	}
-
-	pipeline := tb.Pipeline("tomatoes", namespace,
-		tb.PipelineSpec(tb.PipelineTask("foo", "banana", tb.Retries(numberOfRetries))),
-	)
-	pipelineRun := tb.PipelineRun("pear", namespace, tb.PipelineRunSpec(pipeline.Name))
-	if _, err := c.PipelineClient.Create(pipeline); err != nil {
-		t.Fatalf("Failed to create Pipeline `%s`: %s", pipeline.Name, err)
-	}
-	if _, err := c.PipelineRunClient.Create(pipelineRun); err != nil {
-		t.Fatalf("Failed to create PipelineRun `%s`: %s", pipelineRun.Name, err)
-	}
-
-	t.Logf("Waiting for Pipelinerun %s in namespace %s to be started", pipelineRun.Name, namespace)
-	if err := WaitForPipelineRunState(c, pipelineRun.Name, timeout, PipelineRunFailed(pipelineRun.Name), "PipelineRunRunning"); err != nil {
-		t.Fatalf("Error waiting for PipelineRun %s to be failed: %s", pipelineRun.Name, err)
-	}
-
-	r, err := c.PipelineRunClient.Get(pipelineRun.Name, metav1.GetOptions{})
-	if err != nil {
-		t.Fatalf("Error getting pipeline %s", pipelineRun.Name)
-	}
-
-	if len(r.Status.TaskRuns) != 1 {
-		t.Fatalf("Only one TaskRun is expected, but got %d", len(r.Status.TaskRuns))
-	}
-
-	for taskRunName := range r.Status.TaskRuns {
-		taskrun, err := c.TaskRunClient.Get(taskRunName, metav1.GetOptions{})
-		if err != nil {
-			t.Fatalf("Error getting task run %s", taskRunName)
-		}
-		if len(taskrun.Status.RetriesStatus) != numberOfRetries {
-			t.Fatalf("expected %d retry, but got %d", numberOfRetries, len(r.Status.TaskRuns))
-		}
-	}
-}
-
 // TestTaskRunTimeout is an integration test that will verify a TaskRun can be timed out.
 func TestTaskRunTimeout(t *testing.T) {
 	c, namespace := setup(t)