diff --git a/pkg/pod/entrypoint.go b/pkg/pod/entrypoint.go index 83b323fb7de..2efbcc0b7ea 100644 --- a/pkg/pod/entrypoint.go +++ b/pkg/pod/entrypoint.go @@ -169,7 +169,7 @@ func StopSidecars(nopImage string, kubeclient kubernetes.Interface, pod corev1.P updated := false if newPod.Status.Phase == corev1.PodRunning { for _, s := range newPod.Status.ContainerStatuses { - if IsContainerSidecar(s.Name) && s.State.Running != nil { + if isContainerSidecar(s.Name) && s.State.Running != nil { for j, c := range newPod.Spec.Containers { if c.Name == s.Name && c.Image != nopImage { updated = true @@ -187,16 +187,14 @@ func StopSidecars(nopImage string, kubeclient kubernetes.Interface, pod corev1.P return nil } -// TODO(#1605): Move taskrunpod.go into pkg/pod and unexport these methods. +// isContainerStep returns true if the container name indicates that it represents a step. +func isContainerStep(name string) bool { return strings.HasPrefix(name, stepPrefix) } -// IsContainerStep returns true if the container name indicates that it represents a step. -func IsContainerStep(name string) bool { return strings.HasPrefix(name, stepPrefix) } +// isContainerSidecar returns true if the container name indicates that it represents a sidecar. +func isContainerSidecar(name string) bool { return strings.HasPrefix(name, sidecarPrefix) } -// IsContainerSidecar returns true if the container name indicates that it represents a sidecar. -func IsContainerSidecar(name string) bool { return strings.HasPrefix(name, sidecarPrefix) } +// trimStepPrefix returns the container name, stripped of its step prefix. +func trimStepPrefix(name string) string { return strings.TrimPrefix(name, stepPrefix) } -// TrimStepPrefix returns the container name, stripped of its step prefix. -func TrimStepPrefix(name string) string { return strings.TrimPrefix(name, stepPrefix) } - -// TrimSidecarPrefix returns the container name, stripped of its sidecar prefix. -func TrimSidecarPrefix(name string) string { return strings.TrimPrefix(name, sidecarPrefix) } +// trimSidecarPrefix returns the container name, stripped of its sidecar prefix. +func trimSidecarPrefix(name string) string { return strings.TrimPrefix(name, sidecarPrefix) } diff --git a/pkg/pod/status.go b/pkg/pod/status.go new file mode 100644 index 00000000000..b1639502631 --- /dev/null +++ b/pkg/pod/status.go @@ -0,0 +1,320 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package pod
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"knative.dev/pkg/apis"
+)
+
+const (
+	// ReasonCouldntGetTask indicates that the reason for the failure status is that the
+	// Task couldn't be found
+	ReasonCouldntGetTask = "CouldntGetTask"
+
+	// ReasonFailedResolution indicates that the reason for the failure status is
+	// that references within the TaskRun could not be resolved
+	ReasonFailedResolution = "TaskRunResolutionFailed"
+
+	// ReasonFailedValidation indicates that the reason for the failure status is
+	// that the TaskRun failed runtime validation
+	ReasonFailedValidation = "TaskRunValidationFailed"
+
+	// ReasonRunning indicates that the reason for the in-progress status is that the TaskRun
+	// is just starting to be reconciled
+	ReasonRunning = "Running"
+
+	// ReasonTimedOut indicates that the TaskRun has taken longer than its configured timeout
+	ReasonTimedOut = "TaskRunTimeout"
+
+	// ReasonExceededResourceQuota indicates that the TaskRun failed to create a pod due to
+	// a ResourceQuota in the namespace
+	ReasonExceededResourceQuota = "ExceededResourceQuota"
+
+	// ReasonExceededNodeResources indicates that the TaskRun's pod has failed to start due
+	// to resource constraints on the node
+	ReasonExceededNodeResources = "ExceededNodeResources"
+
+	// ReasonSucceeded indicates that the reason for the finished status is that all of the steps
+	// completed successfully
+	ReasonSucceeded = "Succeeded"
+
+	// ReasonFailed indicates that the reason for the failure status is unknown or that one of the steps failed
+	ReasonFailed = "Failed"
+)
+
+// SidecarsReady returns true if the Pod is running and all of its sidecars
+// are Ready or Terminated.
+func SidecarsReady(podStatus corev1.PodStatus) bool {
+	if podStatus.Phase != corev1.PodRunning {
+		return false
+	}
+	for _, s := range podStatus.ContainerStatuses {
+		if !isContainerSidecar(s.Name) {
+			continue
+		}
+		if s.State.Running != nil && s.Ready {
+			continue
+		}
+		if s.State.Terminated != nil {
+			continue
+		}
+		return false
+	}
+	return true
+}
+
+// MakeTaskRunStatus returns a TaskRunStatus based on the Pod's status.
+func MakeTaskRunStatus(tr v1alpha1.TaskRun, pod *corev1.Pod, taskSpec v1alpha1.TaskSpec) v1alpha1.TaskRunStatus {
+	trs := &tr.Status
+	if trs.GetCondition(apis.ConditionSucceeded) == nil || trs.GetCondition(apis.ConditionSucceeded).Status == corev1.ConditionUnknown {
+		// If the taskRunStatus doesn't exist yet, it's because we just started running
+		trs.SetCondition(&apis.Condition{
+			Type:    apis.ConditionSucceeded,
+			Status:  corev1.ConditionUnknown,
+			Reason:  ReasonRunning,
+			Message: "Not all Steps in the Task have finished executing",
+		})
+	}
+
+	trs.PodName = pod.Name
+
+	trs.Steps = []v1alpha1.StepState{}
+	trs.Sidecars = []v1alpha1.SidecarState{}
+	for _, s := range pod.Status.ContainerStatuses {
+		if isContainerStep(s.Name) {
+			trs.Steps = append(trs.Steps, v1alpha1.StepState{
+				ContainerState: *s.State.DeepCopy(),
+				Name:           trimStepPrefix(s.Name),
+				ContainerName:  s.Name,
+				ImageID:        s.ImageID,
+			})
+		} else if isContainerSidecar(s.Name) {
+			trs.Sidecars = append(trs.Sidecars, v1alpha1.SidecarState{
+				Name:    trimSidecarPrefix(s.Name),
+				ImageID: s.ImageID,
+			})
+		}
+	}
+
+	// The TaskRun is complete if every step has terminated, or the Pod has reached a definitely terminal phase.
+	complete := areStepsComplete(pod) || pod.Status.Phase == corev1.PodSucceeded || pod.Status.Phase == corev1.PodFailed
+
+	if complete {
+		updateCompletedTaskRun(trs, pod)
+	} else {
+		updateIncompleteTaskRun(trs, pod)
+	}
+
+	// Sort step states according to the order of the steps in the TaskSpec.
+	trs.Steps = sortTaskRunStepOrder(trs.Steps, taskSpec.Steps)
+
+	return *trs
+}
+
+func updateCompletedTaskRun(trs *v1alpha1.TaskRunStatus, pod *corev1.Pod) {
+	if didTaskRunFail(pod) {
+		msg := getFailureMessage(pod)
+		trs.SetCondition(&apis.Condition{
+			Type:    apis.ConditionSucceeded,
+			Status:  corev1.ConditionFalse,
+			Reason:  ReasonFailed,
+			Message: msg,
+		})
+	} else {
+		trs.SetCondition(&apis.Condition{
+			Type:    apis.ConditionSucceeded,
+			Status:  corev1.ConditionTrue,
+			Reason:  ReasonSucceeded,
+			Message: "All Steps have completed executing",
+		})
+	}
+	// Update the TaskRun's completion time.
+	trs.CompletionTime = &metav1.Time{Time: time.Now()}
+}
+
+func updateIncompleteTaskRun(trs *v1alpha1.TaskRunStatus, pod *corev1.Pod) {
+	switch pod.Status.Phase {
+	case corev1.PodRunning:
+		trs.SetCondition(&apis.Condition{
+			Type:    apis.ConditionSucceeded,
+			Status:  corev1.ConditionUnknown,
+			Reason:  ReasonRunning,
+			Message: "Not all Steps in the Task have finished executing",
+		})
+	case corev1.PodPending:
+		var reason, msg string
+		if IsPodExceedingNodeResources(pod) {
+			reason = ReasonExceededNodeResources
+			msg = "TaskRun Pod exceeded available resources"
+		} else {
+			reason = "Pending"
+			msg = getWaitingMessage(pod)
+		}
+		trs.SetCondition(&apis.Condition{
+			Type:    apis.ConditionSucceeded,
+			Status:  corev1.ConditionUnknown,
+			Reason:  reason,
+			Message: msg,
+		})
+	}
+}
+
+func didTaskRunFail(pod *corev1.Pod) bool {
+	f := pod.Status.Phase == corev1.PodFailed
+	for _, s := range pod.Status.ContainerStatuses {
+		if isContainerStep(s.Name) {
+			if s.State.Terminated != nil {
+				f = f || s.State.Terminated.ExitCode != 0
+			}
+		}
+	}
+	return f
+}
+
+func areStepsComplete(pod *corev1.Pod) bool {
+	stepsComplete := len(pod.Status.ContainerStatuses) > 0 && pod.Status.Phase == corev1.PodRunning
+	for _, s := range pod.Status.ContainerStatuses {
+		if isContainerStep(s.Name) {
+			if s.State.Terminated == nil {
+				stepsComplete = false
+			}
+		}
+	}
+	return stepsComplete
+}
+
+func getFailureMessage(pod *corev1.Pod) string {
+	// First, try to surface an error about the actual build step that failed.
+	for _, status := range pod.Status.ContainerStatuses {
+		term := status.State.Terminated
+		if term != nil && term.ExitCode != 0 {
+			return fmt.Sprintf("%q exited with code %d (image: %q); for logs run: kubectl -n %s logs %s -c %s",
+				status.Name, term.ExitCode, status.ImageID,
+				pod.Namespace, pod.Name, status.Name)
+		}
+	}
+	// Next, return the Pod's status message if it has one.
+	if pod.Status.Message != "" {
+		return pod.Status.Message
+	}
+	// Lastly, fall back on a generic error message.
+	return "build failed for unspecified reasons."
+}
+
+// IsPodExceedingNodeResources returns true if the Pod's status indicates there
+// are insufficient resources to schedule the Pod.
+func IsPodExceedingNodeResources(pod *corev1.Pod) bool {
+	for _, podStatus := range pod.Status.Conditions {
+		if podStatus.Reason == corev1.PodReasonUnschedulable && strings.Contains(podStatus.Message, "Insufficient") {
+			return true
+		}
+	}
+	return false
+}
+
+func getWaitingMessage(pod *corev1.Pod) string {
+	// First, try to surface a reason for the pending/unknown state from the actual build step.
+	for _, status := range pod.Status.ContainerStatuses {
+		wait := status.State.Waiting
+		if wait != nil && wait.Message != "" {
+			return fmt.Sprintf("build step %q is pending with reason %q",
+				status.Name, wait.Message)
+		}
+	}
+	// Next, try to surface an underlying reason from any Pod condition that is not True.
+	for i, podStatus := range pod.Status.Conditions {
+		if podStatus.Status != corev1.ConditionTrue {
+			return fmt.Sprintf("pod status %q:%q; message: %q",
+				pod.Status.Conditions[i].Type,
+				pod.Status.Conditions[i].Status,
+				pod.Status.Conditions[i].Message)
+		}
+	}
+	// Next, return the Pod's status message if it has one.
+	if pod.Status.Message != "" {
+		return pod.Status.Message
+	}
+
+	// Lastly, fall back on a generic pending message.
+	return "Pending"
+}
+
+// sortTaskRunStepOrder sorts the StepStates in the same order as the original
+// TaskSpec steps.
+func sortTaskRunStepOrder(taskRunSteps []v1alpha1.StepState, taskSpecSteps []v1alpha1.Step) []v1alpha1.StepState {
+	trt := &stepStateSorter{
+		taskRunSteps: taskRunSteps,
+	}
+	trt.mapForSort = trt.constructTaskStepsSorter(taskSpecSteps)
+	sort.Sort(trt)
+	return trt.taskRunSteps
+}
+
+// stepStateSorter implements a sorting mechanism to align the order of the steps in TaskRun
+// with the spec steps in Task.
+type stepStateSorter struct {
+	taskRunSteps []v1alpha1.StepState
+	mapForSort   map[string]int
+}
+
+// constructTaskStepsSorter constructs a map matching the names of
+// the steps to their indices for a task.
+func (trt *stepStateSorter) constructTaskStepsSorter(taskSpecSteps []v1alpha1.Step) map[string]int {
+	sorter := make(map[string]int)
+	for index, step := range taskSpecSteps {
+		sorter[step.Name] = index
+	}
+	return sorter
+}
+
+// changeIndex sorts the steps of the task run, based on the
+// order of the steps in the task. Instead of swapping the element with the one next to it,
+// we swap it directly with the one at its desired index.
+func (trt *stepStateSorter) changeIndex(index int) {
+	// Check if the current index is equal to the desired index. If they are equal, do not swap; if they
+	// are not equal, swap the element at index with the one at the desired index.
+	desiredIndex, exist := trt.mapForSort[trt.taskRunSteps[index].Name]
+	if exist && index != desiredIndex {
+		trt.taskRunSteps[desiredIndex], trt.taskRunSteps[index] = trt.taskRunSteps[index], trt.taskRunSteps[desiredIndex]
+	}
+}
+
+func (trt *stepStateSorter) Len() int { return len(trt.taskRunSteps) }
+
+func (trt *stepStateSorter) Swap(i, j int) {
+	trt.changeIndex(j)
+	// During sort.Sort, j never reaches the last index, so when i reaches the
+	// end of the slice we check whether the last element also needs a swap.
+	if i == trt.Len()-1 {
+		trt.changeIndex(i)
+	}
+}
+
+func (trt *stepStateSorter) Less(i, j int) bool {
+	// The reordering logic lives in Swap, which decides whether and how to
+	// move each element. Always return true so that sort.Sort visits every
+	// element of the slice and calls Swap on it.
+	return true
+}
diff --git a/pkg/status/taskrunpod_test.go b/pkg/pod/status_test.go
similarity index 78%
rename from pkg/status/taskrunpod_test.go
rename to pkg/pod/status_test.go
index d6d0f5067f1..a1ae052d6df 100644
--- a/pkg/status/taskrunpod_test.go
+++ b/pkg/pod/status_test.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package status
+package pod
 
 import (
 	"testing"
@@ -22,33 +22,21 @@ import (
 	"github.com/google/go-cmp/cmp"
 	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
-	fakeclientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake"
-	informers "github.com/tektoncd/pipeline/pkg/client/informers/externalversions"
-	tb "github.com/tektoncd/pipeline/test/builder"
-	"go.uber.org/zap"
-	"go.uber.org/zap/zaptest/observer"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	fakekubeclientset "k8s.io/client-go/kubernetes/fake"
 	"knative.dev/pkg/apis"
 	duckv1beta1 "knative.dev/pkg/apis/duck/v1beta1"
 )
 
 var ignoreVolatileTime = cmp.Comparer(func(_, _ apis.VolatileTime) bool { return true })
 
-func TestUpdateStatusFromPod(t *testing.T) {
+func TestMakeTaskRunStatus(t *testing.T) {
 	conditionRunning := apis.Condition{
 		Type:    apis.ConditionSucceeded,
 		Status:  corev1.ConditionUnknown,
 		Reason:  ReasonRunning,
 		Message: "Not all Steps in the Task have finished executing",
 	}
-	conditionTrue := apis.Condition{
-		Type:    apis.ConditionSucceeded,
-		Status:  corev1.ConditionTrue,
-		Reason:  ReasonSucceeded,
-		Message: "All Steps have completed executing",
-	}
 	for _, c := range []struct {
 		desc      string
 		podStatus corev1.PodStatus
@@ -146,7 +134,12 @@ func TestUpdateStatusFromPod(t *testing.T) {
 		},
 		want: v1alpha1.TaskRunStatus{
 			Status: duckv1beta1.Status{
-				Conditions: []apis.Condition{conditionTrue},
+				Conditions: []apis.Condition{{
+					Type:    apis.ConditionSucceeded,
+					Status:  corev1.ConditionTrue,
+					Reason:  ReasonSucceeded,
+					Message: "All Steps have completed executing",
+				}},
 			},
 			Steps: []v1alpha1.StepState{{
 				ContainerState: corev1.ContainerState{
@@ -369,7 +362,7 @@ func TestUpdateStatusFromPod(t *testing.T) {
 					Type:    apis.ConditionSucceeded,
 					Status:  corev1.ConditionUnknown,
 					Reason:  ReasonExceededNodeResources,
-					Message: `TaskRun pod "taskRun" exceeded available resources`,
+					Message: "TaskRun Pod exceeded available resources",
 				}},
 			},
 			Steps: []v1alpha1.StepState{},
@@ -379,22 +372,19 @@ func TestUpdateStatusFromPod(t *testing.T) {
 		desc: "with-sidecar-running",
 		podStatus: corev1.PodStatus{
 			Phase: corev1.PodRunning,
-			ContainerStatuses: []corev1.ContainerStatus{
-				{
-					Name: "step-running-step",
-					State: corev1.ContainerState{
-						Running:
&corev1.ContainerStateRunning{}, - }, + ContainerStatuses: []corev1.ContainerStatus{{ + Name: "step-running-step", + State: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{}, }, - { - Name: "sidecar-running", - ImageID: "image-id", - State: corev1.ContainerState{ - Running: &corev1.ContainerStateRunning{}, - }, - Ready: true, + }, { + Name: "sidecar-running", + ImageID: "image-id", + State: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{}, }, - }, + Ready: true, + }}, }, want: v1alpha1.TaskRunStatus{ Status: duckv1beta1.Status{ @@ -414,33 +404,8 @@ func TestUpdateStatusFromPod(t *testing.T) { }, }} { t.Run(c.desc, func(t *testing.T) { - observer, _ := observer.New(zap.InfoLevel) - logger := zap.New(observer).Sugar() - fakeClient := fakeclientset.NewSimpleClientset() - sharedInfomer := informers.NewSharedInformerFactory(fakeClient, 0) - pipelineResourceInformer := sharedInfomer.Tekton().V1alpha1().PipelineResources() - resourceLister := pipelineResourceInformer.Lister() - fakekubeclient := fakekubeclientset.NewSimpleClientset() - - rs := []*v1alpha1.PipelineResource{{ - ObjectMeta: metav1.ObjectMeta{ - Name: "source-image", - Namespace: "marshmallow", - }, - Spec: v1alpha1.PipelineResourceSpec{ - Type: "image", - }, - }} - - for _, r := range rs { - err := pipelineResourceInformer.Informer().GetIndexer().Add(r) - if err != nil { - t.Errorf("pipelineResourceInformer.Informer().GetIndexer().Add(r) failed with err: %s", err) - } - } - now := metav1.Now() - p := &corev1.Pod{ + pod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod", Namespace: "foo", @@ -449,8 +414,17 @@ func TestUpdateStatusFromPod(t *testing.T) { Status: c.podStatus, } startTime := time.Date(2010, 1, 1, 1, 1, 1, 1, time.UTC) - tr := tb.TaskRun("taskRun", "foo", tb.TaskRunStatus(tb.TaskRunStartTime(startTime))) - UpdateStatusFromPod(tr, p, resourceLister, fakekubeclient, logger) + tr := v1alpha1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "task-run", + Namespace: "foo", + }, + Status: v1alpha1.TaskRunStatus{ + StartTime: &metav1.Time{Time: startTime}, + }, + } + + got := MakeTaskRunStatus(tr, pod, v1alpha1.TaskSpec{}) // Common traits, set for test case brevity. 
c.want.PodName = "pod" @@ -462,9 +436,8 @@ func TestUpdateStatusFromPod(t *testing.T) { } return y != nil }) - if d := cmp.Diff(c.want, tr.Status, ignoreVolatileTime, ensureTimeNotNil); d != "" { - t.Errorf("Wanted:%s %v", c.desc, c.want.Conditions[0]) - t.Errorf("Diff:\n%s", d) + if d := cmp.Diff(c.want, got, ignoreVolatileTime, ensureTimeNotNil); d != "" { + t.Errorf("Diff(-want, +got): %s", d) } if tr.Status.StartTime.Time != c.want.StartTime.Time { t.Errorf("Expected TaskRun startTime to be unchanged but was %s", tr.Status.StartTime) @@ -473,27 +446,23 @@ func TestUpdateStatusFromPod(t *testing.T) { } } -func TestCountSidecars(t *testing.T) { - tests := []struct { - description string - expectedCount int - expectedReadyOrTerminated int - statuses []corev1.ContainerStatus +func TestSidecarsReady(t *testing.T) { + for _, c := range []struct { + desc string + statuses []corev1.ContainerStatus + want bool }{{ - description: "three steps and no sidecars", - expectedCount: 0, - expectedReadyOrTerminated: 0, + desc: "no sidecars", statuses: []corev1.ContainerStatus{ - {Name: "step-foo"}, - {Name: "step-bar"}, - {Name: "step-baz"}, + {Name: "step-ignore-me"}, + {Name: "step-ignore-me"}, + {Name: "step-ignore-me"}, }, + want: true, }, { - description: "one step and two sidecars both terminated or ready", - expectedCount: 2, - expectedReadyOrTerminated: 2, + desc: "both sidecars ready", statuses: []corev1.ContainerStatus{ - {Name: "step-foo"}, + {Name: "step-ignore-me"}, { Name: "sidecar-bar", Ready: true, @@ -502,7 +471,9 @@ func TestCountSidecars(t *testing.T) { StartedAt: metav1.NewTime(time.Now()), }, }, - }, { + }, + {Name: "step-ignore-me"}, + { Name: "sidecar-stopped-baz", State: corev1.ContainerState{ Terminated: &corev1.ContainerStateTerminated{ @@ -510,11 +481,11 @@ func TestCountSidecars(t *testing.T) { }, }, }, + {Name: "step-ignore-me"}, }, + want: true, }, { - description: "one step and two sidecars one ready and one not", - expectedCount: 2, - expectedReadyOrTerminated: 1, + desc: "one sidecar ready, one not running", statuses: []corev1.ContainerStatus{ {Name: "step-ignore-me"}, { @@ -526,30 +497,96 @@ func TestCountSidecars(t *testing.T) { }, }, }, + {Name: "step-ignore-me"}, { Name: "sidecar-unready", + State: corev1.ContainerState{ + Waiting: &corev1.ContainerStateWaiting{}, + }, + }, + {Name: "step-ignore-me"}, + }, + want: false, + }, { + desc: "one sidecar running but not ready", + statuses: []corev1.ContainerStatus{ + {Name: "step-ignore-me"}, + { + Name: "sidecar-running-not-ready", + Ready: false, // Not ready. 
State: corev1.ContainerState{ Running: &corev1.ContainerStateRunning{ StartedAt: metav1.NewTime(time.Now()), }, }, }, + {Name: "step-ignore-me"}, }, - }} - for _, test := range tests { - t.Run(test.description, func(t *testing.T) { - p := &corev1.Pod{ - Status: corev1.PodStatus{ - ContainerStatuses: test.statuses, - }, - } - count, readyOrTerminated := countSidecars(p) - if count != test.expectedCount { - t.Errorf("incorrect count of sidecars, expected %d got %d", test.expectedCount, count) - } - if readyOrTerminated != test.expectedReadyOrTerminated { - t.Errorf("incorrect count of ready or terminated sidecars, expected %d got %d", test.expectedReadyOrTerminated, readyOrTerminated) + want: false, + }} { + t.Run(c.desc, func(t *testing.T) { + got := SidecarsReady(corev1.PodStatus{ + Phase: corev1.PodRunning, + ContainerStatuses: c.statuses, + }) + if got != c.want { + t.Errorf("SidecarsReady got %t, want %t", got, c.want) } }) } } + +func TestSortTaskRunStepOrder(t *testing.T) { + steps := []v1alpha1.Step{{Container: corev1.Container{ + Name: "hello", + }}, {Container: corev1.Container{ + Name: "exit", + }}, {Container: corev1.Container{ + Name: "world", + }}} + + stepStates := []v1alpha1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 0, + Reason: "Completed", + }, + }, + Name: "world", + }, { + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 1, + Reason: "Error", + }, + }, + Name: "exit", + }, { + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 0, + Reason: "Completed", + }, + }, + Name: "hello", + }, { + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 0, + Reason: "Completed", + }, + }, + Name: "nop", + }} + + gotStates := sortTaskRunStepOrder(stepStates, steps) + var gotNames []string + for _, g := range gotStates { + gotNames = append(gotNames, g.Name) + } + + want := []string{"hello", "exit", "world", "nop"} + if d := cmp.Diff(want, gotNames); d != "" { + t.Errorf("Unexpected step order (-want, +got): %s", d) + } +} diff --git a/pkg/reconciler/taskrun/taskrun.go b/pkg/reconciler/taskrun/taskrun.go index 8b866cf8c7b..f930d3fe6e8 100644 --- a/pkg/reconciler/taskrun/taskrun.go +++ b/pkg/reconciler/taskrun/taskrun.go @@ -33,7 +33,6 @@ import ( "github.com/tektoncd/pipeline/pkg/reconciler" "github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources" "github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources/cloudevent" - "github.com/tektoncd/pipeline/pkg/status" "go.uber.org/zap" "golang.org/x/xerrors" corev1 "k8s.io/api/core/v1" @@ -243,7 +242,7 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1alpha1.TaskRun) error tr.Status.SetCondition(&apis.Condition{ Type: apis.ConditionSucceeded, Status: corev1.ConditionFalse, - Reason: status.ReasonFailedResolution, + Reason: podconvert.ReasonFailedResolution, Message: err.Error(), }) return nil @@ -286,7 +285,7 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1alpha1.TaskRun) error tr.Status.SetCondition(&apis.Condition{ Type: apis.ConditionSucceeded, Status: corev1.ConditionFalse, - Reason: status.ReasonFailedResolution, + Reason: podconvert.ReasonFailedResolution, Message: err.Error(), }) return nil @@ -297,7 +296,7 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1alpha1.TaskRun) error tr.Status.SetCondition(&apis.Condition{ Type: apis.ConditionSucceeded, Status: corev1.ConditionFalse, - 
Reason: status.ReasonFailedValidation, + Reason: podconvert.ReasonFailedValidation, Message: err.Error(), }) return nil @@ -338,26 +337,25 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1alpha1.TaskRun) error return err } - if status.IsPodExceedingNodeResources(pod) { - c.Recorder.Eventf(tr, corev1.EventTypeWarning, status.ReasonExceededNodeResources, "Insufficient resources to schedule pod %q", pod.Name) + if podconvert.IsPodExceedingNodeResources(pod) { + c.Recorder.Eventf(tr, corev1.EventTypeWarning, podconvert.ReasonExceededNodeResources, "Insufficient resources to schedule pod %q", pod.Name) } - before := tr.Status.GetCondition(apis.ConditionSucceeded) + if podconvert.SidecarsReady(pod.Status) { + if err := podconvert.UpdateReady(c.KubeClientSet, *pod); err != nil { + return err + } + } - addReady := status.UpdateStatusFromPod(tr, pod, c.resourceLister, c.KubeClientSet, c.Logger) + before := tr.Status.GetCondition(apis.ConditionSucceeded) - status.SortTaskRunStepOrder(tr.Status.Steps, taskSpec.Steps) + // Convert the Pod's status to the equivalent TaskRun Status. + tr.Status = podconvert.MakeTaskRunStatus(*tr, pod, *taskSpec) updateTaskRunResourceResult(tr, pod, c.Logger) after := tr.Status.GetCondition(apis.ConditionSucceeded) - if addReady { - if err := podconvert.UpdateReady(c.KubeClientSet, *pod); err != nil { - return err - } - } - reconciler.EmitEvent(c.Recorder, before, after, tr) c.Logger.Infof("Successfully reconciled taskrun %s/%s with status: %#v", tr.Name, tr.Namespace, after) @@ -369,15 +367,15 @@ func (c *Reconciler) handlePodCreationError(tr *v1alpha1.TaskRun, err error) { var succeededStatus corev1.ConditionStatus if isExceededResourceQuotaError(err) { succeededStatus = corev1.ConditionUnknown - reason = status.ReasonExceededResourceQuota + reason = podconvert.ReasonExceededResourceQuota backoff, currentlyBackingOff := c.timeoutHandler.GetBackoff(tr) if !currentlyBackingOff { go c.timeoutHandler.SetTaskRunTimer(tr, time.Until(backoff.NextAttempt)) } - msg = fmt.Sprintf("%s, reattempted %d times", status.GetExceededResourcesMessage(tr), backoff.NumAttempts) + msg = fmt.Sprintf("TaskRun Pod exceeded available resources, reattempted %d times", backoff.NumAttempts) } else { succeededStatus = corev1.ConditionFalse - reason = status.ReasonCouldntGetTask + reason = podconvert.ReasonCouldntGetTask if tr.Spec.TaskRef != nil { msg = fmt.Sprintf("Missing or invalid Task %s/%s", tr.Namespace, tr.Spec.TaskRef.Name) } else { @@ -517,7 +515,7 @@ func (c *Reconciler) updateTaskRunStatusForTimeout(tr *v1alpha1.TaskRun, dp Dele tr.Status.SetCondition(&apis.Condition{ Type: apis.ConditionSucceeded, Status: corev1.ConditionFalse, - Reason: status.ReasonTimedOut, + Reason: podconvert.ReasonTimedOut, Message: timeoutMsg, }) // update tr completed time diff --git a/pkg/reconciler/taskrun/taskrun_test.go b/pkg/reconciler/taskrun/taskrun_test.go index e8c6f2a0f4c..cbafa75eb7b 100644 --- a/pkg/reconciler/taskrun/taskrun_test.go +++ b/pkg/reconciler/taskrun/taskrun_test.go @@ -29,10 +29,9 @@ import ( "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/pipeline" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" - "github.com/tektoncd/pipeline/pkg/pod" + podconvert "github.com/tektoncd/pipeline/pkg/pod" "github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources/cloudevent" ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing" - "github.com/tektoncd/pipeline/pkg/status" "github.com/tektoncd/pipeline/pkg/system" 
"github.com/tektoncd/pipeline/test" tb "github.com/tektoncd/pipeline/test/builder" @@ -288,7 +287,7 @@ func TestReconcile_ExplicitDefaultSA(t *testing.T) { wantPod: tb.Pod("test-taskrun-run-success-pod-123456", "foo", tb.PodLabel(taskNameLabelKey, "test-task"), tb.PodLabel(taskRunNameLabelKey, "test-taskrun-run-success"), - tb.PodLabel(pod.ManagedByLabelKey, pod.ManagedByLabelValue), + tb.PodLabel(podconvert.ManagedByLabelKey, podconvert.ManagedByLabelValue), tb.PodOwnerReference("TaskRun", "test-taskrun-run-success", tb.OwnerReferenceAPIVersion(currentApiVersion)), tb.PodSpec( @@ -327,7 +326,7 @@ func TestReconcile_ExplicitDefaultSA(t *testing.T) { wantPod: tb.Pod("test-taskrun-with-sa-run-success-pod-123456", "foo", tb.PodLabel(taskNameLabelKey, "test-with-sa"), tb.PodLabel(taskRunNameLabelKey, "test-taskrun-with-sa-run-success"), - tb.PodLabel(pod.ManagedByLabelKey, pod.ManagedByLabelValue), + tb.PodLabel(podconvert.ManagedByLabelKey, podconvert.ManagedByLabelValue), tb.PodOwnerReference("TaskRun", "test-taskrun-with-sa-run-success", tb.OwnerReferenceAPIVersion(currentApiVersion)), tb.PodSpec( @@ -401,8 +400,8 @@ func TestReconcile_ExplicitDefaultSA(t *testing.T) { if condition == nil || condition.Status != corev1.ConditionUnknown { t.Errorf("Expected invalid TaskRun to have in progress status, but had %v", condition) } - if condition != nil && condition.Reason != status.ReasonRunning { - t.Errorf("Expected reason %q but was %s", status.ReasonRunning, condition.Reason) + if condition != nil && condition.Reason != podconvert.ReasonRunning { + t.Errorf("Expected reason %q but was %s", podconvert.ReasonRunning, condition.Reason) } if tr.Status.PodName == "" { @@ -550,7 +549,7 @@ func TestReconcile(t *testing.T) { wantPod: tb.Pod("test-taskrun-run-success-pod-123456", "foo", tb.PodLabel(taskNameLabelKey, "test-task"), tb.PodLabel(taskRunNameLabelKey, "test-taskrun-run-success"), - tb.PodLabel(pod.ManagedByLabelKey, pod.ManagedByLabelValue), + tb.PodLabel(podconvert.ManagedByLabelKey, podconvert.ManagedByLabelValue), tb.PodOwnerReference("TaskRun", "test-taskrun-run-success", tb.OwnerReferenceAPIVersion(currentApiVersion)), tb.PodSpec( @@ -588,7 +587,7 @@ func TestReconcile(t *testing.T) { wantPod: tb.Pod("test-taskrun-with-sa-run-success-pod-123456", "foo", tb.PodLabel(taskNameLabelKey, "test-with-sa"), tb.PodLabel(taskRunNameLabelKey, "test-taskrun-with-sa-run-success"), - tb.PodLabel(pod.ManagedByLabelKey, pod.ManagedByLabelValue), + tb.PodLabel(podconvert.ManagedByLabelKey, podconvert.ManagedByLabelValue), tb.PodOwnerReference("TaskRun", "test-taskrun-with-sa-run-success", tb.OwnerReferenceAPIVersion(currentApiVersion)), tb.PodSpec( @@ -627,7 +626,7 @@ func TestReconcile(t *testing.T) { wantPod: tb.Pod("test-taskrun-substitution-pod-123456", "foo", tb.PodLabel(taskNameLabelKey, "test-task-with-substitution"), tb.PodLabel(taskRunNameLabelKey, "test-taskrun-substitution"), - tb.PodLabel(pod.ManagedByLabelKey, pod.ManagedByLabelValue), + tb.PodLabel(podconvert.ManagedByLabelKey, podconvert.ManagedByLabelValue), tb.PodOwnerReference("TaskRun", "test-taskrun-substitution", tb.OwnerReferenceAPIVersion(currentApiVersion)), tb.PodSpec( @@ -717,7 +716,7 @@ func TestReconcile(t *testing.T) { taskRun: taskRunWithTaskSpec, wantPod: tb.Pod("test-taskrun-with-taskspec-pod-123456", "foo", tb.PodLabel(taskRunNameLabelKey, "test-taskrun-with-taskspec"), - tb.PodLabel(pod.ManagedByLabelKey, pod.ManagedByLabelValue), + tb.PodLabel(podconvert.ManagedByLabelKey, podconvert.ManagedByLabelValue), 
tb.PodOwnerReference("TaskRun", "test-taskrun-with-taskspec", tb.OwnerReferenceAPIVersion(currentApiVersion)), tb.PodSpec( @@ -777,7 +776,7 @@ func TestReconcile(t *testing.T) { wantPod: tb.Pod("test-taskrun-with-cluster-task-pod-123456", "foo", tb.PodLabel(taskNameLabelKey, "test-cluster-task"), tb.PodLabel(taskRunNameLabelKey, "test-taskrun-with-cluster-task"), - tb.PodLabel(pod.ManagedByLabelKey, pod.ManagedByLabelValue), + tb.PodLabel(podconvert.ManagedByLabelKey, podconvert.ManagedByLabelValue), tb.PodOwnerReference("TaskRun", "test-taskrun-with-cluster-task", tb.OwnerReferenceAPIVersion(currentApiVersion)), tb.PodSpec( @@ -814,7 +813,7 @@ func TestReconcile(t *testing.T) { taskRun: taskRunWithResourceSpecAndTaskSpec, wantPod: tb.Pod("test-taskrun-with-resource-spec-pod-123456", "foo", tb.PodLabel(taskRunNameLabelKey, "test-taskrun-with-resource-spec"), - tb.PodLabel(pod.ManagedByLabelKey, pod.ManagedByLabelValue), + tb.PodLabel(podconvert.ManagedByLabelKey, podconvert.ManagedByLabelValue), tb.PodOwnerReference("TaskRun", "test-taskrun-with-resource-spec", tb.OwnerReferenceAPIVersion(currentApiVersion)), tb.PodSpec( @@ -872,7 +871,7 @@ func TestReconcile(t *testing.T) { wantPod: tb.Pod("test-taskrun-with-pod-pod-123456", "foo", tb.PodLabel(taskNameLabelKey, "test-task"), tb.PodLabel(taskRunNameLabelKey, "test-taskrun-with-pod"), - tb.PodLabel(pod.ManagedByLabelKey, pod.ManagedByLabelValue), + tb.PodLabel(podconvert.ManagedByLabelKey, podconvert.ManagedByLabelValue), tb.PodOwnerReference("TaskRun", "test-taskrun-with-pod", tb.OwnerReferenceAPIVersion(currentApiVersion)), tb.PodSpec( @@ -943,8 +942,8 @@ func TestReconcile(t *testing.T) { if condition == nil || condition.Status != corev1.ConditionUnknown { t.Errorf("Expected invalid TaskRun to have in progress status, but had %v", condition) } - if condition != nil && condition.Reason != status.ReasonRunning { - t.Errorf("Expected reason %q but was %s", status.ReasonRunning, condition.Reason) + if condition != nil && condition.Reason != podconvert.ReasonRunning { + t.Errorf("Expected reason %q but was %s", podconvert.ReasonRunning, condition.Reason) } if tr.Status.PodName == "" { @@ -1115,18 +1114,15 @@ func TestReconcileInvalidTaskRuns(t *testing.T) { name string taskRun *v1alpha1.TaskRun reason string - }{ - { - name: "task run with no task", - taskRun: noTaskRun, - reason: status.ReasonFailedResolution, - }, - { - name: "task run with no task", - taskRun: withWrongRef, - reason: status.ReasonFailedResolution, - }, - } + }{{ + name: "task run with no task", + taskRun: noTaskRun, + reason: podconvert.ReasonFailedResolution, + }, { + name: "task run with no task", + taskRun: withWrongRef, + reason: podconvert.ReasonFailedResolution, + }} for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { @@ -1198,12 +1194,12 @@ func makePod(taskRun *v1alpha1.TaskRun, task *v1alpha1.Task) (*corev1.Pod, error }, }) - entrypointCache, err := pod.NewEntrypointCache(kubeclient) + entrypointCache, err := podconvert.NewEntrypointCache(kubeclient) if err != nil { return nil, err } - return pod.MakePod(images, taskRun, task.Spec, kubeclient, entrypointCache) + return podconvert.MakePod(images, taskRun, task.Spec, kubeclient, entrypointCache) } func TestReconcilePodUpdateStatus(t *testing.T) { @@ -1261,7 +1257,7 @@ func TestReconcilePodUpdateStatus(t *testing.T) { if d := cmp.Diff(&apis.Condition{ Type: apis.ConditionSucceeded, Status: corev1.ConditionTrue, - Reason: status.ReasonSucceeded, + Reason: podconvert.ReasonSucceeded, Message: "All Steps 
have completed executing", }, newTr.Status.GetCondition(apis.ConditionSucceeded), ignoreLastTransitionTime); d != "" { t.Errorf("Did not get expected condition (-want, +got): %v", d) @@ -1458,13 +1454,13 @@ func TestHandlePodCreationError(t *testing.T) { err: k8sapierrors.NewForbidden(k8sruntimeschema.GroupResource{Group: "foo", Resource: "bar"}, "baz", errors.New("exceeded quota")), expectedType: apis.ConditionSucceeded, expectedStatus: corev1.ConditionUnknown, - expectedReason: status.ReasonExceededResourceQuota, + expectedReason: podconvert.ReasonExceededResourceQuota, }, { description: "errors other than exceeded quota fail the taskrun", err: errors.New("this is a fatal error"), expectedType: apis.ConditionSucceeded, expectedStatus: corev1.ConditionFalse, - expectedReason: status.ReasonCouldntGetTask, + expectedReason: podconvert.ReasonCouldntGetTask, }} for _, tc := range testcases { t.Run(tc.description, func(t *testing.T) { diff --git a/pkg/status/stepstatesorter.go b/pkg/status/stepstatesorter.go deleted file mode 100644 index e2f2c43c973..00000000000 --- a/pkg/status/stepstatesorter.go +++ /dev/null @@ -1,83 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package status - -import ( - "sort" - - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" -) - -// StepStateSorter implements a sorting mechanism to align the order of the steps in TaskRun -// with the spec steps in Task. -type StepStateSorter struct { - taskRunSteps []v1alpha1.StepState - mapForSort map[string]int -} - -func (trt *StepStateSorter) Init(taskRunSteps []v1alpha1.StepState, taskSpecSteps []v1alpha1.Step) { - trt.taskRunSteps = taskRunSteps - trt.mapForSort = trt.constructTaskStepsSorter(taskSpecSteps) -} - -// constructTaskStepsSorter constructs a map matching the names of -// the steps to their indices for a task. -func (trt *StepStateSorter) constructTaskStepsSorter(taskSpecSteps []v1alpha1.Step) map[string]int { - sorter := make(map[string]int) - for index, step := range taskSpecSteps { - sorter[step.Name] = index - } - return sorter -} - -// changeIndex sorts the steps of the task run, based on the -// order of the steps in the task. Instead of changing the element with the one next to it, -// we directly swap it with the desired index. -func (trt *StepStateSorter) changeIndex(index int) { - // Check if the current index is equal to the desired index. If they are equal, do not swap; if they - // are not equal, swap index j with the desired index. - desiredIndex, exist := trt.mapForSort[trt.taskRunSteps[index].Name] - if exist && index != desiredIndex { - trt.taskRunSteps[desiredIndex], trt.taskRunSteps[index] = trt.taskRunSteps[index], trt.taskRunSteps[desiredIndex] - } -} - -func (trt *StepStateSorter) Len() int { - return len(trt.taskRunSteps) -} - -func (trt *StepStateSorter) Swap(i, j int) { - trt.changeIndex(j) - // The index j is unable to reach the last index. - // When i reaches the end of the array, we need to check whether the last one needs a swap. 
- if i == trt.Len()-1 { - trt.changeIndex(i) - } -} - -func (trt *StepStateSorter) Less(i, j int) bool { - // Since the logic is complicated, we move it into the Swap function to decide whether - // and how to change the index. We set it to true here in order to iterate all the - // elements of the array in the Swap function. - return true -} - -func SortTaskRunStepOrder(taskRunSteps []v1alpha1.StepState, taskSpecSteps []v1alpha1.Step) { - trt := new(StepStateSorter) - trt.Init(taskRunSteps, taskSpecSteps) - sort.Sort(trt) -} diff --git a/pkg/status/stepstatesorter_test.go b/pkg/status/stepstatesorter_test.go deleted file mode 100644 index 66a3d7f04b3..00000000000 --- a/pkg/status/stepstatesorter_test.go +++ /dev/null @@ -1,87 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package status - -import ( - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" - tb "github.com/tektoncd/pipeline/test/builder" - corev1 "k8s.io/api/core/v1" -) - -func TestSortTaskRunStepOrder(t *testing.T) { - task := tb.Task("failing-task", "default", tb.TaskSpec( - tb.Step("hello", "busybox", - tb.StepCommand("/bin/sh"), tb.StepArgs("-c", "echo hello"), - ), - tb.Step("exit", "busybox", - tb.StepCommand("/bin/sh"), tb.StepArgs("-c", "exit 1"), - ), - tb.Step("world", "busybox", - tb.StepCommand("/bin/sh"), tb.StepArgs("-c", "sleep 30s"), - ), - )) - - taskRunStatusSteps := []v1alpha1.StepState{{ - ContainerState: corev1.ContainerState{ - Terminated: &corev1.ContainerStateTerminated{ - ExitCode: 0, - Reason: "Completed", - }, - }, - Name: "world", - }, { - ContainerState: corev1.ContainerState{ - Terminated: &corev1.ContainerStateTerminated{ - ExitCode: 1, - Reason: "Error", - }, - }, - Name: "exit", - }, { - ContainerState: corev1.ContainerState{ - Terminated: &corev1.ContainerStateTerminated{ - ExitCode: 0, - Reason: "Completed", - }, - }, - Name: "hello", - }, { - - ContainerState: corev1.ContainerState{ - Terminated: &corev1.ContainerStateTerminated{ - ExitCode: 0, - Reason: "Completed", - }, - }, - Name: "nop", - }} - - SortTaskRunStepOrder(taskRunStatusSteps, task.Spec.Steps) - actualStepOrder := []string{} - for _, state := range taskRunStatusSteps { - actualStepOrder = append(actualStepOrder, state.Name) - } - - expectedStepOrder := []string{"hello", "exit", "world", "nop"} - - if d := cmp.Diff(actualStepOrder, expectedStepOrder); d != "" { - t.Errorf("The status steps in TaksRun doesn't match the spec steps in Task, diff: %s", d) - } -} diff --git a/pkg/status/taskrunpod.go b/pkg/status/taskrunpod.go deleted file mode 100644 index 7e075cc8da3..00000000000 --- a/pkg/status/taskrunpod.go +++ /dev/null @@ -1,222 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package status - -import ( - "fmt" - "strings" - "time" - - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" - listers "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1" - podconvert "github.com/tektoncd/pipeline/pkg/pod" - "go.uber.org/zap" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "knative.dev/pkg/apis" -) - -// UpdateStatusFromPod modifies the task run status based on the pod and then returns true if the pod is running and -// all sidecars are ready -func UpdateStatusFromPod(taskRun *v1alpha1.TaskRun, pod *corev1.Pod, resourceLister listers.PipelineResourceLister, kubeclient kubernetes.Interface, logger *zap.SugaredLogger) bool { - if taskRun.Status.GetCondition(apis.ConditionSucceeded) == nil || taskRun.Status.GetCondition(apis.ConditionSucceeded).Status == corev1.ConditionUnknown { - // If the taskRunStatus doesn't exist yet, it's because we just started running - taskRun.Status.SetCondition(&apis.Condition{ - Type: apis.ConditionSucceeded, - Status: corev1.ConditionUnknown, - Reason: ReasonRunning, - Message: "Not all Steps in the Task have finished executing", - }) - } - - taskRun.Status.PodName = pod.Name - - taskRun.Status.Steps = []v1alpha1.StepState{} - taskRun.Status.Sidecars = []v1alpha1.SidecarState{} - for _, s := range pod.Status.ContainerStatuses { - if podconvert.IsContainerStep(s.Name) { - taskRun.Status.Steps = append(taskRun.Status.Steps, v1alpha1.StepState{ - ContainerState: *s.State.DeepCopy(), - Name: podconvert.TrimStepPrefix(s.Name), - ContainerName: s.Name, - ImageID: s.ImageID, - }) - } else if podconvert.IsContainerSidecar(s.Name) { - taskRun.Status.Sidecars = append(taskRun.Status.Sidecars, v1alpha1.SidecarState{ - Name: podconvert.TrimSidecarPrefix(s.Name), - ImageID: s.ImageID, - }) - } - } - - // Complete if we did not find a step that is not complete, or the pod is in a definitely complete phase - complete := areStepsComplete(pod) || pod.Status.Phase == corev1.PodSucceeded || pod.Status.Phase == corev1.PodFailed - - if complete { - updateCompletedTaskRun(taskRun, pod) - } else { - updateIncompleteTaskRun(taskRun, pod) - } - - sidecarsCount, readyOrTerminatedSidecarsCount := countSidecars(pod) - return pod.Status.Phase == corev1.PodRunning && readyOrTerminatedSidecarsCount == sidecarsCount -} - -func updateCompletedTaskRun(taskRun *v1alpha1.TaskRun, pod *corev1.Pod) { - if didTaskRunFail(pod) { - msg := getFailureMessage(pod) - taskRun.Status.SetCondition(&apis.Condition{ - Type: apis.ConditionSucceeded, - Status: corev1.ConditionFalse, - Reason: ReasonFailed, - Message: msg, - }) - } else { - taskRun.Status.SetCondition(&apis.Condition{ - Type: apis.ConditionSucceeded, - Status: corev1.ConditionTrue, - Reason: ReasonSucceeded, - Message: "All Steps have completed executing", - }) - } - // update tr completed time - taskRun.Status.CompletionTime = &metav1.Time{Time: time.Now()} -} - -func updateIncompleteTaskRun(taskRun *v1alpha1.TaskRun, pod *corev1.Pod) { - switch pod.Status.Phase { - case corev1.PodRunning: - taskRun.Status.SetCondition(&apis.Condition{ - 
Type: apis.ConditionSucceeded, - Status: corev1.ConditionUnknown, - Reason: ReasonRunning, - Message: "Not all Steps in the Task have finished executing", - }) - case corev1.PodPending: - var reason, msg string - if IsPodExceedingNodeResources(pod) { - reason = ReasonExceededNodeResources - msg = GetExceededResourcesMessage(taskRun) - } else { - reason = "Pending" - msg = GetWaitingMessage(pod) - } - taskRun.Status.SetCondition(&apis.Condition{ - Type: apis.ConditionSucceeded, - Status: corev1.ConditionUnknown, - Reason: reason, - Message: msg, - }) - } -} - -func didTaskRunFail(pod *corev1.Pod) bool { - f := pod.Status.Phase == corev1.PodFailed - for _, s := range pod.Status.ContainerStatuses { - if podconvert.IsContainerStep(s.Name) { - if s.State.Terminated != nil { - f = f || s.State.Terminated.ExitCode != 0 - } - } - } - return f -} - -func areStepsComplete(pod *corev1.Pod) bool { - stepsComplete := len(pod.Status.ContainerStatuses) > 0 && pod.Status.Phase == corev1.PodRunning - for _, s := range pod.Status.ContainerStatuses { - if podconvert.IsContainerStep(s.Name) { - if s.State.Terminated == nil { - stepsComplete = false - } - } - } - return stepsComplete -} - -func countSidecars(pod *corev1.Pod) (total int, readyOrTerminated int) { - for _, s := range pod.Status.ContainerStatuses { - if podconvert.IsContainerSidecar(s.Name) { - if s.State.Running != nil && s.Ready { - readyOrTerminated++ - } else if s.State.Terminated != nil { - readyOrTerminated++ - } - total++ - } - } - return total, readyOrTerminated -} - -func getFailureMessage(pod *corev1.Pod) string { - // First, try to surface an error about the actual build step that failed. - for _, status := range pod.Status.ContainerStatuses { - term := status.State.Terminated - if term != nil && term.ExitCode != 0 { - return fmt.Sprintf("%q exited with code %d (image: %q); for logs run: kubectl -n %s logs %s -c %s", - status.Name, term.ExitCode, status.ImageID, - pod.Namespace, pod.Name, status.Name) - } - } - // Next, return the Pod's status message if it has one. - if pod.Status.Message != "" { - return pod.Status.Message - } - // Lastly fall back on a generic error message. - return "build failed for unspecified reasons." -} - -func IsPodExceedingNodeResources(pod *corev1.Pod) bool { - for _, podStatus := range pod.Status.Conditions { - if podStatus.Reason == corev1.PodReasonUnschedulable && strings.Contains(podStatus.Message, "Insufficient") { - return true - } - } - return false -} - -func GetExceededResourcesMessage(tr *v1alpha1.TaskRun) string { - return fmt.Sprintf("TaskRun pod %q exceeded available resources", tr.Name) -} - -func GetWaitingMessage(pod *corev1.Pod) string { - // First, try to surface reason for pending/unknown about the actual build step. - for _, status := range pod.Status.ContainerStatuses { - wait := status.State.Waiting - if wait != nil && wait.Message != "" { - return fmt.Sprintf("build step %q is pending with reason %q", - status.Name, wait.Message) - } - } - // Try to surface underlying reason by inspecting pod's recent status if condition is not true - for i, podStatus := range pod.Status.Conditions { - if podStatus.Status != corev1.ConditionTrue { - return fmt.Sprintf("pod status %q:%q; message: %q", - pod.Status.Conditions[i].Type, - pod.Status.Conditions[i].Status, - pod.Status.Conditions[i].Message) - } - } - // Next, return the Pod's status message if it has one. - if pod.Status.Message != "" { - return pod.Status.Message - } - - // Lastly fall back on a generic pending message. 
- return "Pending" -} diff --git a/pkg/status/taskrunreasons.go b/pkg/status/taskrunreasons.go deleted file mode 100644 index 26960a509b6..00000000000 --- a/pkg/status/taskrunreasons.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package status - -const ( - // reasonCouldntGetTask indicates that the reason for the failure status is that the - // Task couldn't be found - ReasonCouldntGetTask = "CouldntGetTask" - - // reasonFailedResolution indicated that the reason for failure status is - // that references within the TaskRun could not be resolved - ReasonFailedResolution = "TaskRunResolutionFailed" - - // reasonFailedValidation indicated that the reason for failure status is - // that taskrun failed runtime validation - ReasonFailedValidation = "TaskRunValidationFailed" - - // reasonRunning indicates that the reason for the inprogress status is that the TaskRun - // is just starting to be reconciled - ReasonRunning = "Running" - - // reasonTimedOut indicates that the TaskRun has taken longer than its configured timeout - ReasonTimedOut = "TaskRunTimeout" - - // reasonExceededResourceQuota indicates that the TaskRun failed to create a pod due to - // a ResourceQuota in the namespace - ReasonExceededResourceQuota = "ExceededResourceQuota" - - // reasonExceededNodeResources indicates that the TaskRun's pod has failed to start due - // to resource constraints on the node - ReasonExceededNodeResources = "ExceededNodeResources" - - // ReasonSucceeded indicates that the reason for the finished status is that all of the steps - // completed successfully - ReasonSucceeded = "Succeeded" - - // ReasonFailed indicates that the reason for the failure status is unknown or that one of the steps failed - ReasonFailed = "Failed" -)