diff --git a/pkg/apis/config/default_test.go b/pkg/apis/config/default_test.go index 8fbe506e603..5ad8c3929e3 100644 --- a/pkg/apis/config/default_test.go +++ b/pkg/apis/config/default_test.go @@ -14,12 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. */ -package config +package config_test import ( "testing" "github.com/google/go-cmp/cmp" + "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod" test "github.com/tektoncd/pipeline/pkg/reconciler/testing" "github.com/tektoncd/pipeline/test/diff" @@ -27,25 +28,25 @@ import ( func TestNewDefaultsFromConfigMap(t *testing.T) { type testCase struct { - expectedConfig *Defaults + expectedConfig *config.Defaults expectedError bool fileName string } testCases := []testCase{ { - expectedConfig: &Defaults{ + expectedConfig: &config.Defaults{ DefaultTimeoutMinutes: 50, DefaultServiceAccount: "tekton", DefaultManagedByLabelValue: "something-else", }, - fileName: GetDefaultsConfigName(), + fileName: config.GetDefaultsConfigName(), }, { - expectedConfig: &Defaults{ + expectedConfig: &config.Defaults{ DefaultTimeoutMinutes: 50, DefaultServiceAccount: "tekton", - DefaultManagedByLabelValue: DefaultManagedByLabelValue, + DefaultManagedByLabelValue: config.DefaultManagedByLabelValue, DefaultPodTemplate: &pod.Template{ NodeSelector: map[string]string{ "label": "value", @@ -77,7 +78,7 @@ func TestNewDefaultsFromConfigMap(t *testing.T) { func TestNewDefaultsFromEmptyConfigMap(t *testing.T) { DefaultsConfigEmptyName := "config-defaults-empty" - expectedConfig := &Defaults{ + expectedConfig := &config.Defaults{ DefaultTimeoutMinutes: 60, DefaultManagedByLabelValue: "tekton-pipelines", } @@ -87,8 +88,8 @@ func TestNewDefaultsFromEmptyConfigMap(t *testing.T) { func TestEquals(t *testing.T) { testCases := []struct { name string - left *Defaults - right *Defaults + left *config.Defaults + right *config.Defaults expected bool }{ { @@ -100,51 +101,51 @@ func TestEquals(t *testing.T) { { name: "left nil", left: nil, - right: &Defaults{}, + right: &config.Defaults{}, expected: false, }, { name: "right nil", - left: &Defaults{}, + left: &config.Defaults{}, right: nil, expected: false, }, { name: "right and right default", - left: &Defaults{}, - right: &Defaults{}, + left: &config.Defaults{}, + right: &config.Defaults{}, expected: true, }, { name: "different default timeout", - left: &Defaults{ + left: &config.Defaults{ DefaultTimeoutMinutes: 10, }, - right: &Defaults{ + right: &config.Defaults{ DefaultTimeoutMinutes: 20, }, expected: false, }, { name: "same default timeout", - left: &Defaults{ + left: &config.Defaults{ DefaultTimeoutMinutes: 20, }, - right: &Defaults{ + right: &config.Defaults{ DefaultTimeoutMinutes: 20, }, expected: true, }, { name: "different default pod template", - left: &Defaults{ + left: &config.Defaults{ DefaultPodTemplate: &pod.Template{ NodeSelector: map[string]string{ "label": "value", }, }, }, - right: &Defaults{ + right: &config.Defaults{ DefaultPodTemplate: &pod.Template{ NodeSelector: map[string]string{ "label2": "value", @@ -155,14 +156,14 @@ func TestEquals(t *testing.T) { }, { name: "same default pod template", - left: &Defaults{ + left: &config.Defaults{ DefaultPodTemplate: &pod.Template{ NodeSelector: map[string]string{ "label": "value", }, }, }, - right: &Defaults{ + right: &config.Defaults{ DefaultPodTemplate: &pod.Template{ NodeSelector: map[string]string{ "label": "value", @@ -183,9 +184,9 @@ func TestEquals(t *testing.T) { } } 
-func verifyConfigFileWithExpectedConfig(t *testing.T, fileName string, expectedConfig *Defaults) { +func verifyConfigFileWithExpectedConfig(t *testing.T, fileName string, expectedConfig *config.Defaults) { cm := test.ConfigMapFromTestFile(t, fileName) - if Defaults, err := NewDefaultsFromConfigMap(cm); err == nil { + if Defaults, err := config.NewDefaultsFromConfigMap(cm); err == nil { if d := cmp.Diff(Defaults, expectedConfig); d != "" { t.Errorf("Diff:\n%s", diff.PrintWantGot(d)) } @@ -196,7 +197,7 @@ func verifyConfigFileWithExpectedConfig(t *testing.T, fileName string, expectedC func verifyConfigFileWithExpectedError(t *testing.T, fileName string) { cm := test.ConfigMapFromTestFile(t, fileName) - if _, err := NewDefaultsFromConfigMap(cm); err == nil { + if _, err := config.NewDefaultsFromConfigMap(cm); err == nil { t.Errorf("NewDefaultsFromConfigMap(actual) was expected to return an error") } } diff --git a/pkg/apis/config/feature_flags_test.go b/pkg/apis/config/feature_flags_test.go index 695a205a882..3d26ad7f31a 100644 --- a/pkg/apis/config/feature_flags_test.go +++ b/pkg/apis/config/feature_flags_test.go @@ -14,32 +14,33 @@ See the License for the specific language governing permissions and limitations under the License. */ -package config +package config_test import ( "os" "testing" "github.com/google/go-cmp/cmp" + "github.com/tektoncd/pipeline/pkg/apis/config" test "github.com/tektoncd/pipeline/pkg/reconciler/testing" "github.com/tektoncd/pipeline/test/diff" ) func TestNewFeatureFlagsFromConfigMap(t *testing.T) { type testCase struct { - expectedConfig *FeatureFlags + expectedConfig *config.FeatureFlags fileName string } testCases := []testCase{ { - expectedConfig: &FeatureFlags{ - RunningInEnvWithInjectedSidecars: DefaultRunningInEnvWithInjectedSidecars, + expectedConfig: &config.FeatureFlags{ + RunningInEnvWithInjectedSidecars: config.DefaultRunningInEnvWithInjectedSidecars, }, - fileName: GetFeatureFlagsConfigName(), + fileName: config.GetFeatureFlagsConfigName(), }, { - expectedConfig: &FeatureFlags{ + expectedConfig: &config.FeatureFlags{ DisableHomeEnvOverwrite: true, DisableWorkingDirOverwrite: true, DisableAffinityAssistant: true, @@ -56,7 +57,7 @@ func TestNewFeatureFlagsFromConfigMap(t *testing.T) { func TestNewFeatureFlagsFromEmptyConfigMap(t *testing.T) { FeatureFlagsConfigEmptyName := "feature-flags-empty" - expectedConfig := &FeatureFlags{ + expectedConfig := &config.FeatureFlags{ RunningInEnvWithInjectedSidecars: true, } verifyConfigFileWithExpectedFeatureFlagsConfig(t, FeatureFlagsConfigEmptyName, expectedConfig) @@ -84,7 +85,7 @@ func TestGetFeatureFlagsConfigName(t *testing.T) { if tc.featureFlagEnvValue != "" { os.Setenv("CONFIG_FEATURE_FLAGS_NAME", tc.featureFlagEnvValue) } - got := GetFeatureFlagsConfigName() + got := config.GetFeatureFlagsConfigName() want := tc.expected if got != want { t.Errorf("GetFeatureFlagsConfigName() = %s, want %s", got, want) @@ -93,9 +94,9 @@ func TestGetFeatureFlagsConfigName(t *testing.T) { } } -func verifyConfigFileWithExpectedFeatureFlagsConfig(t *testing.T, fileName string, expectedConfig *FeatureFlags) { +func verifyConfigFileWithExpectedFeatureFlagsConfig(t *testing.T, fileName string, expectedConfig *config.FeatureFlags) { cm := test.ConfigMapFromTestFile(t, fileName) - if flags, err := NewFeatureFlagsFromConfigMap(cm); err == nil { + if flags, err := config.NewFeatureFlagsFromConfigMap(cm); err == nil { if d := cmp.Diff(flags, expectedConfig); d != "" { t.Errorf("Diff:\n%s", diff.PrintWantGot(d)) } diff --git 
a/pkg/apis/config/store_test.go b/pkg/apis/config/store_test.go index fcd3c59fb17..4f1f3629be2 100644 --- a/pkg/apis/config/store_test.go +++ b/pkg/apis/config/store_test.go @@ -14,13 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ -package config +package config_test import ( "context" "testing" "github.com/google/go-cmp/cmp" + "github.com/tektoncd/pipeline/pkg/apis/config" test "github.com/tektoncd/pipeline/pkg/reconciler/testing" "github.com/tektoncd/pipeline/test/diff" logtesting "knative.dev/pkg/logging/testing" @@ -30,21 +31,21 @@ func TestStoreLoadWithContext(t *testing.T) { defaultConfig := test.ConfigMapFromTestFile(t, "config-defaults") featuresConfig := test.ConfigMapFromTestFile(t, "feature-flags-all-flags-set") - expectedDefaults, _ := NewDefaultsFromConfigMap(defaultConfig) - expectedFeatures, _ := NewFeatureFlagsFromConfigMap(featuresConfig) + expectedDefaults, _ := config.NewDefaultsFromConfigMap(defaultConfig) + expectedFeatures, _ := config.NewFeatureFlagsFromConfigMap(featuresConfig) - expected := &Config{ + expected := &config.Config{ Defaults: expectedDefaults, FeatureFlags: expectedFeatures, } - store := NewStore(logtesting.TestLogger(t)) + store := config.NewStore(logtesting.TestLogger(t)) store.OnConfigChanged(defaultConfig) store.OnConfigChanged(featuresConfig) - config := FromContext(store.ToContext(context.Background())) + cfg := config.FromContext(store.ToContext(context.Background())) - if d := cmp.Diff(config, expected); d != "" { + if d := cmp.Diff(cfg, expected); d != "" { t.Errorf("Unexpected config %s", diff.PrintWantGot(d)) } } diff --git a/pkg/apis/pipeline/v1beta1/taskrun_types.go b/pkg/apis/pipeline/v1beta1/taskrun_types.go index bdf38ae3bfd..22398879c1b 100644 --- a/pkg/apis/pipeline/v1beta1/taskrun_types.go +++ b/pkg/apis/pipeline/v1beta1/taskrun_types.go @@ -306,6 +306,7 @@ type CloudEventDeliveryState struct { } // +genclient +// +genreconciler // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // TaskRun represents a single execution of a Task. TaskRuns are how the steps diff --git a/pkg/client/injection/reconciler/pipeline/v1beta1/taskrun/controller.go b/pkg/client/injection/reconciler/pipeline/v1beta1/taskrun/controller.go new file mode 100644 index 00000000000..078627b1ea6 --- /dev/null +++ b/pkg/client/injection/reconciler/pipeline/v1beta1/taskrun/controller.go @@ -0,0 +1,118 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package taskrun + +import ( + context "context" + fmt "fmt" + reflect "reflect" + strings "strings" + + versionedscheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" + client "github.com/tektoncd/pipeline/pkg/client/injection/client" + taskrun "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/taskrun" + corev1 "k8s.io/api/core/v1" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/kubernetes/typed/core/v1" + record "k8s.io/client-go/tools/record" + kubeclient "knative.dev/pkg/client/injection/kube/client" + controller "knative.dev/pkg/controller" + logging "knative.dev/pkg/logging" +) + +const ( + defaultControllerAgentName = "taskrun-controller" + defaultFinalizerName = "taskruns.tekton.dev" +) + +// NewImpl returns a controller.Impl that handles queuing and feeding work from +// the queue through an implementation of controller.Reconciler, delegating to +// the provided Interface and optional Finalizer methods. OptionsFn is used to return +// controller.Options to be used but the internal reconciler. +func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsFn) *controller.Impl { + logger := logging.FromContext(ctx) + + // Check the options function input. It should be 0 or 1. + if len(optionsFns) > 1 { + logger.Fatalf("up to one options function is supported, found %d", len(optionsFns)) + } + + taskrunInformer := taskrun.Get(ctx) + + rec := &reconcilerImpl{ + Client: client.Get(ctx), + Lister: taskrunInformer.Lister(), + reconciler: r, + finalizerName: defaultFinalizerName, + } + + t := reflect.TypeOf(r).Elem() + queueName := fmt.Sprintf("%s.%s", strings.ReplaceAll(t.PkgPath(), "/", "-"), t.Name()) + + impl := controller.NewImpl(rec, logger, queueName) + agentName := defaultControllerAgentName + + // Pass impl to the options. Save any optional results. 
+ for _, fn := range optionsFns { + opts := fn(impl) + if opts.ConfigStore != nil { + rec.configStore = opts.ConfigStore + } + if opts.FinalizerName != "" { + rec.finalizerName = opts.FinalizerName + } + if opts.AgentName != "" { + agentName = opts.AgentName + } + } + + rec.Recorder = createRecorder(ctx, agentName) + + return impl +} + +func createRecorder(ctx context.Context, agentName string) record.EventRecorder { + logger := logging.FromContext(ctx) + + recorder := controller.GetEventRecorder(ctx) + if recorder == nil { + // Create event broadcaster + logger.Debug("Creating event broadcaster") + eventBroadcaster := record.NewBroadcaster() + watches := []watch.Interface{ + eventBroadcaster.StartLogging(logger.Named("event-broadcaster").Infof), + eventBroadcaster.StartRecordingToSink( + &v1.EventSinkImpl{Interface: kubeclient.Get(ctx).CoreV1().Events("")}), + } + recorder = eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: agentName}) + go func() { + <-ctx.Done() + for _, w := range watches { + w.Stop() + } + }() + } + + return recorder +} + +func init() { + versionedscheme.AddToScheme(scheme.Scheme) +} diff --git a/pkg/client/injection/reconciler/pipeline/v1beta1/taskrun/reconciler.go b/pkg/client/injection/reconciler/pipeline/v1beta1/taskrun/reconciler.go new file mode 100644 index 00000000000..f48a21471c9 --- /dev/null +++ b/pkg/client/injection/reconciler/pipeline/v1beta1/taskrun/reconciler.go @@ -0,0 +1,352 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package taskrun + +import ( + context "context" + json "encoding/json" + reflect "reflect" + + v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" + pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1" + zap "go.uber.org/zap" + v1 "k8s.io/api/core/v1" + equality "k8s.io/apimachinery/pkg/api/equality" + errors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + sets "k8s.io/apimachinery/pkg/util/sets" + cache "k8s.io/client-go/tools/cache" + record "k8s.io/client-go/tools/record" + controller "knative.dev/pkg/controller" + logging "knative.dev/pkg/logging" + reconciler "knative.dev/pkg/reconciler" +) + +// Interface defines the strongly typed interfaces to be implemented by a +// controller reconciling v1beta1.TaskRun. +type Interface interface { + // ReconcileKind implements custom logic to reconcile v1beta1.TaskRun. Any changes + // to the objects .Status or .Finalizers will be propagated to the stored + // object. It is recommended that implementors do not call any update calls + // for the Kind inside of ReconcileKind, it is the responsibility of the calling + // controller to propagate those properties. The resource passed to ReconcileKind + // will always have an empty deletion timestamp. 
+ ReconcileKind(ctx context.Context, o *v1beta1.TaskRun) reconciler.Event +} + +// Finalizer defines the strongly typed interfaces to be implemented by a +// controller finalizing v1beta1.TaskRun. +type Finalizer interface { + // FinalizeKind implements custom logic to finalize v1beta1.TaskRun. Any changes + // to the objects .Status or .Finalizers will be ignored. Returning a nil or + // Normal type reconciler.Event will allow the finalizer to be deleted on + // the resource. The resource passed to FinalizeKind will always have a set + // deletion timestamp. + FinalizeKind(ctx context.Context, o *v1beta1.TaskRun) reconciler.Event +} + +// reconcilerImpl implements controller.Reconciler for v1beta1.TaskRun resources. +type reconcilerImpl struct { + // Client is used to write back status updates. + Client versioned.Interface + + // Listers index properties about resources + Lister pipelinev1beta1.TaskRunLister + + // Recorder is an event recorder for recording Event resources to the + // Kubernetes API. + Recorder record.EventRecorder + + // configStore allows for decorating a context with config maps. + // +optional + configStore reconciler.ConfigStore + + // reconciler is the implementation of the business logic of the resource. + reconciler Interface + + // finalizerName is the name of the finalizer to reconcile. + finalizerName string +} + +// Check that our Reconciler implements controller.Reconciler +var _ controller.Reconciler = (*reconcilerImpl)(nil) + +func NewReconciler(ctx context.Context, logger *zap.SugaredLogger, client versioned.Interface, lister pipelinev1beta1.TaskRunLister, recorder record.EventRecorder, r Interface, options ...controller.Options) controller.Reconciler { + // Check the options function input. It should be 0 or 1. + if len(options) > 1 { + logger.Fatalf("up to one options struct is supported, found %d", len(options)) + } + + rec := &reconcilerImpl{ + Client: client, + Lister: lister, + Recorder: recorder, + reconciler: r, + finalizerName: defaultFinalizerName, + } + + for _, opts := range options { + if opts.ConfigStore != nil { + rec.configStore = opts.ConfigStore + } + if opts.FinalizerName != "" { + rec.finalizerName = opts.FinalizerName + } + } + + return rec +} + +// Reconcile implements controller.Reconciler +func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { + logger := logging.FromContext(ctx) + + // If configStore is set, attach the frozen configuration to the context. + if r.configStore != nil { + ctx = r.configStore.ToContext(ctx) + } + + // Add the recorder to context. + ctx = controller.WithEventRecorder(ctx, r.Recorder) + + // Convert the namespace/name string into a distinct namespace and name + + namespace, name, err := cache.SplitMetaNamespaceKey(key) + + if err != nil { + logger.Errorf("invalid resource key: %s", key) + return nil + } + + // Get the resource with this namespace/name. + + getter := r.Lister.TaskRuns(namespace) + + original, err := getter.Get(name) + + if errors.IsNotFound(err) { + // The resource may no longer exist, in which case we stop processing. + logger.Debugf("resource %q no longer exists", key) + return nil + } else if err != nil { + return err + } + + // Don't modify the informers copy. + resource := original.DeepCopy() + + var reconcileEvent reconciler.Event + if resource.GetDeletionTimestamp().IsZero() { + // Append the target method to the logger. 
+ logger = logger.With(zap.String("targetMethod", "ReconcileKind")) + + // Set and update the finalizer on resource if r.reconciler + // implements Finalizer. + if resource, err = r.setFinalizerIfFinalizer(ctx, resource); err != nil { + logger.Warnw("Failed to set finalizers", zap.Error(err)) + } + + // Reconcile this copy of the resource and then write back any status + // updates regardless of whether the reconciliation errored out. + reconcileEvent = r.reconciler.ReconcileKind(ctx, resource) + + } else if fin, ok := r.reconciler.(Finalizer); ok { + // Append the target method to the logger. + logger = logger.With(zap.String("targetMethod", "FinalizeKind")) + + // For finalizing reconcilers, if this resource being marked for deletion + // and reconciled cleanly (nil or normal event), remove the finalizer. + reconcileEvent = fin.FinalizeKind(ctx, resource) + if resource, err = r.clearFinalizer(ctx, resource, reconcileEvent); err != nil { + logger.Warnw("Failed to clear finalizers", zap.Error(err)) + } + } + + // Synchronize the status. + if equality.Semantic.DeepEqual(original.Status, resource.Status) { + // If we didn't change anything then don't call updateStatus. + // This is important because the copy we loaded from the injectionInformer's + // cache may be stale and we don't want to overwrite a prior update + // to status with this stale state. + } else if err = r.updateStatus(original, resource); err != nil { + logger.Warnw("Failed to update resource status", zap.Error(err)) + r.Recorder.Eventf(resource, v1.EventTypeWarning, "UpdateFailed", + "Failed to update status for %q: %v", resource.Name, err) + return err + } + + // Report the reconciler event, if any. + if reconcileEvent != nil { + var event *reconciler.ReconcilerEvent + if reconciler.EventAs(reconcileEvent, &event) { + logger.Infow("Returned an event", zap.Any("event", reconcileEvent)) + r.Recorder.Eventf(resource, event.EventType, event.Reason, event.Format, event.Args...) + + // the event was wrapped inside an error, consider the reconciliation as failed + if _, isEvent := reconcileEvent.(*reconciler.ReconcilerEvent); !isEvent { + return reconcileEvent + } + return nil + } + + logger.Errorw("Returned an error", zap.Error(reconcileEvent)) + r.Recorder.Event(resource, v1.EventTypeWarning, "InternalError", reconcileEvent.Error()) + return reconcileEvent + } + + return nil +} + +func (r *reconcilerImpl) updateStatus(existing *v1beta1.TaskRun, desired *v1beta1.TaskRun) error { + existing = existing.DeepCopy() + return reconciler.RetryUpdateConflicts(func(attempts int) (err error) { + // The first iteration tries to use the injectionInformer's state, subsequent attempts fetch the latest state via API. + if attempts > 0 { + + getter := r.Client.TektonV1beta1().TaskRuns(desired.Namespace) + + existing, err = getter.Get(desired.Name, metav1.GetOptions{}) + if err != nil { + return err + } + } + + // If there's nothing to update, just return. + if reflect.DeepEqual(existing.Status, desired.Status) { + return nil + } + + existing.Status = desired.Status + + updater := r.Client.TektonV1beta1().TaskRuns(existing.Namespace) + + _, err = updater.UpdateStatus(existing) + return err + }) +} + +// updateFinalizersFiltered will update the Finalizers of the resource. +// TODO: this method could be generic and sync all finalizers. For now it only +// updates defaultFinalizerName or its override. 
+func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource *v1beta1.TaskRun) (*v1beta1.TaskRun, error) { + + getter := r.Lister.TaskRuns(resource.Namespace) + + actual, err := getter.Get(resource.Name) + if err != nil { + return resource, err + } + + // Don't modify the informers copy. + existing := actual.DeepCopy() + + var finalizers []string + + // If there's nothing to update, just return. + existingFinalizers := sets.NewString(existing.Finalizers...) + desiredFinalizers := sets.NewString(resource.Finalizers...) + + if desiredFinalizers.Has(r.finalizerName) { + if existingFinalizers.Has(r.finalizerName) { + // Nothing to do. + return resource, nil + } + // Add the finalizer. + finalizers = append(existing.Finalizers, r.finalizerName) + } else { + if !existingFinalizers.Has(r.finalizerName) { + // Nothing to do. + return resource, nil + } + // Remove the finalizer. + existingFinalizers.Delete(r.finalizerName) + finalizers = existingFinalizers.List() + } + + mergePatch := map[string]interface{}{ + "metadata": map[string]interface{}{ + "finalizers": finalizers, + "resourceVersion": existing.ResourceVersion, + }, + } + + patch, err := json.Marshal(mergePatch) + if err != nil { + return resource, err + } + + patcher := r.Client.TektonV1beta1().TaskRuns(resource.Namespace) + + resource, err = patcher.Patch(resource.Name, types.MergePatchType, patch) + if err != nil { + r.Recorder.Eventf(resource, v1.EventTypeWarning, "FinalizerUpdateFailed", + "Failed to update finalizers for %q: %v", resource.Name, err) + } else { + r.Recorder.Eventf(resource, v1.EventTypeNormal, "FinalizerUpdate", + "Updated %q finalizers", resource.GetName()) + } + return resource, err +} + +func (r *reconcilerImpl) setFinalizerIfFinalizer(ctx context.Context, resource *v1beta1.TaskRun) (*v1beta1.TaskRun, error) { + if _, ok := r.reconciler.(Finalizer); !ok { + return resource, nil + } + + finalizers := sets.NewString(resource.Finalizers...) + + // If this resource is not being deleted, mark the finalizer. + if resource.GetDeletionTimestamp().IsZero() { + finalizers.Insert(r.finalizerName) + } + + resource.Finalizers = finalizers.List() + + // Synchronize the finalizers filtered by r.finalizerName. + return r.updateFinalizersFiltered(ctx, resource) +} + +func (r *reconcilerImpl) clearFinalizer(ctx context.Context, resource *v1beta1.TaskRun, reconcileEvent reconciler.Event) (*v1beta1.TaskRun, error) { + if _, ok := r.reconciler.(Finalizer); !ok { + return resource, nil + } + if resource.GetDeletionTimestamp().IsZero() { + return resource, nil + } + + finalizers := sets.NewString(resource.Finalizers...) + + if reconcileEvent != nil { + var event *reconciler.ReconcilerEvent + if reconciler.EventAs(reconcileEvent, &event) { + if event.EventType == v1.EventTypeNormal { + finalizers.Delete(r.finalizerName) + } + } + } else { + finalizers.Delete(r.finalizerName) + } + + resource.Finalizers = finalizers.List() + + // Synchronize the finalizers filtered by r.finalizerName. 
+ return r.updateFinalizersFiltered(ctx, resource) +} diff --git a/pkg/client/injection/reconciler/pipeline/v1beta1/taskrun/stub/controller.go b/pkg/client/injection/reconciler/pipeline/v1beta1/taskrun/stub/controller.go new file mode 100644 index 00000000000..5be1ff8ef17 --- /dev/null +++ b/pkg/client/injection/reconciler/pipeline/v1beta1/taskrun/stub/controller.go @@ -0,0 +1,54 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package taskrun + +import ( + context "context" + + taskrun "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/taskrun" + v1beta1taskrun "github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1beta1/taskrun" + configmap "knative.dev/pkg/configmap" + controller "knative.dev/pkg/controller" + logging "knative.dev/pkg/logging" +) + +// TODO: PLEASE COPY AND MODIFY THIS FILE AS A STARTING POINT + +// NewController creates a Reconciler for TaskRun and returns the result of NewImpl. +func NewController( + ctx context.Context, + cmw configmap.Watcher, +) *controller.Impl { + logger := logging.FromContext(ctx) + + taskrunInformer := taskrun.Get(ctx) + + // TODO: setup additional informers here. + + r := &Reconciler{} + impl := v1beta1taskrun.NewImpl(ctx, r) + + logger.Info("Setting up event handlers.") + + taskrunInformer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue)) + + // TODO: add additional informer event handlers here. + + return impl +} diff --git a/pkg/client/injection/reconciler/pipeline/v1beta1/taskrun/stub/reconciler.go b/pkg/client/injection/reconciler/pipeline/v1beta1/taskrun/stub/reconciler.go new file mode 100644 index 00000000000..1d410321b9b --- /dev/null +++ b/pkg/client/injection/reconciler/pipeline/v1beta1/taskrun/stub/reconciler.go @@ -0,0 +1,66 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package taskrun + +import ( + context "context" + + v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + taskrun "github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1beta1/taskrun" + v1 "k8s.io/api/core/v1" + reconciler "knative.dev/pkg/reconciler" +) + +// TODO: PLEASE COPY AND MODIFY THIS FILE AS A STARTING POINT + +// newReconciledNormal makes a new reconciler event with event type Normal, and +// reason TaskRunReconciled. 
+func newReconciledNormal(namespace, name string) reconciler.Event { + return reconciler.NewEvent(v1.EventTypeNormal, "TaskRunReconciled", "TaskRun reconciled: \"%s/%s\"", namespace, name) +} + +// Reconciler implements controller.Reconciler for TaskRun resources. +type Reconciler struct { + // TODO: add additional requirements here. +} + +// Check that our Reconciler implements Interface +var _ taskrun.Interface = (*Reconciler)(nil) + +// Optionally check that our Reconciler implements Finalizer +//var _ taskrun.Finalizer = (*Reconciler)(nil) + +// ReconcileKind implements Interface.ReconcileKind. +func (r *Reconciler) ReconcileKind(ctx context.Context, o *v1beta1.TaskRun) reconciler.Event { + // TODO: use this if the resource implements InitializeConditions. + // o.Status.InitializeConditions() + + // TODO: add custom reconciliation logic here. + + // TODO: use this if the object has .status.ObservedGeneration. + // o.Status.ObservedGeneration = o.Generation + return newReconciledNormal(o.Namespace, o.Name) +} + +// Optionally, use FinalizeKind to add finalizers. FinalizeKind will be called +// when the resource is deleted. +//func (r *Reconciler) FinalizeKind(ctx context.Context, o *v1beta1.TaskRun) reconciler.Event { +// // TODO: add custom finalization logic here. +// return nil +//} diff --git a/pkg/reconciler/pipelinerun/pipelinerun_test.go b/pkg/reconciler/pipelinerun/pipelinerun_test.go index 42d7f7e65c4..cb6da1a68da 100644 --- a/pkg/reconciler/pipelinerun/pipelinerun_test.go +++ b/pkg/reconciler/pipelinerun/pipelinerun_test.go @@ -47,9 +47,12 @@ import ( "k8s.io/apimachinery/pkg/runtime" k8stesting "k8s.io/client-go/testing" ktesting "k8s.io/client-go/testing" + "k8s.io/client-go/tools/record" "knative.dev/pkg/apis" duckv1beta1 "knative.dev/pkg/apis/duck/v1beta1" "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + "knative.dev/pkg/logging" ) var ( @@ -80,13 +83,15 @@ func getRunName(pr *v1beta1.PipelineRun) string { func getPipelineRunController(t *testing.T, d test.Data) (test.Assets, func()) { //unregisterMetrics() ctx, _ := ttesting.SetupFakeContext(t) + ctx, cancel := context.WithCancel(ctx) c, informers := test.SeedTestData(t, ctx, d) configMapWatcher := configmap.NewInformedWatcher(c.Kube, system.GetNamespace()) - ctx, cancel := context.WithCancel(ctx) return test.Assets{ + Logger: logging.FromContext(ctx), Controller: NewController(namespace, images)(ctx, configMapWatcher), Clients: c, Informers: informers, + Recorder: controller.GetEventRecorder(ctx).(*record.FakeRecorder), }, cancel } diff --git a/pkg/reconciler/taskrun/controller.go b/pkg/reconciler/taskrun/controller.go index f84d38e8002..7dbbafa055e 100644 --- a/pkg/reconciler/taskrun/controller.go +++ b/pkg/reconciler/taskrun/controller.go @@ -18,7 +18,6 @@ package taskrun import ( "context" - "time" "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/pipeline" @@ -27,6 +26,7 @@ import ( clustertaskinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/clustertask" taskinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/task" taskruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/taskrun" + taskrunreconciler "github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1beta1/taskrun" resourceinformer "github.com/tektoncd/pipeline/pkg/client/resource/injection/informers/resource/v1alpha1/pipelineresource" "github.com/tektoncd/pipeline/pkg/pod" 
"github.com/tektoncd/pipeline/pkg/reconciler" @@ -41,10 +41,6 @@ import ( "knative.dev/pkg/tracker" ) -const ( - resyncPeriod = 10 * time.Hour -) - // NewController instantiates a new controller.Impl from knative.dev/pkg/controller func NewController(namespace string, images pipeline.Images) func(context.Context, configmap.Watcher) *controller.Impl { return func(ctx context.Context, cmw configmap.Watcher) *controller.Impl { @@ -86,7 +82,15 @@ func NewController(namespace string, images pipeline.Images) func(context.Contex entrypointCache: entrypointCache, pvcHandler: volumeclaim.NewPVCHandler(kubeclientset, logger), } - impl := controller.NewImpl(c, c.Logger, pipeline.TaskRunControllerName) + impl := taskrunreconciler.NewImpl(ctx, c, func(impl *controller.Impl) controller.Options { + configStore := config.NewStore(c.Logger.Named("config-store")) + configStore.WatchConfigs(cmw) + + return controller.Options{ + AgentName: pipeline.TaskRunControllerName, + ConfigStore: configStore, + } + }) timeoutHandler.SetTaskRunCallbackFunc(impl.Enqueue) timeoutHandler.CheckTimeouts(namespace, kubeclientset, pipelineclientset) @@ -104,9 +108,6 @@ func NewController(namespace string, images pipeline.Images) func(context.Contex Handler: controller.HandleAll(impl.EnqueueControllerOf), }) - c.configStore = config.NewStore(c.Logger.Named("config-store")) - c.configStore.WatchConfigs(cmw) - go metrics.ReportRunningTaskRuns(ctx, taskRunInformer.Lister()) return impl diff --git a/pkg/reconciler/taskrun/taskrun.go b/pkg/reconciler/taskrun/taskrun.go index e1fa4c79472..dbf6bb16f1d 100644 --- a/pkg/reconciler/taskrun/taskrun.go +++ b/pkg/reconciler/taskrun/taskrun.go @@ -30,6 +30,7 @@ import ( "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "github.com/tektoncd/pipeline/pkg/apis/resource" resourcev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1" + taskrunreconciler "github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1beta1/taskrun" listers "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1" resourcelisters "github.com/tektoncd/pipeline/pkg/client/resource/listers/resource/v1alpha1" "github.com/tektoncd/pipeline/pkg/contexts" @@ -41,16 +42,13 @@ import ( "github.com/tektoncd/pipeline/pkg/reconciler/volumeclaim" "github.com/tektoncd/pipeline/pkg/termination" "github.com/tektoncd/pipeline/pkg/workspace" - "go.uber.org/zap" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/equality" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/tools/cache" "knative.dev/pkg/apis" "knative.dev/pkg/configmap" - "knative.dev/pkg/controller" + pkgreconciler "knative.dev/pkg/reconciler" "knative.dev/pkg/tracker" ) @@ -82,36 +80,13 @@ type Reconciler struct { configStore configStore } -// Check that our Reconciler implements controller.Reconciler -var _ controller.Reconciler = (*Reconciler)(nil) +// Check that our Reconciler implements taskrunreconciler.Interface +var _ taskrunreconciler.Interface = (*Reconciler)(nil) // Reconcile compares the actual state with the desired, and attempts to // converge the two. It then updates the Status block of the Task Run // resource with the current status of the resource. 
-func (c *Reconciler) Reconcile(ctx context.Context, key string) error { - - // Convert the namespace/name string into a distinct namespace and name - namespace, name, err := cache.SplitMetaNamespaceKey(key) - if err != nil { - c.Logger.Errorf("invalid resource key: %s", key) - return nil - } - - ctx = c.configStore.ToContext(ctx) - - // Get the Task Run resource with this namespace/name - original, err := c.taskRunLister.TaskRuns(namespace).Get(name) - if k8serrors.IsNotFound(err) { - // The resource no longer exists, in which case we stop processing. - c.Logger.Infof("task run %q in work queue no longer exists", key) - return nil - } else if err != nil { - c.Logger.Errorf("Error retrieving TaskRun %q: %s", name, err) - return err - } - - // Don't modify the informer's copy. - tr := original.DeepCopy() +func (c *Reconciler) ReconcileKind(ctx context.Context, tr *v1beta1.TaskRun) pkgreconciler.Event { // If the TaskRun is just starting, this will also set the starttime, // from which the timeout will immediately begin counting down. @@ -139,7 +114,7 @@ func (c *Reconciler) Reconcile(ctx context.Context, key string) error { cloudEventErr := cloudevent.SendCloudEvents(tr, c.cloudEventClient, c.Logger) // Regardless of `err`, we must write back any status update that may have // been generated by `sendCloudEvents` - updateErr := c.updateStatusLabelsAndAnnotations(tr, original) + _, updateErr := c.updateLabelsAndAnnotations(tr) merr = multierror.Append(cloudEventErr, updateErr) if cloudEventErr != nil { // Let's keep timeouts and sidecars running as long as we're trying to @@ -161,7 +136,7 @@ func (c *Reconciler) Reconcile(ctx context.Context, key string) error { return merr.ErrorOrNil() } if err != nil { - c.Logger.Errorf("Error stopping sidecars for TaskRun %q: %v", name, err) + c.Logger.Errorf("Error stopping sidecars for TaskRun %q: %v", tr.Name, err) merr = multierror.Append(merr, err) } @@ -184,7 +159,7 @@ func (c *Reconciler) Reconcile(ctx context.Context, key string) error { before := tr.Status.GetCondition(apis.ConditionSucceeded) message := fmt.Sprintf("TaskRun %q was cancelled", tr.Name) err := c.failTaskRun(tr, v1beta1.TaskRunReasonCancelled, message) - return c.finishReconcileUpdateEmitEvents(tr, original, before, err) + return c.finishReconcileUpdateEmitEvents(tr, before, err) } // Check if the TaskRun has timed out; if it is, this will set its status @@ -193,7 +168,7 @@ func (c *Reconciler) Reconcile(ctx context.Context, key string) error { before := tr.Status.GetCondition(apis.ConditionSucceeded) message := fmt.Sprintf("TaskRun %q failed to finish within %q", tr.Name, tr.GetTimeout()) err := c.failTaskRun(tr, v1beta1.TaskRunReasonTimedOut, message) - return c.finishReconcileUpdateEmitEvents(tr, original, before, err) + return c.finishReconcileUpdateEmitEvents(tr, before, err) } // prepare fetches all required resources, validates them together with the @@ -204,7 +179,7 @@ func (c *Reconciler) Reconcile(ctx context.Context, key string) error { c.Logger.Errorf("TaskRun prepare error: %v", err.Error()) // We only return an error if update failed, otherwise we don't want to // reconcile an invalid TaskRun anymore - return c.finishReconcileUpdateEmitEvents(tr, original, nil, nil) + return c.finishReconcileUpdateEmitEvents(tr, nil, nil) } // Store the condition before reconcile @@ -216,13 +191,14 @@ func (c *Reconciler) Reconcile(ctx context.Context, key string) error { c.Logger.Errorf("Reconcile error: %v", err.Error()) } // Emit events (only when ConditionSucceeded was changed) - 
return c.finishReconcileUpdateEmitEvents(tr, original, before, err) + return c.finishReconcileUpdateEmitEvents(tr, before, err) } -func (c *Reconciler) finishReconcileUpdateEmitEvents(tr, original *v1beta1.TaskRun, beforeCondition *apis.Condition, previousError error) error { +func (c *Reconciler) finishReconcileUpdateEmitEvents(tr *v1beta1.TaskRun, beforeCondition *apis.Condition, previousError error) error { afterCondition := tr.Status.GetCondition(apis.ConditionSucceeded) events.Emit(c.Recorder, beforeCondition, afterCondition, tr) - err := c.updateStatusLabelsAndAnnotations(tr, original) + + _, err := c.updateLabelsAndAnnotations(tr) events.EmitError(c.Recorder, err, tr) return multierror.Append(previousError, err).ErrorOrNil() } @@ -418,47 +394,6 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1beta1.TaskRun, return nil } -// Push changes (if any) to the TaskRun status, labels and annotations to -// TaskRun definition in ectd -func (c *Reconciler) updateStatusLabelsAndAnnotations(tr, original *v1beta1.TaskRun) error { - if !equality.Semantic.DeepEqual(original.Status, tr.Status) { - // If we didn't change anything then don't call updateStatus. - // This is important because the copy we loaded from the informer's - // cache may be stale and we don't want to overwrite a prior update - // to status with this stale state. - if _, err := c.updateStatus(tr); err != nil { - c.Logger.Warn("Failed to update taskRun status", zap.Error(err)) - return err - } - } - - // When we update the status only, we use updateStatus to minimize the chances of - // racing any clients updating other parts of the Run, e.g. the spec or the labels. - // If we need to update the labels or annotations, we need to call Update with these - // changes explicitly. - if !reflect.DeepEqual(original.ObjectMeta.Labels, tr.ObjectMeta.Labels) || !reflect.DeepEqual(original.ObjectMeta.Annotations, tr.ObjectMeta.Annotations) { - if _, err := c.updateLabelsAndAnnotations(tr); err != nil { - c.Logger.Warn("Failed to update TaskRun labels/annotations", zap.Error(err)) - return err - } - } - - return nil -} - -func (c *Reconciler) updateStatus(taskrun *v1beta1.TaskRun) (*v1beta1.TaskRun, error) { - newtaskrun, err := c.taskRunLister.TaskRuns(taskrun.Namespace).Get(taskrun.Name) - if err != nil { - return nil, fmt.Errorf("error getting TaskRun %s when updating status: %w", taskrun.Name, err) - } - if !reflect.DeepEqual(taskrun.Status, newtaskrun.Status) { - newtaskrun = newtaskrun.DeepCopy() - newtaskrun.Status = taskrun.Status - return c.PipelineClientSet.TektonV1beta1().TaskRuns(taskrun.Namespace).UpdateStatus(newtaskrun) - } - return newtaskrun, nil -} - func (c *Reconciler) updateLabelsAndAnnotations(tr *v1beta1.TaskRun) (*v1beta1.TaskRun, error) { newTr, err := c.taskRunLister.TaskRuns(tr.Namespace).Get(tr.Name) if err != nil { @@ -727,8 +662,7 @@ func updateStoppedSidecarStatus(pod *corev1.Pod, tr *v1beta1.TaskRun, c *Reconci }) } } - _, err := c.updateStatus(tr) - return err + return nil } // apply VolumeClaimTemplates and return WorkspaceBindings were templates is translated to PersistentVolumeClaims diff --git a/pkg/reconciler/taskrun/taskrun_test.go b/pkg/reconciler/taskrun/taskrun_test.go index 7f7cc95f281..2fe53775d41 100644 --- a/pkg/reconciler/taskrun/taskrun_test.go +++ b/pkg/reconciler/taskrun/taskrun_test.go @@ -32,8 +32,10 @@ import ( resourcev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" podconvert 
"github.com/tektoncd/pipeline/pkg/pod" + "github.com/tektoncd/pipeline/pkg/reconciler" "github.com/tektoncd/pipeline/pkg/reconciler/events/cloudevent" ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing" + "github.com/tektoncd/pipeline/pkg/reconciler/volumeclaim" "github.com/tektoncd/pipeline/pkg/system" test "github.com/tektoncd/pipeline/test" "github.com/tektoncd/pipeline/test/diff" @@ -46,11 +48,12 @@ import ( k8sruntimeschema "k8s.io/apimachinery/pkg/runtime/schema" fakekubeclientset "k8s.io/client-go/kubernetes/fake" ktesting "k8s.io/client-go/testing" - "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "knative.dev/pkg/apis" duckv1beta1 "knative.dev/pkg/apis/duck/v1beta1" "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + "knative.dev/pkg/logging" ) const ( @@ -291,29 +294,22 @@ func getTaskRunController(t *testing.T, d test.Data) (test.Assets, func()) { //unregisterMetrics() ctx, _ := ttesting.SetupFakeContext(t) ctx, cancel := context.WithCancel(ctx) - cloudEventClientBehaviour := cloudevent.FakeClientBehaviour{ - SendSuccessfully: true, - } - ctx = cloudevent.WithClient(ctx, &cloudEventClientBehaviour) ensureConfigurationConfigMapsExist(&d) c, informers := test.SeedTestData(t, ctx, d) configMapWatcher := configmap.NewInformedWatcher(c.Kube, system.GetNamespace()) - controller := NewController(namespace, images)(ctx, configMapWatcher) - - stopCh := make(chan struct{}) - if err := configMapWatcher.Start(stopCh); err != nil { + ctl := NewController(namespace, images)(ctx, configMapWatcher) + if err := configMapWatcher.Start(ctx.Done()); err != nil { t.Fatalf("error starting configmap watcher: %v", err) } return test.Assets{ - Controller: controller, - Clients: c, - Informers: informers, - }, func() { - close(stopCh) - cancel() - } + Logger: logging.FromContext(ctx), + Controller: ctl, + Clients: c, + Informers: informers, + Recorder: controller.GetEventRecorder(ctx).(*record.FakeRecorder), + }, cancel } func checkEvents(fr *record.FakeRecorder, testName string, wantEvents []string) error { @@ -466,6 +462,7 @@ func TestReconcile_ExplicitDefaultSA(t *testing.T) { if saName == "" { saName = defaultSAName } + t.Logf("Creating SA %s in %s", saName, tc.taskRun.Namespace) if _, err := clients.Kube.CoreV1().ServiceAccounts(tc.taskRun.Namespace).Create(&corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: saName, @@ -482,12 +479,7 @@ func TestReconcile_ExplicitDefaultSA(t *testing.T) { t.Errorf("Expected actions to be logged in the kubeclient, got none") } - namespace, name, err := cache.SplitMetaNamespaceKey(tc.taskRun.Name) - if err != nil { - t.Errorf("Invalid resource key: %v", err) - } - - tr, err := clients.Pipeline.TektonV1beta1().TaskRuns(namespace).Get(name, metav1.GetOptions{}) + tr, err := clients.Pipeline.TektonV1beta1().TaskRuns(tc.taskRun.Namespace).Get(tc.taskRun.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("getting updated taskrun: %v", err) } @@ -663,12 +655,7 @@ func TestReconcile_FeatureFlags(t *testing.T) { t.Errorf("Expected actions to be logged in the kubeclient, got none") } - namespace, name, err := cache.SplitMetaNamespaceKey(tc.taskRun.Name) - if err != nil { - t.Errorf("Invalid resource key: %v", err) - } - - tr, err := clients.Pipeline.TektonV1beta1().TaskRuns(namespace).Get(name, metav1.GetOptions{}) + tr, err := clients.Pipeline.TektonV1beta1().TaskRuns(tc.taskRun.Namespace).Get(tc.taskRun.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("getting updated taskrun: %v", err) } @@ -1280,22 +1267,14 @@ func 
TestReconcile(t *testing.T) { t.Fatal(err) } - reconciler := c.Reconciler.(*Reconciler) - fr := reconciler.Recorder.(*record.FakeRecorder) - - if err := reconciler.Reconcile(context.Background(), getRunName(tc.taskRun)); err != nil { + if err := c.Reconciler.Reconcile(context.Background(), getRunName(tc.taskRun)); err != nil { t.Errorf("expected no error. Got error %v", err) } if len(clients.Kube.Actions()) == 0 { t.Errorf("Expected actions to be logged in the kubeclient, got none") } - namespace, name, err := cache.SplitMetaNamespaceKey(tc.taskRun.Name) - if err != nil { - t.Errorf("Invalid resource key: %v", err) - } - - tr, err := clients.Pipeline.TektonV1beta1().TaskRuns(namespace).Get(name, metav1.GetOptions{}) + tr, err := clients.Pipeline.TektonV1beta1().TaskRuns(tc.taskRun.Namespace).Get(tc.taskRun.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("getting updated taskrun: %v", err) } @@ -1328,7 +1307,7 @@ func TestReconcile(t *testing.T) { t.Fatalf("Expected actions to be logged in the kubeclient, got none") } - err = checkEvents(fr, tc.name, tc.wantEvents) + err = checkEvents(testAssets.Recorder, tc.name, tc.wantEvents) if !(err == nil) { t.Errorf(err.Error()) } @@ -1515,9 +1494,7 @@ func TestReconcileInvalidTaskRuns(t *testing.T) { defer cancel() c := testAssets.Controller clients := testAssets.Clients - reconciler := c.Reconciler.(*Reconciler) - fr := reconciler.Recorder.(*record.FakeRecorder) - err := reconciler.Reconcile(context.Background(), getRunName(tc.taskRun)) + err := c.Reconciler.Reconcile(context.Background(), getRunName(tc.taskRun)) // When a TaskRun is invalid and can't run, we don't want to return an error because // an error will tell the Reconciler to keep trying to reconcile; instead we want to stop @@ -1532,7 +1509,7 @@ func TestReconcileInvalidTaskRuns(t *testing.T) { t.Errorf("expected 3 actions (first: list namespaces) created by the reconciler, got %d. 
Actions: %#v", len(actions), actions) } - err = checkEvents(fr, tc.name, tc.wantEvents) + err = checkEvents(testAssets.Recorder, tc.name, tc.wantEvents) if !(err == nil) { t.Errorf(err.Error()) } @@ -1624,10 +1601,8 @@ func TestReconcilePodUpdateStatus(t *testing.T) { defer cancel() c := testAssets.Controller clients := testAssets.Clients - reconciler := c.Reconciler.(*Reconciler) - fr := reconciler.Recorder.(*record.FakeRecorder) - if err := reconciler.Reconcile(context.Background(), getRunName(taskRun)); err != nil { + if err := c.Reconciler.Reconcile(context.Background(), getRunName(taskRun)); err != nil { t.Fatalf("Unexpected error when Reconcile() : %v", err) } newTr, err := clients.Pipeline.TektonV1beta1().TaskRuns(taskRun.Namespace).Get(taskRun.Name, metav1.GetOptions{}) @@ -1677,7 +1652,7 @@ func TestReconcilePodUpdateStatus(t *testing.T) { "Normal Running Not all Steps", "Normal Succeeded", } - err = checkEvents(fr, "test-reconcile-pod-updateStatus", wantEvents) + err = checkEvents(testAssets.Recorder, "test-reconcile-pod-updateStatus", wantEvents) if !(err == nil) { t.Errorf(err.Error()) } @@ -1736,10 +1711,8 @@ func TestReconcileOnCancelledTaskRun(t *testing.T) { defer cancel() c := testAssets.Controller clients := testAssets.Clients - reconciler := c.Reconciler.(*Reconciler) - fr := reconciler.Recorder.(*record.FakeRecorder) - if err := reconciler.Reconcile(context.Background(), getRunName(taskRun)); err != nil { + if err := c.Reconciler.Reconcile(context.Background(), getRunName(taskRun)); err != nil { t.Fatalf("Unexpected error when reconciling completed TaskRun : %v", err) } newTr, err := clients.Pipeline.TektonV1beta1().TaskRuns(taskRun.Namespace).Get(taskRun.Name, metav1.GetOptions{}) @@ -1761,7 +1734,7 @@ func TestReconcileOnCancelledTaskRun(t *testing.T) { "Normal Started", "Warning Failed TaskRun \"test-taskrun-run-cancelled\" was cancelled", } - err = checkEvents(fr, "test-reconcile-on-cancelled-taskrun", wantEvents) + err = checkEvents(testAssets.Recorder, "test-reconcile-on-cancelled-taskrun", wantEvents) if !(err == nil) { t.Errorf(err.Error()) } @@ -1848,8 +1821,6 @@ func TestReconcileTimeouts(t *testing.T) { defer cancel() c := testAssets.Controller clients := testAssets.Clients - reconciler := c.Reconciler.(*Reconciler) - fr := reconciler.Recorder.(*record.FakeRecorder) if err := c.Reconciler.Reconcile(context.Background(), getRunName(tc.taskRun)); err != nil { t.Fatalf("Unexpected error when reconciling completed TaskRun : %v", err) @@ -1862,7 +1833,7 @@ func TestReconcileTimeouts(t *testing.T) { if d := cmp.Diff(tc.expectedStatus, condition, ignoreLastTransitionTime); d != "" { t.Fatalf("Did not get expected condition %s", diff.PrintWantGot(d)) } - err = checkEvents(fr, tc.taskRun.Name, tc.wantEvents) + err = checkEvents(testAssets.Recorder, tc.taskRun.Name, tc.wantEvents) if !(err == nil) { t.Errorf(err.Error()) } @@ -1885,10 +1856,28 @@ func TestHandlePodCreationError(t *testing.T) { } testAssets, cancel := getTaskRunController(t, d) defer cancel() - c, ok := testAssets.Controller.Reconciler.(*Reconciler) - if !ok { - t.Errorf("failed to construct instance of taskrun reconciler") - return + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + // Use the test assets to create a *Reconciler directly for focused testing. 
+ opt := reconciler.Options{ + KubeClientSet: testAssets.Clients.Kube, + PipelineClientSet: testAssets.Clients.Pipeline, + Logger: testAssets.Logger, + Recorder: testAssets.Recorder, + } + c := &Reconciler{ + Base: reconciler.NewBase(opt, taskRunAgentName, images), + taskRunLister: testAssets.Informers.TaskRun.Lister(), + taskLister: testAssets.Informers.Task.Lister(), + clusterTaskLister: testAssets.Informers.ClusterTask.Lister(), + resourceLister: testAssets.Informers.PipelineResource.Lister(), + timeoutHandler: reconciler.NewTimeoutHandler(ctx.Done(), opt.Logger), + cloudEventClient: testAssets.Clients.CloudEvents, + metrics: nil, // Not used + entrypointCache: nil, // Not used + pvcHandler: volumeclaim.NewPVCHandler(opt.KubeClientSet, opt.Logger), } // Prevent backoff timer from starting @@ -2097,12 +2086,8 @@ func TestReconcileCloudEvents(t *testing.T) { if err := c.Reconciler.Reconcile(context.Background(), getRunName(tc.taskRun)); err != nil { t.Errorf("expected no error. Got error %v", err) } - namespace, name, err := cache.SplitMetaNamespaceKey(tc.taskRun.Name) - if err != nil { - t.Errorf("Invalid resource key: %v", err) - } - tr, err := clients.Pipeline.TektonV1beta1().TaskRuns(namespace).Get(name, metav1.GetOptions{}) + tr, err := clients.Pipeline.TektonV1beta1().TaskRuns(tc.taskRun.Namespace).Get(tc.taskRun.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("getting updated taskrun: %v", err) } @@ -2603,10 +2588,9 @@ func TestReconcileTaskResourceResolutionAndValidation(t *testing.T) { testAssets, cancel := getTaskRunController(t, tt.d) defer cancel() clients := testAssets.Clients - reconciler := testAssets.Controller.Reconciler.(*Reconciler) - fr := reconciler.Recorder.(*record.FakeRecorder) + c := testAssets.Controller - if err := reconciler.Reconcile(context.Background(), getRunName(tt.d.TaskRuns[0])); err != nil { + if err := c.Reconciler.Reconcile(context.Background(), getRunName(tt.d.TaskRuns[0])); err != nil { t.Errorf("expected no error reconciling valid TaskRun but got %v", err) } @@ -2621,7 +2605,7 @@ func TestReconcileTaskResourceResolutionAndValidation(t *testing.T) { } } - err = checkEvents(fr, tt.desc, tt.wantEvents) + err = checkEvents(testAssets.Recorder, tt.desc, tt.wantEvents) if !(err == nil) { t.Errorf(err.Error()) } @@ -2739,11 +2723,27 @@ func TestFailTaskRun(t *testing.T) { testAssets, cancel := getTaskRunController(t, d) defer cancel() - c, ok := testAssets.Controller.Reconciler.(*Reconciler) - if !ok { - t.Errorf("failed to construct instance of taskrun reconciler") - return + + // Use the test assets to create a *Reconciler directly for focused testing. 
+ opt := reconciler.Options{ + KubeClientSet: testAssets.Clients.Kube, + PipelineClientSet: testAssets.Clients.Pipeline, + Logger: testAssets.Logger, + Recorder: testAssets.Recorder, + } + c := &Reconciler{ + Base: reconciler.NewBase(opt, taskRunAgentName, images), + taskRunLister: testAssets.Informers.TaskRun.Lister(), + taskLister: testAssets.Informers.Task.Lister(), + clusterTaskLister: testAssets.Informers.ClusterTask.Lister(), + resourceLister: testAssets.Informers.PipelineResource.Lister(), + timeoutHandler: nil, // Not used + cloudEventClient: testAssets.Clients.CloudEvents, + metrics: nil, // Not used + entrypointCache: nil, // Not used + pvcHandler: volumeclaim.NewPVCHandler(opt.KubeClientSet, opt.Logger), } + err := c.failTaskRun(tc.taskRun, tc.reason, tc.message) if err != nil { t.Fatal(err) diff --git a/pkg/reconciler/testing/logger.go b/pkg/reconciler/testing/logger.go index bf2d867c618..66c3d3ceb9f 100644 --- a/pkg/reconciler/testing/logger.go +++ b/pkg/reconciler/testing/logger.go @@ -4,6 +4,7 @@ import ( "context" "testing" + "github.com/tektoncd/pipeline/pkg/reconciler/events/cloudevent" "go.uber.org/zap" "knative.dev/pkg/controller" "knative.dev/pkg/logging" @@ -12,6 +13,10 @@ import ( func SetupFakeContext(t *testing.T) (context.Context, []controller.Informer) { ctx, informer := rtesting.SetupFakeContext(t) + cloudEventClientBehaviour := cloudevent.FakeClientBehaviour{ + SendSuccessfully: true, + } + ctx = cloudevent.WithClient(ctx, &cloudEventClientBehaviour) return WithLogger(ctx, t), informer } diff --git a/test/controller.go b/test/controller.go index 5708a1cf19c..b9c88b71611 100644 --- a/test/controller.go +++ b/test/controller.go @@ -39,6 +39,8 @@ import ( resourceinformersv1alpha1 "github.com/tektoncd/pipeline/pkg/client/resource/informers/externalversions/resource/v1alpha1" fakeresourceclient "github.com/tektoncd/pipeline/pkg/client/resource/injection/client/fake" fakeresourceinformer "github.com/tektoncd/pipeline/pkg/client/resource/injection/informers/resource/v1alpha1/pipelineresource/fake" + cloudeventclient "github.com/tektoncd/pipeline/pkg/reconciler/events/cloudevent" + "go.uber.org/zap" corev1 "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -47,6 +49,7 @@ import ( fakekubeclientset "k8s.io/client-go/kubernetes/fake" ktesting "k8s.io/client-go/testing" "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" fakekubeclient "knative.dev/pkg/client/injection/kube/client/fake" fakeconfigmapinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/configmap/fake" fakepodinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/pod/fake" @@ -70,9 +73,10 @@ type Data struct { // Clients holds references to clients which are useful for reconciler tests. type Clients struct { - Pipeline *fakepipelineclientset.Clientset - Resource *fakeresourceclientset.Clientset - Kube *fakekubeclientset.Clientset + Pipeline *fakepipelineclientset.Clientset + Resource *fakeresourceclientset.Clientset + Kube *fakekubeclientset.Clientset + CloudEvents cloudeventclient.CEClient } // Informers holds references to informers which are useful for reconciler tests. @@ -90,9 +94,11 @@ type Informers struct { // Assets holds references to the controller, logs, clients, and informers. 
type Assets struct { + Logger *zap.SugaredLogger Controller *controller.Impl Clients Clients Informers Informers + Recorder *record.FakeRecorder } func AddToInformer(t *testing.T, store cache.Store) func(ktesting.Action) (bool, runtime.Object, error) { @@ -143,9 +149,10 @@ func AddToInformer(t *testing.T, store cache.Store) func(ktesting.Action) (bool, // nolint: golint func SeedTestData(t *testing.T, ctx context.Context, d Data) (Clients, Informers) { c := Clients{ - Kube: fakekubeclient.Get(ctx), - Pipeline: fakepipelineclient.Get(ctx), - Resource: fakeresourceclient.Get(ctx), + Kube: fakekubeclient.Get(ctx), + Pipeline: fakepipelineclient.Get(ctx), + Resource: fakeresourceclient.Get(ctx), + CloudEvents: cloudeventclient.Get(ctx), } // Every time a resource is modified, change the metadata.resourceVersion. PrependResourceVersionReactor(&c.Pipeline.Fake) @@ -227,10 +234,9 @@ func SeedTestData(t *testing.T, ctx context.Context, d Data) (Clients, Informers t.Fatal(err) } } + c.Kube.PrependReactor("*", "configmaps", AddToInformer(t, i.ConfigMap.Informer().GetIndexer())) for _, cm := range d.ConfigMaps { - if err := i.ConfigMap.Informer().GetIndexer().Add(cm); err != nil { - t.Fatal(err) - } + cm := cm.DeepCopy() // Avoid assumptions that the informer's copy is modified. if _, err := c.Kube.CoreV1().ConfigMaps(cm.Namespace).Create(cm); err != nil { t.Fatal(err) }
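
Purely as a reader's aid and not part of the patch: the changes above replace the hand-rolled `Reconcile(ctx, key)` loop with the injection-generated reconciler (the `+genreconciler` marker on `TaskRun` plus the generated `pkg/client/injection/reconciler/.../taskrun` package), so the TaskRun reconciler now only implements `ReconcileKind` while the generated code handles key splitting, lister lookups, status write-back, and finalizers. A condensed sketch of that pattern, using the package paths introduced in this diff, is below; the `Reconciler` and `NewController` names and the `package example` wrapper are illustrative, not the actual Tekton wiring (which lives in `pkg/reconciler/taskrun/controller.go` and `taskrun.go` above).

```go
package example

import (
	"context"

	"github.com/tektoncd/pipeline/pkg/apis/config"
	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
	taskruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/taskrun"
	taskrunreconciler "github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1beta1/taskrun"
	"knative.dev/pkg/configmap"
	"knative.dev/pkg/controller"
	"knative.dev/pkg/logging"
	pkgreconciler "knative.dev/pkg/reconciler"
)

// Reconciler only needs ReconcileKind: the generated reconcilerImpl fetches the
// TaskRun, deep-copies it, and propagates any .Status changes after this returns.
type Reconciler struct{}

// Compile-time check against the generated interface, mirroring the patch.
var _ taskrunreconciler.Interface = (*Reconciler)(nil)

func (r *Reconciler) ReconcileKind(ctx context.Context, tr *v1beta1.TaskRun) pkgreconciler.Event {
	// Business logic only: mutate tr.Status here; no Get/UpdateStatus calls needed.
	return nil
}

// NewController wires the reconciler through the generated NewImpl, passing the
// config store and agent name via controller.Options, as the patch does for TaskRun.
func NewController(ctx context.Context, cmw configmap.Watcher) *controller.Impl {
	r := &Reconciler{}
	impl := taskrunreconciler.NewImpl(ctx, r, func(impl *controller.Impl) controller.Options {
		configStore := config.NewStore(logging.FromContext(ctx).Named("config-store"))
		configStore.WatchConfigs(cmw)
		return controller.Options{
			AgentName:   "taskrun-controller",
			ConfigStore: configStore,
		}
	})

	// Requeue TaskRuns on informer events, as in the generated stub controller.
	taskruninformer.Get(ctx).Informer().AddEventHandler(controller.HandleAll(impl.Enqueue))
	return impl
}
```

This also explains the test-side changes in the diff: since the reconciler no longer owns a `Recorder` field, tests pull the `*record.FakeRecorder` from the context via `controller.GetEventRecorder(ctx)` and expose it (together with the logger and fake cloud-event client) on `test.Assets`.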