From f7b8dc88a7770abbeaf7ec6ed47f2ed3744e6e19 Mon Sep 17 00:00:00 2001 From: Cameron Garrison Date: Tue, 23 Jan 2024 06:05:10 -0500 Subject: [PATCH 01/32] feat(templates): process Feature templates in memory (#797) Co-authored-by: Bartosz Majsak Co-authored-by: Wen Zhou (cherry picked from commit 0e9c4ed46cd6b399dc58ce6ef4fa82f9826d0309) --- components/kserve/serverless_setup.go | 95 +++++----- .../dscinitialization/servicemesh_setup.go | 39 +++-- pkg/feature/builder.go | 6 +- pkg/feature/feature.go | 6 +- .../integration/features/features_int_test.go | 165 +----------------- .../features/servicemesh_feature_test.go | 110 ++++++++++++ 6 files changed, 185 insertions(+), 236 deletions(-) diff --git a/components/kserve/serverless_setup.go b/components/kserve/serverless_setup.go index d57ce6c6f55..782a3d06094 100644 --- a/components/kserve/serverless_setup.go +++ b/components/kserve/serverless_setup.go @@ -12,63 +12,46 @@ const ( templatesDir = "templates/serverless" ) -func (k *Kserve) configureServerlessFeatures() feature.DefinedFeatures { - return func(initializer *feature.FeaturesInitializer) error { - servingDeployment, err := feature.CreateFeature("serverless-serving-deployment"). - With(initializer.DSCInitializationSpec). - From(initializer.Source). - Manifests( - path.Join(templatesDir, "serving-install"), - ). - WithData(PopulateComponentSettings(k)). - PreConditions( - serverless.EnsureServerlessOperatorInstalled, - serverless.EnsureServerlessAbsent, - servicemesh.EnsureServiceMeshInstalled, - feature.CreateNamespaceIfNotExists(serverless.KnativeServingNamespace), - ). - Load() - if err != nil { - return err - } - initializer.Features = append(initializer.Features, servingDeployment) - - servingNetIstioSecretFiltering, err := feature.CreateFeature("serverless-net-istio-secret-filtering"). - With(initializer.DSCInitializationSpec). - From(initializer.Source). - Manifests( - path.Join(templatesDir, "serving-net-istio-secret-filtering.patch.tmpl"), - ). - WithData(PopulateComponentSettings(k)). - PreConditions(serverless.EnsureServerlessServingDeployed). - PostConditions( - feature.WaitForPodsToBeReady(serverless.KnativeServingNamespace), - ). - Load() - if err != nil { - return err - } - initializer.Features = append(initializer.Features, servingNetIstioSecretFiltering) +func (k *Kserve) configureServerlessFeatures(s *feature.FeaturesInitializer) error { + servingDeployment, err := feature.CreateFeature("serverless-serving-deployment"). + For(s.DSCInitializationSpec). + Manifests( + path.Join(templatesDir, "serving-install"), + ). + WithData(PopulateComponentSettings(k)). + PreConditions( + serverless.EnsureServerlessOperatorInstalled, + serverless.EnsureServerlessAbsent, + servicemesh.EnsureServiceMeshInstalled, + feature.CreateNamespaceIfNotExists(knativeServingNamespace), + ). + PostConditions( + feature.WaitForPodsToBeReady(knativeServingNamespace), + ). + Load() + if err != nil { + return err + } + s.Features = append(s.Features, servingDeployment) - servingIstioGateways, err := feature.CreateFeature("serverless-serving-gateways"). - With(initializer.DSCInitializationSpec). - From(initializer.Source). - PreConditions(serverless.EnsureServerlessServingDeployed). - WithData( - serverless.ServingDefaultValues, - serverless.ServingIngressDomain, - PopulateComponentSettings(k), - ). - WithResources(serverless.ServingCertificateResource). - Manifests( - path.Join(templatesDir, "serving-istio-gateways"), - ). 
- Load() - if err != nil { - return err - } - initializer.Features = append(initializer.Features, servingIstioGateways) - return nil + servingIstioGateways, err := feature.CreateFeature("serverless-serving-gateways"). + For(s.DSCInitializationSpec). + PreConditions( + // Check serverless is installed + feature.WaitForResourceToBeCreated(knativeServingNamespace, gvr.KnativeServing), + ). + WithData( + serverless.ServingDefaultValues, + serverless.ServingIngressDomain, + PopulateComponentSettings(k), + ). + WithResources(serverless.ServingCertificateResource). + Manifests( + path.Join(templatesDir, "serving-istio-gateways"), + ). + Load() + if err != nil { + return err } } diff --git a/controllers/dscinitialization/servicemesh_setup.go b/controllers/dscinitialization/servicemesh_setup.go index e39ed64bfe7..48c72c612b0 100644 --- a/controllers/dscinitialization/servicemesh_setup.go +++ b/controllers/dscinitialization/servicemesh_setup.go @@ -62,22 +62,35 @@ func (r *DSCInitializationReconciler) removeServiceMesh(instance *dsciv1.DSCInit return nil } -func configureServiceMeshFeatures() feature.DefinedFeatures { - return func(initializer *feature.FeaturesInitializer) error { - serviceMeshSpec := initializer.DSCInitializationSpec.ServiceMesh +func configureServiceMeshFeatures(s *feature.FeaturesInitializer) error { + serviceMeshSpec := s.ServiceMesh + + smcpCreation, errSmcp := feature.CreateFeature("mesh-control-plane-creation"). + For(s.DSCInitializationSpec). + Manifests( + path.Join(templatesDir, "base", "create-smcp.tmpl"), + ). + PreConditions( + servicemesh.EnsureServiceMeshOperatorInstalled, + feature.CreateNamespaceIfNotExists(serviceMeshSpec.ControlPlane.Namespace), + ). + PostConditions( + feature.WaitForPodsToBeReady(serviceMeshSpec.ControlPlane.Namespace), + ). + Load() + if errSmcp != nil { + return errSmcp + } + s.Features = append(s.Features, smcpCreation) - smcpCreation, errSmcp := feature.CreateFeature("mesh-control-plane-creation"). - With(initializer.DSCInitializationSpec). - From(initializer.Source). - Manifests( - path.Join(templatesDir, "base", "create-smcp.tmpl"), - ). + if serviceMeshSpec.ControlPlane.MetricsCollection == "Istio" { + metricsCollection, errMetrics := feature.CreateFeature("mesh-metrics-collection"). + For(s.DSCInitializationSpec). PreConditions( - servicemesh.EnsureServiceMeshOperatorInstalled, - feature.CreateNamespaceIfNotExists(serviceMeshSpec.ControlPlane.Namespace), + servicemesh.EnsureServiceMeshInstalled, ). - PostConditions( - feature.WaitForPodsToBeReady(serviceMeshSpec.ControlPlane.Namespace), + Manifests( + path.Join(templatesDir, "metrics-collection"), ). Load() diff --git a/pkg/feature/builder.go b/pkg/feature/builder.go index 19cf04e45eb..40ba92b8bb8 100644 --- a/pkg/feature/builder.go +++ b/pkg/feature/builder.go @@ -3,7 +3,6 @@ package feature import ( "io/fs" - "github.com/hashicorp/go-multierror" "github.com/pkg/errors" apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/client-go/dynamic" @@ -27,9 +26,10 @@ type featureBuilder struct { fsys fs.FS } -func CreateFeature(name string) *featureName { //nolint:golint,revive //No need to export featureBuilder. - return &featureName{ +func CreateFeature(name string) *featureBuilder { //nolint:golint,revive //No need to export featureBuilder. 
+ return &featureBuilder{ name: name, + fsys: embeddedFiles, } } diff --git a/pkg/feature/feature.go b/pkg/feature/feature.go index 5ccb5d6f3a5..f1feae263d3 100644 --- a/pkg/feature/feature.go +++ b/pkg/feature/feature.go @@ -90,14 +90,14 @@ func (f *Feature) Apply() (err error) { return dataLoadErr } - phase = featurev1.ResourceCreation + // Create or update resources for _, resource := range f.resources { if err := resource(f); err != nil { return errors.WithStack(err) } } - phase = featurev1.ProcessTemplates + // Process and apply manifests for i := range f.manifests { if err := f.manifests[i].process(f.Spec); err != nil { return errors.WithStack(err) @@ -109,7 +109,7 @@ func (f *Feature) Apply() (err error) { return errors.WithStack(err) } - phase = featurev1.PostConditions + // Check all postconditions and collect errors for _, postcondition := range f.postconditions { multiErr = multierror.Append(multiErr, postcondition(f)) } diff --git a/tests/integration/features/features_int_test.go b/tests/integration/features/features_int_test.go index 4232d3996f8..1459ecaa55e 100644 --- a/tests/integration/features/features_int_test.go +++ b/tests/integration/features/features_int_test.go @@ -3,13 +3,11 @@ package features_test import ( "context" "embed" - "fmt" "os" "path" "path/filepath" "time" - conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -217,146 +215,18 @@ var _ = Describe("feature cleanup", func() { }) -var _ = Describe("feature trackers", func() { - Context("ensuring feature trackers indicate status and phase", func() { - - var ( - dsciSpec *dscv1.DSCInitializationSpec - source featurev1.Source - ) - - BeforeEach(func() { - dsciSpec = newDSCInitializationSpec("default") - source = envtestutil.NewSource(featurev1.DSCIType, "default") - }) - - It("should indicate successful installation in FeatureTracker", func() { - // given example CRD installed into env - name := "test-resources.openshift.io" - verificationFeature, err := feature.CreateFeature("crd-verification"). - With(dsciSpec). - From(source). - UsingConfig(envTest.Config). - PreConditions(feature.EnsureCRDIsInstalled(name)). - Load() - Expect(err).ToNot(HaveOccurred()) - - // when - Expect(verificationFeature.Apply()).To(Succeed()) - - // then - featureTracker := getFeatureTracker("default-crd-verification") - Expect(*featureTracker.Status.Conditions).To(ContainElement( - MatchFields(IgnoreExtras, Fields{ - "Type": Equal(conditionsv1.ConditionAvailable), - "Status": Equal(v1.ConditionTrue), - "Reason": Equal(string(featurev1.FeatureCreated)), - }), - )) - }) - - It("should indicate failure in preconditions", func() { - // given - name := "non-existing-resource.non-existing-group.io" - verificationFeature, err := feature.CreateFeature("crd-verification"). - With(dsciSpec). - From(source). - UsingConfig(envTest.Config). - PreConditions(feature.EnsureCRDIsInstalled(name)). 
- Load() - Expect(err).ToNot(HaveOccurred()) - - // when - Expect(verificationFeature.Apply()).ToNot(Succeed()) - - // then - featureTracker := getFeatureTracker("default-crd-verification") - Expect(*featureTracker.Status.Conditions).To(ContainElement( - MatchFields(IgnoreExtras, Fields{ - "Type": Equal(conditionsv1.ConditionDegraded), - "Status": Equal(v1.ConditionTrue), - "Reason": Equal(string(featurev1.PreConditions)), - }), - )) - }) - - It("should indicate failure in post-conditions", func() { - // given - verificationFeature, err := feature.CreateFeature("post-condition-failure"). - With(dsciSpec). - From(source). - UsingConfig(envTest.Config). - PostConditions(func(f *feature.Feature) error { - return fmt.Errorf("always fail") - }). - Load() - Expect(err).ToNot(HaveOccurred()) - - // when - Expect(verificationFeature.Apply()).ToNot(Succeed()) - - // then - featureTracker := getFeatureTracker("default-post-condition-failure") - Expect(*featureTracker.Status.Conditions).To(ContainElement( - MatchFields(IgnoreExtras, Fields{ - "Type": Equal(conditionsv1.ConditionDegraded), - "Status": Equal(v1.ConditionTrue), - "Reason": Equal(string(featurev1.PostConditions)), - }), - )) - }) - - It("should correctly indicate source in the feature tracker", func() { - verificationFeature, err := feature.CreateFeature("empty-feature"). - With(dsciSpec). - From(source). - UsingConfig(envTest.Config). - Load() - Expect(err).ToNot(HaveOccurred()) - - // when - Expect(verificationFeature.Apply()).To(Succeed()) - - // then - featureTracker := getFeatureTracker("default-empty-feature") - Expect(featureTracker.Spec.Source.Name).To(Equal("default")) - Expect(featureTracker.Spec.Source.Type).To(Equal(featurev1.DSCIType)) - }) - - It("should correctly indicate app namespace in the feature tracker", func() { - verificationFeature, err := feature.CreateFeature("empty-feature"). - With(dsciSpec). - From(source). - UsingConfig(envTest.Config). - Load() - Expect(err).ToNot(HaveOccurred()) - - // when - Expect(verificationFeature.Apply()).To(Succeed()) - - // then - featureTracker := getFeatureTracker("default-empty-feature") - Expect(featureTracker.Spec.AppNamespace).To(Equal("default")) - }) - - }) - -}) - var _ = Describe("Manifest sources", func() { Context("using various manifest sources", func() { var ( objectCleaner *envtestutil.Cleaner dsciSpec *dscv1.DSCInitializationSpec - source featurev1.Source namespace = "default" ) BeforeEach(func() { objectCleaner = envtestutil.CreateCleaner(envTestClient, envTest.Config, timeout, interval) dsciSpec = newDSCInitializationSpec(namespace) - source = envtestutil.NewSource(featurev1.DSCIType, "namespace") }) It("should be able to process an embedded template from the default location", func() { @@ -369,8 +239,7 @@ var _ = Describe("Manifest sources", func() { serviceMeshSpec.ControlPlane.Namespace = "service-ns" createService, err := feature.CreateFeature("create-control-plane"). - With(dsciSpec). - From(source). + For(dsciSpec). Manifests(path.Join(templatesDir, "serverless", "serving-istio-gateways", "local-gateway-svc.tmpl")). UsingConfig(envTest.Config). Load() @@ -393,8 +262,7 @@ var _ = Describe("Manifest sources", func() { defer objectCleaner.DeleteAll(ns) createGateway, err := feature.CreateFeature("create-gateway"). - With(dsciSpec). - From(source). + For(dsciSpec). Manifests(path.Join(templatesDir, "serverless", "serving-istio-gateways", "istio-local-gateway.yaml")). UsingConfig(envTest.Config). 
Load() @@ -412,8 +280,7 @@ var _ = Describe("Manifest sources", func() { It("should be able to process an embedded file from a non default location", func() { createNs, err := feature.CreateFeature("create-ns"). - With(dsciSpec). - From(source). + For(dsciSpec). ManifestSource(testEmbeddedFiles). Manifests(path.Join(templatesDir, "namespace.yaml")). UsingConfig(envTest.Config). @@ -442,8 +309,7 @@ metadata: Expect(err).ToNot(HaveOccurred()) createNs, err := feature.CreateFeature("create-ns"). - With(dsciSpec). - From(source). + For(dsciSpec). ManifestSource(os.DirFS(tempDir)). Manifests(path.Join("namespace.yaml")). // must be relative to root DirFS defined above UsingConfig(envTest.Config). @@ -462,29 +328,6 @@ metadata: }) }) -func createTestSecret(namespace string) func(f *feature.Feature) error { - return func(f *feature.Feature) error { - secret := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-secret", - Namespace: namespace, - OwnerReferences: []metav1.OwnerReference{ - f.AsOwnerReference(), - }, - }, - Data: map[string][]byte{ - "test": []byte("test"), - }, - } - - _, err := f.Clientset.CoreV1(). - Secrets(namespace). - Create(context.TODO(), secret, metav1.CreateOptions{}) - - return err - } -} - func createNamespace(name string) *v1.Namespace { return &v1.Namespace{ ObjectMeta: metav1.ObjectMeta{ diff --git a/tests/integration/features/servicemesh_feature_test.go b/tests/integration/features/servicemesh_feature_test.go index 2b6fb4086bf..1f0ee8685f3 100644 --- a/tests/integration/features/servicemesh_feature_test.go +++ b/tests/integration/features/servicemesh_feature_test.go @@ -23,6 +23,116 @@ import ( . "github.com/onsi/gomega" ) +var _ = Describe("Service Mesh feature", func() { + var testFeature *feature.Feature + var objectCleaner *envtestutil.Cleaner + + BeforeEach(func() { + c, err := client.New(envTest.Config, client.Options{}) + Expect(err).ToNot(HaveOccurred()) + + objectCleaner = envtestutil.CreateCleaner(c, envTest.Config, timeout, interval) + + testFeatureName := "servicemesh-feature" + namespace := envtestutil.AppendRandomNameTo(testFeatureName) + + dsciSpec := newDSCInitializationSpec(namespace) + testFeature, err = feature.CreateFeature(testFeatureName). + For(dsciSpec). + UsingConfig(envTest.Config). 
+ Load() + + Expect(err).ToNot(HaveOccurred()) + }) + + Describe("preconditions", func() { + + When("operator is not installed", func() { + It("operator presence check should return an error", func() { + Expect(servicemesh.EnsureServiceMeshOperatorInstalled(testFeature)).ToNot(Succeed()) + }) + }) + + When("operator is installed", func() { + + It("should fail checking operator presence prerequisite when CRD not installed", func() { + Expect(servicemesh.EnsureServiceMeshOperatorInstalled(testFeature)).ToNot(Succeed()) + }) + + It("should succeed checking operator presence prerequisite when CRD installed", func() { + // given + smcpCRD := installServiceMeshControlPlaneCRD() + defer objectCleaner.DeleteAll(smcpCRD) + + // then + Expect(servicemesh.EnsureServiceMeshOperatorInstalled(testFeature)).To(Succeed()) + }) + + It("should find installed Service Mesh Control Plane", func() { + // given + smcpCRD := installServiceMeshControlPlaneCRD() + defer objectCleaner.DeleteAll(smcpCRD) + + // when + ns := envtestutil.AppendRandomNameTo(testNamespacePrefix) + nsResource := createNamespace(ns) + Expect(envTestClient.Create(context.Background(), nsResource)).To(Succeed()) + defer objectCleaner.DeleteAll(nsResource) + + createServiceMeshControlPlane("test-name", ns) + + // then + testFeature.Spec.ControlPlane.Namespace = ns + testFeature.Spec.ControlPlane.Name = "test-name" + Expect(servicemesh.EnsureServiceMeshInstalled(testFeature)).To(Succeed()) + }) + + It("should fail to find Service Mesh Control Plane if not present", func() { + // given + smcpCRD := installServiceMeshControlPlaneCRD() + defer objectCleaner.DeleteAll(smcpCRD) + + // then + Expect(servicemesh.EnsureServiceMeshInstalled(testFeature)).ToNot(Succeed()) + }) + }) + }) + +}) + +func getGateway(cfg *rest.Config, namespace, name string) (*unstructured.Unstructured, error) { + dynamicClient, err := dynamic.NewForConfig(cfg) + if err != nil { + return nil, err + } + gwGvr := schema.GroupVersionResource{ + Group: "networking.istio.io", + Version: "v1beta1", + Resource: "gateways", + } + + gateway, err := dynamicClient.Resource(gwGvr).Namespace(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + return gateway, nil +} + +func installServiceMeshControlPlaneCRD() *apiextensionsv1.CustomResourceDefinition { + // Create SMCP the CRD + smcpCrdObj := &apiextensionsv1.CustomResourceDefinition{} + Expect(yaml.Unmarshal([]byte(serviceMeshControlPlaneCRD), smcpCrdObj)).To(Succeed()) + c, err := client.New(envTest.Config, client.Options{}) + Expect(err).ToNot(HaveOccurred()) + + Expect(c.Create(context.TODO(), smcpCrdObj)).To(Succeed()) + + crdOptions := envtest.CRDInstallOptions{PollInterval: interval, MaxTime: timeout} + Expect(envtest.WaitForCRDs(envTest.Config, []*apiextensionsv1.CustomResourceDefinition{smcpCrdObj}, crdOptions)).To(Succeed()) + + return smcpCrdObj +} + const serviceMeshControlPlaneCRD = `apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: From c75fe09327e44ecab099dde42b26a93ee18a0cf7 Mon Sep 17 00:00:00 2001 From: Bartosz Majsak Date: Tue, 23 Jan 2024 18:15:15 +0100 Subject: [PATCH 02/32] fix: ensures cleanup of associated `FeatureTracker`s (#812) * test: adds failing test to cover missconfigured tracker and cleanup logic * fix: ensures cleanup of associated FeatureTracker * chore(test): extracts secret creation to its own func * chore: uses addCleanup func (cherry picked from commit 55118bd0684ea5c1c4414a9dbd43dc5eec32d0b3) --- components/kserve/kserve.go | 
9 +- components/kserve/serverless_setup.go | 7 +- .../dscinitialization/servicemesh_setup.go | 32 ++----- pkg/feature/builder.go | 28 +----- pkg/feature/feature.go | 56 +---------- pkg/feature/feature_tracker_handler.go | 8 +- pkg/feature/initializer.go | 18 +--- pkg/gvr/gvr.go | 2 +- .../integration/features/features_int_test.go | 62 ++++++------- .../features/servicemesh_feature_test.go | 93 ------------------- 10 files changed, 64 insertions(+), 251 deletions(-) diff --git a/components/kserve/kserve.go b/components/kserve/kserve.go index 16970fdf71c..2bd44f0d024 100644 --- a/components/kserve/kserve.go +++ b/components/kserve/kserve.go @@ -175,7 +175,7 @@ func (k *Kserve) configureServerless(instance *dsciv1.DSCInitializationSpec) err fmt.Println("Serverless CR is not configured by the operator, we won't do anything") case operatorv1.Removed: // we remove serving CR - fmt.Println("existing Serverless CR (owned by operator) will be removed") + fmt.Println("existing ServiceMesh CR (owned by operator) will be removed") if err := k.removeServerlessFeatures(instance); err != nil { return err } @@ -183,10 +183,9 @@ func (k *Kserve) configureServerless(instance *dsciv1.DSCInitializationSpec) err case operatorv1.Managed: // standard workflow to create CR switch instance.ServiceMesh.ManagementState { case operatorv1.Unmanaged, operatorv1.Removed: - return fmt.Errorf("ServiceMesh is need to set to 'Managed' in DSCI CR, it is required by KServe serving field") + return fmt.Errorf("ServiceMesh is need to set to 'Managaed' in DSCI CR, it is required by KServe serving field") } - - serverlessInitializer := feature.ComponentFeaturesInitializer(k, instance, k.configureServerlessFeatures()) + serverlessInitializer := feature.NewFeaturesInitializer(instance, k.configureServerlessFeatures) if err := serverlessInitializer.Prepare(); err != nil { return err @@ -200,7 +199,7 @@ func (k *Kserve) configureServerless(instance *dsciv1.DSCInitializationSpec) err } func (k *Kserve) removeServerlessFeatures(instance *dsciv1.DSCInitializationSpec) error { - serverlessInitializer := feature.ComponentFeaturesInitializer(k, instance, k.configureServerlessFeatures()) + serverlessInitializer := feature.NewFeaturesInitializer(instance, k.configureServerlessFeatures) if err := serverlessInitializer.Prepare(); err != nil { return err diff --git a/components/kserve/serverless_setup.go b/components/kserve/serverless_setup.go index 782a3d06094..83d2b6cf877 100644 --- a/components/kserve/serverless_setup.go +++ b/components/kserve/serverless_setup.go @@ -6,10 +6,12 @@ import ( "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/serverless" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/servicemesh" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/gvr" ) const ( - templatesDir = "templates/serverless" + knativeServingNamespace = "knative-serving" + templatesDir = "templates/serverless" ) func (k *Kserve) configureServerlessFeatures(s *feature.FeaturesInitializer) error { @@ -53,6 +55,9 @@ func (k *Kserve) configureServerlessFeatures(s *feature.FeaturesInitializer) err if err != nil { return err } + s.Features = append(s.Features, servingIstioGateways) + + return nil } func PopulateComponentSettings(k *Kserve) feature.Action { diff --git a/controllers/dscinitialization/servicemesh_setup.go b/controllers/dscinitialization/servicemesh_setup.go index 48c72c612b0..e9250d57922 100644 --- 
a/controllers/dscinitialization/servicemesh_setup.go +++ b/controllers/dscinitialization/servicemesh_setup.go @@ -16,7 +16,7 @@ const templatesDir = "templates/servicemesh" func (r *DSCInitializationReconciler) configureServiceMesh(instance *dsciv1.DSCInitialization) error { switch instance.Spec.ServiceMesh.ManagementState { case operatorv1.Managed: - serviceMeshInitializer := feature.ClusterFeaturesInitializer(instance, configureServiceMeshFeatures()) + serviceMeshInitializer := feature.NewFeaturesInitializer(&instance.Spec, configureServiceMeshFeatures) if err := serviceMeshInitializer.Prepare(); err != nil { r.Log.Error(err, "failed configuring service mesh resources") r.Recorder.Eventf(instance, corev1.EventTypeWarning, "DSCInitializationReconcileError", "failed configuring service mesh resources") @@ -43,7 +43,8 @@ func (r *DSCInitializationReconciler) configureServiceMesh(instance *dsciv1.DSCI func (r *DSCInitializationReconciler) removeServiceMesh(instance *dsciv1.DSCInitialization) error { // on condition of Managed, do not handle Removed when set to Removed it trigger DSCI reconcile to cleanup if instance.Spec.ServiceMesh.ManagementState == operatorv1.Managed { - serviceMeshInitializer := feature.ClusterFeaturesInitializer(instance, configureServiceMeshFeatures()) + serviceMeshInitializer := feature.NewFeaturesInitializer(&instance.Spec, configureServiceMeshFeatures) + if err := serviceMeshInitializer.Prepare(); err != nil { r.Log.Error(err, "failed configuring service mesh resources") r.Recorder.Eventf(instance, corev1.EventTypeWarning, "DSCInitializationReconcileError", "failed configuring service mesh resources") @@ -93,28 +94,11 @@ func configureServiceMeshFeatures(s *feature.FeaturesInitializer) error { path.Join(templatesDir, "metrics-collection"), ). Load() - - if errSmcp != nil { - return errSmcp - } - initializer.Features = append(initializer.Features, smcpCreation) - - if serviceMeshSpec.ControlPlane.MetricsCollection == "Istio" { - metricsCollection, errMetrics := feature.CreateFeature("mesh-metrics-collection"). - With(initializer.DSCInitializationSpec). - From(initializer.Source). - Manifests( - path.Join(templatesDir, "metrics-collection"), - ). - PreConditions( - servicemesh.EnsureServiceMeshInstalled, - ). 
- Load() - if errMetrics != nil { - return errMetrics - } - initializer.Features = append(initializer.Features, metricsCollection) + if errMetrics != nil { + return errMetrics } - return nil + s.Features = append(s.Features, metricsCollection) } + + return nil } diff --git a/pkg/feature/builder.go b/pkg/feature/builder.go index 40ba92b8bb8..c84324f38bd 100644 --- a/pkg/feature/builder.go +++ b/pkg/feature/builder.go @@ -3,6 +3,7 @@ package feature import ( "io/fs" + "github.com/hashicorp/go-multierror" "github.com/pkg/errors" apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/client-go/dynamic" @@ -33,38 +34,17 @@ func CreateFeature(name string) *featureBuilder { //nolint:golint,revive //No ne } } -type featureName struct { - name string -} - -func (fn *featureName) With(spec *v1.DSCInitializationSpec) *featureSource { - return &featureSource{ - spec: spec, - name: fn.name, - } -} - -type featureSource struct { - spec *v1.DSCInitializationSpec - name string -} - -func (fo *featureSource) From(source featurev1.Source) *featureBuilder { +func (fb *featureBuilder) For(spec *v1.DSCInitializationSpec) *featureBuilder { createSpec := func(f *Feature) error { f.Spec = &Spec{ - ServiceMeshSpec: &fo.spec.ServiceMesh, + ServiceMeshSpec: &spec.ServiceMesh, Serving: &infrav1.ServingSpec{}, - Source: &source, - AppNamespace: fo.spec.ApplicationsNamespace, + AppNamespace: spec.ApplicationsNamespace, } return nil } - fb := &featureBuilder{ - name: fo.name, - fsys: embeddedFiles, - } // Ensures creation of .Spec object is always invoked first fb.builders = append([]partialBuilder{createSpec}, fb.builders...) diff --git a/pkg/feature/feature.go b/pkg/feature/feature.go index f1feae263d3..dbcb0a83afd 100644 --- a/pkg/feature/feature.go +++ b/pkg/feature/feature.go @@ -2,11 +2,9 @@ package feature import ( "context" - "fmt" "github.com/go-logr/logr" "github.com/hashicorp/go-multierror" - conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -15,8 +13,6 @@ import ( "k8s.io/client-go/kubernetes" "sigs.k8s.io/controller-runtime/pkg/client" ctrlLog "sigs.k8s.io/controller-runtime/pkg/log" - - featurev1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/features/v1" ) type Feature struct { @@ -50,29 +46,17 @@ func newFeature(name string) *Feature { // Action is a func type which can be used for different purposes while having access to Feature struct. type Action func(feature *Feature) error -//nolint:nonamedreturns // Reason: we use the named return to handle errors in a unified fashion through deferred function. 
-func (f *Feature) Apply() (err error) { +func (f *Feature) Apply() error { if !f.Enabled { return nil } - if trackerErr := f.createFeatureTracker(); err != nil { - return trackerErr + if err := f.createResourceTracker(); err != nil { + return err } // Verify all precondition and collect errors var multiErr *multierror.Error - phase := featurev1.FeatureCreated - f.updateFeatureTrackerStatus(conditionsv1.ConditionProgressing, "True", phase, fmt.Sprintf("Applying feature %s", f.Name)) - defer func() { - if err != nil { - f.updateFeatureTrackerStatus(conditionsv1.ConditionDegraded, "True", phase, err.Error()) - } else { - f.updateFeatureTrackerStatus(conditionsv1.ConditionAvailable, "True", phase, fmt.Sprintf("Feature %s applied successfully", f.Name)) - } - }() - - phase = featurev1.PreConditions for _, precondition := range f.preconditions { multiErr = multierror.Append(multiErr, precondition(f)) } @@ -81,7 +65,7 @@ func (f *Feature) Apply() (err error) { return preconditionsErr } - phase = featurev1.LoadTemplateData + // Load necessary data for _, loader := range f.loaders { multiErr = multierror.Append(multiErr, loader(f)) } @@ -104,7 +88,6 @@ func (f *Feature) Apply() (err error) { } } - phase = featurev1.ApplyManifests if err := f.applyManifests(); err != nil { return errors.WithStack(err) } @@ -113,12 +96,8 @@ func (f *Feature) Apply() (err error) { for _, postcondition := range f.postconditions { multiErr = multierror.Append(multiErr, postcondition(f)) } - if multiErr.ErrorOrNil() != nil { - return multiErr.ErrorOrNil() - } - phase = featurev1.FeatureCreated - return nil + return multiErr.ErrorOrNil() } func (f *Feature) Cleanup() error { @@ -214,28 +193,3 @@ func (f *Feature) apply(m manifest) error { func (f *Feature) AsOwnerReference() metav1.OwnerReference { return f.Spec.Tracker.ToOwnerReference() } - -// updateFeatureTrackerStatus updates conditions of a FeatureTracker. -// It's deliberately logging errors instead of handing them as it is used in deferred error handling of Feature public API, -// which is more predictable. -func (f *Feature) updateFeatureTrackerStatus(condType conditionsv1.ConditionType, status corev1.ConditionStatus, reason featurev1.FeaturePhase, message string) { - tracker := f.Spec.Tracker - - // Update the status - if tracker.Status.Conditions == nil { - tracker.Status.Conditions = &[]conditionsv1.Condition{} - } - conditionsv1.SetStatusCondition(tracker.Status.Conditions, conditionsv1.Condition{ - Type: condType, - Status: status, - Reason: string(reason), - Message: message, - }) - - err := f.Client.Status().Update(context.Background(), tracker) - if err != nil { - f.Log.Error(err, "Error updating FeatureTracker status") - } - - f.Spec.Tracker.Status = tracker.Status -} diff --git a/pkg/feature/feature_tracker_handler.go b/pkg/feature/feature_tracker_handler.go index 28c175a6dd7..a8982059c34 100644 --- a/pkg/feature/feature_tracker_handler.go +++ b/pkg/feature/feature_tracker_handler.go @@ -13,10 +13,10 @@ import ( "github.com/opendatahub-io/opendatahub-operator/v2/pkg/common" ) -// createFeatureTracker instantiates FeatureTracker for a given Feature. It's a cluster-scoped resource used +// createResourceTracker instantiates FeatureTracker for a given Feature. It's a cluster-scoped resource used // to track creation and removal of all owned resources which belong to this Feature. // All resources which particular feature is composed of will have this object attached as an OwnerReference. 
-func (f *Feature) createFeatureTracker() error { +func (f *Feature) createResourceTracker() error { tracker, err := f.getFeatureTracker() if k8serrors.IsNotFound(err) { if err := f.Client.Create(context.TODO(), tracker); err != nil { @@ -60,10 +60,6 @@ func (f *Feature) getFeatureTracker() (*featurev1.FeatureTracker, error) { ObjectMeta: metav1.ObjectMeta{ Name: f.Spec.AppNamespace + "-" + common.TrimToRFC1123Name(f.Name), }, - Spec: featurev1.FeatureTrackerSpec{ - Source: *f.Spec.Source, - AppNamespace: f.Spec.AppNamespace, - }, } err := f.Client.Get(context.Background(), client.ObjectKeyFromObject(tracker), tracker) diff --git a/pkg/feature/initializer.go b/pkg/feature/initializer.go index 412bd62b9f4..029de8733a4 100644 --- a/pkg/feature/initializer.go +++ b/pkg/feature/initializer.go @@ -4,31 +4,19 @@ import ( "github.com/hashicorp/go-multierror" v1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" - featurev1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/features/v1" - "github.com/opendatahub-io/opendatahub-operator/v2/components" ) type FeaturesInitializer struct { *v1.DSCInitializationSpec - Source featurev1.Source - Features []*Feature definedFeatures DefinedFeatures + Features []*Feature } -type DefinedFeatures func(initializer *FeaturesInitializer) error - -func ClusterFeaturesInitializer(dsci *v1.DSCInitialization, def DefinedFeatures) *FeaturesInitializer { - return &FeaturesInitializer{ - DSCInitializationSpec: &dsci.Spec, - Source: featurev1.Source{Type: featurev1.DSCIType, Name: dsci.Name}, - definedFeatures: def, - } -} +type DefinedFeatures func(s *FeaturesInitializer) error -func ComponentFeaturesInitializer(component components.ComponentInterface, spec *v1.DSCInitializationSpec, def DefinedFeatures) *FeaturesInitializer { +func NewFeaturesInitializer(spec *v1.DSCInitializationSpec, def DefinedFeatures) *FeaturesInitializer { return &FeaturesInitializer{ DSCInitializationSpec: spec, - Source: featurev1.Source{Type: featurev1.ComponentType, Name: component.GetComponentName()}, definedFeatures: def, } } diff --git a/pkg/gvr/gvr.go b/pkg/gvr/gvr.go index c8066ea4a12..049a30aa4f5 100644 --- a/pkg/gvr/gvr.go +++ b/pkg/gvr/gvr.go @@ -15,7 +15,7 @@ var ( Resource: "ingresses", } - FeatureTracker = schema.GroupVersionResource{ + ResourceTracker = schema.GroupVersionResource{ Group: "features.opendatahub.io", Version: "v1", Resource: "featuretrackers", diff --git a/tests/integration/features/features_int_test.go b/tests/integration/features/features_int_test.go index 1459ecaa55e..218bedb6910 100644 --- a/tests/integration/features/features_int_test.go +++ b/tests/integration/features/features_int_test.go @@ -12,17 +12,14 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" - featurev1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/features/v1" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/gvr" "github.com/opendatahub-io/opendatahub-operator/v2/tests/envtestutil" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . 
"github.com/onsi/gomega/gstruct" ) //go:embed templates @@ -51,11 +48,9 @@ var _ = Describe("feature preconditions", func() { namespace = envtestutil.AppendRandomNameTo(testFeatureName) dsciSpec := newDSCInitializationSpec(namespace) - source := envtestutil.NewSource(featurev1.DSCIType, "default") var err error testFeature, err = feature.CreateFeature(testFeatureName). - With(dsciSpec). - From(source). + For(dsciSpec). UsingConfig(envTest.Config). Load() Expect(err).ToNot(HaveOccurred()) @@ -93,12 +88,10 @@ var _ = Describe("feature preconditions", func() { var ( dsciSpec *dscv1.DSCInitializationSpec verificationFeature *feature.Feature - source featurev1.Source ) BeforeEach(func() { dsciSpec = newDSCInitializationSpec("default") - source = envtestutil.NewSource(featurev1.DSCIType, "default") }) It("should successfully check for existing CRD", func() { @@ -107,8 +100,7 @@ var _ = Describe("feature preconditions", func() { var err error verificationFeature, err = feature.CreateFeature("CRD verification"). - With(dsciSpec). - From(source). + For(dsciSpec). UsingConfig(envTest.Config). PreConditions(feature.EnsureCRDIsInstalled(name)). Load() @@ -127,8 +119,7 @@ var _ = Describe("feature preconditions", func() { var err error verificationFeature, err = feature.CreateFeature("CRD verification"). - With(dsciSpec). - From(source). + For(dsciSpec). UsingConfig(envTest.Config). PreConditions(feature.EnsureCRDIsInstalled(name)). Load() @@ -151,20 +142,17 @@ var _ = Describe("feature cleanup", func() { var ( namespace string dsciSpec *dscv1.DSCInitializationSpec - source featurev1.Source ) BeforeAll(func() { namespace = envtestutil.AppendRandomNameTo("feature-tracker-test") dsciSpec = newDSCInitializationSpec(namespace) - source = envtestutil.NewSource(featurev1.DSCIType, "default") }) It("should successfully create resource and associated feature tracker", func() { // given createConfigMap, err := feature.CreateFeature("create-cfg-map"). - With(dsciSpec). - From(source). + For(dsciSpec). UsingConfig(envTest.Config). PreConditions( feature.CreateNamespaceIfNotExists(namespace), @@ -179,7 +167,7 @@ var _ = Describe("feature cleanup", func() { // then Expect(createConfigMap.Spec.Tracker).ToNot(BeNil()) _, err = createConfigMap.DynamicClient. - Resource(gvr.FeatureTracker). + Resource(gvr.ResourceTracker). Get(context.TODO(), createConfigMap.Spec.Tracker.Name, metav1.GetOptions{}) Expect(err).ToNot(HaveOccurred()) @@ -189,8 +177,7 @@ var _ = Describe("feature cleanup", func() { // recreating feature struct again as it would happen in the reconcile // given createConfigMap, err := feature.CreateFeature("create-cfg-map"). - With(dsciSpec). - From(source). + For(dsciSpec). UsingConfig(envTest.Config). PreConditions( feature.CreateNamespaceIfNotExists(namespace), @@ -205,7 +192,7 @@ var _ = Describe("feature cleanup", func() { // then _, err = createConfigMap.DynamicClient. - Resource(gvr.FeatureTracker). + Resource(gvr.ResourceTracker). Get(context.TODO(), trackerName, metav1.GetOptions{}) Expect(errors.IsNotFound(err)).To(BeTrue()) @@ -215,6 +202,29 @@ var _ = Describe("feature cleanup", func() { }) +func createTestSecret(namespace string) func(f *feature.Feature) error { + return func(f *feature.Feature) error { + secret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + Namespace: namespace, + OwnerReferences: []metav1.OwnerReference{ + f.AsOwnerReference(), + }, + }, + Data: map[string][]byte{ + "test": []byte("test"), + }, + } + + _, err := f.Clientset.CoreV1(). 
+ Secrets(namespace). + Create(context.TODO(), secret, metav1.CreateOptions{}) + + return err + } +} + var _ = Describe("Manifest sources", func() { Context("using various manifest sources", func() { @@ -336,20 +346,10 @@ func createNamespace(name string) *v1.Namespace { } } -func getFeatureTracker(name string) *featurev1.FeatureTracker { - tracker := &featurev1.FeatureTracker{} - err := envTestClient.Get(context.Background(), client.ObjectKey{ - Name: name, - }, tracker) - - Expect(err).ToNot(HaveOccurred()) - - return tracker -} - func newDSCInitializationSpec(ns string) *dscv1.DSCInitializationSpec { spec := dscv1.DSCInitializationSpec{} spec.ApplicationsNamespace = ns + return &spec } diff --git a/tests/integration/features/servicemesh_feature_test.go b/tests/integration/features/servicemesh_feature_test.go index 1f0ee8685f3..a9615388207 100644 --- a/tests/integration/features/servicemesh_feature_test.go +++ b/tests/integration/features/servicemesh_feature_test.go @@ -13,7 +13,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" - featurev1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/features/v1" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/servicemesh" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/gvr" @@ -175,98 +174,6 @@ spec: status: {} ` -var _ = Describe("Service Mesh feature", func() { - var testFeature *feature.Feature - var objectCleaner *envtestutil.Cleaner - - BeforeEach(func() { - c, err := client.New(envTest.Config, client.Options{}) - Expect(err).ToNot(HaveOccurred()) - - objectCleaner = envtestutil.CreateCleaner(c, envTest.Config, timeout, interval) - - testFeatureName := "servicemesh-feature" - namespace := envtestutil.AppendRandomNameTo(testFeatureName) - - dsciSpec := newDSCInitializationSpec(namespace) - source := envtestutil.NewSource(featurev1.DSCIType, "default") - testFeature, err = feature.CreateFeature(testFeatureName). - With(dsciSpec). - From(source). - UsingConfig(envTest.Config). 
- Load() - - Expect(err).ToNot(HaveOccurred()) - }) - - Describe("preconditions", func() { - When("operator is not installed", func() { - It("operator presence check should return an error", func() { - Expect(servicemesh.EnsureServiceMeshOperatorInstalled(testFeature)).To(HaveOccurred()) - }) - }) - When("operator is installed", func() { - var smcpCrdObj *apiextensionsv1.CustomResourceDefinition - - BeforeEach(func() { - // Create SMCP the CRD - smcpCrdObj = &apiextensionsv1.CustomResourceDefinition{} - Expect(yaml.Unmarshal([]byte(serviceMeshControlPlaneCRD), smcpCrdObj)).ToNot(HaveOccurred()) - c, err := client.New(envTest.Config, client.Options{}) - Expect(err).ToNot(HaveOccurred()) - Expect(c.Create(context.TODO(), smcpCrdObj)).ToNot(HaveOccurred()) - - crdOptions := envtest.CRDInstallOptions{PollInterval: interval, MaxTime: timeout} - err = envtest.WaitForCRDs(envTest.Config, []*apiextensionsv1.CustomResourceDefinition{smcpCrdObj}, crdOptions) - Expect(err).ToNot(HaveOccurred()) - }) - AfterEach(func() { - // Delete SMCP CRD - objectCleaner.DeleteAll(smcpCrdObj) - }) - It("operator presence check should succeed", func() { - Expect(servicemesh.EnsureServiceMeshOperatorInstalled(testFeature)).To(Succeed()) - }) - It("should find installed Service Mesh Control Plane", func() { - c, err := client.New(envTest.Config, client.Options{}) - Expect(err).ToNot(HaveOccurred()) - - ns := envtestutil.AppendRandomNameTo(testNamespacePrefix) - nsResource := createNamespace(ns) - Expect(c.Create(context.Background(), nsResource)).To(Succeed()) - defer objectCleaner.DeleteAll(nsResource) - - createServiceMeshControlPlane("test-name", ns) - - testFeature.Spec.ControlPlane.Namespace = ns - testFeature.Spec.ControlPlane.Name = "test-name" - Expect(servicemesh.EnsureServiceMeshInstalled(testFeature)).To(Succeed()) - }) - It("should fail to find Service Mesh Control Plane if not present", func() { - Expect(servicemesh.EnsureServiceMeshInstalled(testFeature)).ToNot(Succeed()) - }) - }) - }) -}) - -func getGateway(cfg *rest.Config, namespace, name string) (*unstructured.Unstructured, error) { - dynamicClient, err := dynamic.NewForConfig(cfg) - if err != nil { - return nil, err - } - gwGvr := schema.GroupVersionResource{ - Group: "networking.istio.io", - Version: "v1beta1", - Resource: "gateways", - } - - gateway, err := dynamicClient.Resource(gwGvr).Namespace(namespace).Get(context.TODO(), name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - return gateway, nil -} - func createServiceMeshControlPlane(name, namespace string) { serviceMeshControlPlane := &unstructured.Unstructured{ Object: map[string]interface{}{ From 0b5bc8c13596304bab20c80d74929a3842d0792d Mon Sep 17 00:00:00 2001 From: Cameron Garrison Date: Wed, 24 Jan 2024 12:43:51 -0500 Subject: [PATCH 03/32] feat: enhances FeatureTracker with spec (#808) * feat: enhances FeatureTracker with spec (#17) * initial add tracker spec * update tests, update crd * add omitempty to origin struct * undo accidental tag change * re add empty line * move pointer operator * add testing * lint * re-lint changes * add ownertype, move newOrigin() to shared util * Update apis/features/v1/features_types.go Co-authored-by: Bartosz Majsak * remove origin from featureinitializer * modify kserve sm step to match dashboard's * make dsci servicemesh setup like dashboard's * fix merge issues, lint --------- Co-authored-by: Bartosz Majsak * restore testing mistakenly removed in merge * satisfy linter post merge conflicts * fix linter post merge * fix post merge 
issue * split For() into With + DefinedBy * rename origin to source, definedby to from --------- Co-authored-by: Bartosz Majsak (cherry picked from commit 591a29123b0b8ffed226e2f5fd8be51de02154d9) --- apis/features/v1/features_types.go | 12 +- components/kserve/kserve.go | 7 +- components/kserve/serverless_setup.go | 86 +++++----- .../dscinitialization/servicemesh_setup.go | 71 ++++---- pkg/feature/builder.go | 32 +++- pkg/feature/feature_tracker_handler.go | 4 + pkg/feature/initializer.go | 18 ++- .../integration/features/features_int_test.go | 135 ++++++++++++---- .../features/servicemesh_feature_test.go | 153 ++++++++---------- 9 files changed, 306 insertions(+), 212 deletions(-) diff --git a/apis/features/v1/features_types.go b/apis/features/v1/features_types.go index 037dda6bdf2..52f8c8198a5 100644 --- a/apis/features/v1/features_types.go +++ b/apis/features/v1/features_types.go @@ -23,19 +23,11 @@ type FeatureTracker struct { Status FeatureTrackerStatus `json:"status,omitempty"` } -type FeaturePhase string type OwnerType string const ( - FeatureCreated FeaturePhase = "FeatureCreated" - PreConditions FeaturePhase = "FeaturePreConditions" - ResourceCreation FeaturePhase = "ResourceCreation" - LoadTemplateData FeaturePhase = "LoadTemplateData" - ProcessTemplates FeaturePhase = "ProcessTemplates" - ApplyManifests FeaturePhase = "ApplyManifests" - PostConditions FeaturePhase = "FeaturePostConditions" - ComponentType OwnerType = "Component" - DSCIType OwnerType = "DSCI" + ComponentType OwnerType = "Component" + DSCIType OwnerType = "DSCI" ) func (s *FeatureTracker) ToOwnerReference() metav1.OwnerReference { diff --git a/components/kserve/kserve.go b/components/kserve/kserve.go index 2bd44f0d024..d7cf3dece37 100644 --- a/components/kserve/kserve.go +++ b/components/kserve/kserve.go @@ -183,9 +183,10 @@ func (k *Kserve) configureServerless(instance *dsciv1.DSCInitializationSpec) err case operatorv1.Managed: // standard workflow to create CR switch instance.ServiceMesh.ManagementState { case operatorv1.Unmanaged, operatorv1.Removed: - return fmt.Errorf("ServiceMesh is need to set to 'Managaed' in DSCI CR, it is required by KServe serving field") + return fmt.Errorf("ServiceMesh is need to set to 'Managed' in DSCI CR, it is required by KServe serving field") } - serverlessInitializer := feature.NewFeaturesInitializer(instance, k.configureServerlessFeatures) + + serverlessInitializer := feature.ComponentFeaturesInitializer(k, instance, k.configureServerlessFeatures()) if err := serverlessInitializer.Prepare(); err != nil { return err @@ -199,7 +200,7 @@ func (k *Kserve) configureServerless(instance *dsciv1.DSCInitializationSpec) err } func (k *Kserve) removeServerlessFeatures(instance *dsciv1.DSCInitializationSpec) error { - serverlessInitializer := feature.NewFeaturesInitializer(instance, k.configureServerlessFeatures) + serverlessInitializer := feature.ComponentFeaturesInitializer(k, instance, k.configureServerlessFeatures()) if err := serverlessInitializer.Prepare(); err != nil { return err diff --git a/components/kserve/serverless_setup.go b/components/kserve/serverless_setup.go index 83d2b6cf877..db6ac0aafc2 100644 --- a/components/kserve/serverless_setup.go +++ b/components/kserve/serverless_setup.go @@ -14,50 +14,52 @@ const ( templatesDir = "templates/serverless" ) -func (k *Kserve) configureServerlessFeatures(s *feature.FeaturesInitializer) error { - servingDeployment, err := feature.CreateFeature("serverless-serving-deployment"). - For(s.DSCInitializationSpec). 
- Manifests( - path.Join(templatesDir, "serving-install"), - ). - WithData(PopulateComponentSettings(k)). - PreConditions( - serverless.EnsureServerlessOperatorInstalled, - serverless.EnsureServerlessAbsent, - servicemesh.EnsureServiceMeshInstalled, - feature.CreateNamespaceIfNotExists(knativeServingNamespace), - ). - PostConditions( - feature.WaitForPodsToBeReady(knativeServingNamespace), - ). - Load() - if err != nil { - return err - } - s.Features = append(s.Features, servingDeployment) +func (k *Kserve) configureServerlessFeatures() feature.DefinedFeatures { + return func(initializer *feature.FeaturesInitializer) error { + servingDeployment, err := feature.CreateFeature("serverless-serving-deployment"). + With(initializer.DSCInitializationSpec). + From(initializer.Source). + Manifests( + path.Join(templatesDir, "serving-install"), + ). + WithData(PopulateComponentSettings(k)). + PreConditions( + serverless.EnsureServerlessOperatorInstalled, + serverless.EnsureServerlessAbsent, + servicemesh.EnsureServiceMeshInstalled, + feature.CreateNamespaceIfNotExists(knativeServingNamespace), + ). + PostConditions( + feature.WaitForPodsToBeReady(knativeServingNamespace), + ). + Load() + if err != nil { + return err + } + initializer.Features = append(initializer.Features, servingDeployment) - servingIstioGateways, err := feature.CreateFeature("serverless-serving-gateways"). - For(s.DSCInitializationSpec). - PreConditions( - // Check serverless is installed - feature.WaitForResourceToBeCreated(knativeServingNamespace, gvr.KnativeServing), - ). - WithData( - serverless.ServingDefaultValues, - serverless.ServingIngressDomain, - PopulateComponentSettings(k), - ). - WithResources(serverless.ServingCertificateResource). - Manifests( - path.Join(templatesDir, "serving-istio-gateways"), - ). - Load() - if err != nil { - return err + servingIstioGateways, err := feature.CreateFeature("serverless-serving-gateways"). + With(initializer.DSCInitializationSpec). + From(initializer.Source). + PreConditions( + // Check serverless is installed + feature.WaitForResourceToBeCreated(knativeServingNamespace, gvr.KnativeServing)). + WithData( + serverless.ServingDefaultValues, + serverless.ServingIngressDomain, + PopulateComponentSettings(k), + ). + WithResources(serverless.ServingCertificateResource). + Manifests( + path.Join(templatesDir, "serving-istio-gateways"), + ). 
+ Load() + if err != nil { + return err + } + initializer.Features = append(initializer.Features, servingIstioGateways) + return nil } - s.Features = append(s.Features, servingIstioGateways) - - return nil } func PopulateComponentSettings(k *Kserve) feature.Action { diff --git a/controllers/dscinitialization/servicemesh_setup.go b/controllers/dscinitialization/servicemesh_setup.go index e9250d57922..e39ed64bfe7 100644 --- a/controllers/dscinitialization/servicemesh_setup.go +++ b/controllers/dscinitialization/servicemesh_setup.go @@ -16,7 +16,7 @@ const templatesDir = "templates/servicemesh" func (r *DSCInitializationReconciler) configureServiceMesh(instance *dsciv1.DSCInitialization) error { switch instance.Spec.ServiceMesh.ManagementState { case operatorv1.Managed: - serviceMeshInitializer := feature.NewFeaturesInitializer(&instance.Spec, configureServiceMeshFeatures) + serviceMeshInitializer := feature.ClusterFeaturesInitializer(instance, configureServiceMeshFeatures()) if err := serviceMeshInitializer.Prepare(); err != nil { r.Log.Error(err, "failed configuring service mesh resources") r.Recorder.Eventf(instance, corev1.EventTypeWarning, "DSCInitializationReconcileError", "failed configuring service mesh resources") @@ -43,8 +43,7 @@ func (r *DSCInitializationReconciler) configureServiceMesh(instance *dsciv1.DSCI func (r *DSCInitializationReconciler) removeServiceMesh(instance *dsciv1.DSCInitialization) error { // on condition of Managed, do not handle Removed when set to Removed it trigger DSCI reconcile to cleanup if instance.Spec.ServiceMesh.ManagementState == operatorv1.Managed { - serviceMeshInitializer := feature.NewFeaturesInitializer(&instance.Spec, configureServiceMeshFeatures) - + serviceMeshInitializer := feature.ClusterFeaturesInitializer(instance, configureServiceMeshFeatures()) if err := serviceMeshInitializer.Prepare(); err != nil { r.Log.Error(err, "failed configuring service mesh resources") r.Recorder.Eventf(instance, corev1.EventTypeWarning, "DSCInitializationReconcileError", "failed configuring service mesh resources") @@ -63,42 +62,46 @@ func (r *DSCInitializationReconciler) removeServiceMesh(instance *dsciv1.DSCInit return nil } -func configureServiceMeshFeatures(s *feature.FeaturesInitializer) error { - serviceMeshSpec := s.ServiceMesh - - smcpCreation, errSmcp := feature.CreateFeature("mesh-control-plane-creation"). - For(s.DSCInitializationSpec). - Manifests( - path.Join(templatesDir, "base", "create-smcp.tmpl"), - ). - PreConditions( - servicemesh.EnsureServiceMeshOperatorInstalled, - feature.CreateNamespaceIfNotExists(serviceMeshSpec.ControlPlane.Namespace), - ). - PostConditions( - feature.WaitForPodsToBeReady(serviceMeshSpec.ControlPlane.Namespace), - ). - Load() - if errSmcp != nil { - return errSmcp - } - s.Features = append(s.Features, smcpCreation) +func configureServiceMeshFeatures() feature.DefinedFeatures { + return func(initializer *feature.FeaturesInitializer) error { + serviceMeshSpec := initializer.DSCInitializationSpec.ServiceMesh - if serviceMeshSpec.ControlPlane.MetricsCollection == "Istio" { - metricsCollection, errMetrics := feature.CreateFeature("mesh-metrics-collection"). - For(s.DSCInitializationSpec). + smcpCreation, errSmcp := feature.CreateFeature("mesh-control-plane-creation"). + With(initializer.DSCInitializationSpec). + From(initializer.Source). + Manifests( + path.Join(templatesDir, "base", "create-smcp.tmpl"), + ). 
PreConditions( - servicemesh.EnsureServiceMeshInstalled, + servicemesh.EnsureServiceMeshOperatorInstalled, + feature.CreateNamespaceIfNotExists(serviceMeshSpec.ControlPlane.Namespace), ). - Manifests( - path.Join(templatesDir, "metrics-collection"), + PostConditions( + feature.WaitForPodsToBeReady(serviceMeshSpec.ControlPlane.Namespace), ). Load() - if errMetrics != nil { - return errMetrics + + if errSmcp != nil { + return errSmcp + } + initializer.Features = append(initializer.Features, smcpCreation) + + if serviceMeshSpec.ControlPlane.MetricsCollection == "Istio" { + metricsCollection, errMetrics := feature.CreateFeature("mesh-metrics-collection"). + With(initializer.DSCInitializationSpec). + From(initializer.Source). + Manifests( + path.Join(templatesDir, "metrics-collection"), + ). + PreConditions( + servicemesh.EnsureServiceMeshInstalled, + ). + Load() + if errMetrics != nil { + return errMetrics + } + initializer.Features = append(initializer.Features, metricsCollection) } - s.Features = append(s.Features, metricsCollection) + return nil } - - return nil } diff --git a/pkg/feature/builder.go b/pkg/feature/builder.go index c84324f38bd..19cf04e45eb 100644 --- a/pkg/feature/builder.go +++ b/pkg/feature/builder.go @@ -27,24 +27,44 @@ type featureBuilder struct { fsys fs.FS } -func CreateFeature(name string) *featureBuilder { //nolint:golint,revive //No need to export featureBuilder. - return &featureBuilder{ +func CreateFeature(name string) *featureName { //nolint:golint,revive //No need to export featureBuilder. + return &featureName{ name: name, - fsys: embeddedFiles, } } -func (fb *featureBuilder) For(spec *v1.DSCInitializationSpec) *featureBuilder { +type featureName struct { + name string +} + +func (fn *featureName) With(spec *v1.DSCInitializationSpec) *featureSource { + return &featureSource{ + spec: spec, + name: fn.name, + } +} + +type featureSource struct { + spec *v1.DSCInitializationSpec + name string +} + +func (fo *featureSource) From(source featurev1.Source) *featureBuilder { createSpec := func(f *Feature) error { f.Spec = &Spec{ - ServiceMeshSpec: &spec.ServiceMesh, + ServiceMeshSpec: &fo.spec.ServiceMesh, Serving: &infrav1.ServingSpec{}, - AppNamespace: spec.ApplicationsNamespace, + Source: &source, + AppNamespace: fo.spec.ApplicationsNamespace, } return nil } + fb := &featureBuilder{ + name: fo.name, + fsys: embeddedFiles, + } // Ensures creation of .Spec object is always invoked first fb.builders = append([]partialBuilder{createSpec}, fb.builders...) 
diff --git a/pkg/feature/feature_tracker_handler.go b/pkg/feature/feature_tracker_handler.go index a8982059c34..673ad2f54b6 100644 --- a/pkg/feature/feature_tracker_handler.go +++ b/pkg/feature/feature_tracker_handler.go @@ -60,6 +60,10 @@ func (f *Feature) getFeatureTracker() (*featurev1.FeatureTracker, error) { ObjectMeta: metav1.ObjectMeta{ Name: f.Spec.AppNamespace + "-" + common.TrimToRFC1123Name(f.Name), }, + Spec: featurev1.FeatureTrackerSpec{ + Source: *f.Spec.Source, + AppNamespace: f.Spec.AppNamespace, + }, } err := f.Client.Get(context.Background(), client.ObjectKeyFromObject(tracker), tracker) diff --git a/pkg/feature/initializer.go b/pkg/feature/initializer.go index 029de8733a4..412bd62b9f4 100644 --- a/pkg/feature/initializer.go +++ b/pkg/feature/initializer.go @@ -4,19 +4,31 @@ import ( "github.com/hashicorp/go-multierror" v1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" + featurev1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/features/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/components" ) type FeaturesInitializer struct { *v1.DSCInitializationSpec - definedFeatures DefinedFeatures + Source featurev1.Source Features []*Feature + definedFeatures DefinedFeatures } -type DefinedFeatures func(s *FeaturesInitializer) error +type DefinedFeatures func(initializer *FeaturesInitializer) error + +func ClusterFeaturesInitializer(dsci *v1.DSCInitialization, def DefinedFeatures) *FeaturesInitializer { + return &FeaturesInitializer{ + DSCInitializationSpec: &dsci.Spec, + Source: featurev1.Source{Type: featurev1.DSCIType, Name: dsci.Name}, + definedFeatures: def, + } +} -func NewFeaturesInitializer(spec *v1.DSCInitializationSpec, def DefinedFeatures) *FeaturesInitializer { +func ComponentFeaturesInitializer(component components.ComponentInterface, spec *v1.DSCInitializationSpec, def DefinedFeatures) *FeaturesInitializer { return &FeaturesInitializer{ DSCInitializationSpec: spec, + Source: featurev1.Source{Type: featurev1.ComponentType, Name: component.GetComponentName()}, definedFeatures: def, } } diff --git a/tests/integration/features/features_int_test.go b/tests/integration/features/features_int_test.go index 218bedb6910..2efc2fb6232 100644 --- a/tests/integration/features/features_int_test.go +++ b/tests/integration/features/features_int_test.go @@ -12,8 +12,10 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" + featurev1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/features/v1" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/gvr" "github.com/opendatahub-io/opendatahub-operator/v2/tests/envtestutil" @@ -48,9 +50,11 @@ var _ = Describe("feature preconditions", func() { namespace = envtestutil.AppendRandomNameTo(testFeatureName) dsciSpec := newDSCInitializationSpec(namespace) + source := envtestutil.NewSource(featurev1.DSCIType, "default") var err error testFeature, err = feature.CreateFeature(testFeatureName). - For(dsciSpec). + With(dsciSpec). + From(source). UsingConfig(envTest.Config). 
Load() Expect(err).ToNot(HaveOccurred()) @@ -88,10 +92,12 @@ var _ = Describe("feature preconditions", func() { var ( dsciSpec *dscv1.DSCInitializationSpec verificationFeature *feature.Feature + source featurev1.Source ) BeforeEach(func() { dsciSpec = newDSCInitializationSpec("default") + source = envtestutil.NewSource(featurev1.DSCIType, "default") }) It("should successfully check for existing CRD", func() { @@ -100,7 +106,8 @@ var _ = Describe("feature preconditions", func() { var err error verificationFeature, err = feature.CreateFeature("CRD verification"). - For(dsciSpec). + With(dsciSpec). + From(source). UsingConfig(envTest.Config). PreConditions(feature.EnsureCRDIsInstalled(name)). Load() @@ -119,7 +126,8 @@ var _ = Describe("feature preconditions", func() { var err error verificationFeature, err = feature.CreateFeature("CRD verification"). - For(dsciSpec). + With(dsciSpec). + From(source). UsingConfig(envTest.Config). PreConditions(feature.EnsureCRDIsInstalled(name)). Load() @@ -142,17 +150,20 @@ var _ = Describe("feature cleanup", func() { var ( namespace string dsciSpec *dscv1.DSCInitializationSpec + source featurev1.Source ) BeforeAll(func() { namespace = envtestutil.AppendRandomNameTo("feature-tracker-test") dsciSpec = newDSCInitializationSpec(namespace) + source = envtestutil.NewSource(featurev1.DSCIType, "default") }) It("should successfully create resource and associated feature tracker", func() { // given createConfigMap, err := feature.CreateFeature("create-cfg-map"). - For(dsciSpec). + With(dsciSpec). + From(source). UsingConfig(envTest.Config). PreConditions( feature.CreateNamespaceIfNotExists(namespace), @@ -177,7 +188,8 @@ var _ = Describe("feature cleanup", func() { // recreating feature struct again as it would happen in the reconcile // given createConfigMap, err := feature.CreateFeature("create-cfg-map"). - For(dsciSpec). + With(dsciSpec). + From(source). UsingConfig(envTest.Config). PreConditions( feature.CreateNamespaceIfNotExists(namespace), @@ -202,28 +214,53 @@ var _ = Describe("feature cleanup", func() { }) -func createTestSecret(namespace string) func(f *feature.Feature) error { - return func(f *feature.Feature) error { - secret := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-secret", - Namespace: namespace, - OwnerReferences: []metav1.OwnerReference{ - f.AsOwnerReference(), - }, - }, - Data: map[string][]byte{ - "test": []byte("test"), - }, - } +var _ = Describe("feature trackers", func() { + Context("ensuring feature trackers indicate status and phase", func() { - _, err := f.Clientset.CoreV1(). - Secrets(namespace). - Create(context.TODO(), secret, metav1.CreateOptions{}) + var ( + dsciSpec *dscv1.DSCInitializationSpec + source featurev1.Source + ) - return err - } -} + BeforeEach(func() { + dsciSpec = newDSCInitializationSpec("default") + source = envtestutil.NewSource(featurev1.DSCIType, "default") + }) + + It("should correctly indicate source in the feature tracker", func() { + verificationFeature, err := feature.CreateFeature("empty-feature"). + With(dsciSpec). + From(source). + UsingConfig(envTest.Config). 
+ Load() + Expect(err).ToNot(HaveOccurred()) + + // when + Expect(verificationFeature.Apply()).To(Succeed()) + + // then + featureTracker := getFeatureTracker("default-empty-feature") + Expect(featureTracker.Spec.Source.Name).To(Equal("default")) + Expect(featureTracker.Spec.Source.Type).To(Equal(featurev1.DSCIType)) + }) + + It("should correctly indicate app namespace in the feature tracker", func() { + verificationFeature, err := feature.CreateFeature("empty-feature"). + With(dsciSpec). + From(source). + UsingConfig(envTest.Config). + Load() + Expect(err).ToNot(HaveOccurred()) + + // when + Expect(verificationFeature.Apply()).To(Succeed()) + + // then + featureTracker := getFeatureTracker("default-empty-feature") + Expect(featureTracker.Spec.AppNamespace).To(Equal("default")) + }) + }) +}) var _ = Describe("Manifest sources", func() { Context("using various manifest sources", func() { @@ -231,12 +268,14 @@ var _ = Describe("Manifest sources", func() { var ( objectCleaner *envtestutil.Cleaner dsciSpec *dscv1.DSCInitializationSpec + source featurev1.Source namespace = "default" ) BeforeEach(func() { objectCleaner = envtestutil.CreateCleaner(envTestClient, envTest.Config, timeout, interval) dsciSpec = newDSCInitializationSpec(namespace) + source = envtestutil.NewSource(featurev1.DSCIType, "namespace") }) It("should be able to process an embedded template from the default location", func() { @@ -249,7 +288,8 @@ var _ = Describe("Manifest sources", func() { serviceMeshSpec.ControlPlane.Namespace = "service-ns" createService, err := feature.CreateFeature("create-control-plane"). - For(dsciSpec). + With(dsciSpec). + From(source). Manifests(path.Join(templatesDir, "serverless", "serving-istio-gateways", "local-gateway-svc.tmpl")). UsingConfig(envTest.Config). Load() @@ -272,7 +312,8 @@ var _ = Describe("Manifest sources", func() { defer objectCleaner.DeleteAll(ns) createGateway, err := feature.CreateFeature("create-gateway"). - For(dsciSpec). + With(dsciSpec). + From(source). Manifests(path.Join(templatesDir, "serverless", "serving-istio-gateways", "istio-local-gateway.yaml")). UsingConfig(envTest.Config). Load() @@ -290,7 +331,8 @@ var _ = Describe("Manifest sources", func() { It("should be able to process an embedded file from a non default location", func() { createNs, err := feature.CreateFeature("create-ns"). - For(dsciSpec). + With(dsciSpec). + From(source). ManifestSource(testEmbeddedFiles). Manifests(path.Join(templatesDir, "namespace.yaml")). UsingConfig(envTest.Config). @@ -319,7 +361,8 @@ metadata: Expect(err).ToNot(HaveOccurred()) createNs, err := feature.CreateFeature("create-ns"). - For(dsciSpec). + With(dsciSpec). + From(source). ManifestSource(os.DirFS(tempDir)). Manifests(path.Join("namespace.yaml")). // must be relative to root DirFS defined above UsingConfig(envTest.Config). @@ -338,6 +381,29 @@ metadata: }) }) +func createTestSecret(namespace string) func(f *feature.Feature) error { + return func(f *feature.Feature) error { + secret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + Namespace: namespace, + OwnerReferences: []metav1.OwnerReference{ + f.AsOwnerReference(), + }, + }, + Data: map[string][]byte{ + "test": []byte("test"), + }, + } + + _, err := f.Clientset.CoreV1(). + Secrets(namespace). 
+ Create(context.TODO(), secret, metav1.CreateOptions{}) + + return err + } +} + func createNamespace(name string) *v1.Namespace { return &v1.Namespace{ ObjectMeta: metav1.ObjectMeta{ @@ -346,6 +412,17 @@ func createNamespace(name string) *v1.Namespace { } } +func getFeatureTracker(name string) *featurev1.FeatureTracker { + tracker := &featurev1.FeatureTracker{} + err := envTestClient.Get(context.Background(), client.ObjectKey{ + Name: name, + }, tracker) + + Expect(err).ToNot(HaveOccurred()) + + return tracker +} + func newDSCInitializationSpec(ns string) *dscv1.DSCInitializationSpec { spec := dscv1.DSCInitializationSpec{} spec.ApplicationsNamespace = ns diff --git a/tests/integration/features/servicemesh_feature_test.go b/tests/integration/features/servicemesh_feature_test.go index a9615388207..2b6fb4086bf 100644 --- a/tests/integration/features/servicemesh_feature_test.go +++ b/tests/integration/features/servicemesh_feature_test.go @@ -13,6 +13,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" + featurev1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/features/v1" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/servicemesh" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/gvr" @@ -22,6 +23,48 @@ import ( . "github.com/onsi/gomega" ) +const serviceMeshControlPlaneCRD = `apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + labels: + maistra-version: 2.4.2 + annotations: + service.beta.openshift.io/inject-cabundle: "true" + controller-gen.kubebuilder.io/version: v0.4.1 + name: servicemeshcontrolplanes.maistra.io +spec: + group: maistra.io + names: + categories: + - maistra-io + kind: ServiceMeshControlPlane + listKind: ServiceMeshControlPlaneList + plural: servicemeshcontrolplanes + shortNames: + - smcp + singular: servicemeshcontrolplane + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + type: object + x-kubernetes-preserve-unknown-fields: true + served: true + storage: false + subresources: + status: {} + - name: v2 + schema: + openAPIV3Schema: + type: object + x-kubernetes-preserve-unknown-fields: true + served: true + storage: true + subresources: + status: {} +` + var _ = Describe("Service Mesh feature", func() { var testFeature *feature.Feature var objectCleaner *envtestutil.Cleaner @@ -36,8 +79,10 @@ var _ = Describe("Service Mesh feature", func() { namespace := envtestutil.AppendRandomNameTo(testFeatureName) dsciSpec := newDSCInitializationSpec(namespace) + source := envtestutil.NewSource(featurev1.DSCIType, "default") testFeature, err = feature.CreateFeature(testFeatureName). - For(dsciSpec). + With(dsciSpec). + From(source). UsingConfig(envTest.Config). 
Load() @@ -45,58 +90,53 @@ var _ = Describe("Service Mesh feature", func() { }) Describe("preconditions", func() { - When("operator is not installed", func() { It("operator presence check should return an error", func() { - Expect(servicemesh.EnsureServiceMeshOperatorInstalled(testFeature)).ToNot(Succeed()) + Expect(servicemesh.EnsureServiceMeshOperatorInstalled(testFeature)).To(HaveOccurred()) }) }) - When("operator is installed", func() { - - It("should fail checking operator presence prerequisite when CRD not installed", func() { - Expect(servicemesh.EnsureServiceMeshOperatorInstalled(testFeature)).ToNot(Succeed()) + var smcpCrdObj *apiextensionsv1.CustomResourceDefinition + + BeforeEach(func() { + // Create SMCP the CRD + smcpCrdObj = &apiextensionsv1.CustomResourceDefinition{} + Expect(yaml.Unmarshal([]byte(serviceMeshControlPlaneCRD), smcpCrdObj)).ToNot(HaveOccurred()) + c, err := client.New(envTest.Config, client.Options{}) + Expect(err).ToNot(HaveOccurred()) + Expect(c.Create(context.TODO(), smcpCrdObj)).ToNot(HaveOccurred()) + + crdOptions := envtest.CRDInstallOptions{PollInterval: interval, MaxTime: timeout} + err = envtest.WaitForCRDs(envTest.Config, []*apiextensionsv1.CustomResourceDefinition{smcpCrdObj}, crdOptions) + Expect(err).ToNot(HaveOccurred()) }) - - It("should succeed checking operator presence prerequisite when CRD installed", func() { - // given - smcpCRD := installServiceMeshControlPlaneCRD() - defer objectCleaner.DeleteAll(smcpCRD) - - // then + AfterEach(func() { + // Delete SMCP CRD + objectCleaner.DeleteAll(smcpCrdObj) + }) + It("operator presence check should succeed", func() { Expect(servicemesh.EnsureServiceMeshOperatorInstalled(testFeature)).To(Succeed()) }) - It("should find installed Service Mesh Control Plane", func() { - // given - smcpCRD := installServiceMeshControlPlaneCRD() - defer objectCleaner.DeleteAll(smcpCRD) + c, err := client.New(envTest.Config, client.Options{}) + Expect(err).ToNot(HaveOccurred()) - // when ns := envtestutil.AppendRandomNameTo(testNamespacePrefix) nsResource := createNamespace(ns) - Expect(envTestClient.Create(context.Background(), nsResource)).To(Succeed()) + Expect(c.Create(context.Background(), nsResource)).To(Succeed()) defer objectCleaner.DeleteAll(nsResource) createServiceMeshControlPlane("test-name", ns) - // then testFeature.Spec.ControlPlane.Namespace = ns testFeature.Spec.ControlPlane.Name = "test-name" Expect(servicemesh.EnsureServiceMeshInstalled(testFeature)).To(Succeed()) }) - It("should fail to find Service Mesh Control Plane if not present", func() { - // given - smcpCRD := installServiceMeshControlPlaneCRD() - defer objectCleaner.DeleteAll(smcpCRD) - - // then Expect(servicemesh.EnsureServiceMeshInstalled(testFeature)).ToNot(Succeed()) }) }) }) - }) func getGateway(cfg *rest.Config, namespace, name string) (*unstructured.Unstructured, error) { @@ -117,63 +157,6 @@ func getGateway(cfg *rest.Config, namespace, name string) (*unstructured.Unstruc return gateway, nil } -func installServiceMeshControlPlaneCRD() *apiextensionsv1.CustomResourceDefinition { - // Create SMCP the CRD - smcpCrdObj := &apiextensionsv1.CustomResourceDefinition{} - Expect(yaml.Unmarshal([]byte(serviceMeshControlPlaneCRD), smcpCrdObj)).To(Succeed()) - c, err := client.New(envTest.Config, client.Options{}) - Expect(err).ToNot(HaveOccurred()) - - Expect(c.Create(context.TODO(), smcpCrdObj)).To(Succeed()) - - crdOptions := envtest.CRDInstallOptions{PollInterval: interval, MaxTime: timeout} - Expect(envtest.WaitForCRDs(envTest.Config, 
[]*apiextensionsv1.CustomResourceDefinition{smcpCrdObj}, crdOptions)).To(Succeed()) - - return smcpCrdObj -} - -const serviceMeshControlPlaneCRD = `apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - labels: - maistra-version: 2.4.2 - annotations: - service.beta.openshift.io/inject-cabundle: "true" - controller-gen.kubebuilder.io/version: v0.4.1 - name: servicemeshcontrolplanes.maistra.io -spec: - group: maistra.io - names: - categories: - - maistra-io - kind: ServiceMeshControlPlane - listKind: ServiceMeshControlPlaneList - plural: servicemeshcontrolplanes - shortNames: - - smcp - singular: servicemeshcontrolplane - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - type: object - x-kubernetes-preserve-unknown-fields: true - served: true - storage: false - subresources: - status: {} - - name: v2 - schema: - openAPIV3Schema: - type: object - x-kubernetes-preserve-unknown-fields: true - served: true - storage: true - subresources: - status: {} -` - func createServiceMeshControlPlane(name, namespace string) { serviceMeshControlPlane := &unstructured.Unstructured{ Object: map[string]interface{}{ From 3f67b898ab1159d0e3b9599236086714cd243568 Mon Sep 17 00:00:00 2001 From: Cameron Garrison Date: Thu, 25 Jan 2024 05:34:21 -0500 Subject: [PATCH 04/32] feat: add status/conditions to FeatureTrackers (#801) Co-authored-by: Bartosz Majsak (cherry picked from commit 3558db6f3c0a1241400e5d049e22ceece13433b9) --- apis/features/v1/features_types.go | 12 ++- pkg/feature/feature.go | 62 ++++++++++++-- pkg/feature/feature_tracker_handler.go | 4 +- pkg/gvr/gvr.go | 2 +- .../integration/features/features_int_test.go | 85 ++++++++++++++++++- 5 files changed, 150 insertions(+), 15 deletions(-) diff --git a/apis/features/v1/features_types.go b/apis/features/v1/features_types.go index 52f8c8198a5..037dda6bdf2 100644 --- a/apis/features/v1/features_types.go +++ b/apis/features/v1/features_types.go @@ -23,11 +23,19 @@ type FeatureTracker struct { Status FeatureTrackerStatus `json:"status,omitempty"` } +type FeaturePhase string type OwnerType string const ( - ComponentType OwnerType = "Component" - DSCIType OwnerType = "DSCI" + FeatureCreated FeaturePhase = "FeatureCreated" + PreConditions FeaturePhase = "FeaturePreConditions" + ResourceCreation FeaturePhase = "ResourceCreation" + LoadTemplateData FeaturePhase = "LoadTemplateData" + ProcessTemplates FeaturePhase = "ProcessTemplates" + ApplyManifests FeaturePhase = "ApplyManifests" + PostConditions FeaturePhase = "FeaturePostConditions" + ComponentType OwnerType = "Component" + DSCIType OwnerType = "DSCI" ) func (s *FeatureTracker) ToOwnerReference() metav1.OwnerReference { diff --git a/pkg/feature/feature.go b/pkg/feature/feature.go index dbcb0a83afd..5ccb5d6f3a5 100644 --- a/pkg/feature/feature.go +++ b/pkg/feature/feature.go @@ -2,9 +2,11 @@ package feature import ( "context" + "fmt" "github.com/go-logr/logr" "github.com/hashicorp/go-multierror" + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -13,6 +15,8 @@ import ( "k8s.io/client-go/kubernetes" "sigs.k8s.io/controller-runtime/pkg/client" ctrlLog "sigs.k8s.io/controller-runtime/pkg/log" + + featurev1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/features/v1" ) type Feature struct { @@ -46,17 +50,29 @@ func newFeature(name string) *Feature { // Action is a func type which can be used for different purposes while having access 
to Feature struct. type Action func(feature *Feature) error -func (f *Feature) Apply() error { +//nolint:nonamedreturns // Reason: we use the named return to handle errors in a unified fashion through deferred function. +func (f *Feature) Apply() (err error) { if !f.Enabled { return nil } - if err := f.createResourceTracker(); err != nil { - return err + if trackerErr := f.createFeatureTracker(); err != nil { + return trackerErr } // Verify all precondition and collect errors var multiErr *multierror.Error + phase := featurev1.FeatureCreated + f.updateFeatureTrackerStatus(conditionsv1.ConditionProgressing, "True", phase, fmt.Sprintf("Applying feature %s", f.Name)) + defer func() { + if err != nil { + f.updateFeatureTrackerStatus(conditionsv1.ConditionDegraded, "True", phase, err.Error()) + } else { + f.updateFeatureTrackerStatus(conditionsv1.ConditionAvailable, "True", phase, fmt.Sprintf("Feature %s applied successfully", f.Name)) + } + }() + + phase = featurev1.PreConditions for _, precondition := range f.preconditions { multiErr = multierror.Append(multiErr, precondition(f)) } @@ -65,7 +81,7 @@ func (f *Feature) Apply() error { return preconditionsErr } - // Load necessary data + phase = featurev1.LoadTemplateData for _, loader := range f.loaders { multiErr = multierror.Append(multiErr, loader(f)) } @@ -74,30 +90,35 @@ func (f *Feature) Apply() error { return dataLoadErr } - // Create or update resources + phase = featurev1.ResourceCreation for _, resource := range f.resources { if err := resource(f); err != nil { return errors.WithStack(err) } } - // Process and apply manifests + phase = featurev1.ProcessTemplates for i := range f.manifests { if err := f.manifests[i].process(f.Spec); err != nil { return errors.WithStack(err) } } + phase = featurev1.ApplyManifests if err := f.applyManifests(); err != nil { return errors.WithStack(err) } - // Check all postconditions and collect errors + phase = featurev1.PostConditions for _, postcondition := range f.postconditions { multiErr = multierror.Append(multiErr, postcondition(f)) } + if multiErr.ErrorOrNil() != nil { + return multiErr.ErrorOrNil() + } - return multiErr.ErrorOrNil() + phase = featurev1.FeatureCreated + return nil } func (f *Feature) Cleanup() error { @@ -193,3 +214,28 @@ func (f *Feature) apply(m manifest) error { func (f *Feature) AsOwnerReference() metav1.OwnerReference { return f.Spec.Tracker.ToOwnerReference() } + +// updateFeatureTrackerStatus updates conditions of a FeatureTracker. +// It's deliberately logging errors instead of handing them as it is used in deferred error handling of Feature public API, +// which is more predictable. 
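// Illustrative usage sketch (the condition values below mirror the calls made from Apply
// above; the message text is made up):
//
//	f.updateFeatureTrackerStatus(conditionsv1.ConditionProgressing, corev1.ConditionTrue, featurev1.PreConditions, "verifying prerequisites")
//
// When Apply returns an error, its deferred block reports ConditionDegraded with the failing
// phase as the Reason, which is what the FeatureTracker integration tests added in this patch
// assert against Status.Conditions.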
+func (f *Feature) updateFeatureTrackerStatus(condType conditionsv1.ConditionType, status corev1.ConditionStatus, reason featurev1.FeaturePhase, message string) { + tracker := f.Spec.Tracker + + // Update the status + if tracker.Status.Conditions == nil { + tracker.Status.Conditions = &[]conditionsv1.Condition{} + } + conditionsv1.SetStatusCondition(tracker.Status.Conditions, conditionsv1.Condition{ + Type: condType, + Status: status, + Reason: string(reason), + Message: message, + }) + + err := f.Client.Status().Update(context.Background(), tracker) + if err != nil { + f.Log.Error(err, "Error updating FeatureTracker status") + } + + f.Spec.Tracker.Status = tracker.Status +} diff --git a/pkg/feature/feature_tracker_handler.go b/pkg/feature/feature_tracker_handler.go index 673ad2f54b6..28c175a6dd7 100644 --- a/pkg/feature/feature_tracker_handler.go +++ b/pkg/feature/feature_tracker_handler.go @@ -13,10 +13,10 @@ import ( "github.com/opendatahub-io/opendatahub-operator/v2/pkg/common" ) -// createResourceTracker instantiates FeatureTracker for a given Feature. It's a cluster-scoped resource used +// createFeatureTracker instantiates FeatureTracker for a given Feature. It's a cluster-scoped resource used // to track creation and removal of all owned resources which belong to this Feature. // All resources which particular feature is composed of will have this object attached as an OwnerReference. -func (f *Feature) createResourceTracker() error { +func (f *Feature) createFeatureTracker() error { tracker, err := f.getFeatureTracker() if k8serrors.IsNotFound(err) { if err := f.Client.Create(context.TODO(), tracker); err != nil { diff --git a/pkg/gvr/gvr.go b/pkg/gvr/gvr.go index 049a30aa4f5..c8066ea4a12 100644 --- a/pkg/gvr/gvr.go +++ b/pkg/gvr/gvr.go @@ -15,7 +15,7 @@ var ( Resource: "ingresses", } - ResourceTracker = schema.GroupVersionResource{ + FeatureTracker = schema.GroupVersionResource{ Group: "features.opendatahub.io", Version: "v1", Resource: "featuretrackers", diff --git a/tests/integration/features/features_int_test.go b/tests/integration/features/features_int_test.go index 2efc2fb6232..1d0430b2859 100644 --- a/tests/integration/features/features_int_test.go +++ b/tests/integration/features/features_int_test.go @@ -3,11 +3,13 @@ package features_test import ( "context" "embed" + "fmt" "os" "path" "path/filepath" "time" + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -22,6 +24,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + . "github.com/onsi/gomega/gstruct" ) //go:embed templates @@ -178,7 +181,7 @@ var _ = Describe("feature cleanup", func() { // then Expect(createConfigMap.Spec.Tracker).ToNot(BeNil()) _, err = createConfigMap.DynamicClient. - Resource(gvr.ResourceTracker). + Resource(gvr.FeatureTracker). Get(context.TODO(), createConfigMap.Spec.Tracker.Name, metav1.GetOptions{}) Expect(err).ToNot(HaveOccurred()) @@ -204,7 +207,7 @@ var _ = Describe("feature cleanup", func() { // then _, err = createConfigMap.DynamicClient. - Resource(gvr.ResourceTracker). + Resource(gvr.FeatureTracker). 
Get(context.TODO(), trackerName, metav1.GetOptions{}) Expect(errors.IsNotFound(err)).To(BeTrue()) @@ -227,6 +230,82 @@ var _ = Describe("feature trackers", func() { source = envtestutil.NewSource(featurev1.DSCIType, "default") }) + It("should indicate successful installation in FeatureTracker", func() { + // given example CRD installed into env + name := "test-resources.openshift.io" + verificationFeature, err := feature.CreateFeature("crd-verification"). + With(dsciSpec). + From(source). + UsingConfig(envTest.Config). + PreConditions(feature.EnsureCRDIsInstalled(name)). + Load() + Expect(err).ToNot(HaveOccurred()) + + // when + Expect(verificationFeature.Apply()).To(Succeed()) + + // then + featureTracker := getFeatureTracker("default-crd-verification") + Expect(*featureTracker.Status.Conditions).To(ContainElement( + MatchFields(IgnoreExtras, Fields{ + "Type": Equal(conditionsv1.ConditionAvailable), + "Status": Equal(v1.ConditionTrue), + "Reason": Equal(string(featurev1.FeatureCreated)), + }), + )) + }) + + It("should indicate failure in preconditions", func() { + // given + name := "non-existing-resource.non-existing-group.io" + verificationFeature, err := feature.CreateFeature("crd-verification"). + With(dsciSpec). + From(source). + UsingConfig(envTest.Config). + PreConditions(feature.EnsureCRDIsInstalled(name)). + Load() + Expect(err).ToNot(HaveOccurred()) + + // when + Expect(verificationFeature.Apply()).ToNot(Succeed()) + + // then + featureTracker := getFeatureTracker("default-crd-verification") + Expect(*featureTracker.Status.Conditions).To(ContainElement( + MatchFields(IgnoreExtras, Fields{ + "Type": Equal(conditionsv1.ConditionDegraded), + "Status": Equal(v1.ConditionTrue), + "Reason": Equal(string(featurev1.PreConditions)), + }), + )) + }) + + It("should indicate failure in post-conditions", func() { + // given + verificationFeature, err := feature.CreateFeature("post-condition-failure"). + With(dsciSpec). + From(source). + UsingConfig(envTest.Config). + PostConditions(func(f *feature.Feature) error { + return fmt.Errorf("always fail") + }). + Load() + Expect(err).ToNot(HaveOccurred()) + + // when + Expect(verificationFeature.Apply()).ToNot(Succeed()) + + // then + featureTracker := getFeatureTracker("default-post-condition-failure") + Expect(*featureTracker.Status.Conditions).To(ContainElement( + MatchFields(IgnoreExtras, Fields{ + "Type": Equal(conditionsv1.ConditionDegraded), + "Status": Equal(v1.ConditionTrue), + "Reason": Equal(string(featurev1.PostConditions)), + }), + )) + }) + It("should correctly indicate source in the feature tracker", func() { verificationFeature, err := feature.CreateFeature("empty-feature"). With(dsciSpec). 
@@ -259,7 +338,9 @@ var _ = Describe("feature trackers", func() { featureTracker := getFeatureTracker("default-empty-feature") Expect(featureTracker.Spec.AppNamespace).To(Equal("default")) }) + }) + }) var _ = Describe("Manifest sources", func() { From 7377ea1ad3fed2aed31bc46731197361c40b016e Mon Sep 17 00:00:00 2001 From: Dhiraj Bokde Date: Thu, 25 Jan 2024 08:14:31 -0800 Subject: [PATCH 05/32] fix: fix e2e test error handling for https://issues.redhat.com/browse/RHOAIENG-2219 (#817) (cherry picked from commit db3bf7b89e8eac79ea58d8ca5442e34cc7b2e1dc) --- tests/e2e/dsc_creation_test.go | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/tests/e2e/dsc_creation_test.go b/tests/e2e/dsc_creation_test.go index f4b1619a9d2..1d44ced7c89 100644 --- a/tests/e2e/dsc_creation_test.go +++ b/tests/e2e/dsc_creation_test.go @@ -237,17 +237,6 @@ func (tc *testContext) testAllApplicationCreation(t *testing.T) error { //nolint } }) - t.Run("Validate Kueue", func(t *testing.T) { - // speed testing in parallel - t.Parallel() - err = tc.testApplicationCreation(&(tc.testDsc.Spec.Components.Kueue)) - if tc.testDsc.Spec.Components.Kueue.ManagementState == operatorv1.Managed { - require.NoError(t, err, "error validating application %v when enabled", tc.testDsc.Spec.Components.Kueue.GetComponentName()) - } else { - require.Error(t, err, "error validating application %v when disabled", tc.testDsc.Spec.Components.Kueue.GetComponentName()) - } - }) - t.Run("Validate TrustyAI", func(t *testing.T) { // speed testing in parallel t.Parallel() From 37452337dce674930a42d889eee90c42a94212f2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edgar=20Hern=C3=A1ndez?= Date: Mon, 29 Jan 2024 02:54:29 -0600 Subject: [PATCH 06/32] fix(serverless): Patch to enable Secret filtering in KNative Serving (#814) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This prevents KNative's net-istio pods from being OOMKilled in environments with a large number of Secrets. This would patch a Managed installation. Custom installations would be left untouched. Signed-off-by: Edgar Hernández <23639005+israel-hdez@users.noreply.github.com> (cherry picked from commit e1409afb44182cbc9617b2e1f3528f8f50eae128) --- components/kserve/serverless_setup.go | 28 ++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/components/kserve/serverless_setup.go b/components/kserve/serverless_setup.go index db6ac0aafc2..d57ce6c6f55 100644 --- a/components/kserve/serverless_setup.go +++ b/components/kserve/serverless_setup.go @@ -6,12 +6,10 @@ import ( "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/serverless" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/servicemesh" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/gvr" ) const ( - knativeServingNamespace = "knative-serving" - templatesDir = "templates/serverless" + templatesDir = "templates/serverless" ) func (k *Kserve) configureServerlessFeatures() feature.DefinedFeatures { @@ -27,23 +25,35 @@ func (k *Kserve) configureServerlessFeatures() feature.DefinedFeatures { serverless.EnsureServerlessOperatorInstalled, serverless.EnsureServerlessAbsent, servicemesh.EnsureServiceMeshInstalled, - feature.CreateNamespaceIfNotExists(knativeServingNamespace), + feature.CreateNamespaceIfNotExists(serverless.KnativeServingNamespace), ). 
+ Load() + if err != nil { + return err + } + initializer.Features = append(initializer.Features, servingDeployment) + + servingNetIstioSecretFiltering, err := feature.CreateFeature("serverless-net-istio-secret-filtering"). + With(initializer.DSCInitializationSpec). + From(initializer.Source). + Manifests( + path.Join(templatesDir, "serving-net-istio-secret-filtering.patch.tmpl"), + ). + WithData(PopulateComponentSettings(k)). + PreConditions(serverless.EnsureServerlessServingDeployed). PostConditions( - feature.WaitForPodsToBeReady(knativeServingNamespace), + feature.WaitForPodsToBeReady(serverless.KnativeServingNamespace), ). Load() if err != nil { return err } - initializer.Features = append(initializer.Features, servingDeployment) + initializer.Features = append(initializer.Features, servingNetIstioSecretFiltering) servingIstioGateways, err := feature.CreateFeature("serverless-serving-gateways"). With(initializer.DSCInitializationSpec). From(initializer.Source). - PreConditions( - // Check serverless is installed - feature.WaitForResourceToBeCreated(knativeServingNamespace, gvr.KnativeServing)). + PreConditions(serverless.EnsureServerlessServingDeployed). WithData( serverless.ServingDefaultValues, serverless.ServingIngressDomain, From dc1ab67d44784e44a3ca10396f54439570b7d9de Mon Sep 17 00:00:00 2001 From: Wen Zhou Date: Mon, 29 Jan 2024 10:04:13 +0100 Subject: [PATCH 07/32] (backport): wrong CR name in the message. (#813) Signed-off-by: Wen Zhou (cherry picked from commit 92beb809e382f37fed0436e0459cf9b3f89877ca) --- components/kserve/kserve.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/components/kserve/kserve.go b/components/kserve/kserve.go index d7cf3dece37..16970fdf71c 100644 --- a/components/kserve/kserve.go +++ b/components/kserve/kserve.go @@ -175,7 +175,7 @@ func (k *Kserve) configureServerless(instance *dsciv1.DSCInitializationSpec) err fmt.Println("Serverless CR is not configured by the operator, we won't do anything") case operatorv1.Removed: // we remove serving CR - fmt.Println("existing ServiceMesh CR (owned by operator) will be removed") + fmt.Println("existing Serverless CR (owned by operator) will be removed") if err := k.removeServerlessFeatures(instance); err != nil { return err } From 4d469a864009e68598e057a3319c70d4d2753d1e Mon Sep 17 00:00:00 2001 From: Wen Zhou Date: Tue, 30 Jan 2024 22:01:35 +0100 Subject: [PATCH 08/32] fix(trustyai): missing overwrite for the devFlag on component level (#816) - this will be able to test live build if set devFlags on the Trustyai with downstream RHOAI build Signed-off-by: Wen Zhou (cherry picked from commit f70e96646dea88d3e8177563b7758884b6eac51c) --- components/trustyai/trustyai.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/components/trustyai/trustyai.go b/components/trustyai/trustyai.go index 6a6ca215c5c..5cc182a0e91 100644 --- a/components/trustyai/trustyai.go +++ b/components/trustyai/trustyai.go @@ -72,8 +72,7 @@ func (t *TrustyAI) ReconcileComponent(ctx context.Context, cli client.Client, re return err } } - - if dscispec.DevFlags == nil || dscispec.DevFlags.ManifestsUri == "" { + if (dscispec.DevFlags == nil || dscispec.DevFlags.ManifestsUri == "") && (t.DevFlags == nil || len(t.DevFlags.Manifests) == 0) { if err := deploy.ApplyParams(Path, t.SetImageParamsMap(imageParamMap), false); err != nil { return err } From caa9484ba87d2d35396a7cd412bd990c93e4f388 Mon Sep 17 00:00:00 2001 From: Bartosz Majsak Date: Fri, 2 Feb 2024 15:02:53 +0100 Subject: [PATCH 09/32] feat: 
simplifies Feature API (#831) - `FeaturesInitializer` (now named `FeaturesHandler`) becomes an entry point to compose features - it has two modes: cluster and component. Based on this mode it will initialize `.spec.source` transparently - there is no `Prepare` func anymore - it is now part of `Apply` and `Delete` - most importantly `FeaturesHandler` keeps track of created features so there is no need to keep adding to the slice explicitly - Tests have been adjusted to focus on behavior rather than implementation details (cherry picked from commit d6168fd6b1842eb9ff69ace69771b15f682ceb10) --- apis/features/v1/features_types.go | 13 + components/kserve/kserve.go | 51 +- components/kserve/serverless_setup.go | 50 +- .../dscinitialization/servicemesh_setup.go | 56 +- pkg/feature/builder.go | 54 +- pkg/feature/feature_tracker_handler.go | 19 +- pkg/feature/handler.go | 69 ++ pkg/feature/initializer.go | 64 -- pkg/feature/manifest.go | 7 + tests/envtestutil/utils.go | 10 - .../integration/features/features_int_test.go | 763 ++++++++++-------- .../features/features_suite_int_test.go | 14 +- .../features/serverless_feature_test.go | 240 ++++-- .../features/servicemesh_feature_test.go | 116 ++- 14 files changed, 898 insertions(+), 628 deletions(-) create mode 100644 pkg/feature/handler.go delete mode 100644 pkg/feature/initializer.go diff --git a/apis/features/v1/features_types.go b/apis/features/v1/features_types.go index 037dda6bdf2..d277012c14f 100644 --- a/apis/features/v1/features_types.go +++ b/apis/features/v1/features_types.go @@ -23,6 +23,19 @@ type FeatureTracker struct { Status FeatureTrackerStatus `json:"status,omitempty"` } +// NewFeatureTracker instantiate FeatureTracker instance. +func NewFeatureTracker(name, appNamespace string) *FeatureTracker { + return &FeatureTracker{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "features.opendatahub.io/v1", + Kind: "FeatureTracker", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: appNamespace + "-" + name, + }, + } +} + type FeaturePhase string type OwnerType string diff --git a/components/kserve/kserve.go b/components/kserve/kserve.go index 16970fdf71c..9476cb4f404 100644 --- a/components/kserve/kserve.go +++ b/components/kserve/kserve.go @@ -108,9 +108,9 @@ func (k *Kserve) ReconcileComponent(ctx context.Context, cli client.Client, resC } } // check on dependent operators if all installed in cluster - // dependent operators set in checkRequiredOperatorsInstalled() - if err := checkRequiredOperatorsInstalled(cli); err != nil { - return err + dependOpsErrors := checkDependentOperators(cli).ErrorOrNil() + if dependOpsErrors != nil { + return dependOpsErrors } if err := k.configureServerless(dscispec); err != nil { @@ -186,13 +186,9 @@ func (k *Kserve) configureServerless(instance *dsciv1.DSCInitializationSpec) err return fmt.Errorf("ServiceMesh is need to set to 'Managed' in DSCI CR, it is required by KServe serving field") } - serverlessInitializer := feature.ComponentFeaturesInitializer(k, instance, k.configureServerlessFeatures()) - - if err := serverlessInitializer.Prepare(); err != nil { - return err - } + serverlessFeatures := feature.ComponentFeaturesHandler(k, instance, k.configureServerlessFeatures()) - if err := serverlessInitializer.Apply(); err != nil { + if err := serverlessFeatures.Apply(); err != nil { return err } } @@ -200,30 +196,27 @@ func (k *Kserve) configureServerless(instance *dsciv1.DSCInitializationSpec) err } func (k *Kserve) removeServerlessFeatures(instance *dsciv1.DSCInitializationSpec) error { - serverlessInitializer := 
feature.ComponentFeaturesInitializer(k, instance, k.configureServerlessFeatures()) + serverlessFeatures := feature.ComponentFeaturesHandler(k, instance, k.configureServerlessFeatures()) - if err := serverlessInitializer.Prepare(); err != nil { - return err - } - - return serverlessInitializer.Delete() + return serverlessFeatures.Delete() } -func checkRequiredOperatorsInstalled(cli client.Client) error { +func checkDependentOperators(cli client.Client) *multierror.Error { var multiErr *multierror.Error - checkAndAppendError := func(operatorName string) { - if found, err := deploy.OperatorExists(cli, operatorName); err != nil { - multiErr = multierror.Append(multiErr, err) - } else if !found { - err = fmt.Errorf("operator %s not found. Please install the operator before enabling %s component", - operatorName, ComponentName) - multiErr = multierror.Append(multiErr, err) - } + if found, err := deploy.OperatorExists(cli, ServiceMeshOperator); err != nil { + multiErr = multierror.Append(multiErr, err) + } else if !found { + err = fmt.Errorf("operator %s not found. Please install the operator before enabling %s component", + ServiceMeshOperator, ComponentName) + multiErr = multierror.Append(multiErr, err) } - - checkAndAppendError(ServiceMeshOperator) - checkAndAppendError(ServerlessOperator) - - return multiErr.ErrorOrNil() + if found, err := deploy.OperatorExists(cli, ServerlessOperator); err != nil { + multiErr = multierror.Append(multiErr, err) + } else if !found { + err = fmt.Errorf("operator %s not found. Please install the operator before enabling %s component", + ServerlessOperator, ComponentName) + multiErr = multierror.Append(multiErr, err) + } + return multiErr } diff --git a/components/kserve/serverless_setup.go b/components/kserve/serverless_setup.go index d57ce6c6f55..95d1d38b882 100644 --- a/components/kserve/serverless_setup.go +++ b/components/kserve/serverless_setup.go @@ -8,17 +8,12 @@ import ( "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/servicemesh" ) -const ( - templatesDir = "templates/serverless" -) - -func (k *Kserve) configureServerlessFeatures() feature.DefinedFeatures { - return func(initializer *feature.FeaturesInitializer) error { - servingDeployment, err := feature.CreateFeature("serverless-serving-deployment"). - With(initializer.DSCInitializationSpec). - From(initializer.Source). +func (k *Kserve) configureServerlessFeatures() feature.FeaturesProvider { + return func(handler *feature.FeaturesHandler) error { + servingDeploymentErr := feature.CreateFeature("serverless-serving-deployment"). + For(handler). Manifests( - path.Join(templatesDir, "serving-install"), + path.Join(feature.ServerlessDir, "serving-install"), ). WithData(PopulateComponentSettings(k)). PreConditions( @@ -27,17 +22,18 @@ func (k *Kserve) configureServerlessFeatures() feature.DefinedFeatures { servicemesh.EnsureServiceMeshInstalled, feature.CreateNamespaceIfNotExists(serverless.KnativeServingNamespace), ). + PostConditions( + feature.WaitForPodsToBeReady(serverless.KnativeServingNamespace), + ). Load() - if err != nil { - return err + if servingDeploymentErr != nil { + return servingDeploymentErr } - initializer.Features = append(initializer.Features, servingDeployment) - servingNetIstioSecretFiltering, err := feature.CreateFeature("serverless-net-istio-secret-filtering"). - With(initializer.DSCInitializationSpec). - From(initializer.Source). + servingNetIstioSecretFilterinErr := feature.CreateFeature("serverless-net-istio-secret-filtering"). + For(handler). 
Manifests( - path.Join(templatesDir, "serving-net-istio-secret-filtering.patch.tmpl"), + path.Join(feature.ServerlessDir, "serving-net-istio-secret-filtering.patch.tmpl"), ). WithData(PopulateComponentSettings(k)). PreConditions(serverless.EnsureServerlessServingDeployed). @@ -45,29 +41,27 @@ func (k *Kserve) configureServerlessFeatures() feature.DefinedFeatures { feature.WaitForPodsToBeReady(serverless.KnativeServingNamespace), ). Load() - if err != nil { - return err + if servingNetIstioSecretFilterinErr != nil { + return servingNetIstioSecretFilterinErr } - initializer.Features = append(initializer.Features, servingNetIstioSecretFiltering) - servingIstioGateways, err := feature.CreateFeature("serverless-serving-gateways"). - With(initializer.DSCInitializationSpec). - From(initializer.Source). + serverlessGwErr := feature.CreateFeature("serverless-serving-gateways"). + For(handler). PreConditions(serverless.EnsureServerlessServingDeployed). WithData( + PopulateComponentSettings(k), serverless.ServingDefaultValues, serverless.ServingIngressDomain, - PopulateComponentSettings(k), ). WithResources(serverless.ServingCertificateResource). Manifests( - path.Join(templatesDir, "serving-istio-gateways"), + path.Join(feature.ServerlessDir, "serving-istio-gateways"), ). Load() - if err != nil { - return err + if serverlessGwErr != nil { + return serverlessGwErr } - initializer.Features = append(initializer.Features, servingIstioGateways) + return nil } } diff --git a/controllers/dscinitialization/servicemesh_setup.go b/controllers/dscinitialization/servicemesh_setup.go index e39ed64bfe7..f976652fc3a 100644 --- a/controllers/dscinitialization/servicemesh_setup.go +++ b/controllers/dscinitialization/servicemesh_setup.go @@ -11,19 +11,12 @@ import ( "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/servicemesh" ) -const templatesDir = "templates/servicemesh" - func (r *DSCInitializationReconciler) configureServiceMesh(instance *dsciv1.DSCInitialization) error { switch instance.Spec.ServiceMesh.ManagementState { case operatorv1.Managed: - serviceMeshInitializer := feature.ClusterFeaturesInitializer(instance, configureServiceMeshFeatures()) - if err := serviceMeshInitializer.Prepare(); err != nil { - r.Log.Error(err, "failed configuring service mesh resources") - r.Recorder.Eventf(instance, corev1.EventTypeWarning, "DSCInitializationReconcileError", "failed configuring service mesh resources") - return err - } + serviceMeshFeatures := feature.ClusterFeaturesHandler(instance, configureServiceMeshFeatures()) - if err := serviceMeshInitializer.Apply(); err != nil { + if err := serviceMeshFeatures.Apply(); err != nil { r.Log.Error(err, "failed applying service mesh resources") r.Recorder.Eventf(instance, corev1.EventTypeWarning, "DSCInitializationReconcileError", "failed applying service mesh resources") return err @@ -43,15 +36,9 @@ func (r *DSCInitializationReconciler) configureServiceMesh(instance *dsciv1.DSCI func (r *DSCInitializationReconciler) removeServiceMesh(instance *dsciv1.DSCInitialization) error { // on condition of Managed, do not handle Removed when set to Removed it trigger DSCI reconcile to cleanup if instance.Spec.ServiceMesh.ManagementState == operatorv1.Managed { - serviceMeshInitializer := feature.ClusterFeaturesInitializer(instance, configureServiceMeshFeatures()) - if err := serviceMeshInitializer.Prepare(); err != nil { - r.Log.Error(err, "failed configuring service mesh resources") - r.Recorder.Eventf(instance, corev1.EventTypeWarning, 
"DSCInitializationReconcileError", "failed configuring service mesh resources") - - return err - } + serviceMeshFeatures := feature.ClusterFeaturesHandler(instance, configureServiceMeshFeatures()) - if err := serviceMeshInitializer.Delete(); err != nil { + if err := serviceMeshFeatures.Delete(); err != nil { r.Log.Error(err, "failed deleting service mesh resources") r.Recorder.Eventf(instance, corev1.EventTypeWarning, "DSCInitializationReconcileError", "failed deleting service mesh resources") @@ -62,15 +49,14 @@ func (r *DSCInitializationReconciler) removeServiceMesh(instance *dsciv1.DSCInit return nil } -func configureServiceMeshFeatures() feature.DefinedFeatures { - return func(initializer *feature.FeaturesInitializer) error { - serviceMeshSpec := initializer.DSCInitializationSpec.ServiceMesh +func configureServiceMeshFeatures() feature.FeaturesProvider { + return func(handler *feature.FeaturesHandler) error { + serviceMeshSpec := handler.DSCInitializationSpec.ServiceMesh - smcpCreation, errSmcp := feature.CreateFeature("mesh-control-plane-creation"). - With(initializer.DSCInitializationSpec). - From(initializer.Source). + smcpCreationErr := feature.CreateFeature("mesh-control-plane-creation"). + For(handler). Manifests( - path.Join(templatesDir, "base", "create-smcp.tmpl"), + path.Join(feature.ServiceMeshDir, "base", "create-smcp.tmpl"), ). PreConditions( servicemesh.EnsureServiceMeshOperatorInstalled, @@ -81,27 +67,25 @@ func configureServiceMeshFeatures() feature.DefinedFeatures { ). Load() - if errSmcp != nil { - return errSmcp + if smcpCreationErr != nil { + return smcpCreationErr } - initializer.Features = append(initializer.Features, smcpCreation) if serviceMeshSpec.ControlPlane.MetricsCollection == "Istio" { - metricsCollection, errMetrics := feature.CreateFeature("mesh-metrics-collection"). - With(initializer.DSCInitializationSpec). - From(initializer.Source). - Manifests( - path.Join(templatesDir, "metrics-collection"), - ). + metricsCollectionErr := feature.CreateFeature("mesh-metrics-collection"). + For(handler). PreConditions( servicemesh.EnsureServiceMeshInstalled, ). + Manifests( + path.Join(feature.ServiceMeshDir, "metrics-collection"), + ). Load() - if errMetrics != nil { - return errMetrics + if metricsCollectionErr != nil { + return metricsCollectionErr } - initializer.Features = append(initializer.Features, metricsCollection) } + return nil } } diff --git a/pkg/feature/builder.go b/pkg/feature/builder.go index 19cf04e45eb..e019ecf555c 100644 --- a/pkg/feature/builder.go +++ b/pkg/feature/builder.go @@ -13,7 +13,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/config" - v1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" featurev1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/features/v1" infrav1 "github.com/opendatahub-io/opendatahub-operator/v2/infrastructure/v1" ) @@ -21,50 +20,41 @@ import ( type partialBuilder func(f *Feature) error type featureBuilder struct { - name string - config *rest.Config - builders []partialBuilder - fsys fs.FS + name string + config *rest.Config + builders []partialBuilder + featuresHanlder *FeaturesHandler + fsys fs.FS } -func CreateFeature(name string) *featureName { //nolint:golint,revive //No need to export featureBuilder. - return &featureName{ +func CreateFeature(name string) *usingFeaturesHandler { //nolint:golint,revive //No need to export featureBuilder. 
+ return &usingFeaturesHandler{ name: name, } } -type featureName struct { +type usingFeaturesHandler struct { name string } -func (fn *featureName) With(spec *v1.DSCInitializationSpec) *featureSource { - return &featureSource{ - spec: spec, - name: fn.name, - } -} - -type featureSource struct { - spec *v1.DSCInitializationSpec - name string -} - -func (fo *featureSource) From(source featurev1.Source) *featureBuilder { +func (u *usingFeaturesHandler) For(featuresHandler *FeaturesHandler) *featureBuilder { createSpec := func(f *Feature) error { f.Spec = &Spec{ - ServiceMeshSpec: &fo.spec.ServiceMesh, + ServiceMeshSpec: &featuresHandler.DSCInitializationSpec.ServiceMesh, Serving: &infrav1.ServingSpec{}, - Source: &source, - AppNamespace: fo.spec.ApplicationsNamespace, + Source: &featuresHandler.source, + AppNamespace: featuresHandler.DSCInitializationSpec.ApplicationsNamespace, } return nil } fb := &featureBuilder{ - name: fo.name, - fsys: embeddedFiles, + name: u.name, + featuresHanlder: featuresHandler, + fsys: embeddedFiles, } + // Ensures creation of .Spec object is always invoked first fb.builders = append([]partialBuilder{createSpec}, fb.builders...) @@ -172,28 +162,30 @@ func (fb *featureBuilder) WithResources(resources ...Action) *featureBuilder { return fb } -func (fb *featureBuilder) Load() (*Feature, error) { +func (fb *featureBuilder) Load() error { feature := newFeature(fb.name) // UsingConfig builder wasn't called while constructing this feature. // Get default settings and create needed clients. if fb.config == nil { if err := fb.withDefaultClient(); err != nil { - return nil, err + return err } } if err := createClients(fb.config)(feature); err != nil { - return nil, err + return err } for i := range fb.builders { if err := fb.builders[i](feature); err != nil { - return nil, err + return err } } - return feature, nil + fb.featuresHanlder.features = append(fb.featuresHanlder.features, feature) + + return nil } func (fb *featureBuilder) withDefaultClient() error { diff --git a/pkg/feature/feature_tracker_handler.go b/pkg/feature/feature_tracker_handler.go index 28c175a6dd7..e487847b0ae 100644 --- a/pkg/feature/feature_tracker_handler.go +++ b/pkg/feature/feature_tracker_handler.go @@ -5,12 +5,10 @@ import ( "fmt" k8serrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client" featurev1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/features/v1" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/common" ) // createFeatureTracker instantiates FeatureTracker for a given Feature. 
It's a cluster-scoped resource used @@ -52,18 +50,11 @@ func removeFeatureTracker(f *Feature) error { } func (f *Feature) getFeatureTracker() (*featurev1.FeatureTracker, error) { - tracker := &featurev1.FeatureTracker{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "features.opendatahub.io/v1", - Kind: "FeatureTracker", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: f.Spec.AppNamespace + "-" + common.TrimToRFC1123Name(f.Name), - }, - Spec: featurev1.FeatureTrackerSpec{ - Source: *f.Spec.Source, - AppNamespace: f.Spec.AppNamespace, - }, + tracker := featurev1.NewFeatureTracker(f.Name, f.Spec.AppNamespace) + + tracker.Spec = featurev1.FeatureTrackerSpec{ + Source: *f.Spec.Source, + AppNamespace: f.Spec.AppNamespace, } err := f.Client.Get(context.Background(), client.ObjectKeyFromObject(tracker), tracker) diff --git a/pkg/feature/handler.go b/pkg/feature/handler.go new file mode 100644 index 00000000000..0dc539a5bb1 --- /dev/null +++ b/pkg/feature/handler.go @@ -0,0 +1,69 @@ +package feature + +import ( + "fmt" + + "github.com/hashicorp/go-multierror" + + v1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" + featurev1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/features/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/components" +) + +// FeaturesHandler coordinates feature creations and removal from within controllers. +type FeaturesHandler struct { + *v1.DSCInitializationSpec + source featurev1.Source + features []*Feature + featuresProvider FeaturesProvider +} + +// FeaturesProvider is a function which allow to define list of features +// and couple them with the given initializer. +type FeaturesProvider func(handler *FeaturesHandler) error + +func ClusterFeaturesHandler(dsci *v1.DSCInitialization, def FeaturesProvider) *FeaturesHandler { + return &FeaturesHandler{ + DSCInitializationSpec: &dsci.Spec, + source: featurev1.Source{Type: featurev1.DSCIType, Name: dsci.Name}, + featuresProvider: def, + } +} + +func ComponentFeaturesHandler(component components.ComponentInterface, spec *v1.DSCInitializationSpec, def FeaturesProvider) *FeaturesHandler { + return &FeaturesHandler{ + DSCInitializationSpec: spec, + source: featurev1.Source{Type: featurev1.ComponentType, Name: component.GetComponentName()}, + featuresProvider: def, + } +} + +func (f *FeaturesHandler) Apply() error { + if err := f.featuresProvider(f); err != nil { + return fmt.Errorf("apply phase failed when wiring Feature instances: %w", err) + } + + var applyErrors *multierror.Error + for _, f := range f.features { + applyErrors = multierror.Append(applyErrors, f.Apply()) + } + + return applyErrors.ErrorOrNil() +} + +// Delete executes registered clean-up tasks in the opposite order they were initiated (following a stack structure). +// For instance, this allows for the undoing patches before its deletion. +// This approach assumes that Features are either instantiated in the correct sequence +// or are self-contained. 
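// A hedged usage sketch of the handler as seen from a controller (variable names are
// illustrative; the real call sites are the controllers updated in this patch):
//
//	featuresHandler := feature.ClusterFeaturesHandler(dsci, func(handler *feature.FeaturesHandler) error {
//		return feature.CreateFeature("example").
//			For(handler).
//			Manifests(path.Join(feature.ServiceMeshDir, "base", "create-smcp.tmpl")).
//			Load()
//	})
//	// Apply runs the provider to register features and applies them in order;
//	// Delete runs the same provider and cleans the features up in reverse order.
//	applyErr := featuresHandler.Apply()
//	deleteErr := featuresHandler.Delete()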
+func (f *FeaturesHandler) Delete() error { + if err := f.featuresProvider(f); err != nil { + return fmt.Errorf("delete phase failed when wiring Feature instances: %w", err) + } + + var cleanupErrors *multierror.Error + for i := len(f.features) - 1; i >= 0; i-- { + cleanupErrors = multierror.Append(cleanupErrors, f.features[i].Cleanup()) + } + + return cleanupErrors.ErrorOrNil() +} diff --git a/pkg/feature/initializer.go b/pkg/feature/initializer.go deleted file mode 100644 index 412bd62b9f4..00000000000 --- a/pkg/feature/initializer.go +++ /dev/null @@ -1,64 +0,0 @@ -package feature - -import ( - "github.com/hashicorp/go-multierror" - - v1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" - featurev1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/features/v1" - "github.com/opendatahub-io/opendatahub-operator/v2/components" -) - -type FeaturesInitializer struct { - *v1.DSCInitializationSpec - Source featurev1.Source - Features []*Feature - definedFeatures DefinedFeatures -} - -type DefinedFeatures func(initializer *FeaturesInitializer) error - -func ClusterFeaturesInitializer(dsci *v1.DSCInitialization, def DefinedFeatures) *FeaturesInitializer { - return &FeaturesInitializer{ - DSCInitializationSpec: &dsci.Spec, - Source: featurev1.Source{Type: featurev1.DSCIType, Name: dsci.Name}, - definedFeatures: def, - } -} - -func ComponentFeaturesInitializer(component components.ComponentInterface, spec *v1.DSCInitializationSpec, def DefinedFeatures) *FeaturesInitializer { - return &FeaturesInitializer{ - DSCInitializationSpec: spec, - Source: featurev1.Source{Type: featurev1.ComponentType, Name: component.GetComponentName()}, - definedFeatures: def, - } -} - -// Prepare performs validation of the spec and ensures all resources, -// such as Features and their templates, are processed and initialized -// before proceeding with the actual cluster set-up. -func (s *FeaturesInitializer) Prepare() error { - return s.definedFeatures(s) -} - -func (s *FeaturesInitializer) Apply() error { - var applyErrors *multierror.Error - - for _, f := range s.Features { - applyErrors = multierror.Append(applyErrors, f.Apply()) - } - - return applyErrors.ErrorOrNil() -} - -// Delete executes registered clean-up tasks in the opposite order they were initiated (following a stack structure). -// For instance, this allows for the undoing patches before its deletion. -// This approach assumes that Features are either instantiated in the correct sequence -// or are self-contained. 
-func (s *FeaturesInitializer) Delete() error { - var cleanupErrors *multierror.Error - for i := len(s.Features) - 1; i >= 0; i-- { - cleanupErrors = multierror.Append(cleanupErrors, s.Features[i].Cleanup()) - } - - return cleanupErrors.ErrorOrNil() -} diff --git a/pkg/feature/manifest.go b/pkg/feature/manifest.go index 2726e2d0e23..3fcbcd22de3 100644 --- a/pkg/feature/manifest.go +++ b/pkg/feature/manifest.go @@ -7,6 +7,7 @@ import ( "html/template" "io" "io/fs" + "path" "path/filepath" "strings" ) @@ -14,6 +15,12 @@ import ( //go:embed templates var embeddedFiles embed.FS +var ( + BaseDir = "templates" + ServiceMeshDir = path.Join(BaseDir, "servicemesh") + ServerlessDir = path.Join(BaseDir, "serverless") +) + type manifest struct { name, path, diff --git a/tests/envtestutil/utils.go b/tests/envtestutil/utils.go index e24b286560e..3a6d2396479 100644 --- a/tests/envtestutil/utils.go +++ b/tests/envtestutil/utils.go @@ -4,8 +4,6 @@ import ( "fmt" "os" "path/filepath" - - featurev1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/features/v1" ) func FindProjectRoot() (string, error) { @@ -29,11 +27,3 @@ func FindProjectRoot() (string, error) { return "", fmt.Errorf("project root not found") } - -// NewSource creates an origin object with specified component and name. -func NewSource(component featurev1.OwnerType, name string) featurev1.Source { - return featurev1.Source{ - Type: component, - Name: name, - } -} diff --git a/tests/integration/features/features_int_test.go b/tests/integration/features/features_int_test.go index 1d0430b2859..3d52595614e 100644 --- a/tests/integration/features/features_int_test.go +++ b/tests/integration/features/features_int_test.go @@ -16,10 +16,10 @@ import ( "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" - dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" + dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" featurev1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/features/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/gvr" "github.com/opendatahub-io/opendatahub-operator/v2/tests/envtestutil" . "github.com/onsi/ginkgo/v2" @@ -31,9 +31,8 @@ import ( var testEmbeddedFiles embed.FS const ( - timeout = 5 * time.Second - interval = 250 * time.Millisecond - templatesDir = "templates" + timeout = 5 * time.Second + interval = 250 * time.Millisecond ) var _ = Describe("feature preconditions", func() { @@ -42,8 +41,8 @@ var _ = Describe("feature preconditions", func() { var ( objectCleaner *envtestutil.Cleaner - testFeature *feature.Feature namespace string + dsci *dsciv1.DSCInitialization ) BeforeEach(func() { @@ -51,16 +50,7 @@ var _ = Describe("feature preconditions", func() { testFeatureName := "test-ns-creation" namespace = envtestutil.AppendRandomNameTo(testFeatureName) - - dsciSpec := newDSCInitializationSpec(namespace) - source := envtestutil.NewSource(featurev1.DSCIType, "default") - var err error - testFeature, err = feature.CreateFeature(testFeatureName). - With(dsciSpec). - From(source). - UsingConfig(envTest.Config). 
- Load() - Expect(err).ToNot(HaveOccurred()) + dsci = newDSCInitialization(namespace) }) It("should create namespace if it does not exist", func() { @@ -70,78 +60,110 @@ var _ = Describe("feature preconditions", func() { defer objectCleaner.DeleteAll(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}) // when - err = feature.CreateNamespaceIfNotExists(namespace)(testFeature) + featuresHandler := feature.ClusterFeaturesHandler(dsci, func(handler *feature.FeaturesHandler) error { + testFeatureErr := feature.CreateFeature("create-new-ns"). + For(handler). + PreConditions(feature.CreateNamespaceIfNotExists(namespace)). + UsingConfig(envTest.Config). + Load() + + Expect(testFeatureErr).ToNot(HaveOccurred()) + + return nil + }) // then - Expect(err).ToNot(HaveOccurred()) + Expect(featuresHandler.Apply()).To(Succeed()) + + // and + Eventually(func() error { + _, err := getNamespace(namespace) + return err + }).WithTimeout(timeout).WithPolling(interval).Should(Succeed()) }) It("should not try to create namespace if it does already exist", func() { // given - ns := createNamespace(namespace) + ns := newNamespace(namespace) Expect(envTestClient.Create(context.Background(), ns)).To(Succeed()) + Eventually(func() error { + _, err := getNamespace(namespace) + return err + }).WithTimeout(timeout).WithPolling(interval).Should(Succeed()) // wait for ns to actually get created + defer objectCleaner.DeleteAll(ns) // when - err := feature.CreateNamespaceIfNotExists(namespace)(testFeature) + featuresHandler := feature.ClusterFeaturesHandler(dsci, func(handler *feature.FeaturesHandler) error { + testFeatureErr := feature.CreateFeature("create-new-ns"). + For(handler). + PreConditions(feature.CreateNamespaceIfNotExists(namespace)). + UsingConfig(envTest.Config). + Load() + + Expect(testFeatureErr).ToNot(HaveOccurred()) + + return nil + }) // then - Expect(err).ToNot(HaveOccurred()) + Expect(featuresHandler.Apply()).To(Succeed()) + }) + }) Context("ensuring custom resource definitions are installed", func() { var ( - dsciSpec *dscv1.DSCInitializationSpec - verificationFeature *feature.Feature - source featurev1.Source + dsci *dsciv1.DSCInitialization ) BeforeEach(func() { - dsciSpec = newDSCInitializationSpec("default") - source = envtestutil.NewSource(featurev1.DSCIType, "default") + namespace := envtestutil.AppendRandomNameTo("test-crd-creation") + dsci = newDSCInitialization(namespace) }) It("should successfully check for existing CRD", func() { // given example CRD installed into env name := "test-resources.openshift.io" - var err error - verificationFeature, err = feature.CreateFeature("CRD verification"). - With(dsciSpec). - From(source). - UsingConfig(envTest.Config). - PreConditions(feature.EnsureCRDIsInstalled(name)). - Load() - Expect(err).ToNot(HaveOccurred()) - // when - err = verificationFeature.Apply() + featuresHandler := feature.ClusterFeaturesHandler(dsci, func(handler *feature.FeaturesHandler) error { + crdVerificationErr := feature.CreateFeature("verify-crd-exists"). + For(handler). + UsingConfig(envTest.Config). + PreConditions(feature.EnsureCRDIsInstalled(name)). + Load() + + Expect(crdVerificationErr).ToNot(HaveOccurred()) + + return nil + }) // then - Expect(err).ToNot(HaveOccurred()) + Expect(featuresHandler.Apply()).To(Succeed()) }) It("should fail to check non-existing CRD", func() { // given name := "non-existing-resource.non-existing-group.io" - var err error - verificationFeature, err = feature.CreateFeature("CRD verification"). - With(dsciSpec). - From(source). 
- UsingConfig(envTest.Config). - PreConditions(feature.EnsureCRDIsInstalled(name)). - Load() - Expect(err).ToNot(HaveOccurred()) - // when - err = verificationFeature.Apply() + featuresHandler := feature.ClusterFeaturesHandler(dsci, func(handler *feature.FeaturesHandler) error { + crdVerificationErr := feature.CreateFeature("fail-on-non-existing-crd"). + For(handler). + UsingConfig(envTest.Config). + PreConditions(feature.EnsureCRDIsInstalled(name)). + Load() + + Expect(crdVerificationErr).ToNot(HaveOccurred()) + + return nil + }) // then - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("\"non-existing-resource.non-existing-group.io\" not found")) + Expect(featuresHandler.Apply()).To(MatchError(ContainSubstring("\"non-existing-resource.non-existing-group.io\" not found"))) }) }) }) @@ -150,323 +172,392 @@ var _ = Describe("feature cleanup", func() { Context("using FeatureTracker and ownership as cleanup strategy", Ordered, func() { + const ( + featureName = "create-secret" + secretName = "test-secret" + ) + var ( - namespace string - dsciSpec *dscv1.DSCInitializationSpec - source featurev1.Source + dsci *dsciv1.DSCInitialization + namespace string + featuresHandler *feature.FeaturesHandler ) BeforeAll(func() { - namespace = envtestutil.AppendRandomNameTo("feature-tracker-test") - dsciSpec = newDSCInitializationSpec(namespace) - source = envtestutil.NewSource(featurev1.DSCIType, "default") + namespace = envtestutil.AppendRandomNameTo("test-secret-ownership") + dsci = newDSCInitialization(namespace) + featuresHandler = feature.ClusterFeaturesHandler(dsci, func(handler *feature.FeaturesHandler) error { + secretCreationErr := feature.CreateFeature(featureName). + For(handler). + UsingConfig(envTest.Config). + PreConditions( + feature.CreateNamespaceIfNotExists(namespace), + ). + WithResources(createSecret(secretName, namespace)). + Load() + + Expect(secretCreationErr).ToNot(HaveOccurred()) + + return nil + }) + }) It("should successfully create resource and associated feature tracker", func() { - // given - createConfigMap, err := feature.CreateFeature("create-cfg-map"). - With(dsciSpec). - From(source). - UsingConfig(envTest.Config). - PreConditions( - feature.CreateNamespaceIfNotExists(namespace), - ). - WithResources(createTestSecret(namespace)). - Load() - Expect(err).ToNot(HaveOccurred()) - // when - Expect(createConfigMap.Apply()).To(Succeed()) + Expect(featuresHandler.Apply()).Should(Succeed()) // then - Expect(createConfigMap.Spec.Tracker).ToNot(BeNil()) - _, err = createConfigMap.DynamicClient. - Resource(gvr.FeatureTracker). - Get(context.TODO(), createConfigMap.Spec.Tracker.Name, metav1.GetOptions{}) - - Expect(err).ToNot(HaveOccurred()) + Eventually(createdSecretHasOwnerReferenceToOwningFeature(secretName, namespace)). + WithTimeout(timeout). + WithPolling(interval). + Should(Succeed()) }) It("should remove feature tracker on clean-up", func() { - // recreating feature struct again as it would happen in the reconcile - // given - createConfigMap, err := feature.CreateFeature("create-cfg-map"). - With(dsciSpec). - From(source). - UsingConfig(envTest.Config). - PreConditions( - feature.CreateNamespaceIfNotExists(namespace), - ). - WithResources(createTestSecret(namespace)). - Load() - Expect(err).ToNot(HaveOccurred()) - // when - Expect(createConfigMap.Cleanup()).To(Succeed()) - trackerName := createConfigMap.Spec.Tracker.Name + Expect(featuresHandler.Delete()).To(Succeed()) // then - _, err = createConfigMap.DynamicClient. - Resource(gvr.FeatureTracker). 
- Get(context.TODO(), trackerName, metav1.GetOptions{}) - - Expect(errors.IsNotFound(err)).To(BeTrue()) + Eventually(createdSecretHasOwnerReferenceToOwningFeature(secretName, namespace)). + WithTimeout(timeout). + WithPolling(interval). + Should(WithTransform(errors.IsNotFound, BeTrue())) }) }) -}) - -var _ = Describe("feature trackers", func() { - Context("ensuring feature trackers indicate status and phase", func() { + var _ = Describe("feature trackers", func() { + Context("ensuring feature trackers indicate status and phase", func() { + + const appNamespace = "default" + + var ( + dsci *dsciv1.DSCInitialization + ) + + BeforeEach(func() { + dsci = newDSCInitialization("default") + }) + + It("should indicate successful installation in FeatureTracker", func() { + // given example CRD installed into env + name := "test-resources.openshift.io" + featuresHandler := feature.ClusterFeaturesHandler(dsci, func(handler *feature.FeaturesHandler) error { + verificationFeatureErr := feature.CreateFeature("crd-verification"). + For(handler). + UsingConfig(envTest.Config). + PreConditions(feature.EnsureCRDIsInstalled(name)). + Load() + + Expect(verificationFeatureErr).ToNot(HaveOccurred()) + + return nil + }) + + // when + Expect(featuresHandler.Apply()).To(Succeed()) + + // then + featureTracker, err := getFeatureTracker("crd-verification", appNamespace) + Expect(err).ToNot(HaveOccurred()) + Expect(*featureTracker.Status.Conditions).To(ContainElement( + MatchFields(IgnoreExtras, Fields{ + "Type": Equal(conditionsv1.ConditionAvailable), + "Status": Equal(v1.ConditionTrue), + "Reason": Equal(string(featurev1.FeatureCreated)), + }), + )) + }) + + It("should indicate failure in preconditions", func() { + // given + name := "non-existing-resource.non-existing-group.io" + featuresHandler := feature.ClusterFeaturesHandler(dsci, func(handler *feature.FeaturesHandler) error { + verificationFeatureErr := feature.CreateFeature("non-existing-crd-verification"). + For(handler). + UsingConfig(envTest.Config). + PreConditions(feature.EnsureCRDIsInstalled(name)). + Load() + + Expect(verificationFeatureErr).ToNot(HaveOccurred()) + + return nil + }) + + // when + Expect(featuresHandler.Apply()).ToNot(Succeed()) + + // then + featureTracker, err := getFeatureTracker("non-existing-crd-verification", appNamespace) + Expect(err).ToNot(HaveOccurred()) + Expect(*featureTracker.Status.Conditions).To(ContainElement( + MatchFields(IgnoreExtras, Fields{ + "Type": Equal(conditionsv1.ConditionDegraded), + "Status": Equal(v1.ConditionTrue), + "Reason": Equal(string(featurev1.PreConditions)), + }), + )) + }) + + It("should indicate failure in post-conditions", func() { + // given + featuresHandler := feature.ClusterFeaturesHandler(dsci, func(handler *feature.FeaturesHandler) error { + verificationFeatureErr := feature.CreateFeature("post-condition-failure"). + For(handler). + UsingConfig(envTest.Config). + PostConditions(func(f *feature.Feature) error { + return fmt.Errorf("during test always fail") + }). 
+ Load() + + Expect(verificationFeatureErr).ToNot(HaveOccurred()) + + return nil + }) + + // when + Expect(featuresHandler.Apply()).ToNot(Succeed()) + + // then + featureTracker, err := getFeatureTracker("post-condition-failure", appNamespace) + Expect(err).ToNot(HaveOccurred()) + Expect(*featureTracker.Status.Conditions).To(ContainElement( + MatchFields(IgnoreExtras, Fields{ + "Type": Equal(conditionsv1.ConditionDegraded), + "Status": Equal(v1.ConditionTrue), + "Reason": Equal(string(featurev1.PostConditions)), + }), + )) + }) + + It("should correctly indicate source in the feature tracker", func() { + // given + featuresHandler := feature.ClusterFeaturesHandler(dsci, func(handler *feature.FeaturesHandler) error { + emptyFeatureErr := feature.CreateFeature("empty-feature"). + For(handler). + UsingConfig(envTest.Config). + Load() + + Expect(emptyFeatureErr).ToNot(HaveOccurred()) + + return nil + }) + + // when + Expect(featuresHandler.Apply()).To(Succeed()) + + // then + featureTracker, err := getFeatureTracker("empty-feature", appNamespace) + Expect(err).ToNot(HaveOccurred()) + Expect(featureTracker.Spec.Source).To( + MatchFields(IgnoreExtras, Fields{ + "Name": Equal("default-dsci"), + "Type": Equal(featurev1.DSCIType), + }), + ) + }) + + It("should correctly indicate app namespace in the feature tracker", func() { + // given + featuresHandler := feature.ClusterFeaturesHandler(dsci, func(handler *feature.FeaturesHandler) error { + emptyFeatureErr := feature.CreateFeature("empty-feature"). + For(handler). + UsingConfig(envTest.Config). + Load() + + Expect(emptyFeatureErr).ToNot(HaveOccurred()) + + return nil + }) + + // when + Expect(featuresHandler.Apply()).To(Succeed()) + + // then + featureTracker, err := getFeatureTracker("empty-feature", appNamespace) + Expect(err).ToNot(HaveOccurred()) + Expect(featureTracker.Spec.AppNamespace).To(Equal("default")) + }) - var ( - dsciSpec *dscv1.DSCInitializationSpec - source featurev1.Source - ) - - BeforeEach(func() { - dsciSpec = newDSCInitializationSpec("default") - source = envtestutil.NewSource(featurev1.DSCIType, "default") }) - It("should indicate successful installation in FeatureTracker", func() { - // given example CRD installed into env - name := "test-resources.openshift.io" - verificationFeature, err := feature.CreateFeature("crd-verification"). - With(dsciSpec). - From(source). - UsingConfig(envTest.Config). - PreConditions(feature.EnsureCRDIsInstalled(name)). - Load() - Expect(err).ToNot(HaveOccurred()) - - // when - Expect(verificationFeature.Apply()).To(Succeed()) - - // then - featureTracker := getFeatureTracker("default-crd-verification") - Expect(*featureTracker.Status.Conditions).To(ContainElement( - MatchFields(IgnoreExtras, Fields{ - "Type": Equal(conditionsv1.ConditionAvailable), - "Status": Equal(v1.ConditionTrue), - "Reason": Equal(string(featurev1.FeatureCreated)), - }), - )) - }) - - It("should indicate failure in preconditions", func() { - // given - name := "non-existing-resource.non-existing-group.io" - verificationFeature, err := feature.CreateFeature("crd-verification"). - With(dsciSpec). - From(source). - UsingConfig(envTest.Config). - PreConditions(feature.EnsureCRDIsInstalled(name)). 
- Load() - Expect(err).ToNot(HaveOccurred()) - - // when - Expect(verificationFeature.Apply()).ToNot(Succeed()) - - // then - featureTracker := getFeatureTracker("default-crd-verification") - Expect(*featureTracker.Status.Conditions).To(ContainElement( - MatchFields(IgnoreExtras, Fields{ - "Type": Equal(conditionsv1.ConditionDegraded), - "Status": Equal(v1.ConditionTrue), - "Reason": Equal(string(featurev1.PreConditions)), - }), - )) - }) + }) - It("should indicate failure in post-conditions", func() { - // given - verificationFeature, err := feature.CreateFeature("post-condition-failure"). - With(dsciSpec). - From(source). - UsingConfig(envTest.Config). - PostConditions(func(f *feature.Feature) error { - return fmt.Errorf("always fail") - }). - Load() - Expect(err).ToNot(HaveOccurred()) + var _ = Describe("Manifest sources", func() { + Context("using various manifest sources", func() { + + var ( + objectCleaner *envtestutil.Cleaner + dsci *dsciv1.DSCInitialization + namespace *v1.Namespace + ) + + BeforeEach(func() { + objectCleaner = envtestutil.CreateCleaner(envTestClient, envTest.Config, timeout, interval) + nsName := envtestutil.AppendRandomNameTo("smcp-ns") + + var err error + namespace, err = cluster.CreateNamespace(envTestClient, nsName) + Expect(err).ToNot(HaveOccurred()) + + dsci = newDSCInitialization(nsName) + dsci.Spec.ServiceMesh.ControlPlane.Namespace = namespace.Name + }) + + AfterEach(func() { + objectCleaner.DeleteAll(namespace) + }) + + It("should be able to process an embedded template from the default location", func() { + // given + featuresHandler := feature.ClusterFeaturesHandler(dsci, func(handler *feature.FeaturesHandler) error { + createServiceErr := feature.CreateFeature("create-local-gw-svc"). + For(handler). + UsingConfig(envTest.Config). + Manifests(path.Join(feature.ServerlessDir, "serving-istio-gateways", "local-gateway-svc.tmpl")). + Load() + + Expect(createServiceErr).ToNot(HaveOccurred()) + + return nil + }) + + // when + Expect(featuresHandler.Apply()).To(Succeed()) + + // then + service, err := getService("knative-local-gateway", namespace.Name) + Expect(err).ToNot(HaveOccurred()) + Expect(service.Name).To(Equal("knative-local-gateway")) + }) + + It("should be able to process an embedded YAML file from the default location", func() { + // given + knativeNs, nsErr := cluster.CreateNamespace(envTestClient, "knative-serving") + Expect(nsErr).ToNot(HaveOccurred()) + defer objectCleaner.DeleteAll(knativeNs) + + featuresHandler := feature.ClusterFeaturesHandler(dsci, func(handler *feature.FeaturesHandler) error { + createGatewayErr := feature.CreateFeature("create-local-gateway"). + For(handler). + UsingConfig(envTest.Config). + Manifests(path.Join(feature.ServerlessDir, "serving-istio-gateways", "istio-local-gateway.yaml")). + Load() + + Expect(createGatewayErr).ToNot(HaveOccurred()) + + return nil + }) + + // when + Expect(featuresHandler.Apply()).To(Succeed()) + + // then + gateway, err := getGateway(envTest.Config, "knative-serving", "knative-local-gateway") + Expect(err).ToNot(HaveOccurred()) + Expect(gateway).ToNot(BeNil()) + }) + + It("should be able to process an embedded file from a non default location", func() { + // given + featuresHandler := feature.ClusterFeaturesHandler(dsci, func(handler *feature.FeaturesHandler) error { + createNamespaceErr := feature.CreateFeature("create-namespace"). + For(handler). + UsingConfig(envTest.Config). + ManifestSource(testEmbeddedFiles). + Manifests(path.Join(feature.BaseDir, "namespace.yaml")). 
+ Load() + + Expect(createNamespaceErr).ToNot(HaveOccurred()) + + return nil + }) + + // when + Expect(featuresHandler.Apply()).To(Succeed()) + + // then + embeddedNs, err := getNamespace("embedded-test-ns") + defer objectCleaner.DeleteAll(embeddedNs) + Expect(err).ToNot(HaveOccurred()) + Expect(embeddedNs.Name).To(Equal("embedded-test-ns")) + }) + + const nsYAML = `apiVersion: v1 +kind: Namespace +metadata: + name: real-file-test-ns` - // when - Expect(verificationFeature.Apply()).ToNot(Succeed()) + It("should source manifests from a specified temporary directory within the file system", func() { + // given + tempDir := GinkgoT().TempDir() - // then - featureTracker := getFeatureTracker("default-post-condition-failure") - Expect(*featureTracker.Status.Conditions).To(ContainElement( - MatchFields(IgnoreExtras, Fields{ - "Type": Equal(conditionsv1.ConditionDegraded), - "Status": Equal(v1.ConditionTrue), - "Reason": Equal(string(featurev1.PostConditions)), - }), - )) - }) + Expect(createFile(tempDir, "namespace.yaml", nsYAML)).To(Succeed()) - It("should correctly indicate source in the feature tracker", func() { - verificationFeature, err := feature.CreateFeature("empty-feature"). - With(dsciSpec). - From(source). - UsingConfig(envTest.Config). - Load() - Expect(err).ToNot(HaveOccurred()) + featuresHandler := feature.ClusterFeaturesHandler(dsci, func(handler *feature.FeaturesHandler) error { + createServiceErr := feature.CreateFeature("create-namespace"). + For(handler). + UsingConfig(envTest.Config). + ManifestSource(os.DirFS(tempDir)). + Manifests(path.Join("namespace.yaml")). // must be relative to root DirFS defined above + Load() - // when - Expect(verificationFeature.Apply()).To(Succeed()) + Expect(createServiceErr).ToNot(HaveOccurred()) - // then - featureTracker := getFeatureTracker("default-empty-feature") - Expect(featureTracker.Spec.Source.Name).To(Equal("default")) - Expect(featureTracker.Spec.Source.Type).To(Equal(featurev1.DSCIType)) - }) + return nil + }) - It("should correctly indicate app namespace in the feature tracker", func() { - verificationFeature, err := feature.CreateFeature("empty-feature"). - With(dsciSpec). - From(source). - UsingConfig(envTest.Config). 
- Load() - Expect(err).ToNot(HaveOccurred()) + // when + Expect(featuresHandler.Apply()).To(Succeed()) - // when - Expect(verificationFeature.Apply()).To(Succeed()) - - // then - featureTracker := getFeatureTracker("default-empty-feature") - Expect(featureTracker.Spec.AppNamespace).To(Equal("default")) + // then + realNs, err := getNamespace("real-file-test-ns") + defer objectCleaner.DeleteAll(realNs) + Expect(err).ToNot(HaveOccurred()) + Expect(realNs.Name).To(Equal("real-file-test-ns")) + }) }) - }) }) -var _ = Describe("Manifest sources", func() { - Context("using various manifest sources", func() { - - var ( - objectCleaner *envtestutil.Cleaner - dsciSpec *dscv1.DSCInitializationSpec - source featurev1.Source - namespace = "default" - ) - - BeforeEach(func() { - objectCleaner = envtestutil.CreateCleaner(envTestClient, envTest.Config, timeout, interval) - dsciSpec = newDSCInitializationSpec(namespace) - source = envtestutil.NewSource(featurev1.DSCIType, "namespace") - }) - - It("should be able to process an embedded template from the default location", func() { - // given - ns := createNamespace("service-ns") - Expect(envTestClient.Create(context.Background(), ns)).To(Succeed()) - defer objectCleaner.DeleteAll(ns) - - serviceMeshSpec := &dsciSpec.ServiceMesh - serviceMeshSpec.ControlPlane.Namespace = "service-ns" - - createService, err := feature.CreateFeature("create-control-plane"). - With(dsciSpec). - From(source). - Manifests(path.Join(templatesDir, "serverless", "serving-istio-gateways", "local-gateway-svc.tmpl")). - UsingConfig(envTest.Config). - Load() - - Expect(err).ToNot(HaveOccurred()) - - // when - Expect(createService.Apply()).To(Succeed()) - - // then - service, err := getService("knative-local-gateway", "service-ns") - Expect(err).ToNot(HaveOccurred()) - Expect(service.Name).To(Equal("knative-local-gateway")) - }) - - It("should be able to process an embedded YAML file from the default location", func() { - // given - ns := createNamespace("knative-serving") - Expect(envTestClient.Create(context.Background(), ns)).To(Succeed()) - defer objectCleaner.DeleteAll(ns) - - createGateway, err := feature.CreateFeature("create-gateway"). - With(dsciSpec). - From(source). - Manifests(path.Join(templatesDir, "serverless", "serving-istio-gateways", "istio-local-gateway.yaml")). - UsingConfig(envTest.Config). - Load() - - Expect(err).ToNot(HaveOccurred()) - - // when - Expect(createGateway.Apply()).To(Succeed()) - - // then - gateway, err := getGateway(envTest.Config, "knative-serving", "knative-local-gateway") - Expect(err).ToNot(HaveOccurred()) - Expect(gateway).ToNot(BeNil()) - }) - - It("should be able to process an embedded file from a non default location", func() { - createNs, err := feature.CreateFeature("create-ns"). - With(dsciSpec). - From(source). - ManifestSource(testEmbeddedFiles). - Manifests(path.Join(templatesDir, "namespace.yaml")). - UsingConfig(envTest.Config). 
- Load() - - Expect(err).ToNot(HaveOccurred()) - - // when - Expect(createNs.Apply()).To(Succeed()) - - // then - namespace, err := getNamespace("embedded-test-ns") - Expect(err).ToNot(HaveOccurred()) - Expect(namespace.Name).To(Equal("embedded-test-ns")) - }) - - It("should source manifests from a specified temporary directory within the file system", func() { - // given - tempDir := GinkgoT().TempDir() - yamlData := `apiVersion: v1 -kind: Namespace -metadata: - name: real-file-test-ns` - - err := createFile(tempDir, "namespace.yaml", yamlData) - Expect(err).ToNot(HaveOccurred()) +func createdSecretHasOwnerReferenceToOwningFeature(secretName, namespace string) func() error { + return func() error { + secret, err := envTestClientset.CoreV1(). + Secrets(namespace). + Get(context.TODO(), secretName, metav1.GetOptions{}) - createNs, err := feature.CreateFeature("create-ns"). - With(dsciSpec). - From(source). - ManifestSource(os.DirFS(tempDir)). - Manifests(path.Join("namespace.yaml")). // must be relative to root DirFS defined above - UsingConfig(envTest.Config). - Load() + if err != nil { + return err + } - Expect(err).ToNot(HaveOccurred()) + Expect(secret.OwnerReferences).Should( + ContainElement( + MatchFields(IgnoreExtras, Fields{"Kind": Equal("FeatureTracker")}), + ), + ) - // when - Expect(createNs.Apply()).To(Succeed()) + trackerName := "" + for _, ownerRef := range secret.OwnerReferences { + if ownerRef.Kind == "FeatureTracker" { + trackerName = ownerRef.Name + break + } + } - // then - namespace, err := getNamespace("real-file-test-ns") - Expect(err).ToNot(HaveOccurred()) - Expect(namespace.Name).To(Equal("real-file-test-ns")) - }) - }) -}) + tracker := &featurev1.FeatureTracker{} + return envTestClient.Get(context.Background(), client.ObjectKey{ + Name: trackerName, + }, tracker) + } +} -func createTestSecret(namespace string) func(f *feature.Feature) error { +func createSecret(name, namespace string) func(f *feature.Feature) error { return func(f *feature.Feature) error { secret := &v1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-secret", + Name: name, Namespace: namespace, OwnerReferences: []metav1.OwnerReference{ f.AsOwnerReference(), @@ -485,7 +576,7 @@ func createTestSecret(namespace string) func(f *feature.Feature) error { } } -func createNamespace(name string) *v1.Namespace { +func newNamespace(name string) *v1.Namespace { return &v1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -493,26 +584,28 @@ func createNamespace(name string) *v1.Namespace { } } -func getFeatureTracker(name string) *featurev1.FeatureTracker { - tracker := &featurev1.FeatureTracker{} +func getFeatureTracker(featureName, appNamespace string) (*featurev1.FeatureTracker, error) { //nolint:unparam //reason appNs + tracker := featurev1.NewFeatureTracker(featureName, appNamespace) err := envTestClient.Get(context.Background(), client.ObjectKey{ - Name: name, + Name: tracker.Name, }, tracker) - Expect(err).ToNot(HaveOccurred()) - - return tracker + return tracker, err } -func newDSCInitializationSpec(ns string) *dscv1.DSCInitializationSpec { - spec := dscv1.DSCInitializationSpec{} - spec.ApplicationsNamespace = ns - - return &spec +func newDSCInitialization(ns string) *dsciv1.DSCInitialization { + return &dsciv1.DSCInitialization{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default-dsci", + }, + Spec: dsciv1.DSCInitializationSpec{ + ApplicationsNamespace: ns, + }, + } } func getNamespace(namespace string) (*v1.Namespace, error) { - ns := createNamespace(namespace) + ns := newNamespace(namespace) 
err := envTestClient.Get(context.Background(), types.NamespacedName{Name: namespace}, ns) return ns, err diff --git a/tests/integration/features/features_suite_int_test.go b/tests/integration/features/features_suite_int_test.go index 89732414b74..37ba1573cf0 100644 --- a/tests/integration/features/features_suite_int_test.go +++ b/tests/integration/features/features_suite_int_test.go @@ -11,6 +11,7 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/kubernetes" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" @@ -24,10 +25,11 @@ import ( ) var ( - envTestClient client.Client - envTest *envtest.Environment - ctx context.Context - cancel context.CancelFunc + envTestClient client.Client + envTestClientset *kubernetes.Clientset + envTest *envtest.Environment + ctx context.Context + cancel context.CancelFunc ) var testScheme = runtime.NewScheme() @@ -78,6 +80,10 @@ var _ = BeforeSuite(func() { envTestClient, err = client.New(config, client.Options{Scheme: testScheme}) Expect(err).NotTo(HaveOccurred()) Expect(envTestClient).NotTo(BeNil()) + + envTestClientset, err = kubernetes.NewForConfig(config) + Expect(err).NotTo(HaveOccurred()) + Expect(envTestClientset).NotTo(BeNil()) }) var _ = AfterSuite(func() { diff --git a/tests/integration/features/serverless_feature_test.go b/tests/integration/features/serverless_feature_test.go index c4a642c58c9..9aa18693065 100644 --- a/tests/integration/features/serverless_feature_test.go +++ b/tests/integration/features/serverless_feature_test.go @@ -2,15 +2,19 @@ package features_test import ( "context" + "fmt" + corev1 "k8s.io/api/core/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/client-go/dynamic" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" - featurev1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/features/v1" + dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/components/kserve" infrav1 "github.com/opendatahub-io/opendatahub-operator/v2/infrastructure/v1" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/serverless" @@ -65,41 +69,50 @@ spec: var _ = Describe("Serverless feature", func() { - var testFeature *feature.Feature - var objectCleaner *envtestutil.Cleaner + var ( + dsci *dsciv1.DSCInitialization + objectCleaner *envtestutil.Cleaner + kserveComponent *kserve.Kserve + ) BeforeEach(func() { + // TODO rework c, err := client.New(envTest.Config, client.Options{}) Expect(err).ToNot(HaveOccurred()) - objectCleaner = envtestutil.CreateCleaner(c, envTest.Config, timeout, interval) - testFeatureName := "serverless-feature" - namespace := envtestutil.AppendRandomNameTo(testFeatureName) - - dsciSpec := newDSCInitializationSpec(namespace) - source := envtestutil.NewSource(featurev1.ComponentType, "kserve") - testFeature, err = feature.CreateFeature(testFeatureName). - With(dsciSpec). - From(source). - UsingConfig(envTest.Config). 
- Load() - - Expect(err).ToNot(HaveOccurred()) - - // Creates the actual Feature instance so that associated FeatureTracker is created as well - Expect(testFeature.Apply()).To(Succeed()) + dsci = newDSCInitialization("default") + kserveComponent = &kserve.Kserve{} }) Context("verifying preconditions", func() { When("operator is not installed", func() { - It("operator presence check should return an error", func() { - Expect(serverless.EnsureServerlessOperatorInstalled(testFeature)).ToNot(Succeed()) + + It("should fail on precondition check", func() { + // given + featuresHandler := feature.ComponentFeaturesHandler(kserveComponent, &dsci.Spec, func(handler *feature.FeaturesHandler) error { + verificationFeatureErr := feature.CreateFeature("no-serverless-operator-check"). + For(handler). + UsingConfig(envTest.Config). + PreConditions(serverless.EnsureServerlessOperatorInstalled). + Load() + + Expect(verificationFeatureErr).ToNot(HaveOccurred()) + + return nil + }) + + // when + applyErr := featuresHandler.Apply() + + // then + Expect(applyErr).To(MatchError(ContainSubstring("\"knativeservings.operator.knative.dev\" not found"))) }) }) When("operator is installed", func() { + var knativeServingCrdObj *apiextensionsv1.CustomResourceDefinition BeforeEach(func() { @@ -120,17 +133,46 @@ var _ = Describe("Serverless feature", func() { objectCleaner.DeleteAll(knativeServingCrdObj) }) - It("operator presence check should succeed", func() { - Expect(serverless.EnsureServerlessOperatorInstalled(testFeature)).To(Succeed()) + It("should succeed checking operator installation using precondition", func() { + // when + featuresHandler := feature.ComponentFeaturesHandler(kserveComponent, &dsci.Spec, func(handler *feature.FeaturesHandler) error { + verificationFeatureErr := feature.CreateFeature("serverless-operator-check"). + For(handler). + UsingConfig(envTest.Config). + PreConditions(serverless.EnsureServerlessOperatorInstalled). + Load() + + Expect(verificationFeatureErr).ToNot(HaveOccurred()) + + return nil + }) + + // then + Expect(featuresHandler.Apply()).To(Succeed()) }) - It("KNative serving absence check should succeed if serving is not installed", func() { - Expect(serverless.EnsureServerlessAbsent(testFeature)).To(Succeed()) + It("should succeed if serving is not installed for KNative serving precondition", func() { + // when + featuresHandler := feature.ComponentFeaturesHandler(kserveComponent, &dsci.Spec, func(handler *feature.FeaturesHandler) error { + verificationFeatureErr := feature.CreateFeature("no-serving-installed-yet"). + For(handler). + UsingConfig(envTest.Config). + PreConditions(serverless.EnsureServerlessAbsent). 
+ Load() + + Expect(verificationFeatureErr).ToNot(HaveOccurred()) + + return nil + }) + + // then + Expect(featuresHandler.Apply()).To(Succeed()) }) - It("KNative serving absence check should fail when serving is present", func() { + It("should fail if serving is already installed for KNative serving precondition", func() { + // given ns := envtestutil.AppendRandomNameTo(testNamespacePrefix) - nsResource := createNamespace(ns) + nsResource := newNamespace(ns) Expect(envTestClient.Create(context.TODO(), nsResource)).To(Succeed()) defer objectCleaner.DeleteAll(nsResource) @@ -139,13 +181,47 @@ var _ = Describe("Serverless feature", func() { knativeServing.SetNamespace(nsResource.Name) Expect(envTestClient.Create(context.TODO(), knativeServing)).To(Succeed()) - Expect(serverless.EnsureServerlessAbsent(testFeature)).ToNot(Succeed()) + // when + featuresHandler := feature.ComponentFeaturesHandler(kserveComponent, &dsci.Spec, func(handler *feature.FeaturesHandler) error { + verificationFeatureErr := feature.CreateFeature("serving-already-installed"). + For(handler). + UsingConfig(envTest.Config). + PreConditions(serverless.EnsureServerlessAbsent). + Load() + + Expect(verificationFeatureErr).ToNot(HaveOccurred()) + + return nil + }) + + // then + Expect(featuresHandler.Apply()).ToNot(Succeed()) }) }) + }) Context("default values", func() { + var testFeature *feature.Feature + + BeforeEach(func() { + // Stubbing feature as we want to test particular functions in isolation + testFeature = &feature.Feature{ + Name: "test-feature", + Spec: &feature.Spec{ + ServiceMeshSpec: &infrav1.ServiceMeshSpec{}, + Serving: &infrav1.ServingSpec{}, + }, + } + + testFeature.Client = envTestClient + testFeature.Clientset = envTestClientset + var err error + testFeature.DynamicClient, err = dynamic.NewForConfig(envTest.Config) + Expect(err).ToNot(HaveOccurred()) + }) + Context("ingress gateway TLS secret name", func() { It("should set default value when value is empty in the DSCI", func() { @@ -186,43 +262,97 @@ var _ = Describe("Serverless feature", func() { Context("resources creation", func() { - It("should create a TLS secret if certificate is SelfSigned", func() { + var ( + namespace *corev1.Namespace + ) + + BeforeEach(func() { ns := envtestutil.AppendRandomNameTo(testNamespacePrefix) - nsResource := createNamespace(ns) - Expect(envTestClient.Create(context.TODO(), nsResource)).To(Succeed()) - defer objectCleaner.DeleteAll(nsResource) + namespace = newNamespace(ns) + Expect(envTestClient.Create(context.TODO(), namespace)).To(Succeed()) - testFeature.Spec.ControlPlane.Namespace = nsResource.Name - testFeature.Spec.Serving.IngressGateway.Certificate.Type = infrav1.SelfSigned - testFeature.Spec.Serving.IngressGateway.Domain = testDomainFooCom - Expect(serverless.ServingDefaultValues(testFeature)).To(Succeed()) - Expect(serverless.ServingIngressDomain(testFeature)).To(Succeed()) + dsci.Spec.ServiceMesh.ControlPlane.Namespace = ns + }) - Expect(serverless.ServingCertificateResource(testFeature)).To(Succeed()) + AfterEach(func() { + objectCleaner.DeleteAll(namespace) + }) - secret, err := testFeature.Clientset.CoreV1().Secrets(nsResource.Name).Get(context.TODO(), serverless.DefaultCertificateSecretName, v1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - Expect(secret).ToNot(BeNil()) + It("should create a TLS secret if certificate is SelfSigned", func() { + // given + kserveComponent.Serving.IngressGateway.Certificate.Type = infrav1.SelfSigned + kserveComponent.Serving.IngressGateway.Domain = testDomainFooCom + + 
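A note on the SelfSigned certificate test being set up here: the Eventually check further below fetches a secret named serverless.DefaultCertificateSecretName, and a "TLS secret" in Kubernetes is simply a corev1.Secret of type kubernetes.io/tls carrying PEM-encoded material under fixed keys. The minimal illustrative value below reuses this test file's corev1 and metav1 imports and uses placeholder PEM bytes; it is not what serverless.ServingCertificateResource actually generates.

	certPEM := []byte("-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n") // placeholder
	keyPEM := []byte("-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n")  // placeholder

	tlsSecret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      serverless.DefaultCertificateSecretName,
			Namespace: namespace.Name, // the test namespace created in BeforeEach
		},
		Type: corev1.SecretTypeTLS,
		Data: map[string][]byte{
			corev1.TLSCertKey:       certPEM, // stored under "tls.crt"
			corev1.TLSPrivateKeyKey: keyPEM,  // stored under "tls.key"
		},
	}
	_ = tlsSecret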
featuresHandler := feature.ComponentFeaturesHandler(kserveComponent, &dsci.Spec, func(handler *feature.FeaturesHandler) error { + verificationFeatureErr := feature.CreateFeature("tls-secret-creation"). + For(handler). + UsingConfig(envTest.Config). + WithData( + kserve.PopulateComponentSettings(kserveComponent), + serverless.ServingDefaultValues, + serverless.ServingIngressDomain, + ). + WithResources(serverless.ServingCertificateResource). + Load() + + Expect(verificationFeatureErr).ToNot(HaveOccurred()) + + return nil + }) + + // when + Expect(featuresHandler.Apply()).To(Succeed()) + + // then + Eventually(func() error { + secret, err := envTestClientset.CoreV1().Secrets(namespace.Name).Get(context.TODO(), serverless.DefaultCertificateSecretName, metav1.GetOptions{}) + if err != nil { + return err + } + + if secret == nil { + return fmt.Errorf("secret not found") + } + + return nil + }).WithTimeout(timeout).WithPolling(interval).Should(Succeed()) }) It("should not create any TLS secret if certificate is user provided", func() { - ns := envtestutil.AppendRandomNameTo(testNamespacePrefix) - nsResource := createNamespace(ns) - Expect(envTestClient.Create(context.TODO(), nsResource)).To(Succeed()) - defer objectCleaner.DeleteAll(nsResource) + // given + kserveComponent.Serving.IngressGateway.Certificate.Type = infrav1.Provided + kserveComponent.Serving.IngressGateway.Domain = testDomainFooCom + featuresHandler := feature.ComponentFeaturesHandler(kserveComponent, &dsci.Spec, func(handler *feature.FeaturesHandler) error { + verificationFeatureErr := feature.CreateFeature("tls-secret-creation"). + For(handler). + UsingConfig(envTest.Config). + WithData( + kserve.PopulateComponentSettings(kserveComponent), + serverless.ServingDefaultValues, + serverless.ServingIngressDomain, + ). + WithResources(serverless.ServingCertificateResource). 
+ Load() + + Expect(verificationFeatureErr).ToNot(HaveOccurred()) + + return nil + }) - testFeature.Spec.ControlPlane.Namespace = nsResource.Name - testFeature.Spec.Serving.IngressGateway.Certificate.Type = infrav1.Provided - testFeature.Spec.Serving.IngressGateway.Domain = "*.foo.com" - Expect(serverless.ServingDefaultValues(testFeature)).To(Succeed()) - Expect(serverless.ServingIngressDomain(testFeature)).To(Succeed()) + // when + Expect(featuresHandler.Apply()).To(Succeed()) - Expect(serverless.ServingCertificateResource(testFeature)).To(Succeed()) + // then + Consistently(func() error { + list, err := envTestClientset.CoreV1().Secrets(namespace.Name).List(context.TODO(), metav1.ListOptions{}) + if err != nil || len(list.Items) != 0 { + return fmt.Errorf("list len: %d, error: %w", len(list.Items), err) + } - list, err := testFeature.Clientset.CoreV1().Secrets(nsResource.Name).List(context.TODO(), v1.ListOptions{}) - Expect(err).ToNot(HaveOccurred()) - Expect(list.Items).To(BeEmpty()) + return nil + }).WithTimeout(timeout).WithPolling(interval).Should(Succeed()) }) }) + }) diff --git a/tests/integration/features/servicemesh_feature_test.go b/tests/integration/features/servicemesh_feature_test.go index 2b6fb4086bf..bf32225611c 100644 --- a/tests/integration/features/servicemesh_feature_test.go +++ b/tests/integration/features/servicemesh_feature_test.go @@ -13,7 +13,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" - featurev1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/features/v1" + dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/servicemesh" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/gvr" @@ -66,35 +66,54 @@ spec: ` var _ = Describe("Service Mesh feature", func() { - var testFeature *feature.Feature - var objectCleaner *envtestutil.Cleaner + + var ( + dsci *dsciv1.DSCInitialization + objectCleaner *envtestutil.Cleaner + ) BeforeEach(func() { c, err := client.New(envTest.Config, client.Options{}) Expect(err).ToNot(HaveOccurred()) - objectCleaner = envtestutil.CreateCleaner(c, envTest.Config, timeout, interval) - testFeatureName := "servicemesh-feature" - namespace := envtestutil.AppendRandomNameTo(testFeatureName) + namespace := envtestutil.AppendRandomNameTo("service-mesh-settings") - dsciSpec := newDSCInitializationSpec(namespace) - source := envtestutil.NewSource(featurev1.DSCIType, "default") - testFeature, err = feature.CreateFeature(testFeatureName). - With(dsciSpec). - From(source). - UsingConfig(envTest.Config). - Load() + dsci = newDSCInitialization(namespace) Expect(err).ToNot(HaveOccurred()) }) + AfterEach(func() { + + }) + Describe("preconditions", func() { + When("operator is not installed", func() { - It("operator presence check should return an error", func() { - Expect(servicemesh.EnsureServiceMeshOperatorInstalled(testFeature)).To(HaveOccurred()) + + It("should fail using precondition check", func() { + // given + featuresHandler := feature.ClusterFeaturesHandler(dsci, func(handler *feature.FeaturesHandler) error { + verificationFeatureErr := feature.CreateFeature("no-serverless-operator-check"). + For(handler). + UsingConfig(envTest.Config). + PreConditions(servicemesh.EnsureServiceMeshOperatorInstalled). 
+ Load() + + Expect(verificationFeatureErr).ToNot(HaveOccurred()) + + return nil + }) + + // when + applyErr := featuresHandler.Apply() + + // then + Expect(applyErr).To(MatchError(ContainSubstring("customresourcedefinitions.apiextensions.k8s.io \"servicemeshcontrolplanes.maistra.io\" not found"))) }) }) + When("operator is installed", func() { var smcpCrdObj *apiextensionsv1.CustomResourceDefinition @@ -110,32 +129,85 @@ var _ = Describe("Service Mesh feature", func() { err = envtest.WaitForCRDs(envTest.Config, []*apiextensionsv1.CustomResourceDefinition{smcpCrdObj}, crdOptions) Expect(err).ToNot(HaveOccurred()) }) + AfterEach(func() { // Delete SMCP CRD objectCleaner.DeleteAll(smcpCrdObj) }) - It("operator presence check should succeed", func() { - Expect(servicemesh.EnsureServiceMeshOperatorInstalled(testFeature)).To(Succeed()) + + It("should succeed using precondition check", func() { + // when + featuresHandler := feature.ClusterFeaturesHandler(dsci, func(handler *feature.FeaturesHandler) error { + verificationFeatureErr := feature.CreateFeature("service-mesh-operator-check"). + For(handler). + UsingConfig(envTest.Config). + PreConditions(servicemesh.EnsureServiceMeshOperatorInstalled). + Load() + + Expect(verificationFeatureErr).ToNot(HaveOccurred()) + + return nil + }) + + // when + Expect(featuresHandler.Apply()).To(Succeed()) + }) + It("should find installed Service Mesh Control Plane", func() { + // given c, err := client.New(envTest.Config, client.Options{}) Expect(err).ToNot(HaveOccurred()) ns := envtestutil.AppendRandomNameTo(testNamespacePrefix) - nsResource := createNamespace(ns) + nsResource := newNamespace(ns) Expect(c.Create(context.Background(), nsResource)).To(Succeed()) defer objectCleaner.DeleteAll(nsResource) createServiceMeshControlPlane("test-name", ns) + dsci.Spec.ServiceMesh.ControlPlane.Namespace = ns + dsci.Spec.ServiceMesh.ControlPlane.Name = "test-name" + + // when + featuresHandler := feature.ClusterFeaturesHandler(dsci, func(handler *feature.FeaturesHandler) error { + verificationFeatureErr := feature.CreateFeature("service-mesh-control-plane-check"). + For(handler). + UsingConfig(envTest.Config). + PreConditions(servicemesh.EnsureServiceMeshInstalled). + Load() + + Expect(verificationFeatureErr).ToNot(HaveOccurred()) + + return nil + }) - testFeature.Spec.ControlPlane.Namespace = ns - testFeature.Spec.ControlPlane.Name = "test-name" - Expect(servicemesh.EnsureServiceMeshInstalled(testFeature)).To(Succeed()) + // then + Expect(featuresHandler.Apply()).To(Succeed()) }) + It("should fail to find Service Mesh Control Plane if not present", func() { - Expect(servicemesh.EnsureServiceMeshInstalled(testFeature)).ToNot(Succeed()) + // given + dsci.Spec.ServiceMesh.ControlPlane.Name = "test-name" + + // when + featuresHandler := feature.ClusterFeaturesHandler(dsci, func(handler *feature.FeaturesHandler) error { + verificationFeatureErr := feature.CreateFeature("no-service-mesh-control-plane-check"). + For(handler). + UsingConfig(envTest.Config). + PreConditions(servicemesh.EnsureServiceMeshInstalled). 
+ Load() + + Expect(verificationFeatureErr).ToNot(HaveOccurred()) + + return nil + }) + + // then + Expect(featuresHandler.Apply()).To(MatchError(ContainSubstring("failed to find Service Mesh Control Plane"))) }) + }) + }) }) From 930eb4fd0dfc00aca157d58907ac0c6bcd8e3e54 Mon Sep 17 00:00:00 2001 From: Bartosz Majsak Date: Mon, 5 Feb 2024 11:13:45 +0100 Subject: [PATCH 10/32] chore: fixes typos in code and comments (#839) (cherry picked from commit fec8a2b123070d7689a0df464527bb1d342b4f57) --- components/codeflare/codeflare.go | 2 +- components/component.go | 4 ++-- components/dashboard/dashboard.go | 2 +- components/datasciencepipelines/datasciencepipelines.go | 2 +- components/kserve/kserve.go | 4 ++-- components/kserve/serverless_setup.go | 6 +++--- components/kueue/kueue.go | 2 +- components/modelmeshserving/modelmeshserving.go | 4 ++-- components/ray/ray.go | 2 +- components/workbenches/workbenches.go | 2 +- controllers/dscinitialization/servicemesh_setup.go | 2 +- controllers/secretgenerator/secretgenerator_controller.go | 2 +- pkg/deploy/deploy.go | 4 ++-- pkg/feature/builder.go | 6 +++--- pkg/upgrade/upgrade.go | 4 ++-- tests/envtestutil/cleaner.go | 2 +- 16 files changed, 25 insertions(+), 25 deletions(-) diff --git a/components/codeflare/codeflare.go b/components/codeflare/codeflare.go index 9aa2338d9a5..c9fe4ad768c 100644 --- a/components/codeflare/codeflare.go +++ b/components/codeflare/codeflare.go @@ -108,7 +108,7 @@ func (c *CodeFlare) ReconcileComponent(ctx context.Context, cli client.Client, r // CloudServiceMonitoring handling if platform == deploy.ManagedRhods { if enabled { - // first check if the service is up, so prometheus wont fire alerts when it is just startup + // first check if the service is up, so prometheus won't fire alerts when it is just startup if err := monitoring.WaitForDeploymentAvailable(ctx, resConf, ComponentName, dscispec.ApplicationsNamespace, 20, 2); err != nil { return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentName, err) } diff --git a/components/component.go b/components/component.go index 6f04ed945be..b0ac3818c43 100644 --- a/components/component.go +++ b/components/component.go @@ -60,7 +60,7 @@ type DevFlags struct { } type ManifestsConfig struct { - // uri is the URI point to a git repo with tag/branch. e.g https://github.com/org/repo/tarball/ + // uri is the URI point to a git repo with tag/branch. e.g. https://github.com/org/repo/tarball/ // +optional // +kubebuilder:default:="" // +operator-sdk:csv:customresourcedefinitions:type=spec,order=1 @@ -72,7 +72,7 @@ type ManifestsConfig struct { // +operator-sdk:csv:customresourcedefinitions:type=spec,order=2 ContextDir string `json:"contextDir,omitempty"` - // sourcePath is the subpath within contextDir where kustomize builds start. Examples include any sub-folder or path: `base`, `overlays/dev`, `default`, `odh` etc + // sourcePath is the subpath within contextDir where kustomize builds start. Examples include any sub-folder or path: `base`, `overlays/dev`, `default`, `odh` etc. 
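Taken together, the uri, contextDir and sourcePath fields documented in this ManifestsConfig struct describe where a dev-flags manifest override is downloaded from and which kustomize path is built from it. A hedged sketch of populating such an override in Go follows; the ContextDir and SourcePath field names are inferred from the json tags shown here, and the tarball URL is only a placeholder.

package main

import (
	"fmt"

	"github.com/opendatahub-io/opendatahub-operator/v2/components"
)

func main() {
	// Illustrative only: one manifest override supplied as a component dev flag.
	override := components.ManifestsConfig{
		URI:        "https://github.com/example-org/example-repo/tarball/main", // placeholder tarball URL
		ContextDir: "manifests",    // folder inside the downloaded tarball
		SourcePath: "overlays/dev", // kustomize path relative to contextDir
	}

	fmt.Printf("%+v\n", override)
}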
// +optional // +kubebuilder:default:="" // +operator-sdk:csv:customresourcedefinitions:type=spec,order=3 diff --git a/components/dashboard/dashboard.go b/components/dashboard/dashboard.go index 36fbae7aae4..12dc60b23a6 100644 --- a/components/dashboard/dashboard.go +++ b/components/dashboard/dashboard.go @@ -172,7 +172,7 @@ func (d *Dashboard) ReconcileComponent(ctx context.Context, // CloudService Monitoring handling if platform == deploy.ManagedRhods { if enabled { - // first check if the service is up, so prometheus wont fire alerts when it is just startup + // first check if the service is up, so prometheus won't fire alerts when it is just startup if err := monitoring.WaitForDeploymentAvailable(ctx, resConf, ComponentNameSupported, dscispec.ApplicationsNamespace, 20, 3); err != nil { return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentName, err) } diff --git a/components/datasciencepipelines/datasciencepipelines.go b/components/datasciencepipelines/datasciencepipelines.go index dc187a84748..bc5b9628636 100644 --- a/components/datasciencepipelines/datasciencepipelines.go +++ b/components/datasciencepipelines/datasciencepipelines.go @@ -98,7 +98,7 @@ func (d *DataSciencePipelines) ReconcileComponent(ctx context.Context, // CloudService Monitoring handling if platform == deploy.ManagedRhods { if enabled { - // first check if the service is up, so prometheus wont fire alerts when it is just startup + // first check if the service is up, so prometheus won't fire alerts when it is just startup // only 1 replica should be very quick if err := monitoring.WaitForDeploymentAvailable(ctx, resConf, ComponentName, dscispec.ApplicationsNamespace, 10, 1); err != nil { return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentName, err) diff --git a/components/kserve/kserve.go b/components/kserve/kserve.go index 9476cb4f404..acc2c9c0db6 100644 --- a/components/kserve/kserve.go +++ b/components/kserve/kserve.go @@ -45,7 +45,7 @@ type Kserve struct { func (k *Kserve) OverrideManifests(_ string) error { // Download manifests if defined by devflags - // Go through each manifests and set the overlays if defined + // Go through each manifest and set the overlays if defined for _, subcomponent := range k.DevFlags.Manifests { if strings.Contains(subcomponent.URI, DependentComponentName) { // Download subcomponent @@ -151,7 +151,7 @@ func (k *Kserve) ReconcileComponent(ctx context.Context, cli client.Client, resC // CloudService Monitoring handling if platform == deploy.ManagedRhods { if enabled { - // first check if the service is up, so prometheus wont fire alerts when it is just startup + // first check if the service is up, so prometheus won't fire alerts when it is just startup if err := monitoring.WaitForDeploymentAvailable(ctx, resConf, ComponentName, dscispec.ApplicationsNamespace, 20, 2); err != nil { return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentName, err) } diff --git a/components/kserve/serverless_setup.go b/components/kserve/serverless_setup.go index 95d1d38b882..47eac118bbb 100644 --- a/components/kserve/serverless_setup.go +++ b/components/kserve/serverless_setup.go @@ -30,7 +30,7 @@ func (k *Kserve) configureServerlessFeatures() feature.FeaturesProvider { return servingDeploymentErr } - servingNetIstioSecretFilterinErr := feature.CreateFeature("serverless-net-istio-secret-filtering"). + servingNetIstioSecretFilteringErr := feature.CreateFeature("serverless-net-istio-secret-filtering"). For(handler). 
Manifests( path.Join(feature.ServerlessDir, "serving-net-istio-secret-filtering.patch.tmpl"), @@ -41,8 +41,8 @@ func (k *Kserve) configureServerlessFeatures() feature.FeaturesProvider { feature.WaitForPodsToBeReady(serverless.KnativeServingNamespace), ). Load() - if servingNetIstioSecretFilterinErr != nil { - return servingNetIstioSecretFilterinErr + if servingNetIstioSecretFilteringErr != nil { + return servingNetIstioSecretFilteringErr } serverlessGwErr := feature.CreateFeature("serverless-serving-gateways"). diff --git a/components/kueue/kueue.go b/components/kueue/kueue.go index 0db08cdd053..0e0ab988f35 100644 --- a/components/kueue/kueue.go +++ b/components/kueue/kueue.go @@ -84,7 +84,7 @@ func (r *Kueue) ReconcileComponent(ctx context.Context, cli client.Client, resCo // CloudService Monitoring handling if platform == deploy.ManagedRhods { if enabled { - // first check if the service is up, so prometheus wont fire alerts when it is just startup + // first check if the service is up, so prometheus won't fire alerts when it is just startup if err := monitoring.WaitForDeploymentAvailable(ctx, resConf, ComponentName, dscispec.ApplicationsNamespace, 20, 2); err != nil { return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentName, err) } diff --git a/components/modelmeshserving/modelmeshserving.go b/components/modelmeshserving/modelmeshserving.go index 07b54ce9c7b..06968ee0728 100644 --- a/components/modelmeshserving/modelmeshserving.go +++ b/components/modelmeshserving/modelmeshserving.go @@ -36,7 +36,7 @@ type ModelMeshServing struct { } func (m *ModelMeshServing) OverrideManifests(_ string) error { - // Go through each manifests and set the overlays if defined + // Go through each manifest and set the overlays if defined for _, subcomponent := range m.DevFlags.Manifests { if strings.Contains(subcomponent.URI, DependentComponentName) { // Download subcomponent @@ -150,7 +150,7 @@ func (m *ModelMeshServing) ReconcileComponent(ctx context.Context, // CloudService Monitoring handling if platform == deploy.ManagedRhods { if enabled { - // first check if service is up, so prometheus wont fire alerts when it is just startup + // first check if service is up, so prometheus won't fire alerts when it is just startup if err := monitoring.WaitForDeploymentAvailable(ctx, resConf, ComponentName, dscispec.ApplicationsNamespace, 20, 2); err != nil { return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentName, err) } diff --git a/components/ray/ray.go b/components/ray/ray.go index 009cf40c40e..8168647df13 100644 --- a/components/ray/ray.go +++ b/components/ray/ray.go @@ -86,7 +86,7 @@ func (r *Ray) ReconcileComponent(ctx context.Context, cli client.Client, resConf // CloudService Monitoring handling if platform == deploy.ManagedRhods { if enabled { - // first check if the service is up, so prometheus wont fire alerts when it is just startup + // first check if the service is up, so prometheus won't fire alerts when it is just startup if err := monitoring.WaitForDeploymentAvailable(ctx, resConf, ComponentName, dscispec.ApplicationsNamespace, 20, 2); err != nil { return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentName, err) } diff --git a/components/workbenches/workbenches.go b/components/workbenches/workbenches.go index 25a01ea1ee5..afd840c5fc3 100644 --- a/components/workbenches/workbenches.go +++ b/components/workbenches/workbenches.go @@ -41,7 +41,7 @@ type Workbenches struct { func (w *Workbenches) OverrideManifests(platform string) 
error { // Download manifests if defined by devflags - // Go through each manifests and set the overlays if defined + // Go through each manifest and set the overlays if defined for _, subcomponent := range w.DevFlags.Manifests { if strings.Contains(subcomponent.URI, DependentComponentName) { // Download subcomponent diff --git a/controllers/dscinitialization/servicemesh_setup.go b/controllers/dscinitialization/servicemesh_setup.go index f976652fc3a..7d67bf5205e 100644 --- a/controllers/dscinitialization/servicemesh_setup.go +++ b/controllers/dscinitialization/servicemesh_setup.go @@ -34,7 +34,7 @@ func (r *DSCInitializationReconciler) configureServiceMesh(instance *dsciv1.DSCI } func (r *DSCInitializationReconciler) removeServiceMesh(instance *dsciv1.DSCInitialization) error { - // on condition of Managed, do not handle Removed when set to Removed it trigger DSCI reconcile to cleanup + // on condition of Managed, do not handle Removed when set to Removed it trigger DSCI reconcile to clean up if instance.Spec.ServiceMesh.ManagementState == operatorv1.Managed { serviceMeshFeatures := feature.ClusterFeaturesHandler(instance, configureServiceMeshFeatures()) diff --git a/controllers/secretgenerator/secretgenerator_controller.go b/controllers/secretgenerator/secretgenerator_controller.go index 25dd3c58280..6ec86d04c63 100644 --- a/controllers/secretgenerator/secretgenerator_controller.go +++ b/controllers/secretgenerator/secretgenerator_controller.go @@ -71,7 +71,7 @@ func (r *SecretGeneratorReconciler) SetupWithManager(mgr ctrl.Manager) error { return false }, // this only watch for secret deletion if has with annotation - // e.g dashboard-oauth-client but not dashboard-oauth-client-generated + // e.g. dashboard-oauth-client but not dashboard-oauth-client-generated DeleteFunc: func(e event.DeleteEvent) bool { if _, found := e.Object.GetAnnotations()[SECRET_NAME_ANNOTATION]; found { return true diff --git a/pkg/deploy/deploy.go b/pkg/deploy/deploy.go index cb75896a41e..c5dabe5f020 100644 --- a/pkg/deploy/deploy.go +++ b/pkg/deploy/deploy.go @@ -58,7 +58,7 @@ const ( // 2. It saves the manifests in the odh-manifests/component-name/ folder. func DownloadManifests(componentName string, manifestConfig components.ManifestsConfig) error { // Get the component repo from the given url - // e.g https://github.com/example/tarball/master + // e.g. https://github.com/example/tarball/master req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, manifestConfig.URI, nil) if err != nil { return err @@ -273,7 +273,7 @@ func manageResource(ctx context.Context, cli client.Client, obj *unstructured.Un // Create the resource if it doesn't exist and component is enabled if apierrs.IsNotFound(err) { // Set the owner reference for garbage collection - // Skip set on CRD, e.g we should not delete notebook CRD if we delete DSC instance + // Skip set on CRD, e.g. 
we should not delete notebook CRD if we delete DSC instance if found.GetKind() != "CustomResourceDefinition" { if err = ctrl.SetControllerReference(owner, metav1.Object(obj), cli.Scheme()); err != nil { return err diff --git a/pkg/feature/builder.go b/pkg/feature/builder.go index e019ecf555c..99be036bac4 100644 --- a/pkg/feature/builder.go +++ b/pkg/feature/builder.go @@ -23,7 +23,7 @@ type featureBuilder struct { name string config *rest.Config builders []partialBuilder - featuresHanlder *FeaturesHandler + featuresHandler *FeaturesHandler fsys fs.FS } @@ -51,7 +51,7 @@ func (u *usingFeaturesHandler) For(featuresHandler *FeaturesHandler) *featureBui fb := &featureBuilder{ name: u.name, - featuresHanlder: featuresHandler, + featuresHandler: featuresHandler, fsys: embeddedFiles, } @@ -183,7 +183,7 @@ func (fb *featureBuilder) Load() error { } } - fb.featuresHanlder.features = append(fb.featuresHanlder.features, feature) + fb.featuresHandler.features = append(fb.featuresHandler.features, feature) return nil } diff --git a/pkg/upgrade/upgrade.go b/pkg/upgrade/upgrade.go index ecbf0711916..0be7941b8f2 100644 --- a/pkg/upgrade/upgrade.go +++ b/pkg/upgrade/upgrade.go @@ -70,7 +70,7 @@ func OperatorUninstall(cli client.Client, cfg *rest.Config) error { return fmt.Errorf("error getting generated namespaces : %w", err) } - // Return if any one of the namespaces is Terminating due to resources that are in process of deletion. (e.g CRDs) + // Return if any one of the namespaces is Terminating due to resources that are in process of deletion. (e.g. CRDs) for _, namespace := range generatedNamespaces.Items { if namespace.Status.Phase == corev1.NamespaceTerminating { return fmt.Errorf("waiting for namespace %v to be deleted", namespace.Name) @@ -462,7 +462,7 @@ func deleteDeploymentsAndCheck(ctx context.Context, cli client.Client, namespace if err := cli.List(ctx, deployments, listOpts); err != nil { return false, nil //nolint:nilerr } - // filter deployment which has the new label to limit that we do not over kill other deployment + // filter deployment which has the new label to limit that we do not overkill other deployment // this logic can be used even when upgrade from v2.4 to v2.5 without remove it markedForDeletion := []appsv1.Deployment{} for _, deployment := range deployments.Items { diff --git a/tests/envtestutil/cleaner.go b/tests/envtestutil/cleaner.go index cfa70675902..9b12301f887 100644 --- a/tests/envtestutil/cleaner.go +++ b/tests/envtestutil/cleaner.go @@ -18,7 +18,7 @@ import ( ) // Cleaner is a struct to perform deletion of resources, -// enforcing removal of finalizers. Otherwise deletion of namespaces wouldn't be possible. +// enforcing removal of finalizers. Otherwise, deletion of namespaces wouldn't be possible. // See: https://book.kubebuilder.io/reference/envtest.html#namespace-usage-limitation // Based on https://github.com/kubernetes-sigs/controller-runtime/issues/880#issuecomment-749742403 type Cleaner struct { From 3e471849df0d8eecc5c54539da9b23fa0e738855 Mon Sep 17 00:00:00 2001 From: Bartosz Majsak Date: Mon, 5 Feb 2024 11:18:25 +0100 Subject: [PATCH 11/32] chore(test): unifies how timeout and polling is set (#840) favoring fluent interface for readability instead of passing values after a closure passed to Eventually or Consistenly Gomega checks. 
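In practice the fluent interface named in this commit message reads as in the sketch below; fetch, ctx, key and obj are illustrative stand-ins, while k8sClient, timeout and interval match the test suite's existing helpers, and the same WithTimeout/WithPolling chaining applies to Consistently.

	It("eventually finds the object", func() {
		fetch := func() error {
			return k8sClient.Get(ctx, key, obj) // illustrative lookup
		}

		// Positional style being replaced: settings trail the closure as bare arguments.
		Eventually(fetch, timeout, interval).Should(Succeed())

		// Fluent style introduced by this change: each setting is named at the call site.
		Eventually(fetch).
			WithTimeout(timeout).
			WithPolling(interval).
			Should(Succeed())
	})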
(cherry picked from commit 312554db6369e77d279b4ecec55861b8a3db2066) --- .../dscinitialization_test.go | 115 ++++++++++++++---- tests/envtestutil/cleaner.go | 5 +- 2 files changed, 96 insertions(+), 24 deletions(-) diff --git a/controllers/dscinitialization/dscinitialization_test.go b/controllers/dscinitialization/dscinitialization_test.go index c2cd564bfaf..18fd56f9bf2 100644 --- a/controllers/dscinitialization/dscinitialization_test.go +++ b/controllers/dscinitialization/dscinitialization_test.go @@ -35,7 +35,10 @@ var _ = Describe("DataScienceCluster initialization", func() { desiredDsci := createDSCI(applicationName, operatorv1.Managed, monitoringNamespace) Expect(k8sClient.Create(context.Background(), desiredDsci)).Should(Succeed()) foundDsci := &dsci.DSCInitialization{} - Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci), timeout, interval).Should(BeTrue()) + Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci)). + WithTimeout(timeout). + WithPolling(interval). + Should(BeTrue()) }) AfterEach(cleanupResources) @@ -43,7 +46,10 @@ var _ = Describe("DataScienceCluster initialization", func() { It("Should create default application namespace", func() { // then foundApplicationNamespace := &corev1.Namespace{} - Eventually(namespaceExists(applicationNamespace, foundApplicationNamespace), timeout, interval).Should(BeTrue()) + Eventually(namespaceExists(applicationNamespace, foundApplicationNamespace)). + WithTimeout(timeout). + WithPolling(interval). + Should(BeTrue()) Expect(foundApplicationNamespace.Name).To(Equal(applicationNamespace)) }) @@ -58,7 +64,10 @@ var _ = Describe("DataScienceCluster initialization", func() { It("Should create default network policy", func() { // then foundNetworkPolicy := &netv1.NetworkPolicy{} - Eventually(objectExists(applicationNamespace, applicationNamespace, foundNetworkPolicy), timeout, interval).Should(BeTrue()) + Eventually(objectExists(applicationNamespace, applicationNamespace, foundNetworkPolicy)). + WithTimeout(timeout). + WithPolling(interval). + Should(BeTrue()) Expect(foundNetworkPolicy.Name).To(Equal(applicationNamespace)) Expect(foundNetworkPolicy.Namespace).To(Equal(applicationNamespace)) Expect(foundNetworkPolicy.Spec.PolicyTypes[0]).To(Equal(netv1.PolicyTypeIngress)) @@ -67,7 +76,10 @@ var _ = Describe("DataScienceCluster initialization", func() { It("Should create default rolebinding", func() { // then foundRoleBinding := &authv1.RoleBinding{} - Eventually(objectExists(applicationNamespace, applicationNamespace, foundRoleBinding), timeout, interval).Should(BeTrue()) + Eventually(objectExists(applicationNamespace, applicationNamespace, foundRoleBinding)). + WithTimeout(timeout). + WithPolling(interval). + Should(BeTrue()) expectedSubjects := []authv1.Subject{ { Kind: "ServiceAccount", @@ -89,7 +101,10 @@ var _ = Describe("DataScienceCluster initialization", func() { It("Should create default configmap", func() { // then foundConfigMap := &corev1.ConfigMap{} - Eventually(objectExists(configmapName, applicationNamespace, foundConfigMap), timeout, interval).Should(BeTrue()) + Eventually(objectExists(configmapName, applicationNamespace, foundConfigMap)). + WithTimeout(timeout). + WithPolling(interval). 
+ Should(BeTrue()) Expect(foundConfigMap.Name).To(Equal(configmapName)) Expect(foundConfigMap.Namespace).To(Equal(applicationNamespace)) expectedConfigmapData := map[string]string{"namespace": applicationNamespace} @@ -106,20 +121,32 @@ var _ = Describe("DataScienceCluster initialization", func() { desiredDsci := createDSCI(applicationName, operatorv1.Removed, monitoringNamespace2) Expect(k8sClient.Create(context.Background(), desiredDsci)).Should(Succeed()) foundDsci := &dsci.DSCInitialization{} - Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci), timeout, interval).Should(BeTrue()) + Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci)). + WithTimeout(timeout). + WithPolling(interval). + Should(BeTrue()) // then foundMonitoringNamespace := &corev1.Namespace{} - Eventually(namespaceExists(monitoringNamespace2, foundMonitoringNamespace), timeout, interval).Should(BeFalse()) + Eventually(namespaceExists(monitoringNamespace2, foundMonitoringNamespace)). + WithTimeout(timeout). + WithPolling(interval). + Should(BeFalse()) }) It("Should create default monitoring namespace if monitoring enabled", func() { // when desiredDsci := createDSCI(applicationName, operatorv1.Managed, monitoringNamespace2) Expect(k8sClient.Create(context.Background(), desiredDsci)).Should(Succeed()) foundDsci := &dsci.DSCInitialization{} - Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci), timeout, interval).Should(BeTrue()) + Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci)). + WithTimeout(timeout). + WithPolling(interval). + Should(BeTrue()) // then foundMonitoringNamespace := &corev1.Namespace{} - Eventually(namespaceExists(monitoringNamespace2, foundMonitoringNamespace), timeout, interval).Should(BeTrue()) + Eventually(namespaceExists(monitoringNamespace2, foundMonitoringNamespace)). + WithTimeout(timeout). + WithPolling(interval). + Should(BeTrue()) Expect(foundMonitoringNamespace.Name).Should(Equal(monitoringNamespace2)) }) }) @@ -137,7 +164,10 @@ var _ = Describe("DataScienceCluster initialization", func() { // when desiredDsci2 := createDSCI(anotherApplicationName, operatorv1.Managed, monitoringNamespace) // then - Eventually(dscInitializationIsReady(anotherApplicationName, workingNamespace, desiredDsci2), timeout, interval).Should(BeFalse()) + Eventually(dscInitializationIsReady(anotherApplicationName, workingNamespace, desiredDsci2)). + WithTimeout(timeout). + WithPolling(interval). + Should(BeFalse()) }) It("Should not update rolebinding if it exists", func() { @@ -162,17 +192,26 @@ var _ = Describe("DataScienceCluster initialization", func() { } Expect(k8sClient.Create(context.Background(), desiredRoleBinding)).Should(Succeed()) createdRoleBinding := &authv1.RoleBinding{} - Eventually(objectExists(applicationNamespace, applicationNamespace, createdRoleBinding), timeout, interval).Should(BeTrue()) + Eventually(objectExists(applicationNamespace, applicationNamespace, createdRoleBinding)). + WithTimeout(timeout). + WithPolling(interval). + Should(BeTrue()) // when desiredDsci := createDSCI(applicationName, operatorv1.Managed, monitoringNamespace) Expect(k8sClient.Create(context.Background(), desiredDsci)).Should(Succeed()) foundDsci := &dsci.DSCInitialization{} - Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci), timeout, interval).Should(BeTrue()) + Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci)). + WithTimeout(timeout). 
+ WithPolling(interval). + Should(BeTrue()) // then foundRoleBinding := &authv1.RoleBinding{} - Eventually(objectExists(applicationNamespace, applicationNamespace, foundRoleBinding), timeout, interval).Should(BeTrue()) + Eventually(objectExists(applicationNamespace, applicationNamespace, foundRoleBinding)). + WithTimeout(timeout). + WithPolling(interval). + Should(BeTrue()) Expect(foundRoleBinding.UID).To(Equal(createdRoleBinding.UID)) Expect(foundRoleBinding.Subjects).To(BeNil()) }) @@ -194,17 +233,26 @@ var _ = Describe("DataScienceCluster initialization", func() { } Expect(k8sClient.Create(context.Background(), desiredConfigMap)).Should(Succeed()) createdConfigMap := &corev1.ConfigMap{} - Eventually(objectExists(configmapName, applicationNamespace, createdConfigMap), timeout, interval).Should(BeTrue()) + Eventually(objectExists(configmapName, applicationNamespace, createdConfigMap)). + WithTimeout(timeout). + WithPolling(interval). + Should(BeTrue()) // when desiredDsci := createDSCI(applicationName, operatorv1.Managed, monitoringNamespace) Expect(k8sClient.Create(context.Background(), desiredDsci)).Should(Succeed()) foundDsci := &dsci.DSCInitialization{} - Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci), timeout, interval).Should(BeTrue()) + Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci)). + WithTimeout(timeout). + WithPolling(interval). + Should(BeTrue()) // then foundConfigMap := &corev1.ConfigMap{} - Eventually(objectExists(configmapName, applicationNamespace, foundConfigMap), timeout, interval).Should(BeTrue()) + Eventually(objectExists(configmapName, applicationNamespace, foundConfigMap)). + WithTimeout(timeout). + WithPolling(interval). + Should(BeTrue()) Expect(foundConfigMap.UID).To(Equal(createdConfigMap.UID)) Expect(foundConfigMap.Data).To(Equal(map[string]string{"namespace": "existing-data"})) Expect(foundConfigMap.Data).ToNot(Equal(map[string]string{"namespace": applicationNamespace})) @@ -222,17 +270,26 @@ var _ = Describe("DataScienceCluster initialization", func() { } Expect(k8sClient.Create(context.Background(), desiredNamespace)).Should(Succeed()) createdNamespace := &corev1.Namespace{} - Eventually(namespaceExists(anotherNamespace, createdNamespace), timeout, interval).Should(BeTrue()) + Eventually(namespaceExists(anotherNamespace, createdNamespace)). + WithTimeout(timeout). + WithPolling(interval). + Should(BeTrue()) // when desiredDsci := createDSCI(applicationName, operatorv1.Managed, monitoringNamespace) Expect(k8sClient.Create(context.Background(), desiredDsci)).Should(Succeed()) foundDsci := &dsci.DSCInitialization{} - Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci), timeout, interval).Should(BeTrue()) + Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci)). + WithTimeout(timeout). + WithPolling(interval). + Should(BeTrue()) // then foundApplicationNamespace := &corev1.Namespace{} - Eventually(namespaceExists(anotherNamespace, foundApplicationNamespace), timeout, interval).Should(BeTrue()) + Eventually(namespaceExists(anotherNamespace, foundApplicationNamespace)). + WithTimeout(timeout). + WithPolling(interval). 
+ Should(BeTrue()) Expect(foundApplicationNamespace.Name).To(Equal(createdNamespace.Name)) Expect(foundApplicationNamespace.UID).To(Equal(createdNamespace.UID)) }) @@ -249,10 +306,22 @@ func cleanupResources() { Expect(k8sClient.DeleteAllOf(context.TODO(), &authv1.RoleBinding{}, appNamespace)).To(Succeed()) Expect(k8sClient.DeleteAllOf(context.TODO(), &authv1.ClusterRoleBinding{}, appNamespace)).To(Succeed()) - Eventually(noInstanceExistsIn(workingNamespace, &dsci.DSCInitializationList{}), timeout, interval).Should(BeTrue()) - Eventually(noInstanceExistsIn(applicationNamespace, &authv1.ClusterRoleBindingList{}), timeout, interval).Should(BeTrue()) - Eventually(noInstanceExistsIn(applicationNamespace, &authv1.RoleBindingList{}), timeout, interval).Should(BeTrue()) - Eventually(noInstanceExistsIn(applicationNamespace, &corev1.ConfigMapList{}), timeout, interval).Should(BeTrue()) + Eventually(noInstanceExistsIn(workingNamespace, &dsci.DSCInitializationList{})). + WithTimeout(timeout). + WithPolling(interval). + Should(BeTrue()) + Eventually(noInstanceExistsIn(applicationNamespace, &authv1.ClusterRoleBindingList{})). + WithTimeout(timeout). + WithPolling(interval). + Should(BeTrue()) + Eventually(noInstanceExistsIn(applicationNamespace, &authv1.RoleBindingList{})). + WithTimeout(timeout). + WithPolling(interval). + Should(BeTrue()) + Eventually(noInstanceExistsIn(applicationNamespace, &corev1.ConfigMapList{})). + WithTimeout(timeout). + WithPolling(interval). + Should(BeTrue()) } func noInstanceExistsIn(namespace string, list client.ObjectList) func() bool { diff --git a/tests/envtestutil/cleaner.go b/tests/envtestutil/cleaner.go index 9b12301f887..1883a3faf4b 100644 --- a/tests/envtestutil/cleaner.go +++ b/tests/envtestutil/cleaner.go @@ -111,7 +111,10 @@ func (c *Cleaner) DeleteAll(objects ...client.Object) { // ability to patch the /finalize subresource on the namespace _, err = c.clientset.CoreV1().Namespaces().Finalize(context.Background(), ns, metav1.UpdateOptions{}) return err - }, c.timeout, c.interval).Should(Succeed()) + }). + WithTimeout(c.timeout). + WithPolling(c.interval). + Should(Succeed()) } Eventually(func() metav1.StatusReason { From aaf5f150f860915003f4650ac0ab12d447bcb73e Mon Sep 17 00:00:00 2001 From: Ajay Pratap Date: Mon, 5 Feb 2024 18:29:26 +0530 Subject: [PATCH 12/32] Add manual operator upgrade testing document (#802) * add manual upgrade testing document (cherry picked from commit f2b177ad986a3e086c0b7b232d60540f3a42ec67) --- README.md | 4 ++ docs/upgrade-testing.md | 140 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 144 insertions(+) create mode 100644 docs/upgrade-testing.md diff --git a/README.md b/README.md index 3becd4d5f69..713cbcf4431 100644 --- a/README.md +++ b/README.md @@ -300,3 +300,7 @@ make e2e-test -e OPERATOR_NAMESPACE= -e E2E_TEST_FLAGS="--skip-deleti ### Troubleshooting Please refer to [troubleshooting documentation](docs/troubleshooting.md) + +### Upgrade testing + +Please refer to [upgrade testing documentation](docs/upgrade-testing.md) diff --git a/docs/upgrade-testing.md b/docs/upgrade-testing.md new file mode 100644 index 00000000000..f00ef9cf56d --- /dev/null +++ b/docs/upgrade-testing.md @@ -0,0 +1,140 @@ +## Upgrade testing +Follow below step for manual upgrade testing + +1. Set environment variables to overwrite values in Makefile. You should overwrite the IMAGE_OWNER and VERSION etc values for pushing container images into your quay.io account. + +``` +IMAGE_OWNER ?= IMAGE_OWNER +VERSION ?= VERSION +``` + +2. 
Add `replaces` property in [opendatahub-operator.clusterserviceversion.yaml](https://github.com/opendatahub-io/opendatahub-operator/blob/114a137d6289c748d421e7560f6f4fdf925e1b1f/config/manifests/bases/opendatahub-operator.clusterserviceversion.yaml) and add version which you would like to upgrade with next version + +``` +replaces: opendatahub-operator.v2.4.0 +``` + +3. Build and push docker container image + +``` +make image +``` + +4. Build bundle image + +``` +make bundle-build +``` + +5. Push bundle image into registry + +``` +make bundle-push +``` + +6. Build catalog source image + +``` +make catalog-build +``` + +7. Push catalog source image into registry + +``` +make catalog-push +``` +### Cluster +Deploy CatalogSource on cluster to install `Open Data Hub Operator`. + + +8. Deploy CatalogSource. Deploy catalog source on cluster by using your catalog source container image and wait until catalog source pod is ready + +```console +cat < Date: Mon, 5 Feb 2024 15:24:48 +0100 Subject: [PATCH 13/32] chore(test): simplifies name generations (#841) Instead of using our random string generation, this PR relies on how k8s does it internally. See https://github.com/kubernetes/apimachinery/blob/f14778da5523847e4c07346e3161a4b4f6c9186e/pkg/util/rand/rand.go#L98 (cherry picked from commit 6530454d6cc723a349106a159c588ecd5b1a94c2) --- tests/envtestutil/name_gen.go | 64 +++++++---------------------------- 1 file changed, 12 insertions(+), 52 deletions(-) diff --git a/tests/envtestutil/name_gen.go b/tests/envtestutil/name_gen.go index e336a3fd687..44d39681282 100644 --- a/tests/envtestutil/name_gen.go +++ b/tests/envtestutil/name_gen.go @@ -1,60 +1,20 @@ package envtestutil import ( - "crypto/rand" - "encoding/hex" - "math" - "math/big" -) - -var letters = []rune("abcdefghijklmnopqrstuvwxyz") - -func RandomUUIDName(numlen int) string { - uuidBytes := make([]byte, numlen) - _, _ = rand.Read(uuidBytes) - - return hex.EncodeToString(uuidBytes)[:numlen] -} - -func AppendRandomNameTo(prefix string) string { - return ConcatToMax(63, prefix, GenerateString(16)) -} - -// GenerateString generates random alphabetical name which can be used for example as application or namespace name. -// Maximum length is capped at 63 characters. -// -// Don't forget to seed before using this function, e.g. rand.Seed(time.Now().UTC().UnixNano()) -// otherwise you will always get the same value. -func GenerateString(length int) string { - if length == 0 { - return "" - } + "fmt" - if length > 63 { - length = 63 - } - - b := make([]rune, length) - for i := range b { - ri, _ := rand.Int(rand.Reader, big.NewInt(int64(len(letters)))) - b[i] = letters[ri.Int64()] - } - - return string(b) -} + utilrand "k8s.io/apimachinery/pkg/util/rand" +) -// ConcatToMax will cut each section to length based on number of sections to not go beyond max and separate the sections with -. 
-func ConcatToMax(max int, sections ...string) string { - sectionLength := (max - len(sections) - 1) / len(sections) - name := "" +const ( + maxNameLength = 63 + randomLength = 5 + maxGeneratedNameLength = maxNameLength - randomLength +) - for i, section := range sections { - s := section[:int32(math.Min(float64(len(section)), float64(sectionLength)))] - name = name + "-" + s - if i+1 != len(sections) { - sectionLength = (max - len(name) - 1) / (len(sections) - (i + 1)) - } +func AppendRandomNameTo(base string) string { + if len(base) > maxGeneratedNameLength { + base = base[:maxGeneratedNameLength] } - - return name[1:] + return fmt.Sprintf("%s%s", base, utilrand.String(randomLength)) } From 34ff018de3293ae2c69f57180023acefb9921beb Mon Sep 17 00:00:00 2001 From: Bartosz Majsak Date: Mon, 5 Feb 2024 17:26:00 +0100 Subject: [PATCH 14/32] fix(manifests): return walkErr immediately (#842) `walkErr` is propagated as part of a recursive call when traversing a directory tree. This adds a guard check and does not proceed if the error happens in the call chain. Without this, the controller will `panic` in case of the wrong path defined for a manifest. (cherry picked from commit 052124e32ae9488d252ccc804aab8dcfe910bdf7) --- pkg/feature/manifest.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/feature/manifest.go b/pkg/feature/manifest.go index 3fcbcd22de3..db4b111f1dc 100644 --- a/pkg/feature/manifest.go +++ b/pkg/feature/manifest.go @@ -34,6 +34,10 @@ func loadManifestsFrom(fsys fs.FS, path string) ([]manifest, error) { var manifests []manifest err := fs.WalkDir(fsys, path, func(path string, dirEntry fs.DirEntry, walkErr error) error { + if walkErr != nil { + return walkErr + } + _, err := dirEntry.Info() if err != nil { return err From b30c678dfbdff0a8463d73b109d42db1ed6a9e39 Mon Sep 17 00:00:00 2001 From: Aslak Knutsen Date: Tue, 6 Feb 2024 08:04:25 +0100 Subject: [PATCH 15/32] fix: checks value instead of err when updating configmap (#843) `client.Get()` will never return an error of `IsAlreadyExists` if the resource exists, it would rather return the object. `IsAlreadyExists` error is only relevant with `client.Create` (cherry picked from commit 12a4075fa9bf51d8e14e6a281c3bb2cc54626e54) --- pkg/feature/feature.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/feature/feature.go b/pkg/feature/feature.go index 5ccb5d6f3a5..5edbc976d2b 100644 --- a/pkg/feature/feature.go +++ b/pkg/feature/feature.go @@ -160,13 +160,13 @@ func (f *Feature) CreateConfigMap(cfgMapName string, data map[string]string) err } configMaps := f.Clientset.CoreV1().ConfigMaps(configMap.Namespace) - _, err := configMaps.Get(context.TODO(), configMap.Name, metav1.GetOptions{}) + found, err := configMaps.Get(context.TODO(), configMap.Name, metav1.GetOptions{}) if k8serrors.IsNotFound(err) { //nolint:gocritic _, err = configMaps.Create(context.TODO(), configMap, metav1.CreateOptions{}) if err != nil { return err } - } else if k8serrors.IsAlreadyExists(err) { + } else if found != nil { _, err = configMaps.Update(context.TODO(), configMap, metav1.UpdateOptions{}) if err != nil { return err From f9a8c967bf0b0b0a14aec4f013511d26ca33b439 Mon Sep 17 00:00:00 2001 From: Bartosz Majsak Date: Tue, 6 Feb 2024 09:26:37 +0100 Subject: [PATCH 16/32] fix(crd): updates docs in crd (#845) Missed as part of #839. 
I assumed it is part of default target (cherry picked from commit d32bee5080ea385ad5e1c39b2dfab0039caa36a1) --- ...er.opendatahub.io_datascienceclusters.yaml | 36 +++++++++---------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/config/crd/bases/datasciencecluster.opendatahub.io_datascienceclusters.yaml b/config/crd/bases/datasciencecluster.opendatahub.io_datascienceclusters.yaml index 5b269e6319b..fd4c96d290b 100644 --- a/config/crd/bases/datasciencecluster.opendatahub.io_datascienceclusters.yaml +++ b/config/crd/bases/datasciencecluster.opendatahub.io_datascienceclusters.yaml @@ -63,12 +63,12 @@ spec: description: 'sourcePath is the subpath within contextDir where kustomize builds start. Examples include any sub-folder or path: `base`, `overlays/dev`, - `default`, `odh` etc' + `default`, `odh` etc.' type: string uri: default: "" description: uri is the URI point to a git repo - with tag/branch. e.g https://github.com/org/repo/tarball/ + with tag/branch. e.g. https://github.com/org/repo/tarball/ type: string type: object type: array @@ -106,12 +106,12 @@ spec: description: 'sourcePath is the subpath within contextDir where kustomize builds start. Examples include any sub-folder or path: `base`, `overlays/dev`, - `default`, `odh` etc' + `default`, `odh` etc.' type: string uri: default: "" description: uri is the URI point to a git repo - with tag/branch. e.g https://github.com/org/repo/tarball/ + with tag/branch. e.g. https://github.com/org/repo/tarball/ type: string type: object type: array @@ -150,12 +150,12 @@ spec: description: 'sourcePath is the subpath within contextDir where kustomize builds start. Examples include any sub-folder or path: `base`, `overlays/dev`, - `default`, `odh` etc' + `default`, `odh` etc.' type: string uri: default: "" description: uri is the URI point to a git repo - with tag/branch. e.g https://github.com/org/repo/tarball/ + with tag/branch. e.g. https://github.com/org/repo/tarball/ type: string type: object type: array @@ -196,12 +196,12 @@ spec: description: 'sourcePath is the subpath within contextDir where kustomize builds start. Examples include any sub-folder or path: `base`, `overlays/dev`, - `default`, `odh` etc' + `default`, `odh` etc.' type: string uri: default: "" description: uri is the URI point to a git repo - with tag/branch. e.g https://github.com/org/repo/tarball/ + with tag/branch. e.g. https://github.com/org/repo/tarball/ type: string type: object type: array @@ -299,12 +299,12 @@ spec: description: 'sourcePath is the subpath within contextDir where kustomize builds start. Examples include any sub-folder or path: `base`, `overlays/dev`, - `default`, `odh` etc' + `default`, `odh` etc.' type: string uri: default: "" description: uri is the URI point to a git repo - with tag/branch. e.g https://github.com/org/repo/tarball/ + with tag/branch. e.g. https://github.com/org/repo/tarball/ type: string type: object type: array @@ -343,12 +343,12 @@ spec: description: 'sourcePath is the subpath within contextDir where kustomize builds start. Examples include any sub-folder or path: `base`, `overlays/dev`, - `default`, `odh` etc' + `default`, `odh` etc.' type: string uri: default: "" description: uri is the URI point to a git repo - with tag/branch. e.g https://github.com/org/repo/tarball/ + with tag/branch. e.g. 
https://github.com/org/repo/tarball/ type: string type: object type: array @@ -386,12 +386,12 @@ spec: description: 'sourcePath is the subpath within contextDir where kustomize builds start. Examples include any sub-folder or path: `base`, `overlays/dev`, - `default`, `odh` etc' + `default`, `odh` etc.' type: string uri: default: "" description: uri is the URI point to a git repo - with tag/branch. e.g https://github.com/org/repo/tarball/ + with tag/branch. e.g. https://github.com/org/repo/tarball/ type: string type: object type: array @@ -429,12 +429,12 @@ spec: description: 'sourcePath is the subpath within contextDir where kustomize builds start. Examples include any sub-folder or path: `base`, `overlays/dev`, - `default`, `odh` etc' + `default`, `odh` etc.' type: string uri: default: "" description: uri is the URI point to a git repo - with tag/branch. e.g https://github.com/org/repo/tarball/ + with tag/branch. e.g. https://github.com/org/repo/tarball/ type: string type: object type: array @@ -472,12 +472,12 @@ spec: description: 'sourcePath is the subpath within contextDir where kustomize builds start. Examples include any sub-folder or path: `base`, `overlays/dev`, - `default`, `odh` etc' + `default`, `odh` etc.' type: string uri: default: "" description: uri is the URI point to a git repo - with tag/branch. e.g https://github.com/org/repo/tarball/ + with tag/branch. e.g. https://github.com/org/repo/tarball/ type: string type: object type: array From 6b0a9285780ec80fc15e2677c9695c8674e322bd Mon Sep 17 00:00:00 2001 From: Bartosz Majsak Date: Tue, 6 Feb 2024 09:43:57 +0100 Subject: [PATCH 17/32] fix(make): ensures manifests are generated as part of default target (#846) This is based on the assumption that calling the `make` default target ensures the repo is always in the desired state. 
This includes: - all generated artifacts are up-to-date with the code they are derived from - code passes defined linter checks - a subset of tests (fast tests) is passing - no compilation errors (cherry picked from commit 69356d33f47ef3dcfd4b8e77b0735dfb6a687966) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 6b975095bae..bfdd5186681 100644 --- a/Makefile +++ b/Makefile @@ -99,7 +99,7 @@ E2E_TEST_FLAGS = "--skip-deletion=false" -timeout 15m # See README.md, default g IMAGE_BUILD_FLAGS = --build-arg USE_LOCAL=false .PHONY: default -default: lint unit-test build +default: manifests lint unit-test build ##@ General From 6c4a7fc89c798f799e192cb7217c687131c3651d Mon Sep 17 00:00:00 2001 From: Wen Zhou Date: Thu, 8 Feb 2024 22:07:59 +0100 Subject: [PATCH 18/32] Fix: uninstallation for operator (#796) * Fix: add back logic to deal with uninstallation - when CM exist and DSC CR exists Signed-off-by: Wen Zhou * update: cleanup finalzlier * fix Signed-off-by: Wen Zhou * update: uninstallation process - add logic to remove subscription - do not call backup in DSCI during uninstallation - add permisison of delete subscripton - change function SubscriptionExists() return sub Signed-off-by: Wen Zhou * update: add deletion for DS project as well during uninstallation Signed-off-by: Wen Zhou * update: add calls of component.Cleanup() esp for kserve for now for uninstall Signed-off-by: Wen Zhou * revert: deletion ds project during uninstall Signed-off-by: Wen Zhou * fix: extend timeout in unit-test till we update testcase Signed-off-by: Wen Zhou * cleanup: remove commented block Signed-off-by: Wen Zhou --------- Signed-off-by: Wen Zhou (cherry picked from commit 92276871200b9d871e191b6cb8b99d6ff114f3f7) --- .../rhods-operator.clusterserviceversion.yaml | 1 + config/rbac/role.yaml | 1 + .../datasciencecluster_controller.go | 42 ++++++++++++++---- .../datasciencecluster/kubebuilder_rbac.go | 2 +- .../dscinitialization_controller.go | 9 ++-- controllers/dscinitialization/suite_test.go | 2 +- pkg/deploy/deploy.go | 9 ++-- pkg/upgrade/upgrade.go | 44 +++++++++++++++---- tests/e2e/dsc_cfmap_deletion_test.go | 5 +++ 9 files changed, 87 insertions(+), 28 deletions(-) diff --git a/bundle/manifests/rhods-operator.clusterserviceversion.yaml b/bundle/manifests/rhods-operator.clusterserviceversion.yaml index badc283186d..3f2afaa0cfc 100644 --- a/bundle/manifests/rhods-operator.clusterserviceversion.yaml +++ b/bundle/manifests/rhods-operator.clusterserviceversion.yaml @@ -1332,6 +1332,7 @@ spec: resources: - subscriptions verbs: + - delete - get - list - watch diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 7589b3ba71b..bbe029f5342 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -1093,6 +1093,7 @@ rules: resources: - subscriptions verbs: + - delete - get - list - watch diff --git a/controllers/datasciencecluster/datasciencecluster_controller.go b/controllers/datasciencecluster/datasciencecluster_controller.go index 10677ab0399..3adb0b1ba34 100644 --- a/controllers/datasciencecluster/datasciencecluster_controller.go +++ b/controllers/datasciencecluster/datasciencecluster_controller.go @@ -35,6 +35,7 @@ import ( corev1 "k8s.io/api/core/v1" netv1 "k8s.io/api/networking/v1" authv1 "k8s.io/api/rbac/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/rest" @@ -80,10 +81,11 @@ const ( // Reconcile is part of the main kubernetes reconciliation loop which 
aims to // move the current state of the cluster closer to the desired state. -func (r *DataScienceClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +func (r *DataScienceClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { //nolint:gocyclo,maintidx r.Log.Info("Reconciling DataScienceCluster resources", "Request.Name", req.Name) instances := &dsc.DataScienceClusterList{} + if err := r.Client.List(ctx, instances); err != nil { return ctrl.Result{}, err } @@ -104,6 +106,37 @@ func (r *DataScienceClusterReconciler) Reconcile(ctx context.Context, req ctrl.R instance := &instances.Items[0] + allComponents, err := getAllComponents(&instance.Spec.Components) + if err != nil { + return ctrl.Result{}, err + } + + // If DSC CR exist and deletion CM exist + // delete DSC CR and let reconcile requeue + // sometimes with finalzier DSC CR wont get deleted, force to remove finalizer here + if upgrade.HasDeleteConfigMap(r.Client) { + if controllerutil.ContainsFinalizer(instance, finalizerName) { + if controllerutil.RemoveFinalizer(instance, finalizerName) { + if err := r.Update(ctx, instance); err != nil { + r.Log.Info("Error to remove DSC finalzier", "error", err) + return ctrl.Result{}, err + } + r.Log.Info("Removed finalizer for DataScienceCluster", "name", instance.Name, "finalizer", finalizerName) + } + } + if err := r.Client.Delete(context.TODO(), instance, []client.DeleteOption{}...); err != nil { + if !apierrs.IsNotFound(err) { + return reconcile.Result{}, err + } + } + for _, component := range allComponents { + if err := component.Cleanup(r.Client, r.DataScienceCluster.DSCISpec); err != nil { + return ctrl.Result{}, err + } + } + return reconcile.Result{Requeue: true}, nil + } + if len(instances.Items) > 1 { message := fmt.Sprintf("only one instance of DataScienceCluster object is allowed. 
Update existing instance %s", req.Name) err := errors.New(message) @@ -117,8 +150,6 @@ func (r *DataScienceClusterReconciler) Reconcile(ctx context.Context, req ctrl.R return ctrl.Result{}, err } - var err error - // Verify a valid DSCInitialization instance is created dsciInstances := &dsci.DSCInitializationList{} err = r.Client.List(ctx, dsciInstances) @@ -154,11 +185,6 @@ func (r *DataScienceClusterReconciler) Reconcile(ctx context.Context, req ctrl.R return ctrl.Result{}, errors.New(message) } - allComponents, err := getAllComponents(&instance.Spec.Components) - if err != nil { - return ctrl.Result{}, err - } - if instance.ObjectMeta.DeletionTimestamp.IsZero() { if !controllerutil.ContainsFinalizer(instance, finalizerName) { r.Log.Info("Adding finalizer for DataScienceCluster", "name", instance.Name, "finalizer", finalizerName) diff --git a/controllers/datasciencecluster/kubebuilder_rbac.go b/controllers/datasciencecluster/kubebuilder_rbac.go index 38f9d1c1d03..dd8c02699b5 100644 --- a/controllers/datasciencecluster/kubebuilder_rbac.go +++ b/controllers/datasciencecluster/kubebuilder_rbac.go @@ -29,7 +29,7 @@ package datasciencecluster // +kubebuilder:rbac:groups="operators.coreos.com",resources=clusterserviceversions,verbs=get;list;watch;delete;update // +kubebuilder:rbac:groups="operators.coreos.com",resources=customresourcedefinitions,verbs=create;get;patch;delete -// +kubebuilder:rbac:groups="operators.coreos.com",resources=subscriptions,verbs=get;list;watch +// +kubebuilder:rbac:groups="operators.coreos.com",resources=subscriptions,verbs=get;list;watch;delete // +kubebuilder:rbac:groups="operators.coreos.com",resources=operatorconditions,verbs=get;list;watch /* This is for operator */ diff --git a/controllers/dscinitialization/dscinitialization_controller.go b/controllers/dscinitialization/dscinitialization_controller.go index abe659cb7bb..6a8d92fd8fe 100644 --- a/controllers/dscinitialization/dscinitialization_controller.go +++ b/controllers/dscinitialization/dscinitialization_controller.go @@ -375,11 +375,10 @@ func (r *DSCInitializationReconciler) watchDSCResource(_ client.Object) []reconc // do not handle if cannot get list return nil } - if len(instanceList.Items) == 0 { - r.Log.Info("Found no DSC instance in cluster, reset monitoring stack config") - return []reconcile.Request{{ - NamespacedName: types.NamespacedName{Name: "backup"}, - }} + if len(instanceList.Items) == 0 && !upgrade.HasDeleteConfigMap(r.Client) { + r.Log.Info("Found no DSC instance in cluster but not in uninstalltion process, reset monitoring stack config") + + return []reconcile.Request{{NamespacedName: types.NamespacedName{Name: "backup"}}} } return nil } diff --git a/controllers/dscinitialization/suite_test.go b/controllers/dscinitialization/suite_test.go index da61c0b6ab4..7deb7115277 100644 --- a/controllers/dscinitialization/suite_test.go +++ b/controllers/dscinitialization/suite_test.go @@ -61,7 +61,7 @@ var ( ) const ( - timeout = 20 * time.Second + timeout = 30 * time.Second // change this from original 20 to 30 because we often failed in post cleanup job interval = 250 * time.Millisecond ) diff --git a/pkg/deploy/deploy.go b/pkg/deploy/deploy.go index c5dabe5f020..14686da2ee5 100644 --- a/pkg/deploy/deploy.go +++ b/pkg/deploy/deploy.go @@ -401,16 +401,17 @@ func ApplyParams(componentPath string, imageParamsMap map[string]string, isUpdat } // SubscriptionExists checks if a Subscription for the operator exists in the given namespace. 
-func SubscriptionExists(cli client.Client, namespace string, name string) (bool, error) { +// if exsit, return object; if not exsit, return nil. +func SubscriptionExists(cli client.Client, namespace string, name string) (*ofapiv1alpha1.Subscription, error) { sub := &ofapiv1alpha1.Subscription{} if err := cli.Get(context.TODO(), client.ObjectKey{Namespace: namespace, Name: name}, sub); err != nil { if apierrs.IsNotFound(err) { - return false, nil + return nil, nil } - return false, err + return nil, err } - return true, nil + return sub, nil } // OperatorExists checks if an Operator with 'operatorPrefix' is installed. diff --git a/pkg/upgrade/upgrade.go b/pkg/upgrade/upgrade.go index 0be7941b8f2..2a79e5828f3 100644 --- a/pkg/upgrade/upgrade.go +++ b/pkg/upgrade/upgrade.go @@ -42,7 +42,6 @@ const ( // DeleteConfigMapLabel is the label for configMap used to trigger operator uninstall // TODO: Label should be updated if addon name changes. DeleteConfigMapLabel = "api.openshift.com/addon-managed-odh-delete" - // odhGeneratedNamespaceLabel is the label added to all the namespaces genereated by odh-deployer. ) // OperatorUninstall deletes all the externally generated resources. This includes monitoring resources and applications @@ -83,16 +82,43 @@ func OperatorUninstall(cli client.Client, cfg *rest.Config) error { if err := cli.Delete(context.TODO(), &namespace, []client.DeleteOption{}...); err != nil { return fmt.Errorf("error deleting namespace %v: %w", namespace.Name, err) } - fmt.Printf("Namespace %s deleted as a part of uninstall.\n", namespace.Name) + fmt.Printf("Namespace %s deleted as a part of uninstallation.\n", namespace.Name) } } - // Wait for all resources to get cleaned up + // give enough time for namespace deletion before proceed time.Sleep(10 * time.Second) - fmt.Printf("All resources deleted as part of uninstall. Removing the operator csv\n") + // We can only assume the subscription is using standard names + // if user install by creating different named subs, then we will not know the name + // we cannot remove CSV before remove subscription because that need SA account + operatorNs, err := GetOperatorNamespace() + if err != nil { + return err + } + fmt.Printf("Removing operator subscription which in turn will remove installplan\n") + subsName := "opendatahub-operator" + if platform == deploy.SelfManagedRhods { + subsName = "rhods-operator" + } else if platform == deploy.ManagedRhods { + subsName = "addon-managed-odh" + } + sub, _ := deploy.SubscriptionExists(cli, operatorNs, subsName) + if sub == nil { + fmt.Printf("Could not find subscription %s in namespace %s. 
Maybe you have a different one", subsName, operatorNs) + } else { + if err := cli.Delete(context.TODO(), sub); err != nil { + return fmt.Errorf("error deleting subscription %s: %w", sub.Name, err) + } + } - return removeCsv(cli, cfg) + fmt.Printf("Removing the operator CSV in turn remove operator deployment\n") + time.Sleep(5 * time.Second) + + err = removeCSV(cli, cfg) + + fmt.Printf("All resources deleted as part of uninstall.") + return err } func removeDSCInitialization(cli client.Client) error { @@ -362,7 +388,7 @@ func RemoveKfDefInstances(cli client.Client, _ deploy.Platform) error { return nil } -func removeCsv(c client.Client, r *rest.Config) error { +func removeCSV(c client.Client, r *rest.Config) error { // Get watchNamespace operatorNamespace, err := GetOperatorNamespace() if err != nil { @@ -375,7 +401,7 @@ func removeCsv(c client.Client, r *rest.Config) error { } if operatorCsv != nil { - fmt.Printf("Deleting csv %s\n", operatorCsv.Name) + fmt.Printf("Deleting CSV %s\n", operatorCsv.Name) err = c.Delete(context.TODO(), operatorCsv, []client.DeleteOption{}...) if err != nil { if apierrs.IsNotFound(err) { @@ -385,9 +411,9 @@ func removeCsv(c client.Client, r *rest.Config) error { return fmt.Errorf("error deleting clusterserviceversion: %w", err) } fmt.Printf("Clusterserviceversion %s deleted as a part of uninstall.\n", operatorCsv.Name) + return nil } fmt.Printf("No clusterserviceversion for the operator found.\n") - return nil } @@ -402,7 +428,7 @@ func getClusterServiceVersion(cfg *rest.Config, watchNameSpace string) (*ofapi.C return nil, err } - // get csv with CRD DataScienceCluster + // get CSV with CRD DataScienceCluster if len(csvs.Items) != 0 { for _, csv := range csvs.Items { for _, operatorCR := range csv.Spec.CustomResourceDefinitions.Owned { diff --git a/tests/e2e/dsc_cfmap_deletion_test.go b/tests/e2e/dsc_cfmap_deletion_test.go index ba887219af8..ae95fabcce7 100644 --- a/tests/e2e/dsc_cfmap_deletion_test.go +++ b/tests/e2e/dsc_cfmap_deletion_test.go @@ -46,6 +46,11 @@ func cfgMapDeletionTestSuite(t *testing.T) { require.NoError(t, err, "Error while deleting owned namespaces") }) + // t.Run("DS Projects should be deleted", func(t *testing.T) { + // err = testCtx.testDSProjectDeletion() + // require.NoError(t, err, "Error while deleting DS Projectss") + // }) + t.Run("dsci should be deleted", func(t *testing.T) { err = testCtx.testDSCIDeletion() require.NoError(t, err, "failed deleting DSCI") From be03b892555616f66ba0a79ce27e5c5003ad249c Mon Sep 17 00:00:00 2001 From: Vedant Mahabaleshwarkar Date: Thu, 8 Feb 2024 16:09:08 -0500 Subject: [PATCH 19/32] add logic to ensure deprecated model monitoring stack is deleted (#850) Signed-off-by: Vedant Mahabaleshwarkar (cherry picked from commit ae3b539fb64829a1e1a8854e9ccefd895a5fb752) --- main.go | 6 +++ pkg/upgrade/upgrade.go | 104 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 110 insertions(+) diff --git a/main.go b/main.go index 8fe0c6cf502..de5914d89b2 100644 --- a/main.go +++ b/main.go @@ -29,6 +29,7 @@ import ( ocuserv1 "github.com/openshift/api/user/v1" ofapiv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" ofapiv2 "github.com/operator-framework/api/pkg/operators/v2" + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" admv1 "k8s.io/api/admissionregistration/v1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -85,6 +86,7 @@ func init() { //nolint:gochecknoinits utilruntime.Must(apiextv1.AddToScheme(scheme)) 
utilruntime.Must(admv1.AddToScheme(scheme)) utilruntime.Must(apiregistrationv1.AddToScheme(scheme)) + utilruntime.Must(monitoringv1.AddToScheme(scheme)) } func main() { @@ -201,6 +203,10 @@ func main() { setupLog.Error(err, "unable to update from legacy operator version") } + if err = upgrade.CleanupExistingResource(setupClient, platform); err != nil { + setupLog.Error(err, "unable to perform cleanup") + } + if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { setupLog.Error(err, "unable to set up health check") os.Exit(1) diff --git a/pkg/upgrade/upgrade.go b/pkg/upgrade/upgrade.go index 2a79e5828f3..e415c90791c 100644 --- a/pkg/upgrade/upgrade.go +++ b/pkg/upgrade/upgrade.go @@ -4,18 +4,23 @@ import ( "context" "fmt" "os" + "reflect" "strings" "time" "github.com/hashicorp/go-multierror" operatorv1 "github.com/openshift/api/operator/v1" + routev1 "github.com/openshift/api/route/v1" ofapi "github.com/operator-framework/api/pkg/operators/v1alpha1" olmclientset "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1alpha1" + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + authv1 "k8s.io/api/rbac/v1" apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" @@ -44,6 +49,11 @@ const ( DeleteConfigMapLabel = "api.openshift.com/addon-managed-odh-delete" ) +type Resource interface { + metav1.Object + runtime.Object +} + // OperatorUninstall deletes all the externally generated resources. This includes monitoring resources and applications // installed by KfDef. 
func OperatorUninstall(cli client.Client, cfg *rest.Config) error { @@ -339,6 +349,44 @@ func UpdateFromLegacyVersion(cli client.Client, platform deploy.Platform, appNS return nil } +func CleanupExistingResource(cli client.Client, platform deploy.Platform) error { + var multiErr *multierror.Error + + // Special Handling of cleanup of deprecated model monitoring stack + if platform == deploy.ManagedRhods { + montNamespace := "redhat-ods-monitoring" + ctx := context.TODO() + deprecatedDeployments := []string{"rhods-prometheus-operator"} + multiErr = multierror.Append(multiErr, deleteDeprecatedResources(ctx, cli, montNamespace, deprecatedDeployments, &appsv1.DeploymentList{})) + + deprecatedStatefulsets := []string{"prometheus-rhods-model-monitoring"} + multiErr = multierror.Append(multiErr, deleteDeprecatedResources(ctx, cli, montNamespace, deprecatedStatefulsets, &appsv1.StatefulSetList{})) + + deprecatedServices := []string{"rhods-model-monitoring"} + multiErr = multierror.Append(multiErr, deleteDeprecatedResources(ctx, cli, montNamespace, deprecatedServices, &corev1.ServiceList{})) + + deprecatedRoutes := []string{"rhods-model-monitoring"} + multiErr = multierror.Append(multiErr, deleteDeprecatedResources(ctx, cli, montNamespace, deprecatedRoutes, &routev1.RouteList{})) + + deprecatedSecrets := []string{"rhods-monitoring-oauth-config"} + multiErr = multierror.Append(multiErr, deleteDeprecatedResources(ctx, cli, montNamespace, deprecatedSecrets, &corev1.SecretList{})) + + deprecatedClusterroles := []string{"rhods-namespace-read", "rhods-prometheus-operator"} + multiErr = multierror.Append(multiErr, deleteDeprecatedResources(ctx, cli, montNamespace, deprecatedClusterroles, &authv1.ClusterRoleList{})) + + deprecatedClusterrolebindings := []string{"rhods-namespace-read", "rhods-prometheus-operator"} + multiErr = multierror.Append(multiErr, deleteDeprecatedResources(ctx, cli, montNamespace, deprecatedClusterrolebindings, &authv1.ClusterRoleBindingList{})) + + deprecatedServiceAccounts := []string{"rhods-prometheus-operator"} + multiErr = multierror.Append(multiErr, deleteDeprecatedResources(ctx, cli, montNamespace, deprecatedServiceAccounts, &corev1.ServiceAccountList{})) + + deprecatedServicemonitors := []string{"modelmesh-federated-metrics"} + multiErr = multierror.Append(multiErr, deleteDeprecatedServiceMonitors(ctx, cli, montNamespace, deprecatedServicemonitors)) + } + + return multiErr.ErrorOrNil() +} + func GetOperatorNamespace() (string, error) { data, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") if err == nil { @@ -441,6 +489,62 @@ func getClusterServiceVersion(cfg *rest.Config, watchNameSpace string) (*ofapi.C return nil, nil } +func deleteDeprecatedResources(ctx context.Context, cli client.Client, namespace string, resourceList []string, resourceType client.ObjectList) error { //nolint:unparam + var multiErr *multierror.Error + listOpts := &client.ListOptions{Namespace: namespace} + if err := cli.List(ctx, resourceType, listOpts); err != nil { + multiErr = multierror.Append(multiErr, err) + } + items := reflect.ValueOf(resourceType).Elem().FieldByName("Items") + for i := 0; i < items.Len(); i++ { + item := items.Index(i).Addr().Interface().(client.Object) //nolint:errcheck,forcetypeassert + for _, name := range resourceList { + if name == item.GetName() { + fmt.Printf("Attempting to delete %s in namespace %s\n", item.GetName(), namespace) + err := cli.Delete(ctx, item) + if err != nil { + if apierrs.IsNotFound(err) { + fmt.Printf("Could not find %s in 
namespace %s\n", item.GetName(), namespace) + } else { + multiErr = multierror.Append(multiErr, err) + } + } + fmt.Printf("Successfully deleted %s\n", item.GetName()) + } + } + } + return multiErr.ErrorOrNil() +} + +// Need to handle ServiceMonitor deletion separately as the generic function does not work for ServiceMonitors because of how the package is built. +func deleteDeprecatedServiceMonitors(ctx context.Context, cli client.Client, namespace string, resourceList []string) error { + var multiErr *multierror.Error + listOpts := &client.ListOptions{Namespace: namespace} + servicemonitors := &monitoringv1.ServiceMonitorList{} + if err := cli.List(ctx, servicemonitors, listOpts); err != nil { + multiErr = multierror.Append(multiErr, err) + } + + for _, servicemonitor := range servicemonitors.Items { + servicemonitor := servicemonitor + for _, name := range resourceList { + if name == servicemonitor.Name { + fmt.Printf("Attempting to delete %s in namespace %s\n", servicemonitor.Name, namespace) + err := cli.Delete(ctx, servicemonitor) + if err != nil { + if apierrs.IsNotFound(err) { + fmt.Printf("Could not find %s in namespace %s\n", servicemonitor.Name, namespace) + } else { + multiErr = multierror.Append(multiErr, err) + } + } + fmt.Printf("Successfully deleted %s\n", servicemonitor.Name) + } + } + } + return multiErr.ErrorOrNil() +} + func deleteResource(cli client.Client, namespace string, resourceType string) error { // In v2, Deployment selectors use a label "app.opendatahub.io/" which is // not present in v1. Since label selectors are immutable, we need to delete the existing From cfe78f94157785ac967e1178fdd4597d89cf9678 Mon Sep 17 00:00:00 2001 From: Wen Zhou Date: Mon, 12 Feb 2024 13:57:04 +0100 Subject: [PATCH 20/32] update: remove duplicated createnamespace step in DSCI (#855) - we do not need to have this call for every reconcile - the function does patching namespace on downstream logic everytime Signed-off-by: Wen Zhou (cherry picked from commit 7d757e30f71f6501062f4c95d4d70f9d3309e082) --- .../dscinitialization/dscinitialization_controller.go | 8 -------- 1 file changed, 8 deletions(-) diff --git a/controllers/dscinitialization/dscinitialization_controller.go b/controllers/dscinitialization/dscinitialization_controller.go index 6a8d92fd8fe..7b34122811a 100644 --- a/controllers/dscinitialization/dscinitialization_controller.go +++ b/controllers/dscinitialization/dscinitialization_controller.go @@ -153,14 +153,6 @@ func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Re } } - // Check namespace - namespace := instance.Spec.ApplicationsNamespace - err = r.createOdhNamespace(ctx, instance, namespace) - if err != nil { - // no need to log error as it was already logged in createOdhNamespace - return reconcile.Result{}, err - } - // Get platform platform, err := deploy.GetPlatform(r.Client) if err != nil { From d93a2dd3e5862d577e0e072df125d186619de8f6 Mon Sep 17 00:00:00 2001 From: Cameron Garrison Date: Mon, 12 Feb 2024 09:12:39 -0500 Subject: [PATCH 21/32] move feature tracker out of spec (#859) (cherry picked from commit 4d8ba7ed4c6183e40ca24eb5dfa4708d853d9ea9) --- pkg/feature/feature.go | 7 ++++--- pkg/feature/feature_tracker_handler.go | 8 ++++---- pkg/feature/types.go | 1 - 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/pkg/feature/feature.go b/pkg/feature/feature.go index 5edbc976d2b..6141915ca49 100644 --- a/pkg/feature/feature.go +++ b/pkg/feature/feature.go @@ -23,6 +23,7 @@ type Feature struct { Name string Spec *Spec Enabled 
bool + Tracker *featurev1.FeatureTracker Clientset *kubernetes.Clientset DynamicClient dynamic.Interface @@ -212,14 +213,14 @@ func (f *Feature) apply(m manifest) error { } func (f *Feature) AsOwnerReference() metav1.OwnerReference { - return f.Spec.Tracker.ToOwnerReference() + return f.Tracker.ToOwnerReference() } // updateFeatureTrackerStatus updates conditions of a FeatureTracker. // It's deliberately logging errors instead of handing them as it is used in deferred error handling of Feature public API, // which is more predictable. func (f *Feature) updateFeatureTrackerStatus(condType conditionsv1.ConditionType, status corev1.ConditionStatus, reason featurev1.FeaturePhase, message string) { - tracker := f.Spec.Tracker + tracker := f.Tracker // Update the status if tracker.Status.Conditions == nil { @@ -237,5 +238,5 @@ func (f *Feature) updateFeatureTrackerStatus(condType conditionsv1.ConditionType f.Log.Error(err, "Error updating FeatureTracker status") } - f.Spec.Tracker.Status = tracker.Status + f.Tracker.Status = tracker.Status } diff --git a/pkg/feature/feature_tracker_handler.go b/pkg/feature/feature_tracker_handler.go index e487847b0ae..223e6a2d0b2 100644 --- a/pkg/feature/feature_tracker_handler.go +++ b/pkg/feature/feature_tracker_handler.go @@ -28,13 +28,13 @@ func (f *Feature) createFeatureTracker() error { return err } - f.Spec.Tracker = tracker + f.Tracker = tracker return nil } func removeFeatureTracker(f *Feature) error { - if f.Spec.Tracker != nil { + if f.Tracker != nil { return deleteTracker(f) } @@ -65,7 +65,7 @@ func (f *Feature) getFeatureTracker() (*featurev1.FeatureTracker, error) { func setFeatureTrackerIfAbsent(f *Feature) error { tracker, err := f.getFeatureTracker() - f.Spec.Tracker = tracker + f.Tracker = tracker return err } @@ -86,7 +86,7 @@ func (f *Feature) ensureGVKSet(obj runtime.Object) error { } func deleteTracker(f *Feature) error { - err := f.Client.Delete(context.Background(), f.Spec.Tracker) + err := f.Client.Delete(context.Background(), f.Tracker) if err != nil && !k8serrors.IsNotFound(err) { return err } diff --git a/pkg/feature/types.go b/pkg/feature/types.go index 4857bb1d243..9733587b074 100644 --- a/pkg/feature/types.go +++ b/pkg/feature/types.go @@ -15,7 +15,6 @@ type Spec struct { Domain string KnativeCertificateSecret string KnativeIngressDomain string - Tracker *featurev1.FeatureTracker Source *featurev1.Source } From 1935074e851859249b3b3116f0c0cf5df8ae99c2 Mon Sep 17 00:00:00 2001 From: Vaishnavi Hire Date: Fri, 16 Feb 2024 12:43:01 -0500 Subject: [PATCH 22/32] Add Trusted Bundle Configmap to all namespaces (#848) * Add odh-trusted-ca-bundle CM to DSCInitialization (cherry picked from commit 5d415c26dafdcc9e992fa463716fc9f5837c247c) * Update ca bundle configMap to use dscInit TrustedCABundle for the default value (cherry picked from commit 9a94ef66cfce2c034754f1c7d0154f0870868ced) * Add Trusted Bundle Configmap to all namespaces * Add ManagementState to TrustedCABundle Co-authored-by: Wen Zhou fix --------- Co-authored-by: Landon LaSmith (cherry picked from commit d998839a35c64a7aaad717ee24a5f64d6635fcfd) --- .../v1/dscinitialization_types.go | 20 +- .../v1/zz_generated.deepcopy.go | 16 ++ ...ion.opendatahub.io_dscinitializations.yaml | 25 +++ .../rhods-operator.clusterserviceversion.yaml | 10 + ...ion.opendatahub.io_dscinitializations.yaml | 25 +++ ...scinitialization_v1_dscinitialization.yaml | 5 +- .../certconfigmapgenerator_controller.go | 158 +++++++++++++ .../dscinitialization_controller.go | 35 ++- .../dscinitialization_test.go | 
22 +- main.go | 10 + pkg/trustedcabundle/trustedcabundle.go | 210 ++++++++++++++++++ pkg/upgrade/upgrade.go | 3 + tests/e2e/helper_test.go | 4 + 13 files changed, 531 insertions(+), 12 deletions(-) create mode 100644 controllers/certconfigmapgenerator/certconfigmapgenerator_controller.go create mode 100644 pkg/trustedcabundle/trustedcabundle.go diff --git a/apis/dscinitialization/v1/dscinitialization_types.go b/apis/dscinitialization/v1/dscinitialization_types.go index 593a1a5921c..7fad5bcb69e 100644 --- a/apis/dscinitialization/v1/dscinitialization_types.go +++ b/apis/dscinitialization/v1/dscinitialization_types.go @@ -45,9 +45,15 @@ type DSCInitializationSpec struct { // +operator-sdk:csv:customresourcedefinitions:type=spec,order=3 // +optional ServiceMesh infrav1.ServiceMeshSpec `json:"serviceMesh,omitempty"` + // When set to `Managed`, adds odh-trusted-ca-bundle Configmap to all namespaces that includes + // cluster-wide Trusted CA Bundle in .data["ca-bundle.crt"]. + // Additionally, this fields allows admins to add custom CA bundles to the configmap using the .CustomCABundle field. + // +operator-sdk:csv:customresourcedefinitions:type=spec,order=4 + // +optional + TrustedCABundle TrustedCABundleSpec `json:"trustedCABundle,omitempty"` // Internal development useful field to test customizations. // This is not recommended to be used in production environment. - // +operator-sdk:csv:customresourcedefinitions:type=spec,order=4 + // +operator-sdk:csv:customresourcedefinitions:type=spec,order=5 // +optional DevFlags *DevFlags `json:"devFlags,omitempty"` } @@ -73,6 +79,18 @@ type DevFlags struct { ManifestsUri string `json:"manifestsUri,omitempty"` } +type TrustedCABundleSpec struct { + // managementState indicates whether and how the operator should manage customized CA bundle + // +kubebuilder:validation:Enum=Managed;Removed;Unmanaged + // +kubebuilder:default=Removed + ManagementState operatorv1.ManagementState `json:"managementState"` + // A custom CA bundle that will be available for all components in the + // Data Science Cluster(DSC). This bundle will be stored in odh-trusted-ca-bundle + // ConfigMap .data.odh-ca-bundle.crt . + // +kubebuilder:default="" + CustomCABundle string `json:"customCABundle"` +} + // DSCInitializationStatus defines the observed state of DSCInitialization. type DSCInitializationStatus struct { // Phase describes the Phase of DSCInitializationStatus diff --git a/apis/dscinitialization/v1/zz_generated.deepcopy.go b/apis/dscinitialization/v1/zz_generated.deepcopy.go index 800f45905ed..c25082c24e2 100644 --- a/apis/dscinitialization/v1/zz_generated.deepcopy.go +++ b/apis/dscinitialization/v1/zz_generated.deepcopy.go @@ -91,6 +91,7 @@ func (in *DSCInitializationSpec) DeepCopyInto(out *DSCInitializationSpec) { *out = *in out.Monitoring = in.Monitoring out.ServiceMesh = in.ServiceMesh + out.TrustedCABundle = in.TrustedCABundle if in.DevFlags != nil { in, out := &in.DevFlags, &out.DevFlags *out = new(DevFlags) @@ -164,3 +165,18 @@ func (in *Monitoring) DeepCopy() *Monitoring { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrustedCABundleSpec) DeepCopyInto(out *TrustedCABundleSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustedCABundleSpec. 
+func (in *TrustedCABundleSpec) DeepCopy() *TrustedCABundleSpec { + if in == nil { + return nil + } + out := new(TrustedCABundleSpec) + in.DeepCopyInto(out) + return out +} diff --git a/bundle/manifests/dscinitialization.opendatahub.io_dscinitializations.yaml b/bundle/manifests/dscinitialization.opendatahub.io_dscinitializations.yaml index c3757674e01..8ae3397ba85 100644 --- a/bundle/manifests/dscinitialization.opendatahub.io_dscinitializations.yaml +++ b/bundle/manifests/dscinitialization.opendatahub.io_dscinitializations.yaml @@ -123,6 +123,31 @@ spec: pattern: ^(Managed|Unmanaged|Force|Removed)$ type: string type: object + trustedCABundle: + description: When set to `Managed`, adds odh-trusted-ca-bundle Configmap + to all namespaces that includes cluster-wide Trusted CA Bundle in + .data["ca-bundle.crt"]. Additionally, this fields allows admins + to add custom CA bundles to the configmap using the .CustomCABundle + field. + properties: + customCABundle: + description: A custom CA bundle that will be available for all components + in the Data Science Cluster(DSC). This bundle will be stored + in odh-trusted-ca-bundle ConfigMap .data.odh-ca-bundle.crt . + type: string + managementState: + default: Removed + description: managementState indicates whether and how the operator + should manage customized CA bundle + enum: + - Managed + - Removed + - Unmanaged + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + required: + - managementState + type: object required: - applicationsNamespace type: object diff --git a/bundle/manifests/rhods-operator.clusterserviceversion.yaml b/bundle/manifests/rhods-operator.clusterserviceversion.yaml index 3f2afaa0cfc..2b2d3a06b95 100644 --- a/bundle/manifests/rhods-operator.clusterserviceversion.yaml +++ b/bundle/manifests/rhods-operator.clusterserviceversion.yaml @@ -84,6 +84,10 @@ metadata: "namespace": "istio-system" }, "managementState": "Managed" + }, + "trustedCABundle": { + "customCABundle": "", + "managementState": "Managed" } } }, @@ -201,6 +205,12 @@ spec: e.g. it provides unified authentication giving a Single Sign On experience. displayName: Service Mesh path: serviceMesh + - description: When set to `Managed`, adds odh-trusted-ca-bundle Configmap to + all namespaces that includes cluster-wide Trusted CA Bundle in .data["ca-bundle.crt"]. + Additionally, this fields allows admins to add custom CA bundles to the + configmap using the .CustomCABundle field. + displayName: Trusted CABundle + path: trustedCABundle - description: Internal development useful field to test customizations. This is not recommended to be used in production environment. displayName: Dev Flags diff --git a/config/crd/bases/dscinitialization.opendatahub.io_dscinitializations.yaml b/config/crd/bases/dscinitialization.opendatahub.io_dscinitializations.yaml index 01213c67a24..48621d88cc7 100644 --- a/config/crd/bases/dscinitialization.opendatahub.io_dscinitializations.yaml +++ b/config/crd/bases/dscinitialization.opendatahub.io_dscinitializations.yaml @@ -124,6 +124,31 @@ spec: pattern: ^(Managed|Unmanaged|Force|Removed)$ type: string type: object + trustedCABundle: + description: When set to `Managed`, adds odh-trusted-ca-bundle Configmap + to all namespaces that includes cluster-wide Trusted CA Bundle in + .data["ca-bundle.crt"]. Additionally, this fields allows admins + to add custom CA bundles to the configmap using the .CustomCABundle + field. 
+ properties: + customCABundle: + description: A custom CA bundle that will be available for all components + in the Data Science Cluster(DSC). This bundle will be stored + in odh-trusted-ca-bundle ConfigMap .data.odh-ca-bundle.crt . + type: string + managementState: + default: Removed + description: managementState indicates whether and how the operator + should manage customized CA bundle + enum: + - Managed + - Removed + - Unmanaged + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + required: + - managementState + type: object required: - applicationsNamespace type: object diff --git a/config/samples/dscinitialization_v1_dscinitialization.yaml b/config/samples/dscinitialization_v1_dscinitialization.yaml index 0d6645911b7..d02a5d4c34e 100644 --- a/config/samples/dscinitialization_v1_dscinitialization.yaml +++ b/config/samples/dscinitialization_v1_dscinitialization.yaml @@ -18,5 +18,8 @@ spec: metricsCollection: Istio name: data-science-smcp namespace: istio-system - managementState: Managed + managementState: "Managed" + trustedCABundle: + managementState: "Managed" + customCABundle: "" diff --git a/controllers/certconfigmapgenerator/certconfigmapgenerator_controller.go b/controllers/certconfigmapgenerator/certconfigmapgenerator_controller.go new file mode 100644 index 00000000000..17732592d39 --- /dev/null +++ b/controllers/certconfigmapgenerator/certconfigmapgenerator_controller.go @@ -0,0 +1,158 @@ +// Package certconfigmapgenerator contains generator logic of add cert configmap resource in user namespaces +package certconfigmapgenerator + +import ( + "context" + "reflect" + + "github.com/go-logr/logr" + operatorv1 "github.com/openshift/api/operator/v1" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + dsci "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/trustedcabundle" +) + +var configmapGenLog = log.Log.WithName("cert-configmap-generator") + +// CertConfigmapGeneratorReconciler holds the controller configuration. +type CertConfigmapGeneratorReconciler struct { //nolint:golint,revive // Readability + Client client.Client + Scheme *runtime.Scheme + Log logr.Logger +} + +// SetupWithManager sets up the controller with the Manager. +func (r *CertConfigmapGeneratorReconciler) SetupWithManager(mgr ctrl.Manager) error { + configmapGenLog.Info("Adding controller for Configmap Generation.") + return ctrl.NewControllerManagedBy(mgr). + Named("cert-configmap-generator-controller"). + Watches(&source.Kind{Type: &corev1.ConfigMap{}}, handler.EnqueueRequestsFromMapFunc(r.watchTrustedCABundleConfigMapResource), builder.WithPredicates(ConfigMapChangedPredicate)). + Watches(&source.Kind{Type: &corev1.Namespace{}}, handler.EnqueueRequestsFromMapFunc(r.watchNamespaceResource), builder.WithPredicates(NamespaceCreatedPredicate)). 
+ Complete(r) +} + +// Reconcile will generate new configmap, odh-trusted-ca-bundle, that includes cluster-wide trusted-ca bundle and custom +// ca bundle in every new namespace created. +func (r *CertConfigmapGeneratorReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + // Request includes namespace that is newly created or where odh-trusted-ca-bundle configmap is updated. + r.Log.Info("Reconciling certConfigMapGenerator.", " CertConfigMapGenerator Request.Namespace", req.NamespacedName) + // Get namespace instance + userNamespace := &corev1.Namespace{} + err := r.Client.Get(ctx, client.ObjectKey{Name: req.Namespace}, userNamespace) + if err != nil { + return ctrl.Result{}, errors.WithMessage(err, "error getting user namespace to inject trustedCA bundle ") + } + + // Get DSCI instance + dsciInstances := &dsci.DSCInitializationList{} + err = r.Client.List(ctx, dsciInstances) + if err != nil { + r.Log.Error(err, "Failed to retrieve DSCInitialization resource for certconfigmapgenerator ", "CertConfigmapGenerator Request.Name", req.Name) + return ctrl.Result{}, err + } + + var dsciInstance *dsci.DSCInitialization + switch len(dsciInstances.Items) { + case 0: + return ctrl.Result{}, nil + case 1: + dsciInstance = &dsciInstances.Items[0] + default: + message := "only one instance of DSCInitialization object is allowed" + return ctrl.Result{}, errors.New(message) + } + + if dsciInstance.Spec.TrustedCABundle.ManagementState != operatorv1.Managed { + return ctrl.Result{}, nil + } + + // Verify if namespace did not opt out of trustedCABundle injection + if trustedcabundle.HasCABundleAnnotationDisabled(userNamespace) { + r.Log.Info("Namespace has opted-out of CA bundle injection using annotation ", "namespace", userNamespace.Name, + "annotation", trustedcabundle.InjectionOfCABundleAnnotatoion) + err := trustedcabundle.DeleteOdhTrustedCABundleConfigMap(ctx, r.Client, req.Namespace) + if err != nil { + if !apierrors.IsNotFound(err) { + r.Log.Error(err, "error deleting existing configmap from namespace", "name", trustedcabundle.CAConfigMapName, "namespace", req.Namespace) + return reconcile.Result{}, err + } + } + return reconcile.Result{}, nil + } + + // Verify odh-trusted-ca-bundle Configmap is created for the given namespace + if trustedcabundle.ShouldInjectTrustedBundle(userNamespace) { + r.Log.Info("Adding trusted CA bundle configmap to the new or existing namespace ", "namespace", userNamespace.Name, + "configmap", trustedcabundle.CAConfigMapName) + trustCAData := dsciInstance.Spec.TrustedCABundle.CustomCABundle + err = trustedcabundle.CreateOdhTrustedCABundleConfigMap(ctx, r.Client, req.Namespace, trustCAData) + if err != nil { + r.Log.Error(err, "error adding configmap to namespace", "name", trustedcabundle.CAConfigMapName, "namespace", req.Namespace) + return reconcile.Result{}, err + } + } + return ctrl.Result{}, err +} + +func (r *CertConfigmapGeneratorReconciler) watchNamespaceResource(a client.Object) []reconcile.Request { + if trustedcabundle.ShouldInjectTrustedBundle(a) { + return []reconcile.Request{{NamespacedName: types.NamespacedName{Name: trustedcabundle.CAConfigMapName, Namespace: a.GetName()}}} + } + return nil +} + +func (r *CertConfigmapGeneratorReconciler) watchTrustedCABundleConfigMapResource(a client.Object) []reconcile.Request { + if a.GetName() == trustedcabundle.CAConfigMapName { + r.Log.Info("Cert configmap has been updated, start reconcile") + return []reconcile.Request{{NamespacedName: types.NamespacedName{Name: a.GetName(), Namespace: 
a.GetNamespace()}}} + } + return nil +} + +var NamespaceCreatedPredicate = predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + return trustedcabundle.ShouldInjectTrustedBundle(e.Object) + }, + + // If user changes the annotation of namespace to opt out of CABundle injection, reconcile. + UpdateFunc: func(e event.UpdateEvent) bool { + oldNamespace, _ := e.ObjectOld.(*corev1.Namespace) + newNamespace, _ := e.ObjectNew.(*corev1.Namespace) + + oldNsAnnValue, oldNsAnnExists := oldNamespace.GetAnnotations()[trustedcabundle.InjectionOfCABundleAnnotatoion] + newNsAnnValue, newNsAnnExists := newNamespace.GetAnnotations()[trustedcabundle.InjectionOfCABundleAnnotatoion] + + if newNsAnnExists && !oldNsAnnExists { + return true + } else if newNsAnnExists && oldNsAnnExists && oldNsAnnValue != newNsAnnValue { + return true + } + return false + }, +} + +var ConfigMapChangedPredicate = predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + oldCM, _ := e.ObjectOld.(*corev1.ConfigMap) + newCM, _ := e.ObjectNew.(*corev1.ConfigMap) + return !reflect.DeepEqual(oldCM.Data, newCM.Data) + }, + + DeleteFunc: func(deleteEvent event.DeleteEvent) bool { + return true + }, +} diff --git a/controllers/dscinitialization/dscinitialization_controller.go b/controllers/dscinitialization/dscinitialization_controller.go index 7b34122811a..be34f146816 100644 --- a/controllers/dscinitialization/dscinitialization_controller.go +++ b/controllers/dscinitialization/dscinitialization_controller.go @@ -49,6 +49,7 @@ import ( dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/trustedcabundle" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/upgrade" ) @@ -56,6 +57,10 @@ const ( finalizerName = "dscinitialization.opendatahub.io/finalizer" ) +// This ar is required by the .spec.TrustedCABundle field. When a user goes from Unmanaged to Managed, update all +// namespaces irrespective of any changes in the configmap. +var managementStateChangeTrustedCA = false + // DSCInitializationReconciler reconciles a DSCInitialization object. type DSCInitializationReconciler struct { //nolint:golint,revive // Readability client.Client @@ -153,6 +158,21 @@ func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Re } } + // Check namespace + namespace := instance.Spec.ApplicationsNamespace + err = r.createOdhNamespace(ctx, instance, namespace) + if err != nil { + // no need to log error as it was already logged in createOdhNamespace + return reconcile.Result{}, err + } + + // Verify odh-trusted-ca-bundle Configmap is configured for the namespaces + err = trustedcabundle.ConfigureTrustedCABundle(ctx, r.Client, r.Log, instance, managementStateChangeTrustedCA) + if err != nil { + return reconcile.Result{}, err + } + managementStateChangeTrustedCA = false + // Get platform platform, err := deploy.GetPlatform(r.Client) if err != nil { @@ -279,7 +299,8 @@ func (r *DSCInitializationReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). // add predicates prevents meaningless reconciliations from being triggered // not use WithEventFilter() because it conflict with secret and configmap predicate - For(&dsciv1.DSCInitialization{}, builder.WithPredicates(predicate.Or(predicate.GenerationChangedPredicate{}, predicate.LabelChangedPredicate{}))). 
+ For(&dsciv1.DSCInitialization{}, builder.WithPredicates(predicate.Or(predicate.GenerationChangedPredicate{}, + predicate.LabelChangedPredicate{}), dsciPredicateStateChangeTrustedCA)). Owns(&corev1.Namespace{}, builder.WithPredicates(predicate.Or(predicate.GenerationChangedPredicate{}, predicate.LabelChangedPredicate{}))). Owns(&corev1.Secret{}, builder.WithPredicates(predicate.Or(predicate.GenerationChangedPredicate{}, predicate.LabelChangedPredicate{}))). Owns(&corev1.ConfigMap{}, builder.WithPredicates(predicate.Or(predicate.GenerationChangedPredicate{}, predicate.LabelChangedPredicate{}))). @@ -339,6 +360,18 @@ var DSCDeletionPredicate = predicate.Funcs{ }, } +var dsciPredicateStateChangeTrustedCA = predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + oldDSCI, _ := e.ObjectOld.(*dsciv1.DSCInitialization) + newDSCI, _ := e.ObjectNew.(*dsciv1.DSCInitialization) + + if oldDSCI.Spec.TrustedCABundle.ManagementState != newDSCI.Spec.TrustedCABundle.ManagementState { + managementStateChangeTrustedCA = true + } + return true + }, +} + func (r *DSCInitializationReconciler) watchMonitoringConfigMapResource(a client.Object) []reconcile.Request { if a.GetName() == "prometheus" && a.GetNamespace() == "redhat-ods-monitoring" { r.Log.Info("Found monitoring configmap has updated, start reconcile") diff --git a/controllers/dscinitialization/dscinitialization_test.go b/controllers/dscinitialization/dscinitialization_test.go index 18fd56f9bf2..6a726a1998b 100644 --- a/controllers/dscinitialization/dscinitialization_test.go +++ b/controllers/dscinitialization/dscinitialization_test.go @@ -32,7 +32,7 @@ var _ = Describe("DataScienceCluster initialization", func() { const applicationName = "default-dsci" BeforeEach(func() { // when - desiredDsci := createDSCI(applicationName, operatorv1.Managed, monitoringNamespace) + desiredDsci := createDSCI(applicationName, operatorv1.Managed, operatorv1.Managed, monitoringNamespace) Expect(k8sClient.Create(context.Background(), desiredDsci)).Should(Succeed()) foundDsci := &dsci.DSCInitialization{} Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci)). @@ -110,6 +110,7 @@ var _ = Describe("DataScienceCluster initialization", func() { expectedConfigmapData := map[string]string{"namespace": applicationNamespace} Expect(foundConfigMap.Data).To(Equal(expectedConfigmapData)) }) + }) Context("Monitoring Resource", func() { @@ -118,7 +119,7 @@ var _ = Describe("DataScienceCluster initialization", func() { const applicationName = "default-dsci" It("Should not create monitoring namespace if monitoring is disabled", func() { // when - desiredDsci := createDSCI(applicationName, operatorv1.Removed, monitoringNamespace2) + desiredDsci := createDSCI(applicationName, operatorv1.Removed, operatorv1.Managed, monitoringNamespace2) Expect(k8sClient.Create(context.Background(), desiredDsci)).Should(Succeed()) foundDsci := &dsci.DSCInitialization{} Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci)). 
@@ -134,7 +135,7 @@ var _ = Describe("DataScienceCluster initialization", func() { }) It("Should create default monitoring namespace if monitoring enabled", func() { // when - desiredDsci := createDSCI(applicationName, operatorv1.Managed, monitoringNamespace2) + desiredDsci := createDSCI(applicationName, operatorv1.Managed, operatorv1.Managed, monitoringNamespace2) Expect(k8sClient.Create(context.Background(), desiredDsci)).Should(Succeed()) foundDsci := &dsci.DSCInitialization{} Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci)). @@ -159,10 +160,10 @@ var _ = Describe("DataScienceCluster initialization", func() { anotherApplicationName := "default2" // given - desiredDsci := createDSCI(applicationName, operatorv1.Managed, monitoringNamespace) + desiredDsci := createDSCI(applicationName, operatorv1.Managed, operatorv1.Managed, monitoringNamespace) Expect(k8sClient.Create(context.Background(), desiredDsci)).Should(Succeed()) // when - desiredDsci2 := createDSCI(anotherApplicationName, operatorv1.Managed, monitoringNamespace) + desiredDsci2 := createDSCI(anotherApplicationName, operatorv1.Managed, operatorv1.Managed, monitoringNamespace) // then Eventually(dscInitializationIsReady(anotherApplicationName, workingNamespace, desiredDsci2)). WithTimeout(timeout). @@ -198,7 +199,7 @@ var _ = Describe("DataScienceCluster initialization", func() { Should(BeTrue()) // when - desiredDsci := createDSCI(applicationName, operatorv1.Managed, monitoringNamespace) + desiredDsci := createDSCI(applicationName, operatorv1.Managed, operatorv1.Managed, monitoringNamespace) Expect(k8sClient.Create(context.Background(), desiredDsci)).Should(Succeed()) foundDsci := &dsci.DSCInitialization{} Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci)). @@ -239,7 +240,7 @@ var _ = Describe("DataScienceCluster initialization", func() { Should(BeTrue()) // when - desiredDsci := createDSCI(applicationName, operatorv1.Managed, monitoringNamespace) + desiredDsci := createDSCI(applicationName, operatorv1.Managed, operatorv1.Managed, monitoringNamespace) Expect(k8sClient.Create(context.Background(), desiredDsci)).Should(Succeed()) foundDsci := &dsci.DSCInitialization{} Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci)). @@ -276,7 +277,7 @@ var _ = Describe("DataScienceCluster initialization", func() { Should(BeTrue()) // when - desiredDsci := createDSCI(applicationName, operatorv1.Managed, monitoringNamespace) + desiredDsci := createDSCI(applicationName, operatorv1.Managed, operatorv1.Managed, monitoringNamespace) Expect(k8sClient.Create(context.Background(), desiredDsci)).Should(Succeed()) foundDsci := &dsci.DSCInitialization{} Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci)). 
@@ -348,7 +349,7 @@ func objectExists(ns string, name string, obj client.Object) func() bool { //nol } } -func createDSCI(appName string, enableMonitoring operatorv1.ManagementState, monitoringNS string) *dsci.DSCInitialization { +func createDSCI(appName string, enableMonitoring operatorv1.ManagementState, enableTrustedCABundle operatorv1.ManagementState, monitoringNS string) *dsci.DSCInitialization { return &dsci.DSCInitialization{ TypeMeta: metav1.TypeMeta{ Kind: "DSCInitialization", @@ -364,6 +365,9 @@ func createDSCI(appName string, enableMonitoring operatorv1.ManagementState, mon Namespace: monitoringNS, ManagementState: enableMonitoring, }, + TrustedCABundle: dsci.TrustedCABundleSpec{ + ManagementState: enableTrustedCABundle, + }, }, } } diff --git a/main.go b/main.go index de5914d89b2..cf76380826c 100644 --- a/main.go +++ b/main.go @@ -50,6 +50,7 @@ import ( dsc "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" dsci "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" featurev1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/features/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/controllers/certconfigmapgenerator" datascienceclustercontrollers "github.com/opendatahub-io/opendatahub-operator/v2/controllers/datasciencecluster" dscicontr "github.com/opendatahub-io/opendatahub-operator/v2/controllers/dscinitialization" "github.com/opendatahub-io/opendatahub-operator/v2/controllers/secretgenerator" @@ -171,6 +172,15 @@ func main() { os.Exit(1) } + if err = (&certconfigmapgenerator.CertConfigmapGeneratorReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Log: ctrl.Log.WithName("controllers").WithName("CertConfigmapGenerator"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "CertConfigmapGenerator") + os.Exit(1) + } + // Create new uncached client to run initial setup setupCfg, err := config.GetConfig() if err != nil { diff --git a/pkg/trustedcabundle/trustedcabundle.go b/pkg/trustedcabundle/trustedcabundle.go new file mode 100644 index 00000000000..a058d7f8af4 --- /dev/null +++ b/pkg/trustedcabundle/trustedcabundle.go @@ -0,0 +1,210 @@ +package trustedcabundle + +import ( + "context" + "fmt" + "strconv" + "strings" + "time" + + "github.com/go-logr/logr" + operatorv1 "github.com/openshift/api/operator/v1" + corev1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "sigs.k8s.io/controller-runtime/pkg/client" + + dsci "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" +) + +const ( + InjectionOfCABundleAnnotatoion = "security.opendatahub.io/inject-trusted-ca-bundle" + CAConfigMapName = "odh-trusted-ca-bundle" + CADataFieldName = "odh-ca-bundle.crt" +) + +func ShouldInjectTrustedBundle(ns client.Object) bool { + if !strings.HasPrefix(ns.GetName(), "openshift-") && !strings.HasPrefix(ns.GetName(), "kube-") && + ns.GetName() != "default" && ns.GetName() != "openshift" && !HasCABundleAnnotationDisabled(ns) { + return true + } + return false +} + +func HasCABundleAnnotationDisabled(ns client.Object) bool { + if value, found := ns.GetAnnotations()[InjectionOfCABundleAnnotatoion]; found { + enabled, err := strconv.ParseBool(value) + return err == nil && !enabled + } + return false +} + +func AddCABundleConfigMapInAllNamespaces(ctx context.Context, cli client.Client, dscInit *dsci.DSCInitialization) error { + namespaceList 
:= &corev1.NamespaceList{} + err := cli.List(ctx, namespaceList) + if err != nil { + return err + } + + for i := range namespaceList.Items { + ns := &namespaceList.Items[i] + if ShouldInjectTrustedBundle(ns) { + if err := wait.PollUntilContextTimeout(ctx, time.Second*1, time.Second*10, false, func(ctx context.Context) (bool, error) { + if cmErr := CreateOdhTrustedCABundleConfigMap(ctx, cli, ns.Name, dscInit.Spec.TrustedCABundle.CustomCABundle); cmErr != nil { + // Logging the error for debugging + fmt.Printf("error creating cert configmap in namespace %v: %v", ns.Name, cmErr) + return false, nil + } + return true, nil + }); err != nil { + return err + } + } + } + return nil +} + +// createOdhTrustedCABundleConfigMap creates a configMap 'odh-trusted-ca-bundle' -- Certificates for the cluster +// trusted CA Cert Bundle. +func CreateOdhTrustedCABundleConfigMap(ctx context.Context, cli client.Client, namespace string, customCAData string) error { + // Expected configmap for the given namespace + desiredConfigMap := &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + Kind: "ConfigMap", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: CAConfigMapName, + Namespace: namespace, + Labels: map[string]string{ + "app.kubernetes.io/part-of": "opendatahub-operator", + // Label required for the Cluster Network Operator(CNO) to inject the cluster trusted CA bundle + // into .data["ca-bundle.crt"] + "config.openshift.io/inject-trusted-cabundle": "true", + }, + }, + // Add the DSCInitialzation specified TrustedCABundle.CustomCABundle to CM's data.odh-ca-bundle.crt field + // Additionally, the CNO operator will automatically create and maintain ca-bundle.crt + // if label 'config.openshift.io/inject-trusted-cabundle' is true + Data: map[string]string{CADataFieldName: customCAData}, + } + + // Create Configmap if doesn't exist + foundConfigMap := &corev1.ConfigMap{} + err := cli.Get(ctx, client.ObjectKey{ + Name: CAConfigMapName, + Namespace: namespace, + }, foundConfigMap) + if err != nil { + if apierrs.IsNotFound(err) { + err = cli.Create(ctx, desiredConfigMap) + if err != nil && !apierrs.IsAlreadyExists(err) { + return err + } + return nil + } + return err + } + + if foundConfigMap.Data[CADataFieldName] != customCAData { + foundConfigMap.Data[CADataFieldName] = customCAData + return cli.Update(ctx, foundConfigMap) + } + + return nil +} + +func DeleteOdhTrustedCABundleConfigMap(ctx context.Context, cli client.Client, namespace string) error { + // Delete Configmap if exists + foundConfigMap := &corev1.ConfigMap{} + err := cli.Get(ctx, client.ObjectKey{ + Name: CAConfigMapName, + Namespace: namespace, + }, foundConfigMap) + if err != nil { + if apierrs.IsNotFound(err) { + return nil + } + return err + } + return cli.Delete(ctx, foundConfigMap) +} + +// IsTrustedCABundleUpdated verifies for a given namespace if the odh-ca-bundle.crt field in cert configmap is updated. +func IsTrustedCABundleUpdated(ctx context.Context, cli client.Client, dscInit *dsci.DSCInitialization) (bool, error) { + usernamespace := &corev1.Namespace{} + if err := cli.Get(ctx, client.ObjectKey{Name: dscInit.Spec.ApplicationsNamespace}, usernamespace); err != nil { + if apierrs.IsNotFound(err) { + // if namespace is not found, return true. This is to ensure we reconcile, and check for other namespaces. 
+ return true, nil + } + return false, err + } + + if HasCABundleAnnotationDisabled(usernamespace) { + return false, nil + } + + foundConfigMap := &corev1.ConfigMap{} + err := cli.Get(ctx, client.ObjectKey{ + Name: CAConfigMapName, + Namespace: dscInit.Spec.ApplicationsNamespace, + }, foundConfigMap) + + if err != nil { + if apierrs.IsNotFound(err) { + return false, nil + } + return false, err + } + + return foundConfigMap.Data[CADataFieldName] != dscInit.Spec.TrustedCABundle.CustomCABundle, nil +} + +func ConfigureTrustedCABundle(ctx context.Context, cli client.Client, log logr.Logger, dscInit *dsci.DSCInitialization, managementStateChanged bool) error { + switch dscInit.Spec.TrustedCABundle.ManagementState { + case operatorv1.Managed: + log.Info("Trusted CA Bundle injection is set to `Managed` state. Reconciling to add/update cert configmaps") + istrustedCABundleUpdated, err := IsTrustedCABundleUpdated(ctx, cli, dscInit) + if err != nil { + return err + } + + if istrustedCABundleUpdated || managementStateChanged { + err = AddCABundleConfigMapInAllNamespaces(ctx, cli, dscInit) + if err != nil { + log.Error(err, "error adding configmap to all namespaces", "name", CAConfigMapName) + return err + } + } + case operatorv1.Removed: + log.Info("Trusted CA Bundle injection is set to `Removed` state. Reconciling to delete all cert configmaps") + err := RemoveCABundleConfigMapInAllNamespaces(ctx, cli) + if err != nil { + log.Error(err, "error deleting configmap from all namespaces", "name", CAConfigMapName) + return err + } + + case operatorv1.Unmanaged: + log.Info("Trusted CA Bundle injection is set to `Unmanaged` state. Cert configmaps are no longer managed by DSCI") + } + + return nil +} + +func RemoveCABundleConfigMapInAllNamespaces(ctx context.Context, cli client.Client) error { + namespaceList := &corev1.NamespaceList{} + err := cli.List(ctx, namespaceList) + if err != nil { + return err + } + + for i := range namespaceList.Items { + ns := &namespaceList.Items[i] + if err := DeleteOdhTrustedCABundleConfigMap(ctx, cli, ns.Name); err != nil { + return err + } + } + return nil +} diff --git a/pkg/upgrade/upgrade.go b/pkg/upgrade/upgrade.go index e415c90791c..2b133e124d6 100644 --- a/pkg/upgrade/upgrade.go +++ b/pkg/upgrade/upgrade.go @@ -247,6 +247,9 @@ func CreateDefaultDSCI(cli client.Client, _ deploy.Platform, appNamespace, monNa MetricsCollection: "Istio", }, }, + TrustedCABundle: dsci.TrustedCABundleSpec{ + ManagementState: "Managed", + }, } defaultDsci := &dsci.DSCInitialization{ diff --git a/tests/e2e/helper_test.go b/tests/e2e/helper_test.go index 40dbd1e45f4..5019804c9ef 100644 --- a/tests/e2e/helper_test.go +++ b/tests/e2e/helper_test.go @@ -63,6 +63,10 @@ func setupDSCICR() *dsci.DSCInitialization { ManagementState: "Managed", Namespace: "opendatahub", }, + TrustedCABundle: dsci.TrustedCABundleSpec{ + ManagementState: "Managed", + CustomCABundle: "", + }, }, } return dsciTest From 13a2f1fd26fd0c78597fde3b48ba5b35fd941efa Mon Sep 17 00:00:00 2001 From: Bartosz Majsak Date: Mon, 19 Feb 2024 10:19:48 +0100 Subject: [PATCH 23/32] chore: simplifies k8s client usage in Features (#868) - simplifies interaction with cluster APIs by using basic `client.Interface` only instead. - previously, for those CRDs we did not want to take as dependencies, `DynamicClient` was used. Now unstructured objects with GVK set instead are used with `client.Interface` instead. 
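A minimal, hypothetical sketch of the pattern described above (not part of this patch series): a CRD-backed resource is read through the generic controller-runtime `client.Client` using an unstructured object with its GVK set, so no `kubernetes.Clientset` or `dynamic.Interface` is needed. The `example` package, the `listKnativeServings` helper, and its signature are assumptions made for illustration; the GVK mirrors the constants this patch adds in `pkg/cluster/const.go`.

```go
package example

import (
	"context"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// knativeServingGVK identifies the KnativeServing custom resource without
// importing its Go types as a dependency.
var knativeServingGVK = schema.GroupVersionKind{
	Group:   "operator.knative.dev",
	Version: "v1beta1",
	Kind:    "KnativeServing",
}

// listKnativeServings lists KnativeServing resources in the given namespace
// using only a client.Client and returns the unstructured items found.
func listKnativeServings(ctx context.Context, c client.Client, namespace string) (*unstructured.UnstructuredList, error) {
	list := &unstructured.UnstructuredList{}
	list.SetGroupVersionKind(knativeServingGVK)
	if err := c.List(ctx, list, client.InNamespace(namespace)); err != nil {
		return nil, err
	}
	return list, nil
}
```

The same client handles typed and unstructured objects alike, which is why the changes below can drop the separate `Clientset` and `DynamicClient` fields from `Feature` and delete the `pkg/gvr` package entirely.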
(cherry picked from commit 3fb7fd82bcd2e836ba87d083e61dab8635ff30f6) --- pkg/cluster/const.go | 23 +++++++++++ pkg/feature/builder.go | 15 +------- pkg/feature/cert.go | 24 +++++------- pkg/feature/conditions.go | 21 ++++++---- pkg/feature/feature.go | 38 +------------------ pkg/feature/raw_resources.go | 29 ++------------ pkg/feature/serverless/conditions.go | 15 +++++--- pkg/feature/serverless/loaders.go | 2 +- pkg/feature/serverless/resources.go | 22 +++++++---- pkg/feature/servicemesh/conditions.go | 19 ++++++---- pkg/gvr/gvr.go | 35 ----------------- .../integration/features/features_int_test.go | 6 +-- .../features/serverless_feature_test.go | 5 --- .../features/servicemesh_feature_test.go | 28 +++++--------- 14 files changed, 98 insertions(+), 184 deletions(-) create mode 100644 pkg/cluster/const.go delete mode 100644 pkg/gvr/gvr.go diff --git a/pkg/cluster/const.go b/pkg/cluster/const.go new file mode 100644 index 00000000000..de340c42d4c --- /dev/null +++ b/pkg/cluster/const.go @@ -0,0 +1,23 @@ +package cluster + +import "k8s.io/apimachinery/pkg/runtime/schema" + +var ( + KnativeServingGVK = schema.GroupVersionKind{ + Group: "operator.knative.dev", + Version: "v1beta1", + Kind: "KnativeServing", + } + + OpenshiftIngressGVK = schema.GroupVersionKind{ + Group: "config.openshift.io", + Version: "v1", + Kind: "Ingress", + } + + ServiceMeshControlPlaneGVK = schema.GroupVersionKind{ + Group: "maistra.io", + Version: "v2", + Kind: "ServiceMeshControlPlane", + } +) diff --git a/pkg/feature/builder.go b/pkg/feature/builder.go index 99be036bac4..9b952d9f9db 100644 --- a/pkg/feature/builder.go +++ b/pkg/feature/builder.go @@ -6,8 +6,6 @@ import ( "github.com/hashicorp/go-multierror" "github.com/pkg/errors" apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "sigs.k8s.io/controller-runtime/pkg/client" @@ -66,18 +64,9 @@ func (fb *featureBuilder) UsingConfig(config *rest.Config) *featureBuilder { return fb } -func createClients(config *rest.Config) partialBuilder { +func createClient(config *rest.Config) partialBuilder { return func(f *Feature) error { var err error - f.Clientset, err = kubernetes.NewForConfig(config) - if err != nil { - return err - } - - f.DynamicClient, err = dynamic.NewForConfig(config) - if err != nil { - return err - } f.Client, err = client.New(config, client.Options{}) if err != nil { @@ -173,7 +162,7 @@ func (fb *featureBuilder) Load() error { } } - if err := createClients(fb.config)(feature); err != nil { + if err := createClient(fb.config)(feature); err != nil { return err } diff --git a/pkg/feature/cert.go b/pkg/feature/cert.go index 64b6d05c17a..98959f792eb 100644 --- a/pkg/feature/cert.go +++ b/pkg/feature/cert.go @@ -8,6 +8,7 @@ import ( "crypto/x509" "crypto/x509/pkix" "encoding/pem" + "fmt" "math/big" "net" "strings" @@ -15,8 +16,8 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" infrav1 "github.com/opendatahub-io/opendatahub-operator/v2/infrastructure/v1" ) @@ -34,20 +35,13 @@ func (f *Feature) CreateSelfSignedCertificate(secretName string, certificateType }, } - cert, err := GenerateSelfSignedCertificateAsSecret(domain, meta) + certSecret, err := GenerateSelfSignedCertificateAsSecret(domain, meta) if err != nil { - return errors.WithStack(err) + return fmt.Errorf("failed 
generating self-signed certificate: %w", err) } - if err != nil { - return errors.WithStack(err) - } - - _, err = f.Clientset.CoreV1(). - Secrets(namespace). - Create(context.TODO(), cert, metav1.CreateOptions{}) - if err != nil && !k8serrors.IsAlreadyExists(err) { - return errors.WithStack(err) + if createErr := f.Client.Create(context.TODO(), certSecret); client.IgnoreAlreadyExists(createErr) != nil { + return fmt.Errorf("failed creating certificate secret: %w", createErr) } return nil @@ -74,9 +68,9 @@ func generateCertificate(addr string) ([]byte, []byte, error) { return nil, nil, errors.WithStack(err) } - seededRand, crypterr := rand.Int(rand.Reader, big.NewInt(time.Now().UnixNano())) - if err != nil { - return nil, nil, errors.WithStack(crypterr) + seededRand, cryptErr := rand.Int(rand.Reader, big.NewInt(time.Now().UnixNano())) + if cryptErr != nil { + return nil, nil, errors.WithStack(cryptErr) } now := time.Now() diff --git a/pkg/feature/conditions.go b/pkg/feature/conditions.go index 8ee161f96c4..04fd3bde9ad 100644 --- a/pkg/feature/conditions.go +++ b/pkg/feature/conditions.go @@ -6,7 +6,7 @@ import ( corev1 "k8s.io/api/core/v1" apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/wait" "sigs.k8s.io/controller-runtime/pkg/client" @@ -28,7 +28,9 @@ func WaitForPodsToBeReady(namespace string) Action { f.Log.Info("waiting for pods to become ready", "namespace", namespace, "duration (s)", duration.Seconds()) return wait.PollUntilContextTimeout(context.TODO(), interval, duration, false, func(ctx context.Context) (bool, error) { - podList, err := f.Clientset.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{}) + var podList corev1.PodList + + err := f.Client.List(context.TODO(), &podList, client.InNamespace(namespace)) if err != nil { return false, err } @@ -71,20 +73,23 @@ func WaitForPodsToBeReady(namespace string) Action { } } -func WaitForResourceToBeCreated(namespace string, gvr schema.GroupVersionResource) Action { +func WaitForResourceToBeCreated(namespace string, gvk schema.GroupVersionKind) Action { return func(f *Feature) error { - f.Log.Info("waiting for resource to be created", "namespace", namespace, "resource", gvr) + f.Log.Info("waiting for resource to be created", "namespace", namespace, "resource", gvk) return wait.PollUntilContextTimeout(context.TODO(), interval, duration, false, func(ctx context.Context) (bool, error) { - resources, err := f.DynamicClient.Resource(gvr).Namespace(namespace).List(context.TODO(), metav1.ListOptions{Limit: 1}) + list := &unstructured.UnstructuredList{} + list.SetGroupVersionKind(gvk) + + err := f.Client.List(context.TODO(), list, client.InNamespace(namespace), client.Limit(1)) if err != nil { - f.Log.Error(err, "failed waiting for resource", "namespace", namespace, "resource", gvr) + f.Log.Error(err, "failed waiting for resource", "namespace", namespace, "resource", gvk) return false, err } - if len(resources.Items) > 0 { - f.Log.Info("resource created", "namespace", namespace, "resource", gvr) + if len(list.Items) > 0 { + f.Log.Info("resource created", "namespace", namespace, "resource", gvk) return true, nil } diff --git a/pkg/feature/feature.go b/pkg/feature/feature.go index 6141915ca49..21aec158343 100644 --- a/pkg/feature/feature.go +++ b/pkg/feature/feature.go @@ -9,10 +9,7 @@ import ( conditionsv1 
"github.com/openshift/custom-resource-status/conditions/v1" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/kubernetes" "sigs.k8s.io/controller-runtime/pkg/client" ctrlLog "sigs.k8s.io/controller-runtime/pkg/log" @@ -25,9 +22,7 @@ type Feature struct { Enabled bool Tracker *featurev1.FeatureTracker - Clientset *kubernetes.Clientset - DynamicClient dynamic.Interface - Client client.Client + Client client.Client manifests []manifest @@ -148,37 +143,6 @@ func (f *Feature) applyManifests() error { return applyErrors.ErrorOrNil() } -func (f *Feature) CreateConfigMap(cfgMapName string, data map[string]string) error { - configMap := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: cfgMapName, - Namespace: f.Spec.AppNamespace, - OwnerReferences: []metav1.OwnerReference{ - f.AsOwnerReference(), - }, - }, - Data: data, - } - - configMaps := f.Clientset.CoreV1().ConfigMaps(configMap.Namespace) - found, err := configMaps.Get(context.TODO(), configMap.Name, metav1.GetOptions{}) - if k8serrors.IsNotFound(err) { //nolint:gocritic - _, err = configMaps.Create(context.TODO(), configMap, metav1.CreateOptions{}) - if err != nil { - return err - } - } else if found != nil { - _, err = configMaps.Update(context.TODO(), configMap, metav1.UpdateOptions{}) - if err != nil { - return err - } - } else { - return err - } - - return nil -} - func (f *Feature) addCleanup(cleanupFuncs ...Action) { f.cleanups = append(f.cleanups, cleanupFuncs...) } diff --git a/pkg/feature/raw_resources.go b/pkg/feature/raw_resources.go index e388737da5d..14159b72f15 100644 --- a/pkg/feature/raw_resources.go +++ b/pkg/feature/raw_resources.go @@ -24,8 +24,8 @@ import ( k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" k8stypes "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" ) const ( @@ -83,39 +83,18 @@ func (f *Feature) patchResources(resources string) error { u := &unstructured.Unstructured{} if err := yaml.Unmarshal([]byte(str), u); err != nil { f.Log.Error(err, "error unmarshalling yaml") - return errors.WithStack(err) } ensureNamespaceIsSet(f, u) - gvr := schema.GroupVersionResource{ - Group: strings.ToLower(u.GroupVersionKind().Group), - Version: u.GroupVersionKind().Version, - Resource: strings.ToLower(u.GroupVersionKind().Kind) + "s", - } - - // Convert the individual resource patch from YAML to JSON patchAsJSON, err := yaml.YAMLToJSON([]byte(str)) if err != nil { - f.Log.Error(err, "error converting yaml to json") - - return errors.WithStack(err) + return fmt.Errorf("error converting yaml to json: %w", err) } - _, err = f.DynamicClient.Resource(gvr). - Namespace(u.GetNamespace()). 
- Patch(context.TODO(), u.GetName(), k8stypes.MergePatchType, patchAsJSON, metav1.PatchOptions{}) - if err != nil { - f.Log.Error(err, "error patching resource", - "gvr", fmt.Sprintf("%+v\n", gvr), - "patch", fmt.Sprintf("%+v\n", u), - "json", fmt.Sprintf("%+v\n", patchAsJSON)) - return errors.WithStack(err) - } - - if err != nil { - return errors.WithStack(err) + if err := f.Client.Patch(context.TODO(), u, client.RawPatch(k8stypes.MergePatchType, patchAsJSON)); err != nil { + return fmt.Errorf("failed patching resource: %w", err) } } diff --git a/pkg/feature/serverless/conditions.go b/pkg/feature/serverless/conditions.go index 739e9ebd2e1..6396e76c9da 100644 --- a/pkg/feature/serverless/conditions.go +++ b/pkg/feature/serverless/conditions.go @@ -4,11 +4,12 @@ import ( "context" "fmt" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "sigs.k8s.io/controller-runtime/pkg/client" ctrlLog "sigs.k8s.io/controller-runtime/pkg/log" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/gvr" ) const ( @@ -18,9 +19,11 @@ const ( var log = ctrlLog.Log.WithName("features") func EnsureServerlessAbsent(f *feature.Feature) error { - list, err := f.DynamicClient.Resource(gvr.KnativeServing).Namespace("").List(context.TODO(), v1.ListOptions{}) - if err != nil { - return err + list := &unstructured.UnstructuredList{} + list.SetGroupVersionKind(cluster.KnativeServingGVK) + + if err := f.Client.List(context.TODO(), list, client.InNamespace("")); err != nil { + return fmt.Errorf("failed to list KnativeServings: %w", err) } if len(list.Items) == 0 { @@ -55,4 +58,4 @@ func EnsureServerlessOperatorInstalled(f *feature.Feature) error { return nil } -var EnsureServerlessServingDeployed = feature.WaitForResourceToBeCreated(KnativeServingNamespace, gvr.KnativeServing) +var EnsureServerlessServingDeployed = feature.WaitForResourceToBeCreated(KnativeServingNamespace, cluster.KnativeServingGVK) diff --git a/pkg/feature/serverless/loaders.go b/pkg/feature/serverless/loaders.go index 115034dbdcc..966021e39da 100644 --- a/pkg/feature/serverless/loaders.go +++ b/pkg/feature/serverless/loaders.go @@ -23,7 +23,7 @@ func ServingIngressDomain(f *feature.Feature) error { domain := strings.TrimSpace(f.Spec.Serving.IngressGateway.Domain) if len(domain) == 0 { var errDomain error - domain, errDomain = GetDomain(f.DynamicClient) + domain, errDomain = GetDomain(f.Client) if errDomain != nil { return fmt.Errorf("failed to fetch OpenShift domain to generate certificate for Serverless: %w", errDomain) } diff --git a/pkg/feature/serverless/resources.go b/pkg/feature/serverless/resources.go index d06e4f1ae21..5acf5514974 100644 --- a/pkg/feature/serverless/resources.go +++ b/pkg/feature/serverless/resources.go @@ -2,29 +2,35 @@ package serverless import ( "context" + "fmt" "github.com/pkg/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/client-go/dynamic" + "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/gvr" ) func ServingCertificateResource(f *feature.Feature) error { return f.CreateSelfSignedCertificate(f.Spec.KnativeCertificateSecret, f.Spec.Serving.IngressGateway.Certificate.Type, 
f.Spec.KnativeIngressDomain, f.Spec.ControlPlane.Namespace) } -func GetDomain(dynamicClient dynamic.Interface) (string, error) { - cluster, err := dynamicClient.Resource(gvr.OpenshiftIngress).Get(context.TODO(), "cluster", metav1.GetOptions{}) - if err != nil { - return "", err +func GetDomain(c client.Client) (string, error) { + ingress := &unstructured.Unstructured{} + ingress.SetGroupVersionKind(cluster.OpenshiftIngressGVK) + + if err := c.Get(context.TODO(), client.ObjectKey{ + Namespace: "", + Name: "cluster", + }, ingress); err != nil { + return "", fmt.Errorf("failed fetching cluster's ingress details: %w", err) } - domain, found, err := unstructured.NestedString(cluster.Object, "spec", "domain") + domain, found, err := unstructured.NestedString(ingress.Object, "spec", "domain") if !found { return "", errors.New("spec.domain not found") } + return domain, err } diff --git a/pkg/feature/servicemesh/conditions.go b/pkg/feature/servicemesh/conditions.go index e352c9545a7..947d811da76 100644 --- a/pkg/feature/servicemesh/conditions.go +++ b/pkg/feature/servicemesh/conditions.go @@ -7,13 +7,12 @@ import ( "github.com/hashicorp/go-multierror" "github.com/pkg/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/dynamic" + "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/gvr" ) const ( @@ -55,7 +54,7 @@ func WaitForControlPlaneToBeReady(f *feature.Feature) error { f.Log.Info("waiting for control plane components to be ready", "control-plane", smcp, "namespace", smcpNs, "duration (s)", duration.Seconds()) return wait.PollUntilContextTimeout(context.TODO(), interval, duration, false, func(ctx context.Context) (bool, error) { - ready, err := CheckControlPlaneComponentReadiness(f.DynamicClient, smcp, smcpNs) + ready, err := CheckControlPlaneComponentReadiness(f.Client, smcp, smcpNs) if ready { f.Log.Info("done waiting for control plane components to be ready", "control-plane", smcp, "namespace", smcpNs) @@ -65,13 +64,19 @@ func WaitForControlPlaneToBeReady(f *feature.Feature) error { }) } -func CheckControlPlaneComponentReadiness(dynamicClient dynamic.Interface, smcp, smcpNs string) (bool, error) { - unstructObj, err := dynamicClient.Resource(gvr.SMCP).Namespace(smcpNs).Get(context.TODO(), smcp, metav1.GetOptions{}) +func CheckControlPlaneComponentReadiness(c client.Client, smcpName, smcpNs string) (bool, error) { + smcpObj := &unstructured.Unstructured{} + smcpObj.SetGroupVersionKind(cluster.ServiceMeshControlPlaneGVK) + err := c.Get(context.TODO(), client.ObjectKey{ + Namespace: smcpNs, + Name: smcpName, + }, smcpObj) + if err != nil { return false, fmt.Errorf("failed to find Service Mesh Control Plane: %w", err) } - components, found, err := unstructured.NestedMap(unstructObj.Object, "status", "readiness", "components") + components, found, err := unstructured.NestedMap(smcpObj.Object, "status", "readiness", "components") if err != nil || !found { return false, fmt.Errorf("status conditions not found or error in parsing of Service Mesh Control Plane: %w", err) } diff --git a/pkg/gvr/gvr.go b/pkg/gvr/gvr.go deleted file mode 100644 index c8066ea4a12..00000000000 --- a/pkg/gvr/gvr.go +++ /dev/null @@ -1,35 +0,0 @@ -package gvr - -import "k8s.io/apimachinery/pkg/runtime/schema" - -var ( - KnativeServing = 
schema.GroupVersionResource{ - Group: "operator.knative.dev", - Version: "v1beta1", - Resource: "knativeservings", - } - - OpenshiftIngress = schema.GroupVersionResource{ - Group: "config.openshift.io", - Version: "v1", - Resource: "ingresses", - } - - FeatureTracker = schema.GroupVersionResource{ - Group: "features.opendatahub.io", - Version: "v1", - Resource: "featuretrackers", - } - - SMCP = schema.GroupVersionResource{ - Group: "maistra.io", - Version: "v2", - Resource: "servicemeshcontrolplanes", - } - - NetworkPolicies = schema.GroupVersionResource{ - Group: "networking.k8s.io", - Version: "v1", - Resource: "networkpolicies", - } -) diff --git a/tests/integration/features/features_int_test.go b/tests/integration/features/features_int_test.go index 3d52595614e..7e58e3a6714 100644 --- a/tests/integration/features/features_int_test.go +++ b/tests/integration/features/features_int_test.go @@ -568,11 +568,7 @@ func createSecret(name, namespace string) func(f *feature.Feature) error { }, } - _, err := f.Clientset.CoreV1(). - Secrets(namespace). - Create(context.TODO(), secret, metav1.CreateOptions{}) - - return err + return f.Client.Create(context.TODO(), secret) } } diff --git a/tests/integration/features/serverless_feature_test.go b/tests/integration/features/serverless_feature_test.go index 9aa18693065..a9b84bcbd3e 100644 --- a/tests/integration/features/serverless_feature_test.go +++ b/tests/integration/features/serverless_feature_test.go @@ -9,7 +9,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/client-go/dynamic" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" @@ -216,10 +215,6 @@ var _ = Describe("Serverless feature", func() { } testFeature.Client = envTestClient - testFeature.Clientset = envTestClientset - var err error - testFeature.DynamicClient, err = dynamic.NewForConfig(envTest.Config) - Expect(err).ToNot(HaveOccurred()) }) Context("ingress gateway TLS secret name", func() { diff --git a/tests/integration/features/servicemesh_feature_test.go b/tests/integration/features/servicemesh_feature_test.go index bf32225611c..2f9879f839f 100644 --- a/tests/integration/features/servicemesh_feature_test.go +++ b/tests/integration/features/servicemesh_feature_test.go @@ -14,9 +14,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/envtest" dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/servicemesh" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/gvr" "github.com/opendatahub-io/opendatahub-operator/v2/tests/envtestutil" . "github.com/onsi/ginkgo/v2" @@ -241,18 +241,13 @@ func createServiceMeshControlPlane(name, namespace string) { "spec": map[string]interface{}{}, }, } - Expect(createSMCPInCluster(envTest.Config, serviceMeshControlPlane, namespace)).To(Succeed()) + Expect(createSMCPInCluster(serviceMeshControlPlane, namespace)).To(Succeed()) } -// createSMCPInCluster uses dynamic client to create a dummy SMCP resource for testing. 
-func createSMCPInCluster(cfg *rest.Config, smcpObj *unstructured.Unstructured, namespace string) error { - dynamicClient, err := dynamic.NewForConfig(cfg) - if err != nil { - return err - } - - result, err := dynamicClient.Resource(gvr.SMCP).Namespace(namespace).Create(context.TODO(), smcpObj, metav1.CreateOptions{}) - if err != nil { +func createSMCPInCluster(smcpObj *unstructured.Unstructured, namespace string) error { + smcpObj.SetGroupVersionKind(cluster.ServiceMeshControlPlaneGVK) + smcpObj.SetNamespace(namespace) + if err := envTestClient.Create(context.TODO(), smcpObj); err != nil { return err } @@ -277,15 +272,10 @@ func createSMCPInCluster(cfg *rest.Config, smcpObj *unstructured.Unstructured, n }, }, } - - if err := unstructured.SetNestedField(result.Object, status, "status"); err != nil { - return err - } - - _, err = dynamicClient.Resource(gvr.SMCP).Namespace(namespace).UpdateStatus(context.TODO(), result, metav1.UpdateOptions{}) - if err != nil { + update := smcpObj.DeepCopy() + if err := unstructured.SetNestedField(update.Object, status, "status"); err != nil { return err } - return nil + return envTestClient.Status().Update(context.TODO(), update) } From 6376583098b7c9c61f2727bb8c5f8a486356bc0d Mon Sep 17 00:00:00 2001 From: Landon LaSmith <2432396+LaVLaS@users.noreply.github.com> Date: Mon, 19 Feb 2024 09:27:00 -0500 Subject: [PATCH 24/32] Add support for .env file in Makefile (#844) Signed-off-by: Landon LaSmith Co-authored-by: Wen Zhou (cherry picked from commit 6c038ce2ed99f022c04a9b78713ecbab0243fef4) --- .gitignore | 6 +++++- CONTRIBUTING.md | 14 ++++++++++++++ Makefile | 22 +++++++++++++--------- 3 files changed, 32 insertions(+), 10 deletions(-) diff --git a/.gitignore b/.gitignore index e4bfbc44e5b..584abfdd2bf 100644 --- a/.gitignore +++ b/.gitignore @@ -53,4 +53,8 @@ scripts/gke/build/** odh-manifests/ -cover.out \ No newline at end of file +cover.out + +# Ignore any local.mk files that would be consumed by the Makefile +local.mk + diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 335d187d4bc..525d14f9f9d 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -43,3 +43,17 @@ echo 'export PATH=${PATH}:~/bin' >> ~/.zshrc echo 'export GOPROXY=https://proxy.golang.org' >> ~/.zshrc ``` +## Using a local.mk file to override Makefile variables for your development environment + +To support the ability for a developer to customize the Makefile execution to support their development environment, you can create a `local.mk` file in the root of this repo to specify custom values that match your environment. + +``` +$ cat local.mk +VERSION=9.9.9 +IMAGE_TAG_BASE=quay.io/my-dev-env/opendatahub-operator +IMG_TAG=my-dev-tag +OPERATOR_NAMESPACE=my-dev-odh-operator-system +IMAGE_BUILD_FLAGS=--build-arg USE_LOCAL=true +E2E_TEST_FLAGS="--skip-deletion=true" -timeout 15m +``` + diff --git a/Makefile b/Makefile index bfdd5186681..b2357975a45 100644 --- a/Makefile +++ b/Makefile @@ -3,21 +3,16 @@ # To re-generate a bundle for another specific version without changing the standard setup, you can: # - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2) # - use environment variables to overwrite this value (e.g export VERSION=0.0.2) -ifneq ($(USER),) -IMAGE_OWNER = $(USER) -else -IMAGE_OWNER = opendatahub -endif -VERSION ?= 2.4.0 +VERSION ?= 2.7.0 # IMAGE_TAG_BASE defines the opendatahub.io namespace and part of the image name for remote images. # This variable is used to construct full image tags for bundle and catalog images. 
# # For example, running 'make bundle-build bundle-push catalog-build catalog-push' will build and push both # opendatahub.io/opendatahub-operator-bundle:$VERSION and opendatahub.io/opendatahub-operator-catalog:$VERSION. -IMAGE_TAG_BASE = quay.io/$(IMAGE_OWNER)/opendatahub-operator +IMAGE_TAG_BASE ?= quay.io/opendatahub/opendatahub-operator # keep the name based on IMG which already used from command line -IMG_TAG = latest +IMG_TAG ?= latest # Update IMG to a variable, to keep it consistent across versions for OpenShift CI IMG = $(IMAGE_TAG_BASE):$(IMG_TAG) # BUNDLE_IMG defines the image:tag used for the bundle. @@ -96,7 +91,16 @@ E2E_TEST_FLAGS = "--skip-deletion=false" -timeout 15m # See README.md, default g # Default image-build is to not use local odh-manifests folder # set to "true" to use local instead # see target "image-build" -IMAGE_BUILD_FLAGS = --build-arg USE_LOCAL=false +IMAGE_BUILD_FLAGS ?= --build-arg USE_LOCAL=false + +# Read any custom variables overrides from a local.mk file. This will only be read if it exists in the +# same directory as this Makefile. Variables can be specified in the standard format supported by +# GNU Make since `include` processes any valid Makefile +# Standard variables override would include anything you would pass at runtime that is different +# from the defaults specified in this file +OPERATOR_MAKE_ENV_FILE = local.mk +-include $(OPERATOR_MAKE_ENV_FILE) + .PHONY: default default: manifests lint unit-test build From fe41683a26f6d630173f5bb6d164d36f4f612de5 Mon Sep 17 00:00:00 2001 From: Wen Zhou Date: Mon, 19 Feb 2024 17:52:46 +0100 Subject: [PATCH 25/32] feat(nwp): add ingress from cluster-monitoring to applicationnamespace (#863) * feat(nwp): add ingress from cluster-monitoring to applicationnamespace Signed-off-by: Wen Zhou * update: code review and clean NWP not needed for now Signed-off-by: Wen Zhou --------- Signed-off-by: Wen Zhou (cherry picked from commit e7e398288e4866f021619f136814f245f31f6559) --- controllers/dscinitialization/utils.go | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/controllers/dscinitialization/utils.go b/controllers/dscinitialization/utils.go index fc87113204e..4fce7baf52f 100644 --- a/controllers/dscinitialization/utils.go +++ b/controllers/dscinitialization/utils.go @@ -238,7 +238,7 @@ func (r *DSCInitializationReconciler) reconcileDefaultNetworkPolicy(ctx context. r.Log.Error(err, "error to set networkpolicy in applications namespace", "path", networkpolicyPath) return err } - } else { // Expected namespace for the given name + } else { // Expected namespace for the given name in ODH desiredNetworkPolicy := &netv1.NetworkPolicy{ TypeMeta: metav1.TypeMeta{ Kind: "NetworkPolicy", @@ -255,7 +255,11 @@ func (r *DSCInitializationReconciler) reconcileDefaultNetworkPolicy(ctx context. Ingress: []netv1.NetworkPolicyIngressRule{ { From: []netv1.NetworkPolicyPeer{ - { + { /* allow ODH namespace <->ODH namespace: + - default notebook project: rhods-notebooks + - redhat-odh-monitoring + - redhat-odh-applications / opendatahub + */ NamespaceSelector: &metav1.LabelSelector{ // AND logic MatchLabels: map[string]string{ cluster.ODHGeneratedNamespaceLabel: "true", @@ -266,7 +270,7 @@ func (r *DSCInitializationReconciler) reconcileDefaultNetworkPolicy(ctx context. 
}, { // OR logic From: []netv1.NetworkPolicyPeer{ - { // need this for access dashboard + { // need this to access external-> dashboard NamespaceSelector: &metav1.LabelSelector{ MatchLabels: map[string]string{ "network.openshift.io/policy-group": "ingress", @@ -277,7 +281,7 @@ func (r *DSCInitializationReconciler) reconcileDefaultNetworkPolicy(ctx context. }, { // OR logic for PSI From: []netv1.NetworkPolicyPeer{ - { // need this to access dashboard + { // need this to access external->dashboard NamespaceSelector: &metav1.LabelSelector{ MatchLabels: map[string]string{ "kubernetes.io/metadata.name": "openshift-host-network", @@ -286,6 +290,17 @@ func (r *DSCInitializationReconciler) reconcileDefaultNetworkPolicy(ctx context. }, }, }, + { + From: []netv1.NetworkPolicyPeer{ + { // need this for cluster-monitoring work: cluster-monitoring->ODH namespaces + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "kubernetes.io/metadata.name": "openshift-monitoring", + }, + }, + }, + }, + }, }, PolicyTypes: []netv1.PolicyType{ netv1.PolicyTypeIngress, From 4c222cf8408c411ea0a7936488a88cbebaf3052e Mon Sep 17 00:00:00 2001 From: Vaishnavi Hire Date: Mon, 19 Feb 2024 18:58:05 -0500 Subject: [PATCH 26/32] Fix Kserve Unmanaged Installation (#860) * Fix Kserve Unmanaged Installation * Update tests for kserve (cherry picked from commit b467ed7c7b47b747b1f3a026ee31b387852d9333) --- components/kserve/kserve.go | 15 ++++++++------- tests/e2e/dsc_creation_test.go | 12 +++++------- tests/e2e/helper_test.go | 4 ++++ 3 files changed, 17 insertions(+), 14 deletions(-) diff --git a/components/kserve/kserve.go b/components/kserve/kserve.go index acc2c9c0db6..b3b415df22e 100644 --- a/components/kserve/kserve.go +++ b/components/kserve/kserve.go @@ -107,13 +107,8 @@ func (k *Kserve) ReconcileComponent(ctx context.Context, cli client.Client, resC return err } } - // check on dependent operators if all installed in cluster - dependOpsErrors := checkDependentOperators(cli).ErrorOrNil() - if dependOpsErrors != nil { - return dependOpsErrors - } - if err := k.configureServerless(dscispec); err != nil { + if err := k.configureServerless(cli, dscispec); err != nil { return err } @@ -169,7 +164,7 @@ func (k *Kserve) Cleanup(_ client.Client, instance *dsciv1.DSCInitializationSpec return k.removeServerlessFeatures(instance) } -func (k *Kserve) configureServerless(instance *dsciv1.DSCInitializationSpec) error { +func (k *Kserve) configureServerless(cli client.Client, instance *dsciv1.DSCInitializationSpec) error { switch k.Serving.ManagementState { case operatorv1.Unmanaged: // Bring your own CR fmt.Println("Serverless CR is not configured by the operator, we won't do anything") @@ -186,6 +181,12 @@ func (k *Kserve) configureServerless(instance *dsciv1.DSCInitializationSpec) err return fmt.Errorf("ServiceMesh is need to set to 'Managed' in DSCI CR, it is required by KServe serving field") } + // check on dependent operators if all installed in cluster + dependOpsErrors := checkDependentOperators(cli).ErrorOrNil() + if dependOpsErrors != nil { + return dependOpsErrors + } + serverlessFeatures := feature.ComponentFeaturesHandler(k, instance, k.configureServerlessFeatures()) if err := serverlessFeatures.Apply(); err != nil { diff --git a/tests/e2e/dsc_creation_test.go b/tests/e2e/dsc_creation_test.go index 1d44ced7c89..b5fcc71b888 100644 --- a/tests/e2e/dsc_creation_test.go +++ b/tests/e2e/dsc_creation_test.go @@ -178,14 +178,12 @@ func (tc *testContext) testAllApplicationCreation(t *testing.T) error { 
//nolint // speed testing in parallel t.Parallel() err = tc.testApplicationCreation(&(tc.testDsc.Spec.Components.Kserve)) + // test Unmanaged state, since servicemesh is not installed. if tc.testDsc.Spec.Components.Kserve.ManagementState == operatorv1.Managed { - if err != nil { - // depedent operator error, as expected - if strings.Contains(err.Error(), "Please install the operator before enabling component") { - t.Logf("expected error: %v", err.Error()) - } else { - require.NoError(t, err, "error validating application %v when enabled", tc.testDsc.Spec.Components.Kserve.GetComponentName()) - } + if err != nil && tc.testDsc.Spec.Components.Kserve.Serving.ManagementState == operatorv1.Unmanaged { + require.NoError(t, err, "error validating application %v when enabled", tc.testDsc.Spec.Components.Workbenches.GetComponentName()) + } else { + require.NoError(t, err, "error validating application %v when enabled", tc.testDsc.Spec.Components.Kserve.GetComponentName()) } } else { require.Error(t, err, "error validating application %v when disabled", tc.testDsc.Spec.Components.Kserve.GetComponentName()) diff --git a/tests/e2e/helper_test.go b/tests/e2e/helper_test.go index 5019804c9ef..592a95cff4b 100644 --- a/tests/e2e/helper_test.go +++ b/tests/e2e/helper_test.go @@ -25,6 +25,7 @@ import ( "github.com/opendatahub-io/opendatahub-operator/v2/components/ray" "github.com/opendatahub-io/opendatahub-operator/v2/components/trustyai" "github.com/opendatahub-io/opendatahub-operator/v2/components/workbenches" + infrav1 "github.com/opendatahub-io/opendatahub-operator/v2/infrastructure/v1" ) func (tc *testContext) waitForControllerDeployment(name string, replicas int32) error { @@ -104,6 +105,9 @@ func setupDSCInstance() *dsc.DataScienceCluster { Component: components.Component{ ManagementState: operatorv1.Managed, }, + Serving: infrav1.ServingSpec{ + ManagementState: operatorv1.Unmanaged, + }, }, CodeFlare: codeflare.CodeFlare{ Component: components.Component{ From a01819d2c22912c0cbe5152408cacd5715783b82 Mon Sep 17 00:00:00 2001 From: Wen Zhou Date: Thu, 22 Feb 2024 19:54:34 +0100 Subject: [PATCH 27/32] update: add cleanup for rhods-monitor-federation2 (#861) Signed-off-by: Wen Zhou (cherry picked from commit bef7e86a374cd1ae49b52ed28478a633278d1f1e) --- pkg/upgrade/upgrade.go | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/pkg/upgrade/upgrade.go b/pkg/upgrade/upgrade.go index 2b133e124d6..c1f12d1214d 100644 --- a/pkg/upgrade/upgrade.go +++ b/pkg/upgrade/upgrade.go @@ -20,7 +20,6 @@ import ( apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" @@ -49,11 +48,6 @@ const ( DeleteConfigMapLabel = "api.openshift.com/addon-managed-odh-delete" ) -type Resource interface { - metav1.Object - runtime.Object -} - // OperatorUninstall deletes all the externally generated resources. This includes monitoring resources and applications // installed by KfDef. 
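// A minimal sketch, not part of this commit: CleanupExistingResource below drives helpers
// such as deleteDeprecatedResources(ctx, cli, namespace, names, &appsv1.DeploymentList{}),
// whose implementation is outside this diff. The pattern is: list a resource kind in the
// monitoring namespace, delete only the entries whose names are on the deprecated list,
// and collect every failure with go-multierror. This version is specialized to
// Deployments for brevity; the function name is illustrative.
package sketch

import (
	"context"

	"github.com/hashicorp/go-multierror"
	appsv1 "k8s.io/api/apps/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func deleteDeprecatedDeployments(ctx context.Context, cli client.Client, namespace string, deprecated []string) error {
	var multiErr *multierror.Error

	deployments := &appsv1.DeploymentList{}
	if err := cli.List(ctx, deployments, client.InNamespace(namespace)); err != nil {
		return err
	}

	names := make(map[string]bool, len(deprecated))
	for _, name := range deprecated {
		names[name] = true
	}

	for i := range deployments.Items {
		d := &deployments.Items[i]
		if names[d.Name] {
			// Delete errors (including a racing NotFound) are aggregated instead of
			// aborting the loop, matching how the callers below combine results.
			multiErr = multierror.Append(multiErr, cli.Delete(ctx, d))
		}
	}
	return multiErr.ErrorOrNil()
}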
func OperatorUninstall(cli client.Client, cfg *rest.Config) error { @@ -354,11 +348,11 @@ func UpdateFromLegacyVersion(cli client.Client, platform deploy.Platform, appNS func CleanupExistingResource(cli client.Client, platform deploy.Platform) error { var multiErr *multierror.Error + montNamespace := "redhat-ods-monitoring" + ctx := context.TODO() // Special Handling of cleanup of deprecated model monitoring stack if platform == deploy.ManagedRhods { - montNamespace := "redhat-ods-monitoring" - ctx := context.TODO() deprecatedDeployments := []string{"rhods-prometheus-operator"} multiErr = multierror.Append(multiErr, deleteDeprecatedResources(ctx, cli, montNamespace, deprecatedDeployments, &appsv1.DeploymentList{})) @@ -386,6 +380,9 @@ func CleanupExistingResource(cli client.Client, platform deploy.Platform) error deprecatedServicemonitors := []string{"modelmesh-federated-metrics"} multiErr = multierror.Append(multiErr, deleteDeprecatedServiceMonitors(ctx, cli, montNamespace, deprecatedServicemonitors)) } + // common logic for both self-managed and managed + deprecatedOperatorSM := []string{"rhods-monitor-federation2"} + multiErr = multierror.Append(multiErr, deleteDeprecatedServiceMonitors(ctx, cli, montNamespace, deprecatedOperatorSM)) return multiErr.ErrorOrNil() } From fec85d2a154825faa46f355699edf3744c271555 Mon Sep 17 00:00:00 2001 From: Wen Zhou Date: Thu, 22 Feb 2024 19:55:08 +0100 Subject: [PATCH 28/32] update(rbac): (#866) - add clusterrolebinding prometheus-k8s-viewer - add clusterrole prometheus-k8s-viewer - this should be applied for both ODH and downstream - add prometheus folder from config for bundle - cleanup config for parts as duplicated: - tokenreviews - subjectaccessreviews - remove leader_election related rbac, we dont use them - remove dsci rbac fron config, we dont need them - Signed-off-by: Wen Zhou (cherry picked from commit 6bc16b03ead4b38bdb6c6459f645ca3115cab3f6) --- ...c.authorization.k8s.io_v1_clusterrole.yaml | 16 ++++++++ ...rization.k8s.io_v1_clusterrolebinding.yaml | 9 +++-- .../rhods-operator.clusterserviceversion.yaml | 5 --- config/default/manager_config_patch.yaml | 20 ---------- config/manifests/kustomization.yaml | 3 +- config/prometheus/kustomization.yaml | 3 +- config/prometheus/prom_clusterrole.yaml | 15 ++++++++ .../prometheus/prom_clusterrolebinding.yaml | 12 ++++++ config/rbac/auth_proxy_role.yaml | 17 --------- config/rbac/auth_proxy_service.yaml | 2 +- ...ahub.io_dscinitialization_editor_role.yaml | 24 ------------ ...ahub.io_dscinitialization_viewer_role.yaml | 20 ---------- config/rbac/kustomization.yaml | 11 +----- config/rbac/leader_election_role.yaml | 37 ------------------- config/rbac/leader_election_role_binding.yaml | 12 ------ 15 files changed, 55 insertions(+), 151 deletions(-) create mode 100644 bundle/manifests/prometheus-k8s-viewer_rbac.authorization.k8s.io_v1_clusterrole.yaml rename config/rbac/auth_proxy_role_binding.yaml => bundle/manifests/prometheus-k8s-viewer_rbac.authorization.k8s.io_v1_clusterrolebinding.yaml (55%) delete mode 100644 config/default/manager_config_patch.yaml create mode 100644 config/prometheus/prom_clusterrole.yaml create mode 100644 config/prometheus/prom_clusterrolebinding.yaml delete mode 100644 config/rbac/auth_proxy_role.yaml delete mode 100644 config/rbac/dscinitialization.opendatahub.io_dscinitialization_editor_role.yaml delete mode 100644 config/rbac/dscinitialization.opendatahub.io_dscinitialization_viewer_role.yaml delete mode 100644 config/rbac/leader_election_role.yaml delete mode 100644 
config/rbac/leader_election_role_binding.yaml diff --git a/bundle/manifests/prometheus-k8s-viewer_rbac.authorization.k8s.io_v1_clusterrole.yaml b/bundle/manifests/prometheus-k8s-viewer_rbac.authorization.k8s.io_v1_clusterrole.yaml new file mode 100644 index 00000000000..3109a48e21d --- /dev/null +++ b/bundle/manifests/prometheus-k8s-viewer_rbac.authorization.k8s.io_v1_clusterrole.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: prometheus-k8s-viewer +rules: +- apiGroups: + - "" + resources: + - pods + - services + - endpoints + verbs: + - get + - watch + - list diff --git a/config/rbac/auth_proxy_role_binding.yaml b/bundle/manifests/prometheus-k8s-viewer_rbac.authorization.k8s.io_v1_clusterrolebinding.yaml similarity index 55% rename from config/rbac/auth_proxy_role_binding.yaml rename to bundle/manifests/prometheus-k8s-viewer_rbac.authorization.k8s.io_v1_clusterrolebinding.yaml index ec7acc0a1b7..e88728f1c53 100644 --- a/config/rbac/auth_proxy_role_binding.yaml +++ b/bundle/manifests/prometheus-k8s-viewer_rbac.authorization.k8s.io_v1_clusterrolebinding.yaml @@ -1,12 +1,13 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: proxy-rolebinding + creationTimestamp: null + name: prometheus-k8s-viewer roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: proxy-role + name: prometheus-k8s-viewer subjects: - kind: ServiceAccount - name: controller-manager - namespace: system + name: prometheus-k8s + namespace: openshift-monitoring diff --git a/bundle/manifests/rhods-operator.clusterserviceversion.yaml b/bundle/manifests/rhods-operator.clusterserviceversion.yaml index 2b2d3a06b95..9aec4487f3f 100644 --- a/bundle/manifests/rhods-operator.clusterserviceversion.yaml +++ b/bundle/manifests/rhods-operator.clusterserviceversion.yaml @@ -1809,11 +1809,6 @@ spec: requests: cpu: 500m memory: 256Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL securityContext: runAsNonRoot: true serviceAccountName: redhat-ods-operator-controller-manager diff --git a/config/default/manager_config_patch.yaml b/config/default/manager_config_patch.yaml deleted file mode 100644 index 6c400155cfb..00000000000 --- a/config/default/manager_config_patch.yaml +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: controller-manager - namespace: system -spec: - template: - spec: - containers: - - name: manager - args: - - "--config=controller_manager_config.yaml" - volumeMounts: - - name: manager-config - mountPath: /controller_manager_config.yaml - subPath: controller_manager_config.yaml - volumes: - - name: manager-config - configMap: - name: manager-config diff --git a/config/manifests/kustomization.yaml b/config/manifests/kustomization.yaml index 940e327e0fa..bea4b1f7217 100644 --- a/config/manifests/kustomization.yaml +++ b/config/manifests/kustomization.yaml @@ -6,7 +6,8 @@ kind: Kustomization resources: - bases/rhods-operator.clusterserviceversion.yaml - ../default -- ../samples +- ../prometheus +- ../samples # to generate CSV alm-example - ../scorecard patches: diff --git a/config/prometheus/kustomization.yaml b/config/prometheus/kustomization.yaml index 7f6a57fbe15..86458dbecb0 100644 --- a/config/prometheus/kustomization.yaml +++ b/config/prometheus/kustomization.yaml @@ -1,4 +1,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: -- monitor.yaml +- prom_clusterrole.yaml +- 
prom_clusterrolebinding.yaml diff --git a/config/prometheus/prom_clusterrole.yaml b/config/prometheus/prom_clusterrole.yaml new file mode 100644 index 00000000000..cc55247cb73 --- /dev/null +++ b/config/prometheus/prom_clusterrole.yaml @@ -0,0 +1,15 @@ +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: prometheus-k8s-viewer +rules: + - apiGroups: + - '' + verbs: + - get + - watch + - list + resources: + - pods + - services + - endpoints diff --git a/config/prometheus/prom_clusterrolebinding.yaml b/config/prometheus/prom_clusterrolebinding.yaml new file mode 100644 index 00000000000..645f130ca14 --- /dev/null +++ b/config/prometheus/prom_clusterrolebinding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: prometheus-k8s-viewer +subjects: + - kind: ServiceAccount + name: prometheus-k8s + namespace: openshift-monitoring +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: prometheus-k8s-viewer \ No newline at end of file diff --git a/config/rbac/auth_proxy_role.yaml b/config/rbac/auth_proxy_role.yaml deleted file mode 100644 index 80e1857c594..00000000000 --- a/config/rbac/auth_proxy_role.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: proxy-role -rules: -- apiGroups: - - authentication.k8s.io - resources: - - tokenreviews - verbs: - - create -- apiGroups: - - authorization.k8s.io - resources: - - subjectaccessreviews - verbs: - - create diff --git a/config/rbac/auth_proxy_service.yaml b/config/rbac/auth_proxy_service.yaml index 2e46fc2b55f..65b4e67a891 100644 --- a/config/rbac/auth_proxy_service.yaml +++ b/config/rbac/auth_proxy_service.yaml @@ -12,4 +12,4 @@ spec: protocol: TCP targetPort: 8080 selector: - control-plane: controller-manager + control-plane: controller-manager \ No newline at end of file diff --git a/config/rbac/dscinitialization.opendatahub.io_dscinitialization_editor_role.yaml b/config/rbac/dscinitialization.opendatahub.io_dscinitialization_editor_role.yaml deleted file mode 100644 index a832c3aed02..00000000000 --- a/config/rbac/dscinitialization.opendatahub.io_dscinitialization_editor_role.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# permissions for end users to edit dscinitializations. -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: dscinitialization-editor-role -rules: -- apiGroups: - - dscinitialization.opendatahub.io.opendatahub.io - resources: - - dscinitializations - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - dscinitialization.opendatahub.io.opendatahub.io - resources: - - dscinitializations/status - verbs: - - get diff --git a/config/rbac/dscinitialization.opendatahub.io_dscinitialization_viewer_role.yaml b/config/rbac/dscinitialization.opendatahub.io_dscinitialization_viewer_role.yaml deleted file mode 100644 index df72ff59e80..00000000000 --- a/config/rbac/dscinitialization.opendatahub.io_dscinitialization_viewer_role.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# permissions for end users to view dscinitializations. 
-apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: dscinitialization-viewer-role -rules: -- apiGroups: - - dscinitialization.opendatahub.io.opendatahub.io - resources: - - dscinitializations - verbs: - - get - - list - - watch -- apiGroups: - - dscinitialization.opendatahub.io.opendatahub.io - resources: - - dscinitializations/status - verbs: - - get diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml index c15bf590e11..63c426323d0 100644 --- a/config/rbac/kustomization.yaml +++ b/config/rbac/kustomization.yaml @@ -8,13 +8,6 @@ kind: Kustomization resources: - service_account.yaml - role.yaml -- role_binding.yaml -- leader_election_role.yaml -- leader_election_role_binding.yaml -# Comment the following 4 lines if you want to disable -# the auth proxy (https://github.com/brancz/kube-rbac-proxy) -# which protects your /metrics endpoint. -- auth_proxy_service.yaml -- auth_proxy_role.yaml -- auth_proxy_role_binding.yaml +- role_binding.yaml # add role with binding for SA in CSV - auth_proxy_client_clusterrole.yaml +- auth_proxy_service.yaml # for ODH operator to scrape metrics diff --git a/config/rbac/leader_election_role.yaml b/config/rbac/leader_election_role.yaml deleted file mode 100644 index 1e4984e7c75..00000000000 --- a/config/rbac/leader_election_role.yaml +++ /dev/null @@ -1,37 +0,0 @@ -# permissions to do leader election. -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: leader-election-role -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - get - - list - - watch - - create - - update - - patch - - delete -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - get - - list - - watch - - create - - update - - patch - - delete -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch \ No newline at end of file diff --git a/config/rbac/leader_election_role_binding.yaml b/config/rbac/leader_election_role_binding.yaml deleted file mode 100644 index 1d1321ed4f0..00000000000 --- a/config/rbac/leader_election_role_binding.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: leader-election-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: leader-election-role -subjects: -- kind: ServiceAccount - name: controller-manager - namespace: system From cecc8dab7355e5d40ac6de6c4f775935368a93d2 Mon Sep 17 00:00:00 2001 From: Vaishnavi Hire Date: Thu, 22 Feb 2024 17:54:46 -0500 Subject: [PATCH 29/32] Fix lint --- controllers/dscinitialization/dscinitialization_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/controllers/dscinitialization/dscinitialization_test.go b/controllers/dscinitialization/dscinitialization_test.go index 6a726a1998b..61d6b5e09ff 100644 --- a/controllers/dscinitialization/dscinitialization_test.go +++ b/controllers/dscinitialization/dscinitialization_test.go @@ -154,7 +154,6 @@ var _ = Describe("DataScienceCluster initialization", func() { Context("Handling existing resources", func() { AfterEach(cleanupResources) - const applicationName = "default-dsci" It("Should not have more than one DSCI instance in the cluster", func() { From 9392209531ffaa80429f17aa19c3b6e5c5774efc Mon Sep 17 00:00:00 2001 From: Vaishnavi Hire Date: Mon, 19 Feb 2024 16:39:46 -0500 Subject: [PATCH 30/32] Remove Trustyai installation in downstream (cherry picked from commit 0be94f076382846385497ed594717415aec3b490) (cherry picked from commit 
d7ccc88f6b185282b665a87f280f4bc9ce5d5661) --- .../rhods-operator.clusterserviceversion.yaml | 6 ---- components/trustyai/trustyai.go | 8 +++++ .../rhods-operator.clusterserviceversion.yaml | 13 ++++----- ...asciencecluster_v1_datasciencecluster.yaml | 2 -- .../datasciencecluster_controller.go | 12 ++++++-- controllers/status/status.go | 16 ++++++++-- main.go | 6 ++++ pkg/upgrade/upgrade.go | 29 ++++++++++++++++++- 8 files changed, 70 insertions(+), 22 deletions(-) diff --git a/bundle/manifests/rhods-operator.clusterserviceversion.yaml b/bundle/manifests/rhods-operator.clusterserviceversion.yaml index 9aec4487f3f..9417d9431be 100644 --- a/bundle/manifests/rhods-operator.clusterserviceversion.yaml +++ b/bundle/manifests/rhods-operator.clusterserviceversion.yaml @@ -49,9 +49,6 @@ metadata: "ray": { "managementState": "Removed" }, - "trustyai": { - "managementState": "Managed" - }, "workbenches": { "managementState": "Managed" } @@ -160,9 +157,6 @@ metadata: }, "workbenches": { "managementState": "Managed" - }, - "trustyai": { - "managementState": "Managed" } } } diff --git a/components/trustyai/trustyai.go b/components/trustyai/trustyai.go index 5cc182a0e91..29eedc4d89e 100644 --- a/components/trustyai/trustyai.go +++ b/components/trustyai/trustyai.go @@ -26,6 +26,9 @@ var ( var _ components.ComponentInterface = (*TrustyAI)(nil) // TrustyAI struct holds the configuration for the TrustyAI component. +// ## DEPRECATED ## : Installation of TrustyAI operator is deprecated in downstream(RHOAI). +// If TrustyAI operator is installed, it will be removed +// Changes in managemenstState are not supported. // +kubebuilder:object:generate=true type TrustyAI struct { components.Component `json:""` @@ -65,6 +68,11 @@ func (t *TrustyAI) ReconcileComponent(ctx context.Context, cli client.Client, re return err } + //TODO: Remove manifests for trustyai in 2.8 + if platform == deploy.SelfManagedRhods || platform == deploy.ManagedRhods { + enabled = false + } + if enabled { if t.DevFlags != nil { // Download manifests and update paths diff --git a/config/manifests/bases/rhods-operator.clusterserviceversion.yaml b/config/manifests/bases/rhods-operator.clusterserviceversion.yaml index aedbae1a6d1..6da19c778bf 100644 --- a/config/manifests/bases/rhods-operator.clusterserviceversion.yaml +++ b/config/manifests/bases/rhods-operator.clusterserviceversion.yaml @@ -6,9 +6,9 @@ metadata: capabilities: Full Lifecycle categories: AI/Machine Learning, Big Data certified: "False" - containerImage: quay.io/opendatahub/opendatahub-operator:v2.4.0 + containerImage: quay.io/opendatahub/opendatahub-operator:v2.7.0 createdAt: "2023-8-23T00:00:00Z" - olm.skipRange: '>=1.0.0 <2.0.0' + olm.skipRange: '>=1.0.0 <2.7.0' operatorframework.io/initialization-resource: |- { "apiVersion": "datasciencecluster.opendatahub.io/v1", @@ -58,15 +58,12 @@ metadata: "workbenches": { "managementState": "Managed" }, - "trustyai": { - "managementState": "Managed" - } } } } operators.operatorframework.io/internal-objects: '[dscinitialization.opendatahub.io]' repository: https://github.com/red-hat-data-services/rhods-operator - name: rhods-operator.v2.4.0 + name: rhods-operator.v2.7.0 namespace: placeholder spec: apiservicedefinitions: {} @@ -138,11 +135,11 @@ spec: - training - kserve - distributed-workloads - - trustyai links: - name: Red Hat OpenShift AI url: https://www.redhat.com/en/technologies/cloud-computing/openshift/openshift-ai minKubeVersion: 1.22.0 provider: name: Red Hat - version: 2.4.0 + replaces: rhods-operator.v2.6.0 + version: 
2.7.0 diff --git a/config/samples/datasciencecluster_v1_datasciencecluster.yaml b/config/samples/datasciencecluster_v1_datasciencecluster.yaml index b7e30473dac..631141e756f 100644 --- a/config/samples/datasciencecluster_v1_datasciencecluster.yaml +++ b/config/samples/datasciencecluster_v1_datasciencecluster.yaml @@ -36,5 +36,3 @@ spec: managementState: "Removed" workbenches: managementState: "Managed" - trustyai: - managementState: "Managed" diff --git a/controllers/datasciencecluster/datasciencecluster_controller.go b/controllers/datasciencecluster/datasciencecluster_controller.go index 3adb0b1ba34..3010faa80f8 100644 --- a/controllers/datasciencecluster/datasciencecluster_controller.go +++ b/controllers/datasciencecluster/datasciencecluster_controller.go @@ -55,6 +55,7 @@ import ( dsc "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" dsci "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" "github.com/opendatahub-io/opendatahub-operator/v2/components" + "github.com/opendatahub-io/opendatahub-operator/v2/components/trustyai" "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/upgrade" ) @@ -278,9 +279,14 @@ func (r *DataScienceClusterReconciler) reconcileSubComponent(ctx context.Context enabled := component.GetManagementState() == v1.Managed // First set conditions to reflect a component is about to be reconciled instance, err := r.updateStatus(ctx, instance, func(saved *dsc.DataScienceCluster) { - message := "Component is disabled" - if enabled { - message = "Component is enabled" + var message string + if componentName == trustyai.ComponentName { + message = "TrustyAI is deprecated. Component state is disabled." + } else { + message = "Component is disabled" + if enabled { + message = "Component is enabled" + } } status.SetComponentCondition(&saved.Status.Conditions, componentName, status.ReconcileInit, message, corev1.ConditionUnknown) }) diff --git a/controllers/status/status.go b/controllers/status/status.go index d664903938b..d0e8865b851 100644 --- a/controllers/status/status.go +++ b/controllers/status/status.go @@ -21,6 +21,8 @@ package status import ( conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" corev1 "k8s.io/api/core/v1" + + "github.com/opendatahub-io/opendatahub-operator/v2/components/trustyai" ) // These constants represent the overall Phase as used by .Status.Phase. @@ -179,7 +181,12 @@ func SetCompleteCondition(conditions *[]conditionsv1.Condition, reason string, m // SetComponentCondition appends Condition Type with const ReadySuffix for given component // when component finished reconcile. func SetComponentCondition(conditions *[]conditionsv1.Condition, component string, reason string, message string, status corev1.ConditionStatus) { - condtype := component + ReadySuffix + var condtype string + if component == trustyai.ComponentName { + condtype = component + "Deprecated" + } else { + condtype = component + ReadySuffix + } conditionsv1.SetStatusCondition(conditions, conditionsv1.Condition{ Type: conditionsv1.ConditionType(condtype), Status: status, @@ -190,6 +197,11 @@ func SetComponentCondition(conditions *[]conditionsv1.Condition, component strin // RemoveComponentCondition remove Condition of giving component. 
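// A minimal sketch, an illustration rather than part of this commit: SetComponentCondition
// above and RemoveComponentCondition just below now repeat the same branch to pick the
// condition type for the deprecated TrustyAI component. Deriving the type in one helper
// keeps the two call sites in sync; the helper name and the local ReadySuffix constant
// (assumed to be "Ready") are illustrative.
package sketch

import (
	"github.com/opendatahub-io/opendatahub-operator/v2/components/trustyai"
)

// ReadySuffix mirrors the suffix the status package appends for healthy components.
const ReadySuffix = "Ready"

// componentConditionType returns "<name>Ready" for regular components and
// "<name>Deprecated" for the deprecated TrustyAI component.
func componentConditionType(component string) string {
	if component == trustyai.ComponentName {
		return component + "Deprecated"
	}
	return component + ReadySuffix
}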
func RemoveComponentCondition(conditions *[]conditionsv1.Condition, component string) { - condType := component + ReadySuffix + var condType string + if component == trustyai.ComponentName { + condType = component + "Deprecated" + } else { + condType = component + ReadySuffix + } conditionsv1.RemoveStatusCondition(conditions, conditionsv1.ConditionType(condType)) } diff --git a/main.go b/main.go index cf76380826c..67f46d3b772 100644 --- a/main.go +++ b/main.go @@ -213,6 +213,12 @@ func main() { setupLog.Error(err, "unable to update from legacy operator version") } + // Remove TrustyAI forRHOAI + // TODO: Remove below check in 2.8 version + if err = upgrade.RemoveDeprecatedTrustyAI(setupClient, platform); err != nil { + setupLog.Error(err, "unable to remove trustyai from DSC") + } + if err = upgrade.CleanupExistingResource(setupClient, platform); err != nil { setupLog.Error(err, "unable to perform cleanup") } diff --git a/pkg/upgrade/upgrade.go b/pkg/upgrade/upgrade.go index c1f12d1214d..1a199efdbfc 100644 --- a/pkg/upgrade/upgrade.go +++ b/pkg/upgrade/upgrade.go @@ -35,7 +35,6 @@ import ( "github.com/opendatahub-io/opendatahub-operator/v2/components/kueue" "github.com/opendatahub-io/opendatahub-operator/v2/components/modelmeshserving" "github.com/opendatahub-io/opendatahub-operator/v2/components/ray" - "github.com/opendatahub-io/opendatahub-operator/v2/components/trustyai" "github.com/opendatahub-io/opendatahub-operator/v2/components/workbenches" infrav1 "github.com/opendatahub-io/opendatahub-operator/v2/infrastructure/v1" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" @@ -200,12 +199,15 @@ func CreateDefaultDSC(cli client.Client, _ deploy.Platform) error { Ray: ray.Ray{ Component: components.Component{ManagementState: operatorv1.Removed}, }, +<<<<<<< HEAD Kueue: kueue.Kueue{ Component: components.Component{ManagementState: operatorv1.Removed}, }, TrustyAI: trustyai.TrustyAI{ Component: components.Component{ManagementState: operatorv1.Managed}, }, +======= +>>>>>>> d7ccc88f (Remove Trustyai installation in downstream) }, }, } @@ -683,3 +685,28 @@ func deleteStatefulsetsAndCheck(ctx context.Context, cli client.Client, namespac return true, multiErr.ErrorOrNil() } + +func RemoveDeprecatedTrustyAI(cli client.Client, platform deploy.Platform) error { + existingDSCList := &dsc.DataScienceClusterList{} + err := cli.List(context.TODO(), existingDSCList) + if err != nil { + return fmt.Errorf("error getting existing DSC: %w", err) + } + + switch len(existingDSCList.Items) { + case 0: + return nil + case 1: + existingDSC := existingDSCList.Items[0] + if platform == deploy.ManagedRhods || platform == deploy.SelfManagedRhods { + if existingDSC.Spec.Components.TrustyAI.ManagementState != operatorv1.Removed { + existingDSC.Spec.Components.TrustyAI.ManagementState = operatorv1.Removed + err := cli.Update(context.TODO(), &existingDSC) + if err != nil { + return fmt.Errorf("error updating TrustyAI component: %w", err) + } + } + } + } + return nil +} From 4330b9b12fa73fe96a8c1d60d5bca62479ff1270 Mon Sep 17 00:00:00 2001 From: Vaishnavi Hire Date: Thu, 22 Feb 2024 18:04:51 -0500 Subject: [PATCH 31/32] Update Manifests --- get_all_manifests.sh | 22 +++++++++++----------- pkg/upgrade/upgrade.go | 6 ------ 2 files changed, 11 insertions(+), 17 deletions(-) diff --git a/get_all_manifests.sh b/get_all_manifests.sh index a4483881253..cb3669c2faf 100755 --- a/get_all_manifests.sh +++ b/get_all_manifests.sh @@ -11,17 +11,17 @@ 
MANIFESTS_TARBALL_URL="${GITHUB_URL}/${MANIFEST_ORG}/odh-manifests/tarball/${MAN # component: notebook, dsp, kserve, dashbaord, cf/ray, trustyai, modelmesh. # in the format of "repo-org:repo-name:branch-name:source-folder:target-folder". declare -A COMPONENT_MANIFESTS=( - ["codeflare"]="red-hat-data-services:codeflare-operator:rhoai-2.7:config:codeflare" - ["ray"]="red-hat-data-services:kuberay:rhoai-2.7:ray-operator/config:ray" - ["kueue"]="red-hat-data-services:kueue:rhoai-2.7:config:kueue" - ["data-science-pipelines-operator"]="red-hat-data-services:data-science-pipelines-operator:rhoai-2.7:config:data-science-pipelines-operator" - ["kf-notebook-controller"]="red-hat-data-services:kubeflow:rhoai-2.7:components/notebook-controller/config:odh-notebook-controller/kf-notebook-controller" - ["odh-notebook-controller"]="red-hat-data-services:kubeflow:rhoai-2.7:components/odh-notebook-controller/config:odh-notebook-controller/odh-notebook-controller" - ["notebooks"]="red-hat-data-services:notebooks:rhoai-2.7:manifests:/jupyterhub/notebooks" - ["trustyai"]="red-hat-data-services:trustyai-service-operator:rhoai-2.7:config:trustyai-service-operator" - ["model-mesh"]="red-hat-data-services:modelmesh-serving:rhoai-2.7:config:model-mesh" - ["odh-model-controller"]="red-hat-data-services:odh-model-controller:rhoai-2.7:config:odh-model-controller" - ["kserve"]="red-hat-data-services:kserve:rhoai-2.7:config:kserve" + ["codeflare"]="red-hat-data-services:codeflare-operator:rhoai-2.8:config:codeflare" + ["ray"]="red-hat-data-services:kuberay:rhoai-2.8:ray-operator/config:ray" + ["kueue"]="red-hat-data-services:kueue:rhoai-2.8:config:kueue" + ["data-science-pipelines-operator"]="red-hat-data-services:data-science-pipelines-operator:rhoai-2.8:config:data-science-pipelines-operator" + ["kf-notebook-controller"]="red-hat-data-services:kubeflow:rhoai-2.8:components/notebook-controller/config:odh-notebook-controller/kf-notebook-controller" + ["odh-notebook-controller"]="red-hat-data-services:kubeflow:rhoai-2.8:components/odh-notebook-controller/config:odh-notebook-controller/odh-notebook-controller" + ["notebooks"]="red-hat-data-services:notebooks:rhoai-2.8:manifests:/jupyterhub/notebooks" + ["trustyai"]="red-hat-data-services:trustyai-service-operator:rhoai-2.8:config:trustyai-service-operator" + ["model-mesh"]="red-hat-data-services:modelmesh-serving:rhoai-2.8:config:model-mesh" + ["odh-model-controller"]="red-hat-data-services:odh-model-controller:rhoai-2.8:config:odh-model-controller" + ["kserve"]="red-hat-data-services:kserve:rhoai-2.8:config:kserve" ) # Allow overwriting repo using flags component=repo diff --git a/pkg/upgrade/upgrade.go b/pkg/upgrade/upgrade.go index 1a199efdbfc..a4e48232de2 100644 --- a/pkg/upgrade/upgrade.go +++ b/pkg/upgrade/upgrade.go @@ -199,15 +199,9 @@ func CreateDefaultDSC(cli client.Client, _ deploy.Platform) error { Ray: ray.Ray{ Component: components.Component{ManagementState: operatorv1.Removed}, }, -<<<<<<< HEAD Kueue: kueue.Kueue{ Component: components.Component{ManagementState: operatorv1.Removed}, }, - TrustyAI: trustyai.TrustyAI{ - Component: components.Component{ManagementState: operatorv1.Managed}, - }, -======= ->>>>>>> d7ccc88f (Remove Trustyai installation in downstream) }, }, } From 1a29da9ee388cea3ced26367b8a802a8b9e9ad99 Mon Sep 17 00:00:00 2001 From: Vaishnavi Hire Date: Thu, 22 Feb 2024 19:16:48 -0500 Subject: [PATCH 32/32] Update application name --- controllers/dscinitialization/dscinitialization_test.go | 1 + 1 file changed, 1 insertion(+) diff --git 
a/controllers/dscinitialization/dscinitialization_test.go b/controllers/dscinitialization/dscinitialization_test.go index 61d6b5e09ff..6a726a1998b 100644 --- a/controllers/dscinitialization/dscinitialization_test.go +++ b/controllers/dscinitialization/dscinitialization_test.go @@ -154,6 +154,7 @@ var _ = Describe("DataScienceCluster initialization", func() { Context("Handling existing resources", func() { AfterEach(cleanupResources) + const applicationName = "default-dsci" It("Should not have more than one DSCI instance in the cluster", func() {
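// A closing sketch, not part of the series: one way to exercise the RemoveDeprecatedTrustyAI
// upgrade step added in [PATCH 30/32] against a fake client. It assumes the generated
// AddToScheme helper in the datasciencecluster API package and the pkg/deploy import path,
// neither of which is shown in this diff; the test name is illustrative.
package upgrade_test

import (
	"context"
	"testing"

	operatorv1 "github.com/openshift/api/operator/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"

	dsc "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1"
	"github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy"
	"github.com/opendatahub-io/opendatahub-operator/v2/pkg/upgrade"
)

func TestTrustyAISetToRemovedOnSelfManaged(t *testing.T) {
	scheme := runtime.NewScheme()
	if err := dsc.AddToScheme(scheme); err != nil {
		t.Fatal(err)
	}

	// An existing DSC that still has TrustyAI Managed, as before the upgrade step runs.
	existing := &dsc.DataScienceCluster{}
	existing.Name = "default-dsc"
	existing.Spec.Components.TrustyAI.ManagementState = operatorv1.Managed

	cli := fake.NewClientBuilder().WithScheme(scheme).WithObjects(existing).Build()

	if err := upgrade.RemoveDeprecatedTrustyAI(cli, deploy.SelfManagedRhods); err != nil {
		t.Fatalf("RemoveDeprecatedTrustyAI: %v", err)
	}

	updated := &dsc.DataScienceCluster{}
	if err := cli.Get(context.TODO(), client.ObjectKeyFromObject(existing), updated); err != nil {
		t.Fatal(err)
	}
	if updated.Spec.Components.TrustyAI.ManagementState != operatorv1.Removed {
		t.Fatalf("expected TrustyAI ManagementState to be Removed, got %q", updated.Spec.Components.TrustyAI.ManagementState)
	}
}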