diff --git a/.gitignore b/.gitignore index e4bfbc44e5b..584abfdd2bf 100644 --- a/.gitignore +++ b/.gitignore @@ -53,4 +53,8 @@ scripts/gke/build/** odh-manifests/ -cover.out \ No newline at end of file +cover.out + +# Ignore any local.mk files that would be consumed by the Makefile +local.mk + diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 335d187d4bc..525d14f9f9d 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -43,3 +43,17 @@ echo 'export PATH=${PATH}:~/bin' >> ~/.zshrc echo 'export GOPROXY=https://proxy.golang.org' >> ~/.zshrc ``` +## Using a local.mk file to override Makefile variables for your development environment + +To support the ability for a developer to customize the Makefile execution to support their development environment, you can create a `local.mk` file in the root of this repo to specify custom values that match your environment. + +``` +$ cat local.mk +VERSION=9.9.9 +IMAGE_TAG_BASE=quay.io/my-dev-env/opendatahub-operator +IMG_TAG=my-dev-tag +OPERATOR_NAMESPACE=my-dev-odh-operator-system +IMAGE_BUILD_FLAGS=--build-arg USE_LOCAL=true +E2E_TEST_FLAGS="--skip-deletion=true" -timeout 15m +``` + diff --git a/Makefile b/Makefile index 6b975095bae..b2357975a45 100644 --- a/Makefile +++ b/Makefile @@ -3,21 +3,16 @@ # To re-generate a bundle for another specific version without changing the standard setup, you can: # - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2) # - use environment variables to overwrite this value (e.g export VERSION=0.0.2) -ifneq ($(USER),) -IMAGE_OWNER = $(USER) -else -IMAGE_OWNER = opendatahub -endif -VERSION ?= 2.4.0 +VERSION ?= 2.7.0 # IMAGE_TAG_BASE defines the opendatahub.io namespace and part of the image name for remote images. # This variable is used to construct full image tags for bundle and catalog images. 
# # For example, running 'make bundle-build bundle-push catalog-build catalog-push' will build and push both # opendatahub.io/opendatahub-operator-bundle:$VERSION and opendatahub.io/opendatahub-operator-catalog:$VERSION. -IMAGE_TAG_BASE = quay.io/$(IMAGE_OWNER)/opendatahub-operator +IMAGE_TAG_BASE ?= quay.io/opendatahub/opendatahub-operator # keep the name based on IMG which already used from command line -IMG_TAG = latest +IMG_TAG ?= latest # Update IMG to a variable, to keep it consistent across versions for OpenShift CI IMG = $(IMAGE_TAG_BASE):$(IMG_TAG) # BUNDLE_IMG defines the image:tag used for the bundle. @@ -96,10 +91,19 @@ E2E_TEST_FLAGS = "--skip-deletion=false" -timeout 15m # See README.md, default g # Default image-build is to not use local odh-manifests folder # set to "true" to use local instead # see target "image-build" -IMAGE_BUILD_FLAGS = --build-arg USE_LOCAL=false +IMAGE_BUILD_FLAGS ?= --build-arg USE_LOCAL=false + +# Read any custom variables overrides from a local.mk file. This will only be read if it exists in the +# same directory as this Makefile. 
Variables can be specified in the standard format supported by +# GNU Make since `include` processes any valid Makefile +# Standard variables override would include anything you would pass at runtime that is different +# from the defaults specified in this file +OPERATOR_MAKE_ENV_FILE = local.mk +-include $(OPERATOR_MAKE_ENV_FILE) + .PHONY: default -default: lint unit-test build +default: manifests lint unit-test build ##@ General diff --git a/README.md b/README.md index 3becd4d5f69..713cbcf4431 100644 --- a/README.md +++ b/README.md @@ -300,3 +300,7 @@ make e2e-test -e OPERATOR_NAMESPACE= -e E2E_TEST_FLAGS="--skip-deleti ### Troubleshooting Please refer to [troubleshooting documentation](docs/troubleshooting.md) + +### Upgrade testing + +Please refer to [upgrade testing documentation](docs/upgrade-testing.md) diff --git a/apis/dscinitialization/v1/dscinitialization_types.go b/apis/dscinitialization/v1/dscinitialization_types.go index 593a1a5921c..7fad5bcb69e 100644 --- a/apis/dscinitialization/v1/dscinitialization_types.go +++ b/apis/dscinitialization/v1/dscinitialization_types.go @@ -45,9 +45,15 @@ type DSCInitializationSpec struct { // +operator-sdk:csv:customresourcedefinitions:type=spec,order=3 // +optional ServiceMesh infrav1.ServiceMeshSpec `json:"serviceMesh,omitempty"` + // When set to `Managed`, adds odh-trusted-ca-bundle Configmap to all namespaces that includes + // cluster-wide Trusted CA Bundle in .data["ca-bundle.crt"]. + // Additionally, this fields allows admins to add custom CA bundles to the configmap using the .CustomCABundle field. + // +operator-sdk:csv:customresourcedefinitions:type=spec,order=4 + // +optional + TrustedCABundle TrustedCABundleSpec `json:"trustedCABundle,omitempty"` // Internal development useful field to test customizations. // This is not recommended to be used in production environment. 
- // +operator-sdk:csv:customresourcedefinitions:type=spec,order=4 + // +operator-sdk:csv:customresourcedefinitions:type=spec,order=5 // +optional DevFlags *DevFlags `json:"devFlags,omitempty"` } @@ -73,6 +79,18 @@ type DevFlags struct { ManifestsUri string `json:"manifestsUri,omitempty"` } +type TrustedCABundleSpec struct { + // managementState indicates whether and how the operator should manage customized CA bundle + // +kubebuilder:validation:Enum=Managed;Removed;Unmanaged + // +kubebuilder:default=Removed + ManagementState operatorv1.ManagementState `json:"managementState"` + // A custom CA bundle that will be available for all components in the + // Data Science Cluster(DSC). This bundle will be stored in odh-trusted-ca-bundle + // ConfigMap .data.odh-ca-bundle.crt . + // +kubebuilder:default="" + CustomCABundle string `json:"customCABundle"` +} + // DSCInitializationStatus defines the observed state of DSCInitialization. type DSCInitializationStatus struct { // Phase describes the Phase of DSCInitializationStatus diff --git a/apis/dscinitialization/v1/zz_generated.deepcopy.go b/apis/dscinitialization/v1/zz_generated.deepcopy.go index 800f45905ed..c25082c24e2 100644 --- a/apis/dscinitialization/v1/zz_generated.deepcopy.go +++ b/apis/dscinitialization/v1/zz_generated.deepcopy.go @@ -91,6 +91,7 @@ func (in *DSCInitializationSpec) DeepCopyInto(out *DSCInitializationSpec) { *out = *in out.Monitoring = in.Monitoring out.ServiceMesh = in.ServiceMesh + out.TrustedCABundle = in.TrustedCABundle if in.DevFlags != nil { in, out := &in.DevFlags, &out.DevFlags *out = new(DevFlags) @@ -164,3 +165,18 @@ func (in *Monitoring) DeepCopy() *Monitoring { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TrustedCABundleSpec) DeepCopyInto(out *TrustedCABundleSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustedCABundleSpec. +func (in *TrustedCABundleSpec) DeepCopy() *TrustedCABundleSpec { + if in == nil { + return nil + } + out := new(TrustedCABundleSpec) + in.DeepCopyInto(out) + return out +} diff --git a/apis/features/v1/features_types.go b/apis/features/v1/features_types.go index 037dda6bdf2..d277012c14f 100644 --- a/apis/features/v1/features_types.go +++ b/apis/features/v1/features_types.go @@ -23,6 +23,19 @@ type FeatureTracker struct { Status FeatureTrackerStatus `json:"status,omitempty"` } +// NewFeatureTracker instantiate FeatureTracker instance. +func NewFeatureTracker(name, appNamespace string) *FeatureTracker { + return &FeatureTracker{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "features.opendatahub.io/v1", + Kind: "FeatureTracker", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: appNamespace + "-" + name, + }, + } +} + type FeaturePhase string type OwnerType string diff --git a/bundle/manifests/dscinitialization.opendatahub.io_dscinitializations.yaml b/bundle/manifests/dscinitialization.opendatahub.io_dscinitializations.yaml index c3757674e01..8ae3397ba85 100644 --- a/bundle/manifests/dscinitialization.opendatahub.io_dscinitializations.yaml +++ b/bundle/manifests/dscinitialization.opendatahub.io_dscinitializations.yaml @@ -123,6 +123,31 @@ spec: pattern: ^(Managed|Unmanaged|Force|Removed)$ type: string type: object + trustedCABundle: + description: When set to `Managed`, adds odh-trusted-ca-bundle Configmap + to all namespaces that includes cluster-wide Trusted CA Bundle in + .data["ca-bundle.crt"]. Additionally, this fields allows admins + to add custom CA bundles to the configmap using the .CustomCABundle + field. + properties: + customCABundle: + description: A custom CA bundle that will be available for all components + in the Data Science Cluster(DSC). 
This bundle will be stored + in odh-trusted-ca-bundle ConfigMap .data.odh-ca-bundle.crt . + type: string + managementState: + default: Removed + description: managementState indicates whether and how the operator + should manage customized CA bundle + enum: + - Managed + - Removed + - Unmanaged + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + required: + - managementState + type: object required: - applicationsNamespace type: object diff --git a/bundle/manifests/prometheus-k8s-viewer_rbac.authorization.k8s.io_v1_clusterrole.yaml b/bundle/manifests/prometheus-k8s-viewer_rbac.authorization.k8s.io_v1_clusterrole.yaml new file mode 100644 index 00000000000..3109a48e21d --- /dev/null +++ b/bundle/manifests/prometheus-k8s-viewer_rbac.authorization.k8s.io_v1_clusterrole.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: prometheus-k8s-viewer +rules: +- apiGroups: + - "" + resources: + - pods + - services + - endpoints + verbs: + - get + - watch + - list diff --git a/config/rbac/auth_proxy_role_binding.yaml b/bundle/manifests/prometheus-k8s-viewer_rbac.authorization.k8s.io_v1_clusterrolebinding.yaml similarity index 55% rename from config/rbac/auth_proxy_role_binding.yaml rename to bundle/manifests/prometheus-k8s-viewer_rbac.authorization.k8s.io_v1_clusterrolebinding.yaml index ec7acc0a1b7..e88728f1c53 100644 --- a/config/rbac/auth_proxy_role_binding.yaml +++ b/bundle/manifests/prometheus-k8s-viewer_rbac.authorization.k8s.io_v1_clusterrolebinding.yaml @@ -1,12 +1,13 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: proxy-rolebinding + creationTimestamp: null + name: prometheus-k8s-viewer roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: proxy-role + name: prometheus-k8s-viewer subjects: - kind: ServiceAccount - name: controller-manager - namespace: system + name: prometheus-k8s + namespace: openshift-monitoring diff --git 
a/bundle/manifests/rhods-operator.clusterserviceversion.yaml b/bundle/manifests/rhods-operator.clusterserviceversion.yaml index badc283186d..9417d9431be 100644 --- a/bundle/manifests/rhods-operator.clusterserviceversion.yaml +++ b/bundle/manifests/rhods-operator.clusterserviceversion.yaml @@ -49,9 +49,6 @@ metadata: "ray": { "managementState": "Removed" }, - "trustyai": { - "managementState": "Managed" - }, "workbenches": { "managementState": "Managed" } @@ -84,6 +81,10 @@ metadata: "namespace": "istio-system" }, "managementState": "Managed" + }, + "trustedCABundle": { + "customCABundle": "", + "managementState": "Managed" } } }, @@ -156,9 +157,6 @@ metadata: }, "workbenches": { "managementState": "Managed" - }, - "trustyai": { - "managementState": "Managed" } } } @@ -201,6 +199,12 @@ spec: e.g. it provides unified authentication giving a Single Sign On experience. displayName: Service Mesh path: serviceMesh + - description: When set to `Managed`, adds odh-trusted-ca-bundle Configmap to + all namespaces that includes cluster-wide Trusted CA Bundle in .data["ca-bundle.crt"]. + Additionally, this fields allows admins to add custom CA bundles to the + configmap using the .CustomCABundle field. + displayName: Trusted CABundle + path: trustedCABundle - description: Internal development useful field to test customizations. This is not recommended to be used in production environment. 
displayName: Dev Flags @@ -1332,6 +1336,7 @@ spec: resources: - subscriptions verbs: + - delete - get - list - watch @@ -1798,11 +1803,6 @@ spec: requests: cpu: 500m memory: 256Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL securityContext: runAsNonRoot: true serviceAccountName: redhat-ods-operator-controller-manager diff --git a/components/codeflare/codeflare.go b/components/codeflare/codeflare.go index 9aa2338d9a5..c9fe4ad768c 100644 --- a/components/codeflare/codeflare.go +++ b/components/codeflare/codeflare.go @@ -108,7 +108,7 @@ func (c *CodeFlare) ReconcileComponent(ctx context.Context, cli client.Client, r // CloudServiceMonitoring handling if platform == deploy.ManagedRhods { if enabled { - // first check if the service is up, so prometheus wont fire alerts when it is just startup + // first check if the service is up, so prometheus won't fire alerts when it is just startup if err := monitoring.WaitForDeploymentAvailable(ctx, resConf, ComponentName, dscispec.ApplicationsNamespace, 20, 2); err != nil { return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentName, err) } diff --git a/components/component.go b/components/component.go index 6f04ed945be..b0ac3818c43 100644 --- a/components/component.go +++ b/components/component.go @@ -60,7 +60,7 @@ type DevFlags struct { } type ManifestsConfig struct { - // uri is the URI point to a git repo with tag/branch. e.g https://github.com/org/repo/tarball/ + // uri is the URI point to a git repo with tag/branch. e.g. https://github.com/org/repo/tarball/ // +optional // +kubebuilder:default:="" // +operator-sdk:csv:customresourcedefinitions:type=spec,order=1 @@ -72,7 +72,7 @@ type ManifestsConfig struct { // +operator-sdk:csv:customresourcedefinitions:type=spec,order=2 ContextDir string `json:"contextDir,omitempty"` - // sourcePath is the subpath within contextDir where kustomize builds start. 
Examples include any sub-folder or path: `base`, `overlays/dev`, `default`, `odh` etc + // sourcePath is the subpath within contextDir where kustomize builds start. Examples include any sub-folder or path: `base`, `overlays/dev`, `default`, `odh` etc. // +optional // +kubebuilder:default:="" // +operator-sdk:csv:customresourcedefinitions:type=spec,order=3 diff --git a/components/dashboard/dashboard.go b/components/dashboard/dashboard.go index 36fbae7aae4..12dc60b23a6 100644 --- a/components/dashboard/dashboard.go +++ b/components/dashboard/dashboard.go @@ -172,7 +172,7 @@ func (d *Dashboard) ReconcileComponent(ctx context.Context, // CloudService Monitoring handling if platform == deploy.ManagedRhods { if enabled { - // first check if the service is up, so prometheus wont fire alerts when it is just startup + // first check if the service is up, so prometheus won't fire alerts when it is just startup if err := monitoring.WaitForDeploymentAvailable(ctx, resConf, ComponentNameSupported, dscispec.ApplicationsNamespace, 20, 3); err != nil { return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentName, err) } diff --git a/components/datasciencepipelines/datasciencepipelines.go b/components/datasciencepipelines/datasciencepipelines.go index dc187a84748..bc5b9628636 100644 --- a/components/datasciencepipelines/datasciencepipelines.go +++ b/components/datasciencepipelines/datasciencepipelines.go @@ -98,7 +98,7 @@ func (d *DataSciencePipelines) ReconcileComponent(ctx context.Context, // CloudService Monitoring handling if platform == deploy.ManagedRhods { if enabled { - // first check if the service is up, so prometheus wont fire alerts when it is just startup + // first check if the service is up, so prometheus won't fire alerts when it is just startup // only 1 replica should be very quick if err := monitoring.WaitForDeploymentAvailable(ctx, resConf, ComponentName, dscispec.ApplicationsNamespace, 10, 1); err != nil { return fmt.Errorf("deployment for %s 
is not ready to server: %w", ComponentName, err) diff --git a/components/kserve/kserve.go b/components/kserve/kserve.go index 16970fdf71c..b3b415df22e 100644 --- a/components/kserve/kserve.go +++ b/components/kserve/kserve.go @@ -45,7 +45,7 @@ type Kserve struct { func (k *Kserve) OverrideManifests(_ string) error { // Download manifests if defined by devflags - // Go through each manifests and set the overlays if defined + // Go through each manifest and set the overlays if defined for _, subcomponent := range k.DevFlags.Manifests { if strings.Contains(subcomponent.URI, DependentComponentName) { // Download subcomponent @@ -107,13 +107,8 @@ func (k *Kserve) ReconcileComponent(ctx context.Context, cli client.Client, resC return err } } - // check on dependent operators if all installed in cluster - // dependent operators set in checkRequiredOperatorsInstalled() - if err := checkRequiredOperatorsInstalled(cli); err != nil { - return err - } - if err := k.configureServerless(dscispec); err != nil { + if err := k.configureServerless(cli, dscispec); err != nil { return err } @@ -151,7 +146,7 @@ func (k *Kserve) ReconcileComponent(ctx context.Context, cli client.Client, resC // CloudService Monitoring handling if platform == deploy.ManagedRhods { if enabled { - // first check if the service is up, so prometheus wont fire alerts when it is just startup + // first check if the service is up, so prometheus won't fire alerts when it is just startup if err := monitoring.WaitForDeploymentAvailable(ctx, resConf, ComponentName, dscispec.ApplicationsNamespace, 20, 2); err != nil { return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentName, err) } @@ -169,7 +164,7 @@ func (k *Kserve) Cleanup(_ client.Client, instance *dsciv1.DSCInitializationSpec return k.removeServerlessFeatures(instance) } -func (k *Kserve) configureServerless(instance *dsciv1.DSCInitializationSpec) error { +func (k *Kserve) configureServerless(cli client.Client, instance 
*dsciv1.DSCInitializationSpec) error { switch k.Serving.ManagementState { case operatorv1.Unmanaged: // Bring your own CR fmt.Println("Serverless CR is not configured by the operator, we won't do anything") @@ -186,13 +181,15 @@ func (k *Kserve) configureServerless(instance *dsciv1.DSCInitializationSpec) err return fmt.Errorf("ServiceMesh is need to set to 'Managed' in DSCI CR, it is required by KServe serving field") } - serverlessInitializer := feature.ComponentFeaturesInitializer(k, instance, k.configureServerlessFeatures()) - - if err := serverlessInitializer.Prepare(); err != nil { - return err + // check on dependent operators if all installed in cluster + dependOpsErrors := checkDependentOperators(cli).ErrorOrNil() + if dependOpsErrors != nil { + return dependOpsErrors } - if err := serverlessInitializer.Apply(); err != nil { + serverlessFeatures := feature.ComponentFeaturesHandler(k, instance, k.configureServerlessFeatures()) + + if err := serverlessFeatures.Apply(); err != nil { return err } } @@ -200,30 +197,27 @@ func (k *Kserve) configureServerless(instance *dsciv1.DSCInitializationSpec) err } func (k *Kserve) removeServerlessFeatures(instance *dsciv1.DSCInitializationSpec) error { - serverlessInitializer := feature.ComponentFeaturesInitializer(k, instance, k.configureServerlessFeatures()) + serverlessFeatures := feature.ComponentFeaturesHandler(k, instance, k.configureServerlessFeatures()) - if err := serverlessInitializer.Prepare(); err != nil { - return err - } - - return serverlessInitializer.Delete() + return serverlessFeatures.Delete() } -func checkRequiredOperatorsInstalled(cli client.Client) error { +func checkDependentOperators(cli client.Client) *multierror.Error { var multiErr *multierror.Error - checkAndAppendError := func(operatorName string) { - if found, err := deploy.OperatorExists(cli, operatorName); err != nil { - multiErr = multierror.Append(multiErr, err) - } else if !found { - err = fmt.Errorf("operator %s not found. 
Please install the operator before enabling %s component", - operatorName, ComponentName) - multiErr = multierror.Append(multiErr, err) - } + if found, err := deploy.OperatorExists(cli, ServiceMeshOperator); err != nil { + multiErr = multierror.Append(multiErr, err) + } else if !found { + err = fmt.Errorf("operator %s not found. Please install the operator before enabling %s component", + ServiceMeshOperator, ComponentName) + multiErr = multierror.Append(multiErr, err) } - - checkAndAppendError(ServiceMeshOperator) - checkAndAppendError(ServerlessOperator) - - return multiErr.ErrorOrNil() + if found, err := deploy.OperatorExists(cli, ServerlessOperator); err != nil { + multiErr = multierror.Append(multiErr, err) + } else if !found { + err = fmt.Errorf("operator %s not found. Please install the operator before enabling %s component", + ServerlessOperator, ComponentName) + multiErr = multierror.Append(multiErr, err) + } + return multiErr } diff --git a/components/kserve/serverless_setup.go b/components/kserve/serverless_setup.go index d57ce6c6f55..47eac118bbb 100644 --- a/components/kserve/serverless_setup.go +++ b/components/kserve/serverless_setup.go @@ -8,17 +8,12 @@ import ( "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/servicemesh" ) -const ( - templatesDir = "templates/serverless" -) - -func (k *Kserve) configureServerlessFeatures() feature.DefinedFeatures { - return func(initializer *feature.FeaturesInitializer) error { - servingDeployment, err := feature.CreateFeature("serverless-serving-deployment"). - With(initializer.DSCInitializationSpec). - From(initializer.Source). +func (k *Kserve) configureServerlessFeatures() feature.FeaturesProvider { + return func(handler *feature.FeaturesHandler) error { + servingDeploymentErr := feature.CreateFeature("serverless-serving-deployment"). + For(handler). Manifests( - path.Join(templatesDir, "serving-install"), + path.Join(feature.ServerlessDir, "serving-install"), ). 
WithData(PopulateComponentSettings(k)). PreConditions( @@ -27,17 +22,18 @@ func (k *Kserve) configureServerlessFeatures() feature.DefinedFeatures { servicemesh.EnsureServiceMeshInstalled, feature.CreateNamespaceIfNotExists(serverless.KnativeServingNamespace), ). + PostConditions( + feature.WaitForPodsToBeReady(serverless.KnativeServingNamespace), + ). Load() - if err != nil { - return err + if servingDeploymentErr != nil { + return servingDeploymentErr } - initializer.Features = append(initializer.Features, servingDeployment) - servingNetIstioSecretFiltering, err := feature.CreateFeature("serverless-net-istio-secret-filtering"). - With(initializer.DSCInitializationSpec). - From(initializer.Source). + servingNetIstioSecretFilteringErr := feature.CreateFeature("serverless-net-istio-secret-filtering"). + For(handler). Manifests( - path.Join(templatesDir, "serving-net-istio-secret-filtering.patch.tmpl"), + path.Join(feature.ServerlessDir, "serving-net-istio-secret-filtering.patch.tmpl"), ). WithData(PopulateComponentSettings(k)). PreConditions(serverless.EnsureServerlessServingDeployed). @@ -45,29 +41,27 @@ func (k *Kserve) configureServerlessFeatures() feature.DefinedFeatures { feature.WaitForPodsToBeReady(serverless.KnativeServingNamespace), ). Load() - if err != nil { - return err + if servingNetIstioSecretFilteringErr != nil { + return servingNetIstioSecretFilteringErr } - initializer.Features = append(initializer.Features, servingNetIstioSecretFiltering) - servingIstioGateways, err := feature.CreateFeature("serverless-serving-gateways"). - With(initializer.DSCInitializationSpec). - From(initializer.Source). + serverlessGwErr := feature.CreateFeature("serverless-serving-gateways"). + For(handler). PreConditions(serverless.EnsureServerlessServingDeployed). WithData( + PopulateComponentSettings(k), serverless.ServingDefaultValues, serverless.ServingIngressDomain, - PopulateComponentSettings(k), ). WithResources(serverless.ServingCertificateResource). 
Manifests( - path.Join(templatesDir, "serving-istio-gateways"), + path.Join(feature.ServerlessDir, "serving-istio-gateways"), ). Load() - if err != nil { - return err + if serverlessGwErr != nil { + return serverlessGwErr } - initializer.Features = append(initializer.Features, servingIstioGateways) + return nil } } diff --git a/components/kueue/kueue.go b/components/kueue/kueue.go index 0db08cdd053..0e0ab988f35 100644 --- a/components/kueue/kueue.go +++ b/components/kueue/kueue.go @@ -84,7 +84,7 @@ func (r *Kueue) ReconcileComponent(ctx context.Context, cli client.Client, resCo // CloudService Monitoring handling if platform == deploy.ManagedRhods { if enabled { - // first check if the service is up, so prometheus wont fire alerts when it is just startup + // first check if the service is up, so prometheus won't fire alerts when it is just startup if err := monitoring.WaitForDeploymentAvailable(ctx, resConf, ComponentName, dscispec.ApplicationsNamespace, 20, 2); err != nil { return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentName, err) } diff --git a/components/modelmeshserving/modelmeshserving.go b/components/modelmeshserving/modelmeshserving.go index 07b54ce9c7b..06968ee0728 100644 --- a/components/modelmeshserving/modelmeshserving.go +++ b/components/modelmeshserving/modelmeshserving.go @@ -36,7 +36,7 @@ type ModelMeshServing struct { } func (m *ModelMeshServing) OverrideManifests(_ string) error { - // Go through each manifests and set the overlays if defined + // Go through each manifest and set the overlays if defined for _, subcomponent := range m.DevFlags.Manifests { if strings.Contains(subcomponent.URI, DependentComponentName) { // Download subcomponent @@ -150,7 +150,7 @@ func (m *ModelMeshServing) ReconcileComponent(ctx context.Context, // CloudService Monitoring handling if platform == deploy.ManagedRhods { if enabled { - // first check if service is up, so prometheus wont fire alerts when it is just startup + // first check if 
service is up, so prometheus won't fire alerts when it is just startup if err := monitoring.WaitForDeploymentAvailable(ctx, resConf, ComponentName, dscispec.ApplicationsNamespace, 20, 2); err != nil { return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentName, err) } diff --git a/components/ray/ray.go b/components/ray/ray.go index 009cf40c40e..8168647df13 100644 --- a/components/ray/ray.go +++ b/components/ray/ray.go @@ -86,7 +86,7 @@ func (r *Ray) ReconcileComponent(ctx context.Context, cli client.Client, resConf // CloudService Monitoring handling if platform == deploy.ManagedRhods { if enabled { - // first check if the service is up, so prometheus wont fire alerts when it is just startup + // first check if the service is up, so prometheus won't fire alerts when it is just startup if err := monitoring.WaitForDeploymentAvailable(ctx, resConf, ComponentName, dscispec.ApplicationsNamespace, 20, 2); err != nil { return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentName, err) } diff --git a/components/trustyai/trustyai.go b/components/trustyai/trustyai.go index 6a6ca215c5c..29eedc4d89e 100644 --- a/components/trustyai/trustyai.go +++ b/components/trustyai/trustyai.go @@ -26,6 +26,9 @@ var ( var _ components.ComponentInterface = (*TrustyAI)(nil) // TrustyAI struct holds the configuration for the TrustyAI component. +// ## DEPRECATED ## : Installation of TrustyAI operator is deprecated in downstream(RHOAI). +// If TrustyAI operator is installed, it will be removed +// Changes in managemenstState are not supported. 
// +kubebuilder:object:generate=true type TrustyAI struct { components.Component `json:""` @@ -65,6 +68,11 @@ func (t *TrustyAI) ReconcileComponent(ctx context.Context, cli client.Client, re return err } + //TODO: Remove manifests for trustyai in 2.8 + if platform == deploy.SelfManagedRhods || platform == deploy.ManagedRhods { + enabled = false + } + if enabled { if t.DevFlags != nil { // Download manifests and update paths @@ -72,8 +80,7 @@ func (t *TrustyAI) ReconcileComponent(ctx context.Context, cli client.Client, re return err } } - - if dscispec.DevFlags == nil || dscispec.DevFlags.ManifestsUri == "" { + if (dscispec.DevFlags == nil || dscispec.DevFlags.ManifestsUri == "") && (t.DevFlags == nil || len(t.DevFlags.Manifests) == 0) { if err := deploy.ApplyParams(Path, t.SetImageParamsMap(imageParamMap), false); err != nil { return err } diff --git a/components/workbenches/workbenches.go b/components/workbenches/workbenches.go index 25a01ea1ee5..afd840c5fc3 100644 --- a/components/workbenches/workbenches.go +++ b/components/workbenches/workbenches.go @@ -41,7 +41,7 @@ type Workbenches struct { func (w *Workbenches) OverrideManifests(platform string) error { // Download manifests if defined by devflags - // Go through each manifests and set the overlays if defined + // Go through each manifest and set the overlays if defined for _, subcomponent := range w.DevFlags.Manifests { if strings.Contains(subcomponent.URI, DependentComponentName) { // Download subcomponent diff --git a/config/crd/bases/datasciencecluster.opendatahub.io_datascienceclusters.yaml b/config/crd/bases/datasciencecluster.opendatahub.io_datascienceclusters.yaml index 5b269e6319b..fd4c96d290b 100644 --- a/config/crd/bases/datasciencecluster.opendatahub.io_datascienceclusters.yaml +++ b/config/crd/bases/datasciencecluster.opendatahub.io_datascienceclusters.yaml @@ -63,12 +63,12 @@ spec: description: 'sourcePath is the subpath within contextDir where kustomize builds start. 
Examples include any sub-folder or path: `base`, `overlays/dev`, - `default`, `odh` etc' + `default`, `odh` etc.' type: string uri: default: "" description: uri is the URI point to a git repo - with tag/branch. e.g https://github.com/org/repo/tarball/ + with tag/branch. e.g. https://github.com/org/repo/tarball/ type: string type: object type: array @@ -106,12 +106,12 @@ spec: description: 'sourcePath is the subpath within contextDir where kustomize builds start. Examples include any sub-folder or path: `base`, `overlays/dev`, - `default`, `odh` etc' + `default`, `odh` etc.' type: string uri: default: "" description: uri is the URI point to a git repo - with tag/branch. e.g https://github.com/org/repo/tarball/ + with tag/branch. e.g. https://github.com/org/repo/tarball/ type: string type: object type: array @@ -150,12 +150,12 @@ spec: description: 'sourcePath is the subpath within contextDir where kustomize builds start. Examples include any sub-folder or path: `base`, `overlays/dev`, - `default`, `odh` etc' + `default`, `odh` etc.' type: string uri: default: "" description: uri is the URI point to a git repo - with tag/branch. e.g https://github.com/org/repo/tarball/ + with tag/branch. e.g. https://github.com/org/repo/tarball/ type: string type: object type: array @@ -196,12 +196,12 @@ spec: description: 'sourcePath is the subpath within contextDir where kustomize builds start. Examples include any sub-folder or path: `base`, `overlays/dev`, - `default`, `odh` etc' + `default`, `odh` etc.' type: string uri: default: "" description: uri is the URI point to a git repo - with tag/branch. e.g https://github.com/org/repo/tarball/ + with tag/branch. e.g. https://github.com/org/repo/tarball/ type: string type: object type: array @@ -299,12 +299,12 @@ spec: description: 'sourcePath is the subpath within contextDir where kustomize builds start. 
Examples include any sub-folder or path: `base`, `overlays/dev`, - `default`, `odh` etc' + `default`, `odh` etc.' type: string uri: default: "" description: uri is the URI point to a git repo - with tag/branch. e.g https://github.com/org/repo/tarball/ + with tag/branch. e.g. https://github.com/org/repo/tarball/ type: string type: object type: array @@ -343,12 +343,12 @@ spec: description: 'sourcePath is the subpath within contextDir where kustomize builds start. Examples include any sub-folder or path: `base`, `overlays/dev`, - `default`, `odh` etc' + `default`, `odh` etc.' type: string uri: default: "" description: uri is the URI point to a git repo - with tag/branch. e.g https://github.com/org/repo/tarball/ + with tag/branch. e.g. https://github.com/org/repo/tarball/ type: string type: object type: array @@ -386,12 +386,12 @@ spec: description: 'sourcePath is the subpath within contextDir where kustomize builds start. Examples include any sub-folder or path: `base`, `overlays/dev`, - `default`, `odh` etc' + `default`, `odh` etc.' type: string uri: default: "" description: uri is the URI point to a git repo - with tag/branch. e.g https://github.com/org/repo/tarball/ + with tag/branch. e.g. https://github.com/org/repo/tarball/ type: string type: object type: array @@ -429,12 +429,12 @@ spec: description: 'sourcePath is the subpath within contextDir where kustomize builds start. Examples include any sub-folder or path: `base`, `overlays/dev`, - `default`, `odh` etc' + `default`, `odh` etc.' type: string uri: default: "" description: uri is the URI point to a git repo - with tag/branch. e.g https://github.com/org/repo/tarball/ + with tag/branch. e.g. https://github.com/org/repo/tarball/ type: string type: object type: array @@ -472,12 +472,12 @@ spec: description: 'sourcePath is the subpath within contextDir where kustomize builds start. 
Examples include any sub-folder or path: `base`, `overlays/dev`, - `default`, `odh` etc' + `default`, `odh` etc.' type: string uri: default: "" description: uri is the URI point to a git repo - with tag/branch. e.g https://github.com/org/repo/tarball/ + with tag/branch. e.g. https://github.com/org/repo/tarball/ type: string type: object type: array diff --git a/config/crd/bases/dscinitialization.opendatahub.io_dscinitializations.yaml b/config/crd/bases/dscinitialization.opendatahub.io_dscinitializations.yaml index 01213c67a24..48621d88cc7 100644 --- a/config/crd/bases/dscinitialization.opendatahub.io_dscinitializations.yaml +++ b/config/crd/bases/dscinitialization.opendatahub.io_dscinitializations.yaml @@ -124,6 +124,31 @@ spec: pattern: ^(Managed|Unmanaged|Force|Removed)$ type: string type: object + trustedCABundle: + description: When set to `Managed`, adds odh-trusted-ca-bundle Configmap + to all namespaces that includes cluster-wide Trusted CA Bundle in + .data["ca-bundle.crt"]. Additionally, this fields allows admins + to add custom CA bundles to the configmap using the .CustomCABundle + field. + properties: + customCABundle: + description: A custom CA bundle that will be available for all components + in the Data Science Cluster(DSC). This bundle will be stored + in odh-trusted-ca-bundle ConfigMap .data.odh-ca-bundle.crt . 
+ type: string + managementState: + default: Removed + description: managementState indicates whether and how the operator + should manage customized CA bundle + enum: + - Managed + - Removed + - Unmanaged + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + required: + - managementState + type: object required: - applicationsNamespace type: object diff --git a/config/default/manager_config_patch.yaml b/config/default/manager_config_patch.yaml deleted file mode 100644 index 6c400155cfb..00000000000 --- a/config/default/manager_config_patch.yaml +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: controller-manager - namespace: system -spec: - template: - spec: - containers: - - name: manager - args: - - "--config=controller_manager_config.yaml" - volumeMounts: - - name: manager-config - mountPath: /controller_manager_config.yaml - subPath: controller_manager_config.yaml - volumes: - - name: manager-config - configMap: - name: manager-config diff --git a/config/manifests/bases/rhods-operator.clusterserviceversion.yaml b/config/manifests/bases/rhods-operator.clusterserviceversion.yaml index aedbae1a6d1..6da19c778bf 100644 --- a/config/manifests/bases/rhods-operator.clusterserviceversion.yaml +++ b/config/manifests/bases/rhods-operator.clusterserviceversion.yaml @@ -6,9 +6,9 @@ metadata: capabilities: Full Lifecycle categories: AI/Machine Learning, Big Data certified: "False" - containerImage: quay.io/opendatahub/opendatahub-operator:v2.4.0 + containerImage: quay.io/opendatahub/opendatahub-operator:v2.7.0 createdAt: "2023-8-23T00:00:00Z" - olm.skipRange: '>=1.0.0 <2.0.0' + olm.skipRange: '>=1.0.0 <2.7.0' operatorframework.io/initialization-resource: |- { "apiVersion": "datasciencecluster.opendatahub.io/v1", @@ -58,15 +58,12 @@ metadata: "workbenches": { "managementState": "Managed" }, - "trustyai": { - "managementState": "Managed" - } } } } operators.operatorframework.io/internal-objects: 
'[dscinitialization.opendatahub.io]' repository: https://github.com/red-hat-data-services/rhods-operator - name: rhods-operator.v2.4.0 + name: rhods-operator.v2.7.0 namespace: placeholder spec: apiservicedefinitions: {} @@ -138,11 +135,11 @@ spec: - training - kserve - distributed-workloads - - trustyai links: - name: Red Hat OpenShift AI url: https://www.redhat.com/en/technologies/cloud-computing/openshift/openshift-ai minKubeVersion: 1.22.0 provider: name: Red Hat - version: 2.4.0 + replaces: rhods-operator.v2.6.0 + version: 2.7.0 diff --git a/config/manifests/kustomization.yaml b/config/manifests/kustomization.yaml index 940e327e0fa..bea4b1f7217 100644 --- a/config/manifests/kustomization.yaml +++ b/config/manifests/kustomization.yaml @@ -6,7 +6,8 @@ kind: Kustomization resources: - bases/rhods-operator.clusterserviceversion.yaml - ../default -- ../samples +- ../prometheus +- ../samples # to generate CSV alm-example - ../scorecard patches: diff --git a/config/prometheus/kustomization.yaml b/config/prometheus/kustomization.yaml index 7f6a57fbe15..86458dbecb0 100644 --- a/config/prometheus/kustomization.yaml +++ b/config/prometheus/kustomization.yaml @@ -1,4 +1,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: -- monitor.yaml +- prom_clusterrole.yaml +- prom_clusterrolebinding.yaml diff --git a/config/prometheus/prom_clusterrole.yaml b/config/prometheus/prom_clusterrole.yaml new file mode 100644 index 00000000000..cc55247cb73 --- /dev/null +++ b/config/prometheus/prom_clusterrole.yaml @@ -0,0 +1,15 @@ +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: prometheus-k8s-viewer +rules: + - apiGroups: + - '' + verbs: + - get + - watch + - list + resources: + - pods + - services + - endpoints diff --git a/config/prometheus/prom_clusterrolebinding.yaml b/config/prometheus/prom_clusterrolebinding.yaml new file mode 100644 index 00000000000..645f130ca14 --- /dev/null +++ 
b/config/prometheus/prom_clusterrolebinding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: prometheus-k8s-viewer +subjects: + - kind: ServiceAccount + name: prometheus-k8s + namespace: openshift-monitoring +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: prometheus-k8s-viewer \ No newline at end of file diff --git a/config/rbac/auth_proxy_role.yaml b/config/rbac/auth_proxy_role.yaml deleted file mode 100644 index 80e1857c594..00000000000 --- a/config/rbac/auth_proxy_role.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: proxy-role -rules: -- apiGroups: - - authentication.k8s.io - resources: - - tokenreviews - verbs: - - create -- apiGroups: - - authorization.k8s.io - resources: - - subjectaccessreviews - verbs: - - create diff --git a/config/rbac/auth_proxy_service.yaml b/config/rbac/auth_proxy_service.yaml index 2e46fc2b55f..65b4e67a891 100644 --- a/config/rbac/auth_proxy_service.yaml +++ b/config/rbac/auth_proxy_service.yaml @@ -12,4 +12,4 @@ spec: protocol: TCP targetPort: 8080 selector: - control-plane: controller-manager + control-plane: controller-manager \ No newline at end of file diff --git a/config/rbac/dscinitialization.opendatahub.io_dscinitialization_editor_role.yaml b/config/rbac/dscinitialization.opendatahub.io_dscinitialization_editor_role.yaml deleted file mode 100644 index a832c3aed02..00000000000 --- a/config/rbac/dscinitialization.opendatahub.io_dscinitialization_editor_role.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# permissions for end users to edit dscinitializations. 
-apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: dscinitialization-editor-role -rules: -- apiGroups: - - dscinitialization.opendatahub.io.opendatahub.io - resources: - - dscinitializations - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - dscinitialization.opendatahub.io.opendatahub.io - resources: - - dscinitializations/status - verbs: - - get diff --git a/config/rbac/dscinitialization.opendatahub.io_dscinitialization_viewer_role.yaml b/config/rbac/dscinitialization.opendatahub.io_dscinitialization_viewer_role.yaml deleted file mode 100644 index df72ff59e80..00000000000 --- a/config/rbac/dscinitialization.opendatahub.io_dscinitialization_viewer_role.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# permissions for end users to view dscinitializations. -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: dscinitialization-viewer-role -rules: -- apiGroups: - - dscinitialization.opendatahub.io.opendatahub.io - resources: - - dscinitializations - verbs: - - get - - list - - watch -- apiGroups: - - dscinitialization.opendatahub.io.opendatahub.io - resources: - - dscinitializations/status - verbs: - - get diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml index c15bf590e11..63c426323d0 100644 --- a/config/rbac/kustomization.yaml +++ b/config/rbac/kustomization.yaml @@ -8,13 +8,6 @@ kind: Kustomization resources: - service_account.yaml - role.yaml -- role_binding.yaml -- leader_election_role.yaml -- leader_election_role_binding.yaml -# Comment the following 4 lines if you want to disable -# the auth proxy (https://github.com/brancz/kube-rbac-proxy) -# which protects your /metrics endpoint. 
-- auth_proxy_service.yaml -- auth_proxy_role.yaml -- auth_proxy_role_binding.yaml +- role_binding.yaml # add role with binding for SA in CSV - auth_proxy_client_clusterrole.yaml +- auth_proxy_service.yaml # for ODH operator to scrape metrics diff --git a/config/rbac/leader_election_role.yaml b/config/rbac/leader_election_role.yaml deleted file mode 100644 index 1e4984e7c75..00000000000 --- a/config/rbac/leader_election_role.yaml +++ /dev/null @@ -1,37 +0,0 @@ -# permissions to do leader election. -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: leader-election-role -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - get - - list - - watch - - create - - update - - patch - - delete -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - get - - list - - watch - - create - - update - - patch - - delete -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch \ No newline at end of file diff --git a/config/rbac/leader_election_role_binding.yaml b/config/rbac/leader_election_role_binding.yaml deleted file mode 100644 index 1d1321ed4f0..00000000000 --- a/config/rbac/leader_election_role_binding.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: leader-election-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: leader-election-role -subjects: -- kind: ServiceAccount - name: controller-manager - namespace: system diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 7589b3ba71b..bbe029f5342 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -1093,6 +1093,7 @@ rules: resources: - subscriptions verbs: + - delete - get - list - watch diff --git a/config/samples/datasciencecluster_v1_datasciencecluster.yaml b/config/samples/datasciencecluster_v1_datasciencecluster.yaml index b7e30473dac..631141e756f 100644 --- a/config/samples/datasciencecluster_v1_datasciencecluster.yaml +++ 
b/config/samples/datasciencecluster_v1_datasciencecluster.yaml @@ -36,5 +36,3 @@ spec: managementState: "Removed" workbenches: managementState: "Managed" - trustyai: - managementState: "Managed" diff --git a/config/samples/dscinitialization_v1_dscinitialization.yaml b/config/samples/dscinitialization_v1_dscinitialization.yaml index 0d6645911b7..d02a5d4c34e 100644 --- a/config/samples/dscinitialization_v1_dscinitialization.yaml +++ b/config/samples/dscinitialization_v1_dscinitialization.yaml @@ -18,5 +18,8 @@ spec: metricsCollection: Istio name: data-science-smcp namespace: istio-system - managementState: Managed + managementState: "Managed" + trustedCABundle: + managementState: "Managed" + customCABundle: "" diff --git a/controllers/certconfigmapgenerator/certconfigmapgenerator_controller.go b/controllers/certconfigmapgenerator/certconfigmapgenerator_controller.go new file mode 100644 index 00000000000..17732592d39 --- /dev/null +++ b/controllers/certconfigmapgenerator/certconfigmapgenerator_controller.go @@ -0,0 +1,158 @@ +// Package certconfigmapgenerator contains generator logic of add cert configmap resource in user namespaces +package certconfigmapgenerator + +import ( + "context" + "reflect" + + "github.com/go-logr/logr" + operatorv1 "github.com/openshift/api/operator/v1" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + dsci "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" + 
"github.com/opendatahub-io/opendatahub-operator/v2/pkg/trustedcabundle" +) + +var configmapGenLog = log.Log.WithName("cert-configmap-generator") + +// CertConfigmapGeneratorReconciler holds the controller configuration. +type CertConfigmapGeneratorReconciler struct { //nolint:golint,revive // Readability + Client client.Client + Scheme *runtime.Scheme + Log logr.Logger +} + +// SetupWithManager sets up the controller with the Manager. +func (r *CertConfigmapGeneratorReconciler) SetupWithManager(mgr ctrl.Manager) error { + configmapGenLog.Info("Adding controller for Configmap Generation.") + return ctrl.NewControllerManagedBy(mgr). + Named("cert-configmap-generator-controller"). + Watches(&source.Kind{Type: &corev1.ConfigMap{}}, handler.EnqueueRequestsFromMapFunc(r.watchTrustedCABundleConfigMapResource), builder.WithPredicates(ConfigMapChangedPredicate)). + Watches(&source.Kind{Type: &corev1.Namespace{}}, handler.EnqueueRequestsFromMapFunc(r.watchNamespaceResource), builder.WithPredicates(NamespaceCreatedPredicate)). + Complete(r) +} + +// Reconcile will generate new configmap, odh-trusted-ca-bundle, that includes cluster-wide trusted-ca bundle and custom +// ca bundle in every new namespace created. +func (r *CertConfigmapGeneratorReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + // Request includes namespace that is newly created or where odh-trusted-ca-bundle configmap is updated. 
+ r.Log.Info("Reconciling certConfigMapGenerator.", " CertConfigMapGenerator Request.Namespace", req.NamespacedName) + // Get namespace instance + userNamespace := &corev1.Namespace{} + err := r.Client.Get(ctx, client.ObjectKey{Name: req.Namespace}, userNamespace) + if err != nil { + return ctrl.Result{}, errors.WithMessage(err, "error getting user namespace to inject trustedCA bundle ") + } + + // Get DSCI instance + dsciInstances := &dsci.DSCInitializationList{} + err = r.Client.List(ctx, dsciInstances) + if err != nil { + r.Log.Error(err, "Failed to retrieve DSCInitialization resource for certconfigmapgenerator ", "CertConfigmapGenerator Request.Name", req.Name) + return ctrl.Result{}, err + } + + var dsciInstance *dsci.DSCInitialization + switch len(dsciInstances.Items) { + case 0: + return ctrl.Result{}, nil + case 1: + dsciInstance = &dsciInstances.Items[0] + default: + message := "only one instance of DSCInitialization object is allowed" + return ctrl.Result{}, errors.New(message) + } + + if dsciInstance.Spec.TrustedCABundle.ManagementState != operatorv1.Managed { + return ctrl.Result{}, nil + } + + // Verify if namespace did not opt out of trustedCABundle injection + if trustedcabundle.HasCABundleAnnotationDisabled(userNamespace) { + r.Log.Info("Namespace has opted-out of CA bundle injection using annotation ", "namespace", userNamespace.Name, + "annotation", trustedcabundle.InjectionOfCABundleAnnotatoion) + err := trustedcabundle.DeleteOdhTrustedCABundleConfigMap(ctx, r.Client, req.Namespace) + if err != nil { + if !apierrors.IsNotFound(err) { + r.Log.Error(err, "error deleting existing configmap from namespace", "name", trustedcabundle.CAConfigMapName, "namespace", req.Namespace) + return reconcile.Result{}, err + } + } + return reconcile.Result{}, nil + } + + // Verify odh-trusted-ca-bundle Configmap is created for the given namespace + if trustedcabundle.ShouldInjectTrustedBundle(userNamespace) { + r.Log.Info("Adding trusted CA bundle configmap to the 
new or existing namespace ", "namespace", userNamespace.Name, + "configmap", trustedcabundle.CAConfigMapName) + trustCAData := dsciInstance.Spec.TrustedCABundle.CustomCABundle + err = trustedcabundle.CreateOdhTrustedCABundleConfigMap(ctx, r.Client, req.Namespace, trustCAData) + if err != nil { + r.Log.Error(err, "error adding configmap to namespace", "name", trustedcabundle.CAConfigMapName, "namespace", req.Namespace) + return reconcile.Result{}, err + } + } + return ctrl.Result{}, err +} + +func (r *CertConfigmapGeneratorReconciler) watchNamespaceResource(a client.Object) []reconcile.Request { + if trustedcabundle.ShouldInjectTrustedBundle(a) { + return []reconcile.Request{{NamespacedName: types.NamespacedName{Name: trustedcabundle.CAConfigMapName, Namespace: a.GetName()}}} + } + return nil +} + +func (r *CertConfigmapGeneratorReconciler) watchTrustedCABundleConfigMapResource(a client.Object) []reconcile.Request { + if a.GetName() == trustedcabundle.CAConfigMapName { + r.Log.Info("Cert configmap has been updated, start reconcile") + return []reconcile.Request{{NamespacedName: types.NamespacedName{Name: a.GetName(), Namespace: a.GetNamespace()}}} + } + return nil +} + +var NamespaceCreatedPredicate = predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + return trustedcabundle.ShouldInjectTrustedBundle(e.Object) + }, + + // If user changes the annotation of namespace to opt out of CABundle injection, reconcile. 
+ UpdateFunc: func(e event.UpdateEvent) bool { + oldNamespace, _ := e.ObjectOld.(*corev1.Namespace) + newNamespace, _ := e.ObjectNew.(*corev1.Namespace) + + oldNsAnnValue, oldNsAnnExists := oldNamespace.GetAnnotations()[trustedcabundle.InjectionOfCABundleAnnotatoion] + newNsAnnValue, newNsAnnExists := newNamespace.GetAnnotations()[trustedcabundle.InjectionOfCABundleAnnotatoion] + + if newNsAnnExists && !oldNsAnnExists { + return true + } else if newNsAnnExists && oldNsAnnExists && oldNsAnnValue != newNsAnnValue { + return true + } + return false + }, +} + +var ConfigMapChangedPredicate = predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + oldCM, _ := e.ObjectOld.(*corev1.ConfigMap) + newCM, _ := e.ObjectNew.(*corev1.ConfigMap) + return !reflect.DeepEqual(oldCM.Data, newCM.Data) + }, + + DeleteFunc: func(deleteEvent event.DeleteEvent) bool { + return true + }, +} diff --git a/controllers/datasciencecluster/datasciencecluster_controller.go b/controllers/datasciencecluster/datasciencecluster_controller.go index 10677ab0399..3010faa80f8 100644 --- a/controllers/datasciencecluster/datasciencecluster_controller.go +++ b/controllers/datasciencecluster/datasciencecluster_controller.go @@ -35,6 +35,7 @@ import ( corev1 "k8s.io/api/core/v1" netv1 "k8s.io/api/networking/v1" authv1 "k8s.io/api/rbac/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/rest" @@ -54,6 +55,7 @@ import ( dsc "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" dsci "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" "github.com/opendatahub-io/opendatahub-operator/v2/components" + "github.com/opendatahub-io/opendatahub-operator/v2/components/trustyai" "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/upgrade" ) @@ -80,10 +82,11 @@ const ( // Reconcile is part of the main 
kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. -func (r *DataScienceClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +func (r *DataScienceClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { //nolint:gocyclo,maintidx r.Log.Info("Reconciling DataScienceCluster resources", "Request.Name", req.Name) instances := &dsc.DataScienceClusterList{} + if err := r.Client.List(ctx, instances); err != nil { return ctrl.Result{}, err } @@ -104,6 +107,37 @@ func (r *DataScienceClusterReconciler) Reconcile(ctx context.Context, req ctrl.R instance := &instances.Items[0] + allComponents, err := getAllComponents(&instance.Spec.Components) + if err != nil { + return ctrl.Result{}, err + } + + // If DSC CR exist and deletion CM exist + // delete DSC CR and let reconcile requeue + // sometimes with finalizer DSC CR won't get deleted, force to remove finalizer here + if upgrade.HasDeleteConfigMap(r.Client) { + if controllerutil.ContainsFinalizer(instance, finalizerName) { + if controllerutil.RemoveFinalizer(instance, finalizerName) { + if err := r.Update(ctx, instance); err != nil { + r.Log.Info("Error to remove DSC finalzier", "error", err) + return ctrl.Result{}, err + } + r.Log.Info("Removed finalizer for DataScienceCluster", "name", instance.Name, "finalizer", finalizerName) + } + } + if err := r.Client.Delete(context.TODO(), instance, []client.DeleteOption{}...); err != nil { + if !apierrs.IsNotFound(err) { + return reconcile.Result{}, err + } + } + for _, component := range allComponents { + if err := component.Cleanup(r.Client, r.DataScienceCluster.DSCISpec); err != nil { + return ctrl.Result{}, err + } + } + return reconcile.Result{Requeue: true}, nil + } + if len(instances.Items) > 1 { message := fmt.Sprintf("only one instance of DataScienceCluster object is allowed. 
Update existing instance %s", req.Name) err := errors.New(message) @@ -117,8 +151,6 @@ func (r *DataScienceClusterReconciler) Reconcile(ctx context.Context, req ctrl.R return ctrl.Result{}, err } - var err error - // Verify a valid DSCInitialization instance is created dsciInstances := &dsci.DSCInitializationList{} err = r.Client.List(ctx, dsciInstances) @@ -154,11 +186,6 @@ func (r *DataScienceClusterReconciler) Reconcile(ctx context.Context, req ctrl.R return ctrl.Result{}, errors.New(message) } - allComponents, err := getAllComponents(&instance.Spec.Components) - if err != nil { - return ctrl.Result{}, err - } - if instance.ObjectMeta.DeletionTimestamp.IsZero() { if !controllerutil.ContainsFinalizer(instance, finalizerName) { r.Log.Info("Adding finalizer for DataScienceCluster", "name", instance.Name, "finalizer", finalizerName) @@ -252,9 +279,14 @@ func (r *DataScienceClusterReconciler) reconcileSubComponent(ctx context.Context enabled := component.GetManagementState() == v1.Managed // First set conditions to reflect a component is about to be reconciled instance, err := r.updateStatus(ctx, instance, func(saved *dsc.DataScienceCluster) { - message := "Component is disabled" - if enabled { - message = "Component is enabled" + var message string + if componentName == trustyai.ComponentName { + message = "TrustyAI is deprecated. Component state is disabled." 
+ } else { + message = "Component is disabled" + if enabled { + message = "Component is enabled" + } } status.SetComponentCondition(&saved.Status.Conditions, componentName, status.ReconcileInit, message, corev1.ConditionUnknown) }) diff --git a/controllers/datasciencecluster/kubebuilder_rbac.go b/controllers/datasciencecluster/kubebuilder_rbac.go index 38f9d1c1d03..dd8c02699b5 100644 --- a/controllers/datasciencecluster/kubebuilder_rbac.go +++ b/controllers/datasciencecluster/kubebuilder_rbac.go @@ -29,7 +29,7 @@ package datasciencecluster // +kubebuilder:rbac:groups="operators.coreos.com",resources=clusterserviceversions,verbs=get;list;watch;delete;update // +kubebuilder:rbac:groups="operators.coreos.com",resources=customresourcedefinitions,verbs=create;get;patch;delete -// +kubebuilder:rbac:groups="operators.coreos.com",resources=subscriptions,verbs=get;list;watch +// +kubebuilder:rbac:groups="operators.coreos.com",resources=subscriptions,verbs=get;list;watch;delete // +kubebuilder:rbac:groups="operators.coreos.com",resources=operatorconditions,verbs=get;list;watch /* This is for operator */ diff --git a/controllers/dscinitialization/dscinitialization_controller.go b/controllers/dscinitialization/dscinitialization_controller.go index abe659cb7bb..be34f146816 100644 --- a/controllers/dscinitialization/dscinitialization_controller.go +++ b/controllers/dscinitialization/dscinitialization_controller.go @@ -49,6 +49,7 @@ import ( dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/trustedcabundle" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/upgrade" ) @@ -56,6 +57,10 @@ const ( finalizerName = "dscinitialization.opendatahub.io/finalizer" ) +// This var is required by the .spec.TrustedCABundle field. 
When a user goes from Unmanaged to Managed, update all +// namespaces irrespective of any changes in the configmap. +var managementStateChangeTrustedCA = false + // DSCInitializationReconciler reconciles a DSCInitialization object. type DSCInitializationReconciler struct { //nolint:golint,revive // Readability client.Client @@ -161,6 +166,13 @@ func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Re return reconcile.Result{}, err } + // Verify odh-trusted-ca-bundle Configmap is configured for the namespaces + err = trustedcabundle.ConfigureTrustedCABundle(ctx, r.Client, r.Log, instance, managementStateChangeTrustedCA) + if err != nil { + return reconcile.Result{}, err + } + managementStateChangeTrustedCA = false + // Get platform platform, err := deploy.GetPlatform(r.Client) if err != nil { @@ -287,7 +299,8 @@ func (r *DSCInitializationReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). // add predicates prevents meaningless reconciliations from being triggered // not use WithEventFilter() because it conflict with secret and configmap predicate - For(&dsciv1.DSCInitialization{}, builder.WithPredicates(predicate.Or(predicate.GenerationChangedPredicate{}, predicate.LabelChangedPredicate{}))). + For(&dsciv1.DSCInitialization{}, builder.WithPredicates(predicate.Or(predicate.GenerationChangedPredicate{}, + predicate.LabelChangedPredicate{}), dsciPredicateStateChangeTrustedCA)). Owns(&corev1.Namespace{}, builder.WithPredicates(predicate.Or(predicate.GenerationChangedPredicate{}, predicate.LabelChangedPredicate{}))). Owns(&corev1.Secret{}, builder.WithPredicates(predicate.Or(predicate.GenerationChangedPredicate{}, predicate.LabelChangedPredicate{}))). Owns(&corev1.ConfigMap{}, builder.WithPredicates(predicate.Or(predicate.GenerationChangedPredicate{}, predicate.LabelChangedPredicate{}))). 
@@ -347,6 +360,18 @@ var DSCDeletionPredicate = predicate.Funcs{ }, } +var dsciPredicateStateChangeTrustedCA = predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + oldDSCI, _ := e.ObjectOld.(*dsciv1.DSCInitialization) + newDSCI, _ := e.ObjectNew.(*dsciv1.DSCInitialization) + + if oldDSCI.Spec.TrustedCABundle.ManagementState != newDSCI.Spec.TrustedCABundle.ManagementState { + managementStateChangeTrustedCA = true + } + return true + }, +} + func (r *DSCInitializationReconciler) watchMonitoringConfigMapResource(a client.Object) []reconcile.Request { if a.GetName() == "prometheus" && a.GetNamespace() == "redhat-ods-monitoring" { r.Log.Info("Found monitoring configmap has updated, start reconcile") @@ -375,11 +400,10 @@ func (r *DSCInitializationReconciler) watchDSCResource(_ client.Object) []reconc // do not handle if cannot get list return nil } - if len(instanceList.Items) == 0 { - r.Log.Info("Found no DSC instance in cluster, reset monitoring stack config") - return []reconcile.Request{{ - NamespacedName: types.NamespacedName{Name: "backup"}, - }} + if len(instanceList.Items) == 0 && !upgrade.HasDeleteConfigMap(r.Client) { + r.Log.Info("Found no DSC instance in cluster but not in uninstalltion process, reset monitoring stack config") + + return []reconcile.Request{{NamespacedName: types.NamespacedName{Name: "backup"}}} } return nil } diff --git a/controllers/dscinitialization/dscinitialization_test.go b/controllers/dscinitialization/dscinitialization_test.go index c2cd564bfaf..6a726a1998b 100644 --- a/controllers/dscinitialization/dscinitialization_test.go +++ b/controllers/dscinitialization/dscinitialization_test.go @@ -32,10 +32,13 @@ var _ = Describe("DataScienceCluster initialization", func() { const applicationName = "default-dsci" BeforeEach(func() { // when - desiredDsci := createDSCI(applicationName, operatorv1.Managed, monitoringNamespace) + desiredDsci := createDSCI(applicationName, operatorv1.Managed, operatorv1.Managed, monitoringNamespace) 
Expect(k8sClient.Create(context.Background(), desiredDsci)).Should(Succeed()) foundDsci := &dsci.DSCInitialization{} - Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci), timeout, interval).Should(BeTrue()) + Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci)). + WithTimeout(timeout). + WithPolling(interval). + Should(BeTrue()) }) AfterEach(cleanupResources) @@ -43,7 +46,10 @@ var _ = Describe("DataScienceCluster initialization", func() { It("Should create default application namespace", func() { // then foundApplicationNamespace := &corev1.Namespace{} - Eventually(namespaceExists(applicationNamespace, foundApplicationNamespace), timeout, interval).Should(BeTrue()) + Eventually(namespaceExists(applicationNamespace, foundApplicationNamespace)). + WithTimeout(timeout). + WithPolling(interval). + Should(BeTrue()) Expect(foundApplicationNamespace.Name).To(Equal(applicationNamespace)) }) @@ -58,7 +64,10 @@ var _ = Describe("DataScienceCluster initialization", func() { It("Should create default network policy", func() { // then foundNetworkPolicy := &netv1.NetworkPolicy{} - Eventually(objectExists(applicationNamespace, applicationNamespace, foundNetworkPolicy), timeout, interval).Should(BeTrue()) + Eventually(objectExists(applicationNamespace, applicationNamespace, foundNetworkPolicy)). + WithTimeout(timeout). + WithPolling(interval). 
+ Should(BeTrue()) Expect(foundNetworkPolicy.Name).To(Equal(applicationNamespace)) Expect(foundNetworkPolicy.Namespace).To(Equal(applicationNamespace)) Expect(foundNetworkPolicy.Spec.PolicyTypes[0]).To(Equal(netv1.PolicyTypeIngress)) @@ -67,7 +76,10 @@ var _ = Describe("DataScienceCluster initialization", func() { It("Should create default rolebinding", func() { // then foundRoleBinding := &authv1.RoleBinding{} - Eventually(objectExists(applicationNamespace, applicationNamespace, foundRoleBinding), timeout, interval).Should(BeTrue()) + Eventually(objectExists(applicationNamespace, applicationNamespace, foundRoleBinding)). + WithTimeout(timeout). + WithPolling(interval). + Should(BeTrue()) expectedSubjects := []authv1.Subject{ { Kind: "ServiceAccount", @@ -89,12 +101,16 @@ var _ = Describe("DataScienceCluster initialization", func() { It("Should create default configmap", func() { // then foundConfigMap := &corev1.ConfigMap{} - Eventually(objectExists(configmapName, applicationNamespace, foundConfigMap), timeout, interval).Should(BeTrue()) + Eventually(objectExists(configmapName, applicationNamespace, foundConfigMap)). + WithTimeout(timeout). + WithPolling(interval). 
+ Should(BeTrue()) Expect(foundConfigMap.Name).To(Equal(configmapName)) Expect(foundConfigMap.Namespace).To(Equal(applicationNamespace)) expectedConfigmapData := map[string]string{"namespace": applicationNamespace} Expect(foundConfigMap.Data).To(Equal(expectedConfigmapData)) }) + }) Context("Monitoring Resource", func() { @@ -103,23 +119,35 @@ var _ = Describe("DataScienceCluster initialization", func() { const applicationName = "default-dsci" It("Should not create monitoring namespace if monitoring is disabled", func() { // when - desiredDsci := createDSCI(applicationName, operatorv1.Removed, monitoringNamespace2) + desiredDsci := createDSCI(applicationName, operatorv1.Removed, operatorv1.Managed, monitoringNamespace2) Expect(k8sClient.Create(context.Background(), desiredDsci)).Should(Succeed()) foundDsci := &dsci.DSCInitialization{} - Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci), timeout, interval).Should(BeTrue()) + Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci)). + WithTimeout(timeout). + WithPolling(interval). + Should(BeTrue()) // then foundMonitoringNamespace := &corev1.Namespace{} - Eventually(namespaceExists(monitoringNamespace2, foundMonitoringNamespace), timeout, interval).Should(BeFalse()) + Eventually(namespaceExists(monitoringNamespace2, foundMonitoringNamespace)). + WithTimeout(timeout). + WithPolling(interval). 
+ Should(BeFalse()) }) It("Should create default monitoring namespace if monitoring enabled", func() { // when - desiredDsci := createDSCI(applicationName, operatorv1.Managed, monitoringNamespace2) + desiredDsci := createDSCI(applicationName, operatorv1.Managed, operatorv1.Managed, monitoringNamespace2) Expect(k8sClient.Create(context.Background(), desiredDsci)).Should(Succeed()) foundDsci := &dsci.DSCInitialization{} - Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci), timeout, interval).Should(BeTrue()) + Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci)). + WithTimeout(timeout). + WithPolling(interval). + Should(BeTrue()) // then foundMonitoringNamespace := &corev1.Namespace{} - Eventually(namespaceExists(monitoringNamespace2, foundMonitoringNamespace), timeout, interval).Should(BeTrue()) + Eventually(namespaceExists(monitoringNamespace2, foundMonitoringNamespace)). + WithTimeout(timeout). + WithPolling(interval). + Should(BeTrue()) Expect(foundMonitoringNamespace.Name).Should(Equal(monitoringNamespace2)) }) }) @@ -132,12 +160,15 @@ var _ = Describe("DataScienceCluster initialization", func() { anotherApplicationName := "default2" // given - desiredDsci := createDSCI(applicationName, operatorv1.Managed, monitoringNamespace) + desiredDsci := createDSCI(applicationName, operatorv1.Managed, operatorv1.Managed, monitoringNamespace) Expect(k8sClient.Create(context.Background(), desiredDsci)).Should(Succeed()) // when - desiredDsci2 := createDSCI(anotherApplicationName, operatorv1.Managed, monitoringNamespace) + desiredDsci2 := createDSCI(anotherApplicationName, operatorv1.Managed, operatorv1.Managed, monitoringNamespace) // then - Eventually(dscInitializationIsReady(anotherApplicationName, workingNamespace, desiredDsci2), timeout, interval).Should(BeFalse()) + Eventually(dscInitializationIsReady(anotherApplicationName, workingNamespace, desiredDsci2)). + WithTimeout(timeout). + WithPolling(interval). 
+ Should(BeFalse()) }) It("Should not update rolebinding if it exists", func() { @@ -162,17 +193,26 @@ var _ = Describe("DataScienceCluster initialization", func() { } Expect(k8sClient.Create(context.Background(), desiredRoleBinding)).Should(Succeed()) createdRoleBinding := &authv1.RoleBinding{} - Eventually(objectExists(applicationNamespace, applicationNamespace, createdRoleBinding), timeout, interval).Should(BeTrue()) + Eventually(objectExists(applicationNamespace, applicationNamespace, createdRoleBinding)). + WithTimeout(timeout). + WithPolling(interval). + Should(BeTrue()) // when - desiredDsci := createDSCI(applicationName, operatorv1.Managed, monitoringNamespace) + desiredDsci := createDSCI(applicationName, operatorv1.Managed, operatorv1.Managed, monitoringNamespace) Expect(k8sClient.Create(context.Background(), desiredDsci)).Should(Succeed()) foundDsci := &dsci.DSCInitialization{} - Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci), timeout, interval).Should(BeTrue()) + Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci)). + WithTimeout(timeout). + WithPolling(interval). + Should(BeTrue()) // then foundRoleBinding := &authv1.RoleBinding{} - Eventually(objectExists(applicationNamespace, applicationNamespace, foundRoleBinding), timeout, interval).Should(BeTrue()) + Eventually(objectExists(applicationNamespace, applicationNamespace, foundRoleBinding)). + WithTimeout(timeout). + WithPolling(interval). 
+ Should(BeTrue()) Expect(foundRoleBinding.UID).To(Equal(createdRoleBinding.UID)) Expect(foundRoleBinding.Subjects).To(BeNil()) }) @@ -194,17 +234,26 @@ var _ = Describe("DataScienceCluster initialization", func() { } Expect(k8sClient.Create(context.Background(), desiredConfigMap)).Should(Succeed()) createdConfigMap := &corev1.ConfigMap{} - Eventually(objectExists(configmapName, applicationNamespace, createdConfigMap), timeout, interval).Should(BeTrue()) + Eventually(objectExists(configmapName, applicationNamespace, createdConfigMap)). + WithTimeout(timeout). + WithPolling(interval). + Should(BeTrue()) // when - desiredDsci := createDSCI(applicationName, operatorv1.Managed, monitoringNamespace) + desiredDsci := createDSCI(applicationName, operatorv1.Managed, operatorv1.Managed, monitoringNamespace) Expect(k8sClient.Create(context.Background(), desiredDsci)).Should(Succeed()) foundDsci := &dsci.DSCInitialization{} - Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci), timeout, interval).Should(BeTrue()) + Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci)). + WithTimeout(timeout). + WithPolling(interval). + Should(BeTrue()) // then foundConfigMap := &corev1.ConfigMap{} - Eventually(objectExists(configmapName, applicationNamespace, foundConfigMap), timeout, interval).Should(BeTrue()) + Eventually(objectExists(configmapName, applicationNamespace, foundConfigMap)). + WithTimeout(timeout). + WithPolling(interval). 
+ Should(BeTrue()) Expect(foundConfigMap.UID).To(Equal(createdConfigMap.UID)) Expect(foundConfigMap.Data).To(Equal(map[string]string{"namespace": "existing-data"})) Expect(foundConfigMap.Data).ToNot(Equal(map[string]string{"namespace": applicationNamespace})) @@ -222,17 +271,26 @@ var _ = Describe("DataScienceCluster initialization", func() { } Expect(k8sClient.Create(context.Background(), desiredNamespace)).Should(Succeed()) createdNamespace := &corev1.Namespace{} - Eventually(namespaceExists(anotherNamespace, createdNamespace), timeout, interval).Should(BeTrue()) + Eventually(namespaceExists(anotherNamespace, createdNamespace)). + WithTimeout(timeout). + WithPolling(interval). + Should(BeTrue()) // when - desiredDsci := createDSCI(applicationName, operatorv1.Managed, monitoringNamespace) + desiredDsci := createDSCI(applicationName, operatorv1.Managed, operatorv1.Managed, monitoringNamespace) Expect(k8sClient.Create(context.Background(), desiredDsci)).Should(Succeed()) foundDsci := &dsci.DSCInitialization{} - Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci), timeout, interval).Should(BeTrue()) + Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci)). + WithTimeout(timeout). + WithPolling(interval). + Should(BeTrue()) // then foundApplicationNamespace := &corev1.Namespace{} - Eventually(namespaceExists(anotherNamespace, foundApplicationNamespace), timeout, interval).Should(BeTrue()) + Eventually(namespaceExists(anotherNamespace, foundApplicationNamespace)). + WithTimeout(timeout). + WithPolling(interval). 
+ Should(BeTrue()) Expect(foundApplicationNamespace.Name).To(Equal(createdNamespace.Name)) Expect(foundApplicationNamespace.UID).To(Equal(createdNamespace.UID)) }) @@ -249,10 +307,22 @@ func cleanupResources() { Expect(k8sClient.DeleteAllOf(context.TODO(), &authv1.RoleBinding{}, appNamespace)).To(Succeed()) Expect(k8sClient.DeleteAllOf(context.TODO(), &authv1.ClusterRoleBinding{}, appNamespace)).To(Succeed()) - Eventually(noInstanceExistsIn(workingNamespace, &dsci.DSCInitializationList{}), timeout, interval).Should(BeTrue()) - Eventually(noInstanceExistsIn(applicationNamespace, &authv1.ClusterRoleBindingList{}), timeout, interval).Should(BeTrue()) - Eventually(noInstanceExistsIn(applicationNamespace, &authv1.RoleBindingList{}), timeout, interval).Should(BeTrue()) - Eventually(noInstanceExistsIn(applicationNamespace, &corev1.ConfigMapList{}), timeout, interval).Should(BeTrue()) + Eventually(noInstanceExistsIn(workingNamespace, &dsci.DSCInitializationList{})). + WithTimeout(timeout). + WithPolling(interval). + Should(BeTrue()) + Eventually(noInstanceExistsIn(applicationNamespace, &authv1.ClusterRoleBindingList{})). + WithTimeout(timeout). + WithPolling(interval). + Should(BeTrue()) + Eventually(noInstanceExistsIn(applicationNamespace, &authv1.RoleBindingList{})). + WithTimeout(timeout). + WithPolling(interval). + Should(BeTrue()) + Eventually(noInstanceExistsIn(applicationNamespace, &corev1.ConfigMapList{})). + WithTimeout(timeout). + WithPolling(interval). 
+ Should(BeTrue()) } func noInstanceExistsIn(namespace string, list client.ObjectList) func() bool { @@ -279,7 +349,7 @@ func objectExists(ns string, name string, obj client.Object) func() bool { //nol } } -func createDSCI(appName string, enableMonitoring operatorv1.ManagementState, monitoringNS string) *dsci.DSCInitialization { +func createDSCI(appName string, enableMonitoring operatorv1.ManagementState, enableTrustedCABundle operatorv1.ManagementState, monitoringNS string) *dsci.DSCInitialization { return &dsci.DSCInitialization{ TypeMeta: metav1.TypeMeta{ Kind: "DSCInitialization", @@ -295,6 +365,9 @@ func createDSCI(appName string, enableMonitoring operatorv1.ManagementState, mon Namespace: monitoringNS, ManagementState: enableMonitoring, }, + TrustedCABundle: dsci.TrustedCABundleSpec{ + ManagementState: enableTrustedCABundle, + }, }, } } diff --git a/controllers/dscinitialization/servicemesh_setup.go b/controllers/dscinitialization/servicemesh_setup.go index e39ed64bfe7..7d67bf5205e 100644 --- a/controllers/dscinitialization/servicemesh_setup.go +++ b/controllers/dscinitialization/servicemesh_setup.go @@ -11,19 +11,12 @@ import ( "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/servicemesh" ) -const templatesDir = "templates/servicemesh" - func (r *DSCInitializationReconciler) configureServiceMesh(instance *dsciv1.DSCInitialization) error { switch instance.Spec.ServiceMesh.ManagementState { case operatorv1.Managed: - serviceMeshInitializer := feature.ClusterFeaturesInitializer(instance, configureServiceMeshFeatures()) - if err := serviceMeshInitializer.Prepare(); err != nil { - r.Log.Error(err, "failed configuring service mesh resources") - r.Recorder.Eventf(instance, corev1.EventTypeWarning, "DSCInitializationReconcileError", "failed configuring service mesh resources") - return err - } + serviceMeshFeatures := feature.ClusterFeaturesHandler(instance, configureServiceMeshFeatures()) - if err := serviceMeshInitializer.Apply(); err != nil { + if 
err := serviceMeshFeatures.Apply(); err != nil { r.Log.Error(err, "failed applying service mesh resources") r.Recorder.Eventf(instance, corev1.EventTypeWarning, "DSCInitializationReconcileError", "failed applying service mesh resources") return err @@ -41,17 +34,11 @@ func (r *DSCInitializationReconciler) configureServiceMesh(instance *dsciv1.DSCI } func (r *DSCInitializationReconciler) removeServiceMesh(instance *dsciv1.DSCInitialization) error { - // on condition of Managed, do not handle Removed when set to Removed it trigger DSCI reconcile to cleanup + // on condition of Managed, do not handle Removed when set to Removed it trigger DSCI reconcile to clean up if instance.Spec.ServiceMesh.ManagementState == operatorv1.Managed { - serviceMeshInitializer := feature.ClusterFeaturesInitializer(instance, configureServiceMeshFeatures()) - if err := serviceMeshInitializer.Prepare(); err != nil { - r.Log.Error(err, "failed configuring service mesh resources") - r.Recorder.Eventf(instance, corev1.EventTypeWarning, "DSCInitializationReconcileError", "failed configuring service mesh resources") - - return err - } + serviceMeshFeatures := feature.ClusterFeaturesHandler(instance, configureServiceMeshFeatures()) - if err := serviceMeshInitializer.Delete(); err != nil { + if err := serviceMeshFeatures.Delete(); err != nil { r.Log.Error(err, "failed deleting service mesh resources") r.Recorder.Eventf(instance, corev1.EventTypeWarning, "DSCInitializationReconcileError", "failed deleting service mesh resources") @@ -62,15 +49,14 @@ func (r *DSCInitializationReconciler) removeServiceMesh(instance *dsciv1.DSCInit return nil } -func configureServiceMeshFeatures() feature.DefinedFeatures { - return func(initializer *feature.FeaturesInitializer) error { - serviceMeshSpec := initializer.DSCInitializationSpec.ServiceMesh +func configureServiceMeshFeatures() feature.FeaturesProvider { + return func(handler *feature.FeaturesHandler) error { + serviceMeshSpec := 
handler.DSCInitializationSpec.ServiceMesh - smcpCreation, errSmcp := feature.CreateFeature("mesh-control-plane-creation"). - With(initializer.DSCInitializationSpec). - From(initializer.Source). + smcpCreationErr := feature.CreateFeature("mesh-control-plane-creation"). + For(handler). Manifests( - path.Join(templatesDir, "base", "create-smcp.tmpl"), + path.Join(feature.ServiceMeshDir, "base", "create-smcp.tmpl"), ). PreConditions( servicemesh.EnsureServiceMeshOperatorInstalled, @@ -81,27 +67,25 @@ func configureServiceMeshFeatures() feature.DefinedFeatures { ). Load() - if errSmcp != nil { - return errSmcp + if smcpCreationErr != nil { + return smcpCreationErr } - initializer.Features = append(initializer.Features, smcpCreation) if serviceMeshSpec.ControlPlane.MetricsCollection == "Istio" { - metricsCollection, errMetrics := feature.CreateFeature("mesh-metrics-collection"). - With(initializer.DSCInitializationSpec). - From(initializer.Source). - Manifests( - path.Join(templatesDir, "metrics-collection"), - ). + metricsCollectionErr := feature.CreateFeature("mesh-metrics-collection"). + For(handler). PreConditions( servicemesh.EnsureServiceMeshInstalled, ). + Manifests( + path.Join(feature.ServiceMeshDir, "metrics-collection"), + ). 
Load() - if errMetrics != nil { - return errMetrics + if metricsCollectionErr != nil { + return metricsCollectionErr } - initializer.Features = append(initializer.Features, metricsCollection) } + return nil } } diff --git a/controllers/dscinitialization/suite_test.go b/controllers/dscinitialization/suite_test.go index da61c0b6ab4..7deb7115277 100644 --- a/controllers/dscinitialization/suite_test.go +++ b/controllers/dscinitialization/suite_test.go @@ -61,7 +61,7 @@ var ( ) const ( - timeout = 20 * time.Second + timeout = 30 * time.Second // change this from original 20 to 30 because we often failed in post cleanup job interval = 250 * time.Millisecond ) diff --git a/controllers/dscinitialization/utils.go b/controllers/dscinitialization/utils.go index fc87113204e..4fce7baf52f 100644 --- a/controllers/dscinitialization/utils.go +++ b/controllers/dscinitialization/utils.go @@ -238,7 +238,7 @@ func (r *DSCInitializationReconciler) reconcileDefaultNetworkPolicy(ctx context. r.Log.Error(err, "error to set networkpolicy in applications namespace", "path", networkpolicyPath) return err } - } else { // Expected namespace for the given name + } else { // Expected namespace for the given name in ODH desiredNetworkPolicy := &netv1.NetworkPolicy{ TypeMeta: metav1.TypeMeta{ Kind: "NetworkPolicy", @@ -255,7 +255,11 @@ func (r *DSCInitializationReconciler) reconcileDefaultNetworkPolicy(ctx context. Ingress: []netv1.NetworkPolicyIngressRule{ { From: []netv1.NetworkPolicyPeer{ - { + { /* allow ODH namespace <->ODH namespace: + - default notebook project: rhods-notebooks + - redhat-odh-monitoring + - redhat-odh-applications / opendatahub + */ NamespaceSelector: &metav1.LabelSelector{ // AND logic MatchLabels: map[string]string{ cluster.ODHGeneratedNamespaceLabel: "true", @@ -266,7 +270,7 @@ func (r *DSCInitializationReconciler) reconcileDefaultNetworkPolicy(ctx context. 
}, { // OR logic From: []netv1.NetworkPolicyPeer{ - { // need this for access dashboard + { // need this to access external-> dashboard NamespaceSelector: &metav1.LabelSelector{ MatchLabels: map[string]string{ "network.openshift.io/policy-group": "ingress", @@ -277,7 +281,7 @@ func (r *DSCInitializationReconciler) reconcileDefaultNetworkPolicy(ctx context. }, { // OR logic for PSI From: []netv1.NetworkPolicyPeer{ - { // need this to access dashboard + { // need this to access external->dashboard NamespaceSelector: &metav1.LabelSelector{ MatchLabels: map[string]string{ "kubernetes.io/metadata.name": "openshift-host-network", @@ -286,6 +290,17 @@ func (r *DSCInitializationReconciler) reconcileDefaultNetworkPolicy(ctx context. }, }, }, + { + From: []netv1.NetworkPolicyPeer{ + { // need this for cluster-monitoring work: cluster-monitoring->ODH namespaces + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "kubernetes.io/metadata.name": "openshift-monitoring", + }, + }, + }, + }, + }, }, PolicyTypes: []netv1.PolicyType{ netv1.PolicyTypeIngress, diff --git a/controllers/secretgenerator/secretgenerator_controller.go b/controllers/secretgenerator/secretgenerator_controller.go index 25dd3c58280..6ec86d04c63 100644 --- a/controllers/secretgenerator/secretgenerator_controller.go +++ b/controllers/secretgenerator/secretgenerator_controller.go @@ -71,7 +71,7 @@ func (r *SecretGeneratorReconciler) SetupWithManager(mgr ctrl.Manager) error { return false }, // this only watch for secret deletion if has with annotation - // e.g dashboard-oauth-client but not dashboard-oauth-client-generated + // e.g. 
dashboard-oauth-client but not dashboard-oauth-client-generated DeleteFunc: func(e event.DeleteEvent) bool { if _, found := e.Object.GetAnnotations()[SECRET_NAME_ANNOTATION]; found { return true diff --git a/controllers/status/status.go b/controllers/status/status.go index d664903938b..d0e8865b851 100644 --- a/controllers/status/status.go +++ b/controllers/status/status.go @@ -21,6 +21,8 @@ package status import ( conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" corev1 "k8s.io/api/core/v1" + + "github.com/opendatahub-io/opendatahub-operator/v2/components/trustyai" ) // These constants represent the overall Phase as used by .Status.Phase. @@ -179,7 +181,12 @@ func SetCompleteCondition(conditions *[]conditionsv1.Condition, reason string, m // SetComponentCondition appends Condition Type with const ReadySuffix for given component // when component finished reconcile. func SetComponentCondition(conditions *[]conditionsv1.Condition, component string, reason string, message string, status corev1.ConditionStatus) { - condtype := component + ReadySuffix + var condtype string + if component == trustyai.ComponentName { + condtype = component + "Deprecated" + } else { + condtype = component + ReadySuffix + } conditionsv1.SetStatusCondition(conditions, conditionsv1.Condition{ Type: conditionsv1.ConditionType(condtype), Status: status, @@ -190,6 +197,11 @@ func SetComponentCondition(conditions *[]conditionsv1.Condition, component strin // RemoveComponentCondition remove Condition of giving component. 
func RemoveComponentCondition(conditions *[]conditionsv1.Condition, component string) { - condType := component + ReadySuffix + var condType string + if component == trustyai.ComponentName { + condType = component + "Deprecated" + } else { + condType = component + ReadySuffix + } conditionsv1.RemoveStatusCondition(conditions, conditionsv1.ConditionType(condType)) } diff --git a/docs/upgrade-testing.md b/docs/upgrade-testing.md new file mode 100644 index 00000000000..f00ef9cf56d --- /dev/null +++ b/docs/upgrade-testing.md @@ -0,0 +1,140 @@ +## Upgrade testing +Follow below step for manual upgrade testing + +1. Set environment variables to overwrite values in Makefile. You should overwrite the IMAGE_OWNER and VERSION etc values for pushing container images into your quay.io account. + +``` +IMAGE_OWNER ?= IMAGE_OWNER +VERSION ?= VERSION +``` + +2. Add `replaces` property in [opendatahub-operator.clusterserviceversion.yaml](https://github.com/opendatahub-io/opendatahub-operator/blob/114a137d6289c748d421e7560f6f4fdf925e1b1f/config/manifests/bases/opendatahub-operator.clusterserviceversion.yaml) and add version which you would like to upgrade with next version + +``` +replaces: opendatahub-operator.v2.4.0 +``` + +3. Build and push docker container image + +``` +make image +``` + +4. Build bundle image + +``` +make bundle-build +``` + +5. Push bundle image into registry + +``` +make bundle-push +``` + +6. Build catalog source image + +``` +make catalog-build +``` + +7. Push catalog source image into registry + +``` +make catalog-push +``` +### Cluster +Deploy CatalogSource on cluster to install `Open Data Hub Operator`. + + +8. Deploy CatalogSource. 
Deploy catalog source on cluster by using your catalog source container image and wait until catalog source pod is ready + +```console +cat < 0 { - f.Log.Info("resource created", "namespace", namespace, "resource", gvr) + if len(list.Items) > 0 { + f.Log.Info("resource created", "namespace", namespace, "resource", gvk) return true, nil } diff --git a/pkg/feature/feature.go b/pkg/feature/feature.go index 5ccb5d6f3a5..21aec158343 100644 --- a/pkg/feature/feature.go +++ b/pkg/feature/feature.go @@ -9,10 +9,7 @@ import ( conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/kubernetes" "sigs.k8s.io/controller-runtime/pkg/client" ctrlLog "sigs.k8s.io/controller-runtime/pkg/log" @@ -23,10 +20,9 @@ type Feature struct { Name string Spec *Spec Enabled bool + Tracker *featurev1.FeatureTracker - Clientset *kubernetes.Clientset - DynamicClient dynamic.Interface - Client client.Client + Client client.Client manifests []manifest @@ -147,37 +143,6 @@ func (f *Feature) applyManifests() error { return applyErrors.ErrorOrNil() } -func (f *Feature) CreateConfigMap(cfgMapName string, data map[string]string) error { - configMap := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: cfgMapName, - Namespace: f.Spec.AppNamespace, - OwnerReferences: []metav1.OwnerReference{ - f.AsOwnerReference(), - }, - }, - Data: data, - } - - configMaps := f.Clientset.CoreV1().ConfigMaps(configMap.Namespace) - _, err := configMaps.Get(context.TODO(), configMap.Name, metav1.GetOptions{}) - if k8serrors.IsNotFound(err) { //nolint:gocritic - _, err = configMaps.Create(context.TODO(), configMap, metav1.CreateOptions{}) - if err != nil { - return err - } - } else if k8serrors.IsAlreadyExists(err) { - _, err = configMaps.Update(context.TODO(), configMap, metav1.UpdateOptions{}) - if err != 
nil { - return err - } - } else { - return err - } - - return nil -} - func (f *Feature) addCleanup(cleanupFuncs ...Action) { f.cleanups = append(f.cleanups, cleanupFuncs...) } @@ -212,14 +177,14 @@ func (f *Feature) apply(m manifest) error { } func (f *Feature) AsOwnerReference() metav1.OwnerReference { - return f.Spec.Tracker.ToOwnerReference() + return f.Tracker.ToOwnerReference() } // updateFeatureTrackerStatus updates conditions of a FeatureTracker. // It's deliberately logging errors instead of handing them as it is used in deferred error handling of Feature public API, // which is more predictable. func (f *Feature) updateFeatureTrackerStatus(condType conditionsv1.ConditionType, status corev1.ConditionStatus, reason featurev1.FeaturePhase, message string) { - tracker := f.Spec.Tracker + tracker := f.Tracker // Update the status if tracker.Status.Conditions == nil { @@ -237,5 +202,5 @@ func (f *Feature) updateFeatureTrackerStatus(condType conditionsv1.ConditionType f.Log.Error(err, "Error updating FeatureTracker status") } - f.Spec.Tracker.Status = tracker.Status + f.Tracker.Status = tracker.Status } diff --git a/pkg/feature/feature_tracker_handler.go b/pkg/feature/feature_tracker_handler.go index 28c175a6dd7..223e6a2d0b2 100644 --- a/pkg/feature/feature_tracker_handler.go +++ b/pkg/feature/feature_tracker_handler.go @@ -5,12 +5,10 @@ import ( "fmt" k8serrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client" featurev1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/features/v1" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/common" ) // createFeatureTracker instantiates FeatureTracker for a given Feature. 
It's a cluster-scoped resource used @@ -30,13 +28,13 @@ func (f *Feature) createFeatureTracker() error { return err } - f.Spec.Tracker = tracker + f.Tracker = tracker return nil } func removeFeatureTracker(f *Feature) error { - if f.Spec.Tracker != nil { + if f.Tracker != nil { return deleteTracker(f) } @@ -52,18 +50,11 @@ func removeFeatureTracker(f *Feature) error { } func (f *Feature) getFeatureTracker() (*featurev1.FeatureTracker, error) { - tracker := &featurev1.FeatureTracker{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "features.opendatahub.io/v1", - Kind: "FeatureTracker", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: f.Spec.AppNamespace + "-" + common.TrimToRFC1123Name(f.Name), - }, - Spec: featurev1.FeatureTrackerSpec{ - Source: *f.Spec.Source, - AppNamespace: f.Spec.AppNamespace, - }, + tracker := featurev1.NewFeatureTracker(f.Name, f.Spec.AppNamespace) + + tracker.Spec = featurev1.FeatureTrackerSpec{ + Source: *f.Spec.Source, + AppNamespace: f.Spec.AppNamespace, } err := f.Client.Get(context.Background(), client.ObjectKeyFromObject(tracker), tracker) @@ -74,7 +65,7 @@ func (f *Feature) getFeatureTracker() (*featurev1.FeatureTracker, error) { func setFeatureTrackerIfAbsent(f *Feature) error { tracker, err := f.getFeatureTracker() - f.Spec.Tracker = tracker + f.Tracker = tracker return err } @@ -95,7 +86,7 @@ func (f *Feature) ensureGVKSet(obj runtime.Object) error { } func deleteTracker(f *Feature) error { - err := f.Client.Delete(context.Background(), f.Spec.Tracker) + err := f.Client.Delete(context.Background(), f.Tracker) if err != nil && !k8serrors.IsNotFound(err) { return err } diff --git a/pkg/feature/handler.go b/pkg/feature/handler.go new file mode 100644 index 00000000000..0dc539a5bb1 --- /dev/null +++ b/pkg/feature/handler.go @@ -0,0 +1,69 @@ +package feature + +import ( + "fmt" + + "github.com/hashicorp/go-multierror" + + v1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" + featurev1 
"github.com/opendatahub-io/opendatahub-operator/v2/apis/features/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/components" +) + +// FeaturesHandler coordinates feature creations and removal from within controllers. +type FeaturesHandler struct { + *v1.DSCInitializationSpec + source featurev1.Source + features []*Feature + featuresProvider FeaturesProvider +} + +// FeaturesProvider is a function which allows defining a list of features +// and coupling them with the given initializer. +type FeaturesProvider func(handler *FeaturesHandler) error + +func ClusterFeaturesHandler(dsci *v1.DSCInitialization, def FeaturesProvider) *FeaturesHandler { + return &FeaturesHandler{ + DSCInitializationSpec: &dsci.Spec, + source: featurev1.Source{Type: featurev1.DSCIType, Name: dsci.Name}, + featuresProvider: def, + } +} + +func ComponentFeaturesHandler(component components.ComponentInterface, spec *v1.DSCInitializationSpec, def FeaturesProvider) *FeaturesHandler { + return &FeaturesHandler{ + DSCInitializationSpec: spec, + source: featurev1.Source{Type: featurev1.ComponentType, Name: component.GetComponentName()}, + featuresProvider: def, + } +} + +func (f *FeaturesHandler) Apply() error { + if err := f.featuresProvider(f); err != nil { + return fmt.Errorf("apply phase failed when wiring Feature instances: %w", err) + } + + var applyErrors *multierror.Error + for _, f := range f.features { + applyErrors = multierror.Append(applyErrors, f.Apply()) + } + + return applyErrors.ErrorOrNil() +} + +// Delete executes registered clean-up tasks in the opposite order they were initiated (following a stack structure). +// For instance, this allows for undoing patches before their deletion. +// This approach assumes that Features are either instantiated in the correct sequence +// or are self-contained. 
+func (f *FeaturesHandler) Delete() error { + if err := f.featuresProvider(f); err != nil { + return fmt.Errorf("delete phase failed when wiring Feature instances: %w", err) + } + + var cleanupErrors *multierror.Error + for i := len(f.features) - 1; i >= 0; i-- { + cleanupErrors = multierror.Append(cleanupErrors, f.features[i].Cleanup()) + } + + return cleanupErrors.ErrorOrNil() +} diff --git a/pkg/feature/initializer.go b/pkg/feature/initializer.go deleted file mode 100644 index 412bd62b9f4..00000000000 --- a/pkg/feature/initializer.go +++ /dev/null @@ -1,64 +0,0 @@ -package feature - -import ( - "github.com/hashicorp/go-multierror" - - v1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" - featurev1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/features/v1" - "github.com/opendatahub-io/opendatahub-operator/v2/components" -) - -type FeaturesInitializer struct { - *v1.DSCInitializationSpec - Source featurev1.Source - Features []*Feature - definedFeatures DefinedFeatures -} - -type DefinedFeatures func(initializer *FeaturesInitializer) error - -func ClusterFeaturesInitializer(dsci *v1.DSCInitialization, def DefinedFeatures) *FeaturesInitializer { - return &FeaturesInitializer{ - DSCInitializationSpec: &dsci.Spec, - Source: featurev1.Source{Type: featurev1.DSCIType, Name: dsci.Name}, - definedFeatures: def, - } -} - -func ComponentFeaturesInitializer(component components.ComponentInterface, spec *v1.DSCInitializationSpec, def DefinedFeatures) *FeaturesInitializer { - return &FeaturesInitializer{ - DSCInitializationSpec: spec, - Source: featurev1.Source{Type: featurev1.ComponentType, Name: component.GetComponentName()}, - definedFeatures: def, - } -} - -// Prepare performs validation of the spec and ensures all resources, -// such as Features and their templates, are processed and initialized -// before proceeding with the actual cluster set-up. 
-func (s *FeaturesInitializer) Prepare() error { - return s.definedFeatures(s) -} - -func (s *FeaturesInitializer) Apply() error { - var applyErrors *multierror.Error - - for _, f := range s.Features { - applyErrors = multierror.Append(applyErrors, f.Apply()) - } - - return applyErrors.ErrorOrNil() -} - -// Delete executes registered clean-up tasks in the opposite order they were initiated (following a stack structure). -// For instance, this allows for the undoing patches before its deletion. -// This approach assumes that Features are either instantiated in the correct sequence -// or are self-contained. -func (s *FeaturesInitializer) Delete() error { - var cleanupErrors *multierror.Error - for i := len(s.Features) - 1; i >= 0; i-- { - cleanupErrors = multierror.Append(cleanupErrors, s.Features[i].Cleanup()) - } - - return cleanupErrors.ErrorOrNil() -} diff --git a/pkg/feature/manifest.go b/pkg/feature/manifest.go index 2726e2d0e23..db4b111f1dc 100644 --- a/pkg/feature/manifest.go +++ b/pkg/feature/manifest.go @@ -7,6 +7,7 @@ import ( "html/template" "io" "io/fs" + "path" "path/filepath" "strings" ) @@ -14,6 +15,12 @@ import ( //go:embed templates var embeddedFiles embed.FS +var ( + BaseDir = "templates" + ServiceMeshDir = path.Join(BaseDir, "servicemesh") + ServerlessDir = path.Join(BaseDir, "serverless") +) + type manifest struct { name, path, @@ -27,6 +34,10 @@ func loadManifestsFrom(fsys fs.FS, path string) ([]manifest, error) { var manifests []manifest err := fs.WalkDir(fsys, path, func(path string, dirEntry fs.DirEntry, walkErr error) error { + if walkErr != nil { + return walkErr + } + _, err := dirEntry.Info() if err != nil { return err diff --git a/pkg/feature/raw_resources.go b/pkg/feature/raw_resources.go index e388737da5d..14159b72f15 100644 --- a/pkg/feature/raw_resources.go +++ b/pkg/feature/raw_resources.go @@ -24,8 +24,8 @@ import ( k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" k8stypes "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" ) const ( @@ -83,39 +83,18 @@ func (f *Feature) patchResources(resources string) error { u := &unstructured.Unstructured{} if err := yaml.Unmarshal([]byte(str), u); err != nil { f.Log.Error(err, "error unmarshalling yaml") - return errors.WithStack(err) } ensureNamespaceIsSet(f, u) - gvr := schema.GroupVersionResource{ - Group: strings.ToLower(u.GroupVersionKind().Group), - Version: u.GroupVersionKind().Version, - Resource: strings.ToLower(u.GroupVersionKind().Kind) + "s", - } - - // Convert the individual resource patch from YAML to JSON patchAsJSON, err := yaml.YAMLToJSON([]byte(str)) if err != nil { - f.Log.Error(err, "error converting yaml to json") - - return errors.WithStack(err) + return fmt.Errorf("error converting yaml to json: %w", err) } - _, err = f.DynamicClient.Resource(gvr). - Namespace(u.GetNamespace()). 
- Patch(context.TODO(), u.GetName(), k8stypes.MergePatchType, patchAsJSON, metav1.PatchOptions{}) - if err != nil { - f.Log.Error(err, "error patching resource", - "gvr", fmt.Sprintf("%+v\n", gvr), - "patch", fmt.Sprintf("%+v\n", u), - "json", fmt.Sprintf("%+v\n", patchAsJSON)) - return errors.WithStack(err) - } - - if err != nil { - return errors.WithStack(err) + if err := f.Client.Patch(context.TODO(), u, client.RawPatch(k8stypes.MergePatchType, patchAsJSON)); err != nil { + return fmt.Errorf("failed patching resource: %w", err) } } diff --git a/pkg/feature/serverless/conditions.go b/pkg/feature/serverless/conditions.go index 739e9ebd2e1..6396e76c9da 100644 --- a/pkg/feature/serverless/conditions.go +++ b/pkg/feature/serverless/conditions.go @@ -4,11 +4,12 @@ import ( "context" "fmt" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "sigs.k8s.io/controller-runtime/pkg/client" ctrlLog "sigs.k8s.io/controller-runtime/pkg/log" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/gvr" ) const ( @@ -18,9 +19,11 @@ const ( var log = ctrlLog.Log.WithName("features") func EnsureServerlessAbsent(f *feature.Feature) error { - list, err := f.DynamicClient.Resource(gvr.KnativeServing).Namespace("").List(context.TODO(), v1.ListOptions{}) - if err != nil { - return err + list := &unstructured.UnstructuredList{} + list.SetGroupVersionKind(cluster.KnativeServingGVK) + + if err := f.Client.List(context.TODO(), list, client.InNamespace("")); err != nil { + return fmt.Errorf("failed to list KnativeServings: %w", err) } if len(list.Items) == 0 { @@ -55,4 +58,4 @@ func EnsureServerlessOperatorInstalled(f *feature.Feature) error { return nil } -var EnsureServerlessServingDeployed = feature.WaitForResourceToBeCreated(KnativeServingNamespace, gvr.KnativeServing) +var EnsureServerlessServingDeployed = 
feature.WaitForResourceToBeCreated(KnativeServingNamespace, cluster.KnativeServingGVK) diff --git a/pkg/feature/serverless/loaders.go b/pkg/feature/serverless/loaders.go index 115034dbdcc..966021e39da 100644 --- a/pkg/feature/serverless/loaders.go +++ b/pkg/feature/serverless/loaders.go @@ -23,7 +23,7 @@ func ServingIngressDomain(f *feature.Feature) error { domain := strings.TrimSpace(f.Spec.Serving.IngressGateway.Domain) if len(domain) == 0 { var errDomain error - domain, errDomain = GetDomain(f.DynamicClient) + domain, errDomain = GetDomain(f.Client) if errDomain != nil { return fmt.Errorf("failed to fetch OpenShift domain to generate certificate for Serverless: %w", errDomain) } diff --git a/pkg/feature/serverless/resources.go b/pkg/feature/serverless/resources.go index d06e4f1ae21..5acf5514974 100644 --- a/pkg/feature/serverless/resources.go +++ b/pkg/feature/serverless/resources.go @@ -2,29 +2,35 @@ package serverless import ( "context" + "fmt" "github.com/pkg/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/client-go/dynamic" + "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/gvr" ) func ServingCertificateResource(f *feature.Feature) error { return f.CreateSelfSignedCertificate(f.Spec.KnativeCertificateSecret, f.Spec.Serving.IngressGateway.Certificate.Type, f.Spec.KnativeIngressDomain, f.Spec.ControlPlane.Namespace) } -func GetDomain(dynamicClient dynamic.Interface) (string, error) { - cluster, err := dynamicClient.Resource(gvr.OpenshiftIngress).Get(context.TODO(), "cluster", metav1.GetOptions{}) - if err != nil { - return "", err +func GetDomain(c client.Client) (string, error) { + ingress := &unstructured.Unstructured{} + ingress.SetGroupVersionKind(cluster.OpenshiftIngressGVK) + + if err := 
c.Get(context.TODO(), client.ObjectKey{ + Namespace: "", + Name: "cluster", + }, ingress); err != nil { + return "", fmt.Errorf("failed fetching cluster's ingress details: %w", err) } - domain, found, err := unstructured.NestedString(cluster.Object, "spec", "domain") + domain, found, err := unstructured.NestedString(ingress.Object, "spec", "domain") if !found { return "", errors.New("spec.domain not found") } + return domain, err } diff --git a/pkg/feature/servicemesh/conditions.go b/pkg/feature/servicemesh/conditions.go index e352c9545a7..947d811da76 100644 --- a/pkg/feature/servicemesh/conditions.go +++ b/pkg/feature/servicemesh/conditions.go @@ -7,13 +7,12 @@ import ( "github.com/hashicorp/go-multierror" "github.com/pkg/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/dynamic" + "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/gvr" ) const ( @@ -55,7 +54,7 @@ func WaitForControlPlaneToBeReady(f *feature.Feature) error { f.Log.Info("waiting for control plane components to be ready", "control-plane", smcp, "namespace", smcpNs, "duration (s)", duration.Seconds()) return wait.PollUntilContextTimeout(context.TODO(), interval, duration, false, func(ctx context.Context) (bool, error) { - ready, err := CheckControlPlaneComponentReadiness(f.DynamicClient, smcp, smcpNs) + ready, err := CheckControlPlaneComponentReadiness(f.Client, smcp, smcpNs) if ready { f.Log.Info("done waiting for control plane components to be ready", "control-plane", smcp, "namespace", smcpNs) @@ -65,13 +64,19 @@ func WaitForControlPlaneToBeReady(f *feature.Feature) error { }) } -func CheckControlPlaneComponentReadiness(dynamicClient dynamic.Interface, smcp, smcpNs string) (bool, error) { - unstructObj, err 
:= dynamicClient.Resource(gvr.SMCP).Namespace(smcpNs).Get(context.TODO(), smcp, metav1.GetOptions{}) +func CheckControlPlaneComponentReadiness(c client.Client, smcpName, smcpNs string) (bool, error) { + smcpObj := &unstructured.Unstructured{} + smcpObj.SetGroupVersionKind(cluster.ServiceMeshControlPlaneGVK) + err := c.Get(context.TODO(), client.ObjectKey{ + Namespace: smcpNs, + Name: smcpName, + }, smcpObj) + if err != nil { return false, fmt.Errorf("failed to find Service Mesh Control Plane: %w", err) } - components, found, err := unstructured.NestedMap(unstructObj.Object, "status", "readiness", "components") + components, found, err := unstructured.NestedMap(smcpObj.Object, "status", "readiness", "components") if err != nil || !found { return false, fmt.Errorf("status conditions not found or error in parsing of Service Mesh Control Plane: %w", err) } diff --git a/pkg/feature/types.go b/pkg/feature/types.go index 4857bb1d243..9733587b074 100644 --- a/pkg/feature/types.go +++ b/pkg/feature/types.go @@ -15,7 +15,6 @@ type Spec struct { Domain string KnativeCertificateSecret string KnativeIngressDomain string - Tracker *featurev1.FeatureTracker Source *featurev1.Source } diff --git a/pkg/gvr/gvr.go b/pkg/gvr/gvr.go deleted file mode 100644 index c8066ea4a12..00000000000 --- a/pkg/gvr/gvr.go +++ /dev/null @@ -1,35 +0,0 @@ -package gvr - -import "k8s.io/apimachinery/pkg/runtime/schema" - -var ( - KnativeServing = schema.GroupVersionResource{ - Group: "operator.knative.dev", - Version: "v1beta1", - Resource: "knativeservings", - } - - OpenshiftIngress = schema.GroupVersionResource{ - Group: "config.openshift.io", - Version: "v1", - Resource: "ingresses", - } - - FeatureTracker = schema.GroupVersionResource{ - Group: "features.opendatahub.io", - Version: "v1", - Resource: "featuretrackers", - } - - SMCP = schema.GroupVersionResource{ - Group: "maistra.io", - Version: "v2", - Resource: "servicemeshcontrolplanes", - } - - NetworkPolicies = schema.GroupVersionResource{ - 
Group: "networking.k8s.io", - Version: "v1", - Resource: "networkpolicies", - } -) diff --git a/pkg/trustedcabundle/trustedcabundle.go b/pkg/trustedcabundle/trustedcabundle.go new file mode 100644 index 00000000000..a058d7f8af4 --- /dev/null +++ b/pkg/trustedcabundle/trustedcabundle.go @@ -0,0 +1,210 @@ +package trustedcabundle + +import ( + "context" + "fmt" + "strconv" + "strings" + "time" + + "github.com/go-logr/logr" + operatorv1 "github.com/openshift/api/operator/v1" + corev1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "sigs.k8s.io/controller-runtime/pkg/client" + + dsci "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" +) + +const ( + InjectionOfCABundleAnnotatoion = "security.opendatahub.io/inject-trusted-ca-bundle" + CAConfigMapName = "odh-trusted-ca-bundle" + CADataFieldName = "odh-ca-bundle.crt" +) + +func ShouldInjectTrustedBundle(ns client.Object) bool { + if !strings.HasPrefix(ns.GetName(), "openshift-") && !strings.HasPrefix(ns.GetName(), "kube-") && + ns.GetName() != "default" && ns.GetName() != "openshift" && !HasCABundleAnnotationDisabled(ns) { + return true + } + return false +} + +func HasCABundleAnnotationDisabled(ns client.Object) bool { + if value, found := ns.GetAnnotations()[InjectionOfCABundleAnnotatoion]; found { + enabled, err := strconv.ParseBool(value) + return err == nil && !enabled + } + return false +} + +func AddCABundleConfigMapInAllNamespaces(ctx context.Context, cli client.Client, dscInit *dsci.DSCInitialization) error { + namespaceList := &corev1.NamespaceList{} + err := cli.List(ctx, namespaceList) + if err != nil { + return err + } + + for i := range namespaceList.Items { + ns := &namespaceList.Items[i] + if ShouldInjectTrustedBundle(ns) { + if err := wait.PollUntilContextTimeout(ctx, time.Second*1, time.Second*10, false, func(ctx context.Context) (bool, error) { + if cmErr := 
CreateOdhTrustedCABundleConfigMap(ctx, cli, ns.Name, dscInit.Spec.TrustedCABundle.CustomCABundle); cmErr != nil { + // Logging the error for debugging + fmt.Printf("error creating cert configmap in namespace %v: %v", ns.Name, cmErr) + return false, nil + } + return true, nil + }); err != nil { + return err + } + } + } + return nil +} + +// createOdhTrustedCABundleConfigMap creates a configMap 'odh-trusted-ca-bundle' -- Certificates for the cluster +// trusted CA Cert Bundle. +func CreateOdhTrustedCABundleConfigMap(ctx context.Context, cli client.Client, namespace string, customCAData string) error { + // Expected configmap for the given namespace + desiredConfigMap := &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + Kind: "ConfigMap", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: CAConfigMapName, + Namespace: namespace, + Labels: map[string]string{ + "app.kubernetes.io/part-of": "opendatahub-operator", + // Label required for the Cluster Network Operator(CNO) to inject the cluster trusted CA bundle + // into .data["ca-bundle.crt"] + "config.openshift.io/inject-trusted-cabundle": "true", + }, + }, + // Add the DSCInitialzation specified TrustedCABundle.CustomCABundle to CM's data.odh-ca-bundle.crt field + // Additionally, the CNO operator will automatically create and maintain ca-bundle.crt + // if label 'config.openshift.io/inject-trusted-cabundle' is true + Data: map[string]string{CADataFieldName: customCAData}, + } + + // Create Configmap if doesn't exist + foundConfigMap := &corev1.ConfigMap{} + err := cli.Get(ctx, client.ObjectKey{ + Name: CAConfigMapName, + Namespace: namespace, + }, foundConfigMap) + if err != nil { + if apierrs.IsNotFound(err) { + err = cli.Create(ctx, desiredConfigMap) + if err != nil && !apierrs.IsAlreadyExists(err) { + return err + } + return nil + } + return err + } + + if foundConfigMap.Data[CADataFieldName] != customCAData { + foundConfigMap.Data[CADataFieldName] = customCAData + return cli.Update(ctx, 
foundConfigMap) + } + + return nil +} + +func DeleteOdhTrustedCABundleConfigMap(ctx context.Context, cli client.Client, namespace string) error { + // Delete Configmap if exists + foundConfigMap := &corev1.ConfigMap{} + err := cli.Get(ctx, client.ObjectKey{ + Name: CAConfigMapName, + Namespace: namespace, + }, foundConfigMap) + if err != nil { + if apierrs.IsNotFound(err) { + return nil + } + return err + } + return cli.Delete(ctx, foundConfigMap) +} + +// IsTrustedCABundleUpdated verifies for a given namespace if the odh-ca-bundle.crt field in cert configmap is updated. +func IsTrustedCABundleUpdated(ctx context.Context, cli client.Client, dscInit *dsci.DSCInitialization) (bool, error) { + usernamespace := &corev1.Namespace{} + if err := cli.Get(ctx, client.ObjectKey{Name: dscInit.Spec.ApplicationsNamespace}, usernamespace); err != nil { + if apierrs.IsNotFound(err) { + // if namespace is not found, return true. This is to ensure we reconcile, and check for other namespaces. + return true, nil + } + return false, err + } + + if HasCABundleAnnotationDisabled(usernamespace) { + return false, nil + } + + foundConfigMap := &corev1.ConfigMap{} + err := cli.Get(ctx, client.ObjectKey{ + Name: CAConfigMapName, + Namespace: dscInit.Spec.ApplicationsNamespace, + }, foundConfigMap) + + if err != nil { + if apierrs.IsNotFound(err) { + return false, nil + } + return false, err + } + + return foundConfigMap.Data[CADataFieldName] != dscInit.Spec.TrustedCABundle.CustomCABundle, nil +} + +func ConfigureTrustedCABundle(ctx context.Context, cli client.Client, log logr.Logger, dscInit *dsci.DSCInitialization, managementStateChanged bool) error { + switch dscInit.Spec.TrustedCABundle.ManagementState { + case operatorv1.Managed: + log.Info("Trusted CA Bundle injection is set to `Managed` state. 
Reconciling to add/update cert configmaps") + istrustedCABundleUpdated, err := IsTrustedCABundleUpdated(ctx, cli, dscInit) + if err != nil { + return err + } + + if istrustedCABundleUpdated || managementStateChanged { + err = AddCABundleConfigMapInAllNamespaces(ctx, cli, dscInit) + if err != nil { + log.Error(err, "error adding configmap to all namespaces", "name", CAConfigMapName) + return err + } + } + case operatorv1.Removed: + log.Info("Trusted CA Bundle injection is set to `Removed` state. Reconciling to delete all cert configmaps") + err := RemoveCABundleConfigMapInAllNamespaces(ctx, cli) + if err != nil { + log.Error(err, "error deleting configmap from all namespaces", "name", CAConfigMapName) + return err + } + + case operatorv1.Unmanaged: + log.Info("Trusted CA Bundle injection is set to `Unmanaged` state. Cert configmaps are no longer managed by DSCI") + } + + return nil +} + +func RemoveCABundleConfigMapInAllNamespaces(ctx context.Context, cli client.Client) error { + namespaceList := &corev1.NamespaceList{} + err := cli.List(ctx, namespaceList) + if err != nil { + return err + } + + for i := range namespaceList.Items { + ns := &namespaceList.Items[i] + if err := DeleteOdhTrustedCABundleConfigMap(ctx, cli, ns.Name); err != nil { + return err + } + } + return nil +} diff --git a/pkg/upgrade/upgrade.go b/pkg/upgrade/upgrade.go index ecbf0711916..a4e48232de2 100644 --- a/pkg/upgrade/upgrade.go +++ b/pkg/upgrade/upgrade.go @@ -4,15 +4,19 @@ import ( "context" "fmt" "os" + "reflect" "strings" "time" "github.com/hashicorp/go-multierror" operatorv1 "github.com/openshift/api/operator/v1" + routev1 "github.com/openshift/api/route/v1" ofapi "github.com/operator-framework/api/pkg/operators/v1alpha1" olmclientset "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1alpha1" + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" appsv1 "k8s.io/api/apps/v1" corev1 
"k8s.io/api/core/v1" + authv1 "k8s.io/api/rbac/v1" apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -31,7 +35,6 @@ import ( "github.com/opendatahub-io/opendatahub-operator/v2/components/kueue" "github.com/opendatahub-io/opendatahub-operator/v2/components/modelmeshserving" "github.com/opendatahub-io/opendatahub-operator/v2/components/ray" - "github.com/opendatahub-io/opendatahub-operator/v2/components/trustyai" "github.com/opendatahub-io/opendatahub-operator/v2/components/workbenches" infrav1 "github.com/opendatahub-io/opendatahub-operator/v2/infrastructure/v1" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" @@ -42,7 +45,6 @@ const ( // DeleteConfigMapLabel is the label for configMap used to trigger operator uninstall // TODO: Label should be updated if addon name changes. DeleteConfigMapLabel = "api.openshift.com/addon-managed-odh-delete" - // odhGeneratedNamespaceLabel is the label added to all the namespaces genereated by odh-deployer. ) // OperatorUninstall deletes all the externally generated resources. This includes monitoring resources and applications @@ -70,7 +72,7 @@ func OperatorUninstall(cli client.Client, cfg *rest.Config) error { return fmt.Errorf("error getting generated namespaces : %w", err) } - // Return if any one of the namespaces is Terminating due to resources that are in process of deletion. (e.g CRDs) + // Return if any one of the namespaces is Terminating due to resources that are in process of deletion. (e.g. 
CRDs) for _, namespace := range generatedNamespaces.Items { if namespace.Status.Phase == corev1.NamespaceTerminating { return fmt.Errorf("waiting for namespace %v to be deleted", namespace.Name) @@ -83,16 +85,43 @@ func OperatorUninstall(cli client.Client, cfg *rest.Config) error { if err := cli.Delete(context.TODO(), &namespace, []client.DeleteOption{}...); err != nil { return fmt.Errorf("error deleting namespace %v: %w", namespace.Name, err) } - fmt.Printf("Namespace %s deleted as a part of uninstall.\n", namespace.Name) + fmt.Printf("Namespace %s deleted as a part of uninstallation.\n", namespace.Name) } } - // Wait for all resources to get cleaned up + // give enough time for namespace deletion before proceed time.Sleep(10 * time.Second) - fmt.Printf("All resources deleted as part of uninstall. Removing the operator csv\n") + // We can only assume the subscription is using standard names + // if user install by creating different named subs, then we will not know the name + // we cannot remove CSV before remove subscription because that need SA account + operatorNs, err := GetOperatorNamespace() + if err != nil { + return err + } + fmt.Printf("Removing operator subscription which in turn will remove installplan\n") + subsName := "opendatahub-operator" + if platform == deploy.SelfManagedRhods { + subsName = "rhods-operator" + } else if platform == deploy.ManagedRhods { + subsName = "addon-managed-odh" + } + sub, _ := deploy.SubscriptionExists(cli, operatorNs, subsName) + if sub == nil { + fmt.Printf("Could not find subscription %s in namespace %s. 
Maybe you have a different one", subsName, operatorNs) + } else { + if err := cli.Delete(context.TODO(), sub); err != nil { + return fmt.Errorf("error deleting subscription %s: %w", sub.Name, err) + } + } + + fmt.Printf("Removing the operator CSV in turn remove operator deployment\n") + time.Sleep(5 * time.Second) + + err = removeCSV(cli, cfg) - return removeCsv(cli, cfg) + fmt.Printf("All resources deleted as part of uninstall.") + return err } func removeDSCInitialization(cli client.Client) error { @@ -173,9 +202,6 @@ func CreateDefaultDSC(cli client.Client, _ deploy.Platform) error { Kueue: kueue.Kueue{ Component: components.Component{ManagementState: operatorv1.Removed}, }, - TrustyAI: trustyai.TrustyAI{ - Component: components.Component{ManagementState: operatorv1.Managed}, - }, }, }, } @@ -211,6 +237,9 @@ func CreateDefaultDSCI(cli client.Client, _ deploy.Platform, appNamespace, monNa MetricsCollection: "Istio", }, }, + TrustedCABundle: dsci.TrustedCABundleSpec{ + ManagementState: "Managed", + }, } defaultDsci := &dsci.DSCInitialization{ @@ -313,6 +342,47 @@ func UpdateFromLegacyVersion(cli client.Client, platform deploy.Platform, appNS return nil } +func CleanupExistingResource(cli client.Client, platform deploy.Platform) error { + var multiErr *multierror.Error + montNamespace := "redhat-ods-monitoring" + ctx := context.TODO() + + // Special Handling of cleanup of deprecated model monitoring stack + if platform == deploy.ManagedRhods { + deprecatedDeployments := []string{"rhods-prometheus-operator"} + multiErr = multierror.Append(multiErr, deleteDeprecatedResources(ctx, cli, montNamespace, deprecatedDeployments, &appsv1.DeploymentList{})) + + deprecatedStatefulsets := []string{"prometheus-rhods-model-monitoring"} + multiErr = multierror.Append(multiErr, deleteDeprecatedResources(ctx, cli, montNamespace, deprecatedStatefulsets, &appsv1.StatefulSetList{})) + + deprecatedServices := []string{"rhods-model-monitoring"} + multiErr = multierror.Append(multiErr, 
deleteDeprecatedResources(ctx, cli, montNamespace, deprecatedServices, &corev1.ServiceList{})) + + deprecatedRoutes := []string{"rhods-model-monitoring"} + multiErr = multierror.Append(multiErr, deleteDeprecatedResources(ctx, cli, montNamespace, deprecatedRoutes, &routev1.RouteList{})) + + deprecatedSecrets := []string{"rhods-monitoring-oauth-config"} + multiErr = multierror.Append(multiErr, deleteDeprecatedResources(ctx, cli, montNamespace, deprecatedSecrets, &corev1.SecretList{})) + + deprecatedClusterroles := []string{"rhods-namespace-read", "rhods-prometheus-operator"} + multiErr = multierror.Append(multiErr, deleteDeprecatedResources(ctx, cli, montNamespace, deprecatedClusterroles, &authv1.ClusterRoleList{})) + + deprecatedClusterrolebindings := []string{"rhods-namespace-read", "rhods-prometheus-operator"} + multiErr = multierror.Append(multiErr, deleteDeprecatedResources(ctx, cli, montNamespace, deprecatedClusterrolebindings, &authv1.ClusterRoleBindingList{})) + + deprecatedServiceAccounts := []string{"rhods-prometheus-operator"} + multiErr = multierror.Append(multiErr, deleteDeprecatedResources(ctx, cli, montNamespace, deprecatedServiceAccounts, &corev1.ServiceAccountList{})) + + deprecatedServicemonitors := []string{"modelmesh-federated-metrics"} + multiErr = multierror.Append(multiErr, deleteDeprecatedServiceMonitors(ctx, cli, montNamespace, deprecatedServicemonitors)) + } + // common logic for both self-managed and managed + deprecatedOperatorSM := []string{"rhods-monitor-federation2"} + multiErr = multierror.Append(multiErr, deleteDeprecatedServiceMonitors(ctx, cli, montNamespace, deprecatedOperatorSM)) + + return multiErr.ErrorOrNil() +} + func GetOperatorNamespace() (string, error) { data, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") if err == nil { @@ -362,7 +432,7 @@ func RemoveKfDefInstances(cli client.Client, _ deploy.Platform) error { return nil } -func removeCsv(c client.Client, r *rest.Config) error { +func 
removeCSV(c client.Client, r *rest.Config) error { // Get watchNamespace operatorNamespace, err := GetOperatorNamespace() if err != nil { @@ -375,7 +445,7 @@ func removeCsv(c client.Client, r *rest.Config) error { } if operatorCsv != nil { - fmt.Printf("Deleting csv %s\n", operatorCsv.Name) + fmt.Printf("Deleting CSV %s\n", operatorCsv.Name) err = c.Delete(context.TODO(), operatorCsv, []client.DeleteOption{}...) if err != nil { if apierrs.IsNotFound(err) { @@ -385,9 +455,9 @@ func removeCsv(c client.Client, r *rest.Config) error { return fmt.Errorf("error deleting clusterserviceversion: %w", err) } fmt.Printf("Clusterserviceversion %s deleted as a part of uninstall.\n", operatorCsv.Name) + return nil } fmt.Printf("No clusterserviceversion for the operator found.\n") - return nil } @@ -402,7 +472,7 @@ func getClusterServiceVersion(cfg *rest.Config, watchNameSpace string) (*ofapi.C return nil, err } - // get csv with CRD DataScienceCluster + // get CSV with CRD DataScienceCluster if len(csvs.Items) != 0 { for _, csv := range csvs.Items { for _, operatorCR := range csv.Spec.CustomResourceDefinitions.Owned { @@ -415,6 +485,62 @@ func getClusterServiceVersion(cfg *rest.Config, watchNameSpace string) (*ofapi.C return nil, nil } +func deleteDeprecatedResources(ctx context.Context, cli client.Client, namespace string, resourceList []string, resourceType client.ObjectList) error { //nolint:unparam + var multiErr *multierror.Error + listOpts := &client.ListOptions{Namespace: namespace} + if err := cli.List(ctx, resourceType, listOpts); err != nil { + multiErr = multierror.Append(multiErr, err) + } + items := reflect.ValueOf(resourceType).Elem().FieldByName("Items") + for i := 0; i < items.Len(); i++ { + item := items.Index(i).Addr().Interface().(client.Object) //nolint:errcheck,forcetypeassert + for _, name := range resourceList { + if name == item.GetName() { + fmt.Printf("Attempting to delete %s in namespace %s\n", item.GetName(), namespace) + err := cli.Delete(ctx, item) 
+ if err != nil { + if apierrs.IsNotFound(err) { + fmt.Printf("Could not find %s in namespace %s\n", item.GetName(), namespace) + } else { + multiErr = multierror.Append(multiErr, err) + } + } + fmt.Printf("Successfully deleted %s\n", item.GetName()) + } + } + } + return multiErr.ErrorOrNil() +} + +// Need to handle ServiceMonitor deletion separately as the generic function does not work for ServiceMonitors because of how the package is built. +func deleteDeprecatedServiceMonitors(ctx context.Context, cli client.Client, namespace string, resourceList []string) error { + var multiErr *multierror.Error + listOpts := &client.ListOptions{Namespace: namespace} + servicemonitors := &monitoringv1.ServiceMonitorList{} + if err := cli.List(ctx, servicemonitors, listOpts); err != nil { + multiErr = multierror.Append(multiErr, err) + } + + for _, servicemonitor := range servicemonitors.Items { + servicemonitor := servicemonitor + for _, name := range resourceList { + if name == servicemonitor.Name { + fmt.Printf("Attempting to delete %s in namespace %s\n", servicemonitor.Name, namespace) + err := cli.Delete(ctx, servicemonitor) + if err != nil { + if apierrs.IsNotFound(err) { + fmt.Printf("Could not find %s in namespace %s\n", servicemonitor.Name, namespace) + } else { + multiErr = multierror.Append(multiErr, err) + } + } + fmt.Printf("Successfully deleted %s\n", servicemonitor.Name) + } + } + } + return multiErr.ErrorOrNil() +} + func deleteResource(cli client.Client, namespace string, resourceType string) error { // In v2, Deployment selectors use a label "app.opendatahub.io/" which is // not present in v1. 
Since label selectors are immutable, we need to delete the existing @@ -462,7 +588,7 @@ func deleteDeploymentsAndCheck(ctx context.Context, cli client.Client, namespace if err := cli.List(ctx, deployments, listOpts); err != nil { return false, nil //nolint:nilerr } - // filter deployment which has the new label to limit that we do not over kill other deployment + // filter deployment which has the new label to limit that we do not overkill other deployment // this logic can be used even when upgrade from v2.4 to v2.5 without remove it markedForDeletion := []appsv1.Deployment{} for _, deployment := range deployments.Items { @@ -553,3 +679,28 @@ func deleteStatefulsetsAndCheck(ctx context.Context, cli client.Client, namespac return true, multiErr.ErrorOrNil() } + +func RemoveDeprecatedTrustyAI(cli client.Client, platform deploy.Platform) error { + existingDSCList := &dsc.DataScienceClusterList{} + err := cli.List(context.TODO(), existingDSCList) + if err != nil { + return fmt.Errorf("error getting existing DSC: %w", err) + } + + switch len(existingDSCList.Items) { + case 0: + return nil + case 1: + existingDSC := existingDSCList.Items[0] + if platform == deploy.ManagedRhods || platform == deploy.SelfManagedRhods { + if existingDSC.Spec.Components.TrustyAI.ManagementState != operatorv1.Removed { + existingDSC.Spec.Components.TrustyAI.ManagementState = operatorv1.Removed + err := cli.Update(context.TODO(), &existingDSC) + if err != nil { + return fmt.Errorf("error updating TrustyAI component: %w", err) + } + } + } + } + return nil +} diff --git a/tests/e2e/dsc_cfmap_deletion_test.go b/tests/e2e/dsc_cfmap_deletion_test.go index ba887219af8..ae95fabcce7 100644 --- a/tests/e2e/dsc_cfmap_deletion_test.go +++ b/tests/e2e/dsc_cfmap_deletion_test.go @@ -46,6 +46,11 @@ func cfgMapDeletionTestSuite(t *testing.T) { require.NoError(t, err, "Error while deleting owned namespaces") }) + // t.Run("DS Projects should be deleted", func(t *testing.T) { + // err = 
testCtx.testDSProjectDeletion() + // require.NoError(t, err, "Error while deleting DS Projects") + // }) + t.Run("dsci should be deleted", func(t *testing.T) { err = testCtx.testDSCIDeletion() require.NoError(t, err, "failed deleting DSCI") diff --git a/tests/e2e/dsc_creation_test.go b/tests/e2e/dsc_creation_test.go index f4b1619a9d2..b5fcc71b888 100644 --- a/tests/e2e/dsc_creation_test.go +++ b/tests/e2e/dsc_creation_test.go @@ -178,14 +178,12 @@ func (tc *testContext) testAllApplicationCreation(t *testing.T) error { //nolint // speed testing in parallel t.Parallel() err = tc.testApplicationCreation(&(tc.testDsc.Spec.Components.Kserve)) + // test Unmanaged state, since servicemesh is not installed. if tc.testDsc.Spec.Components.Kserve.ManagementState == operatorv1.Managed { - if err != nil { - // depedent operator error, as expected - if strings.Contains(err.Error(), "Please install the operator before enabling component") { - t.Logf("expected error: %v", err.Error()) - } else { - require.NoError(t, err, "error validating application %v when enabled", tc.testDsc.Spec.Components.Kserve.GetComponentName()) - } + if err != nil && tc.testDsc.Spec.Components.Kserve.Serving.ManagementState == operatorv1.Unmanaged { + require.NoError(t, err, "error validating application %v when enabled", tc.testDsc.Spec.Components.Workbenches.GetComponentName()) + } else { + require.NoError(t, err, "error validating application %v when enabled", tc.testDsc.Spec.Components.Kserve.GetComponentName()) } } else { require.Error(t, err, "error validating application %v when disabled", tc.testDsc.Spec.Components.Kserve.GetComponentName()) @@ -237,17 +235,6 @@ func (tc *testContext) testAllApplicationCreation(t *testing.T) error { //nolint } }) - t.Run("Validate Kueue", func(t *testing.T) { - // speed testing in parallel - t.Parallel() - err = tc.testApplicationCreation(&(tc.testDsc.Spec.Components.Kueue)) - if tc.testDsc.Spec.Components.Kueue.ManagementState == operatorv1.Managed { - 
require.NoError(t, err, "error validating application %v when enabled", tc.testDsc.Spec.Components.Kueue.GetComponentName()) - } else { - require.Error(t, err, "error validating application %v when disabled", tc.testDsc.Spec.Components.Kueue.GetComponentName()) - } - }) - t.Run("Validate TrustyAI", func(t *testing.T) { // speed testing in parallel t.Parallel() diff --git a/tests/e2e/helper_test.go b/tests/e2e/helper_test.go index 40dbd1e45f4..592a95cff4b 100644 --- a/tests/e2e/helper_test.go +++ b/tests/e2e/helper_test.go @@ -25,6 +25,7 @@ import ( "github.com/opendatahub-io/opendatahub-operator/v2/components/ray" "github.com/opendatahub-io/opendatahub-operator/v2/components/trustyai" "github.com/opendatahub-io/opendatahub-operator/v2/components/workbenches" + infrav1 "github.com/opendatahub-io/opendatahub-operator/v2/infrastructure/v1" ) func (tc *testContext) waitForControllerDeployment(name string, replicas int32) error { @@ -63,6 +64,10 @@ func setupDSCICR() *dsci.DSCInitialization { ManagementState: "Managed", Namespace: "opendatahub", }, + TrustedCABundle: dsci.TrustedCABundleSpec{ + ManagementState: "Managed", + CustomCABundle: "", + }, }, } return dsciTest @@ -100,6 +105,9 @@ func setupDSCInstance() *dsc.DataScienceCluster { Component: components.Component{ ManagementState: operatorv1.Managed, }, + Serving: infrav1.ServingSpec{ + ManagementState: operatorv1.Unmanaged, + }, }, CodeFlare: codeflare.CodeFlare{ Component: components.Component{ diff --git a/tests/envtestutil/cleaner.go b/tests/envtestutil/cleaner.go index cfa70675902..1883a3faf4b 100644 --- a/tests/envtestutil/cleaner.go +++ b/tests/envtestutil/cleaner.go @@ -18,7 +18,7 @@ import ( ) // Cleaner is a struct to perform deletion of resources, -// enforcing removal of finalizers. Otherwise deletion of namespaces wouldn't be possible. +// enforcing removal of finalizers. Otherwise, deletion of namespaces wouldn't be possible. 
// See: https://book.kubebuilder.io/reference/envtest.html#namespace-usage-limitation // Based on https://github.com/kubernetes-sigs/controller-runtime/issues/880#issuecomment-749742403 type Cleaner struct { @@ -111,7 +111,10 @@ func (c *Cleaner) DeleteAll(objects ...client.Object) { // ability to patch the /finalize subresource on the namespace _, err = c.clientset.CoreV1().Namespaces().Finalize(context.Background(), ns, metav1.UpdateOptions{}) return err - }, c.timeout, c.interval).Should(Succeed()) + }). + WithTimeout(c.timeout). + WithPolling(c.interval). + Should(Succeed()) } Eventually(func() metav1.StatusReason { diff --git a/tests/envtestutil/name_gen.go b/tests/envtestutil/name_gen.go index e336a3fd687..44d39681282 100644 --- a/tests/envtestutil/name_gen.go +++ b/tests/envtestutil/name_gen.go @@ -1,60 +1,20 @@ package envtestutil import ( - "crypto/rand" - "encoding/hex" - "math" - "math/big" -) - -var letters = []rune("abcdefghijklmnopqrstuvwxyz") - -func RandomUUIDName(numlen int) string { - uuidBytes := make([]byte, numlen) - _, _ = rand.Read(uuidBytes) - - return hex.EncodeToString(uuidBytes)[:numlen] -} - -func AppendRandomNameTo(prefix string) string { - return ConcatToMax(63, prefix, GenerateString(16)) -} - -// GenerateString generates random alphabetical name which can be used for example as application or namespace name. -// Maximum length is capped at 63 characters. -// -// Don't forget to seed before using this function, e.g. rand.Seed(time.Now().UTC().UnixNano()) -// otherwise you will always get the same value. 
-func GenerateString(length int) string { - if length == 0 { - return "" - } + "fmt" - if length > 63 { - length = 63 - } - - b := make([]rune, length) - for i := range b { - ri, _ := rand.Int(rand.Reader, big.NewInt(int64(len(letters)))) - b[i] = letters[ri.Int64()] - } - - return string(b) -} + utilrand "k8s.io/apimachinery/pkg/util/rand" +) -// ConcatToMax will cut each section to length based on number of sections to not go beyond max and separate the sections with -. -func ConcatToMax(max int, sections ...string) string { - sectionLength := (max - len(sections) - 1) / len(sections) - name := "" +const ( + maxNameLength = 63 + randomLength = 5 + maxGeneratedNameLength = maxNameLength - randomLength +) - for i, section := range sections { - s := section[:int32(math.Min(float64(len(section)), float64(sectionLength)))] - name = name + "-" + s - if i+1 != len(sections) { - sectionLength = (max - len(name) - 1) / (len(sections) - (i + 1)) - } +func AppendRandomNameTo(base string) string { + if len(base) > maxGeneratedNameLength { + base = base[:maxGeneratedNameLength] } - - return name[1:] + return fmt.Sprintf("%s%s", base, utilrand.String(randomLength)) } diff --git a/tests/envtestutil/utils.go b/tests/envtestutil/utils.go index e24b286560e..3a6d2396479 100644 --- a/tests/envtestutil/utils.go +++ b/tests/envtestutil/utils.go @@ -4,8 +4,6 @@ import ( "fmt" "os" "path/filepath" - - featurev1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/features/v1" ) func FindProjectRoot() (string, error) { @@ -29,11 +27,3 @@ func FindProjectRoot() (string, error) { return "", fmt.Errorf("project root not found") } - -// NewSource creates an origin object with specified component and name. 
-func NewSource(component featurev1.OwnerType, name string) featurev1.Source { - return featurev1.Source{ - Type: component, - Name: name, - } -} diff --git a/tests/integration/features/features_int_test.go b/tests/integration/features/features_int_test.go index 4232d3996f8..7e58e3a6714 100644 --- a/tests/integration/features/features_int_test.go +++ b/tests/integration/features/features_int_test.go @@ -16,10 +16,10 @@ import ( "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" - dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" + dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" featurev1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/features/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/gvr" "github.com/opendatahub-io/opendatahub-operator/v2/tests/envtestutil" . "github.com/onsi/ginkgo/v2" @@ -31,9 +31,8 @@ import ( var testEmbeddedFiles embed.FS const ( - timeout = 5 * time.Second - interval = 250 * time.Millisecond - templatesDir = "templates" + timeout = 5 * time.Second + interval = 250 * time.Millisecond ) var _ = Describe("feature preconditions", func() { @@ -42,8 +41,8 @@ var _ = Describe("feature preconditions", func() { var ( objectCleaner *envtestutil.Cleaner - testFeature *feature.Feature namespace string + dsci *dsciv1.DSCInitialization ) BeforeEach(func() { @@ -51,16 +50,7 @@ var _ = Describe("feature preconditions", func() { testFeatureName := "test-ns-creation" namespace = envtestutil.AppendRandomNameTo(testFeatureName) - - dsciSpec := newDSCInitializationSpec(namespace) - source := envtestutil.NewSource(featurev1.DSCIType, "default") - var err error - testFeature, err = feature.CreateFeature(testFeatureName). - With(dsciSpec). - From(source). - UsingConfig(envTest.Config). 
- Load() - Expect(err).ToNot(HaveOccurred()) + dsci = newDSCInitialization(namespace) }) It("should create namespace if it does not exist", func() { @@ -70,78 +60,110 @@ var _ = Describe("feature preconditions", func() { defer objectCleaner.DeleteAll(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}) // when - err = feature.CreateNamespaceIfNotExists(namespace)(testFeature) + featuresHandler := feature.ClusterFeaturesHandler(dsci, func(handler *feature.FeaturesHandler) error { + testFeatureErr := feature.CreateFeature("create-new-ns"). + For(handler). + PreConditions(feature.CreateNamespaceIfNotExists(namespace)). + UsingConfig(envTest.Config). + Load() + + Expect(testFeatureErr).ToNot(HaveOccurred()) + + return nil + }) // then - Expect(err).ToNot(HaveOccurred()) + Expect(featuresHandler.Apply()).To(Succeed()) + + // and + Eventually(func() error { + _, err := getNamespace(namespace) + return err + }).WithTimeout(timeout).WithPolling(interval).Should(Succeed()) }) It("should not try to create namespace if it does already exist", func() { // given - ns := createNamespace(namespace) + ns := newNamespace(namespace) Expect(envTestClient.Create(context.Background(), ns)).To(Succeed()) + Eventually(func() error { + _, err := getNamespace(namespace) + return err + }).WithTimeout(timeout).WithPolling(interval).Should(Succeed()) // wait for ns to actually get created + defer objectCleaner.DeleteAll(ns) // when - err := feature.CreateNamespaceIfNotExists(namespace)(testFeature) + featuresHandler := feature.ClusterFeaturesHandler(dsci, func(handler *feature.FeaturesHandler) error { + testFeatureErr := feature.CreateFeature("create-new-ns"). + For(handler). + PreConditions(feature.CreateNamespaceIfNotExists(namespace)). + UsingConfig(envTest.Config). 
+ Load() + + Expect(testFeatureErr).ToNot(HaveOccurred()) + + return nil + }) // then - Expect(err).ToNot(HaveOccurred()) + Expect(featuresHandler.Apply()).To(Succeed()) + }) + }) Context("ensuring custom resource definitions are installed", func() { var ( - dsciSpec *dscv1.DSCInitializationSpec - verificationFeature *feature.Feature - source featurev1.Source + dsci *dsciv1.DSCInitialization ) BeforeEach(func() { - dsciSpec = newDSCInitializationSpec("default") - source = envtestutil.NewSource(featurev1.DSCIType, "default") + namespace := envtestutil.AppendRandomNameTo("test-crd-creation") + dsci = newDSCInitialization(namespace) }) It("should successfully check for existing CRD", func() { // given example CRD installed into env name := "test-resources.openshift.io" - var err error - verificationFeature, err = feature.CreateFeature("CRD verification"). - With(dsciSpec). - From(source). - UsingConfig(envTest.Config). - PreConditions(feature.EnsureCRDIsInstalled(name)). - Load() - Expect(err).ToNot(HaveOccurred()) - // when - err = verificationFeature.Apply() + featuresHandler := feature.ClusterFeaturesHandler(dsci, func(handler *feature.FeaturesHandler) error { + crdVerificationErr := feature.CreateFeature("verify-crd-exists"). + For(handler). + UsingConfig(envTest.Config). + PreConditions(feature.EnsureCRDIsInstalled(name)). + Load() + + Expect(crdVerificationErr).ToNot(HaveOccurred()) + + return nil + }) // then - Expect(err).ToNot(HaveOccurred()) + Expect(featuresHandler.Apply()).To(Succeed()) }) It("should fail to check non-existing CRD", func() { // given name := "non-existing-resource.non-existing-group.io" - var err error - verificationFeature, err = feature.CreateFeature("CRD verification"). - With(dsciSpec). - From(source). - UsingConfig(envTest.Config). - PreConditions(feature.EnsureCRDIsInstalled(name)). 
- Load() - Expect(err).ToNot(HaveOccurred()) - // when - err = verificationFeature.Apply() + featuresHandler := feature.ClusterFeaturesHandler(dsci, func(handler *feature.FeaturesHandler) error { + crdVerificationErr := feature.CreateFeature("fail-on-non-existing-crd"). + For(handler). + UsingConfig(envTest.Config). + PreConditions(feature.EnsureCRDIsInstalled(name)). + Load() + + Expect(crdVerificationErr).ToNot(HaveOccurred()) + + return nil + }) // then - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("\"non-existing-resource.non-existing-group.io\" not found")) + Expect(featuresHandler.Apply()).To(MatchError(ContainSubstring("\"non-existing-resource.non-existing-group.io\" not found"))) }) }) }) @@ -150,323 +172,392 @@ var _ = Describe("feature cleanup", func() { Context("using FeatureTracker and ownership as cleanup strategy", Ordered, func() { + const ( + featureName = "create-secret" + secretName = "test-secret" + ) + var ( - namespace string - dsciSpec *dscv1.DSCInitializationSpec - source featurev1.Source + dsci *dsciv1.DSCInitialization + namespace string + featuresHandler *feature.FeaturesHandler ) BeforeAll(func() { - namespace = envtestutil.AppendRandomNameTo("feature-tracker-test") - dsciSpec = newDSCInitializationSpec(namespace) - source = envtestutil.NewSource(featurev1.DSCIType, "default") + namespace = envtestutil.AppendRandomNameTo("test-secret-ownership") + dsci = newDSCInitialization(namespace) + featuresHandler = feature.ClusterFeaturesHandler(dsci, func(handler *feature.FeaturesHandler) error { + secretCreationErr := feature.CreateFeature(featureName). + For(handler). + UsingConfig(envTest.Config). + PreConditions( + feature.CreateNamespaceIfNotExists(namespace), + ). + WithResources(createSecret(secretName, namespace)). 
+ Load() + + Expect(secretCreationErr).ToNot(HaveOccurred()) + + return nil + }) + }) It("should successfully create resource and associated feature tracker", func() { - // given - createConfigMap, err := feature.CreateFeature("create-cfg-map"). - With(dsciSpec). - From(source). - UsingConfig(envTest.Config). - PreConditions( - feature.CreateNamespaceIfNotExists(namespace), - ). - WithResources(createTestSecret(namespace)). - Load() - Expect(err).ToNot(HaveOccurred()) - // when - Expect(createConfigMap.Apply()).To(Succeed()) + Expect(featuresHandler.Apply()).Should(Succeed()) // then - Expect(createConfigMap.Spec.Tracker).ToNot(BeNil()) - _, err = createConfigMap.DynamicClient. - Resource(gvr.FeatureTracker). - Get(context.TODO(), createConfigMap.Spec.Tracker.Name, metav1.GetOptions{}) - - Expect(err).ToNot(HaveOccurred()) + Eventually(createdSecretHasOwnerReferenceToOwningFeature(secretName, namespace)). + WithTimeout(timeout). + WithPolling(interval). + Should(Succeed()) }) It("should remove feature tracker on clean-up", func() { - // recreating feature struct again as it would happen in the reconcile - // given - createConfigMap, err := feature.CreateFeature("create-cfg-map"). - With(dsciSpec). - From(source). - UsingConfig(envTest.Config). - PreConditions( - feature.CreateNamespaceIfNotExists(namespace), - ). - WithResources(createTestSecret(namespace)). - Load() - Expect(err).ToNot(HaveOccurred()) - // when - Expect(createConfigMap.Cleanup()).To(Succeed()) - trackerName := createConfigMap.Spec.Tracker.Name + Expect(featuresHandler.Delete()).To(Succeed()) // then - _, err = createConfigMap.DynamicClient. - Resource(gvr.FeatureTracker). - Get(context.TODO(), trackerName, metav1.GetOptions{}) - - Expect(errors.IsNotFound(err)).To(BeTrue()) + Eventually(createdSecretHasOwnerReferenceToOwningFeature(secretName, namespace)). + WithTimeout(timeout). + WithPolling(interval). 
+ Should(WithTransform(errors.IsNotFound, BeTrue())) }) }) -}) - -var _ = Describe("feature trackers", func() { - Context("ensuring feature trackers indicate status and phase", func() { - - var ( - dsciSpec *dscv1.DSCInitializationSpec - source featurev1.Source - ) - - BeforeEach(func() { - dsciSpec = newDSCInitializationSpec("default") - source = envtestutil.NewSource(featurev1.DSCIType, "default") - }) - - It("should indicate successful installation in FeatureTracker", func() { - // given example CRD installed into env - name := "test-resources.openshift.io" - verificationFeature, err := feature.CreateFeature("crd-verification"). - With(dsciSpec). - From(source). - UsingConfig(envTest.Config). - PreConditions(feature.EnsureCRDIsInstalled(name)). - Load() - Expect(err).ToNot(HaveOccurred()) - - // when - Expect(verificationFeature.Apply()).To(Succeed()) - - // then - featureTracker := getFeatureTracker("default-crd-verification") - Expect(*featureTracker.Status.Conditions).To(ContainElement( - MatchFields(IgnoreExtras, Fields{ - "Type": Equal(conditionsv1.ConditionAvailable), - "Status": Equal(v1.ConditionTrue), - "Reason": Equal(string(featurev1.FeatureCreated)), - }), - )) - }) - - It("should indicate failure in preconditions", func() { - // given - name := "non-existing-resource.non-existing-group.io" - verificationFeature, err := feature.CreateFeature("crd-verification"). - With(dsciSpec). - From(source). - UsingConfig(envTest.Config). - PreConditions(feature.EnsureCRDIsInstalled(name)). 
- Load() - Expect(err).ToNot(HaveOccurred()) - - // when - Expect(verificationFeature.Apply()).ToNot(Succeed()) + var _ = Describe("feature trackers", func() { + Context("ensuring feature trackers indicate status and phase", func() { + + const appNamespace = "default" + + var ( + dsci *dsciv1.DSCInitialization + ) + + BeforeEach(func() { + dsci = newDSCInitialization("default") + }) + + It("should indicate successful installation in FeatureTracker", func() { + // given example CRD installed into env + name := "test-resources.openshift.io" + featuresHandler := feature.ClusterFeaturesHandler(dsci, func(handler *feature.FeaturesHandler) error { + verificationFeatureErr := feature.CreateFeature("crd-verification"). + For(handler). + UsingConfig(envTest.Config). + PreConditions(feature.EnsureCRDIsInstalled(name)). + Load() + + Expect(verificationFeatureErr).ToNot(HaveOccurred()) + + return nil + }) + + // when + Expect(featuresHandler.Apply()).To(Succeed()) + + // then + featureTracker, err := getFeatureTracker("crd-verification", appNamespace) + Expect(err).ToNot(HaveOccurred()) + Expect(*featureTracker.Status.Conditions).To(ContainElement( + MatchFields(IgnoreExtras, Fields{ + "Type": Equal(conditionsv1.ConditionAvailable), + "Status": Equal(v1.ConditionTrue), + "Reason": Equal(string(featurev1.FeatureCreated)), + }), + )) + }) + + It("should indicate failure in preconditions", func() { + // given + name := "non-existing-resource.non-existing-group.io" + featuresHandler := feature.ClusterFeaturesHandler(dsci, func(handler *feature.FeaturesHandler) error { + verificationFeatureErr := feature.CreateFeature("non-existing-crd-verification"). + For(handler). + UsingConfig(envTest.Config). + PreConditions(feature.EnsureCRDIsInstalled(name)). 
+ Load() + + Expect(verificationFeatureErr).ToNot(HaveOccurred()) + + return nil + }) + + // when + Expect(featuresHandler.Apply()).ToNot(Succeed()) + + // then + featureTracker, err := getFeatureTracker("non-existing-crd-verification", appNamespace) + Expect(err).ToNot(HaveOccurred()) + Expect(*featureTracker.Status.Conditions).To(ContainElement( + MatchFields(IgnoreExtras, Fields{ + "Type": Equal(conditionsv1.ConditionDegraded), + "Status": Equal(v1.ConditionTrue), + "Reason": Equal(string(featurev1.PreConditions)), + }), + )) + }) + + It("should indicate failure in post-conditions", func() { + // given + featuresHandler := feature.ClusterFeaturesHandler(dsci, func(handler *feature.FeaturesHandler) error { + verificationFeatureErr := feature.CreateFeature("post-condition-failure"). + For(handler). + UsingConfig(envTest.Config). + PostConditions(func(f *feature.Feature) error { + return fmt.Errorf("during test always fail") + }). + Load() + + Expect(verificationFeatureErr).ToNot(HaveOccurred()) + + return nil + }) + + // when + Expect(featuresHandler.Apply()).ToNot(Succeed()) + + // then + featureTracker, err := getFeatureTracker("post-condition-failure", appNamespace) + Expect(err).ToNot(HaveOccurred()) + Expect(*featureTracker.Status.Conditions).To(ContainElement( + MatchFields(IgnoreExtras, Fields{ + "Type": Equal(conditionsv1.ConditionDegraded), + "Status": Equal(v1.ConditionTrue), + "Reason": Equal(string(featurev1.PostConditions)), + }), + )) + }) + + It("should correctly indicate source in the feature tracker", func() { + // given + featuresHandler := feature.ClusterFeaturesHandler(dsci, func(handler *feature.FeaturesHandler) error { + emptyFeatureErr := feature.CreateFeature("empty-feature"). + For(handler). + UsingConfig(envTest.Config). 
+ Load() + + Expect(emptyFeatureErr).ToNot(HaveOccurred()) + + return nil + }) + + // when + Expect(featuresHandler.Apply()).To(Succeed()) + + // then + featureTracker, err := getFeatureTracker("empty-feature", appNamespace) + Expect(err).ToNot(HaveOccurred()) + Expect(featureTracker.Spec.Source).To( + MatchFields(IgnoreExtras, Fields{ + "Name": Equal("default-dsci"), + "Type": Equal(featurev1.DSCIType), + }), + ) + }) + + It("should correctly indicate app namespace in the feature tracker", func() { + // given + featuresHandler := feature.ClusterFeaturesHandler(dsci, func(handler *feature.FeaturesHandler) error { + emptyFeatureErr := feature.CreateFeature("empty-feature"). + For(handler). + UsingConfig(envTest.Config). + Load() + + Expect(emptyFeatureErr).ToNot(HaveOccurred()) + + return nil + }) + + // when + Expect(featuresHandler.Apply()).To(Succeed()) + + // then + featureTracker, err := getFeatureTracker("empty-feature", appNamespace) + Expect(err).ToNot(HaveOccurred()) + Expect(featureTracker.Spec.AppNamespace).To(Equal("default")) + }) - // then - featureTracker := getFeatureTracker("default-crd-verification") - Expect(*featureTracker.Status.Conditions).To(ContainElement( - MatchFields(IgnoreExtras, Fields{ - "Type": Equal(conditionsv1.ConditionDegraded), - "Status": Equal(v1.ConditionTrue), - "Reason": Equal(string(featurev1.PreConditions)), - }), - )) }) - It("should indicate failure in post-conditions", func() { - // given - verificationFeature, err := feature.CreateFeature("post-condition-failure"). - With(dsciSpec). - From(source). - UsingConfig(envTest.Config). - PostConditions(func(f *feature.Feature) error { - return fmt.Errorf("always fail") - }). 
- Load() - Expect(err).ToNot(HaveOccurred()) + }) - // when - Expect(verificationFeature.Apply()).ToNot(Succeed()) + var _ = Describe("Manifest sources", func() { + Context("using various manifest sources", func() { + + var ( + objectCleaner *envtestutil.Cleaner + dsci *dsciv1.DSCInitialization + namespace *v1.Namespace + ) + + BeforeEach(func() { + objectCleaner = envtestutil.CreateCleaner(envTestClient, envTest.Config, timeout, interval) + nsName := envtestutil.AppendRandomNameTo("smcp-ns") + + var err error + namespace, err = cluster.CreateNamespace(envTestClient, nsName) + Expect(err).ToNot(HaveOccurred()) + + dsci = newDSCInitialization(nsName) + dsci.Spec.ServiceMesh.ControlPlane.Namespace = namespace.Name + }) + + AfterEach(func() { + objectCleaner.DeleteAll(namespace) + }) + + It("should be able to process an embedded template from the default location", func() { + // given + featuresHandler := feature.ClusterFeaturesHandler(dsci, func(handler *feature.FeaturesHandler) error { + createServiceErr := feature.CreateFeature("create-local-gw-svc"). + For(handler). + UsingConfig(envTest.Config). + Manifests(path.Join(feature.ServerlessDir, "serving-istio-gateways", "local-gateway-svc.tmpl")). + Load() + + Expect(createServiceErr).ToNot(HaveOccurred()) + + return nil + }) + + // when + Expect(featuresHandler.Apply()).To(Succeed()) + + // then + service, err := getService("knative-local-gateway", namespace.Name) + Expect(err).ToNot(HaveOccurred()) + Expect(service.Name).To(Equal("knative-local-gateway")) + }) + + It("should be able to process an embedded YAML file from the default location", func() { + // given + knativeNs, nsErr := cluster.CreateNamespace(envTestClient, "knative-serving") + Expect(nsErr).ToNot(HaveOccurred()) + defer objectCleaner.DeleteAll(knativeNs) + + featuresHandler := feature.ClusterFeaturesHandler(dsci, func(handler *feature.FeaturesHandler) error { + createGatewayErr := feature.CreateFeature("create-local-gateway"). + For(handler). 
+ UsingConfig(envTest.Config). + Manifests(path.Join(feature.ServerlessDir, "serving-istio-gateways", "istio-local-gateway.yaml")). + Load() + + Expect(createGatewayErr).ToNot(HaveOccurred()) + + return nil + }) + + // when + Expect(featuresHandler.Apply()).To(Succeed()) + + // then + gateway, err := getGateway(envTest.Config, "knative-serving", "knative-local-gateway") + Expect(err).ToNot(HaveOccurred()) + Expect(gateway).ToNot(BeNil()) + }) + + It("should be able to process an embedded file from a non default location", func() { + // given + featuresHandler := feature.ClusterFeaturesHandler(dsci, func(handler *feature.FeaturesHandler) error { + createNamespaceErr := feature.CreateFeature("create-namespace"). + For(handler). + UsingConfig(envTest.Config). + ManifestSource(testEmbeddedFiles). + Manifests(path.Join(feature.BaseDir, "namespace.yaml")). + Load() + + Expect(createNamespaceErr).ToNot(HaveOccurred()) + + return nil + }) + + // when + Expect(featuresHandler.Apply()).To(Succeed()) + + // then + embeddedNs, err := getNamespace("embedded-test-ns") + defer objectCleaner.DeleteAll(embeddedNs) + Expect(err).ToNot(HaveOccurred()) + Expect(embeddedNs.Name).To(Equal("embedded-test-ns")) + }) + + const nsYAML = `apiVersion: v1 +kind: Namespace +metadata: + name: real-file-test-ns` - // then - featureTracker := getFeatureTracker("default-post-condition-failure") - Expect(*featureTracker.Status.Conditions).To(ContainElement( - MatchFields(IgnoreExtras, Fields{ - "Type": Equal(conditionsv1.ConditionDegraded), - "Status": Equal(v1.ConditionTrue), - "Reason": Equal(string(featurev1.PostConditions)), - }), - )) - }) + It("should source manifests from a specified temporary directory within the file system", func() { + // given + tempDir := GinkgoT().TempDir() - It("should correctly indicate source in the feature tracker", func() { - verificationFeature, err := feature.CreateFeature("empty-feature"). - With(dsciSpec). - From(source). - UsingConfig(envTest.Config). 
- Load() - Expect(err).ToNot(HaveOccurred()) + Expect(createFile(tempDir, "namespace.yaml", nsYAML)).To(Succeed()) - // when - Expect(verificationFeature.Apply()).To(Succeed()) + featuresHandler := feature.ClusterFeaturesHandler(dsci, func(handler *feature.FeaturesHandler) error { + createServiceErr := feature.CreateFeature("create-namespace"). + For(handler). + UsingConfig(envTest.Config). + ManifestSource(os.DirFS(tempDir)). + Manifests(path.Join("namespace.yaml")). // must be relative to root DirFS defined above + Load() - // then - featureTracker := getFeatureTracker("default-empty-feature") - Expect(featureTracker.Spec.Source.Name).To(Equal("default")) - Expect(featureTracker.Spec.Source.Type).To(Equal(featurev1.DSCIType)) - }) + Expect(createServiceErr).ToNot(HaveOccurred()) - It("should correctly indicate app namespace in the feature tracker", func() { - verificationFeature, err := feature.CreateFeature("empty-feature"). - With(dsciSpec). - From(source). - UsingConfig(envTest.Config). 
- Load() - Expect(err).ToNot(HaveOccurred()) + return nil + }) - // when - Expect(verificationFeature.Apply()).To(Succeed()) + // when + Expect(featuresHandler.Apply()).To(Succeed()) - // then - featureTracker := getFeatureTracker("default-empty-feature") - Expect(featureTracker.Spec.AppNamespace).To(Equal("default")) + // then + realNs, err := getNamespace("real-file-test-ns") + defer objectCleaner.DeleteAll(realNs) + Expect(err).ToNot(HaveOccurred()) + Expect(realNs.Name).To(Equal("real-file-test-ns")) + }) }) - }) }) -var _ = Describe("Manifest sources", func() { - Context("using various manifest sources", func() { - - var ( - objectCleaner *envtestutil.Cleaner - dsciSpec *dscv1.DSCInitializationSpec - source featurev1.Source - namespace = "default" - ) - - BeforeEach(func() { - objectCleaner = envtestutil.CreateCleaner(envTestClient, envTest.Config, timeout, interval) - dsciSpec = newDSCInitializationSpec(namespace) - source = envtestutil.NewSource(featurev1.DSCIType, "namespace") - }) - - It("should be able to process an embedded template from the default location", func() { - // given - ns := createNamespace("service-ns") - Expect(envTestClient.Create(context.Background(), ns)).To(Succeed()) - defer objectCleaner.DeleteAll(ns) - - serviceMeshSpec := &dsciSpec.ServiceMesh - serviceMeshSpec.ControlPlane.Namespace = "service-ns" - - createService, err := feature.CreateFeature("create-control-plane"). - With(dsciSpec). - From(source). - Manifests(path.Join(templatesDir, "serverless", "serving-istio-gateways", "local-gateway-svc.tmpl")). - UsingConfig(envTest.Config). 
- Load() - - Expect(err).ToNot(HaveOccurred()) - - // when - Expect(createService.Apply()).To(Succeed()) - - // then - service, err := getService("knative-local-gateway", "service-ns") - Expect(err).ToNot(HaveOccurred()) - Expect(service.Name).To(Equal("knative-local-gateway")) - }) - - It("should be able to process an embedded YAML file from the default location", func() { - // given - ns := createNamespace("knative-serving") - Expect(envTestClient.Create(context.Background(), ns)).To(Succeed()) - defer objectCleaner.DeleteAll(ns) - - createGateway, err := feature.CreateFeature("create-gateway"). - With(dsciSpec). - From(source). - Manifests(path.Join(templatesDir, "serverless", "serving-istio-gateways", "istio-local-gateway.yaml")). - UsingConfig(envTest.Config). - Load() - - Expect(err).ToNot(HaveOccurred()) - - // when - Expect(createGateway.Apply()).To(Succeed()) - - // then - gateway, err := getGateway(envTest.Config, "knative-serving", "knative-local-gateway") - Expect(err).ToNot(HaveOccurred()) - Expect(gateway).ToNot(BeNil()) - }) - - It("should be able to process an embedded file from a non default location", func() { - createNs, err := feature.CreateFeature("create-ns"). - With(dsciSpec). - From(source). - ManifestSource(testEmbeddedFiles). - Manifests(path.Join(templatesDir, "namespace.yaml")). - UsingConfig(envTest.Config). 
- Load() - - Expect(err).ToNot(HaveOccurred()) - - // when - Expect(createNs.Apply()).To(Succeed()) - - // then - namespace, err := getNamespace("embedded-test-ns") - Expect(err).ToNot(HaveOccurred()) - Expect(namespace.Name).To(Equal("embedded-test-ns")) - }) - - It("should source manifests from a specified temporary directory within the file system", func() { - // given - tempDir := GinkgoT().TempDir() - yamlData := `apiVersion: v1 -kind: Namespace -metadata: - name: real-file-test-ns` - - err := createFile(tempDir, "namespace.yaml", yamlData) - Expect(err).ToNot(HaveOccurred()) +func createdSecretHasOwnerReferenceToOwningFeature(secretName, namespace string) func() error { + return func() error { + secret, err := envTestClientset.CoreV1(). + Secrets(namespace). + Get(context.TODO(), secretName, metav1.GetOptions{}) - createNs, err := feature.CreateFeature("create-ns"). - With(dsciSpec). - From(source). - ManifestSource(os.DirFS(tempDir)). - Manifests(path.Join("namespace.yaml")). // must be relative to root DirFS defined above - UsingConfig(envTest.Config). 
- Load() + if err != nil { + return err + } - Expect(err).ToNot(HaveOccurred()) + Expect(secret.OwnerReferences).Should( + ContainElement( + MatchFields(IgnoreExtras, Fields{"Kind": Equal("FeatureTracker")}), + ), + ) - // when - Expect(createNs.Apply()).To(Succeed()) + trackerName := "" + for _, ownerRef := range secret.OwnerReferences { + if ownerRef.Kind == "FeatureTracker" { + trackerName = ownerRef.Name + break + } + } - // then - namespace, err := getNamespace("real-file-test-ns") - Expect(err).ToNot(HaveOccurred()) - Expect(namespace.Name).To(Equal("real-file-test-ns")) - }) - }) -}) + tracker := &featurev1.FeatureTracker{} + return envTestClient.Get(context.Background(), client.ObjectKey{ + Name: trackerName, + }, tracker) + } +} -func createTestSecret(namespace string) func(f *feature.Feature) error { +func createSecret(name, namespace string) func(f *feature.Feature) error { return func(f *feature.Feature) error { secret := &v1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-secret", + Name: name, Namespace: namespace, OwnerReferences: []metav1.OwnerReference{ f.AsOwnerReference(), @@ -477,15 +568,11 @@ func createTestSecret(namespace string) func(f *feature.Feature) error { }, } - _, err := f.Clientset.CoreV1(). - Secrets(namespace). 
- Create(context.TODO(), secret, metav1.CreateOptions{}) - - return err + return f.Client.Create(context.TODO(), secret) } } -func createNamespace(name string) *v1.Namespace { +func newNamespace(name string) *v1.Namespace { return &v1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -493,25 +580,28 @@ func createNamespace(name string) *v1.Namespace { } } -func getFeatureTracker(name string) *featurev1.FeatureTracker { - tracker := &featurev1.FeatureTracker{} +func getFeatureTracker(featureName, appNamespace string) (*featurev1.FeatureTracker, error) { //nolint:unparam //reason appNs + tracker := featurev1.NewFeatureTracker(featureName, appNamespace) err := envTestClient.Get(context.Background(), client.ObjectKey{ - Name: name, + Name: tracker.Name, }, tracker) - Expect(err).ToNot(HaveOccurred()) - - return tracker + return tracker, err } -func newDSCInitializationSpec(ns string) *dscv1.DSCInitializationSpec { - spec := dscv1.DSCInitializationSpec{} - spec.ApplicationsNamespace = ns - return &spec +func newDSCInitialization(ns string) *dsciv1.DSCInitialization { + return &dsciv1.DSCInitialization{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default-dsci", + }, + Spec: dsciv1.DSCInitializationSpec{ + ApplicationsNamespace: ns, + }, + } } func getNamespace(namespace string) (*v1.Namespace, error) { - ns := createNamespace(namespace) + ns := newNamespace(namespace) err := envTestClient.Get(context.Background(), types.NamespacedName{Name: namespace}, ns) return ns, err diff --git a/tests/integration/features/features_suite_int_test.go b/tests/integration/features/features_suite_int_test.go index 89732414b74..37ba1573cf0 100644 --- a/tests/integration/features/features_suite_int_test.go +++ b/tests/integration/features/features_suite_int_test.go @@ -11,6 +11,7 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/kubernetes" "sigs.k8s.io/controller-runtime/pkg/client" 
"sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" @@ -24,10 +25,11 @@ import ( ) var ( - envTestClient client.Client - envTest *envtest.Environment - ctx context.Context - cancel context.CancelFunc + envTestClient client.Client + envTestClientset *kubernetes.Clientset + envTest *envtest.Environment + ctx context.Context + cancel context.CancelFunc ) var testScheme = runtime.NewScheme() @@ -78,6 +80,10 @@ var _ = BeforeSuite(func() { envTestClient, err = client.New(config, client.Options{Scheme: testScheme}) Expect(err).NotTo(HaveOccurred()) Expect(envTestClient).NotTo(BeNil()) + + envTestClientset, err = kubernetes.NewForConfig(config) + Expect(err).NotTo(HaveOccurred()) + Expect(envTestClientset).NotTo(BeNil()) }) var _ = AfterSuite(func() { diff --git a/tests/integration/features/serverless_feature_test.go b/tests/integration/features/serverless_feature_test.go index c4a642c58c9..a9b84bcbd3e 100644 --- a/tests/integration/features/serverless_feature_test.go +++ b/tests/integration/features/serverless_feature_test.go @@ -2,15 +2,18 @@ package features_test import ( "context" + "fmt" + corev1 "k8s.io/api/core/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/yaml" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" - featurev1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/features/v1" + dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/components/kserve" infrav1 "github.com/opendatahub-io/opendatahub-operator/v2/infrastructure/v1" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/serverless" @@ -65,41 +68,50 @@ spec: 
var _ = Describe("Serverless feature", func() { - var testFeature *feature.Feature - var objectCleaner *envtestutil.Cleaner + var ( + dsci *dsciv1.DSCInitialization + objectCleaner *envtestutil.Cleaner + kserveComponent *kserve.Kserve + ) BeforeEach(func() { + // TODO rework c, err := client.New(envTest.Config, client.Options{}) Expect(err).ToNot(HaveOccurred()) - objectCleaner = envtestutil.CreateCleaner(c, envTest.Config, timeout, interval) - testFeatureName := "serverless-feature" - namespace := envtestutil.AppendRandomNameTo(testFeatureName) - - dsciSpec := newDSCInitializationSpec(namespace) - source := envtestutil.NewSource(featurev1.ComponentType, "kserve") - testFeature, err = feature.CreateFeature(testFeatureName). - With(dsciSpec). - From(source). - UsingConfig(envTest.Config). - Load() - - Expect(err).ToNot(HaveOccurred()) - - // Creates the actual Feature instance so that associated FeatureTracker is created as well - Expect(testFeature.Apply()).To(Succeed()) + dsci = newDSCInitialization("default") + kserveComponent = &kserve.Kserve{} }) Context("verifying preconditions", func() { When("operator is not installed", func() { - It("operator presence check should return an error", func() { - Expect(serverless.EnsureServerlessOperatorInstalled(testFeature)).ToNot(Succeed()) + + It("should fail on precondition check", func() { + // given + featuresHandler := feature.ComponentFeaturesHandler(kserveComponent, &dsci.Spec, func(handler *feature.FeaturesHandler) error { + verificationFeatureErr := feature.CreateFeature("no-serverless-operator-check"). + For(handler). + UsingConfig(envTest.Config). + PreConditions(serverless.EnsureServerlessOperatorInstalled). 
+ Load() + + Expect(verificationFeatureErr).ToNot(HaveOccurred()) + + return nil + }) + + // when + applyErr := featuresHandler.Apply() + + // then + Expect(applyErr).To(MatchError(ContainSubstring("\"knativeservings.operator.knative.dev\" not found"))) }) }) When("operator is installed", func() { + var knativeServingCrdObj *apiextensionsv1.CustomResourceDefinition BeforeEach(func() { @@ -120,17 +132,46 @@ var _ = Describe("Serverless feature", func() { objectCleaner.DeleteAll(knativeServingCrdObj) }) - It("operator presence check should succeed", func() { - Expect(serverless.EnsureServerlessOperatorInstalled(testFeature)).To(Succeed()) + It("should succeed checking operator installation using precondition", func() { + // when + featuresHandler := feature.ComponentFeaturesHandler(kserveComponent, &dsci.Spec, func(handler *feature.FeaturesHandler) error { + verificationFeatureErr := feature.CreateFeature("serverless-operator-check"). + For(handler). + UsingConfig(envTest.Config). + PreConditions(serverless.EnsureServerlessOperatorInstalled). + Load() + + Expect(verificationFeatureErr).ToNot(HaveOccurred()) + + return nil + }) + + // then + Expect(featuresHandler.Apply()).To(Succeed()) }) - It("KNative serving absence check should succeed if serving is not installed", func() { - Expect(serverless.EnsureServerlessAbsent(testFeature)).To(Succeed()) + It("should succeed if serving is not installed for KNative serving precondition", func() { + // when + featuresHandler := feature.ComponentFeaturesHandler(kserveComponent, &dsci.Spec, func(handler *feature.FeaturesHandler) error { + verificationFeatureErr := feature.CreateFeature("no-serving-installed-yet"). + For(handler). + UsingConfig(envTest.Config). + PreConditions(serverless.EnsureServerlessAbsent). 
+ Load() + + Expect(verificationFeatureErr).ToNot(HaveOccurred()) + + return nil + }) + + // then + Expect(featuresHandler.Apply()).To(Succeed()) }) - It("KNative serving absence check should fail when serving is present", func() { + It("should fail if serving is already installed for KNative serving precondition", func() { + // given ns := envtestutil.AppendRandomNameTo(testNamespacePrefix) - nsResource := createNamespace(ns) + nsResource := newNamespace(ns) Expect(envTestClient.Create(context.TODO(), nsResource)).To(Succeed()) defer objectCleaner.DeleteAll(nsResource) @@ -139,13 +180,43 @@ var _ = Describe("Serverless feature", func() { knativeServing.SetNamespace(nsResource.Name) Expect(envTestClient.Create(context.TODO(), knativeServing)).To(Succeed()) - Expect(serverless.EnsureServerlessAbsent(testFeature)).ToNot(Succeed()) + // when + featuresHandler := feature.ComponentFeaturesHandler(kserveComponent, &dsci.Spec, func(handler *feature.FeaturesHandler) error { + verificationFeatureErr := feature.CreateFeature("serving-already-installed"). + For(handler). + UsingConfig(envTest.Config). + PreConditions(serverless.EnsureServerlessAbsent). 
+ Load() + + Expect(verificationFeatureErr).ToNot(HaveOccurred()) + + return nil + }) + + // then + Expect(featuresHandler.Apply()).ToNot(Succeed()) }) }) + }) Context("default values", func() { + var testFeature *feature.Feature + + BeforeEach(func() { + // Stubbing feature as we want to test particular functions in isolation + testFeature = &feature.Feature{ + Name: "test-feature", + Spec: &feature.Spec{ + ServiceMeshSpec: &infrav1.ServiceMeshSpec{}, + Serving: &infrav1.ServingSpec{}, + }, + } + + testFeature.Client = envTestClient + }) + Context("ingress gateway TLS secret name", func() { It("should set default value when value is empty in the DSCI", func() { @@ -186,43 +257,97 @@ var _ = Describe("Serverless feature", func() { Context("resources creation", func() { - It("should create a TLS secret if certificate is SelfSigned", func() { + var ( + namespace *corev1.Namespace + ) + + BeforeEach(func() { ns := envtestutil.AppendRandomNameTo(testNamespacePrefix) - nsResource := createNamespace(ns) - Expect(envTestClient.Create(context.TODO(), nsResource)).To(Succeed()) - defer objectCleaner.DeleteAll(nsResource) + namespace = newNamespace(ns) + Expect(envTestClient.Create(context.TODO(), namespace)).To(Succeed()) - testFeature.Spec.ControlPlane.Namespace = nsResource.Name - testFeature.Spec.Serving.IngressGateway.Certificate.Type = infrav1.SelfSigned - testFeature.Spec.Serving.IngressGateway.Domain = testDomainFooCom - Expect(serverless.ServingDefaultValues(testFeature)).To(Succeed()) - Expect(serverless.ServingIngressDomain(testFeature)).To(Succeed()) + dsci.Spec.ServiceMesh.ControlPlane.Namespace = ns + }) - Expect(serverless.ServingCertificateResource(testFeature)).To(Succeed()) + AfterEach(func() { + objectCleaner.DeleteAll(namespace) + }) - secret, err := testFeature.Clientset.CoreV1().Secrets(nsResource.Name).Get(context.TODO(), serverless.DefaultCertificateSecretName, v1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - Expect(secret).ToNot(BeNil()) + 
It("should create a TLS secret if certificate is SelfSigned", func() { + // given + kserveComponent.Serving.IngressGateway.Certificate.Type = infrav1.SelfSigned + kserveComponent.Serving.IngressGateway.Domain = testDomainFooCom + + featuresHandler := feature.ComponentFeaturesHandler(kserveComponent, &dsci.Spec, func(handler *feature.FeaturesHandler) error { + verificationFeatureErr := feature.CreateFeature("tls-secret-creation"). + For(handler). + UsingConfig(envTest.Config). + WithData( + kserve.PopulateComponentSettings(kserveComponent), + serverless.ServingDefaultValues, + serverless.ServingIngressDomain, + ). + WithResources(serverless.ServingCertificateResource). + Load() + + Expect(verificationFeatureErr).ToNot(HaveOccurred()) + + return nil + }) + + // when + Expect(featuresHandler.Apply()).To(Succeed()) + + // then + Eventually(func() error { + secret, err := envTestClientset.CoreV1().Secrets(namespace.Name).Get(context.TODO(), serverless.DefaultCertificateSecretName, metav1.GetOptions{}) + if err != nil { + return err + } + + if secret == nil { + return fmt.Errorf("secret not found") + } + + return nil + }).WithTimeout(timeout).WithPolling(interval).Should(Succeed()) }) It("should not create any TLS secret if certificate is user provided", func() { - ns := envtestutil.AppendRandomNameTo(testNamespacePrefix) - nsResource := createNamespace(ns) - Expect(envTestClient.Create(context.TODO(), nsResource)).To(Succeed()) - defer objectCleaner.DeleteAll(nsResource) + // given + kserveComponent.Serving.IngressGateway.Certificate.Type = infrav1.Provided + kserveComponent.Serving.IngressGateway.Domain = testDomainFooCom + featuresHandler := feature.ComponentFeaturesHandler(kserveComponent, &dsci.Spec, func(handler *feature.FeaturesHandler) error { + verificationFeatureErr := feature.CreateFeature("tls-secret-creation"). + For(handler). + UsingConfig(envTest.Config). 
+ WithData( + kserve.PopulateComponentSettings(kserveComponent), + serverless.ServingDefaultValues, + serverless.ServingIngressDomain, + ). + WithResources(serverless.ServingCertificateResource). + Load() + + Expect(verificationFeatureErr).ToNot(HaveOccurred()) + + return nil + }) - testFeature.Spec.ControlPlane.Namespace = nsResource.Name - testFeature.Spec.Serving.IngressGateway.Certificate.Type = infrav1.Provided - testFeature.Spec.Serving.IngressGateway.Domain = "*.foo.com" - Expect(serverless.ServingDefaultValues(testFeature)).To(Succeed()) - Expect(serverless.ServingIngressDomain(testFeature)).To(Succeed()) + // when + Expect(featuresHandler.Apply()).To(Succeed()) - Expect(serverless.ServingCertificateResource(testFeature)).To(Succeed()) + // then + Consistently(func() error { + list, err := envTestClientset.CoreV1().Secrets(namespace.Name).List(context.TODO(), metav1.ListOptions{}) + if err != nil || len(list.Items) != 0 { + return fmt.Errorf("list len: %d, error: %w", len(list.Items), err) + } - list, err := testFeature.Clientset.CoreV1().Secrets(nsResource.Name).List(context.TODO(), v1.ListOptions{}) - Expect(err).ToNot(HaveOccurred()) - Expect(list.Items).To(BeEmpty()) + return nil + }).WithTimeout(timeout).WithPolling(interval).Should(Succeed()) }) }) + }) diff --git a/tests/integration/features/servicemesh_feature_test.go b/tests/integration/features/servicemesh_feature_test.go index 2b6fb4086bf..2f9879f839f 100644 --- a/tests/integration/features/servicemesh_feature_test.go +++ b/tests/integration/features/servicemesh_feature_test.go @@ -13,10 +13,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" - featurev1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/features/v1" + dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" 
"github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/servicemesh" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/gvr" "github.com/opendatahub-io/opendatahub-operator/v2/tests/envtestutil" . "github.com/onsi/ginkgo/v2" @@ -66,35 +66,54 @@ spec: ` var _ = Describe("Service Mesh feature", func() { - var testFeature *feature.Feature - var objectCleaner *envtestutil.Cleaner + + var ( + dsci *dsciv1.DSCInitialization + objectCleaner *envtestutil.Cleaner + ) BeforeEach(func() { c, err := client.New(envTest.Config, client.Options{}) Expect(err).ToNot(HaveOccurred()) - objectCleaner = envtestutil.CreateCleaner(c, envTest.Config, timeout, interval) - testFeatureName := "servicemesh-feature" - namespace := envtestutil.AppendRandomNameTo(testFeatureName) + namespace := envtestutil.AppendRandomNameTo("service-mesh-settings") - dsciSpec := newDSCInitializationSpec(namespace) - source := envtestutil.NewSource(featurev1.DSCIType, "default") - testFeature, err = feature.CreateFeature(testFeatureName). - With(dsciSpec). - From(source). - UsingConfig(envTest.Config). - Load() + dsci = newDSCInitialization(namespace) Expect(err).ToNot(HaveOccurred()) }) + AfterEach(func() { + + }) + Describe("preconditions", func() { + When("operator is not installed", func() { - It("operator presence check should return an error", func() { - Expect(servicemesh.EnsureServiceMeshOperatorInstalled(testFeature)).To(HaveOccurred()) + + It("should fail using precondition check", func() { + // given + featuresHandler := feature.ClusterFeaturesHandler(dsci, func(handler *feature.FeaturesHandler) error { + verificationFeatureErr := feature.CreateFeature("no-serverless-operator-check"). + For(handler). + UsingConfig(envTest.Config). + PreConditions(servicemesh.EnsureServiceMeshOperatorInstalled). 
+ Load() + + Expect(verificationFeatureErr).ToNot(HaveOccurred()) + + return nil + }) + + // when + applyErr := featuresHandler.Apply() + + // then + Expect(applyErr).To(MatchError(ContainSubstring("customresourcedefinitions.apiextensions.k8s.io \"servicemeshcontrolplanes.maistra.io\" not found"))) }) }) + When("operator is installed", func() { var smcpCrdObj *apiextensionsv1.CustomResourceDefinition @@ -110,32 +129,85 @@ var _ = Describe("Service Mesh feature", func() { err = envtest.WaitForCRDs(envTest.Config, []*apiextensionsv1.CustomResourceDefinition{smcpCrdObj}, crdOptions) Expect(err).ToNot(HaveOccurred()) }) + AfterEach(func() { // Delete SMCP CRD objectCleaner.DeleteAll(smcpCrdObj) }) - It("operator presence check should succeed", func() { - Expect(servicemesh.EnsureServiceMeshOperatorInstalled(testFeature)).To(Succeed()) + + It("should succeed using precondition check", func() { + // when + featuresHandler := feature.ClusterFeaturesHandler(dsci, func(handler *feature.FeaturesHandler) error { + verificationFeatureErr := feature.CreateFeature("service-mesh-operator-check"). + For(handler). + UsingConfig(envTest.Config). + PreConditions(servicemesh.EnsureServiceMeshOperatorInstalled). 
+ Load() + + Expect(verificationFeatureErr).ToNot(HaveOccurred()) + + return nil + }) + + // when + Expect(featuresHandler.Apply()).To(Succeed()) + }) + It("should find installed Service Mesh Control Plane", func() { + // given c, err := client.New(envTest.Config, client.Options{}) Expect(err).ToNot(HaveOccurred()) ns := envtestutil.AppendRandomNameTo(testNamespacePrefix) - nsResource := createNamespace(ns) + nsResource := newNamespace(ns) Expect(c.Create(context.Background(), nsResource)).To(Succeed()) defer objectCleaner.DeleteAll(nsResource) createServiceMeshControlPlane("test-name", ns) + dsci.Spec.ServiceMesh.ControlPlane.Namespace = ns + dsci.Spec.ServiceMesh.ControlPlane.Name = "test-name" + + // when + featuresHandler := feature.ClusterFeaturesHandler(dsci, func(handler *feature.FeaturesHandler) error { + verificationFeatureErr := feature.CreateFeature("service-mesh-control-plane-check"). + For(handler). + UsingConfig(envTest.Config). + PreConditions(servicemesh.EnsureServiceMeshInstalled). + Load() - testFeature.Spec.ControlPlane.Namespace = ns - testFeature.Spec.ControlPlane.Name = "test-name" - Expect(servicemesh.EnsureServiceMeshInstalled(testFeature)).To(Succeed()) + Expect(verificationFeatureErr).ToNot(HaveOccurred()) + + return nil + }) + + // then + Expect(featuresHandler.Apply()).To(Succeed()) }) + It("should fail to find Service Mesh Control Plane if not present", func() { - Expect(servicemesh.EnsureServiceMeshInstalled(testFeature)).ToNot(Succeed()) + // given + dsci.Spec.ServiceMesh.ControlPlane.Name = "test-name" + + // when + featuresHandler := feature.ClusterFeaturesHandler(dsci, func(handler *feature.FeaturesHandler) error { + verificationFeatureErr := feature.CreateFeature("no-service-mesh-control-plane-check"). + For(handler). + UsingConfig(envTest.Config). + PreConditions(servicemesh.EnsureServiceMeshInstalled). 
+ Load() + + Expect(verificationFeatureErr).ToNot(HaveOccurred()) + + return nil + }) + + // then + Expect(featuresHandler.Apply()).To(MatchError(ContainSubstring("failed to find Service Mesh Control Plane"))) }) + }) + }) }) @@ -169,18 +241,13 @@ func createServiceMeshControlPlane(name, namespace string) { "spec": map[string]interface{}{}, }, } - Expect(createSMCPInCluster(envTest.Config, serviceMeshControlPlane, namespace)).To(Succeed()) + Expect(createSMCPInCluster(serviceMeshControlPlane, namespace)).To(Succeed()) } -// createSMCPInCluster uses dynamic client to create a dummy SMCP resource for testing. -func createSMCPInCluster(cfg *rest.Config, smcpObj *unstructured.Unstructured, namespace string) error { - dynamicClient, err := dynamic.NewForConfig(cfg) - if err != nil { - return err - } - - result, err := dynamicClient.Resource(gvr.SMCP).Namespace(namespace).Create(context.TODO(), smcpObj, metav1.CreateOptions{}) - if err != nil { +func createSMCPInCluster(smcpObj *unstructured.Unstructured, namespace string) error { + smcpObj.SetGroupVersionKind(cluster.ServiceMeshControlPlaneGVK) + smcpObj.SetNamespace(namespace) + if err := envTestClient.Create(context.TODO(), smcpObj); err != nil { return err } @@ -205,15 +272,10 @@ func createSMCPInCluster(cfg *rest.Config, smcpObj *unstructured.Unstructured, n }, }, } - - if err := unstructured.SetNestedField(result.Object, status, "status"); err != nil { - return err - } - - _, err = dynamicClient.Resource(gvr.SMCP).Namespace(namespace).UpdateStatus(context.TODO(), result, metav1.UpdateOptions{}) - if err != nil { + update := smcpObj.DeepCopy() + if err := unstructured.SetNestedField(update.Object, status, "status"); err != nil { return err } - return nil + return envTestClient.Status().Update(context.TODO(), update) }