diff --git a/components/codeflare/codeflare.go b/components/codeflare/codeflare.go index c9fe4ad768c..72356290b7f 100644 --- a/components/codeflare/codeflare.go +++ b/components/codeflare/codeflare.go @@ -9,7 +9,6 @@ import ( operatorv1 "github.com/openshift/api/operator/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" @@ -57,7 +56,7 @@ func (c *CodeFlare) GetComponentName() string { return ComponentName } -func (c *CodeFlare) ReconcileComponent(ctx context.Context, cli client.Client, resConf *rest.Config, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, _ bool) error { +func (c *CodeFlare) ReconcileComponent(ctx context.Context, cli client.Client, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, _ bool) error { var imageParamMap = map[string]string{ "codeflare-operator-controller-image": "RELATED_IMAGE_ODH_CODEFLARE_OPERATOR_IMAGE", // no need mcad, embedded in cfo "namespace": dscispec.ApplicationsNamespace, @@ -109,7 +108,7 @@ func (c *CodeFlare) ReconcileComponent(ctx context.Context, cli client.Client, r if platform == deploy.ManagedRhods { if enabled { // first check if the service is up, so prometheus won't fire alerts when it is just startup - if err := monitoring.WaitForDeploymentAvailable(ctx, resConf, ComponentName, dscispec.ApplicationsNamespace, 20, 2); err != nil { + if err := monitoring.WaitForDeploymentAvailable(ctx, cli, ComponentName, dscispec.ApplicationsNamespace, 20, 2); err != nil { return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentName, err) } fmt.Printf("deployment for %s is done, updating monitoring rules\n", ComponentName) diff --git a/components/component.go b/components/component.go index 00f6cfebc06..00b1bbd4bb4 100644 --- a/components/component.go +++ b/components/component.go @@ -10,7 +10,6 @@ import ( operatorv1 
"github.com/openshift/api/operator/v1" "gopkg.in/yaml.v2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" @@ -80,7 +79,7 @@ type ManifestsConfig struct { } type ComponentInterface interface { - ReconcileComponent(ctx context.Context, cli client.Client, resConf *rest.Config, owner metav1.Object, DSCISpec *dsciv1.DSCInitializationSpec, currentComponentExist bool) error + ReconcileComponent(ctx context.Context, cli client.Client, owner metav1.Object, DSCISpec *dsciv1.DSCInitializationSpec, currentComponentExist bool) error Cleanup(cli client.Client, DSCISpec *dsciv1.DSCInitializationSpec) error GetComponentName() string GetManagementState() operatorv1.ManagementState diff --git a/components/dashboard/dashboard.go b/components/dashboard/dashboard.go index b42cb5c91c4..1b3f7b8a998 100644 --- a/components/dashboard/dashboard.go +++ b/components/dashboard/dashboard.go @@ -13,7 +13,6 @@ import ( v1 "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" @@ -85,7 +84,6 @@ func (d *Dashboard) GetComponentName() string { //nolint:gocyclo func (d *Dashboard) ReconcileComponent(ctx context.Context, cli client.Client, - resConf *rest.Config, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, currentComponentExist bool, @@ -171,7 +169,7 @@ func (d *Dashboard) ReconcileComponent(ctx context.Context, if platform == deploy.ManagedRhods { if enabled { // first check if the service is up, so prometheus won't fire alerts when it is just startup - if err := monitoring.WaitForDeploymentAvailable(ctx, resConf, ComponentNameSupported, dscispec.ApplicationsNamespace, 20, 3); err != nil { + if err :=
monitoring.WaitForDeploymentAvailable(ctx, cli, ComponentNameSupported, dscispec.ApplicationsNamespace, 20, 3); err != nil { return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentName, err) } fmt.Printf("deployment for %s is done, updating monitoring rules\n", ComponentNameSupported) diff --git a/components/datasciencepipelines/datasciencepipelines.go b/components/datasciencepipelines/datasciencepipelines.go index 6686f49a3f7..a847a2e65df 100644 --- a/components/datasciencepipelines/datasciencepipelines.go +++ b/components/datasciencepipelines/datasciencepipelines.go @@ -9,7 +9,6 @@ import ( operatorv1 "github.com/openshift/api/operator/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" @@ -57,7 +56,6 @@ func (d *DataSciencePipelines) GetComponentName() string { func (d *DataSciencePipelines) ReconcileComponent(ctx context.Context, cli client.Client, - resConf *rest.Config, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, _ bool, @@ -118,7 +116,7 @@ func (d *DataSciencePipelines) ReconcileComponent(ctx context.Context, if enabled { // first check if the service is up, so prometheus won't fire alerts when it is just startup // only 1 replica should be very quick - if err := monitoring.WaitForDeploymentAvailable(ctx, resConf, ComponentName, dscispec.ApplicationsNamespace, 10, 1); err != nil { + if err := monitoring.WaitForDeploymentAvailable(ctx, cli, ComponentName, dscispec.ApplicationsNamespace, 10, 1); err != nil { return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentName, err) } fmt.Printf("deployment for %s is done, updating monitoring rules\n", ComponentName) diff --git a/components/kserve/kserve.go b/components/kserve/kserve.go index dce65a7cf34..5f9ab9917e5 100644 --- a/components/kserve/kserve.go +++ b/components/kserve/kserve.go @@ -9,7 +9,6 @@ import 
( operatorv1 "github.com/openshift/api/operator/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" @@ -93,7 +92,7 @@ func (k *Kserve) GetComponentName() string { return ComponentName } -func (k *Kserve) ReconcileComponent(ctx context.Context, cli client.Client, resConf *rest.Config, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, _ bool) error { +func (k *Kserve) ReconcileComponent(ctx context.Context, cli client.Client, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, _ bool) error { // paramMap for Kserve to use. var imageParamMap = map[string]string{} @@ -167,7 +166,7 @@ func (k *Kserve) ReconcileComponent(ctx context.Context, cli client.Client, resC if platform == deploy.ManagedRhods { if enabled { // first check if the service is up, so prometheus won't fire alerts when it is just startup - if err := monitoring.WaitForDeploymentAvailable(ctx, resConf, ComponentName, dscispec.ApplicationsNamespace, 20, 2); err != nil { + if err := monitoring.WaitForDeploymentAvailable(ctx, cli, ComponentName, dscispec.ApplicationsNamespace, 20, 2); err != nil { return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentName, err) } fmt.Printf("deployment for %s is done, updating monitoing rules", ComponentName) diff --git a/components/kueue/kueue.go b/components/kueue/kueue.go index 0e0ab988f35..c74a549e88f 100644 --- a/components/kueue/kueue.go +++ b/components/kueue/kueue.go @@ -7,7 +7,6 @@ import ( operatorv1 "github.com/openshift/api/operator/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" @@ -52,7 +51,7 @@ func (r *Kueue) GetComponentName() string { return ComponentName } -func (r *Kueue) ReconcileComponent(ctx context.Context, 
cli client.Client, resConf *rest.Config, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, _ bool) error { +func (r *Kueue) ReconcileComponent(ctx context.Context, cli client.Client, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, _ bool) error { var imageParamMap = map[string]string{ "odh-kueue-controller-image": "RELATED_IMAGE_ODH_KUEUE_CONTROLLER_IMAGE", // new kueue image } @@ -85,7 +84,7 @@ func (r *Kueue) ReconcileComponent(ctx context.Context, cli client.Client, resCo if platform == deploy.ManagedRhods { if enabled { // first check if the service is up, so prometheus won't fire alerts when it is just startup - if err := monitoring.WaitForDeploymentAvailable(ctx, resConf, ComponentName, dscispec.ApplicationsNamespace, 20, 2); err != nil { + if err := monitoring.WaitForDeploymentAvailable(ctx, cli, ComponentName, dscispec.ApplicationsNamespace, 20, 2); err != nil { return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentName, err) } fmt.Printf("deployment for %s is done, updating monitoring rules\n", ComponentName) diff --git a/components/modelmeshserving/modelmeshserving.go b/components/modelmeshserving/modelmeshserving.go index 06968ee0728..c629a62ef71 100644 --- a/components/modelmeshserving/modelmeshserving.go +++ b/components/modelmeshserving/modelmeshserving.go @@ -9,7 +9,6 @@ import ( operatorv1 "github.com/openshift/api/operator/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" @@ -73,7 +72,6 @@ func (m *ModelMeshServing) GetComponentName() string { func (m *ModelMeshServing) ReconcileComponent(ctx context.Context, cli client.Client, - resConf *rest.Config, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, _ bool, @@ -151,7 +149,7 @@ func (m *ModelMeshServing) ReconcileComponent(ctx context.Context, if platform == deploy.ManagedRhods { if enabled 
{ // first check if service is up, so prometheus won't fire alerts when it is just startup - if err := monitoring.WaitForDeploymentAvailable(ctx, resConf, ComponentName, dscispec.ApplicationsNamespace, 20, 2); err != nil { + if err := monitoring.WaitForDeploymentAvailable(ctx, cli, ComponentName, dscispec.ApplicationsNamespace, 20, 2); err != nil { return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentName, err) } fmt.Printf("deployment for %s is done, updating monitoring rules\n", ComponentName) diff --git a/components/ray/ray.go b/components/ray/ray.go index 8168647df13..baf56e500a4 100644 --- a/components/ray/ray.go +++ b/components/ray/ray.go @@ -9,7 +9,6 @@ import ( operatorv1 "github.com/openshift/api/operator/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" @@ -53,7 +52,7 @@ func (r *Ray) GetComponentName() string { return ComponentName } -func (r *Ray) ReconcileComponent(ctx context.Context, cli client.Client, resConf *rest.Config, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, _ bool) error { +func (r *Ray) ReconcileComponent(ctx context.Context, cli client.Client, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, _ bool) error { var imageParamMap = map[string]string{ "odh-kuberay-operator-controller-image": "RELATED_IMAGE_ODH_KUBERAY_OPERATOR_CONTROLLER_IMAGE", "namespace": dscispec.ApplicationsNamespace, @@ -87,7 +86,7 @@ func (r *Ray) ReconcileComponent(ctx context.Context, cli client.Client, resConf if platform == deploy.ManagedRhods { if enabled { // first check if the service is up, so prometheus won't fire alerts when it is just startup - if err := monitoring.WaitForDeploymentAvailable(ctx, resConf, ComponentName, dscispec.ApplicationsNamespace, 20, 2); err != nil { + if err := monitoring.WaitForDeploymentAvailable(ctx, cli, ComponentName, 
dscispec.ApplicationsNamespace, 20, 2); err != nil { return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentName, err) } fmt.Printf("deployment for %s is done, updating monitoring rules\n", ComponentName) diff --git a/components/trustyai/trustyai.go b/components/trustyai/trustyai.go index 17ab7b3e886..9401ac99841 100644 --- a/components/trustyai/trustyai.go +++ b/components/trustyai/trustyai.go @@ -7,7 +7,6 @@ import ( operatorv1 "github.com/openshift/api/operator/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" @@ -53,7 +52,7 @@ func (t *TrustyAI) GetComponentName() string { return ComponentName } -func (t *TrustyAI) ReconcileComponent(_ context.Context, cli client.Client, _ *rest.Config, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, _ bool) error { +func (t *TrustyAI) ReconcileComponent(_ context.Context, cli client.Client, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, _ bool) error { var imageParamMap = map[string]string{ "trustyaiServiceImage": "RELATED_IMAGE_ODH_TRUSTYAI_SERVICE_IMAGE", "trustyaiOperatorImage": "RELATED_IMAGE_ODH_TRUSTYAI_SERVICE_OPERATOR_IMAGE", diff --git a/components/workbenches/workbenches.go b/components/workbenches/workbenches.go index afd840c5fc3..d406e8247c7 100644 --- a/components/workbenches/workbenches.go +++ b/components/workbenches/workbenches.go @@ -9,7 +9,6 @@ import ( operatorv1 "github.com/openshift/api/operator/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" dsci "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" @@ -95,7 +94,7 @@ func (w *Workbenches) GetComponentName() string { return ComponentName } -func (w *Workbenches) ReconcileComponent(ctx context.Context, cli client.Client, resConf *rest.Config, owner metav1.Object, 
dscispec *dsci.DSCInitializationSpec, _ bool) error { +func (w *Workbenches) ReconcileComponent(ctx context.Context, cli client.Client, owner metav1.Object, dscispec *dsci.DSCInitializationSpec, _ bool) error { var imageParamMap = map[string]string{ "odh-notebook-controller-image": "RELATED_IMAGE_ODH_NOTEBOOK_CONTROLLER_IMAGE", "odh-kf-notebook-controller-image": "RELATED_IMAGE_ODH_KF_NOTEBOOK_CONTROLLER_IMAGE", @@ -175,7 +174,7 @@ func (w *Workbenches) ReconcileComponent(ctx context.Context, cli client.Client, if enabled { // first check if the service is up, so prometheus wont fire alerts when it is just startup // only 1 replica set timeout to 1min - if err := monitoring.WaitForDeploymentAvailable(ctx, resConf, ComponentName, dscispec.ApplicationsNamespace, 10, 1); err != nil { + if err := monitoring.WaitForDeploymentAvailable(ctx, cli, ComponentName, dscispec.ApplicationsNamespace, 10, 1); err != nil { return fmt.Errorf("deployments for %s are not ready to server: %w", ComponentName, err) } fmt.Printf("deployments for %s are done, updating monitoring rules\n", ComponentName) diff --git a/controllers/datasciencecluster/datasciencecluster_controller.go b/controllers/datasciencecluster/datasciencecluster_controller.go index a5d67231621..c89c64e31f8 100644 --- a/controllers/datasciencecluster/datasciencecluster_controller.go +++ b/controllers/datasciencecluster/datasciencecluster_controller.go @@ -19,6 +19,7 @@ package datasciencecluster import ( "context" + "errors" "fmt" "strings" "time" @@ -36,7 +37,6 @@ import ( apierrs "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/rest" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/retry" apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" @@ -61,9 +61,8 @@ import ( // DataScienceClusterReconciler reconciles a DataScienceCluster object. 
type DataScienceClusterReconciler struct { //nolint:golint,revive client.Client - Scheme *runtime.Scheme - Log logr.Logger - RestConfig *rest.Config + Scheme *runtime.Scheme + Log logr.Logger // Recorder to generate events Recorder record.EventRecorder DataScienceCluster *DataScienceClusterConfig @@ -94,8 +93,8 @@ func (r *DataScienceClusterReconciler) Reconcile(ctx context.Context, req ctrl.R // Owned objects are automatically garbage collected. // For additional cleanup logic use operatorUninstall function. // Return and don't requeue - if upgrade.HasDeleteConfigMap(r.Client) { - if uninstallErr := upgrade.OperatorUninstall(r.Client, r.RestConfig); uninstallErr != nil { + if upgrade.HasDeleteConfigMap(ctx, r.Client) { + if uninstallErr := upgrade.OperatorUninstall(ctx, r.Client); uninstallErr != nil { return ctrl.Result{}, fmt.Errorf("error while operator uninstall: %w", uninstallErr) } } @@ -113,7 +112,7 @@ func (r *DataScienceClusterReconciler) Reconcile(ctx context.Context, req ctrl.R // If DSC CR exist and deletion CM exist // delete DSC CR and let reconcile requeue // sometimes with finalzier DSC CR wont get deleted, force to remove finalizer here - if upgrade.HasDeleteConfigMap(r.Client) { + if upgrade.HasDeleteConfigMap(ctx, r.Client) { if controllerutil.ContainsFinalizer(instance, finalizerName) { if controllerutil.RemoveFinalizer(instance, finalizerName) { if err := r.Update(ctx, instance); err != nil { @@ -205,7 +204,7 @@ func (r *DataScienceClusterReconciler) Reconcile(ctx context.Context, req ctrl.R return ctrl.Result{}, err } } - if upgrade.HasDeleteConfigMap(r.Client) { + if upgrade.HasDeleteConfigMap(ctx, r.Client) { // if delete configmap exists, requeue the request to handle operator uninstall return reconcile.Result{Requeue: true}, nil } @@ -290,7 +289,7 @@ func (r *DataScienceClusterReconciler) reconcileSubComponent(ctx context.Context } // Reconcile component - err = component.ReconcileComponent(ctx, r.Client, r.RestConfig, instance, 
r.DataScienceCluster.DSCISpec, instance.Status.InstalledComponents[componentName]) + err = component.ReconcileComponent(ctx, r.Client, instance, r.DataScienceCluster.DSCISpec, instance.Status.InstalledComponents[componentName]) if err != nil { // reconciliation failed: log errors, raise event and update status accordingly diff --git a/controllers/dscinitialization/dscinitialization_controller.go b/controllers/dscinitialization/dscinitialization_controller.go index be34f146816..fe0bc302a30 100644 --- a/controllers/dscinitialization/dscinitialization_controller.go +++ b/controllers/dscinitialization/dscinitialization_controller.go @@ -400,7 +400,7 @@ func (r *DSCInitializationReconciler) watchDSCResource(_ client.Object) []reconc // do not handle if cannot get list return nil } - if len(instanceList.Items) == 0 && !upgrade.HasDeleteConfigMap(r.Client) { + if len(instanceList.Items) == 0 && !upgrade.HasDeleteConfigMap(context.TODO(), r.Client) { r.Log.Info("Found no DSC instance in cluster but not in uninstalltion process, reset monitoring stack config") return []reconcile.Request{{NamespacedName: types.NamespacedName{Name: "backup"}}} diff --git a/main.go b/main.go index 859dbd8f3eb..8d908237e4c 100644 --- a/main.go +++ b/main.go @@ -151,10 +151,9 @@ func main() { } if err = (&datascienceclustercontrollers.DataScienceClusterReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - RestConfig: mgr.GetConfig(), - Log: ctrl.Log.WithName("controllers").WithName("DataScienceCluster"), + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Log: ctrl.Log.WithName("controllers").WithName("DataScienceCluster"), DataScienceCluster: &datascienceclustercontrollers.DataScienceClusterConfig{ DSCISpec: &dsci.DSCInitializationSpec{ ApplicationsNamespace: dscApplicationsNamespace, diff --git a/pkg/monitoring/monitoring.go b/pkg/monitoring/monitoring.go index 67a71d85cac..1bb97abc2cb 100644 --- a/pkg/monitoring/monitoring.go +++ b/pkg/monitoring/monitoring.go @@ -5,41 +5,30 
@@ import ( "fmt" "time" - errors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" ) -// WaitForDeploymentAvailable to check if component deployment from 'namepsace' is ready within 'timeout' before apply prometheus rules for the component. -func WaitForDeploymentAvailable(_ context.Context, restConfig *rest.Config, componentName string, namespace string, interval int, timeout int) error { +// WaitForDeploymentAvailable to check if component deployment from 'namespace' is ready within 'timeout' before apply prometheus rules for the component. +func WaitForDeploymentAvailable(ctx context.Context, c client.Client, componentName string, namespace string, interval int, timeout int) error { resourceInterval := time.Duration(interval) * time.Second resourceTimeout := time.Duration(timeout) * time.Minute - return wait.PollUntilContextTimeout(context.TODO(), resourceInterval, resourceTimeout, true, func(ctx context.Context) (bool, error) { - clientset, err := kubernetes.NewForConfig(restConfig) - if err != nil { - return false, fmt.Errorf("error getting client %w", err) - } - componentDeploymentList, err := clientset.AppsV1().Deployments(namespace).List(context.TODO(), metav1.ListOptions{ - LabelSelector: "app.opendatahub.io/" + componentName, - }) + + return wait.PollUntilContextTimeout(ctx, resourceInterval, resourceTimeout, true, func(ctx context.Context) (bool, error) { + componentDeploymentList := &v1.DeploymentList{} + err := c.List(ctx, componentDeploymentList, client.InNamespace(namespace), client.HasLabels{"app.opendatahub.io/" + componentName}) if err != nil { - if errors.IsNotFound(err) { - return false, nil - } + return false, fmt.Errorf("error fetching list of deployments: %w", err) } - isReady := false + fmt.Printf("waiting for %d deployment to be ready for 
%s\n", len(componentDeploymentList.Items), componentName) - if len(componentDeploymentList.Items) != 0 { - for _, deployment := range componentDeploymentList.Items { - if deployment.Status.ReadyReplicas == deployment.Status.Replicas { - isReady = true - } else { - isReady = false - } + for _, deployment := range componentDeploymentList.Items { + if deployment.Status.ReadyReplicas != deployment.Status.Replicas { + return false, nil } } - return isReady, nil + + return true, nil }) } diff --git a/pkg/upgrade/upgrade.go b/pkg/upgrade/upgrade.go index fdb9f38d4cc..dc52af90fae 100644 --- a/pkg/upgrade/upgrade.go +++ b/pkg/upgrade/upgrade.go @@ -12,7 +12,6 @@ import ( operatorv1 "github.com/openshift/api/operator/v1" routev1 "github.com/openshift/api/route/v1" ofapi "github.com/operator-framework/api/pkg/operators/v1alpha1" - olmclientset "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1alpha1" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -23,7 +22,6 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" kfdefv1 "github.com/opendatahub-io/opendatahub-operator/apis/kfdef.apps.kubeflow.org/v1" @@ -51,17 +49,17 @@ const ( // OperatorUninstall deletes all the externally generated resources. This includes monitoring resources and applications // installed by KfDef. 
-func OperatorUninstall(cli client.Client, cfg *rest.Config) error { +func OperatorUninstall(ctx context.Context, cli client.Client) error { platform, err := deploy.GetPlatform(cli) if err != nil { return err } - if err := RemoveKfDefInstances(cli, platform); err != nil { + if err := RemoveKfDefInstances(ctx, cli); err != nil { return err } - if err := removeDSCInitialization(cli); err != nil { + if err := removeDSCInitialization(ctx, cli); err != nil { return err } @@ -70,7 +68,7 @@ func OperatorUninstall(cli client.Client, cfg *rest.Config) error { nsOptions := []client.ListOption{ client.MatchingLabels{cluster.ODHGeneratedNamespaceLabel: "true"}, } - if err := cli.List(context.TODO(), generatedNamespaces, nsOptions...); err != nil { + if err := cli.List(ctx, generatedNamespaces, nsOptions...); err != nil { return fmt.Errorf("error getting generated namespaces : %w", err) } @@ -84,7 +82,7 @@ func OperatorUninstall(cli client.Client, cfg *rest.Config) error { for _, namespace := range generatedNamespaces.Items { namespace := namespace if namespace.Status.Phase == corev1.NamespaceActive { - if err := cli.Delete(context.TODO(), &namespace, []client.DeleteOption{}...); err != nil { + if err := cli.Delete(ctx, &namespace); err != nil { return fmt.Errorf("error deleting namespace %v: %w", namespace.Name, err) } fmt.Printf("Namespace %s deleted as a part of uninstallation.\n", namespace.Name) @@ -113,23 +111,23 @@ func OperatorUninstall(cli client.Client, cfg *rest.Config) error { } fmt.Printf("Removing the operator CSV in turn remove operator deployment\n") - err = removeCSV(cli, cfg) + err = removeCSV(ctx, cli) fmt.Printf("All resources deleted as part of uninstall.") return err } -func removeDSCInitialization(cli client.Client) error { +func removeDSCInitialization(ctx context.Context, cli client.Client) error { instanceList := &dsci.DSCInitializationList{} - if err := cli.List(context.TODO(), instanceList); err != nil { + if err := cli.List(ctx, instanceList); err 
!= nil { return err } var multiErr *multierror.Error for _, dsciInstance := range instanceList.Items { dsciInstance := dsciInstance - if err := cli.Delete(context.TODO(), &dsciInstance); !apierrs.IsNotFound(err) { + if err := cli.Delete(ctx, &dsciInstance); !apierrs.IsNotFound(err) { multiErr = multierror.Append(multiErr, err) } } @@ -139,7 +137,7 @@ func removeDSCInitialization(cli client.Client) error { // HasDeleteConfigMap returns true if delete configMap is added to the operator namespace by managed-tenants repo. // It returns false in all other cases. -func HasDeleteConfigMap(c client.Client) bool { +func HasDeleteConfigMap(ctx context.Context, c client.Client) bool { // Get watchNamespace operatorNamespace, err := GetOperatorNamespace() if err != nil { @@ -153,7 +151,7 @@ func HasDeleteConfigMap(c client.Client) bool { client.MatchingLabels{DeleteConfigMapLabel: "true"}, } - if err := c.List(context.TODO(), deleteConfigMapList, cmOptions...); err != nil { + if err := c.List(ctx, deleteConfigMapList, cmOptions...); err != nil { return false } return len(deleteConfigMapList.Items) != 0 @@ -161,7 +159,7 @@ func HasDeleteConfigMap(c client.Client) bool { // createDefaultDSC creates a default instance of DSC. // Note: When the platform is not Managed, and a DSC instance already exists, the function doesn't re-create/update the resource. 
-func CreateDefaultDSC(cli client.Client, _ deploy.Platform) error { +func CreateDefaultDSC(ctx context.Context, cli client.Client) error { // Set the default DSC name depending on the platform releaseDataScienceCluster := &dsc.DataScienceCluster{ TypeMeta: metav1.TypeMeta{ @@ -200,7 +198,7 @@ func CreateDefaultDSC(cli client.Client, _ deploy.Platform) error { }, }, } - err := cli.Create(context.TODO(), releaseDataScienceCluster) + err := cli.Create(ctx, releaseDataScienceCluster) switch { case err == nil: fmt.Printf("created DataScienceCluster resource\n") @@ -286,10 +284,10 @@ func UpdateFromLegacyVersion(cli client.Client, platform deploy.Platform, appNS return err } fmt.Println("creating default DSC CR") - if err := CreateDefaultDSC(cli, platform); err != nil { + if err := CreateDefaultDSC(context.TODO(), cli); err != nil { return err } - return RemoveKfDefInstances(cli, platform) + return RemoveKfDefInstances(context.TODO(), cli) } if platform == deploy.SelfManagedRhods { @@ -324,7 +322,7 @@ func UpdateFromLegacyVersion(cli client.Client, platform deploy.Platform, appNS return err } // create default DSC - if err = CreateDefaultDSC(cli, platform); err != nil { + if err = CreateDefaultDSC(context.TODO(), cli); err != nil { return err } } @@ -389,11 +387,11 @@ func GetOperatorNamespace() (string, error) { return "", err } -func RemoveKfDefInstances(cli client.Client, _ deploy.Platform) error { +func RemoveKfDefInstances(ctx context.Context, cli client.Client) error { // Check if kfdef are deployed kfdefCrd := &apiextv1.CustomResourceDefinition{} - err := cli.Get(context.TODO(), client.ObjectKey{Name: "kfdefs.kfdef.apps.kubeflow.org"}, kfdefCrd) + err := cli.Get(ctx, client.ObjectKey{Name: "kfdefs.kfdef.apps.kubeflow.org"}, kfdefCrd) if err != nil { if apierrs.IsNotFound(err) { // If no Crd found, return, since its a new Installation @@ -402,7 +400,7 @@ func RemoveKfDefInstances(cli client.Client, _ deploy.Platform) error { return fmt.Errorf("error retrieving 
kfdef CRD : %w", err) } expectedKfDefList := &kfdefv1.KfDefList{} - err = cli.List(context.TODO(), expectedKfDefList) + err = cli.List(ctx, expectedKfDefList) if err != nil { return fmt.Errorf("error getting list of kfdefs: %w", err) } @@ -412,11 +410,11 @@ func RemoveKfDefInstances(cli client.Client, _ deploy.Platform) error { // Remove finalizer updatedKfDef := &kfdef updatedKfDef.Finalizers = []string{} - err = cli.Update(context.TODO(), updatedKfDef) + err = cli.Update(ctx, updatedKfDef) if err != nil { return fmt.Errorf("error removing finalizers from kfdef %v : %w", kfdef.Name, err) } - err = cli.Delete(context.TODO(), updatedKfDef) + err = cli.Delete(ctx, updatedKfDef) if err != nil { return fmt.Errorf("error deleting kfdef %v : %w", kfdef.Name, err) } @@ -424,21 +422,21 @@ func RemoveKfDefInstances(cli client.Client, _ deploy.Platform) error { return nil } -func removeCSV(c client.Client, r *rest.Config) error { +func removeCSV(ctx context.Context, c client.Client) error { // Get watchNamespace operatorNamespace, err := GetOperatorNamespace() if err != nil { return err } - operatorCsv, err := getClusterServiceVersion(r, operatorNamespace) + operatorCsv, err := getClusterServiceVersion(ctx, c, operatorNamespace) if err != nil { return err } if operatorCsv != nil { fmt.Printf("Deleting CSV %s\n", operatorCsv.Name) - err = c.Delete(context.TODO(), operatorCsv, []client.DeleteOption{}...) + err = c.Delete(ctx, operatorCsv) if err != nil { if apierrs.IsNotFound(err) { return nil @@ -454,23 +452,16 @@ func removeCSV(c client.Client, r *rest.Config) error { } // getClusterServiceVersion retries the clusterserviceversions available in the operator namespace. 
-func getClusterServiceVersion(cfg *rest.Config, watchNameSpace string) (*ofapi.ClusterServiceVersion, error) { - operatorClient, err := olmclientset.NewForConfig(cfg) - if err != nil { - return nil, fmt.Errorf("error getting operator client %w", err) - } - csvs, err := operatorClient.ClusterServiceVersions(watchNameSpace).List(context.TODO(), metav1.ListOptions{}) - if err != nil { - return nil, err +func getClusterServiceVersion(ctx context.Context, c client.Client, watchNameSpace string) (*ofapi.ClusterServiceVersion, error) { + clusterServiceVersionList := &ofapi.ClusterServiceVersionList{} + if err := c.List(ctx, clusterServiceVersionList, client.InNamespace(watchNameSpace)); err != nil { + return nil, fmt.Errorf("failed listing cluster service versions: %w", err) + } - // get CSV with CRD DataScienceCluster - if len(csvs.Items) != 0 { - for _, csv := range csvs.Items { - for _, operatorCR := range csv.Spec.CustomResourceDefinitions.Owned { - if operatorCR.Kind == "DataScienceCluster" { - return &csv, nil - } + for _, csv := range clusterServiceVersionList.Items { + for _, operatorCR := range csv.Spec.CustomResourceDefinitions.Owned { + if operatorCR.Kind == "DataScienceCluster" { + return &csv, nil } } }