diff --git a/pkg/controller/chi/controller.go b/pkg/controller/chi/controller.go
index e551c1263..a887bb5b7 100644
--- a/pkg/controller/chi/controller.go
+++ b/pkg/controller/chi/controller.go
@@ -23,24 +23,24 @@ import (
 	"github.com/sanity-io/litter"
 	"gopkg.in/d4l3k/messagediff.v1"
 
-	appsV1 "k8s.io/api/apps/v1"
-	coreV1 "k8s.io/api/core/v1"
+	apps "k8s.io/api/apps/v1"
+	core "k8s.io/api/core/v1"
 	apiExtensions "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
-	metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	utilRuntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
 	kubeInformers "k8s.io/client-go/informers"
 	kube "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/scheme"
-	typedCoreV1 "k8s.io/client-go/kubernetes/typed/core/v1"
+	typedCore "k8s.io/client-go/kubernetes/typed/core/v1"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/record"
 
 	"github.com/altinity/queue"
 
 	log "github.com/altinity/clickhouse-operator/pkg/announcer"
-	chiV1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+	api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
 	"github.com/altinity/clickhouse-operator/pkg/apis/metrics"
 	"github.com/altinity/clickhouse-operator/pkg/chop"
 	chopClientSet "github.com/altinity/clickhouse-operator/pkg/client/clientset/versioned"
@@ -67,13 +67,13 @@ func NewController(
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(log.Info)
 	eventBroadcaster.StartRecordingToSink(
-		&typedCoreV1.EventSinkImpl{
+		&typedCore.EventSinkImpl{
 			Interface: kubeClient.CoreV1().Events(""),
 		},
 	)
 	recorder := eventBroadcaster.NewRecorder(
 		scheme.Scheme,
-		coreV1.EventSource{
+		core.EventSource{
 			Component: componentName,
 		},
 	)
@@ -107,14 +107,14 @@ func NewController(
 
 // initQueues
 func (c *Controller) initQueues() {
-	queuesNum := chop.Config().Reconcile.Runtime.ReconcileCHIsThreadsNumber + chiV1.DefaultReconcileSystemThreadsNumber
+	queuesNum := chop.Config().Reconcile.Runtime.ReconcileCHIsThreadsNumber + api.DefaultReconcileSystemThreadsNumber
 	for i := 0; i < queuesNum; i++ {
 		c.queues = append(
 			c.queues,
 			queue.New(),
 			//workqueue.NewNamedRateLimitingQueue(
 			//	workqueue.DefaultControllerRateLimiter(),
-			//	fmt.Sprintf("chiV1%d", i),
+			//	fmt.Sprintf("chi%d", i),
 			//),
 		)
 	}
@@ -125,7 +125,7 @@ func (c *Controller) addEventHandlersCHI(
 ) {
 	chopInformerFactory.Clickhouse().V1().ClickHouseInstallations().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc: func(obj interface{}) {
-			chi := obj.(*chiV1.ClickHouseInstallation)
+			chi := obj.(*api.ClickHouseInstallation)
 			if !chop.Config().IsWatchedNamespace(chi.Namespace) {
 				return
 			}
@@ -133,8 +133,8 @@ func (c *Controller) addEventHandlersCHI(
 			c.enqueueObject(NewReconcileCHI(reconcileAdd, nil, chi))
 		},
 		UpdateFunc: func(old, new interface{}) {
-			oldChi := old.(*chiV1.ClickHouseInstallation)
-			newChi := new.(*chiV1.ClickHouseInstallation)
+			oldChi := old.(*api.ClickHouseInstallation)
+			newChi := new.(*api.ClickHouseInstallation)
 			if !chop.Config().IsWatchedNamespace(newChi.Namespace) {
 				return
 			}
@@ -142,7 +142,7 @@ func (c *Controller) addEventHandlersCHI(
 			c.enqueueObject(NewReconcileCHI(reconcileUpdate, oldChi, newChi))
 		},
 		DeleteFunc: func(obj interface{}) {
-			chi := obj.(*chiV1.ClickHouseInstallation)
+			chi := obj.(*api.ClickHouseInstallation)
 			if !chop.Config().IsWatchedNamespace(chi.Namespace) {
 				return
 			}
@@ -157,7 +157,7 @@ func (c *Controller) addEventHandlersCHIT(
 ) {
 	chopInformerFactory.Clickhouse().V1().ClickHouseInstallationTemplates().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc: func(obj interface{}) {
-			chit := obj.(*chiV1.ClickHouseInstallationTemplate)
+			chit := obj.(*api.ClickHouseInstallationTemplate)
 			if !chop.Config().IsWatchedNamespace(chit.Namespace) {
 				return
 			}
@@ -165,8 +165,8 @@ func (c *Controller) addEventHandlersCHIT(
 			c.enqueueObject(NewReconcileCHIT(reconcileAdd, nil, chit))
 		},
 		UpdateFunc: func(old, new interface{}) {
-			oldChit := old.(*chiV1.ClickHouseInstallationTemplate)
-			newChit := new.(*chiV1.ClickHouseInstallationTemplate)
+			oldChit := old.(*api.ClickHouseInstallationTemplate)
+			newChit := new.(*api.ClickHouseInstallationTemplate)
 			if !chop.Config().IsWatchedNamespace(newChit.Namespace) {
 				return
 			}
@@ -174,7 +174,7 @@ func (c *Controller) addEventHandlersCHIT(
 			c.enqueueObject(NewReconcileCHIT(reconcileUpdate, oldChit, newChit))
 		},
 		DeleteFunc: func(obj interface{}) {
-			chit := obj.(*chiV1.ClickHouseInstallationTemplate)
+			chit := obj.(*api.ClickHouseInstallationTemplate)
 			if !chop.Config().IsWatchedNamespace(chit.Namespace) {
 				return
 			}
@@ -189,7 +189,7 @@ func (c *Controller) addEventHandlersChopConfig(
 ) {
 	chopInformerFactory.Clickhouse().V1().ClickHouseOperatorConfigurations().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc: func(obj interface{}) {
-			chopConfig := obj.(*chiV1.ClickHouseOperatorConfiguration)
+			chopConfig := obj.(*api.ClickHouseOperatorConfiguration)
 			if !chop.Config().IsWatchedNamespace(chopConfig.Namespace) {
 				return
 			}
@@ -197,8 +197,8 @@ func (c *Controller) addEventHandlersChopConfig(
 			c.enqueueObject(NewReconcileChopConfig(reconcileAdd, nil, chopConfig))
 		},
 		UpdateFunc: func(old, new interface{}) {
-			newChopConfig := new.(*chiV1.ClickHouseOperatorConfiguration)
-			oldChopConfig := old.(*chiV1.ClickHouseOperatorConfiguration)
+			newChopConfig := new.(*api.ClickHouseOperatorConfiguration)
+			oldChopConfig := old.(*api.ClickHouseOperatorConfiguration)
 			if !chop.Config().IsWatchedNamespace(newChopConfig.Namespace) {
 				return
 			}
@@ -206,7 +206,7 @@ func (c *Controller) addEventHandlersChopConfig(
 			c.enqueueObject(NewReconcileChopConfig(reconcileUpdate, oldChopConfig, newChopConfig))
 		},
 		DeleteFunc: func(obj interface{}) {
-			chopConfig := obj.(*chiV1.ClickHouseOperatorConfiguration)
+			chopConfig := obj.(*api.ClickHouseOperatorConfiguration)
 			if !chop.Config().IsWatchedNamespace(chopConfig.Namespace) {
 				return
 			}
@@ -221,21 +221,21 @@ func (c *Controller) addEventHandlersService(
 ) {
 	kubeInformerFactory.Core().V1().Services().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc: func(obj interface{}) {
-			service := obj.(*coreV1.Service)
+			service := obj.(*core.Service)
 			if !c.isTrackedObject(&service.ObjectMeta) {
 				return
 			}
 			log.V(3).M(service).Info("serviceInformer.AddFunc")
 		},
 		UpdateFunc: func(old, new interface{}) {
-			oldService := old.(*coreV1.Service)
+			oldService := old.(*core.Service)
 			if !c.isTrackedObject(&oldService.ObjectMeta) {
 				return
 			}
 			log.V(3).M(oldService).Info("serviceInformer.UpdateFunc")
 		},
 		DeleteFunc: func(obj interface{}) {
-			service := obj.(*coreV1.Service)
+			service := obj.(*core.Service)
 			if !c.isTrackedObject(&service.ObjectMeta) {
 				return
 			}
@@ -244,17 +244,17 @@ func (c *Controller) addEventHandlersService(
 	})
 }
 
-func normalizeEndpoints(e *coreV1.Endpoints) *coreV1.Endpoints {
+func normalizeEndpoints(e *core.Endpoints) *core.Endpoints {
 	if e == nil {
-		e = &coreV1.Endpoints{}
+		e = &core.Endpoints{}
 	}
 	if len(e.Subsets) == 0 {
-		e.Subsets = []coreV1.EndpointSubset{
+		e.Subsets = []core.EndpointSubset{
 			{},
 		}
 	}
 	if len(e.Subsets[0].Addresses) == 0 {
-		e.Subsets[0].Addresses = []coreV1.EndpointAddress{
+		e.Subsets[0].Addresses = []core.EndpointAddress{
 			{},
 		}
 	}
@@ -279,7 +279,7 @@ func checkIP(path *messagediff.Path, iValue interface{}) bool {
 	return false
 }
 
-func updated(old, new *coreV1.Endpoints) bool {
+func updated(old, new *core.Endpoints) bool {
 	oldSubsets := normalizeEndpoints(old).Subsets
 	newSubsets := normalizeEndpoints(new).Subsets
 
@@ -293,7 +293,7 @@ func updated(old, new *coreV1.Endpoints) bool {
 	assigned := false
 	for path, iValue := range diff.Added {
 		log.V(3).M(old).Info("endpointsInformer.UpdateFunc: added %v", path)
-		if address, ok := iValue.(coreV1.EndpointAddress); ok {
+		if address, ok := iValue.(core.EndpointAddress); ok {
 			if address.IP != "" {
 				assigned = true
 			}
@@ -320,15 +320,15 @@ func (c *Controller) addEventHandlersEndpoint(
 ) {
 	kubeInformerFactory.Core().V1().Endpoints().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc: func(obj interface{}) {
-			endpoints := obj.(*coreV1.Endpoints)
+			endpoints := obj.(*core.Endpoints)
 			if !c.isTrackedObject(&endpoints.ObjectMeta) {
 				return
 			}
 			log.V(2).M(endpoints).Info("endpointsInformer.AddFunc")
 		},
 		UpdateFunc: func(old, new interface{}) {
-			oldEndpoints := old.(*coreV1.Endpoints)
-			newEndpoints := new.(*coreV1.Endpoints)
+			oldEndpoints := old.(*core.Endpoints)
+			newEndpoints := new.(*core.Endpoints)
 			if !c.isTrackedObject(&oldEndpoints.ObjectMeta) {
 				return
 			}
@@ -339,7 +339,7 @@ func (c *Controller) addEventHandlersEndpoint(
 			}
 		},
 		DeleteFunc: func(obj interface{}) {
-			endpoints := obj.(*coreV1.Endpoints)
+			endpoints := obj.(*core.Endpoints)
 			if !c.isTrackedObject(&endpoints.ObjectMeta) {
 				return
 			}
@@ -353,21 +353,21 @@ func (c *Controller) addEventHandlersConfigMap(
 ) {
 	kubeInformerFactory.Core().V1().ConfigMaps().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc: func(obj interface{}) {
-			configMap := obj.(*coreV1.ConfigMap)
+			configMap := obj.(*core.ConfigMap)
 			if !c.isTrackedObject(&configMap.ObjectMeta) {
 				return
 			}
 			log.V(3).M(configMap).Info("configMapInformer.AddFunc")
 		},
 		UpdateFunc: func(old, new interface{}) {
-			configMap := old.(*coreV1.ConfigMap)
+			configMap := old.(*core.ConfigMap)
 			if !c.isTrackedObject(&configMap.ObjectMeta) {
 				return
 			}
 			log.V(3).M(configMap).Info("configMapInformer.UpdateFunc")
 		},
 		DeleteFunc: func(obj interface{}) {
-			configMap := obj.(*coreV1.ConfigMap)
+			configMap := obj.(*core.ConfigMap)
 			if !c.isTrackedObject(&configMap.ObjectMeta) {
 				return
 			}
@@ -381,7 +381,7 @@ func (c *Controller) addEventHandlersStatefulSet(
 ) {
 	kubeInformerFactory.Apps().V1().StatefulSets().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc: func(obj interface{}) {
-			statefulSet := obj.(*appsV1.StatefulSet)
+			statefulSet := obj.(*apps.StatefulSet)
 			if !c.isTrackedObject(&statefulSet.ObjectMeta) {
 				return
 			}
@@ -389,14 +389,14 @@ func (c *Controller) addEventHandlersStatefulSet(
 			//controller.handleObject(obj)
 		},
 		UpdateFunc: func(old, new interface{}) {
-			statefulSet := old.(*appsV1.StatefulSet)
+			statefulSet := old.(*apps.StatefulSet)
 			if !c.isTrackedObject(&statefulSet.ObjectMeta) {
 				return
 			}
 			log.V(3).M(statefulSet).Info("statefulSetInformer.UpdateFunc")
 		},
 		DeleteFunc: func(obj interface{}) {
-			statefulSet := obj.(*appsV1.StatefulSet)
+			statefulSet := obj.(*apps.StatefulSet)
 			if !c.isTrackedObject(&statefulSet.ObjectMeta) {
 				return
 			}
@@ -411,7 +411,7 @@ func (c *Controller) addEventHandlersPod(
 ) {
 	kubeInformerFactory.Core().V1().Pods().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc: func(obj interface{}) {
-			pod := obj.(*coreV1.Pod)
+			pod := obj.(*core.Pod)
 			if !c.isTrackedObject(&pod.ObjectMeta) {
 				return
 			}
@@ -419,8 +419,8 @@ func (c *Controller) addEventHandlersPod(
 			c.enqueueObject(NewReconcilePod(reconcileAdd, nil, pod))
 		},
 		UpdateFunc: func(old, new interface{}) {
-			oldPod := old.(*coreV1.Pod)
-			newPod := new.(*coreV1.Pod)
+			oldPod := old.(*core.Pod)
+			newPod := new.(*core.Pod)
 			if !c.isTrackedObject(&newPod.ObjectMeta) {
 				return
 			}
@@ -428,7 +428,7 @@ func (c *Controller) addEventHandlersPod(
 			c.enqueueObject(NewReconcilePod(reconcileUpdate, oldPod, newPod))
 		},
 		DeleteFunc: func(obj interface{}) {
-			pod := obj.(*coreV1.Pod)
+			pod := obj.(*core.Pod)
 			if !c.isTrackedObject(&pod.ObjectMeta) {
 				return
 			}
@@ -454,7 +454,7 @@ func (c *Controller) addEventHandlers(
 }
 
 // isTrackedObject checks whether operator is interested in changes of this object
-func (c *Controller) isTrackedObject(objectMeta *metaV1.ObjectMeta) bool {
+func (c *Controller) isTrackedObject(objectMeta *meta.ObjectMeta) bool {
 	return chop.Config().IsWatchedNamespace(objectMeta.Namespace) && model.IsCHOPGeneratedObject(objectMeta)
 }
 
@@ -504,7 +504,7 @@ func (c *Controller) Run(ctx context.Context) {
 	for i := 0; i < workersNum; i++ {
 		log.V(1).F().Info("ClickHouseInstallation controller: starting worker %d out of %d", i+1, workersNum)
 		sys := false
-		if i < chiV1.DefaultReconcileSystemThreadsNumber {
+		if i < api.DefaultReconcileSystemThreadsNumber {
 			sys = true
 		}
 		worker := c.newWorker(c.queues[i], sys)
@@ -518,10 +518,10 @@ func (c *Controller) Run(ctx context.Context) {
 func prepareCHIAdd(command *ReconcileCHI) bool {
 	newjs, _ := json.Marshal(command.new)
-	newchi := chiV1.ClickHouseInstallation{
-		TypeMeta: metaV1.TypeMeta{
-			APIVersion: chiV1.SchemeGroupVersion.String(),
-			Kind:       chiV1.ClickHouseInstallationCRDResourceKind,
+	newchi := api.ClickHouseInstallation{
+		TypeMeta: meta.TypeMeta{
+			APIVersion: api.SchemeGroupVersion.String(),
+			Kind:       api.ClickHouseInstallationCRDResourceKind,
 		},
 	}
 	_ = json.Unmarshal(newjs, &newchi)
@@ -543,11 +543,11 @@ func prepareCHIUpdate(command *ReconcileCHI) bool {
 	oldjs, _ := json.Marshal(command.old)
 	newjs, _ := json.Marshal(command.new)
-	oldchi := chiV1.ClickHouseInstallation{}
-	newchi := chiV1.ClickHouseInstallation{
-		TypeMeta: metaV1.TypeMeta{
-			APIVersion: chiV1.SchemeGroupVersion.String(),
-			Kind:       chiV1.ClickHouseInstallationCRDResourceKind,
+	oldchi := api.ClickHouseInstallation{}
+	newchi := api.ClickHouseInstallation{
+		TypeMeta: meta.TypeMeta{
+			APIVersion: api.SchemeGroupVersion.String(),
+			Kind:       api.ClickHouseInstallationCRDResourceKind,
 		},
 	}
 	_ = json.Unmarshal(oldjs, &oldchi)
@@ -579,8 +579,8 @@ func (c *Controller) enqueueObject(obj queue.PriorityQueueItem) {
 	enqueue := false
 	switch command := obj.(type) {
 	case *ReconcileCHI:
-		variants := len(c.queues) - chiV1.DefaultReconcileSystemThreadsNumber
-		index = chiV1.DefaultReconcileSystemThreadsNumber + util.HashIntoIntTopped(handle, variants)
+		variants := len(c.queues) - api.DefaultReconcileSystemThreadsNumber
+		index = api.DefaultReconcileSystemThreadsNumber + util.HashIntoIntTopped(handle, variants)
 		switch command.cmd {
 		case reconcileAdd:
 			enqueue = prepareCHIAdd(command)
@@ -593,7 +593,7 @@ func (c *Controller) enqueueObject(obj queue.PriorityQueueItem) {
 		*ReconcileEndpoints,
 		*ReconcilePod,
 		*DropDns:
-		variants := chiV1.DefaultReconcileSystemThreadsNumber
+		variants := api.DefaultReconcileSystemThreadsNumber
 		index = util.HashIntoIntTopped(handle, variants)
 		enqueue = true
 	}
@@ -604,7 +604,7 @@ func (c *Controller) enqueueObject(obj queue.PriorityQueueItem) {
 }
 
 // updateWatch
-func (c *Controller) updateWatch(chi *chiV1.ClickHouseInstallation) {
+func (c *Controller) updateWatch(chi *api.ClickHouseInstallation) {
 	watched := metrics.NewWatchedCHI(chi)
 	go c.updateWatchAsync(watched)
 }
@@ -619,7 +619,7 @@ func (c *Controller) updateWatchAsync(chi *metrics.WatchedCHI) {
 }
 
 // deleteWatch
-func (c *Controller) deleteWatch(chi *chiV1.ClickHouseInstallation) {
+func (c *Controller) deleteWatch(chi *api.ClickHouseInstallation) {
 	watched := metrics.NewWatchedCHI(chi)
 	go c.deleteWatchAsync(watched)
 }
@@ -634,7 +634,7 @@ func (c *Controller) deleteWatchAsync(chi *metrics.WatchedCHI) {
 }
 
 // addChopConfig
-func (c *Controller) addChopConfig(chopConfig *chiV1.ClickHouseOperatorConfiguration) error {
+func (c *Controller) addChopConfig(chopConfig *api.ClickHouseOperatorConfiguration) error {
 	if chop.Get().ConfigManager.IsConfigListed(chopConfig) {
 		log.V(1).M(chopConfig).F().Info("already known config - do nothing")
 	} else {
@@ -648,7 +648,7 @@ func (c *Controller) addChopConfig(chopConfig *chiV1.ClickHouseOperatorConfigura
 }
 
 // updateChopConfig
-func (c *Controller) updateChopConfig(old, new *chiV1.ClickHouseOperatorConfiguration) error {
+func (c *Controller) updateChopConfig(old, new *api.ClickHouseOperatorConfiguration) error {
 	if old.ObjectMeta.ResourceVersion == new.ObjectMeta.ResourceVersion {
 		log.V(2).M(old).F().Info("ResourceVersion did not change: %s", old.ObjectMeta.ResourceVersion)
 		// No need to react
@@ -664,7 +664,7 @@ func (c *Controller) updateChopConfig(old, new *chiV1.ClickHouseOperatorConfigur
 }
 
 // deleteChit deletes CHIT
-func (c *Controller) deleteChopConfig(chopConfig *chiV1.ClickHouseOperatorConfiguration) error {
+func (c *Controller) deleteChopConfig(chopConfig *api.ClickHouseOperatorConfiguration) error {
 	log.V(2).M(chopConfig).F().P()
 	// TODO
 	// NEED REFACTORING
@@ -680,7 +680,7 @@ type patchFinalizers struct {
 }
 
 // patchCHIFinalizers patch ClickHouseInstallation finalizers
-func (c *Controller) patchCHIFinalizers(ctx context.Context, chi *chiV1.ClickHouseInstallation) error {
+func (c *Controller) patchCHIFinalizers(ctx context.Context, chi *api.ClickHouseInstallation) error {
 	if util.IsContextDone(ctx) {
 		log.V(2).Info("task is done")
 		return nil
@@ -688,11 +688,11 @@ func (c *Controller) patchCHIFinalizers(ctx context.Context, chi *chiV1.ClickHou
 
 	// TODO fix this with verbosity update
 	// Start Debug object
-	//js, err := json.MarshalIndent(chiV1, "", "  ")
+	//js, err := json.MarshalIndent(chi, "", "  ")
 	//if err != nil {
-	//	log.V(1).M(chiV1).F().Error("%q", err)
+	//	log.V(1).M(chi).F().Error("%q", err)
 	//}
-	//log.V(3).M(chiV1).F().Info("\n%s\n", js)
+	//log.V(3).M(chi).F().Info("\n%s\n", js)
 	// End Debug object
 
 	payload, _ := json.Marshal([]patchFinalizers{{
@@ -722,12 +722,12 @@ func (c *Controller) patchCHIFinalizers(ctx context.Context, chi *chiV1.ClickHou
 
 // UpdateCHIStatusOptions defines how to update CHI status
 type UpdateCHIStatusOptions struct {
-	chiV1.CopyCHIStatusOptions
+	api.CopyCHIStatusOptions
 	TolerateAbsence bool
 }
 
 // updateCHIObjectStatus updates ClickHouseInstallation object's Status
-func (c *Controller) updateCHIObjectStatus(ctx context.Context, chi *chiV1.ClickHouseInstallation, opts UpdateCHIStatusOptions) (err error) {
+func (c *Controller) updateCHIObjectStatus(ctx context.Context, chi *api.ClickHouseInstallation, opts UpdateCHIStatusOptions) (err error) {
 	if util.IsContextDone(ctx) {
 		log.V(2).Info("task is done")
 		return nil
@@ -754,7 +754,7 @@ func (c *Controller) updateCHIObjectStatus(ctx context.Context, chi *chiV1.Click
 }
 
 // doUpdateCHIObjectStatus updates ClickHouseInstallation object's Status
-func (c *Controller) doUpdateCHIObjectStatus(ctx context.Context, chi *chiV1.ClickHouseInstallation, opts UpdateCHIStatusOptions) error {
+func (c *Controller) doUpdateCHIObjectStatus(ctx context.Context, chi *api.ClickHouseInstallation, opts UpdateCHIStatusOptions) error {
 	if util.IsContextDone(ctx) {
 		log.V(2).Info("task is done")
 		return nil
@@ -792,7 +792,7 @@ func (c *Controller) doUpdateCHIObjectStatus(ctx context.Context, chi *chiV1.Cli
 		return err
 	}
 
-	// Propagate updated ResourceVersion into chiV1
+	// Propagate updated ResourceVersion into chi
 	if chi.ObjectMeta.ResourceVersion != _new.ObjectMeta.ResourceVersion {
 		log.V(2).M(chi).F().Info("ResourceVersion change: %s to %s", chi.ObjectMeta.ResourceVersion, _new.ObjectMeta.ResourceVersion)
 		chi.ObjectMeta.ResourceVersion = _new.ObjectMeta.ResourceVersion
@@ -804,7 +804,7 @@ func (c *Controller) doUpdateCHIObjectStatus(ctx context.Context, chi *chiV1.Cli
 	return nil
 }
 
-func (c *Controller) poll(ctx context.Context, chi *chiV1.ClickHouseInstallation, f func(c *chiV1.ClickHouseInstallation, e error) bool) {
+func (c *Controller) poll(ctx context.Context, chi *api.ClickHouseInstallation, f func(c *api.ClickHouseInstallation, e error) bool) {
 	if util.IsContextDone(ctx) {
 		log.V(2).Info("task is done")
 		return
@@ -829,7 +829,7 @@ func (c *Controller) poll(ctx context.Context, chi *chiV1.ClickHouseInstallation
 }
 
 // installFinalizer
-func (c *Controller) installFinalizer(ctx context.Context, chi *chiV1.ClickHouseInstallation) error {
+func (c *Controller) installFinalizer(ctx context.Context, chi *api.ClickHouseInstallation) error {
 	if util.IsContextDone(ctx) {
 		log.V(2).Info("task is done")
 		return nil
@@ -857,7 +857,7 @@ func (c *Controller) installFinalizer(ctx context.Context, chi *chiV1.ClickHouse
 }
 
 // uninstallFinalizer
-func (c *Controller) uninstallFinalizer(ctx context.Context, chi *chiV1.ClickHouseInstallation) error {
+func (c *Controller) uninstallFinalizer(ctx context.Context, chi *api.ClickHouseInstallation) error {
 	if util.IsContextDone(ctx) {
 		log.V(2).Info("task is done")
 		return nil
@@ -882,14 +882,14 @@ func (c *Controller) uninstallFinalizer(ctx context.Context, chi *chiV1.ClickHou
 
 // handleObject enqueues CHI which is owner of `obj` into reconcile loop
 func (c *Controller) handleObject(obj interface{}) {
 	// TODO review
-	object, ok := obj.(metaV1.Object)
+	object, ok := obj.(meta.Object)
 	if !ok {
 		ts, ok := obj.(cache.DeletedFinalStateUnknown)
 		if !ok {
 			utilRuntime.HandleError(fmt.Errorf(messageUnableToDecode))
 			return
 		}
-		object, ok = ts.Obj.(metaV1.Object)
+		object, ok = ts.Obj.(meta.Object)
 		if !ok {
 			utilRuntime.HandleError(fmt.Errorf(messageUnableToDecode))
 			return
@@ -899,21 +899,21 @@ func (c *Controller) handleObject(obj interface{}) {
 	// object is an instance of meta.Object
 
 	// Checking that we control current StatefulSet Object
-	ownerRef := metaV1.GetControllerOf(object)
+	ownerRef := meta.GetControllerOf(object)
 	if ownerRef == nil {
 		// No owner
 		return
 	}
 
 	// Ensure owner is of a proper kind
-	if ownerRef.Kind != chiV1.ClickHouseInstallationCRDResourceKind {
+	if ownerRef.Kind != api.ClickHouseInstallationCRDResourceKind {
 		return
 	}
 
 	log.V(1).Info("Processing object: %s", object.GetName())
 
 	// Get owner - it is expected to be CHI
-	// TODO chiV1, err := c.chiV1.ClickHouseInstallations(object.GetNamespace()).Get(ownerRef.Name)
+	// TODO chi, err := c.chi.ClickHouseInstallations(object.GetNamespace()).Get(ownerRef.Name)
 
 	// TODO
 	//if err != nil {
@@ -922,7 +922,7 @@ func (c *Controller) handleObject(obj interface{}) {
 	//}
 
 	// Add CHI object into reconcile loop
-	// TODO c.enqueueObject(chiV1.Namespace, chiV1.Name, chiV1)
+	// TODO c.enqueueObject(chi.Namespace, chi.Name, chi)
 }
 
 // waitForCacheSync is a logger-wrapper over cache.WaitForCacheSync() and it waits for caches to populate
diff --git a/pkg/controller/chi/creator.go b/pkg/controller/chi/creator.go
index b18137090..527e248ad 100644
--- a/pkg/controller/chi/creator.go
+++ b/pkg/controller/chi/creator.go
@@ -19,19 +19,19 @@ import (
 	"fmt"
 
 	"gopkg.in/d4l3k/messagediff.v1"
-	appsV1 "k8s.io/api/apps/v1"
-	coreV1 "k8s.io/api/core/v1"
+	apps "k8s.io/api/apps/v1"
+	core "k8s.io/api/core/v1"
 	apiErrors "k8s.io/apimachinery/pkg/api/errors"
 
 	log "github.com/altinity/clickhouse-operator/pkg/announcer"
-	chiV1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+	api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
 	"github.com/altinity/clickhouse-operator/pkg/chop"
 	"github.com/altinity/clickhouse-operator/pkg/controller"
 	"github.com/altinity/clickhouse-operator/pkg/util"
 )
 
 // createStatefulSet is an internal function, used in reconcileStatefulSet only
-func (c *Controller) createStatefulSet(ctx context.Context, host *chiV1.ChiHost) ErrorCRUD {
+func (c *Controller) createStatefulSet(ctx context.Context, host *api.ChiHost) ErrorCRUD {
 	log.V(1).M(host).F().P()
 
 	if util.IsContextDone(ctx) {
@@ -60,9 +60,9 @@ func (c *Controller) createStatefulSet(ctx context.Context, host *chiV1.ChiHost)
 // updateStatefulSet is an internal function, used in reconcileStatefulSet only
 func (c *Controller) updateStatefulSet(
 	ctx context.Context,
-	oldStatefulSet *appsV1.StatefulSet,
-	newStatefulSet *appsV1.StatefulSet,
-	host *chiV1.ChiHost,
+	oldStatefulSet *apps.StatefulSet,
+	newStatefulSet *apps.StatefulSet,
+	host *api.ChiHost,
 ) ErrorCRUD {
 	log.V(2).M(host).F().P()
 
@@ -126,7 +126,7 @@ func (c *Controller) updateStatefulSet(
 
 // Comment out PV
 // updatePersistentVolume
-//func (c *Controller) updatePersistentVolume(ctx context.Context, pv *coreV1.PersistentVolume) (*coreV1.PersistentVolume, error) {
+//func (c *Controller) updatePersistentVolume(ctx context.Context, pv *core.PersistentVolume) (*core.PersistentVolume, error) {
 //	log.V(2).M(pv).F().P()
 //	if util.IsContextDone(ctx) {
 //		log.V(2).Info("task is done")
@@ -145,7 +145,7 @@ func (c *Controller) updateStatefulSet(
 //}
 
 // updatePersistentVolumeClaim
-func (c *Controller) updatePersistentVolumeClaim(ctx context.Context, pvc *coreV1.PersistentVolumeClaim) (*coreV1.PersistentVolumeClaim, error) {
+func (c *Controller) updatePersistentVolumeClaim(ctx context.Context, pvc *core.PersistentVolumeClaim) (*core.PersistentVolumeClaim, error) {
 	log.V(2).M(pvc).F().P()
 	if util.IsContextDone(ctx) {
 		log.V(2).Info("task is done")
@@ -182,7 +182,7 @@ func (c *Controller) updatePersistentVolumeClaim(ctx context.Context, pvc *coreV
 
 // onStatefulSetCreateFailed handles situation when StatefulSet create failed
 // It can just delete failed StatefulSet or do nothing
-func (c *Controller) onStatefulSetCreateFailed(ctx context.Context, host *chiV1.ChiHost) ErrorCRUD {
+func (c *Controller) onStatefulSetCreateFailed(ctx context.Context, host *api.ChiHost) ErrorCRUD {
 	if util.IsContextDone(ctx) {
 		log.V(2).Info("task is done")
 		return errCRUDIgnore
@@ -190,18 +190,18 @@ func (c *Controller) onStatefulSetCreateFailed(ctx context.Context, host *chiV1.
 
 	// What to do with StatefulSet - look into chop configuration settings
 	switch chop.Config().Reconcile.StatefulSet.Create.OnFailure {
-	case chiV1.OnStatefulSetCreateFailureActionAbort:
+	case api.OnStatefulSetCreateFailureActionAbort:
 		// Report appropriate error, it will break reconcile loop
 		log.V(1).M(host).F().Info("abort")
 		return errCRUDAbort
 
-	case chiV1.OnStatefulSetCreateFailureActionDelete:
+	case api.OnStatefulSetCreateFailureActionDelete:
 		// Delete gracefully failed StatefulSet
 		log.V(1).M(host).F().Info("going to DELETE FAILED StatefulSet %s", util.NamespaceNameString(host.DesiredStatefulSet.ObjectMeta))
 		_ = c.deleteHost(ctx, host)
 		return c.shouldContinueOnCreateFailed()
 
-	case chiV1.OnStatefulSetCreateFailureActionIgnore:
+	case api.OnStatefulSetCreateFailureActionIgnore:
 		// Ignore error, continue reconcile loop
 		log.V(1).M(host).F().Info("going to ignore error %s", util.NamespaceNameString(host.DesiredStatefulSet.ObjectMeta))
 		return errCRUDIgnore
@@ -216,7 +216,7 @@ func (c *Controller) onStatefulSetCreateFailed(ctx context.Context, host *chiV1.
 
 // onStatefulSetUpdateFailed handles situation when StatefulSet update failed
 // It can try to revert StatefulSet to its previous version, specified in rollbackStatefulSet
-func (c *Controller) onStatefulSetUpdateFailed(ctx context.Context, rollbackStatefulSet *appsV1.StatefulSet, host *chiV1.ChiHost) ErrorCRUD {
+func (c *Controller) onStatefulSetUpdateFailed(ctx context.Context, rollbackStatefulSet *apps.StatefulSet, host *api.ChiHost) ErrorCRUD {
 	if util.IsContextDone(ctx) {
 		log.V(2).Info("task is done")
 		return errCRUDIgnore
@@ -227,12 +227,12 @@ func (c *Controller) onStatefulSetUpdateFailed(ctx context.Context, rollbackStat
 
 	// What to do with StatefulSet - look into chop configuration settings
 	switch chop.Config().Reconcile.StatefulSet.Update.OnFailure {
-	case chiV1.OnStatefulSetUpdateFailureActionAbort:
+	case api.OnStatefulSetUpdateFailureActionAbort:
 		// Report appropriate error, it will break reconcile loop
 		log.V(1).M(host).F().Info("abort StatefulSet %s", util.NamespaceNameString(rollbackStatefulSet.ObjectMeta))
 		return errCRUDAbort
 
-	case chiV1.OnStatefulSetUpdateFailureActionRollback:
+	case api.OnStatefulSetUpdateFailureActionRollback:
 		// Need to revert current StatefulSet to oldStatefulSet
 		log.V(1).M(host).F().Info("going to ROLLBACK FAILED StatefulSet %s", util.NamespaceNameString(rollbackStatefulSet.ObjectMeta))
 		statefulSet, err := c.getStatefulSet(host)
@@ -252,7 +252,7 @@ func (c *Controller) onStatefulSetUpdateFailed(ctx context.Context, rollbackStat
 
 		return c.shouldContinueOnUpdateFailed()
 
-	case chiV1.OnStatefulSetUpdateFailureActionIgnore:
+	case api.OnStatefulSetUpdateFailureActionIgnore:
 		// Ignore error, continue reconcile loop
 		log.V(1).M(host).F().Info("going to ignore error %s", util.NamespaceNameString(rollbackStatefulSet.ObjectMeta))
 		return errCRUDIgnore
@@ -293,7 +293,7 @@ func (c *Controller) shouldContinueOnUpdateFailed() ErrorCRUD {
 	return errCRUDAbort
 }
 
-func (c *Controller) createSecret(ctx context.Context, secret *coreV1.Secret) error {
+func (c *Controller) createSecret(ctx context.Context, secret *core.Secret) error {
 	log.V(1).M(secret).F().P()
 
 	if util.IsContextDone(ctx) {
diff --git a/pkg/controller/chi/deleter.go b/pkg/controller/chi/deleter.go
index 8049075a9..215decd57 100644
--- a/pkg/controller/chi/deleter.go
+++ b/pkg/controller/chi/deleter.go
@@ -18,19 +18,19 @@ import (
"context" "time" - appsV1 "k8s.io/api/apps/v1" - coreV1 "k8s.io/api/core/v1" + apps "k8s.io/api/apps/v1" + core "k8s.io/api/core/v1" apiErrors "k8s.io/apimachinery/pkg/api/errors" log "github.com/altinity/clickhouse-operator/pkg/announcer" - chiV1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" + api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" "github.com/altinity/clickhouse-operator/pkg/controller" model "github.com/altinity/clickhouse-operator/pkg/model/chi" "github.com/altinity/clickhouse-operator/pkg/util" ) // deleteHost deletes all kubernetes resources related to replica *chop.ChiHost -func (c *Controller) deleteHost(ctx context.Context, host *chiV1.ChiHost) error { +func (c *Controller) deleteHost(ctx context.Context, host *api.ChiHost) error { log.V(1).M(host).S().Info(host.Address.ClusterNameString()) // Each host consists of: @@ -45,7 +45,7 @@ func (c *Controller) deleteHost(ctx context.Context, host *chiV1.ChiHost) error } // deleteConfigMapsCHI -func (c *Controller) deleteConfigMapsCHI(ctx context.Context, chi *chiV1.ClickHouseInstallation) error { +func (c *Controller) deleteConfigMapsCHI(ctx context.Context, chi *api.ClickHouseInstallation) error { if util.IsContextDone(ctx) { log.V(2).Info("task is done") return nil @@ -88,7 +88,7 @@ func (c *Controller) deleteConfigMapsCHI(ctx context.Context, chi *chiV1.ClickHo } // statefulSetDeletePod delete a pod of a StatefulSet. This requests StatefulSet to relaunch deleted pod -func (c *Controller) statefulSetDeletePod(ctx context.Context, statefulSet *appsV1.StatefulSet, host *chiV1.ChiHost) error { +func (c *Controller) statefulSetDeletePod(ctx context.Context, statefulSet *apps.StatefulSet, host *api.ChiHost) error { if util.IsContextDone(ctx) { log.V(2).Info("task is done") return nil @@ -110,7 +110,7 @@ func (c *Controller) statefulSetDeletePod(ctx context.Context, statefulSet *apps } // deleteStatefulSet gracefully deletes StatefulSet through zeroing Pod's count -func (c *Controller) deleteStatefulSet(ctx context.Context, host *chiV1.ChiHost) error { +func (c *Controller) deleteStatefulSet(ctx context.Context, host *api.ChiHost) error { if util.IsContextDone(ctx) { log.V(2).Info("task is done") return nil @@ -164,7 +164,7 @@ func (c *Controller) deleteStatefulSet(ctx context.Context, host *chiV1.ChiHost) } // syncStatefulSet -func (c *Controller) syncStatefulSet(ctx context.Context, host *chiV1.ChiHost) { +func (c *Controller) syncStatefulSet(ctx context.Context, host *api.ChiHost) { for { if util.IsContextDone(ctx) { log.V(2).Info("task is done") @@ -183,7 +183,7 @@ func (c *Controller) syncStatefulSet(ctx context.Context, host *chiV1.ChiHost) { } // deletePVC deletes PersistentVolumeClaim -func (c *Controller) deletePVC(ctx context.Context, host *chiV1.ChiHost) error { +func (c *Controller) deletePVC(ctx context.Context, host *api.ChiHost) error { if util.IsContextDone(ctx) { log.V(2).Info("task is done") return nil @@ -193,7 +193,7 @@ func (c *Controller) deletePVC(ctx context.Context, host *chiV1.ChiHost) error { defer log.V(2).M(host).E().P() namespace := host.Address.Namespace - c.walkDiscoveredPVCs(host, func(pvc *coreV1.PersistentVolumeClaim) { + c.walkDiscoveredPVCs(host, func(pvc *core.PersistentVolumeClaim) { if util.IsContextDone(ctx) { log.V(2).Info("task is done") return @@ -222,7 +222,7 @@ func (c *Controller) deletePVC(ctx context.Context, host *chiV1.ChiHost) error { } // deleteConfigMap deletes ConfigMap -func (c *Controller) deleteConfigMap(ctx 
context.Context, host *chiV1.ChiHost) error { +func (c *Controller) deleteConfigMap(ctx context.Context, host *api.ChiHost) error { if util.IsContextDone(ctx) { log.V(2).Info("task is done") return nil @@ -256,7 +256,7 @@ func (c *Controller) deleteConfigMap(ctx context.Context, host *chiV1.ChiHost) e } // deleteServiceHost deletes Service -func (c *Controller) deleteServiceHost(ctx context.Context, host *chiV1.ChiHost) error { +func (c *Controller) deleteServiceHost(ctx context.Context, host *api.ChiHost) error { if util.IsContextDone(ctx) { log.V(2).Info("task is done") return nil @@ -269,7 +269,7 @@ func (c *Controller) deleteServiceHost(ctx context.Context, host *chiV1.ChiHost) } // deleteServiceShard -func (c *Controller) deleteServiceShard(ctx context.Context, shard *chiV1.ChiShard) error { +func (c *Controller) deleteServiceShard(ctx context.Context, shard *api.ChiShard) error { if util.IsContextDone(ctx) { log.V(2).Info("task is done") return nil @@ -282,7 +282,7 @@ func (c *Controller) deleteServiceShard(ctx context.Context, shard *chiV1.ChiSha } // deleteServiceCluster -func (c *Controller) deleteServiceCluster(ctx context.Context, cluster *chiV1.Cluster) error { +func (c *Controller) deleteServiceCluster(ctx context.Context, cluster *api.Cluster) error { if util.IsContextDone(ctx) { log.V(2).Info("task is done") return nil @@ -295,7 +295,7 @@ func (c *Controller) deleteServiceCluster(ctx context.Context, cluster *chiV1.Cl } // deleteServiceCHI -func (c *Controller) deleteServiceCHI(ctx context.Context, chi *chiV1.ClickHouseInstallation) error { +func (c *Controller) deleteServiceCHI(ctx context.Context, chi *api.ClickHouseInstallation) error { if util.IsContextDone(ctx) { log.V(2).Info("task is done") return nil @@ -334,7 +334,7 @@ func (c *Controller) deleteServiceIfExists(ctx context.Context, namespace, name } // deleteSecretCluster -func (c *Controller) deleteSecretCluster(ctx context.Context, cluster *chiV1.Cluster) error { +func (c *Controller) deleteSecretCluster(ctx context.Context, cluster *api.Cluster) error { if util.IsContextDone(ctx) { log.V(2).Info("task is done") return nil diff --git a/pkg/controller/chi/discoverer.go b/pkg/controller/chi/discoverer.go index c59226f20..88a1b73db 100644 --- a/pkg/controller/chi/discoverer.go +++ b/pkg/controller/chi/discoverer.go @@ -17,16 +17,16 @@ package chi import ( "context" - metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" log "github.com/altinity/clickhouse-operator/pkg/announcer" - chiV1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" + api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" "github.com/altinity/clickhouse-operator/pkg/controller" model "github.com/altinity/clickhouse-operator/pkg/model/chi" "github.com/altinity/clickhouse-operator/pkg/util" ) -func (c *Controller) discovery(ctx context.Context, chi *chiV1.ClickHouseInstallation) *model.Registry { +func (c *Controller) discovery(ctx context.Context, chi *api.ClickHouseInstallation) *model.Registry { if util.IsContextDone(ctx) { log.V(2).Info("task is done") return nil @@ -45,7 +45,7 @@ func (c *Controller) discovery(ctx context.Context, chi *chiV1.ClickHouseInstall return r } -func (c *Controller) discoveryStatefulSets(ctx context.Context, r *model.Registry, chi *chiV1.ClickHouseInstallation, opts metaV1.ListOptions) { +func (c *Controller) discoveryStatefulSets(ctx context.Context, r *model.Registry, chi *api.ClickHouseInstallation, opts meta.ListOptions) { 
 	list, err := c.kubeClient.AppsV1().StatefulSets(chi.Namespace).List(ctx, opts)
 	if err != nil {
 		log.M(chi).F().Error("FAIL list StatefulSet err: %v", err)
@@ -60,7 +60,7 @@ func (c *Controller) discoveryStatefulSets(ctx context.Context, r *model.Registr
 	}
 }
 
-func (c *Controller) discoveryConfigMaps(ctx context.Context, r *model.Registry, chi *chiV1.ClickHouseInstallation, opts metaV1.ListOptions) {
+func (c *Controller) discoveryConfigMaps(ctx context.Context, r *model.Registry, chi *api.ClickHouseInstallation, opts meta.ListOptions) {
 	list, err := c.kubeClient.CoreV1().ConfigMaps(chi.Namespace).List(ctx, opts)
 	if err != nil {
 		log.M(chi).F().Error("FAIL list ConfigMap err: %v", err)
@@ -75,7 +75,7 @@ func (c *Controller) discoveryConfigMaps(ctx context.Context, r *model.Registry,
 	}
 }
 
-func (c *Controller) discoveryServices(ctx context.Context, r *model.Registry, chi *chiV1.ClickHouseInstallation, opts metaV1.ListOptions) {
+func (c *Controller) discoveryServices(ctx context.Context, r *model.Registry, chi *api.ClickHouseInstallation, opts meta.ListOptions) {
 	list, err := c.kubeClient.CoreV1().Services(chi.Namespace).List(ctx, opts)
 	if err != nil {
 		log.M(chi).F().Error("FAIL list Service err: %v", err)
@@ -90,7 +90,7 @@ func (c *Controller) discoveryServices(ctx context.Context, r *model.Registry, c
 	}
 }
 
-func (c *Controller) discoverySecrets(ctx context.Context, r *model.Registry, chi *chiV1.ClickHouseInstallation, opts metaV1.ListOptions) {
+func (c *Controller) discoverySecrets(ctx context.Context, r *model.Registry, chi *api.ClickHouseInstallation, opts meta.ListOptions) {
 	list, err := c.kubeClient.CoreV1().Secrets(chi.Namespace).List(ctx, opts)
 	if err != nil {
 		log.M(chi).F().Error("FAIL list Secret err: %v", err)
@@ -105,7 +105,7 @@ func (c *Controller) discoverySecrets(ctx context.Context, r *model.Registry, ch
 	}
 }
 
-func (c *Controller) discoveryPVCs(ctx context.Context, r *model.Registry, chi *chiV1.ClickHouseInstallation, opts metaV1.ListOptions) {
+func (c *Controller) discoveryPVCs(ctx context.Context, r *model.Registry, chi *api.ClickHouseInstallation, opts meta.ListOptions) {
 	list, err := c.kubeClient.CoreV1().PersistentVolumeClaims(chi.Namespace).List(ctx, opts)
 	if err != nil {
 		log.M(chi).F().Error("FAIL list PVC err: %v", err)
@@ -121,7 +121,7 @@ func (c *Controller) discoveryPVCs(ctx context.Context, r *model.Registry, chi *
 }
 
 // Comment out PV
-//func (c *Controller) discoveryPVs(ctx context.Context, r *chopModel.Registry, chi *chiV1.ClickHouseInstallation, opts metaV1.ListOptions) {
+//func (c *Controller) discoveryPVs(ctx context.Context, r *chopModel.Registry, chi *api.ClickHouseInstallation, opts meta.ListOptions) {
 //	list, err := c.kubeClient.CoreV1().PersistentVolumes().List(ctx, opts)
 //	if err != nil {
 //		log.M(chi).F().Error("FAIL list PV err: %v", err)
@@ -136,7 +136,7 @@ func (c *Controller) discoveryPVCs(ctx context.Context, r *model.Registry, chi *
 //	}
 //}
 
-func (c *Controller) discoveryPDBs(ctx context.Context, r *model.Registry, chi *chiV1.ClickHouseInstallation, opts metaV1.ListOptions) {
+func (c *Controller) discoveryPDBs(ctx context.Context, r *model.Registry, chi *api.ClickHouseInstallation, opts meta.ListOptions) {
 	list, err := c.kubeClient.PolicyV1().PodDisruptionBudgets(chi.Namespace).List(ctx, opts)
 	if err != nil {
 		log.M(chi).F().Error("FAIL list PDB err: %v", err)
diff --git a/pkg/controller/chi/event.go b/pkg/controller/chi/event.go
index fc6de0f47..4ac2fd7ec 100644
--- a/pkg/controller/chi/event.go
+++ b/pkg/controller/chi/event.go
@@ -17,11 +17,11 @@ package chi
 
 import (
 	"time"
 
-	coreV1 "k8s.io/api/core/v1"
-	metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	core "k8s.io/api/core/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	log "github.com/altinity/clickhouse-operator/pkg/announcer"
-	chiV1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+	api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
 	"github.com/altinity/clickhouse-operator/pkg/controller"
 )
 
@@ -64,7 +64,7 @@ const (
 
 // EventInfo emits event Info
 func (c *Controller) EventInfo(
-	chi *chiV1.ClickHouseInstallation,
+	chi *api.ClickHouseInstallation,
 	action string,
 	reason string,
 	message string,
@@ -74,7 +74,7 @@ func (c *Controller) EventInfo(
 
 // EventWarning emits event Warning
 func (c *Controller) EventWarning(
-	chi *chiV1.ClickHouseInstallation,
+	chi *api.ClickHouseInstallation,
 	action string,
 	reason string,
 	message string,
@@ -84,7 +84,7 @@ func (c *Controller) EventWarning(
 
 // EventError emits event Error
 func (c *Controller) EventError(
-	chi *chiV1.ClickHouseInstallation,
+	chi *api.ClickHouseInstallation,
 	action string,
 	reason string,
 	message string,
@@ -98,7 +98,7 @@ func (c *Controller) EventError(
 // reason - short, machine understandable string, one of eventReason*
 // message - human-readable description
 func (c *Controller) emitEvent(
-	chi *chiV1.ClickHouseInstallation,
+	chi *api.ClickHouseInstallation,
 	_type string,
 	action string,
 	reason string,
@@ -111,11 +111,11 @@ func (c *Controller) emitEvent(
 	uid := chi.UID
 	resourceVersion := chi.ResourceVersion
 
-	event := &coreV1.Event{
-		ObjectMeta: metaV1.ObjectMeta{
+	event := &core.Event{
+		ObjectMeta: meta.ObjectMeta{
 			GenerateName: "chop-chi-",
 		},
-		InvolvedObject: coreV1.ObjectReference{
+		InvolvedObject: core.ObjectReference{
 			Kind: kind,
 			Namespace: namespace,
 			Name: name,
@@ -125,13 +125,13 @@ func (c *Controller) emitEvent(
 		},
 		Reason: reason,
 		Message: message,
-		Source: coreV1.EventSource{
+		Source: core.EventSource{
 			Component: componentName,
 		},
-		FirstTimestamp: metaV1.Time{
+		FirstTimestamp: meta.Time{
 			Time: now,
 		},
-		LastTimestamp: metaV1.Time{
+		LastTimestamp: meta.Time{
 			Time: now,
 		},
 		Count: 1,
diff --git a/pkg/controller/chi/getter.go b/pkg/controller/chi/getter.go
index 5b3e4bba1..929c37dd9 100644
--- a/pkg/controller/chi/getter.go
+++ b/pkg/controller/chi/getter.go
@@ -17,24 +17,24 @@ package chi
 
 import (
 	"fmt"
 
-	appsV1 "k8s.io/api/apps/v1"
-	coreV1 "k8s.io/api/core/v1"
+	apps "k8s.io/api/apps/v1"
+	core "k8s.io/api/core/v1"
 	apiErrors "k8s.io/apimachinery/pkg/api/errors"
-	metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	k8slabels "k8s.io/apimachinery/pkg/labels"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+	k8sLabels "k8s.io/apimachinery/pkg/labels"
 
 	log "github.com/altinity/clickhouse-operator/pkg/announcer"
-	chiV1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+	api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
 	"github.com/altinity/clickhouse-operator/pkg/controller"
 	model "github.com/altinity/clickhouse-operator/pkg/model/chi"
 )
 
 // getConfigMap gets ConfigMap either by namespaced name or by labels
 // TODO review byNameOnly params
-func (c *Controller) getConfigMap(objMeta *metaV1.ObjectMeta, byNameOnly bool) (*coreV1.ConfigMap, error) {
+func (c *Controller) getConfigMap(objMeta *meta.ObjectMeta, byNameOnly bool) (*core.ConfigMap, error) {
 	get := c.configMapLister.ConfigMaps(objMeta.Namespace).Get
 	list := c.configMapLister.ConfigMaps(objMeta.Namespace).List
-	var objects []*coreV1.ConfigMap
+	var objects []*core.ConfigMap
 
 	// Check whether object with such name already exists
 	obj, err := get(objMeta.Name)
@@ -57,7 +57,7 @@ func (c *Controller) getConfigMap(objMeta *metaV1.ObjectMeta, byNameOnly bool) (
 
 	// Try to find by labels
 
-	var selector k8slabels.Selector
+	var selector k8sLabels.Selector
 	if selector, err = model.MakeSelectorFromObjectMeta(objMeta); err != nil {
 		return nil, err
 	}
@@ -67,7 +67,7 @@ func (c *Controller) getConfigMap(objMeta *metaV1.ObjectMeta, byNameOnly bool) (
 	}
 
 	if len(objects) == 0 {
-		return nil, apiErrors.NewNotFound(appsV1.Resource("ConfigMap"), objMeta.Name)
+		return nil, apiErrors.NewNotFound(apps.Resource("ConfigMap"), objMeta.Name)
 	}
 
 	if len(objects) == 1 {
@@ -82,13 +82,13 @@ func (c *Controller) getConfigMap(objMeta *metaV1.ObjectMeta, byNameOnly bool) (
 // getService gets Service. Accepted types:
 // 1. *core.Service
 // 2. *chop.ChiHost
-func (c *Controller) getService(obj interface{}) (*coreV1.Service, error) {
+func (c *Controller) getService(obj interface{}) (*core.Service, error) {
 	var name, namespace string
 	switch typedObj := obj.(type) {
-	case *coreV1.Service:
+	case *core.Service:
 		name = typedObj.Name
 		namespace = typedObj.Namespace
-	case *chiV1.ChiHost:
+	case *api.ChiHost:
 		name = model.CreateStatefulSetServiceName(typedObj)
 		namespace = typedObj.Address.Namespace
 	}
@@ -99,15 +99,15 @@ func (c *Controller) getService(obj interface{}) (*coreV1.Service, error) {
 // getStatefulSet gets StatefulSet. Accepted types:
 // 1. *meta.ObjectMeta
 // 2. *chop.ChiHost
-func (c *Controller) getStatefulSet(obj interface{}, byName ...bool) (*appsV1.StatefulSet, error) {
+func (c *Controller) getStatefulSet(obj interface{}, byName ...bool) (*apps.StatefulSet, error) {
 	switch typedObj := obj.(type) {
-	case *metaV1.ObjectMeta:
+	case *meta.ObjectMeta:
 		var b bool
 		if len(byName) > 0 {
 			b = byName[0]
 		}
 		return c.getStatefulSetByMeta(typedObj, b)
-	case *chiV1.ChiHost:
+	case *api.ChiHost:
 		return c.getStatefulSetByHost(typedObj)
 	}
 	return nil, fmt.Errorf("unknown type")
@@ -115,10 +115,10 @@ func (c *Controller) getStatefulSet(obj interface{}, byName ...bool) (*appsV1.St
 
 // getStatefulSet gets StatefulSet either by namespaced name or by labels
 // TODO review byNameOnly params
-func (c *Controller) getStatefulSetByMeta(meta *metaV1.ObjectMeta, byNameOnly bool) (*appsV1.StatefulSet, error) {
+func (c *Controller) getStatefulSetByMeta(meta *meta.ObjectMeta, byNameOnly bool) (*apps.StatefulSet, error) {
 	get := c.statefulSetLister.StatefulSets(meta.Namespace).Get
 	list := c.statefulSetLister.StatefulSets(meta.Namespace).List
-	var objects []*appsV1.StatefulSet
+	var objects []*apps.StatefulSet
 
 	// Check whether object with such name already exists
 	obj, err := get(meta.Name)
@@ -139,7 +139,7 @@ func (c *Controller) getStatefulSetByMeta(meta *metaV1.ObjectMeta, byNameOnly bo
 		return nil, fmt.Errorf("object not found by name %s/%s and no label search allowed ", meta.Namespace, meta.Name)
 	}
 
-	var selector k8slabels.Selector
+	var selector k8sLabels.Selector
 	if selector, err = model.MakeSelectorFromObjectMeta(meta); err != nil {
 		return nil, err
 	}
@@ -149,7 +149,7 @@ func (c *Controller) getStatefulSetByMeta(meta *metaV1.ObjectMeta, byNameOnly bo
 	}
 
 	if len(objects) == 0 {
-		return nil, apiErrors.NewNotFound(appsV1.Resource("StatefulSet"), meta.Name)
+		return nil, apiErrors.NewNotFound(apps.Resource("StatefulSet"), meta.Name)
 	}
 
 	if len(objects) == 1 {
@@ -162,7 +162,7 @@ func (c *Controller) getStatefulSetByMeta(meta *metaV1.ObjectMeta, byNameOnly bo
 }
 
 // getStatefulSetByHost finds StatefulSet of a specified host
-func (c *Controller) getStatefulSetByHost(host *chiV1.ChiHost) (*appsV1.StatefulSet, error) {
+func (c *Controller) getStatefulSetByHost(host *api.ChiHost) (*apps.StatefulSet, error) {
 	// Namespaced name
 	name := model.CreateStatefulSetName(host)
 	namespace := host.Address.Namespace
@@ -171,20 +171,20 @@ func (c *Controller) getStatefulSetByHost(host *chiV1.ChiHost) (*appsV1.Stateful
 }
 
 // getSecret gets secret
-func (c *Controller) getSecret(secret *coreV1.Secret) (*coreV1.Secret, error) {
+func (c *Controller) getSecret(secret *core.Secret) (*core.Secret, error) {
 	return c.kubeClient.CoreV1().Secrets(secret.Namespace).Get(controller.NewContext(), secret.Name, controller.NewGetOptions())
 }
 
 // getPod gets pod. Accepted types:
 // 1. *apps.StatefulSet
 // 2. *chop.ChiHost
-func (c *Controller) getPod(obj interface{}) (*coreV1.Pod, error) {
+func (c *Controller) getPod(obj interface{}) (*core.Pod, error) {
 	var name, namespace string
 	switch typedObj := obj.(type) {
-	case *appsV1.StatefulSet:
+	case *apps.StatefulSet:
 		name = model.CreatePodName(obj)
 		namespace = typedObj.Namespace
-	case *chiV1.ChiHost:
+	case *api.ChiHost:
 		name = model.CreatePodName(obj)
 		namespace = typedObj.Address.Namespace
 	}
@@ -192,19 +192,19 @@ func (c *Controller) getPod(obj interface{}) (*coreV1.Pod, error) {
 }
 
 // getPods gets all pods for provided entity
-func (c *Controller) getPods(obj interface{}) []*coreV1.Pod {
+func (c *Controller) getPods(obj interface{}) []*core.Pod {
 	switch typed := obj.(type) {
-	case *chiV1.ClickHouseInstallation:
+	case *api.ClickHouseInstallation:
 		return c.getPodsOfCHI(typed)
-	case *chiV1.Cluster:
+	case *api.Cluster:
 		return c.getPodsOfCluster(typed)
-	case *chiV1.ChiShard:
+	case *api.ChiShard:
 		return c.getPodsOfShard(typed)
 	case
-		*chiV1.ChiHost,
-		*appsV1.StatefulSet:
+		*api.ChiHost,
+		*apps.StatefulSet:
 		if pod, err := c.getPod(typed); err == nil {
-			return []*coreV1.Pod{
+			return []*core.Pod{
 				pod,
 			}
 		}
@@ -213,8 +213,8 @@ func (c *Controller) getPods(obj interface{}) []*coreV1.Pod {
 }
 
 // getPodsOfCluster gets all pods in a cluster
-func (c *Controller) getPodsOfCluster(cluster *chiV1.Cluster) (pods []*coreV1.Pod) {
-	cluster.WalkHosts(func(host *chiV1.ChiHost) error {
+func (c *Controller) getPodsOfCluster(cluster *api.Cluster) (pods []*core.Pod) {
+	cluster.WalkHosts(func(host *api.ChiHost) error {
 		if pod, err := c.getPod(host); err == nil {
 			pods = append(pods, pod)
 		}
@@ -224,8 +224,8 @@ func (c *Controller) getPodsOfCluster(cluster *chiV1.Cluster) (pods []*coreV1.Po
 }
 
 // getPodsOfShard gets all pods in a shard
-func (c *Controller) getPodsOfShard(shard *chiV1.ChiShard) (pods []*coreV1.Pod) {
-	shard.WalkHosts(func(host *chiV1.ChiHost) error {
+func (c *Controller) getPodsOfShard(shard *api.ChiShard) (pods []*core.Pod) {
+	shard.WalkHosts(func(host *api.ChiHost) error {
 		if pod, err := c.getPod(host); err == nil {
 			pods = append(pods, pod)
 		}
@@ -235,8 +235,8 @@ func (c *Controller) getPodsOfShard(shard *chiV1.ChiShard) (pods []*coreV1.Pod)
 }
 
 // getPodsOfCHI gets all pods in a CHI
-func (c *Controller) getPodsOfCHI(chi *chiV1.ClickHouseInstallation) (pods []*coreV1.Pod) {
-	chi.WalkHosts(func(host *chiV1.ChiHost) error {
+func (c *Controller) getPodsOfCHI(chi *api.ClickHouseInstallation) (pods []*core.Pod) {
+	chi.WalkHosts(func(host *api.ChiHost) error {
 		if pod, err := c.getPod(host); err == nil {
 			pods = append(pods, pod)
 		}
@@ -262,7 +262,7 @@ func (c *Controller) getPodsIPs(obj interface{}) (ips []string) {
 }
 
 // GetCHIByObjectMeta gets CHI by namespaced name
-func (c *Controller) GetCHIByObjectMeta(objectMeta *metaV1.ObjectMeta, isCHI bool) (*chiV1.ClickHouseInstallation, error) {
+func (c *Controller) GetCHIByObjectMeta(objectMeta *meta.ObjectMeta, isCHI bool) (*api.ClickHouseInstallation, error) {
 	var chiName string
 	var err error
 	if isCHI {
diff --git a/pkg/controller/chi/labeler.go b/pkg/controller/chi/labeler.go
index e84fb476e..5e22b2fc5 100644
--- a/pkg/controller/chi/labeler.go
+++ b/pkg/controller/chi/labeler.go
@@ -20,12 +20,12 @@ import (
 	"fmt"
 	"strings"
 
-	appsV1 "k8s.io/api/apps/v1"
-	coreV1 "k8s.io/api/core/v1"
+	apps "k8s.io/api/apps/v1"
+	core "k8s.io/api/core/v1"
 	apiErrors "k8s.io/apimachinery/pkg/api/errors"
 
 	log "github.com/altinity/clickhouse-operator/pkg/announcer"
-	chiV1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+	api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
 	"github.com/altinity/clickhouse-operator/pkg/chop"
 	"github.com/altinity/clickhouse-operator/pkg/controller"
 	model "github.com/altinity/clickhouse-operator/pkg/model/chi"
@@ -70,11 +70,11 @@ func (c *Controller) labelMyObjectsTree(ctx context.Context) error {
 	}
 
 	// What pod does operator run in?
-	name, ok1 := chop.Get().ConfigManager.GetRuntimeParam(chiV1.OPERATOR_POD_NAME)
-	namespace, ok2 := chop.Get().ConfigManager.GetRuntimeParam(chiV1.OPERATOR_POD_NAMESPACE)
+	name, ok1 := chop.Get().ConfigManager.GetRuntimeParam(api.OPERATOR_POD_NAME)
+	namespace, ok2 := chop.Get().ConfigManager.GetRuntimeParam(api.OPERATOR_POD_NAMESPACE)
 
 	if !ok1 || !ok2 {
-		str := fmt.Sprintf("ERROR read env vars: %s/%s ", chiV1.OPERATOR_POD_NAME, chiV1.OPERATOR_POD_NAMESPACE)
+		str := fmt.Sprintf("ERROR read env vars: %s/%s ", api.OPERATOR_POD_NAME, api.OPERATOR_POD_NAMESPACE)
 		log.V(1).M(namespace, name).F().Error(str)
 		return errors.New(str)
 	}
@@ -111,7 +111,7 @@ func (c *Controller) labelMyObjectsTree(ctx context.Context) error {
 	return nil
 }
 
-func (c *Controller) labelPod(ctx context.Context, namespace, name string) (*coreV1.Pod, error) {
+func (c *Controller) labelPod(ctx context.Context, namespace, name string) (*core.Pod, error) {
 	pod, err := c.kubeClient.CoreV1().Pods(namespace).Get(ctx, name, controller.NewGetOptions())
 	if err != nil {
 		log.V(1).M(namespace, name).F().Error("ERROR get Pod %s/%s %v", namespace, name, err)
@@ -139,7 +139,7 @@ func (c *Controller) labelPod(ctx context.Context, namespace, name string) (*cor
 	return pod, nil
 }
 
-func (c *Controller) labelReplicaSet(ctx context.Context, pod *coreV1.Pod) (*appsV1.ReplicaSet, error) {
+func (c *Controller) labelReplicaSet(ctx context.Context, pod *core.Pod) (*apps.ReplicaSet, error) {
 	// Find parent ReplicaSet
 	replicaSetName := ""
 	for i := range pod.OwnerReferences {
@@ -186,7 +186,7 @@ func (c *Controller) labelReplicaSet(ctx context.Context, pod *coreV1.Pod) (*app
 	return replicaSet, nil
 }
 
-func (c *Controller) labelDeployment(ctx context.Context, rs *appsV1.ReplicaSet) error {
+func (c *Controller) labelDeployment(ctx context.Context, rs *apps.ReplicaSet) error {
 	// Find parent Deployment
 	deploymentName := ""
 	for i := range rs.OwnerReferences {
@@ -248,7 +248,7 @@ func (c *Controller) addLabels(labels map[string]string) map[string]string {
 }
 
 // appendLabelReadyOnPod appends Label "Ready" to the pod of the specified host
-func (c *Controller) appendLabelReadyOnPod(ctx context.Context, host *chiV1.ChiHost) error {
+func (c *Controller) appendLabelReadyOnPod(ctx context.Context, host *api.ChiHost) error {
 	if util.IsContextDone(ctx) {
 		log.V(2).Info("task is done")
 		return nil
@@ -273,7 +273,7 @@ func (c *Controller) appendLabelReadyOnPod(ctx context.Context, host *chiV1.ChiH
 }
 
 // deleteLabelReadyPod deletes Label "Ready" from the pod of the specified host
-func (c *Controller) deleteLabelReadyPod(ctx context.Context, host *chiV1.ChiHost) error {
+func (c *Controller) deleteLabelReadyPod(ctx context.Context, host *api.ChiHost) error {
 	if util.IsContextDone(ctx) {
 		log.V(2).Info("task is done")
 		return nil
@@ -304,7 +304,7 @@ func (c *Controller) deleteLabelReadyPod(ctx context.Context, host *chiV1.ChiHos
 }
 
 // appendAnnotationReadyOnService appends Annotation "Ready" to the service of the specified host
-func (c *Controller) appendAnnotationReadyOnService(ctx context.Context, host *chiV1.ChiHost) error {
+func (c *Controller) appendAnnotationReadyOnService(ctx context.Context, host *api.ChiHost) error {
 	if util.IsContextDone(ctx) {
 		log.V(2).Info("task is done")
 		return nil
@@ -329,7 +329,7 @@ func (c *Controller) appendAnnotationReadyOnService(ctx context.Context, host *c
 }
 
 // deleteAnnotationReadyService deletes Annotation "Ready" from the service of the specified host
-func (c *Controller) deleteAnnotationReadyService(ctx context.Context, host *chiV1.ChiHost) error {
+func (c *Controller) deleteAnnotationReadyService(ctx context.Context, host *api.ChiHost) error {
 	if util.IsContextDone(ctx) {
 		log.V(2).Info("task is done")
 		return nil
diff --git a/pkg/controller/chi/podder.go b/pkg/controller/chi/podder.go
index 3ddcbdb68..30c14ba07 100644
--- a/pkg/controller/chi/podder.go
+++ b/pkg/controller/chi/podder.go
@@ -18,11 +18,11 @@ import (
 	"k8s.io/api/core/v1"
 
 	log "github.com/altinity/clickhouse-operator/pkg/announcer"
-	chiV1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+	api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
 )
 
 // walkContainers walks with specified func over all containers of the specified host
-func (c *Controller) walkContainers(host *chiV1.ChiHost, f func(container *v1.Container)) {
+func (c *Controller) walkContainers(host *api.ChiHost, f func(container *v1.Container)) {
 	pod, err := c.getPod(host)
 	if err != nil {
 		log.M(host).F().Error("FAIL get pod for host '%s' err: %v", host.Address.NamespaceNameString(), err)
@@ -36,7 +36,7 @@ func (c *Controller) walkContainers(host *chiV1.ChiHost, f func(container *v1.Co
 }
 
 // walkContainerStatuses walks with specified func over all statuses of the specified host
-func (c *Controller) walkContainerStatuses(host *chiV1.ChiHost, f func(status *v1.ContainerStatus)) {
+func (c *Controller) walkContainerStatuses(host *api.ChiHost, f func(status *v1.ContainerStatus)) {
 	pod, err := c.getPod(host)
 	if err != nil {
 		log.M(host).F().Error("FAIL get pod for host %s err:%v", host.Address.NamespaceNameString(), err)
@@ -50,7 +50,7 @@ func (c *Controller) walkContainerStatuses(host *chiV1.ChiHost, f func(status *v
 }
 
 // isHostRunning checks whether ALL containers of the specified host are running
-func (c *Controller) isHostRunning(host *chiV1.ChiHost) bool {
+func (c *Controller) isHostRunning(host *api.ChiHost) bool {
 	all := true
 	c.walkContainerStatuses(host, func(status *v1.ContainerStatus) {
 		if status.State.Running == nil {
diff --git a/pkg/controller/chi/type_cmd_queue.go b/pkg/controller/chi/type_cmd_queue.go
index bedaa9612..658316aec 100644
--- a/pkg/controller/chi/type_cmd_queue.go
+++ b/pkg/controller/chi/type_cmd_queue.go
@@ -15,12 +15,12 @@ package chi
 
 import (
-	coreV1 "k8s.io/api/core/v1"
-	metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	core "k8s.io/api/core/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	"github.com/altinity/queue"
 
-	chiV1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+	api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
 )
 
 const (
@@ -51,8 +51,8 @@ const (
 type ReconcileCHI struct {
 	PriorityQueueItem
 	cmd string
-	old *chiV1.ClickHouseInstallation
-	new *chiV1.ClickHouseInstallation
+	old *api.ClickHouseInstallation
+	new *api.ClickHouseInstallation
 }
 
 var _ queue.PriorityQueueItem = &ReconcileCHI{}
@@ -69,7 +69,7 @@ func (r ReconcileCHI) Handle() queue.T {
 }
 
 // NewReconcileCHI creates new reconcile request queue item
-func NewReconcileCHI(cmd string, old, new *chiV1.ClickHouseInstallation) *ReconcileCHI {
+func NewReconcileCHI(cmd string, old, new *api.ClickHouseInstallation) *ReconcileCHI {
 	return &ReconcileCHI{
 		PriorityQueueItem: PriorityQueueItem{
 			priority: priorityReconcileCHI,
@@ -102,8 +102,8 @@ func NewReconcileCHI(cmd string, old, new *chiV1.ClickHouseInstallation) *Reconc
 type ReconcileCHIT struct {
 	PriorityQueueItem
 	cmd string
-	old *chiV1.ClickHouseInstallationTemplate
-	new *chiV1.ClickHouseInstallationTemplate
+	old *api.ClickHouseInstallationTemplate
+	new *api.ClickHouseInstallationTemplate
 }
 
 var _ queue.PriorityQueueItem = &ReconcileCHIT{}
@@ -120,7 +120,7 @@ func (r ReconcileCHIT) Handle() queue.T {
 }
 
 // NewReconcileCHIT creates new reconcile CHI template queue item
-func NewReconcileCHIT(cmd string, old, new *chiV1.ClickHouseInstallationTemplate) *ReconcileCHIT {
+func NewReconcileCHIT(cmd string, old, new *api.ClickHouseInstallationTemplate) *ReconcileCHIT {
 	return &ReconcileCHIT{
 		PriorityQueueItem: PriorityQueueItem{
 			priority: priorityReconcileCHIT,
@@ -135,8 +135,8 @@ func NewReconcileCHIT(cmd string, old, new *chiV1.ClickHouseInstallationTemplate
 type ReconcileChopConfig struct {
 	PriorityQueueItem
 	cmd string
-	old *chiV1.ClickHouseOperatorConfiguration
-	new *chiV1.ClickHouseOperatorConfiguration
+	old *api.ClickHouseOperatorConfiguration
+	new *api.ClickHouseOperatorConfiguration
 }
 
 var _ queue.PriorityQueueItem = &ReconcileChopConfig{}
@@ -153,7 +153,7 @@ func (r ReconcileChopConfig) Handle() queue.T {
 }
 
 // NewReconcileChopConfig creates new CHOp config queue item
-func NewReconcileChopConfig(cmd string, old, new *chiV1.ClickHouseOperatorConfiguration) *ReconcileChopConfig {
+func NewReconcileChopConfig(cmd string, old, new *api.ClickHouseOperatorConfiguration) *ReconcileChopConfig {
 	return &ReconcileChopConfig{
 		PriorityQueueItem: PriorityQueueItem{
 			priority: priorityReconcileChopConfig,
@@ -168,8 +168,8 @@ func NewReconcileChopConfig(cmd string, old, new *chiV1.ClickHouseOperatorConfig
 type ReconcileEndpoints struct {
 	PriorityQueueItem
 	cmd string
-	old *coreV1.Endpoints
-	new *coreV1.Endpoints
+	old *core.Endpoints
+	new *core.Endpoints
 }
 
 var _ queue.PriorityQueueItem = &ReconcileEndpoints{}
@@ -186,7 +186,7 @@ func (r ReconcileEndpoints) Handle() queue.T {
 }
 
 // NewReconcileEndpoints creates new reconcile endpoints queue item
-func NewReconcileEndpoints(cmd string, old, new *coreV1.Endpoints) *ReconcileEndpoints {
+func NewReconcileEndpoints(cmd string, old, new *core.Endpoints) *ReconcileEndpoints {
 	return &ReconcileEndpoints{
 		PriorityQueueItem: PriorityQueueItem{
 			priority: priorityReconcileEndpoints,
@@ -200,7 +200,7 @@ func NewReconcileEndpoints(cmd string, old, new *coreV1.Endpoints) *ReconcileEnd
 
 // DropDns specifies drop dns queue item
 type DropDns struct {
PriorityQueueItem - initiator *metaV1.ObjectMeta + initiator *meta.ObjectMeta } var _ queue.PriorityQueueItem = &DropDns{} @@ -214,7 +214,7 @@ func (r DropDns) Handle() queue.T { } // NewDropDns creates new drop dns queue item -func NewDropDns(initiator *metaV1.ObjectMeta) *DropDns { +func NewDropDns(initiator *meta.ObjectMeta) *DropDns { return &DropDns{ PriorityQueueItem: PriorityQueueItem{ priority: priorityDropDNS, @@ -227,8 +227,8 @@ func NewDropDns(initiator *metaV1.ObjectMeta) *DropDns { type ReconcilePod struct { PriorityQueueItem cmd string - old *coreV1.Pod - new *coreV1.Pod + old *core.Pod + new *core.Pod } var _ queue.PriorityQueueItem = &ReconcilePod{} @@ -245,7 +245,7 @@ func (r ReconcilePod) Handle() queue.T { } // NewReconcilePod creates new reconcile pod queue item -func NewReconcilePod(cmd string, old, new *coreV1.Pod) *ReconcilePod { +func NewReconcilePod(cmd string, old, new *core.Pod) *ReconcilePod { return &ReconcilePod{ cmd: cmd, old: old, diff --git a/pkg/controller/chi/type_controller.go b/pkg/controller/chi/type_controller.go index c5450302b..52a8fc629 100644 --- a/pkg/controller/chi/type_controller.go +++ b/pkg/controller/chi/type_controller.go @@ -18,53 +18,53 @@ import ( "time" kube "k8s.io/client-go/kubernetes" - appslisters "k8s.io/client-go/listers/apps/v1" - corelisters "k8s.io/client-go/listers/core/v1" + appsListers "k8s.io/client-go/listers/apps/v1" + coreListers "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" //"k8s.io/client-go/util/workqueue" - apiextensions "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + apiExtensions "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" "github.com/altinity/queue" - chopclientset "github.com/altinity/clickhouse-operator/pkg/client/clientset/versioned" - choplisters "github.com/altinity/clickhouse-operator/pkg/client/listers/clickhouse.altinity.com/v1" + chopClientSet "github.com/altinity/clickhouse-operator/pkg/client/clientset/versioned" + chopListers "github.com/altinity/clickhouse-operator/pkg/client/listers/clickhouse.altinity.com/v1" ) // Controller defines CRO controller type Controller struct { // kubeClient used to Create() k8s resources as c.kubeClient.AppsV1().StatefulSets(namespace).Create(name) kubeClient kube.Interface - extClient apiextensions.Interface + extClient apiExtensions.Interface // chopClient used to Update() CRD k8s resource as c.chopClient.ClickhouseV1().ClickHouseInstallations(chi.Namespace).Update(chiCopy) - chopClient chopclientset.Interface + chopClient chopClientSet.Interface // chiLister used as chiLister.ClickHouseInstallations(namespace).Get(name) - chiLister choplisters.ClickHouseInstallationLister + chiLister chopListers.ClickHouseInstallationLister // chiListerSynced used in waitForCacheSync() chiListerSynced cache.InformerSynced - chitLister choplisters.ClickHouseInstallationTemplateLister + chitLister chopListers.ClickHouseInstallationTemplateLister chitListerSynced cache.InformerSynced // serviceLister used as serviceLister.Services(namespace).Get(name) - serviceLister corelisters.ServiceLister + serviceLister coreListers.ServiceLister // serviceListerSynced used in waitForCacheSync() serviceListerSynced cache.InformerSynced // endpointsLister used as endpointsLister.Endpoints(namespace).Get(name) - endpointsLister corelisters.EndpointsLister + endpointsLister coreListers.EndpointsLister // endpointsListerSynced used in waitForCacheSync() endpointsListerSynced cache.InformerSynced //
configMapLister used as configMapLister.ConfigMaps(namespace).Get(name) - configMapLister corelisters.ConfigMapLister + configMapLister coreListers.ConfigMapLister // configMapListerSynced used in waitForCacheSync() configMapListerSynced cache.InformerSynced // statefulSetLister used as statefulSetLister.StatefulSets(namespace).Get(name) - statefulSetLister appslisters.StatefulSetLister + statefulSetLister appsListers.StatefulSetLister // statefulSetListerSynced used in waitForCacheSync() statefulSetListerSynced cache.InformerSynced // podLister used as podLister.Pods(namespace).Get(name) - podLister corelisters.PodLister + podLister coreListers.PodLister // podListerSynced used in waitForCacheSync() podListerSynced cache.InformerSynced diff --git a/pkg/controller/chi/volumes.go b/pkg/controller/chi/volumes.go index 612ff9f3d..8aa2c7497 100644 --- a/pkg/controller/chi/volumes.go +++ b/pkg/controller/chi/volumes.go @@ -15,15 +15,15 @@ package chi import ( - coreV1 "k8s.io/api/core/v1" + core "k8s.io/api/core/v1" log "github.com/altinity/clickhouse-operator/pkg/announcer" - chiV1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" + api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" "github.com/altinity/clickhouse-operator/pkg/controller" model "github.com/altinity/clickhouse-operator/pkg/model/chi" ) -func (c *Controller) walkPVCs(host *chiV1.ChiHost, f func(pvc *coreV1.PersistentVolumeClaim)) { +func (c *Controller) walkPVCs(host *api.ChiHost, f func(pvc *core.PersistentVolumeClaim)) { namespace := host.Address.Namespace name := model.CreatePodName(host) pod, err := c.kubeClient.CoreV1().Pods(namespace).Get(controller.NewContext(), name, controller.NewGetOptions()) @@ -49,7 +49,7 @@ func (c *Controller) walkPVCs(host *chiV1.ChiHost, f func(pvc *coreV1.Persistent } } -func (c *Controller) walkDiscoveredPVCs(host *chiV1.ChiHost, f func(pvc *coreV1.PersistentVolumeClaim)) { +func (c *Controller) walkDiscoveredPVCs(host *api.ChiHost, f func(pvc *core.PersistentVolumeClaim)) { namespace := host.Address.Namespace pvcList, err := c.kubeClient.CoreV1().PersistentVolumeClaims(namespace).List(controller.NewContext(), controller.NewListOptions(model.GetSelectorHostScope(host))) @@ -67,8 +67,8 @@ func (c *Controller) walkDiscoveredPVCs(host *chiV1.ChiHost, f func(pvc *coreV1.
} // Comment out PV -//func (c *Controller) walkPVs(host *chiV1.ChiHost, f func(pv *coreV1.PersistentVolume)) { -// c.walkPVCs(host, func(pvc *coreV1.PersistentVolumeClaim) { +//func (c *Controller) walkPVs(host *api.ChiHost, f func(pv *core.PersistentVolume)) { +// c.walkPVCs(host, func(pvc *core.PersistentVolumeClaim) { // pv, err := c.kubeClient.CoreV1().PersistentVolumes().Get(newContext(), pvc.Spec.VolumeName, newGetOptions()) // if err != nil { // log.M(host).F().Error("FAIL get PV %s err:%v", pvc.Spec.VolumeName, err) diff --git a/pkg/controller/options.go b/pkg/controller/options.go index 6976c673e..3d4d3f76e 100644 --- a/pkg/controller/options.go +++ b/pkg/controller/options.go @@ -15,43 +15,43 @@ package controller import ( - metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" ) // NewListOptions returns filled metav1.ListOptions -func NewListOptions(labelsMap map[string]string) metaV1.ListOptions { +func NewListOptions(labelsMap map[string]string) meta.ListOptions { labelSelector := labels.SelectorFromSet(labelsMap) - return metaV1.ListOptions{ + return meta.ListOptions{ LabelSelector: labelSelector.String(), } } // NewGetOptions returns filled metav1.GetOptions -func NewGetOptions() metaV1.GetOptions { - return metaV1.GetOptions{} +func NewGetOptions() meta.GetOptions { + return meta.GetOptions{} } // NewCreateOptions returns filled metav1.CreateOptions -func NewCreateOptions() metaV1.CreateOptions { - return metaV1.CreateOptions{} +func NewCreateOptions() meta.CreateOptions { + return meta.CreateOptions{} } // NewUpdateOptions returns filled metav1.UpdateOptions -func NewUpdateOptions() metaV1.UpdateOptions { - return metaV1.UpdateOptions{} +func NewUpdateOptions() meta.UpdateOptions { + return meta.UpdateOptions{} } // NewPatchOptions returns filled metav1.PatchOptions -func NewPatchOptions() metaV1.PatchOptions { - return metaV1.PatchOptions{} +func NewPatchOptions() meta.PatchOptions { + return meta.PatchOptions{} } // NewDeleteOptions returns filled *metav1.DeleteOptions -func NewDeleteOptions() metaV1.DeleteOptions { +func NewDeleteOptions() meta.DeleteOptions { gracePeriodSeconds := int64(0) - propagationPolicy := metaV1.DeletePropagationForeground - return metaV1.DeleteOptions{ + propagationPolicy := meta.DeletePropagationForeground + return meta.DeleteOptions{ GracePeriodSeconds: &gracePeriodSeconds, PropagationPolicy: &propagationPolicy, } diff --git a/pkg/metrics/metrics.go b/pkg/metrics/metrics.go index faebe3f25..36361db50 100644 --- a/pkg/metrics/metrics.go +++ b/pkg/metrics/metrics.go @@ -16,23 +16,24 @@ package metrics import ( "fmt" + "net/http" + "github.com/prometheus/client_golang/prometheus/promhttp" "go.opentelemetry.io/otel/exporters/prometheus" otelApi "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/sdk/metric" otelResource "go.opentelemetry.io/otel/sdk/resource" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" - "net/http" log "github.com/altinity/clickhouse-operator/pkg/announcer" - chiV1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" + api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" "github.com/altinity/clickhouse-operator/pkg/chop" "github.com/altinity/clickhouse-operator/pkg/version" ) func newOTELResource() (*otelResource.Resource, error) { - pod, _ := chop.Get().ConfigManager.GetRuntimeParam(chiV1.OPERATOR_POD_NAME) - namespace, _ := 
chop.Get().ConfigManager.GetRuntimeParam(chiV1.OPERATOR_POD_NAMESPACE) + pod, _ := chop.Get().ConfigManager.GetRuntimeParam(api.OPERATOR_POD_NAME) + namespace, _ := chop.Get().ConfigManager.GetRuntimeParam(api.OPERATOR_POD_NAMESPACE) return otelResource.Merge( otelResource.Default(), otelResource.NewWithAttributes( @@ -54,7 +55,7 @@ func StartMetricsExporter(endpoint, path string) { // Prometheus exporter embeds a default OpenTelemetry Reader and implements prometheus.Collector, // allowing it to be used as both a Reader and Collector. - //namespace, _ := chop.Get().ConfigManager.GetRuntimeParam(chiV1.OPERATOR_POD_NAMESPACE) + //namespace, _ := chop.Get().ConfigManager.GetRuntimeParam(api.OPERATOR_POD_NAMESPACE) exporter, err := prometheus.New( prometheus.WithoutUnits(), //prometheus.WithoutTargetInfo(), diff --git a/pkg/model/chi/action_plan.go b/pkg/model/chi/action_plan.go index 9ca1186b4..2fb6aff88 100644 --- a/pkg/model/chi/action_plan.go +++ b/pkg/model/chi/action_plan.go @@ -16,7 +16,8 @@ package chi import ( "gopkg.in/d4l3k/messagediff.v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + meta "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" "github.com/altinity/clickhouse-operator/pkg/util" @@ -94,7 +95,7 @@ func NewActionPlan(old, new *v1.ClickHouseInstallation) *ActionPlan { return ap } -func (ap *ActionPlan) timestampEqual(old, new *metav1.Time) bool { +func (ap *ActionPlan) timestampEqual(old, new *meta.Time) bool { switch { case (old == nil) && (new == nil): // Both are useless diff --git a/pkg/model/chi/affinity.go b/pkg/model/chi/affinity.go index 629824601..7efba5a73 100644 --- a/pkg/model/chi/affinity.go +++ b/pkg/model/chi/affinity.go @@ -16,15 +16,16 @@ package chi import ( "gopkg.in/d4l3k/messagediff.v1" + "k8s.io/api/core/v1" - metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" - chiV1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" + api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" "github.com/altinity/clickhouse-operator/pkg/util" ) // NewAffinity creates new Affinity struct -func NewAffinity(template *chiV1.ChiPodTemplate) *v1.Affinity { +func NewAffinity(template *api.ChiPodTemplate) *v1.Affinity { // Pod node affinity scheduling rules. nodeAffinity := newNodeAffinity(template) // Pod affinity scheduling rules. 
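For orientation, the composition pattern behind NewAffinity can be sketched as follows. This is a minimal sketch rather than the function body from this diff, and it assumes the api alias introduced above plus the newNodeAffinity/newPodAffinity/newPodAntiAffinity helpers that appear elsewhere in affinity.go:

```go
// Sketch: build an *v1.Affinity from its optional parts and collapse
// to nil when no part was configured, so empty affinity is omitted.
func buildAffinitySketch(template *api.ChiPodTemplate) *v1.Affinity {
	nodeAffinity := newNodeAffinity(template)       // node scheduling rules
	podAffinity := newPodAffinity(template)         // co-location rules
	podAntiAffinity := newPodAntiAffinity(template) // spreading rules
	if (nodeAffinity == nil) && (podAffinity == nil) && (podAntiAffinity == nil) {
		return nil
	}
	return &v1.Affinity{
		NodeAffinity:    nodeAffinity,
		PodAffinity:     podAffinity,
		PodAntiAffinity: podAntiAffinity,
	}
}
```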
Ex.: co-locate this pod in the same node, zone, etc @@ -73,7 +74,7 @@ func MergeAffinity(dst *v1.Affinity, src *v1.Affinity) *v1.Affinity { } // newNodeAffinity -func newNodeAffinity(template *chiV1.ChiPodTemplate) *v1.NodeAffinity { +func newNodeAffinity(template *api.ChiPodTemplate) *v1.NodeAffinity { if template.Zone.Key == "" { return nil } @@ -227,7 +228,7 @@ func mergeNodeAffinity(dst *v1.NodeAffinity, src *v1.NodeAffinity) *v1.NodeAffin } // newPodAffinity -func newPodAffinity(template *chiV1.ChiPodTemplate) *v1.PodAffinity { +func newPodAffinity(template *api.ChiPodTemplate) *v1.PodAffinity { // Return podAffinity only in case something was added into it added := false podAffinity := &v1.PodAffinity{} @@ -235,7 +236,7 @@ func newPodAffinity(template *chiV1.ChiPodTemplate) *v1.PodAffinity { for i := range template.PodDistribution { podDistribution := &template.PodDistribution[i] switch podDistribution.Type { - case chiV1.PodDistributionNamespaceAffinity: + case api.PodDistributionNamespaceAffinity: added = true podAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append( podAffinity.PreferredDuringSchedulingIgnoredDuringExecution, @@ -247,7 +248,7 @@ func newPodAffinity(template *chiV1.ChiPodTemplate) *v1.PodAffinity { }, ), ) - case chiV1.PodDistributionClickHouseInstallationAffinity: + case api.PodDistributionClickHouseInstallationAffinity: added = true podAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append( podAffinity.PreferredDuringSchedulingIgnoredDuringExecution, @@ -259,7 +260,7 @@ func newPodAffinity(template *chiV1.ChiPodTemplate) *v1.PodAffinity { }, ), ) - case chiV1.PodDistributionClusterAffinity: + case api.PodDistributionClusterAffinity: added = true podAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append( podAffinity.PreferredDuringSchedulingIgnoredDuringExecution, @@ -271,7 +272,7 @@ func newPodAffinity(template *chiV1.ChiPodTemplate) *v1.PodAffinity { }, ), ) - case chiV1.PodDistributionShardAffinity: + case api.PodDistributionShardAffinity: added = true podAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append( podAffinity.PreferredDuringSchedulingIgnoredDuringExecution, @@ -283,7 +284,7 @@ func newPodAffinity(template *chiV1.ChiPodTemplate) *v1.PodAffinity { }, ), ) - case chiV1.PodDistributionReplicaAffinity: + case api.PodDistributionReplicaAffinity: added = true podAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append( podAffinity.PreferredDuringSchedulingIgnoredDuringExecution, @@ -295,7 +296,7 @@ func newPodAffinity(template *chiV1.ChiPodTemplate) *v1.PodAffinity { }, ), ) - case chiV1.PodDistributionPreviousTailAffinity: + case api.PodDistributionPreviousTailAffinity: // Newer k8s insists on Required for this Affinity added = true podAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append( @@ -448,42 +449,42 @@ func mergePodAffinity(dst *v1.PodAffinity, src *v1.PodAffinity) *v1.PodAffinity // newMatchLabels func newMatchLabels( - podDistribution *chiV1.ChiPodDistribution, + podDistribution *api.ChiPodDistribution, matchLabels map[string]string, ) map[string]string { var scopeLabels map[string]string switch podDistribution.Scope { - case chiV1.PodDistributionScopeShard: + case api.PodDistributionScopeShard: scopeLabels = map[string]string{ LabelNamespace: macrosNamespace, LabelCHIName: macrosChiName, LabelClusterName: macrosClusterName, LabelShardName: macrosShardName, } - case chiV1.PodDistributionScopeReplica: + case api.PodDistributionScopeReplica: scopeLabels = map[string]string{ 
LabelNamespace: macrosNamespace, LabelCHIName: macrosChiName, LabelClusterName: macrosClusterName, LabelReplicaName: macrosReplicaName, } - case chiV1.PodDistributionScopeCluster: + case api.PodDistributionScopeCluster: scopeLabels = map[string]string{ LabelNamespace: macrosNamespace, LabelCHIName: macrosChiName, LabelClusterName: macrosClusterName, } - case chiV1.PodDistributionScopeClickHouseInstallation: + case api.PodDistributionScopeClickHouseInstallation: scopeLabels = map[string]string{ LabelNamespace: macrosNamespace, LabelCHIName: macrosChiName, } - case chiV1.PodDistributionScopeNamespace: + case api.PodDistributionScopeNamespace: scopeLabels = map[string]string{ LabelNamespace: macrosNamespace, } - case chiV1.PodDistributionScopeGlobal: + case api.PodDistributionScopeGlobal: scopeLabels = map[string]string{} } @@ -491,7 +492,7 @@ func newMatchLabels( } // newPodAntiAffinity -func newPodAntiAffinity(template *chiV1.ChiPodTemplate) *v1.PodAntiAffinity { +func newPodAntiAffinity(template *api.ChiPodTemplate) *v1.PodAntiAffinity { // Return podAntiAffinity only in case something was added into it added := false podAntiAffinity := &v1.PodAntiAffinity{} @@ -500,7 +501,7 @@ func newPodAntiAffinity(template *chiV1.ChiPodTemplate) *v1.PodAntiAffinity { for i := range template.PodDistribution { podDistribution := &template.PodDistribution[i] switch podDistribution.Type { - case chiV1.PodDistributionClickHouseAntiAffinity: + case api.PodDistributionClickHouseAntiAffinity: added = true podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append( podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, @@ -514,7 +515,7 @@ func newPodAntiAffinity(template *chiV1.ChiPodTemplate) *v1.PodAntiAffinity { ), ), ) - case chiV1.PodDistributionMaxNumberPerNode: + case api.PodDistributionMaxNumberPerNode: added = true podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append( podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, @@ -528,7 +529,7 @@ func newPodAntiAffinity(template *chiV1.ChiPodTemplate) *v1.PodAntiAffinity { ), ), ) - case chiV1.PodDistributionShardAntiAffinity: + case api.PodDistributionShardAntiAffinity: added = true podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append( podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, @@ -542,7 +543,7 @@ func newPodAntiAffinity(template *chiV1.ChiPodTemplate) *v1.PodAntiAffinity { ), ), ) - case chiV1.PodDistributionReplicaAntiAffinity: + case api.PodDistributionReplicaAntiAffinity: added = true podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append( podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, @@ -556,16 +557,16 @@ func newPodAntiAffinity(template *chiV1.ChiPodTemplate) *v1.PodAntiAffinity { ), ), ) - case chiV1.PodDistributionAnotherNamespaceAntiAffinity: + case api.PodDistributionAnotherNamespaceAntiAffinity: added = true podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append( podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, newPodAffinityTermWithMatchExpressions( podDistribution, - []metaV1.LabelSelectorRequirement{ + []meta.LabelSelectorRequirement{ { Key: LabelNamespace, - Operator: metaV1.LabelSelectorOpNotIn, + Operator: meta.LabelSelectorOpNotIn, Values: []string{ macrosNamespace, }, @@ -573,16 +574,16 @@ func newPodAntiAffinity(template *chiV1.ChiPodTemplate) *v1.PodAntiAffinity { }, ), ) - case chiV1.PodDistributionAnotherClickHouseInstallationAntiAffinity: + case 
api.PodDistributionAnotherClickHouseInstallationAntiAffinity: added = true podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append( podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, newPodAffinityTermWithMatchExpressions( podDistribution, - []metaV1.LabelSelectorRequirement{ + []meta.LabelSelectorRequirement{ { Key: LabelCHIName, - Operator: metaV1.LabelSelectorOpNotIn, + Operator: meta.LabelSelectorOpNotIn, Values: []string{ macrosChiName, }, @@ -590,16 +591,16 @@ func newPodAntiAffinity(template *chiV1.ChiPodTemplate) *v1.PodAntiAffinity { }, ), ) - case chiV1.PodDistributionAnotherClusterAntiAffinity: + case api.PodDistributionAnotherClusterAntiAffinity: added = true podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append( podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, newPodAffinityTermWithMatchExpressions( podDistribution, - []metaV1.LabelSelectorRequirement{ + []meta.LabelSelectorRequirement{ { Key: LabelClusterName, - Operator: metaV1.LabelSelectorOpNotIn, + Operator: meta.LabelSelectorOpNotIn, Values: []string{ macrosClusterName, }, @@ -738,21 +739,21 @@ func mergePodAntiAffinity(dst *v1.PodAntiAffinity, src *v1.PodAntiAffinity) *v1. // newPodAffinityTermWithMatchLabels func newPodAffinityTermWithMatchLabels( - podDistribution *chiV1.ChiPodDistribution, + podDistribution *api.ChiPodDistribution, matchLabels map[string]string, ) v1.PodAffinityTerm { return v1.PodAffinityTerm{ - LabelSelector: &metaV1.LabelSelector{ + LabelSelector: &meta.LabelSelector{ // A list of node selector requirements by node's labels. //MatchLabels: map[string]string{ // LabelClusterScopeCycleIndex: macrosClusterScopeCycleIndex, //}, MatchLabels: matchLabels, // Switch to MatchLabels - //MatchExpressions: []metaV1.LabelSelectorRequirement{ + //MatchExpressions: []meta.LabelSelectorRequirement{ // { // Key: LabelAppName, - // Operator: metaV1.LabelSelectorOpIn, + // Operator: meta.LabelSelectorOpIn, // Values: []string{ // LabelAppValue, // }, @@ -765,19 +766,19 @@ func newPodAffinityTermWithMatchLabels( // newPodAffinityTermWithMatchExpressions func newPodAffinityTermWithMatchExpressions( - podDistribution *chiV1.ChiPodDistribution, - matchExpressions []metaV1.LabelSelectorRequirement, + podDistribution *api.ChiPodDistribution, + matchExpressions []meta.LabelSelectorRequirement, ) v1.PodAffinityTerm { return v1.PodAffinityTerm{ - LabelSelector: &metaV1.LabelSelector{ + LabelSelector: &meta.LabelSelector{ // A list of node selector requirements by node's labels. //MatchLabels: map[string]string{ // LabelClusterScopeCycleIndex: macrosClusterScopeCycleIndex, //}, - //MatchExpressions: []metaV1.LabelSelectorRequirement{ + //MatchExpressions: []meta.LabelSelectorRequirement{ // { // Key: LabelAppName, - // Operator: metaV1.LabelSelectorOpIn, + // Operator: meta.LabelSelectorOpIn, // Values: []string{ // LabelAppValue, // }, @@ -792,23 +793,23 @@ func newPodAffinityTermWithMatchExpressions( // newWeightedPodAffinityTermWithMatchLabels is an enhanced append() func newWeightedPodAffinityTermWithMatchLabels( weight int32, - podDistribution *chiV1.ChiPodDistribution, + podDistribution *api.ChiPodDistribution, matchLabels map[string]string, ) v1.WeightedPodAffinityTerm { return v1.WeightedPodAffinityTerm{ Weight: weight, PodAffinityTerm: v1.PodAffinityTerm{ - LabelSelector: &metaV1.LabelSelector{ + LabelSelector: &meta.LabelSelector{ // A list of node selector requirements by node's labels. 
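The three "another namespace/CHI/cluster" anti-affinity cases above share one shape: a required PodAffinityTerm whose selector uses LabelSelectorOpNotIn against the corresponding macro. A minimal sketch of that shape, assuming the meta alias and the LabelNamespace/macrosNamespace names used in this file; the topology key is an assumption for illustration, not taken from this diff:

```go
// Sketch: keep this pod away from pods whose namespace label differs
// from ours, i.e. forbid co-location with pods of any other namespace.
func anotherNamespaceTermSketch() v1.PodAffinityTerm {
	return v1.PodAffinityTerm{
		LabelSelector: &meta.LabelSelector{
			MatchExpressions: []meta.LabelSelectorRequirement{
				{
					Key:      LabelNamespace,
					Operator: meta.LabelSelectorOpNotIn, // NOT in our namespace
					Values:   []string{macrosNamespace},
				},
			},
		},
		TopologyKey: "kubernetes.io/hostname", // assumed host-level topology
	}
}
```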
//MatchLabels: map[string]string{ // LabelClusterScopeCycleIndex: macrosClusterScopeCycleIndex, //}, MatchLabels: matchLabels, // Switch to MatchLabels - //MatchExpressions: []metaV1.LabelSelectorRequirement{ + //MatchExpressions: []meta.LabelSelectorRequirement{ // { // Key: LabelAppName, - // Operator: metaV1.LabelSelectorOpIn, + // Operator: meta.LabelSelectorOpIn, // Values: []string{ // LabelAppValue, // }, @@ -821,7 +822,7 @@ func newWeightedPodAffinityTermWithMatchLabels( } // prepareAffinity -func prepareAffinity(podTemplate *chiV1.ChiPodTemplate, host *chiV1.ChiHost) { +func prepareAffinity(podTemplate *api.ChiPodTemplate, host *api.ChiHost) { switch { case podTemplate == nil: return @@ -848,7 +849,7 @@ func prepareAffinity(podTemplate *chiV1.ChiPodTemplate, host *chiV1.ChiHost) { } // processNodeSelector -func processNodeSelector(nodeSelector *v1.NodeSelector, host *chiV1.ChiHost) { +func processNodeSelector(nodeSelector *v1.NodeSelector, host *api.ChiHost) { if nodeSelector == nil { return } @@ -859,7 +860,7 @@ func processNodeSelector(nodeSelector *v1.NodeSelector, host *chiV1.ChiHost) { } // processPreferredSchedulingTerms -func processPreferredSchedulingTerms(preferredSchedulingTerms []v1.PreferredSchedulingTerm, host *chiV1.ChiHost) { +func processPreferredSchedulingTerms(preferredSchedulingTerms []v1.PreferredSchedulingTerm, host *api.ChiHost) { for i := range preferredSchedulingTerms { nodeSelectorTerm := &preferredSchedulingTerms[i].Preference processNodeSelectorTerm(nodeSelectorTerm, host) @@ -867,7 +868,7 @@ func processPreferredSchedulingTerms(preferredSchedulingTerms []v1.PreferredSche } // processNodeSelectorTerm -func processNodeSelectorTerm(nodeSelectorTerm *v1.NodeSelectorTerm, host *chiV1.ChiHost) { +func processNodeSelectorTerm(nodeSelectorTerm *v1.NodeSelectorTerm, host *api.ChiHost) { for i := range nodeSelectorTerm.MatchExpressions { nodeSelectorRequirement := &nodeSelectorTerm.MatchExpressions[i] processNodeSelectorRequirement(nodeSelectorRequirement, host) @@ -880,7 +881,7 @@ func processNodeSelectorTerm(nodeSelectorTerm *v1.NodeSelectorTerm, host *chiV1. } // processNodeSelectorRequirement -func processNodeSelectorRequirement(nodeSelectorRequirement *v1.NodeSelectorRequirement, host *chiV1.ChiHost) { +func processNodeSelectorRequirement(nodeSelectorRequirement *v1.NodeSelectorRequirement, host *api.ChiHost) { if nodeSelectorRequirement == nil { return } @@ -892,7 +893,7 @@ func processNodeSelectorRequirement(nodeSelectorRequirement *v1.NodeSelectorRequ } // processPodAffinityTerms -func processPodAffinityTerms(podAffinityTerms []v1.PodAffinityTerm, host *chiV1.ChiHost) { +func processPodAffinityTerms(podAffinityTerms []v1.PodAffinityTerm, host *api.ChiHost) { for i := range podAffinityTerms { podAffinityTerm := &podAffinityTerms[i] processPodAffinityTerm(podAffinityTerm, host) @@ -900,7 +901,7 @@ func processPodAffinityTerms(podAffinityTerms []v1.PodAffinityTerm, host *chiV1. 
} // processWeightedPodAffinityTerms -func processWeightedPodAffinityTerms(weightedPodAffinityTerms []v1.WeightedPodAffinityTerm, host *chiV1.ChiHost) { +func processWeightedPodAffinityTerms(weightedPodAffinityTerms []v1.WeightedPodAffinityTerm, host *api.ChiHost) { for i := range weightedPodAffinityTerms { podAffinityTerm := &weightedPodAffinityTerms[i].PodAffinityTerm processPodAffinityTerm(podAffinityTerm, host) @@ -908,7 +909,7 @@ func processWeightedPodAffinityTerms(weightedPodAffinityTerms []v1.WeightedPodAf } // processPodAffinityTerm -func processPodAffinityTerm(podAffinityTerm *v1.PodAffinityTerm, host *chiV1.ChiHost) { +func processPodAffinityTerm(podAffinityTerm *v1.PodAffinityTerm, host *api.ChiHost) { if podAffinityTerm == nil { return } @@ -917,7 +918,7 @@ func processPodAffinityTerm(podAffinityTerm *v1.PodAffinityTerm, host *chiV1.Chi } // processLabelSelector -func processLabelSelector(labelSelector *metaV1.LabelSelector, host *chiV1.ChiHost) { +func processLabelSelector(labelSelector *meta.LabelSelector, host *api.ChiHost) { if labelSelector == nil { return } @@ -932,7 +933,7 @@ func processLabelSelector(labelSelector *metaV1.LabelSelector, host *chiV1.ChiHo } // processLabelSelectorRequirement -func processLabelSelectorRequirement(labelSelectorRequirement *metaV1.LabelSelectorRequirement, host *chiV1.ChiHost) { +func processLabelSelectorRequirement(labelSelectorRequirement *meta.LabelSelectorRequirement, host *api.ChiHost) { if labelSelectorRequirement == nil { return } diff --git a/pkg/model/chi/annotator.go b/pkg/model/chi/annotator.go index 8898b4170..4b237b650 100644 --- a/pkg/model/chi/annotator.go +++ b/pkg/model/chi/annotator.go @@ -15,20 +15,20 @@ package chi import ( - v1 "k8s.io/api/core/v1" + core "k8s.io/api/core/v1" - chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" + api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" "github.com/altinity/clickhouse-operator/pkg/chop" "github.com/altinity/clickhouse-operator/pkg/util" ) // Annotator is an entity which can annotate CHI artifacts type Annotator struct { - chi *chiv1.ClickHouseInstallation + chi *api.ClickHouseInstallation } // NewAnnotator creates new annotator with context -func NewAnnotator(chi *chiv1.ClickHouseInstallation) *Annotator { +func NewAnnotator(chi *api.ClickHouseInstallation) *Annotator { return &Annotator{ chi: chi, } @@ -51,7 +51,7 @@ func (a *Annotator) getConfigMapCHICommonUsers() map[string]string { } // getConfigMapHost -func (a *Annotator) getConfigMapHost(host *chiv1.ChiHost) map[string]string { +func (a *Annotator) getConfigMapHost(host *api.ChiHost) map[string]string { return util.MergeStringMapsOverwrite( a.getHostScope(host), nil, @@ -59,7 +59,7 @@ func (a *Annotator) getConfigMapHost(host *chiv1.ChiHost) map[string]string { } // getServiceCHI -func (a *Annotator) getServiceCHI(chi *chiv1.ClickHouseInstallation) map[string]string { +func (a *Annotator) getServiceCHI(chi *api.ClickHouseInstallation) map[string]string { return util.MergeStringMapsOverwrite( a.getCHIScope(), nil, @@ -67,7 +67,7 @@ func (a *Annotator) getServiceCHI(chi *chiv1.ClickHouseInstallation) map[string] } // getServiceCluster -func (a *Annotator) getServiceCluster(cluster *chiv1.Cluster) map[string]string { +func (a *Annotator) getServiceCluster(cluster *api.Cluster) map[string]string { return util.MergeStringMapsOverwrite( a.getClusterScope(cluster), nil, @@ -75,7 +75,7 @@ func (a *Annotator) getServiceCluster(cluster *chiv1.Cluster) 
map[string]string } // getServiceShard -func (a *Annotator) getServiceShard(shard *chiv1.ChiShard) map[string]string { +func (a *Annotator) getServiceShard(shard *api.ChiShard) map[string]string { return util.MergeStringMapsOverwrite( a.getShardScope(shard), nil, @@ -83,7 +83,7 @@ func (a *Annotator) getServiceShard(shard *chiv1.ChiShard) map[string]string { } // getServiceHost -func (a *Annotator) getServiceHost(host *chiv1.ChiHost) map[string]string { +func (a *Annotator) getServiceHost(host *api.ChiHost) map[string]string { return util.MergeStringMapsOverwrite( a.getHostScope(host), nil, @@ -97,19 +97,19 @@ func (a *Annotator) getCHIScope() map[string]string { } // getClusterScope gets annotations for Cluster-scoped object -func (a *Annotator) getClusterScope(cluster *chiv1.Cluster) map[string]string { +func (a *Annotator) getClusterScope(cluster *api.Cluster) map[string]string { // Combine generated annotations and CHI-provided annotations return a.filterOutPredefined(a.appendCHIProvidedTo(nil)) } // getShardScope gets annotations for Shard-scoped object -func (a *Annotator) getShardScope(shard *chiv1.ChiShard) map[string]string { +func (a *Annotator) getShardScope(shard *api.ChiShard) map[string]string { // Combine generated annotations and CHI-provided annotations return a.filterOutPredefined(a.appendCHIProvidedTo(nil)) } // getHostScope gets annotations for Host-scoped object -func (a *Annotator) getHostScope(host *chiv1.ChiHost) map[string]string { +func (a *Annotator) getHostScope(host *api.ChiHost) map[string]string { return a.filterOutPredefined(a.appendCHIProvidedTo(nil)) } @@ -125,15 +125,15 @@ func (a *Annotator) appendCHIProvidedTo(dst map[string]string) map[string]string } // getPV -func (a *Annotator) getPV(pv *v1.PersistentVolume, host *chiv1.ChiHost) map[string]string { +func (a *Annotator) getPV(pv *core.PersistentVolume, host *api.ChiHost) map[string]string { return util.MergeStringMapsOverwrite(pv.Annotations, a.getHostScope(host)) } // getPVC func (a *Annotator) getPVC( - pvc *v1.PersistentVolumeClaim, - host *chiv1.ChiHost, - template *chiv1.ChiVolumeClaimTemplate, + pvc *core.PersistentVolumeClaim, + host *api.ChiHost, + template *api.ChiVolumeClaimTemplate, ) map[string]string { annotations := util.MergeStringMapsOverwrite(pvc.Annotations, template.ObjectMeta.Annotations) return util.MergeStringMapsOverwrite(annotations, a.getHostScope(host)) diff --git a/pkg/model/chi/ch_config_const.go b/pkg/model/chi/ch_config_const.go index 6b569822b..fcce22a18 100644 --- a/pkg/model/chi/ch_config_const.go +++ b/pkg/model/chi/ch_config_const.go @@ -14,7 +14,7 @@ package chi -import "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" +import api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" const ( xmlTagYandex = "yandex" @@ -36,7 +36,7 @@ const ( // for the following sections: // 1. remote servers // 2. operator-provided additional config files - dirPathCommonConfig = "/etc/clickhouse-server/" + v1.CommonConfigDir + "/" + dirPathCommonConfig = "/etc/clickhouse-server/" + api.CommonConfigDir + "/" // dirPathUsersConfig specifies full path to folder, where generated users XML files for ClickHouse would be placed // for the following sections: @@ -44,7 +44,7 @@ const ( // 2. quotas // 3. profiles // 4. 
operator-provided additional config files - dirPathUsersConfig = "/etc/clickhouse-server/" + v1.UsersConfigDir + "/" + dirPathUsersConfig = "/etc/clickhouse-server/" + api.UsersConfigDir + "/" // dirPathHostConfig specifies full path to folder, where generated host XML files for ClickHouse would be placed // for the following sections: @@ -53,7 +53,7 @@ const ( // 3. settings // 4. files // 5. operator-provided additional config files - dirPathHostConfig = "/etc/clickhouse-server/" + v1.HostConfigDir + "/" + dirPathHostConfig = "/etc/clickhouse-server/" + api.HostConfigDir + "/" // dirPathClickHouseData specifies full path of data folder where ClickHouse would place its data storage dirPathClickHouseData = "/var/lib/clickhouse" diff --git a/pkg/model/chi/ch_config_files_generator.go b/pkg/model/chi/ch_config_files_generator.go index 462623b82..206b5d70a 100644 --- a/pkg/model/chi/ch_config_files_generator.go +++ b/pkg/model/chi/ch_config_files_generator.go @@ -15,7 +15,7 @@ package chi import ( - chi "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" + api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" "github.com/altinity/clickhouse-operator/pkg/util" ) @@ -24,13 +24,13 @@ type ClickHouseConfigFilesGenerator struct { // ClickHouse config generator chConfigGenerator *ClickHouseConfigGenerator // clickhouse-operator configuration - chopConfig *chi.OperatorConfig + chopConfig *api.OperatorConfig } // NewClickHouseConfigFilesGenerator creates new clickhouse configuration generator object func NewClickHouseConfigFilesGenerator( chConfigGenerator *ClickHouseConfigGenerator, - chopConfig *chi.OperatorConfig, + chopConfig *api.OperatorConfig, ) *ClickHouseConfigFilesGenerator { return &ClickHouseConfigFilesGenerator{ chConfigGenerator: chConfigGenerator, @@ -83,7 +83,7 @@ func (c *ClickHouseConfigFilesGenerator) CreateConfigFilesGroupCommon(options *C // 3. 
common files util.IncludeNonEmpty(commonConfigSections, createConfigSectionFilename(configRemoteServers), c.chConfigGenerator.GetRemoteServers(options.GetRemoteServersGeneratorOptions())) util.IncludeNonEmpty(commonConfigSections, createConfigSectionFilename(configSettings), c.chConfigGenerator.GetSettingsGlobal()) - util.MergeStringMapsOverwrite(commonConfigSections, c.chConfigGenerator.GetFiles(chi.SectionCommon, true, nil)) + util.MergeStringMapsOverwrite(commonConfigSections, c.chConfigGenerator.GetFiles(api.SectionCommon, true, nil)) // Extra user-specified config files util.MergeStringMapsOverwrite(commonConfigSections, c.chopConfig.ClickHouse.Config.File.Runtime.CommonConfigFiles) @@ -101,7 +101,7 @@ func (c *ClickHouseConfigFilesGenerator) CreateConfigFilesGroupUsers() map[strin util.IncludeNonEmpty(commonUsersConfigSections, createConfigSectionFilename(configUsers), c.chConfigGenerator.GetUsers()) util.IncludeNonEmpty(commonUsersConfigSections, createConfigSectionFilename(configQuotas), c.chConfigGenerator.GetQuotas()) util.IncludeNonEmpty(commonUsersConfigSections, createConfigSectionFilename(configProfiles), c.chConfigGenerator.GetProfiles()) - util.MergeStringMapsOverwrite(commonUsersConfigSections, c.chConfigGenerator.GetFiles(chi.SectionUsers, false, nil)) + util.MergeStringMapsOverwrite(commonUsersConfigSections, c.chConfigGenerator.GetFiles(api.SectionUsers, false, nil)) // Extra user-specified config files util.MergeStringMapsOverwrite(commonUsersConfigSections, c.chopConfig.ClickHouse.Config.File.Runtime.UsersConfigFiles) @@ -109,14 +109,14 @@ func (c *ClickHouseConfigFilesGenerator) CreateConfigFilesGroupUsers() map[strin } // CreateConfigFilesGroupHost creates host config files -func (c *ClickHouseConfigFilesGenerator) CreateConfigFilesGroupHost(host *chi.ChiHost) map[string]string { +func (c *ClickHouseConfigFilesGenerator) CreateConfigFilesGroupHost(host *api.ChiHost) map[string]string { // Prepare for this replica deployment chopConfig files map as filename->content hostConfigSections := make(map[string]string) util.IncludeNonEmpty(hostConfigSections, createConfigSectionFilename(configMacros), c.chConfigGenerator.GetHostMacros(host)) util.IncludeNonEmpty(hostConfigSections, createConfigSectionFilename(configHostnamePorts), c.chConfigGenerator.GetHostHostnameAndPorts(host)) util.IncludeNonEmpty(hostConfigSections, createConfigSectionFilename(configZookeeper), c.chConfigGenerator.GetHostZookeeper(host)) util.IncludeNonEmpty(hostConfigSections, createConfigSectionFilename(configSettings), c.chConfigGenerator.GetSettings(host)) - util.MergeStringMapsOverwrite(hostConfigSections, c.chConfigGenerator.GetFiles(chi.SectionHost, true, host)) + util.MergeStringMapsOverwrite(hostConfigSections, c.chConfigGenerator.GetFiles(api.SectionHost, true, host)) // Extra user-specified config files util.MergeStringMapsOverwrite(hostConfigSections, c.chopConfig.ClickHouse.Config.File.Runtime.HostConfigFiles) diff --git a/pkg/model/chi/chop_config.go b/pkg/model/chi/chop_config.go index c1a5d339c..f532751fc 100644 --- a/pkg/model/chi/chop_config.go +++ b/pkg/model/chi/chop_config.go @@ -19,13 +19,13 @@ import ( "gopkg.in/d4l3k/messagediff.v1" - chiV1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" + api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" "github.com/altinity/clickhouse-operator/pkg/chop" ) // isZookeeperChangeRequiresReboot checks two ZooKeeper configs and decides, // whether config modifications require a reboot to 
be applied -func isZookeeperChangeRequiresReboot(host *chiV1.ChiHost, a, b *chiV1.ChiZookeeperConfig) bool { +func isZookeeperChangeRequiresReboot(host *api.ChiHost, a, b *api.ChiZookeeperConfig) bool { return !a.Equals(b) } @@ -33,9 +33,9 @@ func isZookeeperChangeRequiresReboot(host *chiV1.ChiHost, a, b *chiV1.ChiZookeep // Ex.: `prefix` = file // file/setting1 // file/setting2 -func makePaths(a, b *chiV1.Settings, prefix string, path *messagediff.Path, value interface{}) (sections []string) { - if settings, ok := (value).(*chiV1.Settings); ok { - // Provided `value` is of type chiV1.Settings, which means that the whole +func makePaths(a, b *api.Settings, prefix string, path *messagediff.Path, value interface{}) (sections []string) { + if settings, ok := (value).(*api.Settings); ok { + // Provided `value` is of type api.Settings, which means that the whole // settings such as 'files' or 'settings' is being either added or removed if settings == nil { // Completely removed settings such as 'files' or 'settings', so the value changed from Settings to nil @@ -51,7 +51,7 @@ func makePaths(a, b *chiV1.Settings, prefix string, path *messagediff.Path, valu } } } else { - // Provided `value` is not of type chiV1.Settings, expecting it to be a piece of settings. + // Provided `value` is not of type api.Settings, expecting it to be a piece of settings. // Modify settings such as 'files' or 'settings' but without full removal, // something is still left in the remaining part of settings in case of deletion or added in case of addition. suffix := "" @@ -73,7 +73,7 @@ func makePaths(a, b *chiV1.Settings, prefix string, path *messagediff.Path, valu // Ex.: `prefix` = file // file/setting1 // file/setting2 -func makePathsFromDiff(a, b *chiV1.Settings, diff *messagediff.Diff, prefix string) (res []string) { +func makePathsFromDiff(a, b *api.Settings, diff *messagediff.Diff, prefix string) (res []string) { for path, value := range diff.Added { res = append(res, makePaths(a, b, prefix, path, value)...) } @@ -87,7 +87,7 @@ func makePathsFromDiff(a, b *chiV1.Settings, diff *messagediff.Diff, prefix stri } // isSettingsChangeRequiresReboot checks whether changes between two settings requires ClickHouse reboot -func isSettingsChangeRequiresReboot(host *chiV1.ChiHost, section string, a, b *chiV1.Settings) bool { +func isSettingsChangeRequiresReboot(host *api.ChiHost, section string, a, b *api.Settings) bool { diff, equal := messagediff.DeepDiff(a, b) if equal { return false @@ -97,7 +97,7 @@ func isSettingsChangeRequiresReboot(host *chiV1.ChiHost, section string, a, b *c } // hostVersionMatches checks whether host's ClickHouse version matches specified constraint -func hostVersionMatches(host *chiV1.ChiHost, versionConstraint string) bool { +func hostVersionMatches(host *api.ChiHost, versionConstraint string) bool { // Special version of "*" - default version - has to satisfy all host versions // Default version will also be used in case ClickHouse version is unknown. // ClickHouse version may be unknown due to host being down - for example, because of incorrect "settings" section. 
@@ -106,7 +106,7 @@ func hostVersionMatches(host *chiV1.ChiHost, versionConstraint string) bool { } // ruleMatches checks whether provided rule (rule set) matches specified `path` -func ruleMatches(set chiV1.OperatorConfigRestartPolicyRuleSet, path string) (matches bool, value bool) { +func ruleMatches(set api.OperatorConfigRestartPolicyRuleSet, path string) (matches bool, value bool) { for pattern, val := range set { if pattern.Match(path) { matches = true @@ -125,7 +125,7 @@ func ruleMatches(set chiV1.OperatorConfigRestartPolicyRuleSet, path string) (mat // getLatestConfigMatchValue returns value of the latest match of a specified `path` in ConfigRestartPolicy.Rules // in case match found in ConfigRestartPolicy.Rules or false -func getLatestConfigMatchValue(host *chiV1.ChiHost, path string) (matches bool, value bool) { +func getLatestConfigMatchValue(host *api.ChiHost, path string) (matches bool, value bool) { // Check all rules for _, r := range chop.Config().ClickHouse.ConfigRestartPolicy.Rules { // Check ClickHouse version of a particular rule @@ -146,7 +146,7 @@ func getLatestConfigMatchValue(host *chiV1.ChiHost, path string) (matches bool, } // isListedChangeRequiresReboot checks whether any of the provided paths requires reboot to apply configuration -func isListedChangeRequiresReboot(host *chiV1.ChiHost, paths []string) bool { +func isListedChangeRequiresReboot(host *api.ChiHost, paths []string) bool { // Check whether any path matches ClickHouse configuration restart policy rules requires reboot for _, path := range paths { if matches, value := getLatestConfigMatchValue(host, path); matches { @@ -165,10 +165,10 @@ func isListedChangeRequiresReboot(host *chiV1.ChiHost, paths []string) bool { } // IsConfigurationChangeRequiresReboot checks whether configuration changes requires a reboot -func IsConfigurationChangeRequiresReboot(host *chiV1.ChiHost) bool { +func IsConfigurationChangeRequiresReboot(host *api.ChiHost) bool { // Zookeeper { - var old, new *chiV1.ChiZookeeperConfig + var old, new *api.ChiZookeeperConfig if host.HasAncestor() { old = host.GetAncestor().GetZookeeper() } @@ -179,7 +179,7 @@ func IsConfigurationChangeRequiresReboot(host *chiV1.ChiHost) bool { } // Profiles Global { - var old, new *chiV1.Settings + var old, new *api.Settings if host.HasAncestorCHI() { old = host.GetAncestorCHI().Spec.Configuration.Profiles } @@ -192,7 +192,7 @@ func IsConfigurationChangeRequiresReboot(host *chiV1.ChiHost) bool { } // Quotas Global { - var old, new *chiV1.Settings + var old, new *api.Settings if host.HasAncestorCHI() { old = host.GetAncestorCHI().Spec.Configuration.Quotas } @@ -205,7 +205,7 @@ func IsConfigurationChangeRequiresReboot(host *chiV1.ChiHost) bool { } // Settings Global { - var old, new *chiV1.Settings + var old, new *api.Settings if host.HasAncestorCHI() { old = host.GetAncestorCHI().Spec.Configuration.Settings } @@ -218,7 +218,7 @@ func IsConfigurationChangeRequiresReboot(host *chiV1.ChiHost) bool { } // Settings Local { - var old, new *chiV1.Settings + var old, new *api.Settings if host.HasAncestor() { old = host.GetAncestor().Settings } @@ -229,18 +229,18 @@ func IsConfigurationChangeRequiresReboot(host *chiV1.ChiHost) bool { } // Files Global { - var old, new *chiV1.Settings + var old, new *api.Settings if host.HasAncestorCHI() { old = host.GetAncestorCHI().Spec.Configuration.Files.Filter( nil, - []chiV1.SettingsSection{chiV1.SectionUsers}, + []api.SettingsSection{api.SectionUsers}, true, ) } if host.HasCHI() { new = 
host.GetCHI().Spec.Configuration.Files.Filter( nil, - []chiV1.SettingsSection{chiV1.SectionUsers}, + []api.SettingsSection{api.SectionUsers}, true, ) } @@ -250,17 +250,17 @@ func IsConfigurationChangeRequiresReboot(host *chiV1.ChiHost) bool { } // Files Local { - var old, new *chiV1.Settings + var old, new *api.Settings if host.HasAncestor() { old = host.GetAncestor().Files.Filter( nil, - []chiV1.SettingsSection{chiV1.SectionUsers}, + []api.SettingsSection{api.SectionUsers}, true, ) } new = host.Files.Filter( nil, - []chiV1.SettingsSection{chiV1.SectionUsers}, + []api.SettingsSection{api.SectionUsers}, true, ) if isSettingsChangeRequiresReboot(host, "files", old, new) { diff --git a/pkg/model/chi/cluster.go b/pkg/model/chi/cluster.go index df7131122..013848133 100644 --- a/pkg/model/chi/cluster.go +++ b/pkg/model/chi/cluster.go @@ -19,7 +19,7 @@ import ( "strings" log "github.com/altinity/clickhouse-operator/pkg/announcer" - chop "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" + api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" "github.com/altinity/clickhouse-operator/pkg/model/clickhouse" "github.com/altinity/clickhouse-operator/pkg/util" ) @@ -101,29 +101,29 @@ func (c *Cluster) QueryUnzipAndApplyUUIDs(ctx context.Context, endpoints []strin } // ExecCHI runs set of SQL queries over the whole CHI -func (c *Cluster) ExecCHI(ctx context.Context, chi *chop.ClickHouseInstallation, SQLs []string, _opts ...*clickhouse.QueryOptions) error { +func (c *Cluster) ExecCHI(ctx context.Context, chi *api.ClickHouseInstallation, SQLs []string, _opts ...*clickhouse.QueryOptions) error { hosts := CreateFQDNs(chi, nil, false) opts := clickhouse.QueryOptionsNormalize(_opts...) return c.SetHosts(hosts).ExecAll(ctx, SQLs, opts) } // ExecCluster runs set of SQL queries over the cluster -func (c *Cluster) ExecCluster(ctx context.Context, cluster *chop.Cluster, SQLs []string, _opts ...*clickhouse.QueryOptions) error { +func (c *Cluster) ExecCluster(ctx context.Context, cluster *api.Cluster, SQLs []string, _opts ...*clickhouse.QueryOptions) error { hosts := CreateFQDNs(cluster, nil, false) opts := clickhouse.QueryOptionsNormalize(_opts...) return c.SetHosts(hosts).ExecAll(ctx, SQLs, opts) } // ExecShard runs set of SQL queries over the shard replicas -func (c *Cluster) ExecShard(ctx context.Context, shard *chop.ChiShard, SQLs []string, _opts ...*clickhouse.QueryOptions) error { +func (c *Cluster) ExecShard(ctx context.Context, shard *api.ChiShard, SQLs []string, _opts ...*clickhouse.QueryOptions) error { hosts := CreateFQDNs(shard, nil, false) opts := clickhouse.QueryOptionsNormalize(_opts...) return c.SetHosts(hosts).ExecAll(ctx, SQLs, opts) } // ExecHost runs set of SQL queries over the replica -func (c *Cluster) ExecHost(ctx context.Context, host *chop.ChiHost, SQLs []string, _opts ...*clickhouse.QueryOptions) error { - hosts := CreateFQDNs(host, chop.ChiHost{}, false) +func (c *Cluster) ExecHost(ctx context.Context, host *api.ChiHost, SQLs []string, _opts ...*clickhouse.QueryOptions) error { + hosts := CreateFQDNs(host, api.ChiHost{}, false) opts := clickhouse.QueryOptionsNormalize(_opts...) 
c.SetHosts(hosts) if opts.GetSilent() { @@ -135,8 +135,8 @@ func (c *Cluster) ExecHost(ctx context.Context, host *chop.ChiHost, SQLs []strin } // QueryHost runs specified query on specified host -func (c *Cluster) QueryHost(ctx context.Context, host *chop.ChiHost, sql string, _opts ...*clickhouse.QueryOptions) (*clickhouse.QueryResult, error) { - hosts := CreateFQDNs(host, chop.ChiHost{}, false) +func (c *Cluster) QueryHost(ctx context.Context, host *api.ChiHost, sql string, _opts ...*clickhouse.QueryOptions) (*clickhouse.QueryResult, error) { + hosts := CreateFQDNs(host, api.ChiHost{}, false) opts := clickhouse.QueryOptionsNormalize(_opts...) c.SetHosts(hosts) if opts.GetSilent() { @@ -149,7 +149,7 @@ func (c *Cluster) QueryHost(ctx context.Context, host *chop.ChiHost, sql string, } // QueryHostInt runs specified query on specified host and returns one int as a result -func (c *Cluster) QueryHostInt(ctx context.Context, host *chop.ChiHost, sql string, _opts ...*clickhouse.QueryOptions) (int, error) { +func (c *Cluster) QueryHostInt(ctx context.Context, host *api.ChiHost, sql string, _opts ...*clickhouse.QueryOptions) (int, error) { if util.IsContextDone(ctx) { log.V(2).Info("ctx is done") return 0, nil @@ -168,7 +168,7 @@ func (c *Cluster) QueryHostInt(ctx context.Context, host *chop.ChiHost, sql stri } // QueryHostString runs specified query on specified host and returns one string as a result -func (c *Cluster) QueryHostString(ctx context.Context, host *chop.ChiHost, sql string, _opts ...*clickhouse.QueryOptions) (string, error) { +func (c *Cluster) QueryHostString(ctx context.Context, host *api.ChiHost, sql string, _opts ...*clickhouse.QueryOptions) (string, error) { if util.IsContextDone(ctx) { log.V(2).Info("ctx is done") return "", nil diff --git a/pkg/model/chi/cluster_schemer.go b/pkg/model/chi/cluster_schemer.go index 3b4ed4488..3e63f3195 100644 --- a/pkg/model/chi/cluster_schemer.go +++ b/pkg/model/chi/cluster_schemer.go @@ -22,7 +22,7 @@ import ( "github.com/MakeNowJust/heredoc" log "github.com/altinity/clickhouse-operator/pkg/announcer" - chop "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" + api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" "github.com/altinity/clickhouse-operator/pkg/model/clickhouse" "github.com/altinity/clickhouse-operator/pkg/util" ) @@ -43,8 +43,8 @@ func NewClusterSchemer(clusterConnectionParams *clickhouse.ClusterConnectionPara } // shouldCreateDistributedObjects determines whether distributed objects should be created -func shouldCreateDistributedObjects(host *chop.ChiHost) bool { - hosts := CreateFQDNs(host, chop.Cluster{}, false) +func shouldCreateDistributedObjects(host *api.ChiHost) bool { + hosts := CreateFQDNs(host, api.Cluster{}, false) if host.GetCluster().SchemaPolicy.Shard == SchemaPolicyShardNone { log.V(1).M(host).F().Info("SchemaPolicy.Shard says there is no need to distribute objects") @@ -79,7 +79,7 @@ func concatSlices[T any](slices [][]T) []T { // getDistributedObjectsSQLs returns a list of objects that needs to be created on a shard in a cluster. 
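One pattern worth calling out in this file: CreateFQDNs takes a zero value as its second argument, and the type of that value selects how far to fan out from the given host. The following sketch is grounded in the call sites visible in this diff; only the wrapper function name is illustrative:

```go
// Sketch: the type of the zero-value second argument picks the scope
// of hosts whose FQDNs are enumerated.
func fqdnScopeSketch(host *api.ChiHost) {
	_ = CreateFQDNs(host, api.ChiHost{}, false)                // this host only
	_ = CreateFQDNs(host, api.ChiShard{}, false)               // all replicas of the host's shard
	_ = CreateFQDNs(host, api.Cluster{}, false)                // all hosts of the host's cluster
	_ = CreateFQDNs(host, api.ClickHouseInstallation{}, false) // all hosts of the CHI
}
```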
// That includes all distributed tables, corresponding local tables and databases, if necessary -func (s *ClusterSchemer) getDistributedObjectsSQLs(ctx context.Context, host *chop.ChiHost) ([]string, []string, error) { +func (s *ClusterSchemer) getDistributedObjectsSQLs(ctx context.Context, host *api.ChiHost) ([]string, []string, error) { if util.IsContextDone(ctx) { log.V(2).Info("ctx is done") return nil, nil, nil @@ -93,21 +93,21 @@ func (s *ClusterSchemer) getDistributedObjectsSQLs(ctx context.Context, host *ch databaseNames, createDatabaseSQLs := debugCreateSQLs( s.QueryUnzip2Columns( ctx, - CreateFQDNs(host, chop.ClickHouseInstallation{}, false), + CreateFQDNs(host, api.ClickHouseInstallation{}, false), createDatabaseDistributed(host.Address.ClusterName), ), ) tableNames, createTableSQLs := debugCreateSQLs( s.QueryUnzipAndApplyUUIDs( ctx, - CreateFQDNs(host, chop.ClickHouseInstallation{}, false), + CreateFQDNs(host, api.ClickHouseInstallation{}, false), createTableDistributed(host.Address.ClusterName), ), ) functionNames, createFunctionSQLs := debugCreateSQLs( s.QueryUnzip2Columns( ctx, - CreateFQDNs(host, chop.ClickHouseInstallation{}, false), + CreateFQDNs(host, api.ClickHouseInstallation{}, false), createFunction(host.Address.ClusterName), ), ) @@ -117,9 +117,9 @@ func (s *ClusterSchemer) getDistributedObjectsSQLs(ctx context.Context, host *ch } // shouldCreateReplicatedObjects determines whether replicated objects should be created -func shouldCreateReplicatedObjects(host *chop.ChiHost) bool { - shard := CreateFQDNs(host, chop.ChiShard{}, false) - cluster := CreateFQDNs(host, chop.Cluster{}, false) +func shouldCreateReplicatedObjects(host *api.ChiHost) bool { + shard := CreateFQDNs(host, api.ChiShard{}, false) + cluster := CreateFQDNs(host, api.Cluster{}, false) if host.GetCluster().SchemaPolicy.Shard == SchemaPolicyShardAll { // We have explicit request to create replicated objects on each shard @@ -145,7 +145,7 @@ func shouldCreateReplicatedObjects(host *chop.ChiHost) bool { } // getReplicatedObjectsSQLs returns a list of objects that needs to be created on a host in a cluster -func (s *ClusterSchemer) getReplicatedObjectsSQLs(ctx context.Context, host *chop.ChiHost) ([]string, []string, error) { +func (s *ClusterSchemer) getReplicatedObjectsSQLs(ctx context.Context, host *api.ChiHost) ([]string, []string, error) { if util.IsContextDone(ctx) { log.V(2).Info("ctx is done") return nil, nil, nil @@ -159,21 +159,21 @@ func (s *ClusterSchemer) getReplicatedObjectsSQLs(ctx context.Context, host *cho databaseNames, createDatabaseSQLs := debugCreateSQLs( s.QueryUnzip2Columns( ctx, - CreateFQDNs(host, chop.ClickHouseInstallation{}, false), + CreateFQDNs(host, api.ClickHouseInstallation{}, false), createDatabaseReplicated(host.Address.ClusterName), ), ) tableNames, createTableSQLs := debugCreateSQLs( s.QueryUnzipAndApplyUUIDs( ctx, - CreateFQDNs(host, chop.ClickHouseInstallation{}, false), + CreateFQDNs(host, api.ClickHouseInstallation{}, false), createTableReplicated(host.Address.ClusterName), ), ) functionNames, createFunctionSQLs := debugCreateSQLs( s.QueryUnzip2Columns( ctx, - CreateFQDNs(host, chop.ClickHouseInstallation{}, false), + CreateFQDNs(host, api.ClickHouseInstallation{}, false), createFunction(host.Address.ClusterName), ), ) @@ -183,7 +183,7 @@ func (s *ClusterSchemer) getReplicatedObjectsSQLs(ctx context.Context, host *cho } // HostSyncTables calls SYSTEM SYNC REPLICA for replicated tables -func (s *ClusterSchemer) HostSyncTables(ctx context.Context, host *chop.ChiHost) 
error { +func (s *ClusterSchemer) HostSyncTables(ctx context.Context, host *api.ChiHost) error { tableNames, syncTableSQLs, _ := s.getSyncTablesSQLs(ctx, host) log.V(1).M(host).F().Info("Sync tables: %v as %v", tableNames, syncTableSQLs) opts := clickhouse.NewQueryOptions() @@ -192,7 +192,7 @@ func (s *ClusterSchemer) HostSyncTables(ctx context.Context, host *chop.ChiHost) } // HostDropReplica calls SYSTEM DROP REPLICA -func (s *ClusterSchemer) HostDropReplica(ctx context.Context, hostToRunOn, hostToDrop *chop.ChiHost) error { +func (s *ClusterSchemer) HostDropReplica(ctx context.Context, hostToRunOn, hostToDrop *api.ChiHost) error { log.V(1).M(hostToRunOn).F().Info("Drop replica: %v at %v", CreateInstanceHostname(hostToDrop), hostToRunOn.Address.HostName) return s.ExecHost(ctx, hostToRunOn, []string{fmt.Sprintf("SYSTEM DROP REPLICA '%s'", CreateInstanceHostname(hostToDrop))}) } @@ -200,7 +200,7 @@ func (s *ClusterSchemer) HostDropReplica(ctx context.Context, hostToRunOn, hostT // createTablesSQLs makes all SQL for migrating tables func (s *ClusterSchemer) createTablesSQLs( ctx context.Context, - host *chop.ChiHost, + host *api.ChiHost, ) ( replicatedObjectNames []string, replicatedCreateSQLs []string, @@ -219,7 +219,7 @@ func (s *ClusterSchemer) createTablesSQLs( } // HostCreateTables creates tables on a new host -func (s *ClusterSchemer) HostCreateTables(ctx context.Context, host *chop.ChiHost) error { +func (s *ClusterSchemer) HostCreateTables(ctx context.Context, host *api.ChiHost) error { if util.IsContextDone(ctx) { log.V(2).Info("ctx is done") return nil @@ -258,14 +258,14 @@ func (s *ClusterSchemer) HostCreateTables(ctx context.Context, host *chop.ChiHos } // HostDropTables drops tables on a host -func (s *ClusterSchemer) HostDropTables(ctx context.Context, host *chop.ChiHost) error { +func (s *ClusterSchemer) HostDropTables(ctx context.Context, host *api.ChiHost) error { tableNames, dropTableSQLs, _ := s.getDropTablesSQLs(ctx, host) log.V(1).M(host).F().Info("Drop tables: %v as %v", tableNames, dropTableSQLs) return s.ExecHost(ctx, host, dropTableSQLs, clickhouse.NewQueryOptions().SetRetry(false)) } // IsHostInCluster checks whether host is a member of at least one ClickHouse cluster -func (s *ClusterSchemer) IsHostInCluster(ctx context.Context, host *chop.ChiHost) bool { +func (s *ClusterSchemer) IsHostInCluster(ctx context.Context, host *api.ChiHost) bool { inside := false SQLs := []string{ heredoc.Docf( @@ -288,22 +288,22 @@ func (s *ClusterSchemer) IsHostInCluster(ctx context.Context, host *chop.ChiHost } // CHIDropDnsCache runs 'DROP DNS CACHE' over the whole CHI -func (s *ClusterSchemer) CHIDropDnsCache(ctx context.Context, chi *chop.ClickHouseInstallation) error { +func (s *ClusterSchemer) CHIDropDnsCache(ctx context.Context, chi *api.ClickHouseInstallation) error { sql := `SYSTEM DROP DNS CACHE` - chi.WalkHosts(func(host *chop.ChiHost) error { + chi.WalkHosts(func(host *api.ChiHost) error { return s.ExecHost(ctx, host, []string{sql}) }) return nil } // HostActiveQueriesNum returns how many active queries are on the host -func (s *ClusterSchemer) HostActiveQueriesNum(ctx context.Context, host *chop.ChiHost) (int, error) { +func (s *ClusterSchemer) HostActiveQueriesNum(ctx context.Context, host *api.ChiHost) (int, error) { sql := `SELECT count() FROM system.processes` return s.QueryHostInt(ctx, host, sql) } // HostClickHouseVersion returns ClickHouse version on the host -func (s *ClusterSchemer) HostClickHouseVersion(ctx context.Context, host *chop.ChiHost) (string, error) { 
@@ -288,22 +288,22 @@ func (s *ClusterSchemer) IsHostInCluster(ctx context.Context, host *chop.ChiHost
 }
 
 // CHIDropDnsCache runs 'DROP DNS CACHE' over the whole CHI
-func (s *ClusterSchemer) CHIDropDnsCache(ctx context.Context, chi *chop.ClickHouseInstallation) error {
+func (s *ClusterSchemer) CHIDropDnsCache(ctx context.Context, chi *api.ClickHouseInstallation) error {
 	sql := `SYSTEM DROP DNS CACHE`
-	chi.WalkHosts(func(host *chop.ChiHost) error {
+	chi.WalkHosts(func(host *api.ChiHost) error {
 		return s.ExecHost(ctx, host, []string{sql})
 	})
 	return nil
 }
 
 // HostActiveQueriesNum returns how many active queries are on the host
-func (s *ClusterSchemer) HostActiveQueriesNum(ctx context.Context, host *chop.ChiHost) (int, error) {
+func (s *ClusterSchemer) HostActiveQueriesNum(ctx context.Context, host *api.ChiHost) (int, error) {
 	sql := `SELECT count() FROM system.processes`
 	return s.QueryHostInt(ctx, host, sql)
 }
 
 // HostClickHouseVersion returns ClickHouse version on the host
-func (s *ClusterSchemer) HostClickHouseVersion(ctx context.Context, host *chop.ChiHost) (string, error) {
+func (s *ClusterSchemer) HostClickHouseVersion(ctx context.Context, host *api.ChiHost) (string, error) {
 	sql := `SELECT version()`
 	return s.QueryHostString(ctx, host, sql)
 }
diff --git a/pkg/model/chi/cluster_schemer_queries.go b/pkg/model/chi/cluster_schemer_queries.go
index 5f88be9d3..989b8e81c 100644
--- a/pkg/model/chi/cluster_schemer_queries.go
+++ b/pkg/model/chi/cluster_schemer_queries.go
@@ -16,13 +16,14 @@ package chi
 
 import (
 	"context"
+
 	"github.com/MakeNowJust/heredoc"
 
-	chop "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+	api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
 )
 
 // getDropTablesSQLs returns set of 'DROP TABLE ...' SQLs
-func (s *ClusterSchemer) getDropTablesSQLs(ctx context.Context, host *chop.ChiHost) ([]string, []string, error) {
+func (s *ClusterSchemer) getDropTablesSQLs(ctx context.Context, host *api.ChiHost) ([]string, []string, error) {
 	// There isn't a separate query for deleting views. To delete a view, use DROP TABLE
 	// See https://clickhouse.yandex/docs/en/query_language/create/
 	sql := heredoc.Docf(`
@@ -45,12 +46,12 @@ func (s *ClusterSchemer) getDropTablesSQLs(ctx context.Context, host *chop.ChiHo
 		ignoredDBs,
 	)
 
-	names, sqlStatements, _ := s.QueryUnzip2Columns(ctx, CreateFQDNs(host, chop.ChiHost{}, false), sql)
+	names, sqlStatements, _ := s.QueryUnzip2Columns(ctx, CreateFQDNs(host, api.ChiHost{}, false), sql)
 	return names, sqlStatements, nil
 }
 
 // getSyncTablesSQLs returns set of 'SYSTEM SYNC REPLICA database.table ...' SQLs
-func (s *ClusterSchemer) getSyncTablesSQLs(ctx context.Context, host *chop.ChiHost) ([]string, []string, error) {
+func (s *ClusterSchemer) getSyncTablesSQLs(ctx context.Context, host *api.ChiHost) ([]string, []string, error) {
 	sql := heredoc.Doc(`
 		SELECT DISTINCT
 			name,
@@ -62,7 +63,7 @@ func (s *ClusterSchemer) getSyncTablesSQLs(ctx context.Context, host *chop.ChiHo
 		`,
 	)
 
-	names, sqlStatements, _ := s.QueryUnzip2Columns(ctx, CreateFQDNs(host, chop.ChiHost{}, false), sql)
+	names, sqlStatements, _ := s.QueryUnzip2Columns(ctx, CreateFQDNs(host, api.ChiHost{}, false), sql)
 	return names, sqlStatements, nil
 }
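Both helpers in cluster_schemer_queries.go follow one pattern: a two-column SELECT yields (object name, SQL statement) pairs that QueryUnzip2Columns splits into parallel slices. A sketch of a caller, assuming only the methods visible in the hunks above:

    // logDropPlan is a hypothetical audit helper: report what
    // getDropTablesSQLs would execute on a host, without running anything.
    func (s *ClusterSchemer) logDropPlan(ctx context.Context, host *api.ChiHost) {
        names, sqls, _ := s.getDropTablesSQLs(ctx, host)
        for i := range names {
            log.V(1).M(host).F().Info("would drop %s via %s", names[i], sqls[i])
        }
    }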
diff --git a/pkg/model/chi/creator.go b/pkg/model/chi/creator.go
index 03ff1c13b..9125f4a9e 100644
--- a/pkg/model/chi/creator.go
+++ b/pkg/model/chi/creator.go
@@ -19,20 +19,20 @@ import (
 	"github.com/gosimple/slug"
 
 	apps "k8s.io/api/apps/v1"
-	corev1 "k8s.io/api/core/v1"
-	policyv1 "k8s.io/api/policy/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	core "k8s.io/api/core/v1"
+	policy "k8s.io/api/policy/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/intstr"
 
 	log "github.com/altinity/clickhouse-operator/pkg/announcer"
-	chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+	api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
 	"github.com/altinity/clickhouse-operator/pkg/chop"
 	"github.com/altinity/clickhouse-operator/pkg/util"
 )
 
 // Creator specifies creator object
 type Creator struct {
-	chi *chiv1.ClickHouseInstallation
+	chi *api.ClickHouseInstallation
 	chConfigFilesGenerator *ClickHouseConfigFilesGenerator
 	labels *Labeler
 	annotations *Annotator
@@ -40,7 +40,7 @@ type Creator struct {
 }
 
 // NewCreator creates new Creator object
-func NewCreator(chi *chiv1.ClickHouseInstallation) *Creator {
+func NewCreator(chi *api.ClickHouseInstallation) *Creator {
 	return &Creator{
 		chi: chi,
 		chConfigFilesGenerator: NewClickHouseConfigFilesGenerator(NewClickHouseConfigGenerator(chi), chop.Config()),
@@ -50,8 +50,8 @@ func NewCreator(chi *chiv1.ClickHouseInstallation) *Creator {
 	}
 }
 
-// CreateServiceCHI creates new corev1.Service for specified CHI
-func (c *Creator) CreateServiceCHI() *corev1.Service {
+// CreateServiceCHI creates new core.Service for specified CHI
+func (c *Creator) CreateServiceCHI() *core.Service {
 	serviceName := CreateCHIServiceName(c.chi)
 	ownerReferences := getOwnerReferences(c.chi)
@@ -72,41 +72,41 @@ func (c *Creator) CreateServiceCHI() *corev1.Service {
 
 	// Create default Service
 	// We do not have .templates.ServiceTemplate specified or it is incorrect
-	svc := &corev1.Service{
-		ObjectMeta: metav1.ObjectMeta{
+	svc := &core.Service{
+		ObjectMeta: meta.ObjectMeta{
 			Name: serviceName,
 			Namespace: c.chi.Namespace,
 			Labels: macro(c.chi).Map(c.labels.getServiceCHI(c.chi)),
 			Annotations: macro(c.chi).Map(c.annotations.getServiceCHI(c.chi)),
 			OwnerReferences: ownerReferences,
 		},
-		Spec: corev1.ServiceSpec{
+		Spec: core.ServiceSpec{
 			// ClusterIP: templateDefaultsServiceClusterIP,
-			Ports: []corev1.ServicePort{
+			Ports: []core.ServicePort{
 				{
 					Name: chDefaultHTTPPortName,
-					Protocol: corev1.ProtocolTCP,
+					Protocol: core.ProtocolTCP,
 					Port: chDefaultHTTPPortNumber,
 					TargetPort: intstr.FromString(chDefaultHTTPPortName),
 				},
 				{
 					Name: chDefaultTCPPortName,
-					Protocol: corev1.ProtocolTCP,
+					Protocol: core.ProtocolTCP,
 					Port: chDefaultTCPPortNumber,
 					TargetPort: intstr.FromString(chDefaultTCPPortName),
 				},
 			},
 			Selector: c.labels.getSelectorCHIScopeReady(),
-			Type: corev1.ServiceTypeLoadBalancer,
-			ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeLocal,
+			Type: core.ServiceTypeLoadBalancer,
+			ExternalTrafficPolicy: core.ServiceExternalTrafficPolicyTypeLocal,
 		},
 	}
 	MakeObjectVersion(&svc.ObjectMeta, svc)
 	return svc
 }
 
-// CreateServiceCluster creates new corev1.Service for specified Cluster
-func (c *Creator) CreateServiceCluster(cluster *chiv1.Cluster) *corev1.Service {
+// CreateServiceCluster creates new core.Service for specified Cluster
+func (c *Creator) CreateServiceCluster(cluster *api.Cluster) *core.Service {
 	serviceName := CreateClusterServiceName(cluster)
 	ownerReferences := getOwnerReferences(c.chi)
@@ -128,8 +128,8 @@ func (c *Creator) CreateServiceCluster(cluster *chiv1.Cluster) *corev1.Service {
 	return nil
 }
 
-// CreateServiceShard creates new corev1.Service for specified Shard
-func (c *Creator) CreateServiceShard(shard *chiv1.ChiShard) *corev1.Service {
+// CreateServiceShard creates new core.Service for specified Shard
+func (c *Creator) CreateServiceShard(shard *api.ChiShard) *core.Service {
 	serviceName := CreateShardServiceName(shard)
 	ownerReferences := getOwnerReferences(c.chi)
@@ -151,8 +151,8 @@ func (c *Creator) CreateServiceShard(shard *chiv1.ChiShard) *corev1.Service {
 	return nil
 }
 
-// CreateServiceHost creates new corev1.Service for specified host
-func (c *Creator) CreateServiceHost(host *chiv1.ChiHost) *corev1.Service {
+// CreateServiceHost creates new core.Service for specified host
+func (c *Creator) CreateServiceHost(host *api.ChiHost) *core.Service {
 	serviceName := CreateStatefulSetServiceName(host)
 	statefulSetName := CreateStatefulSetName(host)
 	ownerReferences := getOwnerReferences(c.chi)
@@ -174,15 +174,15 @@ func (c *Creator) CreateServiceHost(host *chiv1.ChiHost) *corev1.Service {
 
 	// Create default Service
 	// We do not have .templates.ServiceTemplate specified or it is incorrect
-	svc := &corev1.Service{
-		ObjectMeta: metav1.ObjectMeta{
+	svc := &core.Service{
+		ObjectMeta: meta.ObjectMeta{
 			Name: serviceName,
 			Namespace: host.Address.Namespace,
 			Labels: macro(host).Map(c.labels.getServiceHost(host)),
 			Annotations: macro(host).Map(c.annotations.getServiceHost(host)),
 			OwnerReferences: ownerReferences,
 		},
-		Spec: corev1.ServiceSpec{
+		Spec: core.ServiceSpec{
 			Selector: GetSelectorHostScope(host),
 			ClusterIP: templateDefaultsServiceClusterIP,
 			Type: "ClusterIP",
@@ -194,52 +194,52 @@ func (c *Creator) CreateServiceHost(host *chiv1.ChiHost) *corev1.Service {
 	return svc
 }
 
-func appendServicePorts(service *corev1.Service, host *chiv1.ChiHost) {
-	if chiv1.IsPortAssigned(host.TCPPort) {
+func appendServicePorts(service *core.Service, host *api.ChiHost) {
+	if api.IsPortAssigned(host.TCPPort) {
 		service.Spec.Ports = append(service.Spec.Ports,
-			corev1.ServicePort{
+			core.ServicePort{
 				Name: chDefaultTCPPortName,
-				Protocol: corev1.ProtocolTCP,
+				Protocol: core.ProtocolTCP,
 				Port: host.TCPPort,
 				TargetPort: intstr.FromInt(int(host.TCPPort)),
 			},
 		)
 	}
-	if chiv1.IsPortAssigned(host.TLSPort) {
+	if api.IsPortAssigned(host.TLSPort) {
 		service.Spec.Ports = append(service.Spec.Ports,
-			corev1.ServicePort{
+			core.ServicePort{
 				Name: chDefaultTLSPortName,
-				Protocol: corev1.ProtocolTCP,
+				Protocol: core.ProtocolTCP,
 				Port: host.TLSPort,
 				TargetPort: intstr.FromInt(int(host.TLSPort)),
 			},
 		)
 	}
-	if chiv1.IsPortAssigned(host.HTTPPort) {
+	if api.IsPortAssigned(host.HTTPPort) {
 		service.Spec.Ports = append(service.Spec.Ports,
-			corev1.ServicePort{
+			core.ServicePort{
 				Name: chDefaultHTTPPortName,
-				Protocol: corev1.ProtocolTCP,
+				Protocol: core.ProtocolTCP,
 				Port: host.HTTPPort,
 				TargetPort: intstr.FromInt(int(host.HTTPPort)),
 			},
 		)
 	}
-	if chiv1.IsPortAssigned(host.HTTPSPort) {
+	if api.IsPortAssigned(host.HTTPSPort) {
 		service.Spec.Ports = append(service.Spec.Ports,
-			corev1.ServicePort{
+			core.ServicePort{
 				Name: chDefaultHTTPSPortName,
-				Protocol: corev1.ProtocolTCP,
+				Protocol: core.ProtocolTCP,
 				Port: host.HTTPSPort,
 				TargetPort: intstr.FromInt(int(host.HTTPSPort)),
 			},
 		)
 	}
-	if chiv1.IsPortAssigned(host.InterserverHTTPPort) {
+	if api.IsPortAssigned(host.InterserverHTTPPort) {
 		service.Spec.Ports = append(service.Spec.Ports,
-			corev1.ServicePort{
+			core.ServicePort{
 				Name: chDefaultInterserverHTTPPortName,
-				Protocol: corev1.ProtocolTCP,
+				Protocol: core.ProtocolTCP,
 				Port: host.InterserverHTTPPort,
 				TargetPort: intstr.FromInt(int(host.InterserverHTTPPort)),
 			},
@@ -248,10 +248,10 @@ func appendServicePorts(service *corev1.Service, host *chiv1.ChiHost) {
 	}
 }
 
 // verifyServiceTemplatePorts verifies ChiServiceTemplate to have reasonable ports specified
-func (c *Creator) verifyServiceTemplatePorts(template *chiv1.ChiServiceTemplate) error {
+func (c *Creator) verifyServiceTemplatePorts(template *api.ChiServiceTemplate) error {
 	for i := range template.Spec.Ports {
 		servicePort := &template.Spec.Ports[i]
-		if chiv1.IsPortInvalid(servicePort.Port) {
+		if api.IsPortInvalid(servicePort.Port) {
 			msg := fmt.Sprintf("template:%s INCORRECT PORT:%d", template.Name, servicePort.Port)
 			c.a.V(1).F().Warning(msg)
 			return fmt.Errorf(msg)
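appendServicePorts publishes only the ports the host actually has assigned, so a Service never advertises an endpoint the server does not open. The same api.IsPortAssigned guard works for any optional port field; a small sketch under that assumption:

    // tcpOnlyPorts is a hypothetical example: expose just the native TCP
    // endpoint, and only when it is assigned on the host.
    func tcpOnlyPorts(host *api.ChiHost) []core.ServicePort {
        var ports []core.ServicePort
        if api.IsPortAssigned(host.TCPPort) {
            ports = append(ports, core.ServicePort{
                Name:       chDefaultTCPPortName,
                Protocol:   core.ProtocolTCP,
                Port:       host.TCPPort,
                TargetPort: intstr.FromInt(int(host.TCPPort)),
            })
        }
        return ports
    }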
@@ -262,15 +262,15 @@ func (c *Creator) verifyServiceTemplatePorts(template *chiv1.ChiServiceTemplate)
 
 // createServiceFromTemplate create Service from ChiServiceTemplate and additional info
 func (c *Creator) createServiceFromTemplate(
-	template *chiv1.ChiServiceTemplate,
+	template *api.ChiServiceTemplate,
 	namespace string,
 	name string,
 	labels map[string]string,
 	annotations map[string]string,
 	selector map[string]string,
-	ownerReferences []metav1.OwnerReference,
+	ownerReferences []meta.OwnerReference,
 	macro *macrosEngine,
-) *corev1.Service {
+) *core.Service {
 
 	// Verify Ports
 	if err := c.verifyServiceTemplatePorts(template); err != nil {
@@ -278,7 +278,7 @@ func (c *Creator) createServiceFromTemplate(
 	}
 
 	// Create Service
-	service := &corev1.Service{
+	service := &core.Service{
 		ObjectMeta: *template.ObjectMeta.DeepCopy(),
 		Spec: *template.Spec.DeepCopy(),
 	}
@@ -301,10 +301,10 @@ func (c *Creator) createServiceFromTemplate(
 	return service
 }
 
-// CreateConfigMapCHICommon creates new corev1.ConfigMap
-func (c *Creator) CreateConfigMapCHICommon(options *ClickHouseConfigFilesGeneratorOptions) *corev1.ConfigMap {
-	cm := &corev1.ConfigMap{
-		ObjectMeta: metav1.ObjectMeta{
+// CreateConfigMapCHICommon creates new core.ConfigMap
+func (c *Creator) CreateConfigMapCHICommon(options *ClickHouseConfigFilesGeneratorOptions) *core.ConfigMap {
+	cm := &core.ConfigMap{
+		ObjectMeta: meta.ObjectMeta{
 			Name: CreateConfigMapCommonName(c.chi),
 			Namespace: c.chi.Namespace,
 			Labels: macro(c.chi).Map(c.labels.getConfigMapCHICommon()),
@@ -319,10 +319,10 @@ func (c *Creator) CreateConfigMapCHICommon(options *ClickHouseConfigFilesGenerat
 	return cm
 }
 
-// CreateConfigMapCHICommonUsers creates new corev1.ConfigMap
-func (c *Creator) CreateConfigMapCHICommonUsers() *corev1.ConfigMap {
-	cm := &corev1.ConfigMap{
-		ObjectMeta: metav1.ObjectMeta{
+// CreateConfigMapCHICommonUsers creates new core.ConfigMap
+func (c *Creator) CreateConfigMapCHICommonUsers() *core.ConfigMap {
+	cm := &core.ConfigMap{
+		ObjectMeta: meta.ObjectMeta{
 			Name: CreateConfigMapCommonUsersName(c.chi),
 			Namespace: c.chi.Namespace,
 			Labels: macro(c.chi).Map(c.labels.getConfigMapCHICommonUsers()),
@@ -337,10 +337,10 @@ func (c *Creator) CreateConfigMapCHICommonUsers() *corev1.ConfigMap {
 	return cm
 }
 
-// createConfigMapHost creates new corev1.ConfigMap
-func (c *Creator) createConfigMapHost(host *chiv1.ChiHost, name string, data map[string]string) *corev1.ConfigMap {
-	cm := &corev1.ConfigMap{
-		ObjectMeta: metav1.ObjectMeta{
+// createConfigMapHost creates new core.ConfigMap
+func (c *Creator) createConfigMapHost(host *api.ChiHost, name string, data map[string]string) *core.ConfigMap {
+	cm := &core.ConfigMap{
+		ObjectMeta: meta.ObjectMeta{
 			Name: name,
 			Namespace: host.Address.Namespace,
 			Labels: macro(host).Map(c.labels.getConfigMapHost(host)),
@@ -354,13 +354,13 @@ func (c *Creator) createConfigMapHost(host *chiv1.ChiHost, name string, data map
 	return cm
 }
 
-// CreateConfigMapHost creates new corev1.ConfigMap
-func (c *Creator) CreateConfigMapHost(host *chiv1.ChiHost) *corev1.ConfigMap {
+// CreateConfigMapHost creates new core.ConfigMap
+func (c *Creator) CreateConfigMapHost(host *api.ChiHost) *core.ConfigMap {
 	return c.createConfigMapHost(host, CreateConfigMapHostName(host), c.chConfigFilesGenerator.CreateConfigFilesGroupHost(host))
 }
 
-// CreateConfigMapHostMigration creates new corev1.ConfigMap
-//func (c *Creator) CreateConfigMapHostMigration(host *chiv1.ChiHost, data map[string]string) *corev1.ConfigMap {
+// CreateConfigMapHostMigration creates new core.ConfigMap
+//func (c *Creator) CreateConfigMapHostMigration(host *api.ChiHost, data map[string]string) *core.ConfigMap {
 //	return c.createConfigMapHost(host, CreateConfigMapHostMigrationName(host), data)
 //}
 
@@ -379,9 +379,9 @@ func (c *Creator) MakeConfigMapData(names, files []string) map[string]string {
 }
 
 // CreateStatefulSet creates new apps.StatefulSet
-func (c *Creator) CreateStatefulSet(host *chiv1.ChiHost, shutdown bool) *apps.StatefulSet {
+func (c *Creator) CreateStatefulSet(host *api.ChiHost, shutdown bool) *apps.StatefulSet {
 	statefulSet := &apps.StatefulSet{
-		ObjectMeta: metav1.ObjectMeta{
+		ObjectMeta: meta.ObjectMeta{
 			Name: CreateStatefulSetName(host),
 			Namespace: host.Address.Namespace,
 			Labels: macro(host).Map(c.labels.getHostScope(host, true)),
@@ -391,13 +391,13 @@ func (c *Creator) CreateStatefulSet(host *chiv1.ChiHost, shutdown bool) *apps.St
 		Spec: apps.StatefulSetSpec{
 			Replicas: host.GetStatefulSetReplicasNum(shutdown),
 			ServiceName: CreateStatefulSetServiceName(host),
-			Selector: &metav1.LabelSelector{
+			Selector: &meta.LabelSelector{
 				MatchLabels: GetSelectorHostScope(host),
 			},
 
 			// IMPORTANT
 			// Template is to be setup later
-			Template: corev1.PodTemplateSpec{},
+			Template: core.PodTemplateSpec{},
 
 			// IMPORTANT
 			// VolumeClaimTemplates are to be setup later
@@ -419,7 +419,7 @@ func (c *Creator) CreateStatefulSet(host *chiv1.ChiHost, shutdown bool) *apps.St
 }
 
 // PreparePersistentVolume prepares PV labels
-func (c *Creator) PreparePersistentVolume(pv *corev1.PersistentVolume, host *chiv1.ChiHost) *corev1.PersistentVolume {
+func (c *Creator) PreparePersistentVolume(pv *core.PersistentVolume, host *api.ChiHost) *core.PersistentVolume {
 	pv.Labels = macro(host).Map(c.labels.getPV(pv, host))
 	pv.Annotations = macro(host).Map(c.annotations.getPV(pv, host))
 	// And after the object is ready we can put version label
@@ -429,10 +429,10 @@ func (c *Creator) PreparePersistentVolume(pv *corev1.PersistentVolume, host *chi
 
 // PreparePersistentVolumeClaim prepares PVC - labels and annotations
 func (c *Creator) PreparePersistentVolumeClaim(
-	pvc *corev1.PersistentVolumeClaim,
-	host *chiv1.ChiHost,
-	template *chiv1.ChiVolumeClaimTemplate,
-) *corev1.PersistentVolumeClaim {
+	pvc *core.PersistentVolumeClaim,
+	host *api.ChiHost,
+	template *api.ChiVolumeClaimTemplate,
+) *core.PersistentVolumeClaim {
 	pvc.Labels = macro(host).Map(c.labels.getPVC(pvc, host, template))
 	pvc.Annotations = macro(host).Map(c.annotations.getPVC(pvc, host, template))
 	// And after the object is ready we can put version label
@@ -441,7 +441,7 @@ func (c *Creator) PreparePersistentVolumeClaim(
 }
 
 // setupStatefulSetPodTemplate performs PodTemplate setup of StatefulSet
-func (c *Creator) setupStatefulSetPodTemplate(statefulSet *apps.StatefulSet, host *chiv1.ChiHost) {
+func (c *Creator) setupStatefulSetPodTemplate(statefulSet *apps.StatefulSet, host *api.ChiHost) {
 	// Process Pod Template
 	podTemplate := c.getPodTemplate(host)
 	c.statefulSetApplyPodTemplate(statefulSet, podTemplate, host)
@@ -453,14 +453,14 @@ func (c *Creator) setupStatefulSetPodTemplate(statefulSet *apps.StatefulSet, hos
 }
 
 // ensureStatefulSetTemplateIntegrity
-func ensureStatefulSetTemplateIntegrity(statefulSet *apps.StatefulSet, host *chiv1.ChiHost) {
+func ensureStatefulSetTemplateIntegrity(statefulSet *apps.StatefulSet, host *api.ChiHost) {
 	ensureClickHouseContainerSpecified(statefulSet, host)
 	ensureProbesSpecified(statefulSet, host)
 	ensureNamedPortsSpecified(statefulSet, host)
 }
 
 // setupEnvVars setup ENV vars for clickhouse container
-func setupEnvVars(statefulSet *apps.StatefulSet, host *chiv1.ChiHost) {
+func setupEnvVars(statefulSet *apps.StatefulSet, host *api.ChiHost) {
 	container, ok := getClickHouseContainer(statefulSet)
 	if !ok {
 		return
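A contract worth keeping in mind while reading CreateStatefulSet above: Spec.Selector.MatchLabels and the pod template labels are both derived from GetSelectorHostScope, and Kubernetes rejects a StatefulSet whose selector does not match its template. A sketch of that invariant as a standalone check (illustrative only):

    // selectorMatchesTemplate verifies every selector key/value pair is
    // present in the labels the pod template stamps onto pods.
    func selectorMatchesTemplate(sts *apps.StatefulSet) bool {
        for k, v := range sts.Spec.Selector.MatchLabels {
            if sts.Spec.Template.Labels[k] != v {
                return false
            }
        }
        return true
    }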
@@ -470,7 +470,7 @@ func setupEnvVars(statefulSet *apps.StatefulSet, host *chiv1.ChiHost) {
 }
 
 // ensureClickHouseContainerSpecified
-func ensureClickHouseContainerSpecified(statefulSet *apps.StatefulSet, host *chiv1.ChiHost) {
+func ensureClickHouseContainerSpecified(statefulSet *apps.StatefulSet, host *api.ChiHost) {
 	_, ok := getClickHouseContainer(statefulSet)
 	if ok {
 		return
@@ -499,7 +499,7 @@ func ensureClickHouseLogContainerSpecified(statefulSet *apps.StatefulSet) {
 }
 
 // ensureProbesSpecified
-func ensureProbesSpecified(statefulSet *apps.StatefulSet, host *chiv1.ChiHost) {
+func ensureProbesSpecified(statefulSet *apps.StatefulSet, host *api.ChiHost) {
 	container, ok := getClickHouseContainer(statefulSet)
 	if !ok {
 		return
@@ -513,9 +513,9 @@ func ensureProbesSpecified(statefulSet *apps.StatefulSet, host *chiv1.ChiHost) {
 }
 
 // personalizeStatefulSetTemplate
-func (c *Creator) personalizeStatefulSetTemplate(statefulSet *apps.StatefulSet, host *chiv1.ChiHost) {
+func (c *Creator) personalizeStatefulSetTemplate(statefulSet *apps.StatefulSet, host *api.ChiHost) {
 	// Ensure pod created by this StatefulSet has alias 127.0.0.1
-	statefulSet.Spec.Template.Spec.HostAliases = []corev1.HostAlias{
+	statefulSet.Spec.Template.Spec.HostAliases = []core.HostAlias{
 		{
 			IP: "127.0.0.1",
 			Hostnames: []string{CreatePodHostname(host)},
@@ -566,7 +566,7 @@ func (c *Creator) setupTroubleshoot(statefulSet *apps.StatefulSet) {
 }
 
 // setupLogContainer
-func (c *Creator) setupLogContainer(statefulSet *apps.StatefulSet, host *chiv1.ChiHost) {
+func (c *Creator) setupLogContainer(statefulSet *apps.StatefulSet, host *api.ChiHost) {
 	statefulSetName := CreateStatefulSetName(host)
 	// In case we have default LogVolumeClaimTemplate specified - need to append log container to Pod Template
 	if host.Templates.HasLogVolumeClaimTemplate() {
@@ -577,7 +577,7 @@ func (c *Creator) setupLogContainer(statefulSet *apps.StatefulSet, host *chiv1.C
 }
 
 // getPodTemplate gets Pod Template to be used to create StatefulSet
-func (c *Creator) getPodTemplate(host *chiv1.ChiHost) *chiv1.ChiPodTemplate {
+func (c *Creator) getPodTemplate(host *api.ChiHost) *api.ChiPodTemplate {
 	statefulSetName := CreateStatefulSetName(host)
 
 	// Which pod template would be used - either explicitly defined in or a default one
@@ -602,7 +602,7 @@ func (c *Creator) getPodTemplate(host *chiv1.ChiHost) *chiv1.ChiPodTemplate {
 }
 
 // statefulSetSetupVolumesForConfigMaps adds to each container in the Pod VolumeMount objects with
-func (c *Creator) statefulSetSetupVolumesForConfigMaps(statefulSet *apps.StatefulSet, host *chiv1.ChiHost) {
+func (c *Creator) statefulSetSetupVolumesForConfigMaps(statefulSet *apps.StatefulSet, host *api.ChiHost) {
 	configMapHostName := CreateConfigMapHostName(host)
 	configMapCommonName := CreateConfigMapCommonName(c.chi)
 	configMapCommonUsersName := CreateConfigMapCommonUsersName(c.chi)
@@ -632,7 +632,7 @@ func (c *Creator) statefulSetSetupVolumesForConfigMaps(statefulSet *apps.Statefu
 
 // statefulSetAppendUsedPVCTemplates appends all PVC templates which are used (referenced by name) by containers
 // to the StatefulSet.Spec.VolumeClaimTemplates list
-func (c *Creator) statefulSetAppendUsedPVCTemplates(statefulSet *apps.StatefulSet, host *chiv1.ChiHost) {
+func (c *Creator) statefulSetAppendUsedPVCTemplates(statefulSet *apps.StatefulSet, host *api.ChiHost) {
 	// VolumeClaimTemplates, that are directly referenced in containers' VolumeMount object(s)
 	// are appended to StatefulSet's Spec.VolumeClaimTemplates slice
 	//
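statefulSetSetupVolumesForConfigMaps wires three config sources into every pod: the per-host ConfigMap plus the two CHI-wide ones. A plausible reading of its body, composed only from helpers that appear elsewhere in this patch (newVolumeForConfigMap, statefulSetAppendVolumes), not a verbatim copy:

    // Hypothetical sketch: register the three ConfigMap-backed volumes
    // so containers can mount common, common-users, and host configs.
    c.statefulSetAppendVolumes(statefulSet,
        newVolumeForConfigMap(CreateConfigMapCommonName(c.chi)),
        newVolumeForConfigMap(CreateConfigMapCommonUsersName(c.chi)),
        newVolumeForConfigMap(CreateConfigMapHostName(host)),
    )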
@@ -652,7 +652,7 @@ func (c *Creator) statefulSetAppendUsedPVCTemplates(statefulSet *apps.StatefulSe
 
 // statefulSetAppendVolumeMountsForDataAndLogVolumeClaimTemplates
 // appends VolumeMounts for Data and Log VolumeClaimTemplates on all containers.
 // Creates VolumeMounts for Data and Log volumes in case these volume templates are specified in `templates`.
-func (c *Creator) statefulSetAppendVolumeMountsForDataAndLogVolumeClaimTemplates(statefulSet *apps.StatefulSet, host *chiv1.ChiHost) {
+func (c *Creator) statefulSetAppendVolumeMountsForDataAndLogVolumeClaimTemplates(statefulSet *apps.StatefulSet, host *api.ChiHost) {
 	// Mount all named (data and log so far) VolumeClaimTemplates into all containers
 	for i := range statefulSet.Spec.Template.Spec.Containers {
 		// Convenience wrapper
@@ -669,7 +669,7 @@ func (c *Creator) statefulSetAppendVolumeMountsForDataAndLogVolumeClaimTemplates
 }
 
 // setupStatefulSetVolumeClaimTemplates performs VolumeClaimTemplate setup for Containers in PodTemplate of a StatefulSet
-func (c *Creator) setupStatefulSetVolumeClaimTemplates(statefulSet *apps.StatefulSet, host *chiv1.ChiHost) {
+func (c *Creator) setupStatefulSetVolumeClaimTemplates(statefulSet *apps.StatefulSet, host *api.ChiHost) {
 	c.statefulSetAppendVolumeMountsForDataAndLogVolumeClaimTemplates(statefulSet, host)
 	c.statefulSetAppendUsedPVCTemplates(statefulSet, host)
 }
@@ -677,13 +677,13 @@ func (c *Creator) setupStatefulSetVolumeClaimTemplates(statefulSet *apps.Statefu
 
 // statefulSetApplyPodTemplate fills StatefulSet.Spec.Template with data from provided ChiPodTemplate
 func (c *Creator) statefulSetApplyPodTemplate(
 	statefulSet *apps.StatefulSet,
-	template *chiv1.ChiPodTemplate,
-	host *chiv1.ChiHost,
+	template *api.ChiPodTemplate,
+	host *api.ChiHost,
 ) {
 	// StatefulSet's pod template is not directly compatible with ChiPodTemplate,
 	// we need to extract some fields from ChiPodTemplate and apply on StatefulSet
-	statefulSet.Spec.Template = corev1.PodTemplateSpec{
-		ObjectMeta: metav1.ObjectMeta{
+	statefulSet.Spec.Template = core.PodTemplateSpec{
+		ObjectMeta: meta.ObjectMeta{
 			Name: template.Name,
 			Labels: macro(host).Map(util.MergeStringMapsOverwrite(
 				c.labels.getHostScopeReady(host, true),
@@ -703,7 +703,7 @@ func (c *Creator) statefulSetApplyPodTemplate(
 }
 
 // getContainer gets container from the StatefulSet either by name or by index
-func getContainer(statefulSet *apps.StatefulSet, name string, index int) (*corev1.Container, bool) {
+func getContainer(statefulSet *apps.StatefulSet, name string, index int) (*core.Container, bool) {
 	if len(name) > 0 {
 		// Find by name
 		for i := range statefulSet.Spec.Template.Spec.Containers {
@@ -725,12 +725,12 @@ func getContainer(statefulSet *apps.StatefulSet, name string, index int) (*corev
 }
 
 // getClickHouseContainer
-func getClickHouseContainer(statefulSet *apps.StatefulSet) (*corev1.Container, bool) {
+func getClickHouseContainer(statefulSet *apps.StatefulSet) (*core.Container, bool) {
 	return getContainer(statefulSet, clickHouseContainerName, 0)
 }
 
 // getClickHouseLogContainer
-func getClickHouseLogContainer(statefulSet *apps.StatefulSet) (*corev1.Container, bool) {
+func getClickHouseLogContainer(statefulSet *apps.StatefulSet) (*core.Container, bool) {
 	return getContainer(statefulSet, clickHouseLogContainerName, -1)
 }
 
@@ -789,7 +789,7 @@ func StrStatefulSetStatus(status *apps.StatefulSetStatus) string {
 }
 
 // ensureNamedPortsSpecified
-func ensureNamedPortsSpecified(statefulSet *apps.StatefulSet, host *chiv1.ChiHost) {
+func ensureNamedPortsSpecified(statefulSet *apps.StatefulSet, host *api.ChiHost) {
 	// Ensure ClickHouse container has all named ports specified
 	container, ok := getClickHouseContainer(statefulSet)
 	if !ok {
@@ -803,8 +803,8 @@ func ensureNamedPortsSpecified(statefulSet *apps.StatefulSet, host *chiv1.ChiHos
 }
 
 // ensurePortByName
-func ensurePortByName(container *corev1.Container, name string, port int32) {
-	if chiv1.IsPortUnassigned(port) {
+func ensurePortByName(container *core.Container, name string, port int32) {
+	if api.IsPortUnassigned(port) {
 		return
 	}
 
@@ -820,25 +820,25 @@ func ensurePortByName(container *corev1.Container, name string, port int32) {
 	}
 
 	// Port with specified name not found. Need to append
-	container.Ports = append(container.Ports, corev1.ContainerPort{
+	container.Ports = append(container.Ports, core.ContainerPort{
 		Name: name,
 		ContainerPort: port,
 	})
 }
 
 // NewPodDisruptionBudget creates new PodDisruptionBudget
-func (c *Creator) NewPodDisruptionBudget(cluster *chiv1.Cluster) *policyv1.PodDisruptionBudget {
+func (c *Creator) NewPodDisruptionBudget(cluster *api.Cluster) *policy.PodDisruptionBudget {
 	ownerReferences := getOwnerReferences(c.chi)
-	return &policyv1.PodDisruptionBudget{
-		ObjectMeta: metav1.ObjectMeta{
+	return &policy.PodDisruptionBudget{
+		ObjectMeta: meta.ObjectMeta{
 			Name: fmt.Sprintf("%s-%s", cluster.Address.CHIName, cluster.Address.ClusterName),
 			Namespace: c.chi.Namespace,
 			Labels: macro(c.chi).Map(c.labels.getClusterScope(cluster)),
 			Annotations: macro(c.chi).Map(c.annotations.getClusterScope(cluster)),
 			OwnerReferences: ownerReferences,
 		},
-		Spec: policyv1.PodDisruptionBudgetSpec{
-			Selector: &metav1.LabelSelector{
+		Spec: policy.PodDisruptionBudgetSpec{
+			Selector: &meta.LabelSelector{
 				MatchLabels: getSelectorClusterScope(cluster),
 			},
 			MaxUnavailable: &intstr.IntOrString{
@@ -851,10 +851,10 @@ func (c *Creator) NewPodDisruptionBudget(cluster *chiv1.Cluster) *policyv1.PodDi
 
 // setupStatefulSetApplyVolumeMount applies .templates.volumeClaimTemplates.* to a StatefulSet
 func (c *Creator) setupStatefulSetApplyVolumeMount(
-	host *chiv1.ChiHost,
+	host *api.ChiHost,
 	statefulSet *apps.StatefulSet,
 	containerName string,
-	volumeMount corev1.VolumeMount,
+	volumeMount core.VolumeMount,
 ) error {
 	//
 	// Sanity checks
 	//
@@ -935,7 +935,7 @@ func (c *Creator) setupStatefulSetApplyVolumeMount(
 }
 
 // statefulSetAppendVolumes appends multiple Volume(s) to the specified StatefulSet
-func (c *Creator) statefulSetAppendVolumes(statefulSet *apps.StatefulSet, volumes ...corev1.Volume) {
+func (c *Creator) statefulSetAppendVolumes(statefulSet *apps.StatefulSet, volumes ...core.Volume) {
 	statefulSet.Spec.Template.Spec.Volumes = append(
 		statefulSet.Spec.Template.Spec.Volumes,
 		volumes...,
@@ -943,14 +943,14 @@ func (c *Creator) statefulSetAppendVolumes(statefulSet *apps.StatefulSet, volume
 }
 
 // containerAppendVolumeMounts appends multiple VolumeMount(s) to the specified container
-func (c *Creator) containerAppendVolumeMounts(container *corev1.Container, volumeMounts ...corev1.VolumeMount) {
+func (c *Creator) containerAppendVolumeMounts(container *core.Container, volumeMounts ...core.VolumeMount) {
 	for _, volumeMount := range volumeMounts {
 		c.containerAppendVolumeMount(container, volumeMount)
 	}
 }
 
 // containerAppendVolumeMount appends one VolumeMount to the specified container
-func (c *Creator) containerAppendVolumeMount(container *corev1.Container, volumeMount corev1.VolumeMount) {
+func (c *Creator) containerAppendVolumeMount(container *core.Container, volumeMount core.VolumeMount) {
 	//
 	// Sanity checks
 	//
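NewPodDisruptionBudget above pins MaxUnavailable at one pod per cluster scope, the conservative choice for a replicated database: voluntary evictions proceed one replica at a time. A usage sketch, assuming a *Creator named creator (hypothetical variable):

    // One PDB per ClickHouse cluster; the object name is "<chi>-<cluster>".
    pdb := creator.NewPodDisruptionBudget(cluster)
    fmt.Printf("PDB %s/%s maxUnavailable=%v\n", pdb.Namespace, pdb.Name, pdb.Spec.MaxUnavailable)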
@@ -1016,15 +1016,15 @@ func (c *Creator) containerAppendVolumeMount(container *corev1.Container, volume
 func (c *Creator) createPVC(
 	name string,
 	namespace string,
-	host *chiv1.ChiHost,
-	spec *corev1.PersistentVolumeClaimSpec,
-) corev1.PersistentVolumeClaim {
-	persistentVolumeClaim := corev1.PersistentVolumeClaim{
-		TypeMeta: metav1.TypeMeta{
+	host *api.ChiHost,
+	spec *core.PersistentVolumeClaimSpec,
+) core.PersistentVolumeClaim {
+	persistentVolumeClaim := core.PersistentVolumeClaim{
+		TypeMeta: meta.TypeMeta{
 			Kind: "PersistentVolumeClaim",
 			APIVersion: "v1",
 		},
-		ObjectMeta: metav1.ObjectMeta{
+		ObjectMeta: meta.ObjectMeta{
 			Name: name,
 			Namespace: namespace,
 			// TODO
@@ -1041,14 +1041,14 @@ func (c *Creator) createPVC(
 	}
 	// TODO introduce normalization
 	// Overwrite .Spec.VolumeMode
-	volumeMode := corev1.PersistentVolumeFilesystem
+	volumeMode := core.PersistentVolumeFilesystem
 	persistentVolumeClaim.Spec.VolumeMode = &volumeMode
 
 	return persistentVolumeClaim
 }
 
 // CreatePVC creates PVC
-func (c *Creator) CreatePVC(name string, host *chiv1.ChiHost, spec *corev1.PersistentVolumeClaimSpec) *corev1.PersistentVolumeClaim {
+func (c *Creator) CreatePVC(name string, host *api.ChiHost, spec *core.PersistentVolumeClaimSpec) *core.PersistentVolumeClaim {
 	pvc := c.createPVC(name, host.Address.Namespace, host, spec)
 	return &pvc
 }
@@ -1056,8 +1056,8 @@ func (c *Creator) CreatePVC(name string, host *chiv1.ChiHost, spec *corev1.Persi
 
 // statefulSetAppendPVCTemplate appends to StatefulSet.Spec.VolumeClaimTemplates new entry with data from provided 'src' ChiVolumeClaimTemplate
 func (c *Creator) statefulSetAppendPVCTemplate(
 	statefulSet *apps.StatefulSet,
-	host *chiv1.ChiHost,
-	volumeClaimTemplate *chiv1.ChiVolumeClaimTemplate,
+	host *api.ChiHost,
+	volumeClaimTemplate *api.ChiVolumeClaimTemplate,
 ) {
 	// Since we have the same names for PVs produced from both VolumeClaimTemplates and Volumes,
 	// we need to check naming for all of them
@@ -1106,73 +1106,73 @@ func (c *Creator) statefulSetAppendPVCTemplate(
 }
 
 // OperatorShouldCreatePVC checks whether operator should create PVC for specified volumeCLimaTemplate
-func (c *Creator) OperatorShouldCreatePVC(host *chiv1.ChiHost, volumeClaimTemplate *chiv1.ChiVolumeClaimTemplate) bool {
-	return getPVCProvisioner(host, volumeClaimTemplate) == chiv1.PVCProvisionerOperator
+func (c *Creator) OperatorShouldCreatePVC(host *api.ChiHost, volumeClaimTemplate *api.ChiVolumeClaimTemplate) bool {
+	return getPVCProvisioner(host, volumeClaimTemplate) == api.PVCProvisionerOperator
 }
 
 // CreateClusterSecret creates cluster secret
-func (c *Creator) CreateClusterSecret(name string) *corev1.Secret {
-	return &corev1.Secret{
-		ObjectMeta: metav1.ObjectMeta{
+func (c *Creator) CreateClusterSecret(name string) *core.Secret {
+	return &core.Secret{
+		ObjectMeta: meta.ObjectMeta{
 			Namespace: c.chi.Namespace,
 			Name: name,
 		},
 		StringData: map[string]string{
 			"secret": util.RandStringRange(10, 20),
 		},
-		Type: corev1.SecretTypeOpaque,
+		Type: core.SecretTypeOpaque,
 	}
 }
 
 // newDefaultHostTemplate returns default Host Template to be used with StatefulSet
-func newDefaultHostTemplate(name string) *chiv1.ChiHostTemplate {
-	return &chiv1.ChiHostTemplate{
+func newDefaultHostTemplate(name string) *api.ChiHostTemplate {
+	return &api.ChiHostTemplate{
 		Name: name,
-		PortDistribution: []chiv1.ChiPortDistribution{
+		PortDistribution: []api.ChiPortDistribution{
 			{
-				Type: chiv1.PortDistributionUnspecified,
+				Type: api.PortDistributionUnspecified,
 			},
 		},
-		Spec: chiv1.ChiHost{
+		Spec: api.ChiHost{
 			Name: "",
-			TCPPort: chiv1.PortUnassigned(),
-			TLSPort: chiv1.PortUnassigned(),
-			HTTPPort: chiv1.PortUnassigned(),
-			HTTPSPort: chiv1.PortUnassigned(),
-			InterserverHTTPPort: chiv1.PortUnassigned(),
+			TCPPort: api.PortUnassigned(),
+			TLSPort: api.PortUnassigned(),
+			HTTPPort: api.PortUnassigned(),
+			HTTPSPort: api.PortUnassigned(),
+			InterserverHTTPPort: api.PortUnassigned(),
 			Templates: nil,
 		},
 	}
 }
 
 // newDefaultHostTemplateForHostNetwork
-func newDefaultHostTemplateForHostNetwork(name string) *chiv1.ChiHostTemplate {
-	return &chiv1.ChiHostTemplate{
+func newDefaultHostTemplateForHostNetwork(name string) *api.ChiHostTemplate {
+	return &api.ChiHostTemplate{
 		Name: name,
-		PortDistribution: []chiv1.ChiPortDistribution{
+		PortDistribution: []api.ChiPortDistribution{
 			{
-				Type: chiv1.PortDistributionClusterScopeIndex,
+				Type: api.PortDistributionClusterScopeIndex,
 			},
 		},
-		Spec: chiv1.ChiHost{
+		Spec: api.ChiHost{
 			Name: "",
-			TCPPort: chiv1.PortUnassigned(),
-			TLSPort: chiv1.PortUnassigned(),
-			HTTPPort: chiv1.PortUnassigned(),
-			HTTPSPort: chiv1.PortUnassigned(),
-			InterserverHTTPPort: chiv1.PortUnassigned(),
+			TCPPort: api.PortUnassigned(),
+			TLSPort: api.PortUnassigned(),
+			HTTPPort: api.PortUnassigned(),
+			HTTPSPort: api.PortUnassigned(),
+			InterserverHTTPPort: api.PortUnassigned(),
 			Templates: nil,
 		},
 	}
 }
 
 // newDefaultPodTemplate returns default Pod Template to be used with StatefulSet
-func newDefaultPodTemplate(name string, host *chiv1.ChiHost) *chiv1.ChiPodTemplate {
-	podTemplate := &chiv1.ChiPodTemplate{
+func newDefaultPodTemplate(name string, host *api.ChiHost) *api.ChiPodTemplate {
+	podTemplate := &api.ChiPodTemplate{
 		Name: name,
-		Spec: corev1.PodSpec{
-			Containers: []corev1.Container{},
-			Volumes: []corev1.Volume{},
+		Spec: core.PodSpec{
+			Containers: []core.Container{},
+			Volumes: []core.Volume{},
 		},
 	}
 
@@ -1182,12 +1182,12 @@ func newDefaultPodTemplate(name string, host *chiv1.ChiHost) *chiv1.ChiPodTempla
 }
 
 // newDefaultLivenessProbe returns default liveness probe
-func newDefaultLivenessProbe(host *chiv1.ChiHost) *corev1.Probe {
+func newDefaultLivenessProbe(host *api.ChiHost) *core.Probe {
 	// Introduce http probe in case http port is specified
-	if chiv1.IsPortAssigned(host.HTTPPort) {
-		return &corev1.Probe{
-			ProbeHandler: corev1.ProbeHandler{
-				HTTPGet: &corev1.HTTPGetAction{
+	if api.IsPortAssigned(host.HTTPPort) {
+		return &core.Probe{
+			ProbeHandler: core.ProbeHandler{
+				HTTPGet: &core.HTTPGetAction{
 					Path: "/ping",
 					Port: intstr.Parse(chDefaultHTTPPortName), // What if it is not a default?
 				},
@@ -1199,13 +1199,13 @@ func newDefaultLivenessProbe(host *chiv1.ChiHost) *corev1.Probe {
 	}
 
 	// Introduce https probe in case https port is specified
-	if chiv1.IsPortAssigned(host.HTTPSPort) {
-		return &corev1.Probe{
-			ProbeHandler: corev1.ProbeHandler{
-				HTTPGet: &corev1.HTTPGetAction{
+	if api.IsPortAssigned(host.HTTPSPort) {
+		return &core.Probe{
+			ProbeHandler: core.ProbeHandler{
+				HTTPGet: &core.HTTPGetAction{
 					Path: "/ping",
 					Port: intstr.Parse(chDefaultHTTPSPortName), // What if it is not a default?
-					Scheme: corev1.URISchemeHTTPS,
+					Scheme: core.URISchemeHTTPS,
 				},
 			},
 			InitialDelaySeconds: 60,
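The probe defaulting rule restated: prefer a plain-HTTP /ping check when the HTTP port is assigned, fall back to HTTPS, and install no probe at all otherwise. As a sketch of the selection order only (illustrative; the real functions return *core.Probe, not a string):

    // probeScheme mirrors the branch order of newDefaultLivenessProbe.
    func probeScheme(host *api.ChiHost) string {
        switch {
        case api.IsPortAssigned(host.HTTPPort):
            return "http"
        case api.IsPortAssigned(host.HTTPSPort):
            return "https"
        default:
            return "" // no probe: kubelet tracks only container process state
        }
    }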
@@ -1219,12 +1219,12 @@ func newDefaultLivenessProbe(host *chiv1.ChiHost) *corev1.Probe {
 }
 
 // newDefaultReadinessProbe returns default readiness probe
-func newDefaultReadinessProbe(host *chiv1.ChiHost) *corev1.Probe {
+func newDefaultReadinessProbe(host *api.ChiHost) *core.Probe {
 	// Introduce http probe in case http port is specified
-	if chiv1.IsPortAssigned(host.HTTPPort) {
-		return &corev1.Probe{
-			ProbeHandler: corev1.ProbeHandler{
-				HTTPGet: &corev1.HTTPGetAction{
+	if api.IsPortAssigned(host.HTTPPort) {
+		return &core.Probe{
+			ProbeHandler: core.ProbeHandler{
+				HTTPGet: &core.HTTPGetAction{
 					Path: "/ping",
 					Port: intstr.Parse(chDefaultHTTPPortName), // What if port name is not a default?
 				},
@@ -1235,13 +1235,13 @@ func newDefaultReadinessProbe(host *chiv1.ChiHost) *corev1.Probe {
 	}
 
 	// Introduce https probe in case https port is specified
-	if chiv1.IsPortAssigned(host.HTTPSPort) {
-		return &corev1.Probe{
-			ProbeHandler: corev1.ProbeHandler{
-				HTTPGet: &corev1.HTTPGetAction{
+	if api.IsPortAssigned(host.HTTPSPort) {
+		return &core.Probe{
+			ProbeHandler: core.ProbeHandler{
+				HTTPGet: &core.HTTPGetAction{
 					Path: "/ping",
 					Port: intstr.Parse(chDefaultHTTPSPortName), // What if port name is not a default?
-					Scheme: corev1.URISchemeHTTPS,
+					Scheme: core.URISchemeHTTPS,
 				},
 			},
 			InitialDelaySeconds: 10,
@@ -1253,57 +1253,57 @@ func newDefaultReadinessProbe(host *chiv1.ChiHost) *corev1.Probe {
 
 	return nil
 }
 
-func appendContainerPorts(container *corev1.Container, host *chiv1.ChiHost) {
-	if chiv1.IsPortAssigned(host.TCPPort) {
+func appendContainerPorts(container *core.Container, host *api.ChiHost) {
+	if api.IsPortAssigned(host.TCPPort) {
 		container.Ports = append(container.Ports,
-			corev1.ContainerPort{
+			core.ContainerPort{
 				Name: chDefaultTCPPortName,
 				ContainerPort: host.TCPPort,
-				Protocol: corev1.ProtocolTCP,
+				Protocol: core.ProtocolTCP,
 			},
 		)
 	}
-	if chiv1.IsPortAssigned(host.TLSPort) {
+	if api.IsPortAssigned(host.TLSPort) {
 		container.Ports = append(container.Ports,
-			corev1.ContainerPort{
+			core.ContainerPort{
 				Name: chDefaultTLSPortName,
 				ContainerPort: host.TLSPort,
-				Protocol: corev1.ProtocolTCP,
+				Protocol: core.ProtocolTCP,
 			},
 		)
 	}
-	if chiv1.IsPortAssigned(host.HTTPPort) {
+	if api.IsPortAssigned(host.HTTPPort) {
 		container.Ports = append(container.Ports,
-			corev1.ContainerPort{
+			core.ContainerPort{
 				Name: chDefaultHTTPPortName,
 				ContainerPort: host.HTTPPort,
-				Protocol: corev1.ProtocolTCP,
+				Protocol: core.ProtocolTCP,
 			},
 		)
 	}
-	if chiv1.IsPortAssigned(host.HTTPSPort) {
+	if api.IsPortAssigned(host.HTTPSPort) {
 		container.Ports = append(container.Ports,
-			corev1.ContainerPort{
+			core.ContainerPort{
 				Name: chDefaultHTTPSPortName,
 				ContainerPort: host.HTTPSPort,
-				Protocol: corev1.ProtocolTCP,
+				Protocol: core.ProtocolTCP,
 			},
 		)
 	}
-	if chiv1.IsPortAssigned(host.InterserverHTTPPort) {
+	if api.IsPortAssigned(host.InterserverHTTPPort) {
 		container.Ports = append(container.Ports,
-			corev1.ContainerPort{
+			core.ContainerPort{
 				Name: chDefaultInterserverHTTPPortName,
 				ContainerPort: host.InterserverHTTPPort,
-				Protocol: corev1.ProtocolTCP,
+				Protocol: core.ProtocolTCP,
 			},
 		)
 	}
 }
 
 // newDefaultClickHouseContainer returns default ClickHouse Container
-func newDefaultClickHouseContainer(host *chiv1.ChiHost) corev1.Container {
-	container := corev1.Container{
+func newDefaultClickHouseContainer(host *api.ChiHost) core.Container {
+	container := core.Container{
 		Name: clickHouseContainerName,
 		Image: defaultClickHouseDockerImage,
 		LivenessProbe: newDefaultLivenessProbe(host),
@@ -1314,8 +1314,8 @@ func newDefaultClickHouseContainer(host *chiv1.ChiHost) corev1.Container {
 }
 
 // newDefaultLogContainer returns default Log Container
-func newDefaultLogContainer() corev1.Container {
-	return corev1.Container{
+func newDefaultLogContainer() core.Container {
+	return core.Container{
 		Name: clickHouseLogContainerName,
 		Image: defaultUbiDockerImage,
 		Command: []string{
@@ -1328,16 +1328,16 @@ func newDefaultLogContainer() corev1.Container {
 }
 
 // addContainer adds container to ChiPodTemplate
-func addContainer(podSpec *corev1.PodSpec, container corev1.Container) {
+func addContainer(podSpec *core.PodSpec, container core.Container) {
 	podSpec.Containers = append(podSpec.Containers, container)
 }
 
-// newVolumeForPVC returns corev1.Volume object with defined name
-func newVolumeForPVC(name, claimName string) corev1.Volume {
-	return corev1.Volume{
+// newVolumeForPVC returns core.Volume object with defined name
+func newVolumeForPVC(name, claimName string) core.Volume {
+	return core.Volume{
 		Name: name,
-		VolumeSource: corev1.VolumeSource{
-			PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+		VolumeSource: core.VolumeSource{
+			PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{
 				ClaimName: claimName,
 				ReadOnly: false,
 			},
@@ -1345,14 +1345,14 @@ func newVolumeForPVC(name, claimName string) corev1.Volume {
 		},
 	}
 }
 
-// newVolumeForConfigMap returns corev1.Volume object with defined name
-func newVolumeForConfigMap(name string) corev1.Volume {
+// newVolumeForConfigMap returns core.Volume object with defined name
+func newVolumeForConfigMap(name string) core.Volume {
 	var defaultMode int32 = 0644
-	return corev1.Volume{
+	return core.Volume{
 		Name: name,
-		VolumeSource: corev1.VolumeSource{
-			ConfigMap: &corev1.ConfigMapVolumeSource{
-				LocalObjectReference: corev1.LocalObjectReference{
+		VolumeSource: core.VolumeSource{
+			ConfigMap: &core.ConfigMapVolumeSource{
+				LocalObjectReference: core.LocalObjectReference{
 					Name: name,
 				},
 				DefaultMode: &defaultMode,
@@ -1361,16 +1361,16 @@ func newVolumeForConfigMap(name string) corev1.Volume {
 		},
 	}
 }
 
-// newVolumeMount returns corev1.VolumeMount object with name and mount path
-func newVolumeMount(name, mountPath string) corev1.VolumeMount {
-	return corev1.VolumeMount{
+// newVolumeMount returns core.VolumeMount object with name and mount path
+func newVolumeMount(name, mountPath string) core.VolumeMount {
+	return core.VolumeMount{
 		Name: name,
 		MountPath: mountPath,
 	}
 }
 
 // getContainerByName finds Container with specified name among all containers of Pod Template in StatefulSet
-func getContainerByName(statefulSet *apps.StatefulSet, name string) *corev1.Container {
+func getContainerByName(statefulSet *apps.StatefulSet, name string) *core.Container {
 	for i := range statefulSet.Spec.Template.Spec.Containers {
 		// Convenience wrapper
 		container := &statefulSet.Spec.Template.Spec.Containers[i]
@@ -1382,16 +1382,16 @@ func getContainerByName(statefulSet *apps.StatefulSet, name string) *corev1.Cont
 	return nil
 }
 
-func getOwnerReferences(chi *chiv1.ClickHouseInstallation) []metav1.OwnerReference {
+func getOwnerReferences(chi *api.ClickHouseInstallation) []meta.OwnerReference {
 	if chi.Attributes.SkipOwnerRef {
 		return nil
 	}
 	controller := true
 	block := true
-	return []metav1.OwnerReference{
+	return []meta.OwnerReference{
 		{
-			APIVersion: chiv1.SchemeGroupVersion.String(),
-			Kind: chiv1.ClickHouseInstallationCRDResourceKind,
+			APIVersion: api.SchemeGroupVersion.String(),
+			Kind: api.ClickHouseInstallationCRDResourceKind,
 			Name: chi.Name,
 			UID: chi.UID,
 			Controller: &controller,
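getOwnerReferences is what ties every generated object back to its CHI, so deleting the CHI garbage-collects the whole tree; Attributes.SkipOwnerRef opts a CHI out of that cascade. A sketch of the attachment point (hypothetical object, real helper):

    svc := &core.Service{
        ObjectMeta: meta.ObjectMeta{
            Name:            "example-service", // hypothetical name
            Namespace:       chi.Namespace,
            OwnerReferences: getOwnerReferences(chi), // nil when SkipOwnerRef is set
        },
    }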
diff --git a/pkg/model/chi/deleter.go b/pkg/model/chi/deleter.go
index 2ad69bb4e..4132aca2b 100644
--- a/pkg/model/chi/deleter.go
+++ b/pkg/model/chi/deleter.go
@@ -17,16 +17,16 @@ package chi
 import (
 	"k8s.io/api/core/v1"
 
-	chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+	api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
 )
 
 // HostCanDeletePVC checks whether PVC on a host can be deleted
-func HostCanDeletePVC(host *chiv1.ChiHost, pvcName string) bool {
+func HostCanDeletePVC(host *api.ChiHost, pvcName string) bool {
 	// In any unknown cases just delete PVC with unclear bindings
-	policy := chiv1.PVCReclaimPolicyDelete
+	policy := api.PVCReclaimPolicyDelete
 
 	// What host, VolumeMount and VolumeClaimTemplate this PVC is made from?
-	host.WalkVolumeMounts(chiv1.CurStatefulSet, func(volumeMount *v1.VolumeMount) {
+	host.WalkVolumeMounts(api.CurStatefulSet, func(volumeMount *v1.VolumeMount) {
 		volumeClaimTemplate, ok := GetVolumeClaimTemplate(host, volumeMount)
 		if !ok {
 			// No this is not a reference to VolumeClaimTemplate
@@ -42,14 +42,14 @@ func HostCanDeletePVC(host *chiv1.ChiHost, pvcName string) bool {
 	})
 
 	// Delete all explicitly specified as deletable PVCs and all PVCs of un-templated or unclear origin
-	return policy == chiv1.PVCReclaimPolicyDelete
+	return policy == api.PVCReclaimPolicyDelete
 }
 
 // HostCanDeleteAllPVCs checks whether all PVCs can be deleted
-func HostCanDeleteAllPVCs(host *chiv1.ChiHost) bool {
+func HostCanDeleteAllPVCs(host *api.ChiHost) bool {
 	canDeleteAllPVCs := true
-	host.CHI.WalkVolumeClaimTemplates(func(template *chiv1.ChiVolumeClaimTemplate) {
-		if getPVCReclaimPolicy(host, template) == chiv1.PVCReclaimPolicyRetain {
+	host.CHI.WalkVolumeClaimTemplates(func(template *api.ChiVolumeClaimTemplate) {
+		if getPVCReclaimPolicy(host, template) == api.PVCReclaimPolicyRetain {
 			// At least one template wants to keep its PVC
 			canDeleteAllPVCs = false
 		}
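The deleter's rule of thumb: any PVC not clearly covered by a Retain-policy template defaults to deletion, the "unclear bindings" case included. A hypothetical audit built only from calls shown above (the template's Name field is an assumption):

    // List templates whose PVCs would survive host deletion.
    host.CHI.WalkVolumeClaimTemplates(func(t *api.ChiVolumeClaimTemplate) {
        if getPVCReclaimPolicy(host, t) == api.PVCReclaimPolicyRetain {
            fmt.Printf("template %q retains its PVCs\n", t.Name)
        }
    })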
diff --git a/pkg/model/chi/labeler.go b/pkg/model/chi/labeler.go
index b874585cd..aa5d0085e 100644
--- a/pkg/model/chi/labeler.go
+++ b/pkg/model/chi/labeler.go
@@ -16,13 +16,13 @@ package chi
 import (
 	"fmt"
 
-	v1 "k8s.io/api/core/v1"
+	core "k8s.io/api/core/v1"
 	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
-	kublabels "k8s.io/apimachinery/pkg/labels"
+	k8sLabels "k8s.io/apimachinery/pkg/labels"
 
 	"github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com"
-	chiv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+	api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
 	"github.com/altinity/clickhouse-operator/pkg/chop"
 	"github.com/altinity/clickhouse-operator/pkg/util"
 )
@@ -77,11 +77,11 @@ const (
 
 // Labeler is an entity which can label CHI artifacts
 type Labeler struct {
-	chi *chiv1.ClickHouseInstallation
+	chi *api.ClickHouseInstallation
 }
 
 // NewLabeler creates new labeler with context
-func NewLabeler(chi *chiv1.ClickHouseInstallation) *Labeler {
+func NewLabeler(chi *api.ClickHouseInstallation) *Labeler {
 	return &Labeler{
 		chi: chi,
 	}
@@ -106,7 +106,7 @@ func (l *Labeler) getConfigMapCHICommonUsers() map[string]string {
 }
 
 // getConfigMapHost
-func (l *Labeler) getConfigMapHost(host *chiv1.ChiHost) map[string]string {
+func (l *Labeler) getConfigMapHost(host *api.ChiHost) map[string]string {
 	return util.MergeStringMapsOverwrite(
 		l.getHostScope(host, false),
 		map[string]string{
@@ -115,7 +115,7 @@ func (l *Labeler) getConfigMapHost(host *chiv1.ChiHost) map[string]string {
 }
 
 // getServiceCHI
-func (l *Labeler) getServiceCHI(chi *chiv1.ClickHouseInstallation) map[string]string {
+func (l *Labeler) getServiceCHI(chi *api.ClickHouseInstallation) map[string]string {
 	return util.MergeStringMapsOverwrite(
 		l.getCHIScope(),
 		map[string]string{
@@ -124,7 +124,7 @@ func (l *Labeler) getServiceCHI(chi *chiv1.ClickHouseInstallation) map[string]st
 }
 
 // getServiceCluster
-func (l *Labeler) getServiceCluster(cluster *chiv1.Cluster) map[string]string {
+func (l *Labeler) getServiceCluster(cluster *api.Cluster) map[string]string {
 	return util.MergeStringMapsOverwrite(
 		l.getClusterScope(cluster),
 		map[string]string{
@@ -133,7 +133,7 @@ func (l *Labeler) getServiceCluster(cluster *chiv1.Cluster) map[string]string {
 }
 
 // getServiceShard
-func (l *Labeler) getServiceShard(shard *chiv1.ChiShard) map[string]string {
+func (l *Labeler) getServiceShard(shard *api.ChiShard) map[string]string {
 	return util.MergeStringMapsOverwrite(
 		l.getShardScope(shard),
 		map[string]string{
@@ -142,7 +142,7 @@ func (l *Labeler) getServiceShard(shard *chiv1.ChiShard) map[string]string {
 }
 
 // getServiceHost
-func (l *Labeler) getServiceHost(host *chiv1.ChiHost) map[string]string {
+func (l *Labeler) getServiceHost(host *api.ChiHost) map[string]string {
 	return util.MergeStringMapsOverwrite(
 		l.getHostScope(host, false),
 		map[string]string{
@@ -174,13 +174,13 @@ func (l *Labeler) getSelectorCHIScopeReady() map[string]string {
 }
 
 // getClusterScope gets labels for Cluster-scoped object
-func (l *Labeler) getClusterScope(cluster *chiv1.Cluster) map[string]string {
+func (l *Labeler) getClusterScope(cluster *api.Cluster) map[string]string {
 	// Combine generated labels and CHI-provided labels
 	return l.filterOutPredefined(l.appendCHIProvidedTo(getSelectorClusterScope(cluster)))
 }
 
 // getSelectorClusterScope gets labels to select a Cluster-scoped object
-func getSelectorClusterScope(cluster *chiv1.Cluster) map[string]string {
+func getSelectorClusterScope(cluster *api.Cluster) map[string]string {
 	// Do not include CHI-provided labels
 	return map[string]string{
 		LabelNamespace: labelsNamer.getNamePartNamespace(cluster),
@@ -191,18 +191,18 @@ func getSelectorClusterScope(cluster *chiv1.Cluster) map[string]string {
 }
 
 // getSelectorClusterScope gets labels to select a ready-labelled Cluster-scoped object
-func getSelectorClusterScopeReady(cluster *chiv1.Cluster) map[string]string {
+func getSelectorClusterScopeReady(cluster *api.Cluster) map[string]string {
 	return appendKeyReady(getSelectorClusterScope(cluster))
 }
 
 // getShardScope gets labels for Shard-scoped object
-func (l *Labeler) getShardScope(shard *chiv1.ChiShard) map[string]string {
+func (l *Labeler) getShardScope(shard *api.ChiShard) map[string]string {
 	// Combine generated labels and CHI-provided labels
 	return l.filterOutPredefined(l.appendCHIProvidedTo(getSelectorShardScope(shard)))
 }
 
 // getSelectorShardScope gets labels to select a Shard-scoped object
-func getSelectorShardScope(shard *chiv1.ChiShard) map[string]string {
+func getSelectorShardScope(shard *api.ChiShard) map[string]string {
 	// Do not include CHI-provided labels
 	return map[string]string{
 		LabelNamespace: labelsNamer.getNamePartNamespace(shard),
@@ -214,12 +214,12 @@ func getSelectorShardScope(shard *chiv1.ChiShard) map[string]string {
 }
 
 // getSelectorShardScope gets labels to select a ready-labelled Shard-scoped object
-func getSelectorShardScopeReady(shard *chiv1.ChiShard) map[string]string {
+func getSelectorShardScopeReady(shard *api.ChiShard) map[string]string {
 	return appendKeyReady(getSelectorShardScope(shard))
 }
 
 // getHostScope gets labels for Host-scoped object
-func (l *Labeler) getHostScope(host *chiv1.ChiHost, applySupplementaryServiceLabels bool) map[string]string {
+func (l *Labeler) getHostScope(host *api.ChiHost, applySupplementaryServiceLabels bool) map[string]string {
 	// Combine generated labels and CHI-provided labels
 	labels := GetSelectorHostScope(host)
 	if chop.Config().Label.Runtime.AppendScope {
@@ -244,7 +244,7 @@ func (l *Labeler) getHostScope(host *chiv1.ChiHost, applySupplementaryServiceLab
 	return l.filterOutPredefined(l.appendCHIProvidedTo(labels))
 }
 
-func appendConfigLabels(host *chiv1.ChiHost, labels map[string]string) map[string]string {
+func appendConfigLabels(host *api.ChiHost, labels map[string]string) map[string]string {
 	if host.HasCurStatefulSet() {
 		if val, exists := host.CurStatefulSet.Labels[LabelZookeeperConfigVersion]; exists {
 			labels[LabelZookeeperConfigVersion] = val
@@ -259,27 +259,27 @@ func appendConfigLabels(host *chiv1.ChiHost, labels map[string]strin
 }
 
 // getHostScopeReady gets labels for Host-scoped object including Ready label
-func (l *Labeler) getHostScopeReady(host *chiv1.ChiHost, applySupplementaryServiceLabels bool) map[string]string {
+func (l *Labeler) getHostScopeReady(host *api.ChiHost, applySupplementaryServiceLabels bool) map[string]string {
 	return appendKeyReady(l.getHostScope(host, applySupplementaryServiceLabels))
 }
 
 // getHostScopeReclaimPolicy gets host scope labels with PVCReclaimPolicy from template
-func (l *Labeler) getHostScopeReclaimPolicy(host *chiv1.ChiHost, template *chiv1.ChiVolumeClaimTemplate, applySupplementaryServiceLabels bool) map[string]string {
+func (l *Labeler) getHostScopeReclaimPolicy(host *api.ChiHost, template *api.ChiVolumeClaimTemplate, applySupplementaryServiceLabels bool) map[string]string {
 	return util.MergeStringMapsOverwrite(l.getHostScope(host, applySupplementaryServiceLabels), map[string]string{
 		LabelPVCReclaimPolicyName: getPVCReclaimPolicy(host, template).String(),
 	})
 }
 
 // getPV
-func (l *Labeler) getPV(pv *v1.PersistentVolume, host *chiv1.ChiHost) map[string]string {
+func (l *Labeler) getPV(pv *core.PersistentVolume, host *api.ChiHost) map[string]string {
 	return util.MergeStringMapsOverwrite(pv.Labels, l.getHostScope(host, false))
 }
 
 // getPVC
 func (l *Labeler) getPVC(
-	pvc *v1.PersistentVolumeClaim,
-	host *chiv1.ChiHost,
-	template *chiv1.ChiVolumeClaimTemplate,
+	pvc *core.PersistentVolumeClaim,
+	host *api.ChiHost,
+	template *api.ChiVolumeClaimTemplate,
 ) map[string]string {
 	// Prepare main labels based on template
 	labels := util.MergeStringMapsOverwrite(pvc.Labels, template.ObjectMeta.Labels)
@@ -291,11 +291,11 @@ func (l *Labeler) getPVC(
 }
 
 // GetReclaimPolicy gets reclaim policy from meta
-func GetReclaimPolicy(meta meta.ObjectMeta) chiv1.PVCReclaimPolicy {
-	defaultReclaimPolicy := chiv1.PVCReclaimPolicyDelete
+func GetReclaimPolicy(meta meta.ObjectMeta) api.PVCReclaimPolicy {
+	defaultReclaimPolicy := api.PVCReclaimPolicyDelete
 
 	if value, ok := meta.Labels[LabelPVCReclaimPolicyName]; ok {
-		reclaimPolicy := chiv1.NewPVCReclaimPolicyFromString(value)
+		reclaimPolicy := api.NewPVCReclaimPolicyFromString(value)
 		if reclaimPolicy.IsValid() {
 			return reclaimPolicy
 		}
@@ -305,7 +305,7 @@ func GetReclaimPolicy(meta meta.ObjectMeta) chiv1.PVCReclaimPolicy {
 }
 
 // GetSelectorHostScope gets labels to select a Host-scoped object
-func GetSelectorHostScope(host *chiv1.ChiHost) map[string]string {
+func GetSelectorHostScope(host *api.ChiHost) map[string]string {
 	// Do not include CHI-provided labels
 	return map[string]string{
 		LabelNamespace: labelsNamer.getNamePartNamespace(host),
@@ -328,8 +328,8 @@ func (l *Labeler) appendCHIProvidedTo(dst map[string]string) map[string]string {
 	return util.MergeStringMapsOverwrite(dst, sourceLabels)
 }
 
-// makeSetFromObjectMeta makes kublabels.Set from ObjectMeta
-func makeSetFromObjectMeta(objMeta *meta.ObjectMeta) (kublabels.Set, error) {
+// makeSetFromObjectMeta makes k8sLabels.Set from ObjectMeta
+func makeSetFromObjectMeta(objMeta *meta.ObjectMeta) (k8sLabels.Set, error) {
 	// Check mandatory labels are in place
 	if !util.MapHasKeys(objMeta.Labels, LabelNamespace, LabelAppName, LabelCHIName) {
 		return nil, fmt.Errorf(
@@ -352,7 +352,7 @@ func makeSetFromObjectMeta(objMeta *meta.ObjectMeta) (kublabels.Set, error) {
 		LabelService,
 	}
 
-	set := kublabels.Set{}
+	set := k8sLabels.Set{}
 	util.MergeStringMapsOverwrite(set, objMeta.Labels, labels...)
 
 	// skip StatefulSet
@@ -363,13 +363,13 @@ func makeSetFromObjectMeta(objMeta *meta.ObjectMeta) (kublabels.Set, error) {
 
 // MakeSelectorFromObjectMeta makes selector from meta
 // TODO review usage
-func MakeSelectorFromObjectMeta(objMeta *meta.ObjectMeta) (kublabels.Selector, error) {
+func MakeSelectorFromObjectMeta(objMeta *meta.ObjectMeta) (k8sLabels.Selector, error) {
 	set, err := makeSetFromObjectMeta(objMeta)
 	if err != nil {
 		// Unable to make set
 		return nil, err
 	}
-	return kublabels.SelectorFromSet(set), nil
+	return k8sLabels.SelectorFromSet(set), nil
 }
 
 // IsCHOPGeneratedObject check whether object is generated by an operator. Check is label-based
diff --git a/pkg/model/chi/macro.go b/pkg/model/chi/macro.go
index db7bb33b1..e696f65f8 100644
--- a/pkg/model/chi/macro.go
+++ b/pkg/model/chi/macro.go
@@ -18,7 +18,7 @@ import (
 	"strconv"
 	"strings"
 
-	chop "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+	api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
 	"github.com/altinity/clickhouse-operator/pkg/util"
 )
@@ -79,10 +79,10 @@ const (
 
 // macrosEngine
 type macrosEngine struct {
 	names *namer
-	chi *chop.ClickHouseInstallation
-	cluster *chop.Cluster
-	shard *chop.ChiShard
-	host *chop.ChiHost
+	chi *api.ClickHouseInstallation
+	cluster *api.Cluster
+	shard *api.ChiShard
+	host *api.ChiHost
 }
 
 // macro
@@ -90,13 +90,13 @@ func macro(scope interface{}) *macrosEngine {
 	m := new(macrosEngine)
 	m.names = newNamer(namerContextNames)
 	switch t := scope.(type) {
-	case *chop.ClickHouseInstallation:
+	case *api.ClickHouseInstallation:
 		m.chi = t
-	case *chop.Cluster:
+	case *api.Cluster:
 		m.cluster = t
-	case *chop.ChiShard:
+	case *api.ChiShard:
 		m.shard = t
-	case *chop.ChiHost:
+	case *api.ChiHost:
 		m.host = t
 	}
 	return m
@@ -186,7 +186,7 @@ func (m *macrosEngine) newMapMacroReplacerShard() *util.MapReplacer {
 }
 
 // clusterScopeIndexOfPreviousCycleTail gets cluster-scope index of previous cycle tail
-func clusterScopeIndexOfPreviousCycleTail(host *chop.ChiHost) int {
+func clusterScopeIndexOfPreviousCycleTail(host *api.ChiHost) int {
 	if host.Address.ClusterScopeCycleOffset == 0 {
 		// This is the cycle head - the first host of the cycle
 		// We need to point to previous host in this cluster - which would be previous cycle tail
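macro() above is a plain type switch: hand it a CHI, cluster, shard, or host and the returned engine resolves name patterns at that scope; an unknown type simply yields an engine with no scope bound. A usage sketch with patterns that appear in namer.go below:

    // Host-scoped vs CHI-scoped macro expansion (patterns from namer.go).
    hostCM := macro(host).Line(configMapHostNamePattern)
    commonCM := macro(chi).Line(configMapCommonNamePattern)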
- case *chop.ChiShard: - shard := obj.(*chop.ChiShard) + case *api.ChiShard: + shard := obj.(*api.ChiShard) return n.namePartChiName(shard.Address.Namespace) - case *chop.ChiHost: - host := obj.(*chop.ChiHost) + case *api.ChiHost: + host := obj.(*api.ChiHost) return n.namePartChiName(host.Address.Namespace) } @@ -258,17 +258,17 @@ func (n *namer) getNamePartNamespace(obj interface{}) string { // getNamePartCHIName func (n *namer) getNamePartCHIName(obj interface{}) string { switch obj.(type) { - case *chop.ClickHouseInstallation: - chi := obj.(*chop.ClickHouseInstallation) + case *api.ClickHouseInstallation: + chi := obj.(*api.ClickHouseInstallation) return n.namePartChiName(chi.Name) - case *chop.Cluster: - cluster := obj.(*chop.Cluster) + case *api.Cluster: + cluster := obj.(*api.Cluster) return n.namePartChiName(cluster.Address.CHIName) - case *chop.ChiShard: - shard := obj.(*chop.ChiShard) + case *api.ChiShard: + shard := obj.(*api.ChiShard) return n.namePartChiName(shard.Address.CHIName) - case *chop.ChiHost: - host := obj.(*chop.ChiHost) + case *api.ChiHost: + host := obj.(*api.ChiHost) return n.namePartChiName(host.Address.CHIName) } @@ -278,14 +278,14 @@ func (n *namer) getNamePartCHIName(obj interface{}) string { // getNamePartClusterName func (n *namer) getNamePartClusterName(obj interface{}) string { switch obj.(type) { - case *chop.Cluster: - cluster := obj.(*chop.Cluster) + case *api.Cluster: + cluster := obj.(*api.Cluster) return n.namePartClusterName(cluster.Address.ClusterName) - case *chop.ChiShard: - shard := obj.(*chop.ChiShard) + case *api.ChiShard: + shard := obj.(*api.ChiShard) return n.namePartClusterName(shard.Address.ClusterName) - case *chop.ChiHost: - host := obj.(*chop.ChiHost) + case *api.ChiHost: + host := obj.(*api.ChiHost) return n.namePartClusterName(host.Address.ClusterName) } @@ -295,11 +295,11 @@ func (n *namer) getNamePartClusterName(obj interface{}) string { // getNamePartShardName func (n *namer) getNamePartShardName(obj interface{}) string { switch obj.(type) { - case *chop.ChiShard: - shard := obj.(*chop.ChiShard) + case *api.ChiShard: + shard := obj.(*api.ChiShard) return n.namePartShardName(shard.Address.ShardName) - case *chop.ChiHost: - host := obj.(*chop.ChiHost) + case *api.ChiHost: + host := obj.(*api.ChiHost) return n.namePartShardName(host.Address.ShardName) } @@ -307,87 +307,87 @@ func (n *namer) getNamePartShardName(obj interface{}) string { } // getNamePartReplicaName -func (n *namer) getNamePartReplicaName(host *chop.ChiHost) string { +func (n *namer) getNamePartReplicaName(host *api.ChiHost) string { return n.namePartReplicaName(host.Address.ReplicaName) } // getNamePartHostName -func (n *namer) getNamePartHostName(host *chop.ChiHost) string { +func (n *namer) getNamePartHostName(host *api.ChiHost) string { return n.namePartHostName(host.Address.HostName) } // getNamePartCHIScopeCycleSize -func getNamePartCHIScopeCycleSize(host *chop.ChiHost) string { +func getNamePartCHIScopeCycleSize(host *api.ChiHost) string { return strconv.Itoa(host.Address.CHIScopeCycleSize) } // getNamePartCHIScopeCycleIndex -func getNamePartCHIScopeCycleIndex(host *chop.ChiHost) string { +func getNamePartCHIScopeCycleIndex(host *api.ChiHost) string { return strconv.Itoa(host.Address.CHIScopeCycleIndex) } // getNamePartCHIScopeCycleOffset -func getNamePartCHIScopeCycleOffset(host *chop.ChiHost) string { +func getNamePartCHIScopeCycleOffset(host *api.ChiHost) string { return strconv.Itoa(host.Address.CHIScopeCycleOffset) } // getNamePartClusterScopeCycleSize 
-func getNamePartClusterScopeCycleSize(host *chop.ChiHost) string {
+func getNamePartClusterScopeCycleSize(host *api.ChiHost) string {
 	return strconv.Itoa(host.Address.ClusterScopeCycleSize)
 }
 
 // getNamePartClusterScopeCycleIndex
-func getNamePartClusterScopeCycleIndex(host *chop.ChiHost) string {
+func getNamePartClusterScopeCycleIndex(host *api.ChiHost) string {
 	return strconv.Itoa(host.Address.ClusterScopeCycleIndex)
 }
 
 // getNamePartClusterScopeCycleOffset
-func getNamePartClusterScopeCycleOffset(host *chop.ChiHost) string {
+func getNamePartClusterScopeCycleOffset(host *api.ChiHost) string {
 	return strconv.Itoa(host.Address.ClusterScopeCycleOffset)
 }
 
 // getNamePartCHIScopeIndex
-func getNamePartCHIScopeIndex(host *chop.ChiHost) string {
+func getNamePartCHIScopeIndex(host *api.ChiHost) string {
 	return strconv.Itoa(host.Address.CHIScopeIndex)
 }
 
 // getNamePartClusterScopeIndex
-func getNamePartClusterScopeIndex(host *chop.ChiHost) string {
+func getNamePartClusterScopeIndex(host *api.ChiHost) string {
 	return strconv.Itoa(host.Address.ClusterScopeIndex)
 }
 
 // getNamePartShardScopeIndex
-func getNamePartShardScopeIndex(host *chop.ChiHost) string {
+func getNamePartShardScopeIndex(host *api.ChiHost) string {
 	return strconv.Itoa(host.Address.ShardScopeIndex)
 }
 
 // getNamePartReplicaScopeIndex
-func getNamePartReplicaScopeIndex(host *chop.ChiHost) string {
+func getNamePartReplicaScopeIndex(host *api.ChiHost) string {
 	return strconv.Itoa(host.Address.ReplicaScopeIndex)
 }
 
 // CreateConfigMapHostName returns a name for a ConfigMap for replica's personal config
-func CreateConfigMapHostName(host *chop.ChiHost) string {
+func CreateConfigMapHostName(host *api.ChiHost) string {
 	return macro(host).Line(configMapHostNamePattern)
 }
 
 // CreateConfigMapHostMigrationName returns a name for a ConfigMap for replica's personal config
-//func CreateConfigMapHostMigrationName(host *chop.ChiHost) string {
+//func CreateConfigMapHostMigrationName(host *api.ChiHost) string {
 //	return macro(host).Line(configMapHostMigrationNamePattern)
 //}
 
 // CreateConfigMapCommonName returns a name for a ConfigMap for replica's common config
-func CreateConfigMapCommonName(chi *chop.ClickHouseInstallation) string {
+func CreateConfigMapCommonName(chi *api.ClickHouseInstallation) string {
 	return macro(chi).Line(configMapCommonNamePattern)
 }
 
 // CreateConfigMapCommonUsersName returns a name for a ConfigMap for replica's common users config
-func CreateConfigMapCommonUsersName(chi *chop.ClickHouseInstallation) string {
+func CreateConfigMapCommonUsersName(chi *api.ClickHouseInstallation) string {
 	return macro(chi).Line(configMapCommonUsersNamePattern)
 }
 
 // CreateCHIServiceName creates a name of a root ClickHouseInstallation Service resource
-func CreateCHIServiceName(chi *chop.ClickHouseInstallation) string {
+func CreateCHIServiceName(chi *api.ClickHouseInstallation) string {
 	// Name can be generated either from default name pattern,
 	// or from personal name pattern provided in ServiceTemplate
 
@@ -408,7 +408,7 @@ func CreateCHIServiceName(chi *chop.ClickHouseInstallation) string {
 }
 
 // CreateCHIServiceFQDN creates a FQD name of a root ClickHouseInstallation Service resource
-func CreateCHIServiceFQDN(chi *chop.ClickHouseInstallation) string {
+func CreateCHIServiceFQDN(chi *api.ClickHouseInstallation) string {
 	// FQDN can be generated either from default pattern,
 	// or from personal pattern provided
 
@@ -429,7 +429,7 @@ func CreateCHIServiceFQDN(chi *chop.ClickHouseInstallation) string {
 }
 
 // CreateClusterServiceName returns a name of a cluster's Service
-func CreateClusterServiceName(cluster *chop.Cluster) string {
+func CreateClusterServiceName(cluster *api.Cluster) string {
 	// Name can be generated either from default name pattern,
 	// or from personal name pattern provided in ServiceTemplate
 
@@ -450,7 +450,7 @@ func CreateClusterServiceName(cluster *chop.Cluster) string {
 }
 
 // CreateShardServiceName returns a name of a shard's Service
-func CreateShardServiceName(shard *chop.ChiShard) string {
+func CreateShardServiceName(shard *api.ChiShard) string {
 	// Name can be generated either from default name pattern,
 	// or from personal name pattern provided in ServiceTemplate
 
@@ -471,12 +471,12 @@ func CreateShardServiceName(shard *chop.ChiShard) string {
 }
 
 // CreateShardName returns a name of a shard
-func CreateShardName(shard *chop.ChiShard, index int) string {
+func CreateShardName(shard *api.ChiShard, index int) string {
 	return strconv.Itoa(index)
 }
 
 // IsAutoGeneratedShardName checks whether provided name is auto-generated
-func IsAutoGeneratedShardName(name string, shard *chop.ChiShard, index int) bool {
+func IsAutoGeneratedShardName(name string, shard *api.ChiShard, index int) bool {
 	return name == CreateShardName(shard, index)
 }
 
@@ -484,17 +484,17 @@ func IsAutoGeneratedShardName(name string, shard *chop.ChiShard, index int) bool
 // Here replica is a CHOp-internal replica - i.e. a vertical slice of hosts field.
 // In case you are looking for replica name in terms of a hostname to address particular host as in remote_servers.xml
 // you need to take a look on CreateInstanceHostname function
-func CreateReplicaName(replica *chop.ChiReplica, index int) string {
+func CreateReplicaName(replica *api.ChiReplica, index int) string {
 	return strconv.Itoa(index)
 }
 
 // IsAutoGeneratedReplicaName checks whether provided name is auto-generated
-func IsAutoGeneratedReplicaName(name string, replica *chop.ChiReplica, index int) bool {
+func IsAutoGeneratedReplicaName(name string, replica *api.ChiReplica, index int) bool {
 	return name == CreateReplicaName(replica, index)
 }
 
 // CreateHostName returns a name of a host
-func CreateHostName(host *chop.ChiHost, shard *chop.ChiShard, shardIndex int, replica *chop.ChiReplica, replicaIndex int) string {
+func CreateHostName(host *api.ChiHost, shard *api.ChiShard, shardIndex int, replica *api.ChiReplica, replicaIndex int) string {
 	return fmt.Sprintf("%s-%s", shard.Name, replica.Name)
 }
 
@@ -504,7 +504,7 @@ func CreateHostName(host *chop.ChiHost, shard *chop.ChiShard, shardIndex int, re
 // 2. statements like SYSTEM DROP REPLICA
 // any other places
 // Function operations are based on .Spec.Defaults.ReplicasUseFQDN
-func CreateInstanceHostname(host *chop.ChiHost) string {
+func CreateInstanceHostname(host *api.ChiHost) string {
 	if host.GetCHI().Spec.Defaults.ReplicasUseFQDN.IsTrue() {
 		// In case .Spec.Defaults.ReplicasUseFQDN is set replicas would use FQDN pod hostname,
 		// otherwise hostname+service name (unique within namespace) would be used
@@ -518,10 +518,10 @@ func CreateInstanceHostname(host *chop.ChiHost) string {
 // IsAutoGeneratedHostName checks whether name is auto-generated
 func IsAutoGeneratedHostName(
 	name string,
-	host *chop.ChiHost,
-	shard *chop.ChiShard,
+	host *api.ChiHost,
+	shard *api.ChiShard,
 	shardIndex int,
-	replica *chop.ChiReplica,
+	replica *api.ChiReplica,
 	replicaIndex int,
 ) bool {
 	if name == CreateHostName(host, shard, shardIndex, replica, replicaIndex) {
@@ -548,7 +548,7 @@ func IsAutoGeneratedHostName(
 }
 
 // CreateStatefulSetName creates a name of a StatefulSet for ClickHouse instance
-func CreateStatefulSetName(host *chop.ChiHost) string {
+func CreateStatefulSetName(host *api.ChiHost) string {
 	// Name can be generated either from default name pattern,
 	// or from personal name pattern provided in PodTemplate
 
@@ -569,7 +569,7 @@ func CreateStatefulSetName(host *chop.ChiHost) string {
 }
 
 // CreateStatefulSetServiceName returns a name of a StatefulSet-related Service for ClickHouse instance
-func CreateStatefulSetServiceName(host *chop.ChiHost) string {
+func CreateStatefulSetServiceName(host *api.ChiHost) string {
 	// Name can be generated either from default name pattern,
 	// or from personal name pattern provided in ServiceTemplate
 
@@ -592,14 +592,14 @@ func CreateStatefulSetServiceName(host *chop.ChiHost) string {
 
 // CreatePodHostname returns a hostname of a Pod of a ClickHouse instance.
 // Is supposed to be used where network connection to a Pod is required.
 // NB: right now Pod's hostname points to a Service, through which Pod can be accessed.
-func CreatePodHostname(host *chop.ChiHost) string {
+func CreatePodHostname(host *api.ChiHost) string {
 	// Do not use Pod own hostname - point to appropriate StatefulSet's Service
 	return CreateStatefulSetServiceName(host)
 }
 
 // createPodFQDN creates a fully qualified domain name of a pod
 // ss-1eb454-2-0.my-dev-domain.svc.cluster.local
-func createPodFQDN(host *chop.ChiHost) string {
+func createPodFQDN(host *api.ChiHost) string {
 	// FQDN can be generated either from default pattern,
 	// or from personal pattern provided
 
@@ -620,8 +620,8 @@ func createPodFQDN(host *chop.ChiHost) string {
 }
 
 // createPodFQDNsOfCluster creates fully qualified domain names of all pods in a cluster
-func createPodFQDNsOfCluster(cluster *chop.Cluster) (fqdns []string) {
-	cluster.WalkHosts(func(host *chop.ChiHost) error {
+func createPodFQDNsOfCluster(cluster *api.Cluster) (fqdns []string) {
+	cluster.WalkHosts(func(host *api.ChiHost) error {
 		fqdns = append(fqdns, createPodFQDN(host))
 		return nil
 	})
@@ -629,8 +629,8 @@ func createPodFQDNsOfCluster(cluster *chop.Cluster) (fqdns []string) {
 }
 
 // createPodFQDNsOfShard creates fully qualified domain names of all pods in a shard
-func createPodFQDNsOfShard(shard *chop.ChiShard) (fqdns []string) {
-	shard.WalkHosts(func(host *chop.ChiHost) error {
+func createPodFQDNsOfShard(shard *api.ChiShard) (fqdns []string) {
+	shard.WalkHosts(func(host *api.ChiHost) error {
 		fqdns = append(fqdns, createPodFQDN(host))
 		return nil
 	})
@@ -638,8 +638,8 @@ func createPodFQDNsOfShard(shard *chop.ChiShard) (fqdns []string) {
 }
 
 // createPodFQDNsOfCHI creates fully qualified domain names of all pods in a CHI
-func createPodFQDNsOfCHI(chi *chop.ClickHouseInstallation) (fqdns []string) {
-	chi.WalkHosts(func(host *chop.ChiHost) error {
+func createPodFQDNsOfCHI(chi *api.ClickHouseInstallation) (fqdns []string) {
+	chi.WalkHosts(func(host *api.ChiHost) error {
 		fqdns = append(fqdns, createPodFQDN(host))
 		return nil
 	})
@@ -647,7 +647,7 @@ func createPodFQDNsOfCHI(chi *chop.ClickHouseInstallation) (fqdns []string) {
 }
 
 // CreateFQDN is a wrapper over pod FQDN function
-func CreateFQDN(host *chop.ChiHost) string {
+func CreateFQDN(host *api.ChiHost) string {
 	return createPodFQDN(host)
 }
 
@@ -657,25 +657,25 @@ func CreateFQDN(host *chop.ChiHost) string {
 // excludeSelf specifies whether to exclude the host itself from the result. Applicable only in case obj is a host
 func CreateFQDNs(obj interface{}, scope interface{}, excludeSelf bool) []string {
 	switch typed := obj.(type) {
-	case *chop.ClickHouseInstallation:
+	case *api.ClickHouseInstallation:
 		return createPodFQDNsOfCHI(typed)
-	case *chop.Cluster:
+	case *api.Cluster:
 		return createPodFQDNsOfCluster(typed)
-	case *chop.ChiShard:
+	case *api.ChiShard:
 		return createPodFQDNsOfShard(typed)
-	case *chop.ChiHost:
+	case *api.ChiHost:
 		self := ""
 		if excludeSelf {
 			self = createPodFQDN(typed)
 		}
 		switch scope.(type) {
-		case chop.ChiHost:
+		case api.ChiHost:
 			return util.RemoveFromArray(self, []string{createPodFQDN(typed)})
-		case chop.ChiShard:
+		case api.ChiShard:
 			return util.RemoveFromArray(self, createPodFQDNsOfShard(typed.GetShard()))
-		case chop.Cluster:
+		case api.Cluster:
 			return util.RemoveFromArray(self, createPodFQDNsOfCluster(typed.GetCluster()))
-		case chop.ClickHouseInstallation:
+		case api.ClickHouseInstallation:
 			return util.RemoveFromArray(self, createPodFQDNsOfCHI(typed.GetCHI()))
 		}
 	}
@@ -685,7 +685,7 @@ func CreateFQDNs(obj interface{}, scope interface{}, excludeSelf bool) []string
 
 // CreatePodHostnameRegexp creates pod hostname regexp.
 // For example, `template` can be defined in operator config:
 // HostRegexpTemplate: chi-{chi}-[^.]+\\d+-\\d+\\.{namespace}.svc.cluster.local$"
-func CreatePodHostnameRegexp(chi *chop.ClickHouseInstallation, template string) string {
+func CreatePodHostnameRegexp(chi *api.ClickHouseInstallation, template string) string {
 	return macro(chi).Line(template)
 }
 
@@ -695,8 +695,8 @@ func CreatePodName(obj interface{}) string {
 	case *apps.StatefulSet:
 		statefulSet := obj.(*apps.StatefulSet)
 		return fmt.Sprintf(podNamePattern, statefulSet.Name)
-	case *chop.ChiHost:
-		host := obj.(*chop.ChiHost)
+	case *api.ChiHost:
+		host := obj.(*api.ChiHost)
 		return fmt.Sprintf(podNamePattern, CreateStatefulSetName(host))
 	}
 	return "unknown-type"
@@ -706,14 +706,14 @@ func CreatePodName(obj interface{}) string {
 // obj specifies source object to create names from
 func CreatePodNames(obj interface{}) []string {
 	switch typed := obj.(type) {
-	case *chop.ClickHouseInstallation:
+	case *api.ClickHouseInstallation:
 		return createPodNamesOfCHI(typed)
-	case *chop.Cluster:
+	case *api.Cluster:
 		return createPodNamesOfCluster(typed)
-	case *chop.ChiShard:
+	case *api.ChiShard:
 		return createPodNamesOfShard(typed)
 	case
-		*chop.ChiHost,
+		*api.ChiHost,
 		*apps.StatefulSet:
 		return []string{
 			CreatePodName(typed),
@@ -723,8 +723,8 @@ func CreatePodNames(obj interface{}) []string {
 }
 
 // createPodNamesOfCluster creates pod names of all pods in a cluster
-func createPodNamesOfCluster(cluster *chop.Cluster) (names []string) {
-	cluster.WalkHosts(func(host *chop.ChiHost) error {
+func createPodNamesOfCluster(cluster *api.Cluster) (names []string) {
+	cluster.WalkHosts(func(host *api.ChiHost) error {
 		names = append(names, CreatePodName(host))
 		return nil
 	})
@@ -732,8 +732,8 @@ func createPodNamesOfCluster(cluster *chop.Cluster) (names []string) {
 }
 
 // createPodNamesOfShard creates pod names of all pods in a shard
-func createPodNamesOfShard(shard *chop.ChiShard) (names []string) {
-	shard.WalkHosts(func(host *chop.ChiHost) error {
+func createPodNamesOfShard(shard *api.ChiShard) (names []string) {
+	shard.WalkHosts(func(host *api.ChiHost) error {
 		names = append(names, CreatePodName(host))
 		return nil
 	})
@@ -741,8 +741,8 @@ func createPodNamesOfShard(shard *chop.ChiShard) (names []string) {
 }
 
 // createPodNamesOfCHI creates fully qualified domain names of all pods in a CHI
-func createPodNamesOfCHI(chi *chop.ClickHouseInstallation) (names []string) {
-	chi.WalkHosts(func(host *chop.ChiHost) error {
+func createPodNamesOfCHI(chi *api.ClickHouseInstallation) (names []string) {
+	chi.WalkHosts(func(host *api.ChiHost) error {
 		names = append(names, CreatePodName(host))
 		return nil
 	})
@@ -750,12 +750,12 @@ func createPodNamesOfCHI(chi *chop.ClickHouseInstallation) (names []string) {
 }
 
 // CreatePVCNameByVolumeClaimTemplate creates PVC name
-func CreatePVCNameByVolumeClaimTemplate(host *chop.ChiHost, volumeClaimTemplate *chop.ChiVolumeClaimTemplate) string {
+func CreatePVCNameByVolumeClaimTemplate(host *api.ChiHost, volumeClaimTemplate *api.ChiVolumeClaimTemplate) string {
 	return createPVCName(host, volumeClaimTemplate.Name)
 }
 
 // CreatePVCNameByVolumeMount creates PVC name
-func CreatePVCNameByVolumeMount(host *chop.ChiHost, volumeMount *coreV1.VolumeMount) (string, bool) {
+func CreatePVCNameByVolumeMount(host *api.ChiHost, volumeMount *core.VolumeMount) (string, bool) {
 	volumeClaimTemplate, ok := GetVolumeClaimTemplate(host, volumeMount)
 	if !ok {
 		// Unable to find VolumeClaimTemplate related to this volumeMount.
@@ -766,12 +766,12 @@ func CreatePVCNameByVolumeMount(host *chop.ChiHost, volumeMount *coreV1.VolumeMo
 }
 
 // createPVCName is an internal function
-func createPVCName(host *chop.ChiHost, volumeMountName string) string {
+func createPVCName(host *api.ChiHost, volumeMountName string) string {
 	return volumeMountName + "-" + CreatePodName(host)
 }
 
 // CreateClusterAutoSecretName creates Secret name where auto-generated secret is kept
-func CreateClusterAutoSecretName(cluster *chop.Cluster) string {
+func CreateClusterAutoSecretName(cluster *api.Cluster) string {
 	if cluster.Name == "" {
 		return fmt.Sprintf(
 			"%s-auto-secret",
diff --git a/pkg/model/chi/normalizer.go b/pkg/model/chi/normalizer.go
index 613e51951..8ab0a853e 100644
--- a/pkg/model/chi/normalizer.go
+++ b/pkg/model/chi/normalizer.go
@@ -22,14 +22,14 @@ import (
 	"sort"
 	"strings"
 
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	core "k8s.io/api/core/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 	kube "k8s.io/client-go/kubernetes"
 
 	"github.com/google/uuid"
 
 	log "github.com/altinity/clickhouse-operator/pkg/announcer"
-	chiV1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+	api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
 	"github.com/altinity/clickhouse-operator/pkg/chop"
 	"github.com/altinity/clickhouse-operator/pkg/util"
 )
@@ -37,9 +37,9 @@ import (
 // NormalizerContext specifies CHI-related normalization context
 type NormalizerContext struct {
 	// start specifies start CHI from which normalization has started
-	start *chiV1.ClickHouseInstallation
+	start *api.ClickHouseInstallation
 	// chi specifies current CHI being normalized
-	chi *chiV1.ClickHouseInstallation
+	chi *api.ClickHouseInstallation
 	// options specifies normalization options
 	options *NormalizerOptions
 }
@@ -80,20 +80,20 @@ func NewNormalizer(kubeClient kube.Interface) *Normalizer {
 	}
 }
 
-func newCHI() *chiV1.ClickHouseInstallation {
-	return &chiV1.ClickHouseInstallation{
-		TypeMeta: metav1.TypeMeta{
-			Kind:       chiV1.ClickHouseInstallationCRDResourceKind,
-			APIVersion: chiV1.SchemeGroupVersion.String(),
+func newCHI() *api.ClickHouseInstallation {
+	return &api.ClickHouseInstallation{
+		TypeMeta: meta.TypeMeta{
+			Kind:       api.ClickHouseInstallationCRDResourceKind,
+			APIVersion: api.SchemeGroupVersion.String(),
 		},
 	}
 }
 
 // CreateTemplatedCHI produces ready-to-use CHI object
 func (n *Normalizer) CreateTemplatedCHI(
-	chi *chiV1.ClickHouseInstallation,
+	chi *api.ClickHouseInstallation,
 	options *NormalizerOptions,
-) (*chiV1.ClickHouseInstallation, error) {
+) (*api.ClickHouseInstallation, error) {
 	// New CHI starts with new context
 	n.ctx = NewNormalizerContext(options)
 
@@ -108,12 +108,12 @@ func (n *Normalizer) CreateTemplatedCHI(
 	n.applyCHITemplates(chi)
 
 	// After all templates applied, place provided CHI on top of the whole stack
-	n.ctx.chi.MergeFrom(chi, chiV1.MergeTypeOverrideByNonEmptyValues)
+	n.ctx.chi.MergeFrom(chi, api.MergeTypeOverrideByNonEmptyValues)
 
 	return n.normalize()
 }
 
-func (n *Normalizer) normalizeStartCHI(chi *chiV1.ClickHouseInstallation) *chiV1.ClickHouseInstallation {
+func (n *Normalizer) normalizeStartCHI(chi *api.ClickHouseInstallation) *api.ClickHouseInstallation {
 	if chi == nil {
 		// No CHI specified - meaning we are building over provided 'empty' CHI with no clusters inside
 		chi = newCHI()
@@ -125,7 +125,7 @@ func (n *Normalizer) normalizeStartCHI(chi *chiV1.ClickHouseInstallation) *chiV1
 	return chi
 }
 
-func (n *Normalizer) createBaseCHI() *chiV1.ClickHouseInstallation {
+func (n *Normalizer) createBaseCHI() *api.ClickHouseInstallation {
 	// What base should be used to create CHI
 	if chop.Config().Template.CHI.Runtime.Template == nil {
 		// No template specified - start with clear page
@@ -137,16 +137,16 @@ func (n *Normalizer) createBaseCHI() *chiV1.ClickHouseInstallation {
 }
 
 // prepareListOfCHITemplates prepares list of CHI templates to be used by CHI
-func (n *Normalizer) prepareListOfCHITemplates(chi *chiV1.ClickHouseInstallation) []chiV1.ChiUseTemplate {
+func (n *Normalizer) prepareListOfCHITemplates(chi *api.ClickHouseInstallation) []api.ChiUseTemplate {
 	// useTemplates specifies list of templates to be applied to the CHI
-	var useTemplates []chiV1.ChiUseTemplate
+	var useTemplates []api.ChiUseTemplate
 
 	// 1. Get list of auto templates available
 	if autoTemplates := chop.Config().GetAutoTemplates(); len(autoTemplates) > 0 {
 		log.V(1).M(chi).F().Info("Found auto-templates num: %d", len(autoTemplates))
 		for _, template := range autoTemplates {
 			log.V(1).M(chi).F().Info("Adding auto-template to list of applicable templates: %s/%s ", template.Namespace, template.Name)
-			useTemplates = append(useTemplates, chiV1.ChiUseTemplate{
+			useTemplates = append(useTemplates, api.ChiUseTemplate{
 				Name:      template.Name,
 				Namespace: template.Namespace,
 				UseType:   useTypeMerge,
@@ -170,7 +170,7 @@ func (n *Normalizer) prepareListOfCHITemplates(chi *chiV1.ClickHouseInstallation
 }
 
 // applyCHITemplates applies CHI templates over n.ctx.chi
-func (n *Normalizer) applyCHITemplates(chi *chiV1.ClickHouseInstallation) {
+func (n *Normalizer) applyCHITemplates(chi *api.ClickHouseInstallation) {
 	// At this moment n.chi is either newly created 'empty' CHI or a system-wide template
 
 	// useTemplates specifies list of templates to be applied to the CHI
@@ -212,9 +212,9 @@ func (n *Normalizer) applyCHITemplates(chi *chiV1.ClickHouseInstallation) {
 	log.V(1).M(chi).F().Info("Used templates count: %d", n.ctx.chi.EnsureStatus().GetUsedTemplatesCount())
 }
 
-func (n *Normalizer) mergeCHIFromTemplate(chi, template *chiV1.ClickHouseInstallation) *chiV1.ClickHouseInstallation {
+func (n *Normalizer) mergeCHIFromTemplate(chi, template *api.ClickHouseInstallation) *api.ClickHouseInstallation {
 	// Merge template's Spec over CHI's Spec
-	(&chi.Spec).MergeFrom(&template.Spec, chiV1.MergeTypeOverrideByNonEmptyValues)
+	(&chi.Spec).MergeFrom(&template.Spec, api.MergeTypeOverrideByNonEmptyValues)
 
 	// Merge template's Labels over CHI's Labels
 	chi.Labels = util.MergeStringMapsOverwrite(
@@ -240,7 +240,7 @@ func (n *Normalizer) mergeCHIFromTemplate(chi, template *chiV1.ClickHouseInstall
 
 // normalize normalizes whole CHI.
 // Returns normalized CHI
-func (n *Normalizer) normalize() (*chiV1.ClickHouseInstallation, error) {
+func (n *Normalizer) normalize() (*api.ClickHouseInstallation, error) {
 	// Walk over ChiSpec datatype fields
 	n.ctx.chi.Spec.TaskID = n.normalizeTaskID(n.ctx.chi.Spec.TaskID)
 	n.ctx.chi.Spec.UseTemplates = n.normalizeUseTemplates(n.ctx.chi.Spec.UseTemplates)
@@ -265,7 +265,7 @@ func (n *Normalizer) normalize() (*chiV1.ClickHouseInstallation, error) {
 func (n *Normalizer) finalizeCHI() {
 	n.ctx.chi.FillSelfCalculatedAddressInfo()
 	n.ctx.chi.FillCHIPointer()
-	n.ctx.chi.WalkHosts(func(host *chiV1.ChiHost) error {
+	n.ctx.chi.WalkHosts(func(host *api.ChiHost) error {
 		hostTemplate := n.getHostTemplate(host)
 		hostApplyHostTemplate(host, hostTemplate)
 		return nil
@@ -275,7 +275,7 @@ func (n *Normalizer) finalizeCHI() {
 
 // fillCHIAddressInfo
 func (n *Normalizer) fillCHIAddressInfo() {
-	n.ctx.chi.WalkHosts(func(host *chiV1.ChiHost) error {
+	n.ctx.chi.WalkHosts(func(host *api.ChiHost) error {
 		host.Address.StatefulSet = CreateStatefulSetName(host)
 		host.Address.FQDN = CreateFQDN(host)
 		return nil
@@ -283,7 +283,7 @@ func (n *Normalizer) fillCHIAddressInfo() {
 }
 
 // getHostTemplate gets Host Template to be used to normalize Host
-func (n *Normalizer) getHostTemplate(host *chiV1.ChiHost) *chiV1.ChiHostTemplate {
+func (n *Normalizer) getHostTemplate(host *api.ChiHost) *api.ChiHostTemplate {
 	statefulSetName := CreateStatefulSetName(host)
 
 	// Which host template would be used - either explicitly defined in or a default one
@@ -317,7 +317,7 @@ func (n *Normalizer) getHostTemplate(host *chiV1.ChiHost) *chiV1.ChiHostTemplate
 }
 
 // hostApplyHostTemplate
-func hostApplyHostTemplate(host *chiV1.ChiHost, template *chiV1.ChiHostTemplate) {
+func hostApplyHostTemplate(host *api.ChiHost, template *api.ChiHostTemplate) {
 	if host.GetName() == "" {
 		host.Name = template.Spec.Name
 	}
@@ -327,54 +327,54 @@ func hostApplyHostTemplate(host *chiV1.ChiHost, template *chiV1.ChiHostTemplate)
 
 	for _, portDistribution := range template.PortDistribution {
 		switch portDistribution.Type {
-		case chiV1.PortDistributionUnspecified:
-			if chiV1.IsPortUnassigned(host.TCPPort) {
+		case api.PortDistributionUnspecified:
+			if api.IsPortUnassigned(host.TCPPort) {
 				host.TCPPort = template.Spec.TCPPort
 			}
-			if chiV1.IsPortUnassigned(host.TLSPort) {
+			if api.IsPortUnassigned(host.TLSPort) {
 				host.TLSPort = template.Spec.TLSPort
 			}
-			if chiV1.IsPortUnassigned(host.HTTPPort) {
+			if api.IsPortUnassigned(host.HTTPPort) {
 				host.HTTPPort = template.Spec.HTTPPort
 			}
-			if chiV1.IsPortUnassigned(host.HTTPSPort) {
+			if api.IsPortUnassigned(host.HTTPSPort) {
 				host.HTTPSPort = template.Spec.HTTPSPort
 			}
-			if chiV1.IsPortUnassigned(host.InterserverHTTPPort) {
+			if api.IsPortUnassigned(host.InterserverHTTPPort) {
 				host.InterserverHTTPPort = template.Spec.InterserverHTTPPort
 			}
-		case chiV1.PortDistributionClusterScopeIndex:
-			if chiV1.IsPortUnassigned(host.TCPPort) {
+		case api.PortDistributionClusterScopeIndex:
+			if api.IsPortUnassigned(host.TCPPort) {
 				base := chDefaultTCPPortNumber
-				if chiV1.IsPortAssigned(template.Spec.TCPPort) {
+				if api.IsPortAssigned(template.Spec.TCPPort) {
 					base = template.Spec.TCPPort
 				}
 				host.TCPPort = base + int32(host.Address.ClusterScopeIndex)
 			}
-			if chiV1.IsPortUnassigned(host.TLSPort) {
+			if api.IsPortUnassigned(host.TLSPort) {
 				base := chDefaultTLSPortNumber
-				if chiV1.IsPortAssigned(template.Spec.TLSPort) {
+				if api.IsPortAssigned(template.Spec.TLSPort) {
 					base = template.Spec.TLSPort
 				}
 				host.TLSPort = base + int32(host.Address.ClusterScopeIndex)
 			}
- if chiV1.IsPortUnassigned(host.HTTPPort) { + if api.IsPortUnassigned(host.HTTPPort) { base := chDefaultHTTPPortNumber - if chiV1.IsPortAssigned(template.Spec.HTTPPort) { + if api.IsPortAssigned(template.Spec.HTTPPort) { base = template.Spec.HTTPPort } host.HTTPPort = base + int32(host.Address.ClusterScopeIndex) } - if chiV1.IsPortUnassigned(host.HTTPSPort) { + if api.IsPortUnassigned(host.HTTPSPort) { base := chDefaultHTTPSPortNumber - if chiV1.IsPortAssigned(template.Spec.HTTPSPort) { + if api.IsPortAssigned(template.Spec.HTTPSPort) { base = template.Spec.HTTPSPort } host.HTTPSPort = base + int32(host.Address.ClusterScopeIndex) } - if chiV1.IsPortUnassigned(host.InterserverHTTPPort) { + if api.IsPortUnassigned(host.InterserverHTTPPort) { base := chDefaultInterserverHTTPPortNumber - if chiV1.IsPortAssigned(template.Spec.InterserverHTTPPort) { + if api.IsPortAssigned(template.Spec.InterserverHTTPPort) { base = template.Spec.InterserverHTTPPort } host.InterserverHTTPPort = base + int32(host.Address.ClusterScopeIndex) @@ -388,7 +388,7 @@ func hostApplyHostTemplate(host *chiV1.ChiHost, template *chiV1.ChiHostTemplate) } // hostApplyPortsFromSettings -func hostApplyPortsFromSettings(host *chiV1.ChiHost) { +func hostApplyPortsFromSettings(host *api.ChiHost) { // Use host personal settings at first ensurePortValuesFromSettings(host, host.GetSettings(), false) // Fallback to common settings @@ -396,14 +396,14 @@ func hostApplyPortsFromSettings(host *chiV1.ChiHost) { } // ensurePortValuesFromSettings fetches port spec from settings, if any provided -func ensurePortValuesFromSettings(host *chiV1.ChiHost, settings *chiV1.Settings, final bool) { +func ensurePortValuesFromSettings(host *api.ChiHost, settings *api.Settings, final bool) { // For intermittent (non-final) setup fallback values should be from "MustBeAssignedLater" family, // because this is not final setup (just intermittent) and all these ports may be overwritten later - fallbackTCPPort := chiV1.PortUnassigned() - fallbackTLSPort := chiV1.PortUnassigned() - fallbackHTTPPort := chiV1.PortUnassigned() - fallbackHTTPSPort := chiV1.PortUnassigned() - fallbackInterserverHTTPPort := chiV1.PortUnassigned() + fallbackTCPPort := api.PortUnassigned() + fallbackTLSPort := api.PortUnassigned() + fallbackHTTPPort := api.PortUnassigned() + fallbackHTTPSPort := api.PortUnassigned() + fallbackInterserverHTTPPort := api.PortUnassigned() if final { // This is final setup and we need to assign real numbers to ports @@ -418,11 +418,11 @@ func ensurePortValuesFromSettings(host *chiV1.ChiHost, settings *chiV1.Settings, fallbackInterserverHTTPPort = chDefaultInterserverHTTPPortNumber } - host.TCPPort = chiV1.EnsurePortValue(host.TCPPort, settings.GetTCPPort(), fallbackTCPPort) - host.TLSPort = chiV1.EnsurePortValue(host.TLSPort, settings.GetTCPPortSecure(), fallbackTLSPort) - host.HTTPPort = chiV1.EnsurePortValue(host.HTTPPort, settings.GetHTTPPort(), fallbackHTTPPort) - host.HTTPSPort = chiV1.EnsurePortValue(host.HTTPSPort, settings.GetHTTPSPort(), fallbackHTTPSPort) - host.InterserverHTTPPort = chiV1.EnsurePortValue(host.InterserverHTTPPort, settings.GetInterserverHTTPPort(), fallbackInterserverHTTPPort) + host.TCPPort = api.EnsurePortValue(host.TCPPort, settings.GetTCPPort(), fallbackTCPPort) + host.TLSPort = api.EnsurePortValue(host.TLSPort, settings.GetTCPPortSecure(), fallbackTLSPort) + host.HTTPPort = api.EnsurePortValue(host.HTTPPort, settings.GetHTTPPort(), fallbackHTTPPort) + host.HTTPSPort = api.EnsurePortValue(host.HTTPSPort, 
settings.GetHTTPSPort(), fallbackHTTPSPort) + host.InterserverHTTPPort = api.EnsurePortValue(host.InterserverHTTPPort, settings.GetInterserverHTTPPort(), fallbackInterserverHTTPPort) } // fillStatus fills .status section of a CHI with values based on current CHI @@ -430,12 +430,12 @@ func (n *Normalizer) fillStatus() { endpoint := CreateCHIServiceFQDN(n.ctx.chi) pods := make([]string, 0) fqdns := make([]string, 0) - n.ctx.chi.WalkHosts(func(host *chiV1.ChiHost) error { + n.ctx.chi.WalkHosts(func(host *api.ChiHost) error { pods = append(pods, CreatePodName(host)) fqdns = append(fqdns, CreateFQDN(host)) return nil }) - ip, _ := chop.Get().ConfigManager.GetRuntimeParam(chiV1.OPERATOR_POD_IP) + ip, _ := chop.Get().ConfigManager.GetRuntimeParam(api.OPERATOR_POD_IP) n.ctx.chi.FillStatus(endpoint, pods, fqdns, ip) } @@ -452,22 +452,22 @@ func (n *Normalizer) normalizeTaskID(taskID *string) *string { } // normalizeStop normalizes .spec.stop -func (n *Normalizer) normalizeStop(stop *chiV1.StringBool) *chiV1.StringBool { +func (n *Normalizer) normalizeStop(stop *api.StringBool) *api.StringBool { if stop.IsValid() { // It is bool, use as it is return stop } // In case it is unknown value - just use set it to false - return chiV1.NewStringBool(false) + return api.NewStringBool(false) } // normalizeRestart normalizes .spec.restart func (n *Normalizer) normalizeRestart(restart string) string { switch strings.ToLower(restart) { - case strings.ToLower(chiV1.RestartRollingUpdate): + case strings.ToLower(api.RestartRollingUpdate): // Known value, overwrite it to ensure case-ness - return chiV1.RestartRollingUpdate + return api.RestartRollingUpdate } // In case it is unknown value - just use empty @@ -475,14 +475,14 @@ func (n *Normalizer) normalizeRestart(restart string) string { } // normalizeTroubleshoot normalizes .spec.stop -func (n *Normalizer) normalizeTroubleshoot(troubleshoot *chiV1.StringBool) *chiV1.StringBool { +func (n *Normalizer) normalizeTroubleshoot(troubleshoot *api.StringBool) *api.StringBool { if troubleshoot.IsValid() { // It is bool, use as it is return troubleshoot } // In case it is unknown value - just use set it to false - return chiV1.NewStringBool(false) + return api.NewStringBool(false) } // normalizeNamespaceDomainPattern normalizes .spec.namespaceDomainPattern @@ -494,32 +494,32 @@ func (n *Normalizer) normalizeNamespaceDomainPattern(namespaceDomainPattern stri } // normalizeDefaults normalizes .spec.defaults -func (n *Normalizer) normalizeDefaults(defaults *chiV1.ChiDefaults) *chiV1.ChiDefaults { +func (n *Normalizer) normalizeDefaults(defaults *api.ChiDefaults) *api.ChiDefaults { if defaults == nil { - defaults = chiV1.NewChiDefaults() + defaults = api.NewChiDefaults() } // Set defaults for CHI object properties defaults.ReplicasUseFQDN = defaults.ReplicasUseFQDN.Normalize(false) // Ensure field if defaults.DistributedDDL == nil { - //defaults.DistributedDDL = chiV1.NewChiDistributedDDL() + //defaults.DistributedDDL = api.NewChiDistributedDDL() } // Ensure field if defaults.StorageManagement == nil { - defaults.StorageManagement = chiV1.NewStorageManagement() + defaults.StorageManagement = api.NewStorageManagement() } // Ensure field if defaults.Templates == nil { - //defaults.Templates = chiV1.NewChiTemplateNames() + //defaults.Templates = api.NewChiTemplateNames() } defaults.Templates.HandleDeprecatedFields() return defaults } // normalizeConfiguration normalizes .spec.configuration -func (n *Normalizer) normalizeConfiguration(conf *chiV1.Configuration) *chiV1.Configuration 
{ +func (n *Normalizer) normalizeConfiguration(conf *api.Configuration) *api.Configuration { if conf == nil { - conf = chiV1.NewConfiguration() + conf = api.NewConfiguration() } conf.Zookeeper = n.normalizeConfigurationZookeeper(conf.Zookeeper) conf.Users = n.normalizeConfigurationUsers(conf.Users) @@ -532,9 +532,9 @@ func (n *Normalizer) normalizeConfiguration(conf *chiV1.Configuration) *chiV1.Co } // normalizeTemplates normalizes .spec.templates -func (n *Normalizer) normalizeTemplates(templates *chiV1.ChiTemplates) *chiV1.ChiTemplates { +func (n *Normalizer) normalizeTemplates(templates *api.ChiTemplates) *api.ChiTemplates { if templates == nil { - //templates = chiV1.NewChiTemplates() + //templates = api.NewChiTemplates() return nil } @@ -562,64 +562,64 @@ func (n *Normalizer) normalizeTemplates(templates *chiV1.ChiTemplates) *chiV1.Ch } // normalizeTemplating normalizes .spec.templating -func (n *Normalizer) normalizeTemplating(templating *chiV1.ChiTemplating) *chiV1.ChiTemplating { +func (n *Normalizer) normalizeTemplating(templating *api.ChiTemplating) *api.ChiTemplating { if templating == nil { - templating = chiV1.NewChiTemplating() + templating = api.NewChiTemplating() } switch strings.ToLower(templating.GetPolicy()) { - case strings.ToLower(chiV1.TemplatingPolicyAuto): + case strings.ToLower(api.TemplatingPolicyAuto): // Known value, overwrite it to ensure case-ness - templating.SetPolicy(chiV1.TemplatingPolicyAuto) - case strings.ToLower(chiV1.TemplatingPolicyManual): + templating.SetPolicy(api.TemplatingPolicyAuto) + case strings.ToLower(api.TemplatingPolicyManual): // Known value, overwrite it to ensure case-ness - templating.SetPolicy(chiV1.TemplatingPolicyManual) + templating.SetPolicy(api.TemplatingPolicyManual) default: // Unknown value, fallback to default - templating.SetPolicy(chiV1.TemplatingPolicyManual) + templating.SetPolicy(api.TemplatingPolicyManual) } return templating } // normalizeReconciling normalizes .spec.reconciling -func (n *Normalizer) normalizeReconciling(reconciling *chiV1.ChiReconciling) *chiV1.ChiReconciling { +func (n *Normalizer) normalizeReconciling(reconciling *api.ChiReconciling) *api.ChiReconciling { if reconciling == nil { - reconciling = chiV1.NewChiReconciling().SetDefaults() + reconciling = api.NewChiReconciling().SetDefaults() } switch strings.ToLower(reconciling.GetPolicy()) { - case strings.ToLower(chiV1.ReconcilingPolicyWait): + case strings.ToLower(api.ReconcilingPolicyWait): // Known value, overwrite it to ensure case-ness - reconciling.SetPolicy(chiV1.ReconcilingPolicyWait) - case strings.ToLower(chiV1.ReconcilingPolicyNoWait): + reconciling.SetPolicy(api.ReconcilingPolicyWait) + case strings.ToLower(api.ReconcilingPolicyNoWait): // Known value, overwrite it to ensure case-ness - reconciling.SetPolicy(chiV1.ReconcilingPolicyNoWait) + reconciling.SetPolicy(api.ReconcilingPolicyNoWait) default: // Unknown value, fallback to default - reconciling.SetPolicy(chiV1.ReconcilingPolicyUnspecified) + reconciling.SetPolicy(api.ReconcilingPolicyUnspecified) } reconciling.Cleanup = n.normalizeReconcilingCleanup(reconciling.Cleanup) return reconciling } -func (n *Normalizer) normalizeReconcilingCleanup(cleanup *chiV1.ChiCleanup) *chiV1.ChiCleanup { +func (n *Normalizer) normalizeReconcilingCleanup(cleanup *api.ChiCleanup) *api.ChiCleanup { if cleanup == nil { - cleanup = chiV1.NewChiCleanup() + cleanup = api.NewChiCleanup() } if cleanup.UnknownObjects == nil { cleanup.UnknownObjects = cleanup.DefaultUnknownObjects() } - 
n.normalizeCleanup(&cleanup.UnknownObjects.StatefulSet, chiV1.ObjectsCleanupDelete) - n.normalizeCleanup(&cleanup.UnknownObjects.PVC, chiV1.ObjectsCleanupDelete) - n.normalizeCleanup(&cleanup.UnknownObjects.ConfigMap, chiV1.ObjectsCleanupDelete) - n.normalizeCleanup(&cleanup.UnknownObjects.Service, chiV1.ObjectsCleanupDelete) + n.normalizeCleanup(&cleanup.UnknownObjects.StatefulSet, api.ObjectsCleanupDelete) + n.normalizeCleanup(&cleanup.UnknownObjects.PVC, api.ObjectsCleanupDelete) + n.normalizeCleanup(&cleanup.UnknownObjects.ConfigMap, api.ObjectsCleanupDelete) + n.normalizeCleanup(&cleanup.UnknownObjects.Service, api.ObjectsCleanupDelete) if cleanup.ReconcileFailedObjects == nil { cleanup.ReconcileFailedObjects = cleanup.DefaultReconcileFailedObjects() } - n.normalizeCleanup(&cleanup.ReconcileFailedObjects.StatefulSet, chiV1.ObjectsCleanupRetain) - n.normalizeCleanup(&cleanup.ReconcileFailedObjects.PVC, chiV1.ObjectsCleanupRetain) - n.normalizeCleanup(&cleanup.ReconcileFailedObjects.ConfigMap, chiV1.ObjectsCleanupRetain) - n.normalizeCleanup(&cleanup.ReconcileFailedObjects.Service, chiV1.ObjectsCleanupRetain) + n.normalizeCleanup(&cleanup.ReconcileFailedObjects.StatefulSet, api.ObjectsCleanupRetain) + n.normalizeCleanup(&cleanup.ReconcileFailedObjects.PVC, api.ObjectsCleanupRetain) + n.normalizeCleanup(&cleanup.ReconcileFailedObjects.ConfigMap, api.ObjectsCleanupRetain) + n.normalizeCleanup(&cleanup.ReconcileFailedObjects.Service, api.ObjectsCleanupRetain) return cleanup } @@ -628,12 +628,12 @@ func (n *Normalizer) normalizeCleanup(str *string, value string) { return } switch strings.ToLower(*str) { - case strings.ToLower(chiV1.ObjectsCleanupRetain): + case strings.ToLower(api.ObjectsCleanupRetain): // Known value, overwrite it to ensure case-ness - *str = chiV1.ObjectsCleanupRetain - case strings.ToLower(chiV1.ObjectsCleanupDelete): + *str = api.ObjectsCleanupRetain + case strings.ToLower(api.ObjectsCleanupDelete): // Known value, overwrite it to ensure case-ness - *str = chiV1.ObjectsCleanupDelete + *str = api.ObjectsCleanupDelete default: // Unknown value, fallback to default *str = value @@ -641,15 +641,15 @@ func (n *Normalizer) normalizeCleanup(str *string, value string) { } // normalizeHostTemplate normalizes .spec.templates.hostTemplates -func (n *Normalizer) normalizeHostTemplate(template *chiV1.ChiHostTemplate) { +func (n *Normalizer) normalizeHostTemplate(template *api.ChiHostTemplate) { // Name // PortDistribution if template.PortDistribution == nil { // In case no PortDistribution provided - setup default one - template.PortDistribution = []chiV1.ChiPortDistribution{ - {Type: chiV1.PortDistributionUnspecified}, + template.PortDistribution = []api.ChiPortDistribution{ + {Type: api.PortDistributionUnspecified}, } } // Normalize PortDistribution @@ -657,12 +657,12 @@ func (n *Normalizer) normalizeHostTemplate(template *chiV1.ChiHostTemplate) { portDistribution := &template.PortDistribution[i] switch portDistribution.Type { case - chiV1.PortDistributionUnspecified, - chiV1.PortDistributionClusterScopeIndex: + api.PortDistributionUnspecified, + api.PortDistributionClusterScopeIndex: // distribution is known default: // distribution is not known - portDistribution.Type = chiV1.PortDistributionUnspecified + portDistribution.Type = api.PortDistributionUnspecified } } @@ -674,7 +674,7 @@ func (n *Normalizer) normalizeHostTemplate(template *chiV1.ChiHostTemplate) { } // normalizePodTemplate normalizes .spec.templates.podTemplates -func (n *Normalizer) normalizePodTemplate(template 
*chiV1.ChiPodTemplate) { +func (n *Normalizer) normalizePodTemplate(template *api.ChiPodTemplate) { // Name // Zone @@ -684,7 +684,7 @@ func (n *Normalizer) normalizePodTemplate(template *chiV1.ChiPodTemplate) { } else if template.Zone.Key == "" { // We have values specified, but no key // Use default zone key in this case - template.Zone.Key = corev1.LabelTopologyZone + template.Zone.Key = core.LabelTopologyZone } else { // We have both key and value(s) specified explicitly } @@ -703,39 +703,39 @@ func (n *Normalizer) normalizePodTemplate(template *chiV1.ChiPodTemplate) { // https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy // which tells: For Pods running with hostNetwork, you should explicitly set its DNS policy “ClusterFirstWithHostNet”. if template.Spec.HostNetwork { - template.Spec.DNSPolicy = corev1.DNSClusterFirstWithHostNet + template.Spec.DNSPolicy = core.DNSClusterFirstWithHostNet } // Introduce PodTemplate into Index n.ctx.chi.Spec.Templates.EnsurePodTemplatesIndex().Set(template.Name, template) } -const defaultTopologyKey = corev1.LabelHostname +const defaultTopologyKey = core.LabelHostname -func (n *Normalizer) normalizePodDistribution(podDistribution *chiV1.ChiPodDistribution) []chiV1.ChiPodDistribution { +func (n *Normalizer) normalizePodDistribution(podDistribution *api.ChiPodDistribution) []api.ChiPodDistribution { if podDistribution.TopologyKey == "" { podDistribution.TopologyKey = defaultTopologyKey } switch podDistribution.Type { case - chiV1.PodDistributionUnspecified, + api.PodDistributionUnspecified, // AntiAffinity section - chiV1.PodDistributionClickHouseAntiAffinity, - chiV1.PodDistributionShardAntiAffinity, - chiV1.PodDistributionReplicaAntiAffinity: + api.PodDistributionClickHouseAntiAffinity, + api.PodDistributionShardAntiAffinity, + api.PodDistributionReplicaAntiAffinity: // PodDistribution is known if podDistribution.Scope == "" { - podDistribution.Scope = chiV1.PodDistributionScopeCluster + podDistribution.Scope = api.PodDistributionScopeCluster } return nil case - chiV1.PodDistributionAnotherNamespaceAntiAffinity, - chiV1.PodDistributionAnotherClickHouseInstallationAntiAffinity, - chiV1.PodDistributionAnotherClusterAntiAffinity: + api.PodDistributionAnotherNamespaceAntiAffinity, + api.PodDistributionAnotherClickHouseInstallationAntiAffinity, + api.PodDistributionAnotherClusterAntiAffinity: // PodDistribution is known return nil case - chiV1.PodDistributionMaxNumberPerNode: + api.PodDistributionMaxNumberPerNode: // PodDistribution is known if podDistribution.Number < 0 { podDistribution.Number = 0 @@ -743,66 +743,66 @@ func (n *Normalizer) normalizePodDistribution(podDistribution *chiV1.ChiPodDistr return nil case // Affinity section - chiV1.PodDistributionNamespaceAffinity, - chiV1.PodDistributionClickHouseInstallationAffinity, - chiV1.PodDistributionClusterAffinity, - chiV1.PodDistributionShardAffinity, - chiV1.PodDistributionReplicaAffinity, - chiV1.PodDistributionPreviousTailAffinity: + api.PodDistributionNamespaceAffinity, + api.PodDistributionClickHouseInstallationAffinity, + api.PodDistributionClusterAffinity, + api.PodDistributionShardAffinity, + api.PodDistributionReplicaAffinity, + api.PodDistributionPreviousTailAffinity: // PodDistribution is known return nil - case chiV1.PodDistributionCircularReplication: + case api.PodDistributionCircularReplication: // PodDistribution is known // PodDistributionCircularReplication is a shortcut to simplify complex set of other distributions // All shortcuts have to be 
expanded if podDistribution.Scope == "" { - podDistribution.Scope = chiV1.PodDistributionScopeCluster + podDistribution.Scope = api.PodDistributionScopeCluster } // TODO need to support multi-cluster cluster := n.ctx.chi.Spec.Configuration.Clusters[0] // Expand shortcut - return []chiV1.ChiPodDistribution{ + return []api.ChiPodDistribution{ { - Type: chiV1.PodDistributionShardAntiAffinity, + Type: api.PodDistributionShardAntiAffinity, Scope: podDistribution.Scope, }, { - Type: chiV1.PodDistributionReplicaAntiAffinity, + Type: api.PodDistributionReplicaAntiAffinity, Scope: podDistribution.Scope, }, { - Type: chiV1.PodDistributionMaxNumberPerNode, + Type: api.PodDistributionMaxNumberPerNode, Scope: podDistribution.Scope, Number: cluster.Layout.ReplicasCount, }, { - Type: chiV1.PodDistributionPreviousTailAffinity, + Type: api.PodDistributionPreviousTailAffinity, }, { - Type: chiV1.PodDistributionNamespaceAffinity, + Type: api.PodDistributionNamespaceAffinity, }, { - Type: chiV1.PodDistributionClickHouseInstallationAffinity, + Type: api.PodDistributionClickHouseInstallationAffinity, }, { - Type: chiV1.PodDistributionClusterAffinity, + Type: api.PodDistributionClusterAffinity, }, } } // PodDistribution is not known - podDistribution.Type = chiV1.PodDistributionUnspecified + podDistribution.Type = api.PodDistributionUnspecified return nil } // normalizeVolumeClaimTemplate normalizes .spec.templates.volumeClaimTemplates -func (n *Normalizer) normalizeVolumeClaimTemplate(template *chiV1.ChiVolumeClaimTemplate) { +func (n *Normalizer) normalizeVolumeClaimTemplate(template *api.ChiVolumeClaimTemplate) { // Check name // Skip for now @@ -817,20 +817,20 @@ func (n *Normalizer) normalizeVolumeClaimTemplate(template *chiV1.ChiVolumeClaim } // normalizeStorageManagement normalizes StorageManagement -func (n *Normalizer) normalizeStorageManagement(storage *chiV1.StorageManagement) { +func (n *Normalizer) normalizeStorageManagement(storage *api.StorageManagement) { // Check PVCProvisioner if !storage.PVCProvisioner.IsValid() { - storage.PVCProvisioner = chiV1.PVCProvisionerUnspecified + storage.PVCProvisioner = api.PVCProvisionerUnspecified } // Check PVCReclaimPolicy if !storage.PVCReclaimPolicy.IsValid() { - storage.PVCReclaimPolicy = chiV1.PVCReclaimPolicyUnspecified + storage.PVCReclaimPolicy = api.PVCReclaimPolicyUnspecified } } // normalizeServiceTemplate normalizes .spec.templates.serviceTemplates -func (n *Normalizer) normalizeServiceTemplate(template *chiV1.ChiServiceTemplate) { +func (n *Normalizer) normalizeServiceTemplate(template *api.ChiServiceTemplate) { // Check name // Check GenerateName // Check ObjectMeta @@ -841,7 +841,7 @@ func (n *Normalizer) normalizeServiceTemplate(template *chiV1.ChiServiceTemplate } // normalizeUseTemplates normalizes list of templates use specifications -func (n *Normalizer) normalizeUseTemplates(useTemplates []chiV1.ChiUseTemplate) []chiV1.ChiUseTemplate { +func (n *Normalizer) normalizeUseTemplates(useTemplates []api.ChiUseTemplate) []api.ChiUseTemplate { for i := range useTemplates { useTemplate := &useTemplates[i] n.normalizeUseTemplate(useTemplate) @@ -850,7 +850,7 @@ func (n *Normalizer) normalizeUseTemplates(useTemplates []chiV1.ChiUseTemplate) } // normalizeUseTemplate normalizes ChiUseTemplate -func (n *Normalizer) normalizeUseTemplate(useTemplate *chiV1.ChiUseTemplate) { +func (n *Normalizer) normalizeUseTemplate(useTemplate *api.ChiUseTemplate) { // Check Name if useTemplate.Name == "" { // This is strange @@ -872,7 +872,7 @@ func (n *Normalizer) 
normalizeUseTemplate(useTemplate *chiV1.ChiUseTemplate) { } // normalizeClusters normalizes clusters -func (n *Normalizer) normalizeClusters(clusters []*chiV1.Cluster) []*chiV1.Cluster { +func (n *Normalizer) normalizeClusters(clusters []*api.Cluster) []*api.Cluster { // We need to have at least one cluster available clusters = n.ensureClusters(clusters) @@ -885,29 +885,29 @@ func (n *Normalizer) normalizeClusters(clusters []*chiV1.Cluster) []*chiV1.Clust } // newDefaultCluster -func (n *Normalizer) newDefaultCluster() *chiV1.Cluster { - return &chiV1.Cluster{ +func (n *Normalizer) newDefaultCluster() *api.Cluster { + return &api.Cluster{ Name: "cluster", } } // ensureClusters -func (n *Normalizer) ensureClusters(clusters []*chiV1.Cluster) []*chiV1.Cluster { +func (n *Normalizer) ensureClusters(clusters []*api.Cluster) []*api.Cluster { if len(clusters) > 0 { return clusters } if n.ctx.options.WithDefaultCluster { - return []*chiV1.Cluster{ + return []*api.Cluster{ n.newDefaultCluster(), } } - return []*chiV1.Cluster{} + return []*api.Cluster{} } // normalizeConfigurationZookeeper normalizes .spec.configuration.zookeeper -func (n *Normalizer) normalizeConfigurationZookeeper(zk *chiV1.ChiZookeeperConfig) *chiV1.ChiZookeeperConfig { +func (n *Normalizer) normalizeConfigurationZookeeper(zk *api.ChiZookeeperConfig) *api.ChiZookeeperConfig { if zk == nil { return nil } @@ -916,7 +916,7 @@ func (n *Normalizer) normalizeConfigurationZookeeper(zk *chiV1.ChiZookeeperConfi for i := range zk.Nodes { // Convenience wrapper node := &zk.Nodes[i] - if chiV1.IsPortUnassigned(node.Port) { + if api.IsPortUnassigned(node.Port) { node.Port = zkDefaultPort } } @@ -930,7 +930,7 @@ func (n *Normalizer) normalizeConfigurationZookeeper(zk *chiV1.ChiZookeeperConfi } // substWithSecretField substitute users settings field with value from k8s secret -func (n *Normalizer) substWithSecretField(users *chiV1.Settings, username string, userSettingsField, userSettingsK8SSecretField string) bool { +func (n *Normalizer) substWithSecretField(users *api.Settings, username string, userSettingsField, userSettingsK8SSecretField string) bool { // Has to have source field specified if !users.Has(username + "/" + userSettingsK8SSecretField) { return false @@ -945,12 +945,12 @@ func (n *Normalizer) substWithSecretField(users *chiV1.Settings, username string return false } - users.Set(username+"/"+userSettingsField, chiV1.NewSettingScalar(secretFieldValue)) + users.Set(username+"/"+userSettingsField, api.NewSettingScalar(secretFieldValue)) return true } // substWithSecretEnvField substitute users settings field with value from k8s secret stored in ENV var -func (n *Normalizer) substWithSecretEnvField(users *chiV1.Settings, username string, userSettingsField, userSettingsK8SSecretField string) bool { +func (n *Normalizer) substWithSecretEnvField(users *api.Settings, username string, userSettingsField, userSettingsK8SSecretField string) bool { // Fetch secret name and key within secret _, secretName, key, err := parseSecretFieldAddress(users, username, userSettingsK8SSecretField) if err != nil { @@ -965,11 +965,11 @@ func (n *Normalizer) substWithSecretEnvField(users *chiV1.Settings, username str // ENV VAR name and value envVarName := username + "_" + userSettingsField n.appendEnvVar( - corev1.EnvVar{ + core.EnvVar{ Name: envVarName, - ValueFrom: &corev1.EnvVarSource{ - SecretKeyRef: &corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ + ValueFrom: &core.EnvVarSource{ + SecretKeyRef: &core.SecretKeySelector{ + 
LocalObjectReference: core.LocalObjectReference{ Name: secretName, }, Key: key, @@ -979,35 +979,35 @@ func (n *Normalizer) substWithSecretEnvField(users *chiV1.Settings, username str ) // Replace setting with empty value and reference to ENV VAR - users.Set(username+"/"+userSettingsField, chiV1.NewSettingScalar("").SetAttribute("from_env", envVarName)) + users.Set(username+"/"+userSettingsField, api.NewSettingScalar("").SetAttribute("from_env", envVarName)) return true } const internodeClusterSecretEnvName = "CLICKHOUSE_INTERNODE_CLUSTER_SECRET" -func (n *Normalizer) appendClusterSecretEnvVar(cluster *chiV1.Cluster) { +func (n *Normalizer) appendClusterSecretEnvVar(cluster *api.Cluster) { switch cluster.Secret.Source() { - case chiV1.ClusterSecretSourcePlaintext: + case api.ClusterSecretSourcePlaintext: // Secret has explicit value, it is not passed via ENV vars // Do nothing here - case chiV1.ClusterSecretSourceSecretRef: + case api.ClusterSecretSourceSecretRef: // Secret has explicit SecretKeyRef // Set the password for internode communication using an ENV VAR n.appendEnvVar( - corev1.EnvVar{ + core.EnvVar{ Name: internodeClusterSecretEnvName, - ValueFrom: &corev1.EnvVarSource{ + ValueFrom: &core.EnvVarSource{ SecretKeyRef: cluster.Secret.GetSecretKeyRef(), }, }, ) - case chiV1.ClusterSecretSourceAuto: + case api.ClusterSecretSourceAuto: // Secret is auto-generated // Set the password for internode communication using an ENV VAR n.appendEnvVar( - corev1.EnvVar{ + core.EnvVar{ Name: internodeClusterSecretEnvName, - ValueFrom: &corev1.EnvVarSource{ + ValueFrom: &core.EnvVarSource{ SecretKeyRef: cluster.Secret.GetAutoSecretKeyRef(CreateClusterAutoSecretName(cluster)), }, }, @@ -1015,7 +1015,7 @@ func (n *Normalizer) appendClusterSecretEnvVar(cluster *chiV1.Cluster) { } } -func (n *Normalizer) appendEnvVar(envVar corev1.EnvVar) { +func (n *Normalizer) appendEnvVar(envVar core.EnvVar) { // Sanity check if envVar.Name == "" { return @@ -1037,7 +1037,7 @@ var ( ) // parseSecretFieldAddress parses address into namespace, name, key triple -func parseSecretFieldAddress(users *chiV1.Settings, username, userSettingsK8SSecretField string) (string, string, string, error) { +func parseSecretFieldAddress(users *api.Settings, username, userSettingsK8SSecretField string) (string, string, string, error) { settingsPath := username + "/" + userSettingsK8SSecretField secretFieldAddress := users.Get(settingsPath).String() @@ -1076,14 +1076,14 @@ func parseSecretFieldAddress(users *chiV1.Settings, username, userSettingsK8SSec } // fetchSecretFieldValue fetches the value of the specified field in the specified secret -func (n *Normalizer) fetchSecretFieldValue(users *chiV1.Settings, username, userSettingsK8SSecretField string) (string, error) { +func (n *Normalizer) fetchSecretFieldValue(users *api.Settings, username, userSettingsK8SSecretField string) (string, error) { // Fetch address of the field namespace, name, key, err := parseSecretFieldAddress(users, username, userSettingsK8SSecretField) if err != nil { return "", err } - secret, err := n.kubeClient.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + secret, err := n.kubeClient.CoreV1().Secrets(namespace).Get(context.TODO(), name, meta.GetOptions{}) if err != nil { log.V(1).M(namespace, name).F().Info("unable to read secret %v", err) return "", ErrSecretFieldNotFound @@ -1101,10 +1101,10 @@ func (n *Normalizer) fetchSecretFieldValue(users *chiV1.Settings, username, user } // normalizeUsersList extracts usernames from provided 
'users' settings -func (n *Normalizer) normalizeUsersList(users *chiV1.Settings, extra ...string) (usernames []string) { +func (n *Normalizer) normalizeUsersList(users *api.Settings, extra ...string) (usernames []string) { // Extract username from path usernameMap := make(map[string]bool) - users.Walk(func(path string, _ *chiV1.Setting) { + users.Walk(func(path string, _ *api.Setting) { // Split username/action into username and all the rest. Ex. 'admin/password', 'admin/networks/ip' tags := strings.Split(path, "/") @@ -1140,10 +1140,10 @@ const defaultUsername = "default" const chopProfile = "clickhouse_operator" // normalizeConfigurationUsers normalizes .spec.configuration.users -func (n *Normalizer) normalizeConfigurationUsers(users *chiV1.Settings) *chiV1.Settings { +func (n *Normalizer) normalizeConfigurationUsers(users *api.Settings) *api.Settings { // Ensure and normalize user settings if users == nil { - users = chiV1.NewSettings() + users = api.NewSettings() } users.Normalize() @@ -1172,21 +1172,21 @@ func (n *Normalizer) normalizeConfigurationUsers(users *chiV1.Settings) *chiV1.S return users } -func (n *Normalizer) removePlainPassword(users *chiV1.Settings, username string) { +func (n *Normalizer) removePlainPassword(users *api.Settings, username string) { if users.Has(username+"/password_double_sha1_hex") || users.Has(username+"/password_sha256_hex") { // If user has encrypted password specified, we need to delete existing plaintext password. // Set "remove" flag for user's "password", which is specified as empty in stock ClickHouse users.xml, // thus we need to overwrite it. - users.Set(username+"/password", chiV1.NewSettingScalar("").SetAttribute("remove", "1")) + users.Set(username+"/password", api.NewSettingScalar("").SetAttribute("remove", "1")) } } -func (n *Normalizer) normalizeConfigurationUser(users *chiV1.Settings, username string) { +func (n *Normalizer) normalizeConfigurationUser(users *api.Settings, username string) { n.normalizeConfigurationUserEnsureMandatorySections(users, username) n.normalizeConfigurationUserPassword(users, username) } -func (n *Normalizer) normalizeConfigurationUserEnsureMandatorySections(users *chiV1.Settings, username string) { +func (n *Normalizer) normalizeConfigurationUserEnsureMandatorySections(users *api.Settings, username string) { chopUsername := chop.Config().ClickHouse.Access.Username // // Ensure each user has mandatory sections: @@ -1208,7 +1208,7 @@ func (n *Normalizer) normalizeConfigurationUserEnsureMandatorySections(users *ch regexp = "" } case chopUsername: - ip, _ := chop.Get().ConfigManager.GetRuntimeParam(chiV1.OPERATOR_POD_IP) + ip, _ := chop.Get().ConfigManager.GetRuntimeParam(api.OPERATOR_POD_IP) profile = chopProfile quota = "" @@ -1218,20 +1218,20 @@ func (n *Normalizer) normalizeConfigurationUserEnsureMandatorySections(users *ch // Ensure required values are in place and apply non-empty values in case no own value(s) provided if profile != "" { - users.SetIfNotExists(username+"/profile", chiV1.NewSettingScalar(profile)) + users.SetIfNotExists(username+"/profile", api.NewSettingScalar(profile)) } if quota != "" { - users.SetIfNotExists(username+"/quota", chiV1.NewSettingScalar(quota)) + users.SetIfNotExists(username+"/quota", api.NewSettingScalar(quota)) } if len(ips) > 0 { - users.Set(username+"/networks/ip", chiV1.NewSettingVector(ips).MergeFrom(users.Get(username+"/networks/ip"))) + users.Set(username+"/networks/ip", api.NewSettingVector(ips).MergeFrom(users.Get(username+"/networks/ip"))) } if regexp != "" { - 
users.SetIfNotExists(username+"/networks/host_regexp", chiV1.NewSettingScalar(regexp)) + users.SetIfNotExists(username+"/networks/host_regexp", api.NewSettingScalar(regexp)) } } -func (n *Normalizer) normalizeConfigurationUserPassword(users *chiV1.Settings, username string) { +func (n *Normalizer) normalizeConfigurationUserPassword(users *api.Settings, username string) { // // Deal with passwords // @@ -1291,7 +1291,7 @@ func (n *Normalizer) normalizeConfigurationUserPassword(users *chiV1.Settings, u // Replace plaintext password with encrypted if passwordPlaintext != "" { passwordSHA256 := sha256.Sum256([]byte(passwordPlaintext)) - users.Set(username+"/password_sha256_hex", chiV1.NewSettingScalar(hex.EncodeToString(passwordSHA256[:]))) + users.Set(username+"/password_sha256_hex", api.NewSettingScalar(hex.EncodeToString(passwordSHA256[:]))) // And keep only one password specification users.Delete(username + "/password_double_sha1_hex") users.Delete(username + "/password") @@ -1301,9 +1301,9 @@ func (n *Normalizer) normalizeConfigurationUserPassword(users *chiV1.Settings, u } // normalizeConfigurationProfiles normalizes .spec.configuration.profiles -func (n *Normalizer) normalizeConfigurationProfiles(profiles *chiV1.Settings) *chiV1.Settings { +func (n *Normalizer) normalizeConfigurationProfiles(profiles *api.Settings) *api.Settings { if profiles == nil { - //profiles = chiV1.NewSettings() + //profiles = api.NewSettings() return nil } profiles.Normalize() @@ -1311,9 +1311,9 @@ func (n *Normalizer) normalizeConfigurationProfiles(profiles *chiV1.Settings) *c } // normalizeConfigurationQuotas normalizes .spec.configuration.quotas -func (n *Normalizer) normalizeConfigurationQuotas(quotas *chiV1.Settings) *chiV1.Settings { +func (n *Normalizer) normalizeConfigurationQuotas(quotas *api.Settings) *api.Settings { if quotas == nil { - //quotas = chiV1.NewSettings() + //quotas = api.NewSettings() return nil } quotas.Normalize() @@ -1321,9 +1321,9 @@ func (n *Normalizer) normalizeConfigurationQuotas(quotas *chiV1.Settings) *chiV1 } // normalizeConfigurationSettings normalizes .spec.configuration.settings -func (n *Normalizer) normalizeConfigurationSettings(settings *chiV1.Settings) *chiV1.Settings { +func (n *Normalizer) normalizeConfigurationSettings(settings *api.Settings) *api.Settings { if settings == nil { - //settings = chiV1.NewSettings() + //settings = api.NewSettings() return nil } settings.Normalize() @@ -1331,9 +1331,9 @@ func (n *Normalizer) normalizeConfigurationSettings(settings *chiV1.Settings) *c } // normalizeConfigurationFiles normalizes .spec.configuration.files -func (n *Normalizer) normalizeConfigurationFiles(files *chiV1.Settings) *chiV1.Settings { +func (n *Normalizer) normalizeConfigurationFiles(files *api.Settings) *api.Settings { if files == nil { - //files = chiV1.NewSettings() + //files = api.NewSettings() return nil } files.Normalize() @@ -1341,7 +1341,7 @@ func (n *Normalizer) normalizeConfigurationFiles(files *chiV1.Settings) *chiV1.S } // normalizeCluster normalizes cluster and returns deployments usage counters for this cluster -func (n *Normalizer) normalizeCluster(cluster *chiV1.Cluster) *chiV1.Cluster { +func (n *Normalizer) normalizeCluster(cluster *api.Cluster) *api.Cluster { if cluster == nil { cluster = n.newDefaultCluster() } @@ -1362,7 +1362,7 @@ func (n *Normalizer) normalizeCluster(cluster *chiV1.Cluster) *chiV1.Cluster { cluster.SchemaPolicy = n.normalizeClusterSchemaPolicy(cluster.SchemaPolicy) if cluster.Layout == nil { - cluster.Layout = 
chiV1.NewChiClusterLayout() + cluster.Layout = api.NewChiClusterLayout() } cluster.FillShardReplicaSpecified() cluster.Layout = n.normalizeClusterLayoutShardsCountAndReplicasCount(cluster.Layout) @@ -1373,17 +1373,17 @@ func (n *Normalizer) normalizeCluster(cluster *chiV1.Cluster) *chiV1.Cluster { n.appendClusterSecretEnvVar(cluster) // Loop over all shards and replicas inside shards and fill structure - cluster.WalkShards(func(index int, shard *chiV1.ChiShard) error { + cluster.WalkShards(func(index int, shard *api.ChiShard) error { n.normalizeShard(shard, cluster, index) return nil }) - cluster.WalkReplicas(func(index int, replica *chiV1.ChiReplica) error { + cluster.WalkReplicas(func(index int, replica *api.ChiReplica) error { n.normalizeReplica(replica, cluster, index) return nil }) - cluster.Layout.HostsField.WalkHosts(func(shard, replica int, host *chiV1.ChiHost) error { + cluster.Layout.HostsField.WalkHosts(func(shard, replica int, host *api.ChiHost) error { n.normalizeHost(host, cluster.GetShard(shard), cluster.GetReplica(replica), cluster, shard, replica) return nil }) @@ -1392,11 +1392,11 @@ } // createHostsField -func (n *Normalizer) createHostsField(cluster *chiV1.Cluster) { - cluster.Layout.HostsField = chiV1.NewHostsField(cluster.Layout.ShardsCount, cluster.Layout.ReplicasCount) +func (n *Normalizer) createHostsField(cluster *api.Cluster) { + cluster.Layout.HostsField = api.NewHostsField(cluster.Layout.ShardsCount, cluster.Layout.ReplicasCount) // Need to migrate hosts from Shards and Replicas into HostsField - hostMergeFunc := func(shard, replica int, host *chiV1.ChiHost) error { + hostMergeFunc := func(shard, replica int, host *api.ChiHost) error { if curHost := cluster.Layout.HostsField.Get(shard, replica); curHost == nil { cluster.Layout.HostsField.Set(shard, replica, host) } else { @@ -1419,9 +1419,9 @@ const ( ) // normalizeClusterSchemaPolicy normalizes schema policy of a cluster -func (n *Normalizer) normalizeClusterSchemaPolicy(policy *chiV1.SchemaPolicy) *chiV1.SchemaPolicy { +func (n *Normalizer) normalizeClusterSchemaPolicy(policy *api.SchemaPolicy) *api.SchemaPolicy { if policy == nil { - policy = chiV1.NewClusterSchemaPolicy() + policy = api.NewClusterSchemaPolicy() } switch strings.ToLower(policy.Replica) { @@ -1455,9 +1455,9 @@ func (n *Normalizer) normalizeClusterSchemaPolicy(policy *chiV1.SchemaPolicy) *c } // normalizeClusterLayoutShardsCountAndReplicasCount ensures at least 1 shard and 1 replica counters -func (n *Normalizer) normalizeClusterLayoutShardsCountAndReplicasCount(layout *chiV1.ChiClusterLayout) *chiV1.ChiClusterLayout { +func (n *Normalizer) normalizeClusterLayoutShardsCountAndReplicasCount(layout *api.ChiClusterLayout) *api.ChiClusterLayout { if layout == nil { - layout = chiV1.NewChiClusterLayout() + layout = api.NewChiClusterLayout() } // Layout.ShardsCount and @@ -1525,31 +1525,31 @@ func (n *Normalizer) normalizeClusterLayoutShardsCountAndReplicasCount(layout *c } // ensureClusterLayoutShards ensures slice layout.Shards is in place -func (n *Normalizer) ensureClusterLayoutShards(layout *chiV1.ChiClusterLayout) { +func (n *Normalizer) ensureClusterLayoutShards(layout *api.ChiClusterLayout) { // Disposition of shards in slice would be // [explicitly specified shards 0..N, N+1..layout.ShardsCount-1 empty slots for to-be-filled shards] // Some (may be all) shards specified, need to append space for unspecified shards // TODO maybe
there is a better way to append N slots to a slice for len(layout.Shards) < layout.ShardsCount { - layout.Shards = append(layout.Shards, chiV1.ChiShard{}) + layout.Shards = append(layout.Shards, api.ChiShard{}) } } // ensureClusterLayoutReplicas ensures slice layout.Replicas is in place -func (n *Normalizer) ensureClusterLayoutReplicas(layout *chiV1.ChiClusterLayout) { +func (n *Normalizer) ensureClusterLayoutReplicas(layout *api.ChiClusterLayout) { // Disposition of replicas in slice would be // [explicitly specified replicas 0..N, N+1..layout.ReplicasCount-1 empty slots for to-be-filled replicas] // Some (may be all) replicas specified, need to append space for unspecified replicas // TODO maybe there is a better way to append N slots to a slice for len(layout.Replicas) < layout.ReplicasCount { - layout.Replicas = append(layout.Replicas, chiV1.ChiReplica{}) + layout.Replicas = append(layout.Replicas, api.ChiReplica{}) } } // normalizeShard normalizes a shard - walks over all fields -func (n *Normalizer) normalizeShard(shard *chiV1.ChiShard, cluster *chiV1.Cluster, shardIndex int) { +func (n *Normalizer) normalizeShard(shard *api.ChiShard, cluster *api.Cluster, shardIndex int) { n.normalizeShardName(shard, shardIndex) n.normalizeShardWeight(shard) // For each shard of this normalized cluster inherit from cluster @@ -1566,7 +1566,7 @@ func (n *Normalizer) normalizeShard(shard *chiV1.ChiShard, cluster *chiV1.Cluste } // normalizeReplica normalizes a replica - walks over all fields -func (n *Normalizer) normalizeReplica(replica *chiV1.ChiReplica, cluster *chiV1.Cluster, replicaIndex int) { +func (n *Normalizer) normalizeReplica(replica *api.ChiReplica, cluster *api.Cluster, replicaIndex int) { n.normalizeReplicaName(replica, replicaIndex) // For each replica of this normalized cluster inherit from cluster replica.InheritSettingsFrom(cluster) @@ -1580,7 +1580,7 @@ func (n *Normalizer) normalizeReplica(replica *chiV1.ChiReplica, cluster *chiV1.
} // normalizeShardReplicasCount ensures shard.ReplicasCount is filled properly -func (n *Normalizer) normalizeShardReplicasCount(shard *chiV1.ChiShard, layoutReplicasCount int) { +func (n *Normalizer) normalizeShardReplicasCount(shard *api.ChiShard, layoutReplicasCount int) { if shard.ReplicasCount > 0 { // Shard has explicitly specified number of replicas return @@ -1603,7 +1603,7 @@ func (n *Normalizer) normalizeShardReplicasCount(shard *chiV1.ChiShard, layoutRe } // normalizeReplicaShardsCount ensures replica.ShardsCount is filled properly -func (n *Normalizer) normalizeReplicaShardsCount(replica *chiV1.ChiReplica, layoutShardsCount int) { +func (n *Normalizer) normalizeReplicaShardsCount(replica *api.ChiReplica, layoutShardsCount int) { if replica.ShardsCount > 0 { // Replica has explicitly specified number of shards return @@ -1626,7 +1626,7 @@ func (n *Normalizer) normalizeReplicaShardsCount(replica *chiV1.ChiReplica, layo } // normalizeShardName normalizes shard name -func (n *Normalizer) normalizeShardName(shard *chiV1.ChiShard, index int) { +func (n *Normalizer) normalizeShardName(shard *api.ChiShard, index int) { if (len(shard.Name) > 0) && !IsAutoGeneratedShardName(shard.Name, shard, index) { // Has explicitly specified name already return @@ -1636,7 +1636,7 @@ func (n *Normalizer) normalizeShardName(shard *chiV1.ChiShard, index int) { } // normalizeReplicaName normalizes replica name -func (n *Normalizer) normalizeReplicaName(replica *chiV1.ChiReplica, index int) { +func (n *Normalizer) normalizeReplicaName(replica *api.ChiReplica, index int) { if (len(replica.Name) > 0) && !IsAutoGeneratedReplicaName(replica.Name, replica, index) { // Has explicitly specified name already return @@ -1646,11 +1646,11 @@ func (n *Normalizer) normalizeReplicaName(replica *chiV1.ChiReplica, index int) } // normalizeShardWeight normalizes shard weight -func (n *Normalizer) normalizeShardWeight(shard *chiV1.ChiShard) { +func (n *Normalizer) normalizeShardWeight(shard *api.ChiShard) { } // normalizeShardHosts normalizes all replicas of specified shard -func (n *Normalizer) normalizeShardHosts(shard *chiV1.ChiShard, cluster *chiV1.Cluster, shardIndex int) { +func (n *Normalizer) normalizeShardHosts(shard *api.ChiShard, cluster *api.Cluster, shardIndex int) { // Use hosts from HostsField shard.Hosts = nil for len(shard.Hosts) < shard.ReplicasCount { @@ -1663,7 +1663,7 @@ func (n *Normalizer) normalizeShardHosts(shard *chiV1.ChiShard, cluster *chiV1.C } // normalizeReplicaHosts normalizes all shards of specified replica -func (n *Normalizer) normalizeReplicaHosts(replica *chiV1.ChiReplica, cluster *chiV1.Cluster, replicaIndex int) { +func (n *Normalizer) normalizeReplicaHosts(replica *api.ChiReplica, cluster *api.Cluster, replicaIndex int) { // Use hosts from HostsField replica.Hosts = nil for len(replica.Hosts) < replica.ShardsCount { @@ -1677,18 +1677,18 @@ func (n *Normalizer) normalizeReplicaHosts(replica *chiV1.ChiReplica, cluster *c // normalizeHost normalizes a host/replica func (n *Normalizer) normalizeHost( - host *chiV1.ChiHost, - shard *chiV1.ChiShard, - replica *chiV1.ChiReplica, - cluster *chiV1.Cluster, + host *api.ChiHost, + shard *api.ChiShard, + replica *api.ChiReplica, + cluster *api.Cluster, shardIndex int, replicaIndex int, ) { n.normalizeHostName(host, shard, shardIndex, replica, replicaIndex) n.normalizeHostPorts(host) // Inherit from either Shard or Replica - var s *chiV1.ChiShard - var r *chiV1.ChiReplica + var s *api.ChiShard + var r *api.ChiReplica if cluster.IsShardSpecified() { s =
shard } else { @@ -1702,16 +1702,16 @@ func (n *Normalizer) normalizeHost( } // normalizeHostTemplateSpec is the same as normalizeHost but for a template -func (n *Normalizer) normalizeHostTemplateSpec(host *chiV1.ChiHost) { +func (n *Normalizer) normalizeHostTemplateSpec(host *api.ChiHost) { n.normalizeHostPorts(host) } // normalizeHostName normalizes host's name func (n *Normalizer) normalizeHostName( - host *chiV1.ChiHost, - shard *chiV1.ChiShard, + host *api.ChiHost, + shard *api.ChiShard, shardIndex int, - replica *chiV1.ChiReplica, + replica *api.ChiReplica, replicaIndex int, ) { if (len(host.GetName()) > 0) && !IsAutoGeneratedHostName(host.GetName(), host, shard, shardIndex, replica, replicaIndex) { @@ -1722,37 +1722,37 @@ func (n *Normalizer) normalizeHostName( host.Name = CreateHostName(host, shard, shardIndex, replica, replicaIndex) } -// normalizeHostPorts ensures chiV1.ChiReplica.Port is reasonable -func (n *Normalizer) normalizeHostPorts(host *chiV1.ChiHost) { +// normalizeHostPorts ensures host ports are reasonable +func (n *Normalizer) normalizeHostPorts(host *api.ChiHost) { // Deprecated - if chiV1.IsPortInvalid(host.Port) { - host.Port = chiV1.PortUnassigned() + if api.IsPortInvalid(host.Port) { + host.Port = api.PortUnassigned() } - if chiV1.IsPortInvalid(host.TCPPort) { - host.TCPPort = chiV1.PortUnassigned() + if api.IsPortInvalid(host.TCPPort) { + host.TCPPort = api.PortUnassigned() } - if chiV1.IsPortInvalid(host.TLSPort) { - host.TLSPort = chiV1.PortUnassigned() + if api.IsPortInvalid(host.TLSPort) { + host.TLSPort = api.PortUnassigned() } - if chiV1.IsPortInvalid(host.HTTPPort) { - host.HTTPPort = chiV1.PortUnassigned() + if api.IsPortInvalid(host.HTTPPort) { + host.HTTPPort = api.PortUnassigned() } - if chiV1.IsPortInvalid(host.HTTPSPort) { - host.HTTPSPort = chiV1.PortUnassigned() + if api.IsPortInvalid(host.HTTPSPort) { + host.HTTPSPort = api.PortUnassigned() } - if chiV1.IsPortInvalid(host.InterserverHTTPPort) { - host.InterserverHTTPPort = chiV1.PortUnassigned() + if api.IsPortInvalid(host.InterserverHTTPPort) { + host.InterserverHTTPPort = api.PortUnassigned() } } // normalizeShardInternalReplication ensures reasonable values in // .spec.configuration.clusters.layout.shards.internalReplication -func (n *Normalizer) normalizeShardInternalReplication(shard *chiV1.ChiShard) { +func (n *Normalizer) normalizeShardInternalReplication(shard *api.ChiShard) { // Shards with replicas are expected to have internal replication on by default defaultInternalReplication := false if shard.ReplicasCount > 1 { diff --git a/pkg/model/chi/registry.go b/pkg/model/chi/registry.go index 56deae4a4..937bbc1f1 100644 --- a/pkg/model/chi/registry.go +++ b/pkg/model/chi/registry.go @@ -18,7 +18,7 @@ import ( "fmt" "sync" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/altinity/clickhouse-operator/pkg/util" ) @@ -65,7 +65,7 @@ type objectMetaIdentity struct { // All accesses are synchronized. type objectMetaSet struct { entityType EntityType - contents map[objectMetaIdentity]v1.ObjectMeta + contents map[objectMetaIdentity]meta.ObjectMeta sync.RWMutex } @@ -103,7 +103,7 @@ func (r *Registry) Len(_what ...EntityType) int { // Note: this is fairly expensive in the sense that it locks the entire registry from being written // for the full duration of whatever workload is applied throughout iteration. Avoid calling when you know // the entity type you want.
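A usage note on the walkers that follow: given the locking caveat above, a caller that already knows the entity type should prefer the typed walkers over Registry.Walk. A hypothetical caller sketch, assuming a populated *Registry r, the meta alias introduced by this patch, and fmt imported (illustrative only, not part of the patch):

// Fine-grained: per the note above, walks one entity type without holding
// the registry-wide read lock for the whole iteration.
r.WalkStatefulSet(func(m meta.ObjectMeta) {
	fmt.Printf("statefulset %s/%s\n", m.Namespace, m.Name)
})

// Coarse-grained: iterates every entity type under the registry read lock.
r.Walk(func(et EntityType, m meta.ObjectMeta) {
	fmt.Printf("%s %s/%s\n", et, m.Namespace, m.Name)
})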
-func (r *Registry) Walk(f func(entityType EntityType, meta v1.ObjectMeta)) { +func (r *Registry) Walk(f func(entityType EntityType, meta meta.ObjectMeta)) { if r == nil { return } @@ -111,14 +111,14 @@ func (r *Registry) Walk(f func(entityType EntityType, meta v1.ObjectMeta)) { defer r.mu.RUnlock() for et, os := range r.r { - os.walk(func(meta v1.ObjectMeta) { + os.walk(func(meta meta.ObjectMeta) { f(et, meta) }) } } // WalkEntityType walks over registry -func (r *Registry) WalkEntityType(entityType EntityType, f func(meta v1.ObjectMeta)) { +func (r *Registry) WalkEntityType(entityType EntityType, f func(meta meta.ObjectMeta)) { if r == nil { return } @@ -133,14 +133,14 @@ func (r *Registry) String() string { return "" } s := "" - r.Walk(func(entityType EntityType, meta v1.ObjectMeta) { + r.Walk(func(entityType EntityType, meta meta.ObjectMeta) { s += fmt.Sprintf("%s: %s/%s\n", entityType, meta.Namespace, meta.Name) }) return s } // registerEntity registers entity -func (r *Registry) registerEntity(entityType EntityType, meta v1.ObjectMeta) { +func (r *Registry) registerEntity(entityType EntityType, _meta meta.ObjectMeta) { if r == nil { return } @@ -150,11 +150,11 @@ func (r *Registry) registerEntity(entityType EntityType, meta v1.ObjectMeta) { setForType := r.ensureObjectSetForType(entityType) // Create the representation that we'll attempt to add. - newObj := v1.ObjectMeta{ - Namespace: meta.Namespace, - Name: meta.Name, - Labels: util.MergeStringMapsOverwrite(nil, meta.Labels), - Annotations: util.MergeStringMapsOverwrite(nil, meta.Annotations), + newObj := meta.ObjectMeta{ + Namespace: _meta.Namespace, + Name: _meta.Name, + Labels: util.MergeStringMapsOverwrite(nil, _meta.Labels), + Annotations: util.MergeStringMapsOverwrite(nil, _meta.Annotations), } // Add the object, which will only happen if no other object with the same identity is present in the set.
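Worth noting why registerEntity rebuilds the ObjectMeta instead of storing the caller's value: copying Labels and Annotations (here via util.MergeStringMapsOverwrite) keeps the set from aliasing maps the caller may mutate later. A minimal standalone sketch of the hazard the copy avoids, with copyMap as an illustrative stand-in for the util helper:

package main

import "fmt"

// copyMap stands in for util.MergeStringMapsOverwrite(nil, src): a fresh map
// with the same contents, sharing no storage with src.
func copyMap(src map[string]string) map[string]string {
	if src == nil {
		return nil
	}
	dst := make(map[string]string, len(src))
	for k, v := range src {
		dst[k] = v
	}
	return dst
}

func main() {
	labels := map[string]string{"app": "clickhouse"}
	stored := copyMap(labels)
	labels["app"] = "mutated-by-caller" // a later mutation by the caller...
	fmt.Println(stored["app"])          // ...does not reach the stored copy: "clickhouse"
}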
@@ -162,12 +162,12 @@ func (r *Registry) registerEntity(entityType EntityType, meta v1.ObjectMeta) { } // RegisterStatefulSet registers StatefulSet -func (r *Registry) RegisterStatefulSet(meta v1.ObjectMeta) { +func (r *Registry) RegisterStatefulSet(meta meta.ObjectMeta) { r.registerEntity(StatefulSet, meta) } // HasStatefulSet checks whether registry has specified StatefulSet -func (r *Registry) HasStatefulSet(meta v1.ObjectMeta) bool { +func (r *Registry) HasStatefulSet(meta meta.ObjectMeta) bool { return r.hasEntity(StatefulSet, meta) } @@ -177,17 +177,17 @@ func (r *Registry) NumStatefulSet() int { } // WalkStatefulSet walks over specified entity types -func (r *Registry) WalkStatefulSet(f func(meta v1.ObjectMeta)) { +func (r *Registry) WalkStatefulSet(f func(meta meta.ObjectMeta)) { r.WalkEntityType(StatefulSet, f) } // RegisterConfigMap registers ConfigMap -func (r *Registry) RegisterConfigMap(meta v1.ObjectMeta) { +func (r *Registry) RegisterConfigMap(meta meta.ObjectMeta) { r.registerEntity(ConfigMap, meta) } // HasConfigMap checks whether registry has specified ConfigMap -func (r *Registry) HasConfigMap(meta v1.ObjectMeta) bool { +func (r *Registry) HasConfigMap(meta meta.ObjectMeta) bool { return r.hasEntity(ConfigMap, meta) } @@ -197,17 +197,17 @@ func (r *Registry) NumConfigMap() int { } // WalkConfigMap walks over specified entity types -func (r *Registry) WalkConfigMap(f func(meta v1.ObjectMeta)) { +func (r *Registry) WalkConfigMap(f func(meta meta.ObjectMeta)) { r.WalkEntityType(ConfigMap, f) } // RegisterService registers Service -func (r *Registry) RegisterService(meta v1.ObjectMeta) { +func (r *Registry) RegisterService(meta meta.ObjectMeta) { r.registerEntity(Service, meta) } // HasService checks whether registry has specified Service -func (r *Registry) HasService(meta v1.ObjectMeta) bool { +func (r *Registry) HasService(meta meta.ObjectMeta) bool { return r.hasEntity(Service, meta) } @@ -217,17 +217,17 @@ func (r *Registry) NumService() int { } // WalkService walks over specified entity types -func (r *Registry) WalkService(f func(meta v1.ObjectMeta)) { +func (r *Registry) WalkService(f func(meta meta.ObjectMeta)) { r.WalkEntityType(Service, f) } // RegisterSecret registers Secret -func (r *Registry) RegisterSecret(meta v1.ObjectMeta) { +func (r *Registry) RegisterSecret(meta meta.ObjectMeta) { r.registerEntity(Secret, meta) } // HasSecret checks whether registry has specified Secret -func (r *Registry) HasSecret(meta v1.ObjectMeta) bool { +func (r *Registry) HasSecret(meta meta.ObjectMeta) bool { return r.hasEntity(Secret, meta) } @@ -237,17 +237,17 @@ func (r *Registry) NumSecret() int { } // WalkSecret walks over specified entity types -func (r *Registry) WalkSecret(f func(meta v1.ObjectMeta)) { +func (r *Registry) WalkSecret(f func(meta meta.ObjectMeta)) { r.WalkEntityType(Secret, f) } // RegisterPVC registers PVC -func (r *Registry) RegisterPVC(meta v1.ObjectMeta) { +func (r *Registry) RegisterPVC(meta meta.ObjectMeta) { r.registerEntity(PVC, meta) } // HasPVC checks whether registry has specified PVC -func (r *Registry) HasPVC(meta v1.ObjectMeta) bool { +func (r *Registry) HasPVC(meta meta.ObjectMeta) bool { return r.hasEntity(PVC, meta) } @@ -257,18 +257,18 @@ func (r *Registry) NumPVC() int { } // WalkPVC walks over specified entity types -func (r *Registry) WalkPVC(f func(meta v1.ObjectMeta)) { +func (r *Registry) WalkPVC(f func(meta meta.ObjectMeta)) { r.WalkEntityType(PVC, f) } // Comment out PV //// RegisterPV registers PV -//func (r *Registry) RegisterPV(meta
v1.ObjectMeta) { +//func (r *Registry) RegisterPV(meta meta.ObjectMeta) { // r.registerEntity(PV, meta) //} // //// HasPV checks whether registry has specified PV -//func (r *Registry) HasPV(meta v1.ObjectMeta) bool { +//func (r *Registry) HasPV(meta meta.ObjectMeta) bool { // return r.hasEntity(PV, meta) //} // @@ -278,17 +278,17 @@ func (r *Registry) WalkPVC(f func(meta v1.ObjectMeta)) { //} // //// WalkPV walks over specified entity types -//func (r *Registry) WalkPV(f func(meta v1.ObjectMeta)) { +//func (r *Registry) WalkPV(f func(meta meta.ObjectMeta)) { // r.WalkEntityType(PV, f) //} // RegisterPDB registers PDB -func (r *Registry) RegisterPDB(meta v1.ObjectMeta) { +func (r *Registry) RegisterPDB(meta meta.ObjectMeta) { r.registerEntity(PDB, meta) } // HasPDB checks whether registry has specified PDB -func (r *Registry) HasPDB(meta v1.ObjectMeta) bool { +func (r *Registry) HasPDB(meta meta.ObjectMeta) bool { return r.hasEntity(PDB, meta) } @@ -298,7 +298,7 @@ func (r *Registry) NumPDB() int { } // WalkPDB walks over specified entity types -func (r *Registry) WalkPDB(f func(meta v1.ObjectMeta)) { +func (r *Registry) WalkPDB(f func(meta meta.ObjectMeta)) { r.WalkEntityType(PDB, f) } @@ -313,7 +313,7 @@ func (r *Registry) Subtract(sub *Registry) *Registry { return r } - sub.Walk(func(entityType EntityType, entity v1.ObjectMeta) { + sub.Walk(func(entityType EntityType, entity meta.ObjectMeta) { r.deleteEntity(entityType, entity) }) @@ -321,7 +321,7 @@ func (r *Registry) Subtract(sub *Registry) *Registry { } // hasEntity -func (r *Registry) hasEntity(entityType EntityType, meta v1.ObjectMeta) bool { +func (r *Registry) hasEntity(entityType EntityType, meta meta.ObjectMeta) bool { // Try to minimize coarse grained locking at the registry level. Immediately getOrCreate for the entity type // and then begin operating on that (it uses a finer grained lock). setForType := r.ensureObjectSetForType(entityType) @@ -331,12 +331,12 @@ func (r *Registry) hasEntity(entityType EntityType, meta v1.ObjectMeta) bool { } // isEqual -func (r *Registry) isEqual(a, b v1.ObjectMeta) bool { +func (r *Registry) isEqual(a, b meta.ObjectMeta) bool { return (a.Namespace == b.Namespace) && (a.Name == b.Name) } // deleteEntity -func (r *Registry) deleteEntity(entityType EntityType, meta v1.ObjectMeta) bool { +func (r *Registry) deleteEntity(entityType EntityType, meta meta.ObjectMeta) bool { // Try to minimize coarse grained locking at the registry level. Immediately getOrCreate for the entity type // and then begin operating on that (it uses a finer grained lock).
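Since isEqual above and the objectMetaIdentity key both reduce equality to namespace plus name, the set semantics are easy to reproduce in isolation. A self-contained sketch of that identity-keyed map idea (the types here are illustrative, not the operator's):

package main

import "fmt"

type identity struct{ namespace, name string }

func main() {
	set := map[identity]struct{}{}
	set[identity{"default", "chi-demo"}] = struct{}{}

	// Same namespace/name means the same entity, regardless of labels or annotations.
	_, ok := set[identity{"default", "chi-demo"}]
	fmt.Println(ok) // true
	_, ok = set[identity{"other", "chi-demo"}]
	fmt.Println(ok) // false
}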
setForType := r.ensureObjectSetForType(entityType) @@ -373,7 +373,7 @@ func (r *Registry) ensureObjectSetForType(entityType EntityType) *objectMetaSet } // objIdentity derives an objectMetaIdentity from an ObjectMeta -func (s *objectMetaSet) objIdentity(obj *v1.ObjectMeta) objectMetaIdentity { +func (s *objectMetaSet) objIdentity(obj *meta.ObjectMeta) objectMetaIdentity { return objectMetaIdentity{ name: obj.Name, namespace: obj.Namespace, @@ -381,7 +381,7 @@ func (s *objectMetaSet) objIdentity(obj *v1.ObjectMeta) objectMetaIdentity { } // maybeAdd adds an ObjectMeta to the set if an object with an equivalent identity is not already present -func (s *objectMetaSet) maybeAdd(o *v1.ObjectMeta) bool { +func (s *objectMetaSet) maybeAdd(o *meta.ObjectMeta) bool { s.Lock() defer s.Unlock() if _, ok := s.contents[s.objIdentity(o)]; ok { @@ -392,7 +392,7 @@ func (s *objectMetaSet) maybeAdd(o *v1.ObjectMeta) bool { } // remove deletes an ObjectMeta from the set, matching only on identity -func (s *objectMetaSet) remove(o *v1.ObjectMeta) bool { +func (s *objectMetaSet) remove(o *meta.ObjectMeta) bool { s.Lock() defer s.Unlock() if _, ok := s.contents[s.objIdentity(o)]; !ok { @@ -403,7 +403,7 @@ func (s *objectMetaSet) remove(o *v1.ObjectMeta) bool { } // contains determines if an ObjectMeta exists in the set (based on identity only) -func (s *objectMetaSet) contains(o *v1.ObjectMeta) bool { +func (s *objectMetaSet) contains(o *meta.ObjectMeta) bool { s.RLock() defer s.RUnlock() _, ok := s.contents[s.objIdentity(o)] @@ -413,7 +413,7 @@ func (s *objectMetaSet) contains(o *v1.ObjectMeta) bool { // walk provides iterator-like access to the ObjectMetas contained in the set // Note that this function is not safe to call recursively, due to the RWLock usage. // This seems unlikely to be a problem. -func (s *objectMetaSet) walk(f func(meta v1.ObjectMeta)) { +func (s *objectMetaSet) walk(f func(meta meta.ObjectMeta)) { s.RLock() defer s.RUnlock() @@ -434,6 +434,6 @@ func (s *objectMetaSet) len() int { func newObjectMetaSet(entityType EntityType) *objectMetaSet { return &objectMetaSet{ entityType: entityType, - contents: make(map[objectMetaIdentity]v1.ObjectMeta), + contents: make(map[objectMetaIdentity]meta.ObjectMeta), } } diff --git a/pkg/model/chi/volumer.go b/pkg/model/chi/volumer.go index 3c1d4e316..5ed0dd178 100644 --- a/pkg/model/chi/volumer.go +++ b/pkg/model/chi/volumer.go @@ -15,12 +15,12 @@ package chi import ( - coreV1 "k8s.io/api/core/v1" + core "k8s.io/api/core/v1" - chop "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" + api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" ) -func (c *Creator) getVolumeClaimTemplate(volumeMount *coreV1.VolumeMount) (*chop.ChiVolumeClaimTemplate, bool) { +func (c *Creator) getVolumeClaimTemplate(volumeMount *core.VolumeMount) (*api.ChiVolumeClaimTemplate, bool) { volumeClaimTemplateName := volumeMount.Name volumeClaimTemplate, ok := c.chi.GetVolumeClaimTemplate(volumeClaimTemplateName) // Sometimes it is impossible to find VolumeClaimTemplate related to specified volumeMount.
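The getVolumeClaimTemplate shape above is a plain name-to-template resolution with a found flag, so a missing template is a normal, non-fatal outcome for the caller. A standalone sketch of that lookup pattern (the map stands in for the CHI's volume claim templates; all names here are illustrative):

package main

import "fmt"

type volumeClaimTemplate struct{ storageClass string }

// lookupTemplate mirrors the volumeMount.Name -> template resolution:
// absence is signalled by ok == false rather than by an error.
func lookupTemplate(templates map[string]volumeClaimTemplate, mountName string) (volumeClaimTemplate, bool) {
	t, ok := templates[mountName]
	return t, ok
}

func main() {
	templates := map[string]volumeClaimTemplate{
		"data-volume": {storageClass: "standard"},
	}
	if t, ok := lookupTemplate(templates, "data-volume"); ok {
		fmt.Println(t.storageClass) // standard
	}
	if _, ok := lookupTemplate(templates, "logs-volume"); !ok {
		fmt.Println("no template for this mount; skip") // the non-fatal path
	}
}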
@@ -28,7 +28,7 @@ func (c *Creator) getVolumeClaimTemplate(volumeMount *coreV1.VolumeMount) (*chop return volumeClaimTemplate, ok } -func GetVolumeClaimTemplate(host *chop.ChiHost, volumeMount *coreV1.VolumeMount) (*chop.ChiVolumeClaimTemplate, bool) { +func GetVolumeClaimTemplate(host *api.ChiHost, volumeMount *core.VolumeMount) (*api.ChiVolumeClaimTemplate, bool) { volumeClaimTemplateName := volumeMount.Name volumeClaimTemplate, ok := host.CHI.GetVolumeClaimTemplate(volumeClaimTemplateName) // Sometimes it is impossible to find VolumeClaimTemplate related to specified volumeMount. @@ -36,34 +36,34 @@ func GetVolumeClaimTemplate(host *chop.ChiHost, volumeMount *coreV1.VolumeMount) return volumeClaimTemplate, ok } -func getPVCReclaimPolicy(host *chop.ChiHost, template *chop.ChiVolumeClaimTemplate) chop.PVCReclaimPolicy { +func getPVCReclaimPolicy(host *api.ChiHost, template *api.ChiVolumeClaimTemplate) api.PVCReclaimPolicy { // Order by priority // VolumeClaimTemplate.PVCReclaimPolicy, in case specified - if template.PVCReclaimPolicy != chop.PVCReclaimPolicyUnspecified { + if template.PVCReclaimPolicy != api.PVCReclaimPolicyUnspecified { return template.PVCReclaimPolicy } - if host.CHI.Spec.Defaults.StorageManagement.PVCReclaimPolicy != chop.PVCReclaimPolicyUnspecified { + if host.CHI.Spec.Defaults.StorageManagement.PVCReclaimPolicy != api.PVCReclaimPolicyUnspecified { return host.CHI.Spec.Defaults.StorageManagement.PVCReclaimPolicy } // Default value - return chop.PVCReclaimPolicyDelete + return api.PVCReclaimPolicyDelete } -func getPVCProvisioner(host *chop.ChiHost, template *chop.ChiVolumeClaimTemplate) chop.PVCProvisioner { +func getPVCProvisioner(host *api.ChiHost, template *api.ChiVolumeClaimTemplate) api.PVCProvisioner { // Order by priority // VolumeClaimTemplate.PVCProvisioner, in case specified - if template.PVCProvisioner != chop.PVCProvisionerUnspecified { + if template.PVCProvisioner != api.PVCProvisionerUnspecified { return template.PVCProvisioner } - if host.CHI.Spec.Defaults.StorageManagement.PVCProvisioner != chop.PVCProvisionerUnspecified { + if host.CHI.Spec.Defaults.StorageManagement.PVCProvisioner != api.PVCProvisionerUnspecified { return host.CHI.Spec.Defaults.StorageManagement.PVCProvisioner } // Default value - return chop.PVCProvisionerStatefulSet + return api.PVCProvisionerStatefulSet } diff --git a/pkg/model/chk/chk_config_generator.go b/pkg/model/chk/chk_config_generator.go index 7afbd8186..42e474fde 100644 --- a/pkg/model/chk/chk_config_generator.go +++ b/pkg/model/chk/chk_config_generator.go @@ -17,11 +17,12 @@ package chk import ( "bytes" "fmt" + "strings" + apiChk "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse-keeper.altinity.com/v1" apiChi "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" "github.com/altinity/clickhouse-operator/pkg/util" "github.com/altinity/clickhouse-operator/pkg/xml" - "strings" ) func defaultKeeperSettings(path string) *apiChi.Settings { diff --git a/pkg/model/chk/normalizer.go b/pkg/model/chk/normalizer.go index 6c2af178c..978a2516e 100644 --- a/pkg/model/chk/normalizer.go +++ b/pkg/model/chk/normalizer.go @@ -17,11 +17,12 @@ package chk import ( "strings" + core "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + apiChk "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse-keeper.altinity.com/v1" apiChi "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" "github.com/altinity/clickhouse-operator/pkg/model/chi" - core "k8s.io/api/core/v1" - meta 
"k8s.io/apimachinery/pkg/apis/meta/v1" ) // NormalizerContext specifies CHI-related normalization context diff --git a/pkg/model/chk/templates.go b/pkg/model/chk/templates.go index d80dd5360..f5dd6486f 100644 --- a/pkg/model/chk/templates.go +++ b/pkg/model/chk/templates.go @@ -15,19 +15,20 @@ package chk import ( - api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse-keeper.altinity.com/v1" + core "k8s.io/api/core/v1" + + apiChk "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse-keeper.altinity.com/v1" apiChi "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" - v1 "k8s.io/api/core/v1" ) -func getPodTemplate(chk *api.ClickHouseKeeperInstallation) apiChi.ChiPodTemplate { +func getPodTemplate(chk *apiChk.ClickHouseKeeperInstallation) apiChi.ChiPodTemplate { if len(chk.Spec.GetTemplates().GetPodTemplates()) < 1 { return apiChi.ChiPodTemplate{} } return chk.Spec.GetTemplates().GetPodTemplates()[0] } -func getPodTemplateAnnotations(chk *api.ClickHouseKeeperInstallation) map[string]string { +func getPodTemplateAnnotations(chk *apiChk.ClickHouseKeeperInstallation) map[string]string { if len(chk.Spec.GetTemplates().GetPodTemplates()) < 1 { return nil } @@ -35,7 +36,7 @@ func getPodTemplateAnnotations(chk *api.ClickHouseKeeperInstallation) map[string return getPodTemplate(chk).ObjectMeta.Annotations } -func getPodTemplateLabels(chk *api.ClickHouseKeeperInstallation) map[string]string { +func getPodTemplateLabels(chk *apiChk.ClickHouseKeeperInstallation) map[string]string { if len(chk.Spec.GetTemplates().GetPodTemplates()) < 1 { return nil } @@ -43,9 +44,9 @@ func getPodTemplateLabels(chk *api.ClickHouseKeeperInstallation) map[string]stri return getPodTemplate(chk).ObjectMeta.Labels } -func getVolumeClaimTemplates(chk *api.ClickHouseKeeperInstallation) (claims []v1.PersistentVolumeClaim) { +func getVolumeClaimTemplates(chk *apiChk.ClickHouseKeeperInstallation) (claims []core.PersistentVolumeClaim) { for _, template := range chk.Spec.GetTemplates().GetVolumeClaimTemplates() { - pvc := v1.PersistentVolumeClaim{ + pvc := core.PersistentVolumeClaim{ ObjectMeta: template.ObjectMeta, Spec: template.Spec, } diff --git a/pkg/model/clickhouse/connection_params_cluster.go b/pkg/model/clickhouse/connection_params_cluster.go index 21f66aa68..65bd42e18 100644 --- a/pkg/model/clickhouse/connection_params_cluster.go +++ b/pkg/model/clickhouse/connection_params_cluster.go @@ -15,7 +15,7 @@ package clickhouse import ( - v1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" + api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1" ) // ClusterConnectionParams represents connection parameters to the whole cluster @@ -34,7 +34,7 @@ func NewClusterConnectionParams(scheme, username, password, rootCA string, port // NewClusterConnectionParamsFromCHOpConfig is the same as NewClusterConnectionParams, but works with // CHOp config to get parameters from -func NewClusterConnectionParamsFromCHOpConfig(config *v1.OperatorConfig) *ClusterConnectionParams { +func NewClusterConnectionParamsFromCHOpConfig(config *api.OperatorConfig) *ClusterConnectionParams { params := NewClusterConnectionParams( config.ClickHouse.Access.Scheme, config.ClickHouse.Access.Username,