From 43365b0e82d8abdc016d0d5e7ba3a1b22e50bad6 Mon Sep 17 00:00:00 2001 From: "alexandr.zimin@flant.com" Date: Mon, 10 Jun 2024 00:12:29 +0300 Subject: [PATCH 01/21] some fixes --- .gitignore | 3 + crds/cephcluster.yaml | 87 +++ crds/cephstorageclass.yaml | 156 +++++ images/controller/Dockerfile | 17 + .../controller/api/v1alpha1/ceph_cluster.go | 46 ++ .../api/v1alpha1/ceph_storage_class.go | 62 ++ images/controller/api/v1alpha1/register.go | 51 ++ .../api/v1alpha1/zz_generated.deepcopy.go | 137 +++++ images/controller/cmd/main.go | 131 +++++ images/controller/go.mod | 71 +++ images/controller/go.sum | 206 +++++++ images/controller/pkg/config/config.go | 72 +++ .../controller/ceph_storage_class_watcher.go | 206 +++++++ .../pkg/controller/controller_suite_test.go | 66 +++ images/controller/pkg/controller/good_func.go | 549 ++++++++++++++++++ images/controller/pkg/kubutils/kubernetes.go | 35 ++ images/controller/pkg/logger/logger.go | 84 +++ templates/cephfs/storage-classes.yaml | 37 -- templates/cephfs/volume-snapshot-class.yaml | 21 - templates/controller/deployment.yaml | 95 +++ templates/controller/rbac-for-us.yaml | 110 ++++ templates/rbd/storage-classes.yaml | 36 -- templates/rbd/volume-snapshot-class.yaml | 22 - templates/secret.yaml | 16 - 24 files changed, 2184 insertions(+), 132 deletions(-) create mode 100644 crds/cephcluster.yaml create mode 100644 crds/cephstorageclass.yaml create mode 100644 images/controller/Dockerfile create mode 100644 images/controller/api/v1alpha1/ceph_cluster.go create mode 100644 images/controller/api/v1alpha1/ceph_storage_class.go create mode 100644 images/controller/api/v1alpha1/register.go create mode 100644 images/controller/api/v1alpha1/zz_generated.deepcopy.go create mode 100644 images/controller/cmd/main.go create mode 100644 images/controller/go.mod create mode 100644 images/controller/go.sum create mode 100644 images/controller/pkg/config/config.go create mode 100644 images/controller/pkg/controller/ceph_storage_class_watcher.go create mode 100644 images/controller/pkg/controller/controller_suite_test.go create mode 100644 images/controller/pkg/controller/good_func.go create mode 100644 images/controller/pkg/kubutils/kubernetes.go create mode 100644 images/controller/pkg/logger/logger.go delete mode 100644 templates/cephfs/storage-classes.yaml delete mode 100644 templates/cephfs/volume-snapshot-class.yaml create mode 100644 templates/controller/deployment.yaml create mode 100644 templates/controller/rbac-for-us.yaml delete mode 100644 templates/rbd/storage-classes.yaml delete mode 100644 templates/rbd/volume-snapshot-class.yaml delete mode 100644 templates/secret.yaml diff --git a/.gitignore b/.gitignore index 4fd25ed..dcabfa3 100644 --- a/.gitignore +++ b/.gitignore @@ -32,3 +32,6 @@ __pycache__/ *.py[cod] *$py.class .pytest_cache/ + +# dev +images/controller/Makefile diff --git a/crds/cephcluster.yaml b/crds/cephcluster.yaml new file mode 100644 index 0000000..14a7997 --- /dev/null +++ b/crds/cephcluster.yaml @@ -0,0 +1,87 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: cephclusters.storage.deckhouse.io + labels: + heritage: deckhouse + module: csi-ceph +spec: + group: storage.deckhouse.io + scope: Cluster + names: + plural: cephclusters + singular: cephcluster + kind: CephCluster + preserveUnknownFields: false + versions: + - name: v1alpha1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + description: | + Ceph cluster connection parameters. 
+ required: + - spec + properties: + spec: + type: object + required: + - clusterID + - userID + - userKey + - monitors + properties: + clusterID: + description: | + Ceph cluster FSID/UUID. + + Use `ceph fsid` to get Ceph cluster FSID/UUID. + type: string + userID: + description: | + Username without `client.`. + type: string + userKey: + description: | + Ceph auth key corresponding to the `userID`. + type: string + monitors: + description: | + List of ceph-mon IP addresses in the format `10.0.0.10:6789`. + type: array + items: + type: string + status: + type: object + description: | + Displays current information about the resources managed by the CephCluster custom resource. + properties: + phase: + type: string + description: | + The current state of resources managed by the CephCluster custom resource. Might be: + - Failed (if the controller received incorrect resource configuration or some errors occurred during the operation) + - Create (if everything went fine) + enum: + - Failed + - Created + reason: + type: string + description: | + Additional information about the resources managed by the CephCluster custom resource. + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .status.reason + name: Reason + type: string + priority: 1 + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + description: The age of this resource diff --git a/crds/cephstorageclass.yaml b/crds/cephstorageclass.yaml new file mode 100644 index 0000000..3aeed71 --- /dev/null +++ b/crds/cephstorageclass.yaml @@ -0,0 +1,156 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: cephstorageclasses.storage.deckhouse.io + labels: + heritage: deckhouse + module: csi-ceph +spec: + group: storage.deckhouse.io + scope: Cluster + names: + plural: cephstorageclasses + singular: cephstorageclass + kind: CephStorageClass + preserveUnknownFields: false + versions: + - name: v1alpha1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + description: | + CephStorageClass is a Kubernetes Custom Resource that defines a configuration for a Kubernetes Storage class. + required: + - spec + properties: + spec: + type: object + required: + - clusterName + - allowVolumeExpansion + - pool + - reclaimPolicy + - type + oneOf: + - required: + - rbd + - required: + - cephfs + properties: + clusterName: + description: | + Name of the CephCluster custom resource. + type: string + x-kubernetes-validations: + - rule: self == oldSelf + message: Value is immutable. + minLength: 1 + allowVolumeExpansion: + description: | + AllowVolumeExpansion is a flag that enables or disables volume expansion for the storage class. + type: boolean + x-kubernetes-validations: + - rule: self == oldSelf + message: Value is immutable. + pool: + description: | + Name of the Ceph pool. + type: string + x-kubernetes-validations: + - rule: self == oldSelf + message: Value is immutable. + minLength: 1 + reclaimPolicy: + description: | + The storage class's reclaim policy. Might be: + - Delete (If the Persistent Volume Claim is deleted, deletes the Persistent Volume and its associated storage as well) + - Retain (If the Persistent Volume Claim is deleted, remains the Persistent Volume and its associated storage) + type: string + x-kubernetes-validations: + - rule: self == oldSelf + message: Value is immutable. + enum: + - Delete + - Retain + type: + description: | + The type of the storage class. 
Might be: + - cephfs (CephFS) + - rbd (Rados Block Device) + type: string + x-kubernetes-validations: + - rule: self == oldSelf + message: Value is immutable. + enum: + - cephfs + - rbd + cephfs: + type: object + description: | + CephFS specific parameters. + required: + - fsName + properties: + fsName: + description: | + Name of the CephFS file system. + type: string + x-kubernetes-validations: + - rule: self == oldSelf + message: Value is immutable. + minLength: 1 + rbd: + type: object + description: | + Rados Block Device specific parameters. + required: + - defaultFSType + properties: + defaultFSType: + description: | + Default file system type for the Rados Block Device. + type: string + x-kubernetes-validations: + - rule: self == oldSelf + message: Value is immutable. + minLength: 1 + mountOptions: + description: | + Mount options. + type: array + items: + type: string + status: + type: object + description: | + Displays current information about the Storage Class. + properties: + phase: + type: string + description: | + The Storage class current state. Might be: + - Failed (if the controller received incorrect resource configuration or some errors occurred during the operation) + - Create (if everything went fine) + enum: + - Failed + - Created + reason: + type: string + description: | + Additional information about the current state of the Storage Class. + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .status.reason + name: Reason + type: string + priority: 1 + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + description: The age of this resource diff --git a/images/controller/Dockerfile b/images/controller/Dockerfile new file mode 100644 index 0000000..a66fefe --- /dev/null +++ b/images/controller/Dockerfile @@ -0,0 +1,17 @@ +ARG BASE_SCRATCH=registry.deckhouse.io/base_images/scratch@sha256:b054705fcc9f2205777d80a558d920c0b4209efdc3163c22b5bfcb5dda1db5fc +ARG BASE_GOLANG_ALPINE_BUILDER=registry.deckhouse.io/base_images/golang:1.22.3-alpine@sha256:dbf216b880b802c22e3f4f2ef0a78396b4a9a6983cb9b767c5efc351ebf946b0 + +FROM $BASE_GOLANG_ALPINE_BUILDER as builder + +WORKDIR /go/src +ADD go.mod . +ADD go.sum . +RUN go mod download +COPY . . +WORKDIR /go/src/cmd +RUN GOOS=linux GOARCH=amd64 go build -o controller + +FROM --platform=linux/amd64 $BASE_SCRATCH +COPY --from=builder /go/src/cmd/controller /go/src/cmd/controller + +ENTRYPOINT ["/go/src/cmd/controller"] diff --git a/images/controller/api/v1alpha1/ceph_cluster.go b/images/controller/api/v1alpha1/ceph_cluster.go new file mode 100644 index 0000000..558da97 --- /dev/null +++ b/images/controller/api/v1alpha1/ceph_cluster.go @@ -0,0 +1,46 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type CephCluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec CephClusterSpec `json:"spec"` + Status *CephClusterStatus `json:"status,omitempty"` +} + +type CephClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []CephCluster `json:"items"` +} + +type CephClusterSpec struct { + ClusterID string `json:"clusterID"` + UserID string `json:"userID"` + UserKey string `json:"userKey"` + Monitors []string `json:"monitors"` +} + +type CephClusterStatus struct { + Phase string `json:"phase,omitempty"` + Reason string `json:"reason,omitempty"` +} diff --git a/images/controller/api/v1alpha1/ceph_storage_class.go b/images/controller/api/v1alpha1/ceph_storage_class.go new file mode 100644 index 0000000..eba164d --- /dev/null +++ b/images/controller/api/v1alpha1/ceph_storage_class.go @@ -0,0 +1,62 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +const ( + CephStorageClassTypeRBD = "rbd" + CephStorageClassTypeCephFS = "cephfs" +) + +type CephStorageClass struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec CephStorageClassSpec `json:"spec"` + Status *CephStorageClassStatus `json:"status,omitempty"` +} + +// CephStorageClassList contains a list of empty block device +type CephStorageClassList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []CephStorageClass `json:"items"` +} + +type CephStorageClassSpec struct { + ClusterName string `json:"clusterName"` + ReclaimPolicy string `json:"reclaimPolicy"` + AllowVolumeExpansion string `json:"allowVolumeExpansion"` + Pool string `json:"pool"` + Type string `json:"type"` + RBD *CephStorageClassRBD `json:"rbd,omitempty"` + CephFS *CephStorageClassCephFS `json:"cephfs,omitempty"` + MountOptions []string `json:"mountOptions,omitempty"` +} + +type CephStorageClassRBD struct { + DefaultFSType string `json:"defaultFSType"` +} + +type CephStorageClassCephFS struct { + FSName string `json:"fsName,omitempty"` +} + +type CephStorageClassStatus struct { + Phase string `json:"phase,omitempty"` + Reason string `json:"reason,omitempty"` +} diff --git a/images/controller/api/v1alpha1/register.go b/images/controller/api/v1alpha1/register.go new file mode 100644 index 0000000..fd792db --- /dev/null +++ b/images/controller/api/v1alpha1/register.go @@ -0,0 +1,51 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + CephStorageClassKind = "CephStorageClass" + APIGroup = "storage.deckhouse.io" + APIVersion = "v1alpha1" +) + +// SchemeGroupVersion is group version used to register these objects +var ( + SchemeGroupVersion = schema.GroupVersion{ + Group: APIGroup, + Version: APIVersion, + } + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &CephStorageClass{}, + &CephStorageClassList{}, + &CephCluster{}, + &CephClusterList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/images/controller/api/v1alpha1/zz_generated.deepcopy.go b/images/controller/api/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..299e38d --- /dev/null +++ b/images/controller/api/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,137 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import "k8s.io/apimachinery/pkg/runtime" + +// CephStorageClass + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephStorageClass) DeepCopyInto(out *CephStorageClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmptyBlockDevice. +func (in *CephStorageClass) DeepCopy() *CephStorageClass { + if in == nil { + return nil + } + out := new(CephStorageClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CephStorageClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephStorageClassList) DeepCopyInto(out *CephStorageClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CephStorageClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GuestbookList. +func (in *CephStorageClassList) DeepCopy() *CephStorageClassList { + if in == nil { + return nil + } + out := new(CephStorageClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *CephStorageClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// CephCluster +func (in *CephCluster) DeepCopyInto(out *CephCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmptyBlockDevice. +func (in *CephCluster) DeepCopy() *CephCluster { + if in == nil { + return nil + } + out := new(CephCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CephCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephClusterList) DeepCopyInto(out *CephClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CephCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GuestbookList. +func (in *CephClusterList) DeepCopy() *CephClusterList { + if in == nil { + return nil + } + out := new(CephClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CephClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/images/controller/cmd/main.go b/images/controller/cmd/main.go new file mode 100644 index 0000000..b6a073f --- /dev/null +++ b/images/controller/cmd/main.go @@ -0,0 +1,131 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "context" + "d8-controller/api/v1alpha1" + "d8-controller/pkg/config" + "d8-controller/pkg/controller" + "d8-controller/pkg/kubutils" + "d8-controller/pkg/logger" + "fmt" + "os" + goruntime "runtime" + + "sigs.k8s.io/controller-runtime/pkg/cache" + + v1 "k8s.io/api/core/v1" + sv1 "k8s.io/api/storage/v1" + extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/runtime" + apiruntime "k8s.io/apimachinery/pkg/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/healthz" + "sigs.k8s.io/controller-runtime/pkg/manager" +) + +var ( + resourcesSchemeFuncs = []func(*apiruntime.Scheme) error{ + v1alpha1.AddToScheme, + clientgoscheme.AddToScheme, + extv1.AddToScheme, + v1.AddToScheme, + sv1.AddToScheme, + } +) + +func main() { + ctx := context.Background() + cfgParams := config.NewConfig() + + log, err := logger.NewLogger(cfgParams.Loglevel) + if err != nil { + fmt.Println(fmt.Sprintf("unable to create NewLogger, err: %v", err)) + os.Exit(1) + } + + log.Info(fmt.Sprintf("[main] Go Version:%s ", goruntime.Version())) + log.Info(fmt.Sprintf("[main] OS/Arch:Go OS/Arch:%s/%s ", goruntime.GOOS, goruntime.GOARCH)) + + log.Info("[main] CfgParams has been successfully created") + log.Info(fmt.Sprintf("[main] %s = %s", config.LogLevelEnvName, cfgParams.Loglevel)) + log.Info(fmt.Sprintf("[main] RequeueStorageClassInterval = %d", cfgParams.RequeueStorageClassInterval)) + + kConfig, err := kubutils.KubernetesDefaultConfigCreate() + if err != nil { + log.Error(err, "[main] unable to KubernetesDefaultConfigCreate") + } + log.Info("[main] kubernetes config has been successfully created.") + + scheme := runtime.NewScheme() + for _, f := range resourcesSchemeFuncs { + err := f(scheme) + if err != nil { + log.Error(err, "[main] unable to add scheme to func") + os.Exit(1) + } + } + log.Info("[main] successfully read scheme CR") + + cacheOpt := cache.Options{ + DefaultNamespaces: map[string]cache.Config{ + cfgParams.ControllerNamespace: {}, + }, + } + + managerOpts := manager.Options{ + Scheme: scheme, + Cache: cacheOpt, + //MetricsBindAddress: cfgParams.MetricsPort, + HealthProbeBindAddress: cfgParams.HealthProbeBindAddress, + LeaderElection: true, + LeaderElectionNamespace: cfgParams.ControllerNamespace, + LeaderElectionID: config.ControllerName, + Logger: log.GetLogger(), + } + + mgr, err := manager.New(kConfig, managerOpts) + if err != nil { + log.Error(err, "[main] unable to manager.New") + os.Exit(1) + } + log.Info("[main] successfully created kubernetes manager") + + if _, err = controller.RunCephStorageClassWatcherController(mgr, *cfgParams, *log); err != nil { + log.Error(err, fmt.Sprintf("[main] unable to run %s", controller.CephStorageClassCtrlName)) + os.Exit(1) + } + + if err = mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { + log.Error(err, "[main] unable to mgr.AddHealthzCheck") + os.Exit(1) + } + log.Info("[main] successfully AddHealthzCheck") + + if err = mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { + log.Error(err, "[main] unable to mgr.AddReadyzCheck") + os.Exit(1) + } + log.Info("[main] successfully AddReadyzCheck") + + err = mgr.Start(ctx) + if err != nil { + log.Error(err, "[main] unable to mgr.Start") + os.Exit(1) + } +} diff --git a/images/controller/go.mod b/images/controller/go.mod new file mode 100644 index 0000000..5ce50e4 --- /dev/null +++ b/images/controller/go.mod @@ -0,0 +1,71 @@ +module d8-controller + +go 1.22 + +require ( + github.com/go-logr/logr 
v1.4.1 + github.com/onsi/ginkgo/v2 v2.14.0 + github.com/onsi/gomega v1.30.0 + k8s.io/api v0.29.2 + k8s.io/apiextensions-apiserver v0.29.2 + k8s.io/apimachinery v0.29.2 + k8s.io/client-go v0.29.2 + k8s.io/klog/v2 v2.120.1 + k8s.io/utils v0.0.0-20240102154912-e7106e64919e + sigs.k8s.io/controller-runtime v0.17.5 +) + +require ( + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/evanphx/json-patch v4.12.0+incompatible // indirect + github.com/evanphx/json-patch/v5 v5.8.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/imdario/mergo v0.3.6 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/prometheus/client_golang v1.18.0 // indirect + github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/common v0.45.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect + golang.org/x/net v0.19.0 // indirect + golang.org/x/oauth2 v0.12.0 // indirect + golang.org/x/sys v0.16.0 // indirect + golang.org/x/term v0.15.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.16.1 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/protobuf v1.31.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/component-base v0.29.2 // indirect + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect +) diff --git a/images/controller/go.sum b/images/controller/go.sum new file mode 100644 index 0000000..b1f77a7 --- /dev/null +++ b/images/controller/go.sum @@ -0,0 +1,206 @@ +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.8.0 h1:lRj6N9Nci7MvzrXuX6HFzU8XjmhPiXPlsKEy1u0KQro= +github.com/evanphx/json-patch/v5 v5.8.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/go-cmp v0.5.5/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= +github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/onsi/ginkgo/v2 v2.14.0 
h1:vSmGj2Z5YPb9JwCWT6z6ihcUvDhuXLc3sJiqd3jMKAY= +github.com/onsi/ginkgo/v2 v2.14.0/go.mod h1:JkUdW7JkN0V6rFvsHcJ478egV3XH9NxpD27Hal/PhZw= +github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= +github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= +github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= +github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto 
v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4= +golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= +golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= 
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.29.2 h1:hBC7B9+MU+ptchxEqTNW2DkUosJpp1P+Wn6YncZ474A= +k8s.io/api v0.29.2/go.mod h1:sdIaaKuU7P44aoyyLlikSLayT6Vb7bvJNCX105xZXY0= +k8s.io/apiextensions-apiserver v0.29.2 h1:UK3xB5lOWSnhaCk0RFZ0LUacPZz9RY4wi/yt2Iu+btg= +k8s.io/apiextensions-apiserver v0.29.2/go.mod h1:aLfYjpA5p3OwtqNXQFkhJ56TB+spV8Gc4wfMhUA3/b8= +k8s.io/apimachinery v0.29.2 h1:EWGpfJ856oj11C52NRCHuU7rFDwxev48z+6DSlGNsV8= +k8s.io/apimachinery v0.29.2/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= +k8s.io/client-go v0.29.2 h1:FEg85el1TeZp+/vYJM7hkDlSTFZ+c5nnK44DJ4FyoRg= +k8s.io/client-go v0.29.2/go.mod h1:knlvFZE58VpqbQpJNbCbctTVXcd35mMyAAwBdpt4jrA= +k8s.io/component-base v0.29.2 h1:lpiLyuvPA9yV1aQwGLENYyK7n/8t6l3nn3zAtFTJYe8= +k8s.io/component-base v0.29.2/go.mod h1:BfB3SLrefbZXiBfbM+2H1dlat21Uewg/5qtKOl8degM= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ= +k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.17.4 
h1:AMf1E0+93/jLQ13fb76S6Atwqp24EQFCmNbG84GJxew= +sigs.k8s.io/controller-runtime v0.17.4/go.mod h1:N0jpP5Lo7lMTF9aL56Z/B2oWBJjey6StQM0jRbKQXtY= +sigs.k8s.io/controller-runtime v0.17.5 h1:1FI9Lm7NiOOmBsgTV36/s2XrEFXnO2C4sbg/Zme72Rw= +sigs.k8s.io/controller-runtime v0.17.5/go.mod h1:N0jpP5Lo7lMTF9aL56Z/B2oWBJjey6StQM0jRbKQXtY= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/images/controller/pkg/config/config.go b/images/controller/pkg/config/config.go new file mode 100644 index 0000000..d72572c --- /dev/null +++ b/images/controller/pkg/config/config.go @@ -0,0 +1,72 @@ +/* +Copyright 2024 Flant JSC +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "d8-controller/pkg/logger" + "log" + "os" + "time" +) + +const ( + LogLevelEnvName = "LOG_LEVEL" + ControllerNamespaceEnv = "CONTROLLER_NAMESPACE" + HardcodedControllerNS = "d8-csi-ceph" + ControllerName = "d8-controller" + DefaultHealthProbeBindAddressEnvName = "HEALTH_PROBE_BIND_ADDRESS" + DefaultHealthProbeBindAddress = ":8081" + DefaultRequeueStorageClassInterval = 10 +) + +type Options struct { + Loglevel logger.Verbosity + RequeueStorageClassInterval time.Duration + HealthProbeBindAddress string + ControllerNamespace string +} + +func NewConfig() *Options { + var opts Options + + loglevel := os.Getenv(LogLevelEnvName) + if loglevel == "" { + opts.Loglevel = logger.DebugLevel + } else { + opts.Loglevel = logger.Verbosity(loglevel) + } + + opts.HealthProbeBindAddress = os.Getenv(DefaultHealthProbeBindAddressEnvName) + if opts.HealthProbeBindAddress == "" { + opts.HealthProbeBindAddress = DefaultHealthProbeBindAddress + } + + opts.ControllerNamespace = os.Getenv(ControllerNamespaceEnv) + if opts.ControllerNamespace == "" { + + namespace, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") + if err != nil { + log.Printf("Failed to get namespace from filesystem: %v", err) + log.Printf("Using hardcoded namespace: %s", HardcodedControllerNS) + opts.ControllerNamespace = HardcodedControllerNS + } else { + log.Printf("Got namespace from filesystem: %s", string(namespace)) + opts.ControllerNamespace = string(namespace) + } + } + + opts.RequeueStorageClassInterval = DefaultRequeueStorageClassInterval + + return &opts +} diff --git a/images/controller/pkg/controller/ceph_storage_class_watcher.go b/images/controller/pkg/controller/ceph_storage_class_watcher.go new file mode 100644 index 0000000..1e70f99 --- /dev/null +++ b/images/controller/pkg/controller/ceph_storage_class_watcher.go @@ -0,0 +1,206 @@ +/* 
+Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + v1alpha1 "d8-controller/api/v1alpha1" + "d8-controller/pkg/config" + "d8-controller/pkg/logger" + "errors" + "fmt" + "reflect" + "time" + + v1 "k8s.io/api/storage/v1" + k8serr "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + "sigs.k8s.io/controller-runtime/pkg/manager" +) + +const ( + CephStorageClassCtrlName = "ceph-storage-class-controller" + + StorageClassKind = "StorageClass" + StorageClassAPIVersion = "storage.k8s.io/v1" + + CephStorageClassRBDProvisioner = "rbd.csi.ceph.com" + CephStorageClassCephFSProvisioner = "cephfs.csi.ceph.com" + + CephStorageClassControllerFinalizerName = "storage.deckhouse.io/ceph-storage-class-controller" + CephStorageClassManagedLabelKey = "storage.deckhouse.io/managed-by" + CephStorageClassManagedLabelValue = "ceph-storage-class-controller" + + FailedStatusPhase = "Failed" + CreatedStatusPhase = "Created" + + CreateReconcile = "Create" + UpdateReconcile = "Update" + DeleteReconcile = "Delete" + + // serverParamKey = "server" + // shareParamKey = "share" + // MountPermissionsParamKey = "mountPermissions" + // SubDirParamKey = "subdir" + // MountOptionsSecretKey = "mountOptions" + + // SecretForMountOptionsPrefix = "ceph-mount-options-for-" + // StorageClassSecretNameKey = "csi.storage.k8s.io/provisioner-secret-name" + // StorageClassSecretNSKey = "csi.storage.k8s.io/provisioner-secret-namespace" +) + +var ( + allowedProvisioners = []string{CephStorageClassRBDProvisioner, CephStorageClassCephFSProvisioner} +) + +func RunCephStorageClassWatcherController( + mgr manager.Manager, + cfg config.Options, + log logger.Logger, +) (controller.Controller, error) { + cl := mgr.GetClient() + + c, err := controller.New(CephStorageClassCtrlName, mgr, controller.Options{ + Reconciler: reconcile.Func(func(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { + log.Info(fmt.Sprintf("[CephStorageClassReconciler] starts Reconcile for the CephStorageClass %q", request.Name)) + cephSC := &v1alpha1.CephStorageClass{} + err := cl.Get(ctx, request.NamespacedName, cephSC) + if err != nil && !k8serr.IsNotFound(err) { + log.Error(err, fmt.Sprintf("[CephStorageClassReconciler] unable to get CephStorageClass, name: %s", request.Name)) + return reconcile.Result{}, err + } + + if cephSC.Name == "" { + log.Info(fmt.Sprintf("[CephStorageClassReconciler] seems like the CephStorageClass for the request %s was deleted. 
Reconcile retrying will stop.", request.Name))
+				return reconcile.Result{}, nil
+			}
+
+			scList := &v1.StorageClassList{}
+			err = cl.List(ctx, scList)
+			if err != nil {
+				log.Error(err, "[CephStorageClassReconciler] unable to list Storage Classes")
+				return reconcile.Result{}, err
+			}
+
+			shouldRequeue, err := RunEventReconcile(ctx, cl, log, scList, cephSC, cfg.ControllerNamespace)
+			if err != nil {
+				log.Error(err, fmt.Sprintf("[CephStorageClassReconciler] an error occurred while reconciling the CephStorageClass, name: %s", cephSC.Name))
+			}
+
+			if shouldRequeue {
+				log.Warning(fmt.Sprintf("[CephStorageClassReconciler] Reconciler will requeue the request, name: %s", request.Name))
+				return reconcile.Result{
+					RequeueAfter: cfg.RequeueStorageClassInterval * time.Second,
+				}, nil
+			}
+
+			log.Info(fmt.Sprintf("[CephStorageClassReconciler] ends Reconcile for the CephStorageClass %q", request.Name))
+			return reconcile.Result{}, nil
+		}),
+	})
+	if err != nil {
+		log.Error(err, "[RunCephStorageClassWatcherController] unable to create controller")
+		return nil, err
+	}
+
+	err = c.Watch(source.Kind(mgr.GetCache(), &v1alpha1.CephStorageClass{}), handler.Funcs{
+		CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) {
+			log.Info(fmt.Sprintf("[CreateFunc] get event for CephStorageClass %q. Add to the queue", e.Object.GetName()))
+			request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.Object.GetNamespace(), Name: e.Object.GetName()}}
+			q.Add(request)
+		},
+		UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) {
+			log.Info(fmt.Sprintf("[UpdateFunc] get event for CephStorageClass %q. Check if it should be reconciled", e.ObjectNew.GetName()))
+
+			oldCephSC, ok := e.ObjectOld.(*v1alpha1.CephStorageClass)
+			if !ok {
+				err = errors.New("unable to cast event object to a given type")
+				log.Error(err, "[UpdateFunc] an error occurred while handling update event")
+				return
+			}
+			newCephSC, ok := e.ObjectNew.(*v1alpha1.CephStorageClass)
+			if !ok {
+				err = errors.New("unable to cast event object to a given type")
+				log.Error(err, "[UpdateFunc] an error occurred while handling update event")
+				return
+			}
+
+			if reflect.DeepEqual(oldCephSC.Spec, newCephSC.Spec) && newCephSC.DeletionTimestamp == nil {
+				log.Info(fmt.Sprintf("[UpdateFunc] an update event for the CephStorageClass %s has no Spec field updates. It will not be reconciled", newCephSC.Name))
+				return
+			}
+
+			log.Info(fmt.Sprintf("[UpdateFunc] the CephStorageClass %q will be reconciled.
Add to the queue", newCephSC.Name))
+			request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: newCephSC.Namespace, Name: newCephSC.Name}}
+			q.Add(request)
+		},
+	})
+	if err != nil {
+		log.Error(err, "[RunCephStorageClassWatcherController] unable to watch the events")
+		return nil, err
+	}
+
+	return c, nil
+}
+
+func RunEventReconcile(ctx context.Context, cl client.Client, log logger.Logger, scList *v1.StorageClassList, cephSC *v1alpha1.CephStorageClass, controllerNamespace string) (shouldRequeue bool, err error) {
+	added, err := addFinalizerIfNotExists(ctx, cl, cephSC, CephStorageClassControllerFinalizerName)
+	if err != nil {
+		err = fmt.Errorf("[runEventReconcile] unable to add a finalizer %s to the CephStorageClass %s: %w", CephStorageClassControllerFinalizerName, cephSC.Name, err)
+		return true, err
+	}
+	log.Debug(fmt.Sprintf("[runEventReconcile] finalizer %s was added to the CephStorageClass %s: %t", CephStorageClassControllerFinalizerName, cephSC.Name, added))
+
+	reconcileTypeForStorageClass, err := IdentifyReconcileFuncForStorageClass(log, scList, cephSC, controllerNamespace)
+	if err != nil {
+		err = fmt.Errorf("[runEventReconcile] error occurred while identifying the reconcile function for StorageClass %s: %w", cephSC.Name, err)
+		return true, err
+	}
+
+	shouldRequeue = false
+	log.Debug(fmt.Sprintf("[runEventReconcile] reconcile operation for StorageClass %q: %q", cephSC.Name, reconcileTypeForStorageClass))
+	switch reconcileTypeForStorageClass {
+	case CreateReconcile:
+		log.Debug(fmt.Sprintf("[runEventReconcile] CreateReconcile starts reconciliation of StorageClass, name: %s", cephSC.Name))
+		shouldRequeue, err = ReconcileStorageClassCreateFunc(ctx, cl, log, scList, cephSC, controllerNamespace)
+	case UpdateReconcile:
+		log.Debug(fmt.Sprintf("[runEventReconcile] UpdateReconcile starts reconciliation of StorageClass, name: %s", cephSC.Name))
+		shouldRequeue, err = reconcileStorageClassUpdateFunc(ctx, cl, log, scList, cephSC, controllerNamespace)
+	case DeleteReconcile:
+		log.Debug(fmt.Sprintf("[runEventReconcile] DeleteReconcile starts reconciliation of StorageClass, name: %s", cephSC.Name))
+		shouldRequeue, err = reconcileStorageClassDeleteFunc(ctx, cl, log, scList, cephSC)
+	default:
+		log.Debug(fmt.Sprintf("[runEventReconcile] StorageClass for CephStorageClass %s should not be reconciled", cephSC.Name))
+	}
+	log.Debug(fmt.Sprintf("[runEventReconcile] ends reconciliation of StorageClass, name: %s, shouldRequeue: %t, err: %v", cephSC.Name, shouldRequeue, err))
+
+	if err != nil || shouldRequeue {
+		return shouldRequeue, err
+	}
+
+	log.Debug(fmt.Sprintf("[runEventReconcile] Finish all reconciliations for CephStorageClass %q.", cephSC.Name))
+	return false, nil
+
+}
diff --git a/images/controller/pkg/controller/controller_suite_test.go b/images/controller/pkg/controller/controller_suite_test.go
new file mode 100644
index 0000000..a5c07a7
--- /dev/null
+++ b/images/controller/pkg/controller/controller_suite_test.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2023 Flant JSC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller_test + +import ( + v1alpha1 "d8-controller/api/v1alpha1" + "fmt" + "os" + "testing" + + v1 "k8s.io/api/apps/v1" + + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + sv1 "k8s.io/api/storage/v1" + extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + + apiruntime "k8s.io/apimachinery/pkg/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func TestController(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Controller Suite") +} + +func NewFakeClient() client.Client { + resourcesSchemeFuncs := []func(*apiruntime.Scheme) error{ + v1alpha1.AddToScheme, + clientgoscheme.AddToScheme, + extv1.AddToScheme, + v1.AddToScheme, + sv1.AddToScheme, + } + scheme := apiruntime.NewScheme() + for _, f := range resourcesSchemeFuncs { + err := f(scheme) + if err != nil { + println(fmt.Sprintf("Error adding scheme: %s", err)) + os.Exit(1) + } + } + + // See https://github.com/kubernetes-sigs/controller-runtime/issues/2362#issuecomment-1837270195 + builder := fake.NewClientBuilder().WithScheme(scheme).WithStatusSubresource(&v1alpha1.CephStorageClass{}) + + cl := builder.Build() + return cl +} diff --git a/images/controller/pkg/controller/good_func.go b/images/controller/pkg/controller/good_func.go new file mode 100644 index 0000000..e496125 --- /dev/null +++ b/images/controller/pkg/controller/good_func.go @@ -0,0 +1,549 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "context" + storagev1alpha1 "d8-controller/api/v1alpha1" + "d8-controller/pkg/logger" + "errors" + "fmt" + "reflect" + "strconv" + + "slices" + + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func ReconcileStorageClassCreateFunc( + ctx context.Context, + cl client.Client, + log logger.Logger, + scList *v1.StorageClassList, + cephSC *storagev1alpha1.CephStorageClass, + controllerNamespace string, +) (bool, error) { + log.Debug(fmt.Sprintf("[reconcileStorageClassCreateFunc] starts for CephStorageClass %q", cephSC.Name)) + log.Debug(fmt.Sprintf("[reconcileStorageClassCreateFunc] starts storage class configuration for the CephStorageClass, name: %s", cephSC.Name)) + newSC, err := ConfigureStorageClass(cephSC, controllerNamespace) + if err != nil { + err = fmt.Errorf("[reconcileStorageClassCreateFunc] unable to configure a Storage Class for the CephStorageClass %s: %w", cephSC.Name, err) + upError := updateCephStorageClassPhase(ctx, cl, cephSC, FailedStatusPhase, err.Error()) + if upError != nil { + upError = fmt.Errorf("[reconcileStorageClassCreateFunc] unable to update the CephStorageClass %s: %w", cephSC.Name, upError) + err = errors.Join(err, upError) + } + return false, err + } + + log.Debug(fmt.Sprintf("[reconcileStorageClassCreateFunc] successfully configurated storage class for the CephStorageClass, name: %s", cephSC.Name)) + log.Trace(fmt.Sprintf("[reconcileStorageClassCreateFunc] storage class: %+v", newSC)) + + created, err := createStorageClassIfNotExists(ctx, cl, scList, newSC) + if err != nil { + err = fmt.Errorf("[reconcileStorageClassCreateFunc] unable to create a Storage Class %s: %w", newSC.Name, err) + upError := updateCephStorageClassPhase(ctx, cl, cephSC, FailedStatusPhase, err.Error()) + if upError != nil { + upError = fmt.Errorf("[reconcileStorageClassCreateFunc] unable to update the CephStorageClass %s: %w", cephSC.Name, upError) + err = errors.Join(err, upError) + } + return true, err + } + log.Debug(fmt.Sprintf("[reconcileStorageClassCreateFunc] a storage class %s was created: %t", newSC.Name, created)) + if created { + log.Info(fmt.Sprintf("[reconcileStorageClassCreateFunc] successfully create storage class, name: %s", newSC.Name)) + } else { + log.Warning(fmt.Sprintf("[reconcileLSCCreateFunc] Storage class %s already exists. 
Adding event to requeue.", newSC.Name))
+ return true, nil
+ }
+
+ return false, nil
+}
+
+func reconcileStorageClassUpdateFunc(
+ ctx context.Context,
+ cl client.Client,
+ log logger.Logger,
+ scList *v1.StorageClassList,
+ cephSC *storagev1alpha1.CephStorageClass,
+ controllerNamespace string,
+) (bool, error) {
+ log.Debug(fmt.Sprintf("[reconcileStorageClassUpdateFunc] starts for CephStorageClass %q", cephSC.Name))
+
+ var oldSC *v1.StorageClass
+ for _, s := range scList.Items {
+ if s.Name == cephSC.Name {
+ oldSC = &s
+ break
+ }
+ }
+
+ if oldSC == nil {
+ err := fmt.Errorf("a storage class %s does not exist", cephSC.Name)
+ err = fmt.Errorf("[reconcileStorageClassUpdateFunc] unable to find a storage class for the CephStorageClass %s: %w", cephSC.Name, err)
+ upError := updateCephStorageClassPhase(ctx, cl, cephSC, FailedStatusPhase, err.Error())
+ if upError != nil {
+ upError = fmt.Errorf("[reconcileStorageClassUpdateFunc] unable to update the CephStorageClass %s: %w", cephSC.Name, upError)
+ err = errors.Join(err, upError)
+ }
+ return true, err
+ }
+
+ log.Debug(fmt.Sprintf("[reconcileStorageClassUpdateFunc] successfully found a storage class for the CephStorageClass, name: %s", cephSC.Name))
+
+ log.Trace(fmt.Sprintf("[reconcileStorageClassUpdateFunc] storage class: %+v", oldSC))
+ newSC, err := ConfigureStorageClass(cephSC, controllerNamespace)
+ if err != nil {
+ err = fmt.Errorf("[reconcileStorageClassUpdateFunc] unable to configure a Storage Class for the CephStorageClass %s: %w", cephSC.Name, err)
+ upError := updateCephStorageClassPhase(ctx, cl, cephSC, FailedStatusPhase, err.Error())
+ if upError != nil {
+ upError = fmt.Errorf("[reconcileStorageClassUpdateFunc] unable to update the CephStorageClass %s: %w", cephSC.Name, upError)
+ err = errors.Join(err, upError)
+ }
+ return false, err
+ }
+
+ diff, err := GetSCDiff(oldSC, newSC)
+ if err != nil {
+ err = fmt.Errorf("[reconcileStorageClassUpdateFunc] error occurred while identifying the difference between the existing StorageClass %s and the new one: %w", newSC.Name, err)
+ upError := updateCephStorageClassPhase(ctx, cl, cephSC, FailedStatusPhase, err.Error())
+ if upError != nil {
+ upError = fmt.Errorf("[reconcileStorageClassUpdateFunc] unable to update the CephStorageClass %s: %w", cephSC.Name, upError)
+ err = errors.Join(err, upError)
+ }
+ return true, err
+ }
+
+ if diff != "" {
+ log.Info(fmt.Sprintf("[reconcileStorageClassUpdateFunc] the current Storage Class parameters do not match the CephStorageClass spec.
The Storage Class %s will be recreated with new ones", cephSC.Name)) + + err = recreateStorageClass(ctx, cl, oldSC, newSC) + if err != nil { + err = fmt.Errorf("[reconcileStorageClassUpdateFunc] unable to recreate a Storage Class %s: %w", newSC.Name, err) + upError := updateCephStorageClassPhase(ctx, cl, cephSC, FailedStatusPhase, err.Error()) + if upError != nil { + upError = fmt.Errorf("[reconcileStorageClassUpdateFunc] unable to update the CephStorageClass %s: %w", cephSC.Name, upError) + err = errors.Join(err, upError) + } + return true, err + } + + log.Info(fmt.Sprintf("[reconcileStorageClassUpdateFunc] a Storage Class %s was successfully recreated", newSC.Name)) + } + + return false, nil +} + +func IdentifyReconcileFuncForStorageClass(log logger.Logger, scList *v1.StorageClassList, cephSC *storagev1alpha1.CephStorageClass, controllerNamespace string) (reconcileType string, err error) { + if shouldReconcileByDeleteFunc(cephSC) { + return DeleteReconcile, nil + } + + if shouldReconcileStorageClassByCreateFunc(scList, cephSC) { + return CreateReconcile, nil + } + + should, err := shouldReconcileStorageClassByUpdateFunc(log, scList, cephSC, controllerNamespace) + if err != nil { + return "", err + } + if should { + return UpdateReconcile, nil + } + + return "", nil +} + +func shouldReconcileStorageClassByCreateFunc(scList *v1.StorageClassList, cephSC *storagev1alpha1.CephStorageClass) bool { + if cephSC.DeletionTimestamp != nil { + return false + } + + for _, sc := range scList.Items { + if sc.Name == cephSC.Name { + return false + } + } + + return true +} + +func shouldReconcileStorageClassByUpdateFunc(log logger.Logger, scList *v1.StorageClassList, cephSC *storagev1alpha1.CephStorageClass, controllerNamespace string) (bool, error) { + if cephSC.DeletionTimestamp != nil { + return false, nil + } + + for _, oldSC := range scList.Items { + if oldSC.Name == cephSC.Name { + if slices.Contains(allowedProvisioners, oldSC.Provisioner) { + newSC, err := ConfigureStorageClass(cephSC, controllerNamespace) + if err != nil { + return false, err + } + + diff, err := GetSCDiff(&oldSC, newSC) + if err != nil { + return false, err + } + + if diff != "" { + log.Debug(fmt.Sprintf("[shouldReconcileStorageClassByUpdateFunc] a storage class %s should be updated. 
Diff: %s", oldSC.Name, diff)) + return true, nil + } + + if cephSC.Status != nil && cephSC.Status.Phase == FailedStatusPhase { + return true, nil + } + + return false, nil + + } else { + err := fmt.Errorf("a storage class %s with provisioner % s does not belong to allowed provisioners: %v", oldSC.Name, oldSC.Provisioner, allowedProvisioners) + return false, err + } + } + } + + err := fmt.Errorf("a storage class %s does not exist", cephSC.Name) + return false, err +} + +func reconcileStorageClassDeleteFunc( + ctx context.Context, + cl client.Client, + log logger.Logger, + scList *v1.StorageClassList, + cephSC *storagev1alpha1.CephStorageClass, +) (bool, error) { + log.Debug(fmt.Sprintf("[reconcileStorageClassDeleteFunc] tries to find a storage class for the CephStorageClass %s", cephSC.Name)) + var sc *v1.StorageClass + for _, s := range scList.Items { + if s.Name == cephSC.Name { + sc = &s + break + } + } + if sc == nil { + log.Info(fmt.Sprintf("[reconcileStorageClassDeleteFunc] no storage class found for the CephStorageClass, name: %s", cephSC.Name)) + } + + if sc != nil { + log.Info(fmt.Sprintf("[reconcileStorageClassDeleteFunc] successfully found a storage class for the CephStorageClass %s", cephSC.Name)) + log.Debug(fmt.Sprintf("[reconcileStorageClassDeleteFunc] starts identifying a provisioner for the storage class %s", sc.Name)) + + if slices.Contains(allowedProvisioners, sc.Provisioner) { + log.Info(fmt.Sprintf("[reconcileStorageClassDeleteFunc] the storage class %s provisioner %s belongs to allowed provisioners: %v", sc.Name, sc.Provisioner, allowedProvisioners)) + + err := deleteStorageClass(ctx, cl, sc) + if err != nil { + err = fmt.Errorf("[reconcileStorageClassDeleteFunc] unable to delete a storage class %s: %w", sc.Name, err) + upErr := updateCephStorageClassPhase(ctx, cl, cephSC, FailedStatusPhase, fmt.Sprintf("Unable to delete a storage class, err: %s", err.Error())) + if upErr != nil { + upErr = fmt.Errorf("[reconcileStorageClassDeleteFunc] unable to update the CephStorageClass %s: %w", cephSC.Name, upErr) + err = errors.Join(err, upErr) + } + return true, err + } + log.Info(fmt.Sprintf("[reconcileStorageClassDeleteFunc] successfully deleted a storage class, name: %s", sc.Name)) + } + + if !slices.Contains(allowedProvisioners, sc.Provisioner) { + log.Info(fmt.Sprintf("[reconcileStorageClassDeleteFunc] the storage class %s provisioner %s does not belong to allowed provisioners: %v", sc.Name, sc.Provisioner, allowedProvisioners)) + + } + } + + log.Debug("[reconcileStorageClassDeleteFunc] ends the reconciliation") + return false, nil +} + +func shouldReconcileByDeleteFunc(cephSC *storagev1alpha1.CephStorageClass) bool { + if cephSC.DeletionTimestamp != nil { + return true + } + + return false +} + +func removeFinalizerIfExists(ctx context.Context, cl client.Client, obj metav1.Object, finalizerName string) (bool, error) { + removed := false + finalizers := obj.GetFinalizers() + for i, f := range finalizers { + if f == finalizerName { + finalizers = append(finalizers[:i], finalizers[i+1:]...) 
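+ // finalizers now holds every element except the matched finalizer at index i.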
+ removed = true + break + } + } + + if removed { + obj.SetFinalizers(finalizers) + err := cl.Update(ctx, obj.(client.Object)) + if err != nil { + return false, err + } + } + + return removed, nil +} + +func addFinalizerIfNotExists(ctx context.Context, cl client.Client, obj metav1.Object, finalizerName string) (bool, error) { + added := false + finalizers := obj.GetFinalizers() + if !slices.Contains(finalizers, finalizerName) { + finalizers = append(finalizers, finalizerName) + added = true + } + + if added { + obj.SetFinalizers(finalizers) + err := cl.Update(ctx, obj.(client.Object)) + if err != nil { + return false, err + } + } + return true, nil +} + +func ConfigureStorageClass(cephSC *storagev1alpha1.CephStorageClass, controllerNamespace string) (*v1.StorageClass, error) { + if cephSC.Spec.ReclaimPolicy == "" { + err := fmt.Errorf("CephStorageClass %q: the ReclaimPolicy field is empty", cephSC.Name) + return nil, err + } + + if cephSC.Spec.AllowVolumeExpansion == "" { + err := fmt.Errorf("CephStorageClass %q: the AllowVolumeExpansion field is empty", cephSC.Name) + return nil, err + } + + provisioner, err := GetStorageClassProvisioner(cephSC) + if err != nil { + err = fmt.Errorf("CephStorageClass %q: unable to get a provisioner: %w", cephSC.Name, err) + return nil, err + } + + allowVolumeExpansion, err := strconv.ParseBool(cephSC.Spec.AllowVolumeExpansion) + if err != nil { + err = fmt.Errorf("CephStorageClass %q: the AllowVolumeExpansion field is not a boolean value: %w", cephSC.Name, err) + return nil, err + } + + reclaimPolicy := corev1.PersistentVolumeReclaimPolicy(cephSC.Spec.ReclaimPolicy) + volumeBindingMode := v1.VolumeBindingWaitForFirstConsumer + + params, err := GetStoragecClassParams(cephSC, controllerNamespace) + if err != nil { + err = fmt.Errorf("CephStorageClass %q: unable to get a storage class parameters: %w", cephSC.Name, err) + return nil, err + } + + sc := &v1.StorageClass{ + TypeMeta: metav1.TypeMeta{ + Kind: StorageClassKind, + APIVersion: StorageClassAPIVersion, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: cephSC.Name, + Namespace: cephSC.Namespace, + Finalizers: []string{CephStorageClassControllerFinalizerName}, + }, + Parameters: params, + MountOptions: cephSC.Spec.MountOptions, + Provisioner: provisioner, + ReclaimPolicy: &reclaimPolicy, + VolumeBindingMode: &volumeBindingMode, + AllowVolumeExpansion: &allowVolumeExpansion, + } + + return sc, nil +} + +func GetStorageClassProvisioner(cephSC *storagev1alpha1.CephStorageClass) (string, error) { + if cephSC.Spec.Type == "" { + err := fmt.Errorf("CephStorageClass %q: the Type field is empty", cephSC.Name) + return "", err + } + + if cephSC.Spec.Type == storagev1alpha1.CephStorageClassTypeRBD && cephSC.Spec.RBD == nil { + err := fmt.Errorf("CephStorageClass %q type is %q, but the rbd field is empty", cephSC.Name, storagev1alpha1.CephStorageClassTypeRBD) + return "", err + } + + if cephSC.Spec.Type == storagev1alpha1.CephStorageClassTypeCephFS && cephSC.Spec.CephFS == nil { + err := fmt.Errorf("CephStorageClass %q type is %q, but the cephfs field is empty", cephSC.Name, storagev1alpha1.CephStorageClassTypeCephFS) + return "", err + } + + provisioner := "" + switch cephSC.Spec.Type { + case storagev1alpha1.CephStorageClassTypeRBD: + provisioner = CephStorageClassRBDProvisioner + case storagev1alpha1.CephStorageClassTypeCephFS: + provisioner = CephStorageClassCephFSProvisioner + default: + err := fmt.Errorf("CephStorageClass %q: the Type field is not valid: %s", cephSC.Name, cephSC.Spec.Type) + return "", err + } + 
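+ // Each non-default case above maps the validated Spec.Type to a known CSI driver name, so provisioner is always set here.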
+ return provisioner, nil + +} + +func GetStoragecClassParams(cephSC *storagev1alpha1.CephStorageClass, controllerNamespace string) (map[string]string, error) { + + if cephSC.Spec.ClusterName == "" { + err := errors.New("CephStorageClass ClusterName is empty") + return nil, err + } + + if cephSC.Spec.Pool == "" { + err := errors.New("CephStorageClass Pool is empty") + return nil, err + } + + secretName := fmt.Sprintf("csi-ceph-secret-for-%s", cephSC.Spec.ClusterName) + + params := map[string]string{ + "clusterID": cephSC.Spec.ClusterName, + "pool": cephSC.Spec.Pool, + "csi.storage.k8s.io/provisioner-secret-name": secretName, + "csi.storage.k8s.io/provisioner-secret-namespace": controllerNamespace, + "csi.storage.k8s.io/controller-expand-secret-name": secretName, + "csi.storage.k8s.io/controller-expand-secret-namespace": controllerNamespace, + "csi.storage.k8s.io/node-stage-secret-name": secretName, + "csi.storage.k8s.io/node-stage-secret-namespace": controllerNamespace, + } + + if cephSC.Spec.Type == storagev1alpha1.CephStorageClassTypeRBD { + params["imageFeatures"] = "layering" + params["csi.storage.k8s.io/fstype"] = cephSC.Spec.RBD.DefaultFSType + } + + if cephSC.Spec.Type == storagev1alpha1.CephStorageClassTypeCephFS { + params["fsName"] = cephSC.Spec.CephFS.FSName + } + + return params, nil +} + +func updateCephStorageClassPhase(ctx context.Context, cl client.Client, cephSC *storagev1alpha1.CephStorageClass, phase, reason string) error { + if cephSC.Status == nil { + cephSC.Status = &storagev1alpha1.CephStorageClassStatus{} + } + cephSC.Status.Phase = phase + cephSC.Status.Reason = reason + + // TODO: add retry logic + err := cl.Status().Update(ctx, cephSC) + if err != nil { + return err + } + + return nil +} + +func createStorageClassIfNotExists(ctx context.Context, cl client.Client, scList *v1.StorageClassList, sc *v1.StorageClass) (bool, error) { + for _, s := range scList.Items { + if s.Name == sc.Name { + return false, nil + } + } + + err := cl.Create(ctx, sc) + if err != nil { + return false, err + } + + return true, err +} + +func GetSCDiff(oldSC, newSC *v1.StorageClass) (string, error) { + + if oldSC.Provisioner != newSC.Provisioner { + err := fmt.Errorf("CephStorageClass %q: the provisioner field is different in the StorageClass %q", newSC.Name, oldSC.Name) + return "", err + } + + if *oldSC.ReclaimPolicy != *newSC.ReclaimPolicy { + diff := fmt.Sprintf("ReclaimPolicy: %q -> %q", *oldSC.ReclaimPolicy, *newSC.ReclaimPolicy) + return diff, nil + } + + if *oldSC.VolumeBindingMode != *newSC.VolumeBindingMode { + diff := fmt.Sprintf("VolumeBindingMode: %q -> %q", *oldSC.VolumeBindingMode, *newSC.VolumeBindingMode) + return diff, nil + } + + if *oldSC.AllowVolumeExpansion != *newSC.AllowVolumeExpansion { + diff := fmt.Sprintf("AllowVolumeExpansion: %t -> %t", *oldSC.AllowVolumeExpansion, *newSC.AllowVolumeExpansion) + return diff, nil + } + + if !reflect.DeepEqual(oldSC.Parameters, newSC.Parameters) { + diff := fmt.Sprintf("Parameters: %+v -> %+v", oldSC.Parameters, newSC.Parameters) + return diff, nil + } + + if !reflect.DeepEqual(oldSC.MountOptions, newSC.MountOptions) { + diff := fmt.Sprintf("MountOptions: %v -> %v", oldSC.MountOptions, newSC.MountOptions) + return diff, nil + } + + return "", nil +} + +func recreateStorageClass(ctx context.Context, cl client.Client, oldSC, newSC *v1.StorageClass) error { + // It is necessary to pass the original StorageClass to the delete operation because + // the deletion will not succeed if the fields in the StorageClass provided to delete + 
// differ from those currently in the cluster. + err := deleteStorageClass(ctx, cl, oldSC) + if err != nil { + err = fmt.Errorf("[recreateStorageClass] unable to delete a storage class %s: %s", oldSC.Name, err.Error()) + return err + } + + err = cl.Create(ctx, newSC) + if err != nil { + err = fmt.Errorf("[recreateStorageClass] unable to create a storage class %s: %s", newSC.Name, err.Error()) + return err + } + + return nil +} + +func deleteStorageClass(ctx context.Context, cl client.Client, sc *v1.StorageClass) error { + if !slices.Contains(allowedProvisioners, sc.Provisioner) { + return fmt.Errorf("a storage class %s with provisioner %s does not belong to allowed provisioners: %v", sc.Name, sc.Provisioner, allowedProvisioners) + } + + _, err := removeFinalizerIfExists(ctx, cl, sc, CephStorageClassControllerFinalizerName) + if err != nil { + return err + } + + err = cl.Delete(ctx, sc) + if err != nil { + return err + } + + return nil +} diff --git a/images/controller/pkg/kubutils/kubernetes.go b/images/controller/pkg/kubutils/kubernetes.go new file mode 100644 index 0000000..4714cfe --- /dev/null +++ b/images/controller/pkg/kubutils/kubernetes.go @@ -0,0 +1,35 @@ +/* +Copyright 2024 Flant JSC +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubutils + +import ( + "fmt" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +func KubernetesDefaultConfigCreate() (*rest.Config, error) { + //todo validate empty + clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + clientcmd.NewDefaultClientConfigLoadingRules(), + &clientcmd.ConfigOverrides{}, + ) + + // Get a config to talk to API server + config, err := clientConfig.ClientConfig() + if err != nil { + return nil, fmt.Errorf("config kubernetes error %w", err) + } + return config, nil +} diff --git a/images/controller/pkg/logger/logger.go b/images/controller/pkg/logger/logger.go new file mode 100644 index 0000000..345af2b --- /dev/null +++ b/images/controller/pkg/logger/logger.go @@ -0,0 +1,84 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package logger + +import ( + "flag" + "fmt" + "github.com/go-logr/logr" + "k8s.io/klog/v2" + "k8s.io/klog/v2/klogr" +) + +const ( + ErrorLevel Verbosity = "0" + WarningLevel Verbosity = "1" + InfoLevel Verbosity = "2" + DebugLevel Verbosity = "3" + TraceLevel Verbosity = "4" +) + +const ( + warnLvl = iota + 1 + infoLvl + debugLvl + traceLvl +) + +type ( + Verbosity string +) + +type Logger struct { + log logr.Logger +} + +func NewLogger(level Verbosity) (*Logger, error) { + klog.InitFlags(nil) + if err := flag.Set("v", string(level)); err != nil { + return nil, err + } + flag.Parse() + + log := klogr.New().WithCallDepth(1) + + return &Logger{log: log}, nil +} + +func (l Logger) GetLogger() logr.Logger { + return l.log +} + +func (l Logger) Error(err error, message string, keysAndValues ...interface{}) { + l.log.Error(err, fmt.Sprintf("ERROR %s", message), keysAndValues...) +} + +func (l Logger) Warning(message string, keysAndValues ...interface{}) { + l.log.V(warnLvl).Info(fmt.Sprintf("WARNING %s", message), keysAndValues...) +} + +func (l Logger) Info(message string, keysAndValues ...interface{}) { + l.log.V(infoLvl).Info(fmt.Sprintf("INFO %s", message), keysAndValues...) +} + +func (l Logger) Debug(message string, keysAndValues ...interface{}) { + l.log.V(debugLvl).Info(fmt.Sprintf("DEBUG %s", message), keysAndValues...) +} + +func (l Logger) Trace(message string, keysAndValues ...interface{}) { + l.log.V(traceLvl).Info(fmt.Sprintf("TRACE %s", message), keysAndValues...) +} diff --git a/templates/cephfs/storage-classes.yaml b/templates/cephfs/storage-classes.yaml deleted file mode 100644 index 7350c09..0000000 --- a/templates/cephfs/storage-classes.yaml +++ /dev/null @@ -1,37 +0,0 @@ -{{- range $cr := .Values.csiCeph.internal.crs }} - {{- if $cr.spec.cephfs }} - {{- range $sc := $cr.spec.cephfs.storageClasses }} ---- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: {{ $cr.name }}-{{ $sc.namePostfix }} - namespace: d8-{{ $.Chart.Name }} - annotations: - migration-volume-binding-mode-changed: "" -{{ include "helm_lib_module_labels" (list $ (dict "app" $.Chart.Name)) | indent 2 }} -provisioner: cephfs.csi.ceph.com -reclaimPolicy: {{ $sc.reclaimPolicy }} - {{- if $sc.allowVolumeExpansion }} -allowVolumeExpansion: {{ $sc.allowVolumeExpansion }} - {{- end }} - {{- if $sc.mountOptions }} -mountOptions: - {{- range $option := $sc.mountOptions }} - - {{ $option }} - {{- end }} - {{- end }} -volumeBindingMode: WaitForFirstConsumer -parameters: - csi.storage.k8s.io/provisioner-secret-name: csi-{{ $cr.name }} - csi.storage.k8s.io/provisioner-secret-namespace: d8-{{ $.Chart.Name }} - csi.storage.k8s.io/controller-expand-secret-name: csi-{{ $cr.name }} - csi.storage.k8s.io/controller-expand-secret-namespace: d8-{{ $.Chart.Name }} - csi.storage.k8s.io/node-stage-secret-name: csi-{{ $cr.name }} - csi.storage.k8s.io/node-stage-secret-namespace: d8-{{ $.Chart.Name }} - clusterID: {{ $cr.spec.clusterID }} - fsName: {{ $sc.fsName }} - pool: {{ $sc.pool }} - {{- end }} - {{- end }} -{{- end }} diff --git a/templates/cephfs/volume-snapshot-class.yaml b/templates/cephfs/volume-snapshot-class.yaml deleted file mode 100644 index e547011..0000000 --- a/templates/cephfs/volume-snapshot-class.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# Source https://github.com/ceph/ceph-csi/blob/devel/examples/cephfs/snapshotclass.yaml -{{- if (.Values.global.enabledModules | has "snapshot-controller") }} - {{- range $cr := .Values.csiCeph.internal.crs }} - {{- if $cr.spec.cephfs }} - {{- range $sc := 
$cr.spec.cephfs.storageClasses }} ---- -apiVersion: snapshot.storage.k8s.io/v1 -kind: VolumeSnapshotClass -metadata: - {{- include "helm_lib_module_labels" (list $ (dict "app" $.Chart.Name)) | nindent 2 }} - name: {{ $cr.name }}-{{ $sc.namePostfix }} -driver: cephfs.csi.ceph.com -parameters: - clusterID: {{ $cr.spec.clusterID }} - csi.storage.k8s.io/snapshotter-secret-name: csi-{{ $cr.name }} - csi.storage.k8s.io/snapshotter-secret-namespace: d8-{{ $.Chart.Name }} -deletionPolicy: Delete - {{- end }} - {{- end }} - {{- end }} -{{- end }} diff --git a/templates/controller/deployment.yaml b/templates/controller/deployment.yaml new file mode 100644 index 0000000..a3e3b43 --- /dev/null +++ b/templates/controller/deployment.yaml @@ -0,0 +1,95 @@ +{{- define "controller_resources" }} +cpu: 10m +memory: 25Mi +{{- end }} + +{{- if (.Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} +--- +apiVersion: autoscaling.k8s.io/v1 +kind: VerticalPodAutoscaler +metadata: + name: controller + namespace: d8-{{ .Chart.Name }} + {{- include "helm_lib_module_labels" (list . (dict "app" "controller")) | nindent 2 }} +spec: + targetRef: + apiVersion: "apps/v1" + kind: Deployment + name: controller + updatePolicy: + updateMode: "Auto" + resourcePolicy: + containerPolicies: + - containerName: "controller" + minAllowed: + {{- include "controller_resources" . | nindent 8 }} + maxAllowed: + cpu: 200m + memory: 100Mi +{{- end }} +--- +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: controller + namespace: d8-{{ .Chart.Name }} + {{- include "helm_lib_module_labels" (list . (dict "app" "controller" )) | nindent 2 }} +spec: + minAvailable: {{ include "helm_lib_is_ha_to_value" (list . 1 0) }} + selector: + matchLabels: + app: controller +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller + namespace: d8-{{ .Chart.Name }} + {{- include "helm_lib_module_labels" (list . (dict "app" "controller")) | nindent 2 }} +spec: + {{- include "helm_lib_deployment_on_master_strategy_and_replicas_for_ha" . | nindent 2 }} + revisionHistoryLimit: 2 + selector: + matchLabels: + app: controller + template: + metadata: + labels: + app: controller + spec: + {{- include "helm_lib_priority_class" (tuple . "cluster-medium") | nindent 6 }} + {{- include "helm_lib_node_selector" (tuple . "system") | nindent 6 }} + {{- include "helm_lib_tolerations" (tuple . "system") | nindent 6 }} + {{- include "helm_lib_module_pod_security_context_run_as_user_nobody" . | nindent 6 }} + imagePullSecrets: + - name: {{ .Chart.Name }}-module-registry + serviceAccountName: controller + containers: + - name: controller + image: {{ include "helm_lib_module_image" (list . "controller") }} + imagePullPolicy: IfNotPresent + resources: + requests: + {{- include "helm_lib_module_ephemeral_storage_only_logs" . | nindent 14 }} +{{- if not ( .Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} + {{- include "controller_resources" . 
| nindent 14 }} +{{- end }} + securityContext: + privileged: false + env: + - name: LOG_LEVEL +{{- if eq .Values.csiCeph.logLevel "ERROR" }} + value: "0" +{{- else if eq .Values.csiCeph.logLevel "WARN" }} + value: "1" +{{- else if eq .Values.csiCeph.logLevel "INFO" }} + value: "2" +{{- else if eq .Values.csiCeph.logLevel "DEBUG" }} + value: "3" +{{- else if eq .Values.csiCeph.logLevel "TRACE" }} + value: "4" +{{- end }} + - name: CONTROLLER_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace diff --git a/templates/controller/rbac-for-us.yaml b/templates/controller/rbac-for-us.yaml new file mode 100644 index 0000000..763f1e1 --- /dev/null +++ b/templates/controller/rbac-for-us.yaml @@ -0,0 +1,110 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: controller + namespace: d8-{{ .Chart.Name }} + {{- include "helm_lib_module_labels" (list . (dict "app" "controller")) | nindent 2 }} + +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: d8:{{ .Chart.Name }}:controller + namespace: d8-{{ .Chart.Name }} + {{- include "helm_lib_module_labels" (list . (dict "app" "controller")) | nindent 2 }} +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - create + - update + - delete + - apiGroups: + - "" + resources: + - events + verbs: + - create + - list + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - watch + - list + - delete + - update + - create + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: d8:{{ .Chart.Name }}:controller + {{- include "helm_lib_module_labels" (list . (dict "app" "controller")) | nindent 2 }} +rules: + - apiGroups: + - storage.deckhouse.io + resources: + - cephstorageclasses + - cephstorageclasses/status + - cephclusters + - cephclusters/status + verbs: + - get + - list + - create + - watch + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - create + - delete + - list + - get + - watch + - update +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: d8:{{ .Chart.Name }}:controller + namespace: d8-{{ .Chart.Name }} + {{- include "helm_lib_module_labels" (list . (dict "app" "controller")) | nindent 2 }} +subjects: + - kind: ServiceAccount + name: controller + namespace: d8-{{ .Chart.Name }} +roleRef: + kind: Role + name: d8:{{ .Chart.Name }}:controller + apiGroup: rbac.authorization.k8s.io + + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: d8:{{ .Chart.Name }}:controller + {{- include "helm_lib_module_labels" (list . 
(dict "app" "controller")) | nindent 2 }} +subjects: + - kind: ServiceAccount + name: controller + namespace: d8-{{ .Chart.Name }} +roleRef: + kind: ClusterRole + name: d8:{{ .Chart.Name }}:controller + apiGroup: rbac.authorization.k8s.io + + diff --git a/templates/rbd/storage-classes.yaml b/templates/rbd/storage-classes.yaml deleted file mode 100644 index ac1b5ce..0000000 --- a/templates/rbd/storage-classes.yaml +++ /dev/null @@ -1,36 +0,0 @@ -{{- range $cr := .Values.csiCeph.internal.crs }} - {{- if $cr.spec.rbd }} - {{- range $sc := $cr.spec.rbd.storageClasses }} ---- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: {{ $cr.name }}-{{ $sc.namePostfix }} - namespace: d8-{{ $.Chart.Name }} - annotations: - migration-volume-binding-mode-changed: "" -{{ include "helm_lib_module_labels" (list $ (dict "app" $.Chart.Name)) | indent 2 }} -provisioner: rbd.csi.ceph.com -volumeBindingMode: WaitForFirstConsumer -parameters: - clusterID: {{ $cr.spec.clusterID }} - pool: {{ $sc.pool }} - imageFeatures: layering - csi.storage.k8s.io/provisioner-secret-name: csi-{{ $cr.name }} - csi.storage.k8s.io/provisioner-secret-namespace: d8-{{ $.Chart.Name }} - csi.storage.k8s.io/controller-expand-secret-name: csi-{{ $cr.name }} - csi.storage.k8s.io/controller-expand-secret-namespace: d8-{{ $.Chart.Name }} - csi.storage.k8s.io/node-stage-secret-name: csi-{{ $cr.name }} - csi.storage.k8s.io/node-stage-secret-namespace: d8-{{ $.Chart.Name }} - csi.storage.k8s.io/fstype: {{ $sc.defaultFSType }} -reclaimPolicy: {{ $sc.reclaimPolicy }} -allowVolumeExpansion: {{ $sc.allowVolumeExpansion }} - {{- if $sc.mountOptions }} -mountOptions: - {{- range $option := $sc.mountOptions }} - - {{ $option }} - {{- end }} - {{- end }} - {{- end }} - {{- end }} -{{- end }} diff --git a/templates/rbd/volume-snapshot-class.yaml b/templates/rbd/volume-snapshot-class.yaml deleted file mode 100644 index 26f22d3..0000000 --- a/templates/rbd/volume-snapshot-class.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# Source https://github.com/ceph/ceph-csi/blob/devel/examples/rbd/snapshotclass.yaml -{{- if (.Values.global.enabledModules | has "snapshot-controller") }} - {{- range $cr := .Values.csiCeph.internal.crs }} - {{- if $cr.spec.rbd }} - {{- range $sc := $cr.spec.rbd.storageClasses }} ---- -apiVersion: snapshot.storage.k8s.io/v1 -kind: VolumeSnapshotClass -metadata: - {{- include "helm_lib_module_labels" (list $ (dict "app" $.Chart.Name)) | nindent 2 }} - name: {{ $cr.name }}-{{ $sc.namePostfix }} -driver: rbd.csi.ceph.com -parameters: - clusterID: {{ $cr.spec.clusterID }} - imageFeatures: layering - csi.storage.k8s.io/snapshotter-secret-name: csi-{{ $cr.name }} - csi.storage.k8s.io/snapshotter-secret-namespace: d8-{{ $.Chart.Name }} -deletionPolicy: Delete - {{- end }} - {{- end }} - {{- end }} -{{- end }} diff --git a/templates/secret.yaml b/templates/secret.yaml deleted file mode 100644 index ce3da8a..0000000 --- a/templates/secret.yaml +++ /dev/null @@ -1,16 +0,0 @@ -{{- range $cr := .Values.csiCeph.internal.crs }} ---- -apiVersion: v1 -kind: Secret -metadata: - name: csi-{{ $cr.name }} - namespace: d8-{{ $.Chart.Name }} -{{ include "helm_lib_module_labels" (list $ (dict "app" $.Chart.Name)) | indent 2 }} -stringData: - # Credentials for RBD - userID: {{ $cr.spec.userID }} - userKey: {{ $cr.spec.userKey }} - # Credentials for CephFS - adminID: {{ $cr.spec.userID }} - adminKey: {{ $cr.spec.userKey }} -{{- end }} From bad1fd5a8c582215fdb3932857f7c59447047b41 Mon Sep 17 00:00:00 2001 From: "alexandr.zimin@flant.com" Date: Sat, 15 
Jun 2024 01:18:47 +0300 Subject: [PATCH 02/21] Some changes --- ...luster.yaml => cephclusterconnection.yaml} | 14 +- crds/cephstorageclass.yaml | 49 ++- ..._cluster.go => ceph_cluster_connection.go} | 14 +- .../api/v1alpha1/ceph_storage_class.go | 19 +- images/controller/api/v1alpha1/const.go | 22 ++ images/controller/api/v1alpha1/register.go | 4 +- .../api/v1alpha1/zz_generated.deepcopy.go | 20 +- images/controller/cmd/main.go | 5 + .../ceph_cluster_connection_watcher.go | 190 ++++++++++++ .../ceph_cluster_connection_watcher_func.go | 278 ++++++++++++++++++ .../controller/ceph_storage_class_watcher.go | 62 ++-- ....go => ceph_storage_class_watcher_func.go} | 226 +++++++------- .../controller/pkg/controller/common_func.go | 73 +++++ 13 files changed, 772 insertions(+), 204 deletions(-) rename crds/{cephcluster.yaml => cephclusterconnection.yaml} (89%) rename images/controller/api/v1alpha1/{ceph_cluster.go => ceph_cluster_connection.go} (74%) create mode 100644 images/controller/api/v1alpha1/const.go create mode 100644 images/controller/pkg/controller/ceph_cluster_connection_watcher.go create mode 100644 images/controller/pkg/controller/ceph_cluster_connection_watcher_func.go rename images/controller/pkg/controller/{good_func.go => ceph_storage_class_watcher_func.go} (81%) create mode 100644 images/controller/pkg/controller/common_func.go diff --git a/crds/cephcluster.yaml b/crds/cephclusterconnection.yaml similarity index 89% rename from crds/cephcluster.yaml rename to crds/cephclusterconnection.yaml index 14a7997..4b15e31 100644 --- a/crds/cephcluster.yaml +++ b/crds/cephclusterconnection.yaml @@ -1,7 +1,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: cephclusters.storage.deckhouse.io + name: cephclusterconnections.storage.deckhouse.io labels: heritage: deckhouse module: csi-ceph @@ -9,9 +9,9 @@ spec: group: storage.deckhouse.io scope: Cluster names: - plural: cephclusters - singular: cephcluster - kind: CephCluster + plural: cephclusterconnections + singular: cephclusterconnection + kind: CephClusterConnection preserveUnknownFields: false versions: - name: v1alpha1 @@ -56,12 +56,12 @@ spec: status: type: object description: | - Displays current information about the resources managed by the CephCluster custom resource. + Displays current information about the resources managed by the CephClusterConnection custom resource. properties: phase: type: string description: | - The current state of resources managed by the CephCluster custom resource. Might be: + The current state of resources managed by the CephClusterConnection custom resource. Might be: - Failed (if the controller received incorrect resource configuration or some errors occurred during the operation) - Create (if everything went fine) enum: @@ -70,7 +70,7 @@ spec: reason: type: string description: | - Additional information about the resources managed by the CephCluster custom resource. + Additional information about the resources managed by the CephClusterConnection custom resource. 
subresources: status: {} additionalPrinterColumns: diff --git a/crds/cephstorageclass.yaml b/crds/cephstorageclass.yaml index 3aeed71..840db24 100644 --- a/crds/cephstorageclass.yaml +++ b/crds/cephstorageclass.yaml @@ -28,9 +28,7 @@ spec: spec: type: object required: - - clusterName - - allowVolumeExpansion - - pool + - clusterConnectionName - reclaimPolicy - type oneOf: @@ -39,24 +37,9 @@ spec: - required: - cephfs properties: - clusterName: + clusterConnectionName: description: | - Name of the CephCluster custom resource. - type: string - x-kubernetes-validations: - - rule: self == oldSelf - message: Value is immutable. - minLength: 1 - allowVolumeExpansion: - description: | - AllowVolumeExpansion is a flag that enables or disables volume expansion for the storage class. - type: boolean - x-kubernetes-validations: - - rule: self == oldSelf - message: Value is immutable. - pool: - description: | - Name of the Ceph pool. + Name of the CephClusterConnection custom resource. type: string x-kubernetes-validations: - rule: self == oldSelf @@ -92,6 +75,7 @@ spec: CephFS specific parameters. required: - fsName + - pool properties: fsName: description: | @@ -101,27 +85,38 @@ spec: - rule: self == oldSelf message: Value is immutable. minLength: 1 + pool: + description: | + Name of the Ceph pool. + type: string + x-kubernetes-validations: + - rule: self == oldSelf + message: Value is immutable. + minLength: 1 rbd: type: object description: | Rados Block Device specific parameters. required: - - defaultFSType + - pool properties: defaultFSType: description: | Default file system type for the Rados Block Device. type: string + default: ext4 + x-kubernetes-validations: + - rule: self == oldSelf + message: Value is immutable. + minLength: 1 + pool: + description: | + Name of the Ceph pool. + type: string x-kubernetes-validations: - rule: self == oldSelf message: Value is immutable. minLength: 1 - mountOptions: - description: | - Mount options. 
- type: array - items: - type: string status: type: object description: | diff --git a/images/controller/api/v1alpha1/ceph_cluster.go b/images/controller/api/v1alpha1/ceph_cluster_connection.go similarity index 74% rename from images/controller/api/v1alpha1/ceph_cluster.go rename to images/controller/api/v1alpha1/ceph_cluster_connection.go index 558da97..747d490 100644 --- a/images/controller/api/v1alpha1/ceph_cluster.go +++ b/images/controller/api/v1alpha1/ceph_cluster_connection.go @@ -20,27 +20,27 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -type CephCluster struct { +type CephClusterConnection struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - Spec CephClusterSpec `json:"spec"` - Status *CephClusterStatus `json:"status,omitempty"` + Spec CephClusterConnectionSpec `json:"spec"` + Status *CephClusterConnectionStatus `json:"status,omitempty"` } -type CephClusterList struct { +type CephClusterConnectionList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata"` - Items []CephCluster `json:"items"` + Items []CephClusterConnection `json:"items"` } -type CephClusterSpec struct { +type CephClusterConnectionSpec struct { ClusterID string `json:"clusterID"` UserID string `json:"userID"` UserKey string `json:"userKey"` Monitors []string `json:"monitors"` } -type CephClusterStatus struct { +type CephClusterConnectionStatus struct { Phase string `json:"phase,omitempty"` Reason string `json:"reason,omitempty"` } diff --git a/images/controller/api/v1alpha1/ceph_storage_class.go b/images/controller/api/v1alpha1/ceph_storage_class.go index eba164d..750d400 100644 --- a/images/controller/api/v1alpha1/ceph_storage_class.go +++ b/images/controller/api/v1alpha1/ceph_storage_class.go @@ -23,6 +23,10 @@ const ( CephStorageClassTypeCephFS = "cephfs" ) +var ( + DefaultMountOptions = []string{"discard"} +) + type CephStorageClass struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -38,22 +42,21 @@ type CephStorageClassList struct { } type CephStorageClassSpec struct { - ClusterName string `json:"clusterName"` - ReclaimPolicy string `json:"reclaimPolicy"` - AllowVolumeExpansion string `json:"allowVolumeExpansion"` - Pool string `json:"pool"` - Type string `json:"type"` - RBD *CephStorageClassRBD `json:"rbd,omitempty"` - CephFS *CephStorageClassCephFS `json:"cephfs,omitempty"` - MountOptions []string `json:"mountOptions,omitempty"` + ClusterConnectionName string `json:"clusterConnectionName"` + ReclaimPolicy string `json:"reclaimPolicy"` + Type string `json:"type"` + RBD *CephStorageClassRBD `json:"rbd,omitempty"` + CephFS *CephStorageClassCephFS `json:"cephfs,omitempty"` } type CephStorageClassRBD struct { DefaultFSType string `json:"defaultFSType"` + Pool string `json:"pool"` } type CephStorageClassCephFS struct { FSName string `json:"fsName,omitempty"` + Pool string `json:"pool"` } type CephStorageClassStatus struct { diff --git a/images/controller/api/v1alpha1/const.go b/images/controller/api/v1alpha1/const.go new file mode 100644 index 0000000..b903922 --- /dev/null +++ b/images/controller/api/v1alpha1/const.go @@ -0,0 +1,22 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +const ( + PhaseFailed = "Failed" + PhaseReady = "Ready" +) diff --git a/images/controller/api/v1alpha1/register.go b/images/controller/api/v1alpha1/register.go index fd792db..f106772 100644 --- a/images/controller/api/v1alpha1/register.go +++ b/images/controller/api/v1alpha1/register.go @@ -43,8 +43,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &CephStorageClass{}, &CephStorageClassList{}, - &CephCluster{}, - &CephClusterList{}, + &CephClusterConnection{}, + &CephClusterConnectionList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/images/controller/api/v1alpha1/zz_generated.deepcopy.go b/images/controller/api/v1alpha1/zz_generated.deepcopy.go index 299e38d..3fe6764 100644 --- a/images/controller/api/v1alpha1/zz_generated.deepcopy.go +++ b/images/controller/api/v1alpha1/zz_generated.deepcopy.go @@ -78,8 +78,8 @@ func (in *CephStorageClassList) DeepCopyObject() runtime.Object { return nil } -// CephCluster -func (in *CephCluster) DeepCopyInto(out *CephCluster) { +// CephClusterConnection +func (in *CephClusterConnection) DeepCopyInto(out *CephClusterConnection) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) @@ -87,17 +87,17 @@ func (in *CephCluster) DeepCopyInto(out *CephCluster) { } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmptyBlockDevice. -func (in *CephCluster) DeepCopy() *CephCluster { +func (in *CephClusterConnection) DeepCopy() *CephClusterConnection { if in == nil { return nil } - out := new(CephCluster) + out := new(CephClusterConnection) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CephCluster) DeepCopyObject() runtime.Object { +func (in *CephClusterConnection) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -105,13 +105,13 @@ func (in *CephCluster) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephClusterList) DeepCopyInto(out *CephClusterList) { +func (in *CephClusterConnectionList) DeepCopyInto(out *CephClusterConnectionList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]CephCluster, len(*in)) + *out = make([]CephClusterConnection, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -119,17 +119,17 @@ func (in *CephClusterList) DeepCopyInto(out *CephClusterList) { } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GuestbookList. 
-func (in *CephClusterList) DeepCopy() *CephClusterList { +func (in *CephClusterConnectionList) DeepCopy() *CephClusterConnectionList { if in == nil { return nil } - out := new(CephClusterList) + out := new(CephClusterConnectionList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CephClusterList) DeepCopyObject() runtime.Object { +func (in *CephClusterConnectionList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } diff --git a/images/controller/cmd/main.go b/images/controller/cmd/main.go index b6a073f..3202dd0 100644 --- a/images/controller/cmd/main.go +++ b/images/controller/cmd/main.go @@ -111,6 +111,11 @@ func main() { os.Exit(1) } + if _, err = controller.RunCephClusterConnectionWatcherController(mgr, *cfgParams, *log); err != nil { + log.Error(err, fmt.Sprintf("[main] unable to run %s", controller.CephClusterConnectionCtrlName)) + os.Exit(1) + } + if err = mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { log.Error(err, "[main] unable to mgr.AddHealthzCheck") os.Exit(1) diff --git a/images/controller/pkg/controller/ceph_cluster_connection_watcher.go b/images/controller/pkg/controller/ceph_cluster_connection_watcher.go new file mode 100644 index 0000000..c3a6b12 --- /dev/null +++ b/images/controller/pkg/controller/ceph_cluster_connection_watcher.go @@ -0,0 +1,190 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + v1alpha1 "d8-controller/api/v1alpha1" + "d8-controller/pkg/config" + "d8-controller/pkg/logger" + "errors" + "fmt" + "reflect" + "time" + + corev1 "k8s.io/api/core/v1" + k8serr "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + "sigs.k8s.io/controller-runtime/pkg/manager" +) + +const ( + // This value used as a name for the controller and the value for managed-by label. 
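+ // The finalizer and the managed-by label below are also set on the Secrets created for a CephClusterConnection.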
+ CephClusterConnectionCtrlName = "ceph-cluster-controller" + CephClusterConnectionControllerFinalizerName = "storage.deckhouse.io/ceph-cluster-controller" + StorageManagedLabelKey = "storage.deckhouse.io/managed-by" + + SecretForCephClusterConnectionPrefix = "csi-ceph-secret-for-" +) + +func RunCephClusterConnectionWatcherController( + mgr manager.Manager, + cfg config.Options, + log logger.Logger, +) (controller.Controller, error) { + cl := mgr.GetClient() + + c, err := controller.New(CephClusterConnectionCtrlName, mgr, controller.Options{ + Reconciler: reconcile.Func(func(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { + log.Info(fmt.Sprintf("[CephClusterConnectionReconciler] starts Reconcile for the CephClusterConnection %q", request.Name)) + cephClusterConnection := &v1alpha1.CephClusterConnection{} + err := cl.Get(ctx, request.NamespacedName, cephClusterConnection) + if err != nil && !k8serr.IsNotFound(err) { + log.Error(err, fmt.Sprintf("[CephClusterConnectionReconciler] unable to get CephClusterConnection, name: %s", request.Name)) + return reconcile.Result{}, err + } + + if cephClusterConnection.Name == "" { + log.Info(fmt.Sprintf("[CephClusterConnectionReconciler] seems like the CephClusterConnection for the request %s was deleted. Reconcile retrying will stop.", request.Name)) + return reconcile.Result{}, nil + } + + secretList := &corev1.SecretList{} + err = cl.List(ctx, secretList, client.InNamespace(cfg.ControllerNamespace)) + if err != nil { + log.Error(err, "[CephClusterConnectionReconciler] unable to list Secrets") + return reconcile.Result{}, err + } + + shouldRequeue, err := RunCephClusterConnectionEventReconcile(ctx, cl, log, secretList, cephClusterConnection, cfg.ControllerNamespace) + if err != nil { + log.Error(err, fmt.Sprintf("[CephClusterConnectionReconciler] an error occured while reconciles the CephClusterConnection, name: %s", cephClusterConnection.Name)) + } + + if shouldRequeue { + log.Warning(fmt.Sprintf("[CephClusterConnectionReconciler] Reconciler will requeue the request, name: %s", request.Name)) + return reconcile.Result{ + RequeueAfter: cfg.RequeueStorageClassInterval * time.Second, + }, nil + } + + log.Info(fmt.Sprintf("[CephClusterConnectionReconciler] ends Reconcile for the CephClusterConnection %q", request.Name)) + return reconcile.Result{}, nil + }), + }) + if err != nil { + log.Error(err, "[RunCephClusterConnectionWatcherController] unable to create controller") + return nil, err + } + + err = c.Watch(source.Kind(mgr.GetCache(), &v1alpha1.CephClusterConnection{}), handler.Funcs{ + CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) { + log.Info(fmt.Sprintf("[CreateFunc] get event for CephClusterConnection %q. Add to the queue", e.Object.GetName())) + request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.Object.GetNamespace(), Name: e.Object.GetName()}} + q.Add(request) + }, + UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) { + log.Info(fmt.Sprintf("[UpdateFunc] get event for CephClusterConnection %q. 
Check if it should be reconciled", e.ObjectNew.GetName())) + + oldCephClusterConnection, ok := e.ObjectOld.(*v1alpha1.CephClusterConnection) + if !ok { + err = errors.New("unable to cast event object to a given type") + log.Error(err, "[UpdateFunc] an error occurred while handling create event") + return + } + newCephClusterConnection, ok := e.ObjectNew.(*v1alpha1.CephClusterConnection) + if !ok { + err = errors.New("unable to cast event object to a given type") + log.Error(err, "[UpdateFunc] an error occurred while handling create event") + return + } + + if reflect.DeepEqual(oldCephClusterConnection.Spec, newCephClusterConnection.Spec) && newCephClusterConnection.DeletionTimestamp == nil { + log.Info(fmt.Sprintf("[UpdateFunc] an update event for the CephClusterConnection %s has no Spec field updates. It will not be reconciled", newCephClusterConnection.Name)) + return + } + + log.Info(fmt.Sprintf("[UpdateFunc] the CephClusterConnection %q will be reconciled. Add to the queue", newCephClusterConnection.Name)) + request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: newCephClusterConnection.Namespace, Name: newCephClusterConnection.Name}} + q.Add(request) + }, + }) + if err != nil { + log.Error(err, "[RunCephClusterConnectionWatcherController] unable to watch the events") + return nil, err + } + + return c, nil +} + +func RunCephClusterConnectionEventReconcile(ctx context.Context, cl client.Client, log logger.Logger, secretList *corev1.SecretList, cephClusterConnection *v1alpha1.CephClusterConnection, controllerNamespace string) (shouldRequeue bool, err error) { + err = validateCephClusterConnectionSpec(cephClusterConnection) + if err != nil { + log.Error(err, fmt.Sprintf("[RunCephClusterConnectionEventReconcile] an error occured while validating the CephClusterConnection %q", cephClusterConnection.Name)) + upError := updateCephClusterConnectionPhase(ctx, cl, cephClusterConnection, v1alpha1.PhaseFailed, err.Error()) + if upError != nil { + upError = fmt.Errorf("[RunCephClusterConnectionEventReconcile] unable to update the CephClusterConnection %s: %w", cephClusterConnection.Name, upError) + err = errors.Join(err, upError) + } + return false, err + } + + added, err := addFinalizerIfNotExists(ctx, cl, cephClusterConnection, CephClusterConnectionControllerFinalizerName) + if err != nil { + err = fmt.Errorf("[RunCephClusterConnectionEventReconcile] unable to add a finalizer %s to the CephClusterConnection %s: %w", CephClusterConnectionControllerFinalizerName, cephClusterConnection.Name, err) + return true, err + } + log.Debug(fmt.Sprintf("[RunCephClusterConnectionEventReconcile] finalizer %s was added to the CephClusterConnection %s: %t", CephClusterConnectionControllerFinalizerName, cephClusterConnection.Name, added)) + + secretName := SecretForCephClusterConnectionPrefix + cephClusterConnection.Name + reconcileTypeForSecret, err := IdentifyReconcileFuncForSecret(log, secretList, cephClusterConnection, controllerNamespace, secretName) + if err != nil { + log.Error(err, fmt.Sprintf("[RunCephClusterConnectionEventReconcile] error occured while identifying the reconcile function for the Secret %q", SecretForCephClusterConnectionPrefix+cephClusterConnection.Name)) + return true, err + } + + shouldRequeue = false + log.Debug(fmt.Sprintf("[RunCephClusterConnectionEventReconcile] reconcile operation of CephClusterConnection %s for Secret %s: %s", cephClusterConnection.Name, secretName, reconcileTypeForSecret)) + switch reconcileTypeForSecret { + case CreateReconcile: + 
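+ // A Debug entry mirroring the DeleteReconcile branch below, added so the create path is traceable too.
+ log.Debug(fmt.Sprintf("[RunCephClusterConnectionEventReconcile] CreateReconcile: starts reconciliation of CephClusterConnection %s for Secret %s", cephClusterConnection.Name, secretName))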
shouldRequeue, err = reconcileSecretCreateFunc(ctx, cl, log, cephClusterConnection, controllerNamespace, secretName)
+ case UpdateReconcile:
+ shouldRequeue, err = reconcileSecretUpdateFunc(ctx, cl, log, secretList, cephClusterConnection, controllerNamespace, secretName)
+ case DeleteReconcile:
+ log.Debug(fmt.Sprintf("[RunCephClusterConnectionEventReconcile] DeleteReconcile: starts reconciliation of CephClusterConnection %s for Secret %s", cephClusterConnection.Name, secretName))
+ shouldRequeue, err = reconcileSecretDeleteFunc(ctx, cl, log, secretList, cephClusterConnection, secretName)
+ default:
+ log.Debug(fmt.Sprintf("[RunCephClusterConnectionEventReconcile] the Secret for CephClusterConnection %s should not be reconciled", cephClusterConnection.Name))
+ }
+ log.Debug(fmt.Sprintf("[RunCephClusterConnectionEventReconcile] ends reconciliation of the Secret for CephClusterConnection %s, shouldRequeue: %t, err: %v", cephClusterConnection.Name, shouldRequeue, err))
+
+ if err != nil || shouldRequeue {
+ return shouldRequeue, err
+ }
+
+ log.Debug(fmt.Sprintf("[RunCephClusterConnectionEventReconcile] Finish all reconciliations for CephClusterConnection %q.", cephClusterConnection.Name))
+ return false, nil
+}
diff --git a/images/controller/pkg/controller/ceph_cluster_connection_watcher_func.go b/images/controller/pkg/controller/ceph_cluster_connection_watcher_func.go
new file mode 100644
index 0000000..eb8b739
--- /dev/null
+++ b/images/controller/pkg/controller/ceph_cluster_connection_watcher_func.go
@@ -0,0 +1,278 @@
+/*
+Copyright 2024 Flant JSC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package controller + +import ( + "context" + v1alpha1 "d8-controller/api/v1alpha1" + "d8-controller/pkg/logger" + "errors" + "fmt" + "reflect" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func validateCephClusterConnectionSpec(cephClusterConnection *v1alpha1.CephClusterConnection) error { + if cephClusterConnection.Spec.ClusterID == "" { + return fmt.Errorf("[validateCephClusterConnectionSpec] %s: spec.clusterID is required", cephClusterConnection.Name) + } + + if cephClusterConnection.Spec.Monitors == nil { + return fmt.Errorf("[validateCephClusterConnectionSpec] %s: spec.monitors is required", cephClusterConnection.Name) + } + + if cephClusterConnection.Spec.UserID == "" { + return fmt.Errorf("[validateCephClusterConnectionSpec] %s: spec.userID is required", cephClusterConnection.Name) + } + + if cephClusterConnection.Spec.UserKey == "" { + return fmt.Errorf("[validateCephClusterConnectionSpec] %s: spec.userKey is required", cephClusterConnection.Name) + } + + return nil +} + +func IdentifyReconcileFuncForSecret(log logger.Logger, secretList *corev1.SecretList, cephClusterConnection *v1alpha1.CephClusterConnection, controllerNamespace, secretName string) (reconcileType string, err error) { + if shouldReconcileByDeleteFunc(cephClusterConnection) { + return DeleteReconcile, nil + } + + if shouldReconcileSecretByCreateFunc(secretList, cephClusterConnection, secretName) { + return CreateReconcile, nil + } + + should, err := shouldReconcileSecretByUpdateFunc(log, secretList, cephClusterConnection, controllerNamespace, secretName) + if err != nil { + return "", err + } + if should { + return UpdateReconcile, nil + } + + return "", nil +} + +func shouldReconcileSecretByCreateFunc(secretList *corev1.SecretList, cephClusterConnection *v1alpha1.CephClusterConnection, secretName string) bool { + if cephClusterConnection.DeletionTimestamp != nil { + return false + } + + for _, s := range secretList.Items { + if s.Name == secretName { + return false + } + } + + return true +} + +func shouldReconcileSecretByUpdateFunc(log logger.Logger, secretList *corev1.SecretList, cephClusterConnection *v1alpha1.CephClusterConnection, controllerNamespace, secretName string) (bool, error) { + if cephClusterConnection.DeletionTimestamp != nil { + return false, nil + } + + secretSelector := labels.Set(map[string]string{ + StorageManagedLabelKey: CephClusterConnectionCtrlName, + }) + + for _, oldSecret := range secretList.Items { + if oldSecret.Name == secretName { + newSecret := configureSecret(cephClusterConnection, controllerNamespace, secretName) + equal := areSecretsEqual(&oldSecret, newSecret) + if !equal { + log.Debug(fmt.Sprintf("[shouldReconcileSecretByUpdateFunc] a secret %s should be updated", secretName)) + if !labels.Set(oldSecret.Labels).AsSelector().Matches(secretSelector) { + err := fmt.Errorf("a secret %q does not have a label %s=%s", oldSecret.Name, StorageManagedLabelKey, CephClusterConnectionCtrlName) + return false, err + } + return true, nil + } + + if !labels.Set(oldSecret.Labels).AsSelector().Matches(secretSelector) { + log.Debug(fmt.Sprintf("[shouldReconcileSecretByUpdateFunc] a secret %s should be updated. The label %s=%s is missing", oldSecret.Name, StorageManagedLabelKey, CephClusterConnectionCtrlName)) + return true, nil + } + + return false, nil + } + } + + log.Debug(fmt.Sprintf("[shouldReconcileSecretByUpdateFunc] a secret %s not found in the list: %+v. 
It should be created", secretName, secretList.Items)) + return true, nil +} + +func configureSecret(cephClusterConnection *v1alpha1.CephClusterConnection, controllerNamespace, secretName string) *corev1.Secret { + userID := cephClusterConnection.Spec.UserID + userKey := cephClusterConnection.Spec.UserKey + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: controllerNamespace, + Labels: map[string]string{ + StorageManagedLabelKey: CephClusterConnectionCtrlName, + }, + Finalizers: []string{CephClusterConnectionControllerFinalizerName}, + }, + StringData: map[string]string{ + // Credentials for RBD + "userID": userID, + "userKey": userKey, + + // Credentials for CephFS + "adminID": userID, + "adminKey": userKey, + }, + } + + return secret +} + +func areSecretsEqual(old, new *corev1.Secret) bool { + if reflect.DeepEqual(old.StringData, new.StringData) && reflect.DeepEqual(old.Labels, new.Labels) { + return true + } + + return true +} + +func reconcileSecretCreateFunc(ctx context.Context, cl client.Client, log logger.Logger, cephClusterConnection *v1alpha1.CephClusterConnection, controllerNamespace, secretName string) (shouldRequeue bool, err error) { + log.Debug(fmt.Sprintf("[reconcileSecretCreateFunc] starts reconciliataion of CephClusterConnection %s for Secret %s", cephClusterConnection.Name, secretName)) + + newSecret := configureSecret(cephClusterConnection, controllerNamespace, secretName) + log.Debug(fmt.Sprintf("[reconcileSecretCreateFunc] successfully configurated secret %s for the CephClusterConnection %s", secretName, cephClusterConnection.Name)) + log.Trace(fmt.Sprintf("[reconcileSecretCreateFunc] secret: %+v", newSecret)) + + err = cl.Create(ctx, newSecret) + if err != nil { + err = fmt.Errorf("[reconcileSecretCreateFunc] unable to create a Secret %s for CephClusterConnection %s: %w", newSecret.Name, cephClusterConnection.Name, err) + upError := updateCephClusterConnectionPhase(ctx, cl, cephClusterConnection, FailedStatusPhase, err.Error()) + if upError != nil { + upError = fmt.Errorf("[reconcileSecretCreateFunc] unable to update the CephClusterConnection %s: %w", cephClusterConnection.Name, upError) + err = errors.Join(err, upError) + } + return true, err + } + + return false, nil +} + +func reconcileSecretUpdateFunc(ctx context.Context, cl client.Client, log logger.Logger, secretList *corev1.SecretList, cephClusterConnection *v1alpha1.CephClusterConnection, controllerNamespace, secretName string) (shouldRequeue bool, err error) { + log.Debug(fmt.Sprintf("[reconcileSecretUpdateFunc] starts reconciliataion of CephClusterConnection %s for Secret %s", cephClusterConnection.Name, secretName)) + + var oldSecret *corev1.Secret + for _, s := range secretList.Items { + if s.Name == secretName { + oldSecret = &s + break + } + } + + if oldSecret == nil { + err := fmt.Errorf("[reconcileSecretUpdateFunc] unable to find a secret %s for the CephClusterConnection %s", secretName, cephClusterConnection.Name) + upError := updateCephClusterConnectionPhase(ctx, cl, cephClusterConnection, FailedStatusPhase, err.Error()) + if upError != nil { + upError = fmt.Errorf("[reconcileSecretUpdateFunc] unable to update the CephClusterConnection %s: %w", cephClusterConnection.Name, upError) + err = errors.Join(err, upError) + } + return true, err + } + + log.Debug(fmt.Sprintf("[reconcileSecretUpdateFunc] secret %s was found for the CephClusterConnection %s", secretName, cephClusterConnection.Name)) + + newSecret := configureSecret(cephClusterConnection, 
controllerNamespace, secretName) + log.Debug(fmt.Sprintf("[reconcileSecretUpdateFunc] successfully configurated new secret %s for the CephClusterConnection %s", secretName, cephClusterConnection.Name)) + log.Trace(fmt.Sprintf("[reconcileSecretUpdateFunc] new secret: %+v", newSecret)) + log.Trace(fmt.Sprintf("[reconcileSecretUpdateFunc] old secret: %+v", oldSecret)) + + err = cl.Update(ctx, newSecret) + if err != nil { + err = fmt.Errorf("[reconcileSecretUpdateFunc] unable to update the Secret %s for CephClusterConnection %s: %w", newSecret.Name, cephClusterConnection.Name, err) + upError := updateCephClusterConnectionPhase(ctx, cl, cephClusterConnection, FailedStatusPhase, err.Error()) + if upError != nil { + upError = fmt.Errorf("[reconcileSecretUpdateFunc] unable to update the CephClusterConnection %s: %w", cephClusterConnection.Name, upError) + err = errors.Join(err, upError) + } + return true, err + } + + return false, nil +} + +func reconcileSecretDeleteFunc(ctx context.Context, cl client.Client, log logger.Logger, secretList *corev1.SecretList, cephClusterConnection *v1alpha1.CephClusterConnection, secretName string) (shouldRequeue bool, err error) { + log.Debug(fmt.Sprintf("[reconcileSecretDeleteFunc] starts reconciliataion of CephClusterConnection %s for Secret %s", cephClusterConnection.Name, secretName)) + + var secret *corev1.Secret + for _, s := range secretList.Items { + if s.Name == secretName { + secret = &s + break + } + } + + if secret == nil { + log.Info(fmt.Sprintf("[reconcileSecretDeleteFunc] no secret with name %s found for the CephClusterConnection %s", secretName, cephClusterConnection.Name)) + } + + if secret != nil { + log.Info(fmt.Sprintf("[reconcileSecretDeleteFunc] successfully found a secret %s for the CephClusterConnection %s", secretName, cephClusterConnection.Name)) + log.Debug(fmt.Sprintf("[reconcileSecretDeleteFunc] starts removing a finalizer %s from the Secret %s", CephClusterConnectionControllerFinalizerName, secret.Name)) + _, err := removeFinalizerIfExists(ctx, cl, secret, CephClusterConnectionControllerFinalizerName) + if err != nil { + err = fmt.Errorf("[reconcileSecretDeleteFunc] unable to remove a finalizer %s from the Secret %s: %w", CephClusterConnectionControllerFinalizerName, secret.Name, err) + upErr := updateCephClusterConnectionPhase(ctx, cl, cephClusterConnection, FailedStatusPhase, fmt.Sprintf("Unable to remove a finalizer, err: %s", err.Error())) + if upErr != nil { + upErr = fmt.Errorf("[reconcileSecretDeleteFunc] unable to update the CephClusterConnection %s: %w", cephClusterConnection.Name, upErr) + err = errors.Join(err, upErr) + } + return true, err + } + + err = cl.Delete(ctx, secret) + if err != nil { + err = fmt.Errorf("[reconcileSecretDeleteFunc] unable to delete a secret %s: %w", secret.Name, err) + upErr := updateCephClusterConnectionPhase(ctx, cl, cephClusterConnection, FailedStatusPhase, fmt.Sprintf("Unable to delete a secret, err: %s", err.Error())) + if upErr != nil { + upErr = fmt.Errorf("[reconcileSecretDeleteFunc] unable to update the CephClusterConnection %s: %w", cephClusterConnection.Name, upErr) + err = errors.Join(err, upErr) + } + return true, err + } + } + + log.Info(fmt.Sprintf("[reconcileSecretDeleteFunc] ends reconciliataion of CephClusterConnection %s for Secret %s", cephClusterConnection.Name, secretName)) + + return false, nil +} + +func updateCephClusterConnectionPhase(ctx context.Context, cl client.Client, cephClusterConnection *v1alpha1.CephClusterConnection, phase, reason string) error { + if 
cephClusterConnection.Status == nil { + cephClusterConnection.Status = &v1alpha1.CephClusterConnectionStatus{} + } + cephClusterConnection.Status.Phase = phase + cephClusterConnection.Status.Reason = reason + + err := cl.Status().Update(ctx, cephClusterConnection) + if err != nil { + return err + } + + return nil +} diff --git a/images/controller/pkg/controller/ceph_storage_class_watcher.go b/images/controller/pkg/controller/ceph_storage_class_watcher.go index 1e70f99..64c6ec0 100644 --- a/images/controller/pkg/controller/ceph_storage_class_watcher.go +++ b/images/controller/pkg/controller/ceph_storage_class_watcher.go @@ -59,16 +59,6 @@ const ( CreateReconcile = "Create" UpdateReconcile = "Update" DeleteReconcile = "Delete" - - // serverParamKey = "server" - // shareParamKey = "share" - // MountPermissionsParamKey = "mountPermissions" - // SubDirParamKey = "subdir" - // MountOptionsSecretKey = "mountOptions" - - // SecretForMountOptionsPrefix = "ceph-mount-options-for-" - // StorageClassSecretNameKey = "csi.storage.k8s.io/provisioner-secret-name" - // StorageClassSecretNSKey = "csi.storage.k8s.io/provisioner-secret-namespace" ) var ( @@ -104,7 +94,7 @@ func RunCephStorageClassWatcherController( return reconcile.Result{}, err } - shouldRequeue, err := RunEventReconcile(ctx, cl, log, scList, cephSC, cfg.ControllerNamespace) + shouldRequeue, err := RunStorageClassEventReconcile(ctx, cl, log, scList, cephSC, cfg.ControllerNamespace) if err != nil { log.Error(err, fmt.Sprintf("[CephStorageClassReconciler] an error occured while reconciles the CephStorageClass, name: %s", cephSC.Name)) } @@ -165,42 +155,64 @@ func RunCephStorageClassWatcherController( return c, nil } -func RunEventReconcile(ctx context.Context, cl client.Client, log logger.Logger, scList *v1.StorageClassList, cephSC *v1alpha1.CephStorageClass, controllerNamespace string) (shouldRequeue bool, err error) { +func RunStorageClassEventReconcile(ctx context.Context, cl client.Client, log logger.Logger, scList *v1.StorageClassList, cephSC *v1alpha1.CephStorageClass, controllerNamespace string) (shouldRequeue bool, err error) { added, err := addFinalizerIfNotExists(ctx, cl, cephSC, CephStorageClassControllerFinalizerName) if err != nil { - err = fmt.Errorf("[reconcileStorageClassCreateFunc] unable to add a finalizer %s to the CephStorageClass %s: %w", CephStorageClassControllerFinalizerName, cephSC.Name, err) + err = fmt.Errorf("[RunStorageClassEventReconcile] unable to add a finalizer %s to the CephStorageClass %s: %w", CephStorageClassControllerFinalizerName, cephSC.Name, err) + return true, err + } + log.Debug(fmt.Sprintf("[RunStorageClassEventReconcile] finalizer %s was added to the CephStorageClass %s: %t", CephStorageClassControllerFinalizerName, cephSC.Name, added)) + + valid, msg := validateCephStorageClassSpec(cephSC) + if !valid { + err = fmt.Errorf("[RunStorageClassEventReconcile] CephStorageClass %s has invalid spec: %s", cephSC.Name, msg) + upErr := updateCephStorageClassPhase(ctx, cl, cephSC, FailedStatusPhase, msg) + if upErr != nil { + upErr = fmt.Errorf("[RunStorageClassEventReconcile] unable to update the CephStorageClass %s: %w", cephSC.Name, upErr) + err = errors.Join(err, upErr) + } + return false, err + } + + clusterID, err := getClusterID(ctx, cl, cephSC) + if err != nil { + err = fmt.Errorf("[RunStorageClassEventReconcile] unable to get clusterID for CephStorageClass %s: %w", cephSC.Name, err) + upErr := updateCephStorageClassPhase(ctx, cl, cephSC, FailedStatusPhase, err.Error()) + if upErr != nil { + upErr = 
fmt.Errorf("[RunStorageClassEventReconcile] unable to update the CephStorageClass %s: %w", cephSC.Name, upErr) + err = errors.Join(err, upErr) + } return true, err } - log.Debug(fmt.Sprintf("[reconcileStorageClassCreateFunc] finalizer %s was added to the CephStorageClass %s: %t", CephStorageClassControllerFinalizerName, cephSC.Name, added)) - reconcileTypeForStorageClass, err := IdentifyReconcileFuncForStorageClass(log, scList, cephSC, controllerNamespace) + reconcileTypeForStorageClass, err := IdentifyReconcileFuncForStorageClass(log, scList, cephSC, controllerNamespace, clusterID) if err != nil { - err = fmt.Errorf("[runEventReconcile] error occured while identifying the reconcile function for StorageClass %s: %w", cephSC.Name, err) + err = fmt.Errorf("[RunStorageClassEventReconcile] error occured while identifying the reconcile function for StorageClass %s: %w", cephSC.Name, err) return true, err } shouldRequeue = false - log.Debug(fmt.Sprintf("[runEventReconcile] reconcile operation for StorageClass %q: %q", cephSC.Name, reconcileTypeForStorageClass)) + log.Debug(fmt.Sprintf("[RunStorageClassEventReconcile] reconcile operation for StorageClass %q: %q", cephSC.Name, reconcileTypeForStorageClass)) switch reconcileTypeForStorageClass { case CreateReconcile: - log.Debug(fmt.Sprintf("[runEventReconcile] CreateReconcile starts reconciliataion of StorageClass, name: %s", cephSC.Name)) - shouldRequeue, err = ReconcileStorageClassCreateFunc(ctx, cl, log, scList, cephSC, controllerNamespace) + log.Debug(fmt.Sprintf("[RunStorageClassEventReconcile] CreateReconcile starts reconciliataion of StorageClass, name: %s", cephSC.Name)) + shouldRequeue, err = RunStorageClassEventReconcile(ctx, cl, log, scList, cephSC, controllerNamespace) case UpdateReconcile: - log.Debug(fmt.Sprintf("[runEventReconcile] UpdateReconcile starts reconciliataion of StorageClass, name: %s", cephSC.Name)) - shouldRequeue, err = reconcileStorageClassUpdateFunc(ctx, cl, log, scList, cephSC, controllerNamespace) + log.Debug(fmt.Sprintf("[RunStorageClassEventReconcile] UpdateReconcile starts reconciliataion of StorageClass, name: %s", cephSC.Name)) + shouldRequeue, err = reconcileStorageClassUpdateFunc(ctx, cl, log, scList, cephSC, controllerNamespace, clusterID) case DeleteReconcile: - log.Debug(fmt.Sprintf("[runEventReconcile] DeleteReconcile starts reconciliataion of StorageClass, name: %s", cephSC.Name)) + log.Debug(fmt.Sprintf("[RunStorageClassEventReconcile] DeleteReconcile starts reconciliataion of StorageClass, name: %s", cephSC.Name)) shouldRequeue, err = reconcileStorageClassDeleteFunc(ctx, cl, log, scList, cephSC) default: - log.Debug(fmt.Sprintf("[runEventReconcile] StorageClass for CephStorageClass %s should not be reconciled", cephSC.Name)) + log.Debug(fmt.Sprintf("[RunStorageClassEventReconcile] StorageClass for CephStorageClass %s should not be reconciled", cephSC.Name)) } - log.Debug(fmt.Sprintf("[runEventReconcile] ends reconciliataion of StorageClass, name: %s, shouldRequeue: %t, err: %v", cephSC.Name, shouldRequeue, err)) + log.Debug(fmt.Sprintf("[RunStorageClassEventReconcile] ends reconciliataion of StorageClass, name: %s, shouldRequeue: %t, err: %v", cephSC.Name, shouldRequeue, err)) if err != nil || shouldRequeue { return shouldRequeue, err } - log.Debug(fmt.Sprintf("[runEventReconcile] Finish all reconciliations for CephStorageClass %q.", cephSC.Name)) + log.Debug(fmt.Sprintf("[RunStorageClassEventReconcile] Finish all reconciliations for CephStorageClass %q.", cephSC.Name)) return false, nil } diff --git 
a/images/controller/pkg/controller/good_func.go b/images/controller/pkg/controller/ceph_storage_class_watcher_func.go similarity index 81% rename from images/controller/pkg/controller/good_func.go rename to images/controller/pkg/controller/ceph_storage_class_watcher_func.go index e496125..82f9984 100644 --- a/images/controller/pkg/controller/good_func.go +++ b/images/controller/pkg/controller/ceph_storage_class_watcher_func.go @@ -23,7 +23,7 @@ import ( "errors" "fmt" "reflect" - "strconv" + "strings" "slices" @@ -39,11 +39,11 @@ func ReconcileStorageClassCreateFunc( log logger.Logger, scList *v1.StorageClassList, cephSC *storagev1alpha1.CephStorageClass, - controllerNamespace string, + controllerNamespace, clusterID string, ) (bool, error) { log.Debug(fmt.Sprintf("[reconcileStorageClassCreateFunc] starts for CephStorageClass %q", cephSC.Name)) log.Debug(fmt.Sprintf("[reconcileStorageClassCreateFunc] starts storage class configuration for the CephStorageClass, name: %s", cephSC.Name)) - newSC, err := ConfigureStorageClass(cephSC, controllerNamespace) + newSC, err := ConfigureStorageClass(cephSC, controllerNamespace, clusterID) if err != nil { err = fmt.Errorf("[reconcileStorageClassCreateFunc] unable to configure a Storage Class for the CephStorageClass %s: %w", cephSC.Name, err) upError := updateCephStorageClassPhase(ctx, cl, cephSC, FailedStatusPhase, err.Error()) @@ -84,7 +84,7 @@ func reconcileStorageClassUpdateFunc( log logger.Logger, scList *v1.StorageClassList, cephSC *storagev1alpha1.CephStorageClass, - controllerNamespace string, + controllerNamespace, clusterID string, ) (bool, error) { log.Debug(fmt.Sprintf("[reconcileStorageClassUpdateFunc] starts for CephStorageClass %q", cephSC.Name)) @@ -111,7 +111,7 @@ func reconcileStorageClassUpdateFunc( log.Debug(fmt.Sprintf("[reconcileStorageClassUpdateFunc] successfully found a storage class for the CephStorageClass, name: %s", cephSC.Name)) log.Trace(fmt.Sprintf("[reconcileStorageClassUpdateFunc] storage class: %+v", oldSC)) - newSC, err := ConfigureStorageClass(cephSC, controllerNamespace) + newSC, err := ConfigureStorageClass(cephSC, controllerNamespace, clusterID) if err != nil { err = fmt.Errorf("[reconcileStorageClassUpdateFunc] unable to configure a Storage Class for the CephStorageClass %s: %w", cephSC.Name, err) upError := updateCephStorageClassPhase(ctx, cl, cephSC, FailedStatusPhase, err.Error()) @@ -153,7 +153,7 @@ func reconcileStorageClassUpdateFunc( return false, nil } -func IdentifyReconcileFuncForStorageClass(log logger.Logger, scList *v1.StorageClassList, cephSC *storagev1alpha1.CephStorageClass, controllerNamespace string) (reconcileType string, err error) { +func IdentifyReconcileFuncForStorageClass(log logger.Logger, scList *v1.StorageClassList, cephSC *storagev1alpha1.CephStorageClass, controllerNamespace, clusterID string) (reconcileType string, err error) { if shouldReconcileByDeleteFunc(cephSC) { return DeleteReconcile, nil } @@ -162,7 +162,7 @@ func IdentifyReconcileFuncForStorageClass(log logger.Logger, scList *v1.StorageC return CreateReconcile, nil } - should, err := shouldReconcileStorageClassByUpdateFunc(log, scList, cephSC, controllerNamespace) + should, err := shouldReconcileStorageClassByUpdateFunc(log, scList, cephSC, controllerNamespace, clusterID) if err != nil { return "", err } @@ -187,7 +187,7 @@ func shouldReconcileStorageClassByCreateFunc(scList *v1.StorageClassList, cephSC return true } -func shouldReconcileStorageClassByUpdateFunc(log logger.Logger, scList *v1.StorageClassList, cephSC 
*storagev1alpha1.CephStorageClass, controllerNamespace string) (bool, error) { +func shouldReconcileStorageClassByUpdateFunc(log logger.Logger, scList *v1.StorageClassList, cephSC *storagev1alpha1.CephStorageClass, controllerNamespace, clusterID string) (bool, error) { if cephSC.DeletionTimestamp != nil { return false, nil } @@ -195,7 +195,7 @@ func shouldReconcileStorageClassByUpdateFunc(log logger.Logger, scList *v1.Stora for _, oldSC := range scList.Items { if oldSC.Name == cephSC.Name { if slices.Contains(allowedProvisioners, oldSC.Provisioner) { - newSC, err := ConfigureStorageClass(cephSC, controllerNamespace) + newSC, err := ConfigureStorageClass(cephSC, controllerNamespace, clusterID) if err != nil { return false, err } @@ -276,86 +276,20 @@ func reconcileStorageClassDeleteFunc( return false, nil } -func shouldReconcileByDeleteFunc(cephSC *storagev1alpha1.CephStorageClass) bool { - if cephSC.DeletionTimestamp != nil { - return true - } - - return false -} - -func removeFinalizerIfExists(ctx context.Context, cl client.Client, obj metav1.Object, finalizerName string) (bool, error) { - removed := false - finalizers := obj.GetFinalizers() - for i, f := range finalizers { - if f == finalizerName { - finalizers = append(finalizers[:i], finalizers[i+1:]...) - removed = true - break - } - } - - if removed { - obj.SetFinalizers(finalizers) - err := cl.Update(ctx, obj.(client.Object)) - if err != nil { - return false, err - } - } - - return removed, nil -} - -func addFinalizerIfNotExists(ctx context.Context, cl client.Client, obj metav1.Object, finalizerName string) (bool, error) { - added := false - finalizers := obj.GetFinalizers() - if !slices.Contains(finalizers, finalizerName) { - finalizers = append(finalizers, finalizerName) - added = true - } - - if added { - obj.SetFinalizers(finalizers) - err := cl.Update(ctx, obj.(client.Object)) - if err != nil { - return false, err - } - } - return true, nil -} - -func ConfigureStorageClass(cephSC *storagev1alpha1.CephStorageClass, controllerNamespace string) (*v1.StorageClass, error) { - if cephSC.Spec.ReclaimPolicy == "" { - err := fmt.Errorf("CephStorageClass %q: the ReclaimPolicy field is empty", cephSC.Name) - return nil, err - } - - if cephSC.Spec.AllowVolumeExpansion == "" { - err := fmt.Errorf("CephStorageClass %q: the AllowVolumeExpansion field is empty", cephSC.Name) - return nil, err - } - - provisioner, err := GetStorageClassProvisioner(cephSC) - if err != nil { - err = fmt.Errorf("CephStorageClass %q: unable to get a provisioner: %w", cephSC.Name, err) - return nil, err - } - - allowVolumeExpansion, err := strconv.ParseBool(cephSC.Spec.AllowVolumeExpansion) - if err != nil { - err = fmt.Errorf("CephStorageClass %q: the AllowVolumeExpansion field is not a boolean value: %w", cephSC.Name, err) - return nil, err - } - +func ConfigureStorageClass(cephSC *storagev1alpha1.CephStorageClass, controllerNamespace, clusterID string) (*v1.StorageClass, error) { + provisioner := GetStorageClassProvisioner(cephSC) + allowVolumeExpansion := true reclaimPolicy := corev1.PersistentVolumeReclaimPolicy(cephSC.Spec.ReclaimPolicy) - volumeBindingMode := v1.VolumeBindingWaitForFirstConsumer + volumeBindingMode := v1.VolumeBindingImmediate - params, err := GetStoragecClassParams(cephSC, controllerNamespace) + params, err := GetStoragecClassParams(cephSC, controllerNamespace, clusterID) if err != nil { err = fmt.Errorf("CephStorageClass %q: unable to get a storage class parameters: %w", cephSC.Name, err) return nil, err } + mountOpt := 
storagev1alpha1.DefaultMountOptions + sc := &v1.StorageClass{ TypeMeta: metav1.TypeMeta{ Kind: StorageClassKind, @@ -367,64 +301,34 @@ func ConfigureStorageClass(cephSC *storagev1alpha1.CephStorageClass, controllerN Finalizers: []string{CephStorageClassControllerFinalizerName}, }, Parameters: params, - MountOptions: cephSC.Spec.MountOptions, Provisioner: provisioner, ReclaimPolicy: &reclaimPolicy, VolumeBindingMode: &volumeBindingMode, AllowVolumeExpansion: &allowVolumeExpansion, + MountOptions: mountOpt, } return sc, nil } -func GetStorageClassProvisioner(cephSC *storagev1alpha1.CephStorageClass) (string, error) { - if cephSC.Spec.Type == "" { - err := fmt.Errorf("CephStorageClass %q: the Type field is empty", cephSC.Name) - return "", err - } - - if cephSC.Spec.Type == storagev1alpha1.CephStorageClassTypeRBD && cephSC.Spec.RBD == nil { - err := fmt.Errorf("CephStorageClass %q type is %q, but the rbd field is empty", cephSC.Name, storagev1alpha1.CephStorageClassTypeRBD) - return "", err - } - - if cephSC.Spec.Type == storagev1alpha1.CephStorageClassTypeCephFS && cephSC.Spec.CephFS == nil { - err := fmt.Errorf("CephStorageClass %q type is %q, but the cephfs field is empty", cephSC.Name, storagev1alpha1.CephStorageClassTypeCephFS) - return "", err - } - +func GetStorageClassProvisioner(cephSC *storagev1alpha1.CephStorageClass) string { provisioner := "" switch cephSC.Spec.Type { case storagev1alpha1.CephStorageClassTypeRBD: provisioner = CephStorageClassRBDProvisioner case storagev1alpha1.CephStorageClassTypeCephFS: provisioner = CephStorageClassCephFSProvisioner - default: - err := fmt.Errorf("CephStorageClass %q: the Type field is not valid: %s", cephSC.Name, cephSC.Spec.Type) - return "", err } - return provisioner, nil + return provisioner } -func GetStoragecClassParams(cephSC *storagev1alpha1.CephStorageClass, controllerNamespace string) (map[string]string, error) { - - if cephSC.Spec.ClusterName == "" { - err := errors.New("CephStorageClass ClusterName is empty") - return nil, err - } - - if cephSC.Spec.Pool == "" { - err := errors.New("CephStorageClass Pool is empty") - return nil, err - } - - secretName := fmt.Sprintf("csi-ceph-secret-for-%s", cephSC.Spec.ClusterName) +func GetStoragecClassParams(cephSC *storagev1alpha1.CephStorageClass, controllerNamespace, clusterID string) (map[string]string, error) { + secretName := SecretForCephClusterConnectionPrefix + cephSC.Spec.ClusterConnectionName params := map[string]string{ - "clusterID": cephSC.Spec.ClusterName, - "pool": cephSC.Spec.Pool, + "clusterID": clusterID, "csi.storage.k8s.io/provisioner-secret-name": secretName, "csi.storage.k8s.io/provisioner-secret-namespace": controllerNamespace, "csi.storage.k8s.io/controller-expand-secret-name": secretName, @@ -436,10 +340,12 @@ func GetStoragecClassParams(cephSC *storagev1alpha1.CephStorageClass, controller if cephSC.Spec.Type == storagev1alpha1.CephStorageClassTypeRBD { params["imageFeatures"] = "layering" params["csi.storage.k8s.io/fstype"] = cephSC.Spec.RBD.DefaultFSType + params["pool"] = cephSC.Spec.RBD.Pool } if cephSC.Spec.Type == storagev1alpha1.CephStorageClassTypeCephFS { params["fsName"] = cephSC.Spec.CephFS.FSName + params["pool"] = cephSC.Spec.CephFS.Pool } return params, nil @@ -547,3 +453,87 @@ func deleteStorageClass(ctx context.Context, cl client.Client, sc *v1.StorageCla return nil } + +func validateCephStorageClassSpec(cephSC *storagev1alpha1.CephStorageClass) (bool, string) { + if cephSC.DeletionTimestamp != nil { + return true, "" + } + + var ( + failedMsgBuilder 
strings.Builder + validationPassed = true + ) + + failedMsgBuilder.WriteString("Validation of CephStorageClass failed: ") + + if cephSC.Spec.ClusterConnectionName == "" { + validationPassed = false + failedMsgBuilder.WriteString("the spec.clusterConnectionName field is empty") + } + + if cephSC.Spec.ReclaimPolicy == "" { + validationPassed = false + failedMsgBuilder.WriteString("the spec.reclaimPolicy field is empty") + } + + if cephSC.Spec.Type == "" { + validationPassed = false + failedMsgBuilder.WriteString("the spec.type field is empty") + } + + switch cephSC.Spec.Type { + case storagev1alpha1.CephStorageClassTypeRBD: + if cephSC.Spec.RBD == nil { + validationPassed = false + failedMsgBuilder.WriteString("the spec.rbd field is empty") + break + } + + if cephSC.Spec.RBD.DefaultFSType == "" { + validationPassed = false + failedMsgBuilder.WriteString("the spec.rbd.defaultFSType field is empty") + } + + if cephSC.Spec.RBD.Pool == "" { + validationPassed = false + failedMsgBuilder.WriteString("the spec.rbd.pool field is empty") + } + case storagev1alpha1.CephStorageClassTypeCephFS: + if cephSC.Spec.CephFS == nil { + validationPassed = false + failedMsgBuilder.WriteString("the spec.cephfs field is empty") + break + } + + if cephSC.Spec.CephFS.FSName == "" { + validationPassed = false + failedMsgBuilder.WriteString("the spec.cephfs.fsName field is empty") + } + + if cephSC.Spec.CephFS.Pool == "" { + validationPassed = false + failedMsgBuilder.WriteString("the spec.cephfs.pool field is empty") + } + default: + validationPassed = false + failedMsgBuilder.WriteString(fmt.Sprintf("the spec.type field is not valid: %s. Allowed values: %s, %s", cephSC.Spec.Type, storagev1alpha1.CephStorageClassTypeRBD, storagev1alpha1.CephStorageClassTypeCephFS)) + } + + return validationPassed, failedMsgBuilder.String() +} + +func getClusterID(ctx context.Context, cl client.Client, cephSC *storagev1alpha1.CephStorageClass) (string, error) { + clusterConnectionName := cephSC.Spec.ClusterConnectionName + clusterConnection := &storagev1alpha1.CephClusterConnection{} + err := cl.Get(ctx, client.ObjectKey{Namespace: cephSC.Namespace, Name: clusterConnectionName}, clusterConnection) + if err != nil { + err = fmt.Errorf("[getClusterID] CephStorageClass %q: unable to get a CephClusterConnection %q: %w", cephSC.Name, clusterConnectionName, err) + return "", err + } + + clusterID := clusterConnection.Spec.ClusterID + if clusterID == "" { + err = fmt.Errorf("[getClusterID] CephStorageClass %q: the CephClusterConnection %q has an empty spec.clusterID field", cephSC.Name, clusterConnectionName) + return "", err + } + + return clusterID, nil +} diff --git a/images/controller/pkg/controller/common_func.go b/images/controller/pkg/controller/common_func.go new file mode 100644 index 0000000..57738d2 --- /dev/null +++ b/images/controller/pkg/controller/common_func.go @@ -0,0 +1,73 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "context" + "slices" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func shouldReconcileByDeleteFunc(obj metav1.Object) bool { + if obj.GetDeletionTimestamp() != nil { + return true + } + + return false +} + +func removeFinalizerIfExists(ctx context.Context, cl client.Client, obj metav1.Object, finalizerName string) (bool, error) { + removed := false + finalizers := obj.GetFinalizers() + for i, f := range finalizers { + if f == finalizerName { + finalizers = append(finalizers[:i], finalizers[i+1:]...) + removed = true + break + } + } + + if removed { + obj.SetFinalizers(finalizers) + err := cl.Update(ctx, obj.(client.Object)) + if err != nil { + return false, err + } + } + + return removed, nil +} + +func addFinalizerIfNotExists(ctx context.Context, cl client.Client, obj metav1.Object, finalizerName string) (bool, error) { + added := false + finalizers := obj.GetFinalizers() + if !slices.Contains(finalizers, finalizerName) { + finalizers = append(finalizers, finalizerName) + added = true + } + + if added { + obj.SetFinalizers(finalizers) + err := cl.Update(ctx, obj.(client.Object)) + if err != nil { + return false, err + } + } + return true, nil +} From 7718339f6d33bacd5ed8c4fbb5a3a80ce3d0302a Mon Sep 17 00:00:00 2001 From: "alexandr.zimin@flant.com" Date: Sat, 15 Jun 2024 23:55:16 +0300 Subject: [PATCH 03/21] finish sc watcher Signed-off-by: Aleksandr Zimin --- .../ceph_cluster_connection_watcher_func.go | 10 +- .../controller/ceph_storage_class_watcher.go | 66 ++--- .../ceph_storage_class_watcher_func.go | 245 ++++++++---------- 3 files changed, 142 insertions(+), 179 deletions(-) diff --git a/images/controller/pkg/controller/ceph_cluster_connection_watcher_func.go b/images/controller/pkg/controller/ceph_cluster_connection_watcher_func.go index eb8b739..98ed943 100644 --- a/images/controller/pkg/controller/ceph_cluster_connection_watcher_func.go +++ b/images/controller/pkg/controller/ceph_cluster_connection_watcher_func.go @@ -163,7 +163,7 @@ func reconcileSecretCreateFunc(ctx context.Context, cl client.Client, log logger err = cl.Create(ctx, newSecret) if err != nil { err = fmt.Errorf("[reconcileSecretCreateFunc] unable to create a Secret %s for CephClusterConnection %s: %w", newSecret.Name, cephClusterConnection.Name, err) - upError := updateCephClusterConnectionPhase(ctx, cl, cephClusterConnection, FailedStatusPhase, err.Error()) + upError := updateCephClusterConnectionPhase(ctx, cl, cephClusterConnection, PhaseFailed, err.Error()) if upError != nil { upError = fmt.Errorf("[reconcileSecretCreateFunc] unable to update the CephClusterConnection %s: %w", cephClusterConnection.Name, upError) err = errors.Join(err, upError) @@ -187,7 +187,7 @@ func reconcileSecretUpdateFunc(ctx context.Context, cl client.Client, log logger if oldSecret == nil { err := fmt.Errorf("[reconcileSecretUpdateFunc] unable to find a secret %s for the CephClusterConnection %s", secretName, cephClusterConnection.Name) - upError := updateCephClusterConnectionPhase(ctx, cl, cephClusterConnection, FailedStatusPhase, err.Error()) + upError := updateCephClusterConnectionPhase(ctx, cl, cephClusterConnection, PhaseFailed, err.Error()) if upError != nil { upError = fmt.Errorf("[reconcileSecretUpdateFunc] unable to update the CephClusterConnection %s: %w", cephClusterConnection.Name, upError) err = errors.Join(err, upError) @@ -205,7 +205,7 @@ func reconcileSecretUpdateFunc(ctx context.Context, cl client.Client, log logger err = 
cl.Update(ctx, newSecret) if err != nil { err = fmt.Errorf("[reconcileSecretUpdateFunc] unable to update the Secret %s for CephClusterConnection %s: %w", newSecret.Name, cephClusterConnection.Name, err) - upError := updateCephClusterConnectionPhase(ctx, cl, cephClusterConnection, FailedStatusPhase, err.Error()) + upError := updateCephClusterConnectionPhase(ctx, cl, cephClusterConnection, PhaseFailed, err.Error()) if upError != nil { upError = fmt.Errorf("[reconcileSecretUpdateFunc] unable to update the CephClusterConnection %s: %w", cephClusterConnection.Name, upError) err = errors.Join(err, upError) @@ -237,7 +237,7 @@ func reconcileSecretDeleteFunc(ctx context.Context, cl client.Client, log logger _, err := removeFinalizerIfExists(ctx, cl, secret, CephClusterConnectionControllerFinalizerName) if err != nil { err = fmt.Errorf("[reconcileSecretDeleteFunc] unable to remove a finalizer %s from the Secret %s: %w", CephClusterConnectionControllerFinalizerName, secret.Name, err) - upErr := updateCephClusterConnectionPhase(ctx, cl, cephClusterConnection, FailedStatusPhase, fmt.Sprintf("Unable to remove a finalizer, err: %s", err.Error())) + upErr := updateCephClusterConnectionPhase(ctx, cl, cephClusterConnection, PhaseFailed, fmt.Sprintf("Unable to remove a finalizer, err: %s", err.Error())) if upErr != nil { upErr = fmt.Errorf("[reconcileSecretDeleteFunc] unable to update the CephClusterConnection %s: %w", cephClusterConnection.Name, upErr) err = errors.Join(err, upErr) @@ -248,7 +248,7 @@ func reconcileSecretDeleteFunc(ctx context.Context, cl client.Client, log logger err = cl.Delete(ctx, secret) if err != nil { err = fmt.Errorf("[reconcileSecretDeleteFunc] unable to delete a secret %s: %w", secret.Name, err) - upErr := updateCephClusterConnectionPhase(ctx, cl, cephClusterConnection, FailedStatusPhase, fmt.Sprintf("Unable to delete a secret, err: %s", err.Error())) + upErr := updateCephClusterConnectionPhase(ctx, cl, cephClusterConnection, PhaseFailed, fmt.Sprintf("Unable to delete a secret, err: %s", err.Error())) if upErr != nil { upErr = fmt.Errorf("[reconcileSecretDeleteFunc] unable to update the CephClusterConnection %s: %w", cephClusterConnection.Name, upErr) err = errors.Join(err, upErr) diff --git a/images/controller/pkg/controller/ceph_storage_class_watcher.go b/images/controller/pkg/controller/ceph_storage_class_watcher.go index 64c6ec0..86cbeb1 100644 --- a/images/controller/pkg/controller/ceph_storage_class_watcher.go +++ b/images/controller/pkg/controller/ceph_storage_class_watcher.go @@ -53,8 +53,8 @@ const ( CephStorageClassManagedLabelKey = "storage.deckhouse.io/managed-by" CephStorageClassManagedLabelValue = "ceph-storage-class-controller" - FailedStatusPhase = "Failed" - CreatedStatusPhase = "Created" + PhaseFailed = "Failed" + PhaseCreated = "Created" CreateReconcile = "Create" UpdateReconcile = "Update" @@ -94,9 +94,21 @@ func RunCephStorageClassWatcherController( return reconcile.Result{}, err } - shouldRequeue, err := RunStorageClassEventReconcile(ctx, cl, log, scList, cephSC, cfg.ControllerNamespace) + shouldRequeue, msg, err := RunStorageClassEventReconcile(ctx, cl, log, scList, cephSC, cfg.ControllerNamespace) + log.Info(fmt.Sprintf("[CephStorageClassReconciler] CephStorageClass %s has been reconciled with message: %s", cephSC.Name, msg)) + phase := PhaseCreated if err != nil { log.Error(err, fmt.Sprintf("[CephStorageClassReconciler] an error occured while reconciles the CephStorageClass, name: %s", cephSC.Name)) + phase = PhaseFailed + } + + if msg != "" { + 
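// Assumption based on this hunk: the reconcile helpers return a non-empty msg only when
// there is something worth persisting, so the CephStorageClass status (phase/reason) is
// written once here rather than inside every helper.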
log.Debug(fmt.Sprintf("[CephStorageClassReconciler] Update the CephStorageClass %s with %s status phase and message: %s", cephSC.Name, phase, msg)) + upErr := updateCephStorageClassPhase(ctx, cl, cephSC, phase, msg) + if upErr != nil { + log.Error(upErr, fmt.Sprintf("[CephStorageClassReconciler] unable to update the CephStorageClass %s: %s", cephSC.Name, upErr.Error())) + shouldRequeue = true + } } if shouldRequeue { @@ -155,64 +167,52 @@ func RunCephStorageClassWatcherController( return c, nil } -func RunStorageClassEventReconcile(ctx context.Context, cl client.Client, log logger.Logger, scList *v1.StorageClassList, cephSC *v1alpha1.CephStorageClass, controllerNamespace string) (shouldRequeue bool, err error) { +func RunStorageClassEventReconcile(ctx context.Context, cl client.Client, log logger.Logger, scList *v1.StorageClassList, cephSC *v1alpha1.CephStorageClass, controllerNamespace string) (shouldRequeue bool, msg string, err error) { + log.Debug(fmt.Sprintf("[RunStorageClassEventReconcile] starts reconciliataion of CephStorageClass, name: %s", cephSC.Name)) + valid, msg := validateCephStorageClassSpec(cephSC) + if !valid { + return false, msg, err + } + log.Debug(fmt.Sprintf("[RunStorageClassEventReconcile] CephStorageClass %s has valid spec", cephSC.Name)) + added, err := addFinalizerIfNotExists(ctx, cl, cephSC, CephStorageClassControllerFinalizerName) if err != nil { err = fmt.Errorf("[RunStorageClassEventReconcile] unable to add a finalizer %s to the CephStorageClass %s: %w", CephStorageClassControllerFinalizerName, cephSC.Name, err) - return true, err + return true, err.Error(), err } log.Debug(fmt.Sprintf("[RunStorageClassEventReconcile] finalizer %s was added to the CephStorageClass %s: %t", CephStorageClassControllerFinalizerName, cephSC.Name, added)) - valid, msg := validateCephStorageClassSpec(cephSC) - if !valid { - err = fmt.Errorf("[RunStorageClassEventReconcile] CephStorageClass %s has invalid spec: %s", cephSC.Name, msg) - upErr := updateCephStorageClassPhase(ctx, cl, cephSC, FailedStatusPhase, msg) - if upErr != nil { - upErr = fmt.Errorf("[RunStorageClassEventReconcile] unable to update the CephStorageClass %s: %w", cephSC.Name, upErr) - err = errors.Join(err, upErr) - } - return false, err - } - clusterID, err := getClusterID(ctx, cl, cephSC) if err != nil { err = fmt.Errorf("[RunStorageClassEventReconcile] unable to get clusterID for CephStorageClass %s: %w", cephSC.Name, err) - upErr := updateCephStorageClassPhase(ctx, cl, cephSC, FailedStatusPhase, err.Error()) - if upErr != nil { - upErr = fmt.Errorf("[RunStorageClassEventReconcile] unable to update the CephStorageClass %s: %w", cephSC.Name, upErr) - err = errors.Join(err, upErr) - } - return true, err + return true, err.Error(), err } reconcileTypeForStorageClass, err := IdentifyReconcileFuncForStorageClass(log, scList, cephSC, controllerNamespace, clusterID) if err != nil { err = fmt.Errorf("[RunStorageClassEventReconcile] error occured while identifying the reconcile function for StorageClass %s: %w", cephSC.Name, err) - return true, err + return true, err.Error(), err } shouldRequeue = false - log.Debug(fmt.Sprintf("[RunStorageClassEventReconcile] reconcile operation for StorageClass %q: %q", cephSC.Name, reconcileTypeForStorageClass)) + log.Debug(fmt.Sprintf("[RunStorageClassEventReconcile] Successfully identified the reconcile type for StorageClass %s: %s", cephSC.Name, reconcileTypeForStorageClass)) switch reconcileTypeForStorageClass { case CreateReconcile: - 
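// Sketch of the shape shared by the per-type helpers after this refactor (taken from the
// hunks that follow; msg is what the caller stores in the status reason):
//   reconcileStorageClass{Create,Update,Delete}Func(...) (shouldRequeue bool, msg string, err error)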
log.Debug(fmt.Sprintf("[RunStorageClassEventReconcile] CreateReconcile starts reconciliataion of StorageClass, name: %s", cephSC.Name)) - shouldRequeue, err = RunStorageClassEventReconcile(ctx, cl, log, scList, cephSC, controllerNamespace) + shouldRequeue, msg, err = reconcileStorageClassCreateFunc(ctx, cl, log, scList, cephSC, controllerNamespace, clusterID) case UpdateReconcile: - log.Debug(fmt.Sprintf("[RunStorageClassEventReconcile] UpdateReconcile starts reconciliataion of StorageClass, name: %s", cephSC.Name)) - shouldRequeue, err = reconcileStorageClassUpdateFunc(ctx, cl, log, scList, cephSC, controllerNamespace, clusterID) + shouldRequeue, msg, err = reconcileStorageClassUpdateFunc(ctx, cl, log, scList, cephSC, controllerNamespace, clusterID) case DeleteReconcile: - log.Debug(fmt.Sprintf("[RunStorageClassEventReconcile] DeleteReconcile starts reconciliataion of StorageClass, name: %s", cephSC.Name)) - shouldRequeue, err = reconcileStorageClassDeleteFunc(ctx, cl, log, scList, cephSC) + shouldRequeue, msg, err = reconcileStorageClassDeleteFunc(ctx, cl, log, scList, cephSC) default: log.Debug(fmt.Sprintf("[RunStorageClassEventReconcile] StorageClass for CephStorageClass %s should not be reconciled", cephSC.Name)) + msg = "Successfully reconciled" } log.Debug(fmt.Sprintf("[RunStorageClassEventReconcile] ends reconciliataion of StorageClass, name: %s, shouldRequeue: %t, err: %v", cephSC.Name, shouldRequeue, err)) if err != nil || shouldRequeue { - return shouldRequeue, err + return shouldRequeue, msg, err } log.Debug(fmt.Sprintf("[RunStorageClassEventReconcile] Finish all reconciliations for CephStorageClass %q.", cephSC.Name)) - return false, nil - + return false, msg, nil } diff --git a/images/controller/pkg/controller/ceph_storage_class_watcher_func.go b/images/controller/pkg/controller/ceph_storage_class_watcher_func.go index 82f9984..19275f4 100644 --- a/images/controller/pkg/controller/ceph_storage_class_watcher_func.go +++ b/images/controller/pkg/controller/ceph_storage_class_watcher_func.go @@ -20,7 +20,6 @@ import ( "context" storagev1alpha1 "d8-controller/api/v1alpha1" "d8-controller/pkg/logger" - "errors" "fmt" "reflect" "strings" @@ -33,25 +32,95 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -func ReconcileStorageClassCreateFunc( +func IdentifyReconcileFuncForStorageClass(log logger.Logger, scList *v1.StorageClassList, cephSC *storagev1alpha1.CephStorageClass, controllerNamespace, clusterID string) (reconcileType string, err error) { + if shouldReconcileByDeleteFunc(cephSC) { + return DeleteReconcile, nil + } + + if shouldReconcileStorageClassByCreateFunc(scList, cephSC) { + return CreateReconcile, nil + } + + should, err := shouldReconcileStorageClassByUpdateFunc(log, scList, cephSC, controllerNamespace, clusterID) + if err != nil { + return "", err + } + if should { + return UpdateReconcile, nil + } + + return "", nil +} + +func shouldReconcileStorageClassByCreateFunc(scList *v1.StorageClassList, cephSC *storagev1alpha1.CephStorageClass) bool { + if cephSC.DeletionTimestamp != nil { + return false + } + + for _, sc := range scList.Items { + if sc.Name == cephSC.Name { + return false + } + } + + return true +} + +func shouldReconcileStorageClassByUpdateFunc(log logger.Logger, scList *v1.StorageClassList, cephSC *storagev1alpha1.CephStorageClass, controllerNamespace, clusterID string) (bool, error) { + if cephSC.DeletionTimestamp != nil { + return false, nil + } + + for _, oldSC := range scList.Items { + if oldSC.Name == cephSC.Name { + if 
slices.Contains(allowedProvisioners, oldSC.Provisioner) { + newSC, err := ConfigureStorageClass(cephSC, controllerNamespace, clusterID) + if err != nil { + return false, err + } + + diff, err := GetSCDiff(&oldSC, newSC) + if err != nil { + return false, err + } + + if diff != "" { + log.Debug(fmt.Sprintf("[shouldReconcileStorageClassByUpdateFunc] a storage class %s should be updated. Diff: %s", oldSC.Name, diff)) + return true, nil + } + + if cephSC.Status != nil && cephSC.Status.Phase == PhaseFailed { + return true, nil + } + + return false, nil + + } else { + err := fmt.Errorf("a storage class %s with provisioner % s does not belong to allowed provisioners: %v", oldSC.Name, oldSC.Provisioner, allowedProvisioners) + return false, err + } + } + } + + err := fmt.Errorf("a storage class %s does not exist", cephSC.Name) + return false, err +} + +func reconcileStorageClassCreateFunc( ctx context.Context, cl client.Client, log logger.Logger, scList *v1.StorageClassList, cephSC *storagev1alpha1.CephStorageClass, controllerNamespace, clusterID string, -) (bool, error) { +) (shouldRequeue bool, msg string, err error) { log.Debug(fmt.Sprintf("[reconcileStorageClassCreateFunc] starts for CephStorageClass %q", cephSC.Name)) + log.Debug(fmt.Sprintf("[reconcileStorageClassCreateFunc] starts storage class configuration for the CephStorageClass, name: %s", cephSC.Name)) newSC, err := ConfigureStorageClass(cephSC, controllerNamespace, clusterID) if err != nil { err = fmt.Errorf("[reconcileStorageClassCreateFunc] unable to configure a Storage Class for the CephStorageClass %s: %w", cephSC.Name, err) - upError := updateCephStorageClassPhase(ctx, cl, cephSC, FailedStatusPhase, err.Error()) - if upError != nil { - upError = fmt.Errorf("[reconcileStorageClassCreateFunc] unable to update the CephStorageClass %s: %w", cephSC.Name, upError) - err = errors.Join(err, upError) - } - return false, err + return false, err.Error(), err } log.Debug(fmt.Sprintf("[reconcileStorageClassCreateFunc] successfully configurated storage class for the CephStorageClass, name: %s", cephSC.Name)) @@ -60,22 +129,18 @@ func ReconcileStorageClassCreateFunc( created, err := createStorageClassIfNotExists(ctx, cl, scList, newSC) if err != nil { err = fmt.Errorf("[reconcileStorageClassCreateFunc] unable to create a Storage Class %s: %w", newSC.Name, err) - upError := updateCephStorageClassPhase(ctx, cl, cephSC, FailedStatusPhase, err.Error()) - if upError != nil { - upError = fmt.Errorf("[reconcileStorageClassCreateFunc] unable to update the CephStorageClass %s: %w", cephSC.Name, upError) - err = errors.Join(err, upError) - } - return true, err + return true, err.Error(), err } + log.Debug(fmt.Sprintf("[reconcileStorageClassCreateFunc] a storage class %s was created: %t", newSC.Name, created)) if created { log.Info(fmt.Sprintf("[reconcileStorageClassCreateFunc] successfully create storage class, name: %s", newSC.Name)) } else { - log.Warning(fmt.Sprintf("[reconcileLSCCreateFunc] Storage class %s already exists. 
Adding event to requeue.", newSC.Name)) - return true, nil + err = fmt.Errorf("[reconcileStorageClassCreateFunc] Storage class %s already exists", newSC.Name) + return true, err.Error(), err } - return false, nil + return false, "Successfully created", nil } func reconcileStorageClassUpdateFunc( @@ -85,8 +150,7 @@ func reconcileStorageClassUpdateFunc( scList *v1.StorageClassList, cephSC *storagev1alpha1.CephStorageClass, controllerNamespace, clusterID string, -) (bool, error) { - +) (shouldRequeue bool, msg string, err error) { log.Debug(fmt.Sprintf("[reconcileStorageClassUpdateFunc] starts for CephStorageClass %q", cephSC.Name)) var oldSC *v1.StorageClass @@ -98,39 +162,23 @@ func reconcileStorageClassUpdateFunc( } if oldSC == nil { - err := fmt.Errorf("a storage class %s does not exist", cephSC.Name) err = fmt.Errorf("[reconcileStorageClassUpdateFunc] unable to find a storage class for the CephStorageClass %s: %w", cephSC.Name, err) - upError := updateCephStorageClassPhase(ctx, cl, cephSC, FailedStatusPhase, err.Error()) - if upError != nil { - upError = fmt.Errorf("[reconcileStorageClassUpdateFunc] unable to update the CephStorageClass %s: %w", cephSC.Name, upError) - err = errors.Join(err, upError) - } - return true, err + return true, err.Error(), err } log.Debug(fmt.Sprintf("[reconcileStorageClassUpdateFunc] successfully found a storage class for the CephStorageClass, name: %s", cephSC.Name)) - log.Trace(fmt.Sprintf("[reconcileStorageClassUpdateFunc] storage class: %+v", oldSC)) + newSC, err := ConfigureStorageClass(cephSC, controllerNamespace, clusterID) if err != nil { err = fmt.Errorf("[reconcileStorageClassUpdateFunc] unable to configure a Storage Class for the CephStorageClass %s: %w", cephSC.Name, err) - upError := updateCephStorageClassPhase(ctx, cl, cephSC, FailedStatusPhase, err.Error()) - if upError != nil { - upError = fmt.Errorf("[reconcileStorageClassUpdateFunc] unable to update the CephStorageClass %s: %w", cephSC.Name, upError) - err = errors.Join(err, upError) - } - return false, err + return false, err.Error(), err } diff, err := GetSCDiff(oldSC, newSC) if err != nil { err = fmt.Errorf("[reconcileStorageClassUpdateFunc] error occured while identifying the difference between the existed StorageClass %s and the new one: %w", newSC.Name, err) - upError := updateCephStorageClassPhase(ctx, cl, cephSC, FailedStatusPhase, err.Error()) - if upError != nil { - upError = fmt.Errorf("[reconcileStorageClassUpdateFunc] unable to update the CephStorageClass %s: %w", cephSC.Name, upError) - err = errors.Join(err, upError) - } - return true, err + return true, err.Error(), err } if diff != "" { @@ -139,92 +187,13 @@ func reconcileStorageClassUpdateFunc( err = recreateStorageClass(ctx, cl, oldSC, newSC) if err != nil { err = fmt.Errorf("[reconcileStorageClassUpdateFunc] unable to recreate a Storage Class %s: %w", newSC.Name, err) - upError := updateCephStorageClassPhase(ctx, cl, cephSC, FailedStatusPhase, err.Error()) - if upError != nil { - upError = fmt.Errorf("[reconcileStorageClassUpdateFunc] unable to update the CephStorageClass %s: %w", cephSC.Name, upError) - err = errors.Join(err, upError) - } - return true, err + return true, err.Error(), err } log.Info(fmt.Sprintf("[reconcileStorageClassUpdateFunc] a Storage Class %s was successfully recreated", newSC.Name)) } - return false, nil -} - -func IdentifyReconcileFuncForStorageClass(log logger.Logger, scList *v1.StorageClassList, cephSC *storagev1alpha1.CephStorageClass, controllerNamespace, clusterID string) (reconcileType 
string, err error) { - if shouldReconcileByDeleteFunc(cephSC) { - return DeleteReconcile, nil - } - - if shouldReconcileStorageClassByCreateFunc(scList, cephSC) { - return CreateReconcile, nil - } - - should, err := shouldReconcileStorageClassByUpdateFunc(log, scList, cephSC, controllerNamespace, clusterID) - if err != nil { - return "", err - } - if should { - return UpdateReconcile, nil - } - - return "", nil -} - -func shouldReconcileStorageClassByCreateFunc(scList *v1.StorageClassList, cephSC *storagev1alpha1.CephStorageClass) bool { - if cephSC.DeletionTimestamp != nil { - return false - } - - for _, sc := range scList.Items { - if sc.Name == cephSC.Name { - return false - } - } - - return true -} - -func shouldReconcileStorageClassByUpdateFunc(log logger.Logger, scList *v1.StorageClassList, cephSC *storagev1alpha1.CephStorageClass, controllerNamespace, clusterID string) (bool, error) { - if cephSC.DeletionTimestamp != nil { - return false, nil - } - - for _, oldSC := range scList.Items { - if oldSC.Name == cephSC.Name { - if slices.Contains(allowedProvisioners, oldSC.Provisioner) { - newSC, err := ConfigureStorageClass(cephSC, controllerNamespace, clusterID) - if err != nil { - return false, err - } - - diff, err := GetSCDiff(&oldSC, newSC) - if err != nil { - return false, err - } - - if diff != "" { - log.Debug(fmt.Sprintf("[shouldReconcileStorageClassByUpdateFunc] a storage class %s should be updated. Diff: %s", oldSC.Name, diff)) - return true, nil - } - - if cephSC.Status != nil && cephSC.Status.Phase == FailedStatusPhase { - return true, nil - } - - return false, nil - - } else { - err := fmt.Errorf("a storage class %s with provisioner % s does not belong to allowed provisioners: %v", oldSC.Name, oldSC.Provisioner, allowedProvisioners) - return false, err - } - } - } - - err := fmt.Errorf("a storage class %s does not exist", cephSC.Name) - return false, err + return false, "Successfully updated", nil } func reconcileStorageClassDeleteFunc( @@ -233,8 +202,9 @@ func reconcileStorageClassDeleteFunc( log logger.Logger, scList *v1.StorageClassList, cephSC *storagev1alpha1.CephStorageClass, -) (bool, error) { - log.Debug(fmt.Sprintf("[reconcileStorageClassDeleteFunc] tries to find a storage class for the CephStorageClass %s", cephSC.Name)) +) (shouldRequeue bool, msg string, err error) { + log.Debug(fmt.Sprintf("[reconcileStorageClassDeleteFunc] starts for CephStorageClass %q", cephSC.Name)) + var sc *v1.StorageClass for _, s := range scList.Items { if s.Name == cephSC.Name { @@ -242,6 +212,7 @@ func reconcileStorageClassDeleteFunc( break } } + if sc == nil { log.Info(fmt.Sprintf("[reconcileStorageClassDeleteFunc] no storage class found for the CephStorageClass, name: %s", cephSC.Name)) } @@ -250,30 +221,22 @@ func reconcileStorageClassDeleteFunc( log.Info(fmt.Sprintf("[reconcileStorageClassDeleteFunc] successfully found a storage class for the CephStorageClass %s", cephSC.Name)) log.Debug(fmt.Sprintf("[reconcileStorageClassDeleteFunc] starts identifying a provisioner for the storage class %s", sc.Name)) - if slices.Contains(allowedProvisioners, sc.Provisioner) { - log.Info(fmt.Sprintf("[reconcileStorageClassDeleteFunc] the storage class %s provisioner %s belongs to allowed provisioners: %v", sc.Name, sc.Provisioner, allowedProvisioners)) - - err := deleteStorageClass(ctx, cl, sc) - if err != nil { - err = fmt.Errorf("[reconcileStorageClassDeleteFunc] unable to delete a storage class %s: %w", sc.Name, err) - upErr := updateCephStorageClassPhase(ctx, cl, cephSC, FailedStatusPhase, 
fmt.Sprintf("Unable to delete a storage class, err: %s", err.Error())) - if upErr != nil { - upErr = fmt.Errorf("[reconcileStorageClassDeleteFunc] unable to update the CephStorageClass %s: %w", cephSC.Name, upErr) - err = errors.Join(err, upErr) - } - return true, err - } - log.Info(fmt.Sprintf("[reconcileStorageClassDeleteFunc] successfully deleted a storage class, name: %s", sc.Name)) - } - if !slices.Contains(allowedProvisioners, sc.Provisioner) { - log.Info(fmt.Sprintf("[reconcileStorageClassDeleteFunc] the storage class %s provisioner %s does not belong to allowed provisioners: %v", sc.Name, sc.Provisioner, allowedProvisioners)) + err = fmt.Errorf("[reconcileStorageClassDeleteFunc] a storage class %s with provisioner %s does not belong to allowed provisioners: %v", sc.Name, sc.Provisioner, allowedProvisioners) + return true, err.Error(), err + } + log.Info(fmt.Sprintf("[reconcileStorageClassDeleteFunc] the storage class %s provisioner %s belongs to allowed provisioners: %v", sc.Name, sc.Provisioner, allowedProvisioners)) + err := deleteStorageClass(ctx, cl, sc) + if err != nil { + err = fmt.Errorf("[reconcileStorageClassDeleteFunc] unable to delete a storage class %s: %w", sc.Name, err) + return true, err.Error(), err } + log.Info(fmt.Sprintf("[reconcileStorageClassDeleteFunc] successfully deleted a storage class, name: %s", sc.Name)) } log.Debug("[reconcileStorageClassDeleteFunc] ends the reconciliation") - return false, nil + return false, "", nil } func ConfigureStorageClass(cephSC *storagev1alpha1.CephStorageClass, controllerNamespace, clusterID string) (*v1.StorageClass, error) { From d36b8215d8a3602b0dd90910134cae1acf9adae7 Mon Sep 17 00:00:00 2001 From: Aleksandr Zimin Date: Sat, 15 Jun 2024 23:58:29 +0300 Subject: [PATCH 04/21] some fixes Signed-off-by: Aleksandr Zimin --- .../pkg/controller/ceph_cluster_connection_watcher.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/controller/pkg/controller/ceph_cluster_connection_watcher.go b/images/controller/pkg/controller/ceph_cluster_connection_watcher.go index c3a6b12..3ace54c 100644 --- a/images/controller/pkg/controller/ceph_cluster_connection_watcher.go +++ b/images/controller/pkg/controller/ceph_cluster_connection_watcher.go @@ -41,7 +41,7 @@ import ( ) const ( - // This value used as a name for the controller and the value for managed-by label. + // This value used as a name for the controller AND the value for managed-by label. 
CephClusterConnectionCtrlName = "ceph-cluster-controller" CephClusterConnectionControllerFinalizerName = "storage.deckhouse.io/ceph-cluster-controller" StorageManagedLabelKey = "storage.deckhouse.io/managed-by" From 25388401fa78cf87ee56e7d5d02d943cda2f91af Mon Sep 17 00:00:00 2001 From: Aleksandr Zimin Date: Sun, 16 Jun 2024 01:12:56 +0300 Subject: [PATCH 05/21] some changes Signed-off-by: Aleksandr Zimin --- images/controller/api/v1alpha1/const.go | 4 +- .../ceph_cluster_connection_watcher.go | 78 ++++++----- .../ceph_cluster_connection_watcher_func.go | 130 +++++++++--------- .../controller/ceph_storage_class_watcher.go | 22 ++- .../ceph_storage_class_watcher_func.go | 58 ++++---- images/controller/pkg/internal/const.go | 26 ++++ 6 files changed, 179 insertions(+), 139 deletions(-) create mode 100644 images/controller/pkg/internal/const.go diff --git a/images/controller/api/v1alpha1/const.go b/images/controller/api/v1alpha1/const.go index b903922..00abd68 100644 --- a/images/controller/api/v1alpha1/const.go +++ b/images/controller/api/v1alpha1/const.go @@ -17,6 +17,6 @@ limitations under the License. package v1alpha1 const ( - PhaseFailed = "Failed" - PhaseReady = "Ready" + PhaseFailed = "Failed" + PhaseCreated = "Created" ) diff --git a/images/controller/pkg/controller/ceph_cluster_connection_watcher.go b/images/controller/pkg/controller/ceph_cluster_connection_watcher.go index 3ace54c..04c42c7 100644 --- a/images/controller/pkg/controller/ceph_cluster_connection_watcher.go +++ b/images/controller/pkg/controller/ceph_cluster_connection_watcher.go @@ -20,6 +20,7 @@ import ( "context" v1alpha1 "d8-controller/api/v1alpha1" "d8-controller/pkg/config" + "d8-controller/pkg/internal" "d8-controller/pkg/logger" "errors" "fmt" @@ -42,11 +43,8 @@ import ( const ( // This value used as a name for the controller AND the value for managed-by label. 
-	CephClusterConnectionCtrlName                = "ceph-cluster-controller"
+	CephClusterConnectionCtrlName                = "d8-ceph-cluster-controller"
 	CephClusterConnectionControllerFinalizerName = "storage.deckhouse.io/ceph-cluster-controller"
-	StorageManagedLabelKey                       = "storage.deckhouse.io/managed-by"
-
-	SecretForCephClusterConnectionPrefix = "csi-ceph-secret-for-"
 )
 
 func RunCephClusterConnectionWatcherController(
@@ -78,9 +76,21 @@ func RunCephClusterConnectionWatcherController(
 			return reconcile.Result{}, err
 		}
 
-		shouldRequeue, err := RunCephClusterConnectionEventReconcile(ctx, cl, log, secretList, cephClusterConnection, cfg.ControllerNamespace)
+		shouldRequeue, msg, err := RunCephClusterConnectionEventReconcile(ctx, cl, log, secretList, cephClusterConnection, cfg.ControllerNamespace)
+		log.Info(fmt.Sprintf("[CephClusterConnectionReconciler] CephClusterConnection %s has been reconciled with message: %s", cephClusterConnection.Name, msg))
+		phase := v1alpha1.PhaseCreated
 		if err != nil {
 			log.Error(err, fmt.Sprintf("[CephClusterConnectionReconciler] an error occured while reconciles the CephClusterConnection, name: %s", cephClusterConnection.Name))
+			phase = v1alpha1.PhaseFailed
+		}
+
+		if msg != "" {
+			log.Debug(fmt.Sprintf("[CephClusterConnectionReconciler] update the CephClusterConnection %s with the phase %s and message: %s", cephClusterConnection.Name, phase, msg))
+			upErr := updateCephClusterConnectionPhase(ctx, cl, cephClusterConnection, phase, msg)
+			if upErr != nil {
+				log.Error(upErr, fmt.Sprintf("[CephClusterConnectionReconciler] unable to update the CephClusterConnection %s: %s", cephClusterConnection.Name, upErr.Error()))
+				shouldRequeue = true
+			}
 		}
 
 		if shouldRequeue {
@@ -139,52 +149,56 @@ func RunCephClusterConnectionWatcherController(
 	return c, nil
 }
 
-func RunCephClusterConnectionEventReconcile(ctx context.Context, cl client.Client, log logger.Logger, secretList *corev1.SecretList, cephClusterConnection *v1alpha1.CephClusterConnection, controllerNamespace string) (shouldRequeue bool, err error) {
-	err = validateCephClusterConnectionSpec(cephClusterConnection)
-	if err != nil {
-		log.Error(err, fmt.Sprintf("[RunCephClusterConnectionEventReconcile] an error occured while validating the CephClusterConnection %q", cephClusterConnection.Name))
-		upError := updateCephClusterConnectionPhase(ctx, cl, cephClusterConnection, v1alpha1.PhaseFailed, err.Error())
-		if upError != nil {
-			upError = fmt.Errorf("[RunCephClusterConnectionEventReconcile] unable to update the CephClusterConnection %s: %w", cephClusterConnection.Name, upError)
-			err = errors.Join(err, upError)
-		}
-		return false, err
+func RunCephClusterConnectionEventReconcile(ctx context.Context, cl client.Client, log logger.Logger, secretList *corev1.SecretList, cephClusterConnection *v1alpha1.CephClusterConnection, controllerNamespace string) (shouldRequeue bool, msg string, err error) {
+	valid, msg := validateCephClusterConnectionSpec(cephClusterConnection)
+	if !valid {
+		err = fmt.Errorf("[RunCephClusterConnectionEventReconcile] CephClusterConnection %s has invalid spec: %s", cephClusterConnection.Name, msg)
+		return false, msg, err
 	}
+	log.Debug(fmt.Sprintf("[RunCephClusterConnectionEventReconcile] CephClusterConnection %s has valid spec", cephClusterConnection.Name))
 
 	added, err := addFinalizerIfNotExists(ctx, cl, cephClusterConnection, CephClusterConnectionControllerFinalizerName)
 	if err != nil {
 		err = fmt.Errorf("[RunCephClusterConnectionEventReconcile] unable to add a finalizer %s to the CephClusterConnection %s: %w",
CephClusterConnectionControllerFinalizerName, cephClusterConnection.Name, err) - return true, err + return true, err.Error(), err } log.Debug(fmt.Sprintf("[RunCephClusterConnectionEventReconcile] finalizer %s was added to the CephClusterConnection %s: %t", CephClusterConnectionControllerFinalizerName, cephClusterConnection.Name, added)) - secretName := SecretForCephClusterConnectionPrefix + cephClusterConnection.Name + secretName := internal.CephClusterConnectionSecretPrefix + cephClusterConnection.Name reconcileTypeForSecret, err := IdentifyReconcileFuncForSecret(log, secretList, cephClusterConnection, controllerNamespace, secretName) if err != nil { - log.Error(err, fmt.Sprintf("[RunCephClusterConnectionEventReconcile] error occured while identifying the reconcile function for the Secret %q", SecretForCephClusterConnectionPrefix+cephClusterConnection.Name)) - return true, err + err = fmt.Errorf("[RunCephClusterConnectionEventReconcile] error occurred while identifying the reconcile function for CephClusterConnection %s on Secret %s: %w", cephClusterConnection.Name, secretName, err) + return true, err.Error(), err } shouldRequeue = false - log.Debug(fmt.Sprintf("[RunCephClusterConnectionEventReconcile] reconcile operation of CephClusterConnection %s for Secret %s: %s", cephClusterConnection.Name, secretName, reconcileTypeForSecret)) + log.Debug(fmt.Sprintf("[RunCephClusterConnectionEventReconcile] successfully identified the reconcile type for CephClusterConnection %s to be performed on Secret %s: %s", cephClusterConnection.Name, secretName, reconcileTypeForSecret)) switch reconcileTypeForSecret { - case CreateReconcile: - shouldRequeue, err = reconcileSecretCreateFunc(ctx, cl, log, cephClusterConnection, controllerNamespace, secretName) - case UpdateReconcile: - shouldRequeue, err = reconcileSecretUpdateFunc(ctx, cl, log, secretList, cephClusterConnection, controllerNamespace, secretName) - case DeleteReconcile: - log.Debug(fmt.Sprintf("[RunCephClusterConnectionEventReconcile] DeleteReconcile: starts reconciliataion of CephClusterConnection %s for Secret %s", cephClusterConnection.Name, secretName)) - shouldRequeue, err = reconcileSecretDeleteFunc(ctx, cl, log, secretList, cephClusterConnection, secretName) + case internal.CreateReconcile: + shouldRequeue, msg, err = reconcileSecretCreateFunc(ctx, cl, log, cephClusterConnection, controllerNamespace, secretName) + case internal.UpdateReconcile: + shouldRequeue, msg, err = reconcileSecretUpdateFunc(ctx, cl, log, secretList, cephClusterConnection, controllerNamespace, secretName) + case internal.DeleteReconcile: + shouldRequeue, msg, err = reconcileSecretDeleteFunc(ctx, cl, log, secretList, cephClusterConnection, secretName) default: - log.Debug(fmt.Sprintf("[RunCephClusterConnectionEventReconcile] StorageClass for CephClusterConnection %s should not be reconciled", cephClusterConnection.Name)) + log.Debug(fmt.Sprintf("[RunCephClusterConnectionEventReconcile] no reconcile action required for CephClusterConnection %s on Secret %s. 
No changes will be made.", cephClusterConnection.Name, secretName)) + msg = "Successfully reconciled" } - log.Debug(fmt.Sprintf("[RunCephClusterConnectionEventReconcile] ends reconciliataion of StorageClass, name: %s, shouldRequeue: %t, err: %v", cephClusterConnection.Name, shouldRequeue, err)) + log.Debug(fmt.Sprintf("[RunCephClusterConnectionEventReconcile] completed reconcile operation for CephClusterConnection %s on Secret %s.", cephClusterConnection.Name, secretName)) if err != nil || shouldRequeue { - return shouldRequeue, err + return shouldRequeue, msg, err + } + + confgigMap := &corev1.ConfigMap{} + err = cl.Get(ctx, types.NamespacedName{Name: internal.CSICephConfigMapName, Namespace: controllerNamespace}, confgigMap) + if err != nil { + err = fmt.Errorf("[RunCephClusterConnectionEventReconcile] unable to get ConfigMap %s in namespace %s: %w", internal.CSICephConfigMapName, controllerNamespace, err) + return true, err.Error(), err } - log.Debug(fmt.Sprintf("[RunCephClusterConnectionEventReconcile] Finish all reconciliations for CephClusterConnection %q.", cephClusterConnection.Name)) - return false, nil + // TODO: Implement the reconcile for the ConfigMap + log.Debug(fmt.Sprintf("[RunCephClusterConnectionEventReconcile] finish all reconciliations for CephClusterConnection %q.", cephClusterConnection.Name)) + return false, msg, nil } diff --git a/images/controller/pkg/controller/ceph_cluster_connection_watcher_func.go b/images/controller/pkg/controller/ceph_cluster_connection_watcher_func.go index 98ed943..ab95051 100644 --- a/images/controller/pkg/controller/ceph_cluster_connection_watcher_func.go +++ b/images/controller/pkg/controller/ceph_cluster_connection_watcher_func.go @@ -19,10 +19,11 @@ package controller import ( "context" v1alpha1 "d8-controller/api/v1alpha1" + "d8-controller/pkg/internal" "d8-controller/pkg/logger" - "errors" "fmt" "reflect" + "strings" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -30,33 +31,48 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -func validateCephClusterConnectionSpec(cephClusterConnection *v1alpha1.CephClusterConnection) error { +func validateCephClusterConnectionSpec(cephClusterConnection *v1alpha1.CephClusterConnection) (bool, string) { + if cephClusterConnection.DeletionTimestamp != nil { + return true, "" + } + + var ( + failedMsgBuilder strings.Builder + validationPassed = true + ) + + failedMsgBuilder.WriteString("Validation of CeohClusterConnection failed: ") + if cephClusterConnection.Spec.ClusterID == "" { - return fmt.Errorf("[validateCephClusterConnectionSpec] %s: spec.clusterID is required", cephClusterConnection.Name) + validationPassed = false + failedMsgBuilder.WriteString("the spec.clusterID field is empty; ") } if cephClusterConnection.Spec.Monitors == nil { - return fmt.Errorf("[validateCephClusterConnectionSpec] %s: spec.monitors is required", cephClusterConnection.Name) + validationPassed = false + failedMsgBuilder.WriteString("the spec.monitors field is empty; ") } if cephClusterConnection.Spec.UserID == "" { - return fmt.Errorf("[validateCephClusterConnectionSpec] %s: spec.userID is required", cephClusterConnection.Name) + validationPassed = false + failedMsgBuilder.WriteString("the spec.userID field is empty; ") } if cephClusterConnection.Spec.UserKey == "" { - return fmt.Errorf("[validateCephClusterConnectionSpec] %s: spec.userKey is required", cephClusterConnection.Name) + validationPassed = false + failedMsgBuilder.WriteString("the spec.userKey field is empty; ") } - 
return nil + return validationPassed, failedMsgBuilder.String() } func IdentifyReconcileFuncForSecret(log logger.Logger, secretList *corev1.SecretList, cephClusterConnection *v1alpha1.CephClusterConnection, controllerNamespace, secretName string) (reconcileType string, err error) { if shouldReconcileByDeleteFunc(cephClusterConnection) { - return DeleteReconcile, nil + return internal.DeleteReconcile, nil } if shouldReconcileSecretByCreateFunc(secretList, cephClusterConnection, secretName) { - return CreateReconcile, nil + return internal.CreateReconcile, nil } should, err := shouldReconcileSecretByUpdateFunc(log, secretList, cephClusterConnection, controllerNamespace, secretName) @@ -64,7 +80,7 @@ func IdentifyReconcileFuncForSecret(log logger.Logger, secretList *corev1.Secret return "", err } if should { - return UpdateReconcile, nil + return internal.UpdateReconcile, nil } return "", nil @@ -90,7 +106,7 @@ func shouldReconcileSecretByUpdateFunc(log logger.Logger, secretList *corev1.Sec } secretSelector := labels.Set(map[string]string{ - StorageManagedLabelKey: CephClusterConnectionCtrlName, + internal.StorageManagedLabelKey: CephClusterConnectionCtrlName, }) for _, oldSecret := range secretList.Items { @@ -100,23 +116,22 @@ func shouldReconcileSecretByUpdateFunc(log logger.Logger, secretList *corev1.Sec if !equal { log.Debug(fmt.Sprintf("[shouldReconcileSecretByUpdateFunc] a secret %s should be updated", secretName)) if !labels.Set(oldSecret.Labels).AsSelector().Matches(secretSelector) { - err := fmt.Errorf("a secret %q does not have a label %s=%s", oldSecret.Name, StorageManagedLabelKey, CephClusterConnectionCtrlName) + err := fmt.Errorf("a secret %q does not have a label %s=%s", oldSecret.Name, internal.StorageManagedLabelKey, CephClusterConnectionCtrlName) return false, err } return true, nil } if !labels.Set(oldSecret.Labels).AsSelector().Matches(secretSelector) { - log.Debug(fmt.Sprintf("[shouldReconcileSecretByUpdateFunc] a secret %s should be updated. The label %s=%s is missing", oldSecret.Name, StorageManagedLabelKey, CephClusterConnectionCtrlName)) + log.Debug(fmt.Sprintf("[shouldReconcileSecretByUpdateFunc] a secret %s should be updated. The label %s=%s is missing", oldSecret.Name, internal.StorageManagedLabelKey, CephClusterConnectionCtrlName)) return true, nil } return false, nil } } - - log.Debug(fmt.Sprintf("[shouldReconcileSecretByUpdateFunc] a secret %s not found in the list: %+v. It should be created", secretName, secretList.Items)) - return true, nil + err := fmt.Errorf("[shouldReconcileSecretByUpdateFunc] a secret %s not found in the list: %+v. 
It should be created", secretName, secretList.Items) + return false, err } func configureSecret(cephClusterConnection *v1alpha1.CephClusterConnection, controllerNamespace, secretName string) *corev1.Secret { @@ -127,7 +142,7 @@ func configureSecret(cephClusterConnection *v1alpha1.CephClusterConnection, cont Name: secretName, Namespace: controllerNamespace, Labels: map[string]string{ - StorageManagedLabelKey: CephClusterConnectionCtrlName, + internal.StorageManagedLabelKey: CephClusterConnectionCtrlName, }, Finalizers: []string{CephClusterConnectionControllerFinalizerName}, }, @@ -150,11 +165,11 @@ func areSecretsEqual(old, new *corev1.Secret) bool { return true } - return true + return false } -func reconcileSecretCreateFunc(ctx context.Context, cl client.Client, log logger.Logger, cephClusterConnection *v1alpha1.CephClusterConnection, controllerNamespace, secretName string) (shouldRequeue bool, err error) { - log.Debug(fmt.Sprintf("[reconcileSecretCreateFunc] starts reconciliataion of CephClusterConnection %s for Secret %s", cephClusterConnection.Name, secretName)) +func reconcileSecretCreateFunc(ctx context.Context, cl client.Client, log logger.Logger, cephClusterConnection *v1alpha1.CephClusterConnection, controllerNamespace, secretName string) (shouldRequeue bool, msg string, err error) { + log.Debug(fmt.Sprintf("[reconcileSecretCreateFunc] starts reconciliation of CephClusterConnection %s for Secret %s", cephClusterConnection.Name, secretName)) newSecret := configureSecret(cephClusterConnection, controllerNamespace, secretName) log.Debug(fmt.Sprintf("[reconcileSecretCreateFunc] successfully configurated secret %s for the CephClusterConnection %s", secretName, cephClusterConnection.Name)) @@ -163,19 +178,14 @@ func reconcileSecretCreateFunc(ctx context.Context, cl client.Client, log logger err = cl.Create(ctx, newSecret) if err != nil { err = fmt.Errorf("[reconcileSecretCreateFunc] unable to create a Secret %s for CephClusterConnection %s: %w", newSecret.Name, cephClusterConnection.Name, err) - upError := updateCephClusterConnectionPhase(ctx, cl, cephClusterConnection, PhaseFailed, err.Error()) - if upError != nil { - upError = fmt.Errorf("[reconcileSecretCreateFunc] unable to update the CephClusterConnection %s: %w", cephClusterConnection.Name, upError) - err = errors.Join(err, upError) - } - return true, err + return true, err.Error(), err } - return false, nil + return false, "Successfully created", nil } -func reconcileSecretUpdateFunc(ctx context.Context, cl client.Client, log logger.Logger, secretList *corev1.SecretList, cephClusterConnection *v1alpha1.CephClusterConnection, controllerNamespace, secretName string) (shouldRequeue bool, err error) { - log.Debug(fmt.Sprintf("[reconcileSecretUpdateFunc] starts reconciliataion of CephClusterConnection %s for Secret %s", cephClusterConnection.Name, secretName)) +func reconcileSecretUpdateFunc(ctx context.Context, cl client.Client, log logger.Logger, secretList *corev1.SecretList, cephClusterConnection *v1alpha1.CephClusterConnection, controllerNamespace, secretName string) (shouldRequeue bool, msg string, err error) { + log.Debug(fmt.Sprintf("[reconcileSecretUpdateFunc] starts reconciliation of CephClusterConnection %s for Secret %s", cephClusterConnection.Name, secretName)) var oldSecret *corev1.Secret for _, s := range secretList.Items { @@ -187,12 +197,7 @@ func reconcileSecretUpdateFunc(ctx context.Context, cl client.Client, log logger if oldSecret == nil { err := fmt.Errorf("[reconcileSecretUpdateFunc] unable to find a secret %s 
for the CephClusterConnection %s", secretName, cephClusterConnection.Name) - upError := updateCephClusterConnectionPhase(ctx, cl, cephClusterConnection, PhaseFailed, err.Error()) - if upError != nil { - upError = fmt.Errorf("[reconcileSecretUpdateFunc] unable to update the CephClusterConnection %s: %w", cephClusterConnection.Name, upError) - err = errors.Join(err, upError) - } - return true, err + return true, err.Error(), err } log.Debug(fmt.Sprintf("[reconcileSecretUpdateFunc] secret %s was found for the CephClusterConnection %s", secretName, cephClusterConnection.Name)) @@ -205,19 +210,16 @@ func reconcileSecretUpdateFunc(ctx context.Context, cl client.Client, log logger err = cl.Update(ctx, newSecret) if err != nil { err = fmt.Errorf("[reconcileSecretUpdateFunc] unable to update the Secret %s for CephClusterConnection %s: %w", newSecret.Name, cephClusterConnection.Name, err) - upError := updateCephClusterConnectionPhase(ctx, cl, cephClusterConnection, PhaseFailed, err.Error()) - if upError != nil { - upError = fmt.Errorf("[reconcileSecretUpdateFunc] unable to update the CephClusterConnection %s: %w", cephClusterConnection.Name, upError) - err = errors.Join(err, upError) - } - return true, err + return true, err.Error(), err } - return false, nil + log.Info(fmt.Sprintf("[reconcileSecretUpdateFunc] successfully updated the Secret %s for the CephClusterConnection %s", newSecret.Name, cephClusterConnection.Name)) + + return false, "Successfully updated", nil } -func reconcileSecretDeleteFunc(ctx context.Context, cl client.Client, log logger.Logger, secretList *corev1.SecretList, cephClusterConnection *v1alpha1.CephClusterConnection, secretName string) (shouldRequeue bool, err error) { - log.Debug(fmt.Sprintf("[reconcileSecretDeleteFunc] starts reconciliataion of CephClusterConnection %s for Secret %s", cephClusterConnection.Name, secretName)) +func reconcileSecretDeleteFunc(ctx context.Context, cl client.Client, log logger.Logger, secretList *corev1.SecretList, cephClusterConnection *v1alpha1.CephClusterConnection, secretName string) (shouldRequeue bool, msg string, err error) { + log.Debug(fmt.Sprintf("[reconcileSecretDeleteFunc] starts reconciliation of CephClusterConnection %s for Secret %s", cephClusterConnection.Name, secretName)) var secret *corev1.Secret for _, s := range secretList.Items { @@ -233,33 +235,17 @@ func reconcileSecretDeleteFunc(ctx context.Context, cl client.Client, log logger if secret != nil { log.Info(fmt.Sprintf("[reconcileSecretDeleteFunc] successfully found a secret %s for the CephClusterConnection %s", secretName, cephClusterConnection.Name)) - log.Debug(fmt.Sprintf("[reconcileSecretDeleteFunc] starts removing a finalizer %s from the Secret %s", CephClusterConnectionControllerFinalizerName, secret.Name)) - _, err := removeFinalizerIfExists(ctx, cl, secret, CephClusterConnectionControllerFinalizerName) - if err != nil { - err = fmt.Errorf("[reconcileSecretDeleteFunc] unable to remove a finalizer %s from the Secret %s: %w", CephClusterConnectionControllerFinalizerName, secret.Name, err) - upErr := updateCephClusterConnectionPhase(ctx, cl, cephClusterConnection, PhaseFailed, fmt.Sprintf("Unable to remove a finalizer, err: %s", err.Error())) - if upErr != nil { - upErr = fmt.Errorf("[reconcileSecretDeleteFunc] unable to update the CephClusterConnection %s: %w", cephClusterConnection.Name, upErr) - err = errors.Join(err, upErr) - } - return true, err - } + err = deleteSecret(ctx, cl, secret) - err = cl.Delete(ctx, secret) if err != nil { - err = 
fmt.Errorf("[reconcileSecretDeleteFunc] unable to delete a secret %s: %w", secret.Name, err) - upErr := updateCephClusterConnectionPhase(ctx, cl, cephClusterConnection, PhaseFailed, fmt.Sprintf("Unable to delete a secret, err: %s", err.Error())) - if upErr != nil { - upErr = fmt.Errorf("[reconcileSecretDeleteFunc] unable to update the CephClusterConnection %s: %w", cephClusterConnection.Name, upErr) - err = errors.Join(err, upErr) - } - return true, err + err = fmt.Errorf("[reconcileSecretDeleteFunc] unable to delete the Secret %s for the CephCluster %s: %w", secret.Name, cephClusterConnection.Name, err) + return true, err.Error(), err } } - log.Info(fmt.Sprintf("[reconcileSecretDeleteFunc] ends reconciliataion of CephClusterConnection %s for Secret %s", cephClusterConnection.Name, secretName)) + log.Info(fmt.Sprintf("[reconcileSecretDeleteFunc] ends reconciliation of CephClusterConnection %s for Secret %s", cephClusterConnection.Name, secretName)) - return false, nil + return false, "", nil } func updateCephClusterConnectionPhase(ctx context.Context, cl client.Client, cephClusterConnection *v1alpha1.CephClusterConnection, phase, reason string) error { @@ -276,3 +262,17 @@ func updateCephClusterConnectionPhase(ctx context.Context, cl client.Client, cep return nil } + +func deleteSecret(ctx context.Context, cl client.Client, secret *corev1.Secret) error { + _, err := removeFinalizerIfExists(ctx, cl, secret, CephClusterConnectionControllerFinalizerName) + if err != nil { + return err + } + + err = cl.Delete(ctx, secret) + if err != nil { + return err + } + + return nil +} diff --git a/images/controller/pkg/controller/ceph_storage_class_watcher.go b/images/controller/pkg/controller/ceph_storage_class_watcher.go index 86cbeb1..40dcba7 100644 --- a/images/controller/pkg/controller/ceph_storage_class_watcher.go +++ b/images/controller/pkg/controller/ceph_storage_class_watcher.go @@ -20,6 +20,7 @@ import ( "context" v1alpha1 "d8-controller/api/v1alpha1" "d8-controller/pkg/config" + "d8-controller/pkg/internal" "d8-controller/pkg/logger" "errors" "fmt" @@ -41,7 +42,8 @@ import ( ) const ( - CephStorageClassCtrlName = "ceph-storage-class-controller" + // This value used as a name for the controller AND the value for managed-by label. 
+ CephStorageClassCtrlName = "d8-ceph-storage-class-controller" StorageClassKind = "StorageClass" StorageClassAPIVersion = "storage.k8s.io/v1" @@ -52,13 +54,6 @@ const ( CephStorageClassControllerFinalizerName = "storage.deckhouse.io/ceph-storage-class-controller" CephStorageClassManagedLabelKey = "storage.deckhouse.io/managed-by" CephStorageClassManagedLabelValue = "ceph-storage-class-controller" - - PhaseFailed = "Failed" - PhaseCreated = "Created" - - CreateReconcile = "Create" - UpdateReconcile = "Update" - DeleteReconcile = "Delete" ) var ( @@ -96,10 +91,10 @@ func RunCephStorageClassWatcherController( shouldRequeue, msg, err := RunStorageClassEventReconcile(ctx, cl, log, scList, cephSC, cfg.ControllerNamespace) log.Info(fmt.Sprintf("[CephStorageClassReconciler] CephStorageClass %s has been reconciled with message: %s", cephSC.Name, msg)) - phase := PhaseCreated + phase := v1alpha1.PhaseCreated if err != nil { log.Error(err, fmt.Sprintf("[CephStorageClassReconciler] an error occured while reconciles the CephStorageClass, name: %s", cephSC.Name)) - phase = PhaseFailed + phase = v1alpha1.PhaseFailed } if msg != "" { @@ -171,6 +166,7 @@ func RunStorageClassEventReconcile(ctx context.Context, cl client.Client, log lo log.Debug(fmt.Sprintf("[RunStorageClassEventReconcile] starts reconciliataion of CephStorageClass, name: %s", cephSC.Name)) valid, msg := validateCephStorageClassSpec(cephSC) if !valid { + err = fmt.Errorf("[RunStorageClassEventReconcile] CephStorageClass %s has invalid spec: %s", cephSC.Name, msg) return false, msg, err } log.Debug(fmt.Sprintf("[RunStorageClassEventReconcile] CephStorageClass %s has valid spec", cephSC.Name)) @@ -197,11 +193,11 @@ func RunStorageClassEventReconcile(ctx context.Context, cl client.Client, log lo shouldRequeue = false log.Debug(fmt.Sprintf("[RunStorageClassEventReconcile] Successfully identified the reconcile type for StorageClass %s: %s", cephSC.Name, reconcileTypeForStorageClass)) switch reconcileTypeForStorageClass { - case CreateReconcile: + case internal.CreateReconcile: shouldRequeue, msg, err = reconcileStorageClassCreateFunc(ctx, cl, log, scList, cephSC, controllerNamespace, clusterID) - case UpdateReconcile: + case internal.UpdateReconcile: shouldRequeue, msg, err = reconcileStorageClassUpdateFunc(ctx, cl, log, scList, cephSC, controllerNamespace, clusterID) - case DeleteReconcile: + case internal.DeleteReconcile: shouldRequeue, msg, err = reconcileStorageClassDeleteFunc(ctx, cl, log, scList, cephSC) default: log.Debug(fmt.Sprintf("[RunStorageClassEventReconcile] StorageClass for CephStorageClass %s should not be reconciled", cephSC.Name)) diff --git a/images/controller/pkg/controller/ceph_storage_class_watcher_func.go b/images/controller/pkg/controller/ceph_storage_class_watcher_func.go index 19275f4..8360923 100644 --- a/images/controller/pkg/controller/ceph_storage_class_watcher_func.go +++ b/images/controller/pkg/controller/ceph_storage_class_watcher_func.go @@ -18,7 +18,9 @@ package controller import ( "context" + "d8-controller/api/v1alpha1" storagev1alpha1 "d8-controller/api/v1alpha1" + "d8-controller/pkg/internal" "d8-controller/pkg/logger" "fmt" "reflect" @@ -34,11 +36,11 @@ import ( func IdentifyReconcileFuncForStorageClass(log logger.Logger, scList *v1.StorageClassList, cephSC *storagev1alpha1.CephStorageClass, controllerNamespace, clusterID string) (reconcileType string, err error) { if shouldReconcileByDeleteFunc(cephSC) { - return DeleteReconcile, nil + return internal.DeleteReconcile, nil } if 
shouldReconcileStorageClassByCreateFunc(scList, cephSC) { - return CreateReconcile, nil + return internal.CreateReconcile, nil } should, err := shouldReconcileStorageClassByUpdateFunc(log, scList, cephSC, controllerNamespace, clusterID) @@ -46,7 +48,7 @@ func IdentifyReconcileFuncForStorageClass(log logger.Logger, scList *v1.StorageC return "", err } if should { - return UpdateReconcile, nil + return internal.UpdateReconcile, nil } return "", nil @@ -89,7 +91,7 @@ func shouldReconcileStorageClassByUpdateFunc(log logger.Logger, scList *v1.Stora return true, nil } - if cephSC.Status != nil && cephSC.Status.Phase == PhaseFailed { + if cephSC.Status != nil && cephSC.Status.Phase == v1alpha1.PhaseFailed { return true, nil } @@ -174,24 +176,17 @@ func reconcileStorageClassUpdateFunc( err = fmt.Errorf("[reconcileStorageClassUpdateFunc] unable to configure a Storage Class for the CephStorageClass %s: %w", cephSC.Name, err) return false, err.Error(), err } + log.Debug(fmt.Sprintf("[reconcileStorageClassUpdateFunc] successfully configurated storage class for the CephStorageClass, name: %s", cephSC.Name)) + log.Trace(fmt.Sprintf("[reconcileStorageClassUpdateFunc] new storage class: %+v", newSC)) + log.Trace(fmt.Sprintf("[reconcileStorageClassUpdateFunc] old storage class: %+v", oldSC)) - diff, err := GetSCDiff(oldSC, newSC) + err = recreateStorageClass(ctx, cl, oldSC, newSC) if err != nil { - err = fmt.Errorf("[reconcileStorageClassUpdateFunc] error occured while identifying the difference between the existed StorageClass %s and the new one: %w", newSC.Name, err) + err = fmt.Errorf("[reconcileStorageClassUpdateFunc] unable to recreate a Storage Class %s: %w", newSC.Name, err) return true, err.Error(), err } - if diff != "" { - log.Info(fmt.Sprintf("[reconcileStorageClassUpdateFunc] current Storage Class LVMVolumeGroups do not match CephStorageClass ones. 
The Storage Class %s will be recreated with new ones", cephSC.Name)) - - err = recreateStorageClass(ctx, cl, oldSC, newSC) - if err != nil { - err = fmt.Errorf("[reconcileStorageClassUpdateFunc] unable to recreate a Storage Class %s: %w", newSC.Name, err) - return true, err.Error(), err - } - - log.Info(fmt.Sprintf("[reconcileStorageClassUpdateFunc] a Storage Class %s was successfully recreated", newSC.Name)) - } + log.Info(fmt.Sprintf("[reconcileStorageClassUpdateFunc] a Storage Class %s was successfully recreated", newSC.Name)) return false, "Successfully updated", nil } @@ -235,6 +230,12 @@ func reconcileStorageClassDeleteFunc( log.Info(fmt.Sprintf("[reconcileStorageClassDeleteFunc] successfully deleted a storage class, name: %s", sc.Name)) } + _, err = removeFinalizerIfExists(ctx, cl, cephSC, CephStorageClassControllerFinalizerName) + if err != nil { + err = fmt.Errorf("[reconcileStorageClassDeleteFunc] unable to remove a finalizer %s from the CephStorageClass %s: %w", CephStorageClassControllerFinalizerName, cephSC.Name, err) + return true, err.Error(), err + } + log.Debug("[reconcileStorageClassDeleteFunc] ends the reconciliation") return false, "", nil } @@ -262,6 +263,9 @@ func ConfigureStorageClass(cephSC *storagev1alpha1.CephStorageClass, controllerN Name: cephSC.Name, Namespace: cephSC.Namespace, Finalizers: []string{CephStorageClassControllerFinalizerName}, + Labels: map[string]string{ + internal.StorageManagedLabelKey: CephStorageClassCtrlName, + }, }, Parameters: params, Provisioner: provisioner, @@ -288,7 +292,7 @@ func GetStorageClassProvisioner(cephSC *storagev1alpha1.CephStorageClass) string } func GetStoragecClassParams(cephSC *storagev1alpha1.CephStorageClass, controllerNamespace, clusterID string) (map[string]string, error) { - secretName := SecretForCephClusterConnectionPrefix + cephSC.Spec.ClusterConnectionName + secretName := internal.CephClusterConnectionSecretPrefix + cephSC.Spec.ClusterConnectionName params := map[string]string{ "clusterID": clusterID, @@ -431,49 +435,49 @@ func validateCephStorageClassSpec(cephSC *storagev1alpha1.CephStorageClass) (boo if cephSC.Spec.ClusterConnectionName == "" { validationPassed = false - failedMsgBuilder.WriteString("the spec.clusterConnectionName field is empty") + failedMsgBuilder.WriteString("the spec.clusterConnectionName field is empty; ") } if cephSC.Spec.ReclaimPolicy == "" { validationPassed = false - failedMsgBuilder.WriteString("the spec.reclaimPolicy field is empty") + failedMsgBuilder.WriteString("the spec.reclaimPolicy field is empty; ") } if cephSC.Spec.Type == "" { validationPassed = false - failedMsgBuilder.WriteString("the spec.type field is empty") + failedMsgBuilder.WriteString("the spec.type field is empty; ") } switch cephSC.Spec.Type { case storagev1alpha1.CephStorageClassTypeRBD: if cephSC.Spec.RBD == nil { validationPassed = false - failedMsgBuilder.WriteString("the spec.rbd field is empty") + failedMsgBuilder.WriteString("the spec.rbd field is empty; ") } if cephSC.Spec.RBD.DefaultFSType == "" { validationPassed = false - failedMsgBuilder.WriteString("the spec.rbd.defaultFSType field is empty") + failedMsgBuilder.WriteString("the spec.rbd.defaultFSType field is empty; ") } if cephSC.Spec.RBD.Pool == "" { validationPassed = false - failedMsgBuilder.WriteString("the spec.rbd.pool field is empty") + failedMsgBuilder.WriteString("the spec.rbd.pool field is empty; ") } case storagev1alpha1.CephStorageClassTypeCephFS: if cephSC.Spec.CephFS == nil { validationPassed = false - failedMsgBuilder.WriteString("the 
spec.cephfs field is empty") + failedMsgBuilder.WriteString("the spec.cephfs field is empty; ") } if cephSC.Spec.CephFS.FSName == "" { validationPassed = false - failedMsgBuilder.WriteString("the spec.cephfs.fsName field is empty") + failedMsgBuilder.WriteString("the spec.cephfs.fsName field is empty; ") } if cephSC.Spec.CephFS.Pool == "" { validationPassed = false - failedMsgBuilder.WriteString("the spec.cephfs.pool field is empty") + failedMsgBuilder.WriteString("the spec.cephfs.pool field is empty; ") } default: validationPassed = false diff --git a/images/controller/pkg/internal/const.go b/images/controller/pkg/internal/const.go new file mode 100644 index 0000000..fc25093 --- /dev/null +++ b/images/controller/pkg/internal/const.go @@ -0,0 +1,26 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +const ( + CephClusterConnectionSecretPrefix = "csi-ceph-secret-for-" + StorageManagedLabelKey = "storage.deckhouse.io/managed-by" + CSICephConfigMapName = "ceph-csi-config" + CreateReconcile = "Create" + UpdateReconcile = "Update" + DeleteReconcile = "Delete" +) From 316aed912c4d3e70cc24a440bab22d5c966dae15 Mon Sep 17 00:00:00 2001 From: Aleksandr Zimin Date: Sun, 16 Jun 2024 23:32:50 +0300 Subject: [PATCH 06/21] some changes Signed-off-by: Aleksandr Zimin --- .../api/v1alpha1/ceph_cluster_connection.go | 25 +- .../ceph_cluster_connection_watcher.go | 36 ++- .../ceph_cluster_connection_watcher_func.go | 281 +++++++++++++++++- images/controller/pkg/internal/const.go | 3 + 4 files changed, 322 insertions(+), 23 deletions(-) diff --git a/images/controller/api/v1alpha1/ceph_cluster_connection.go b/images/controller/api/v1alpha1/ceph_cluster_connection.go index 747d490..66c8ad4 100644 --- a/images/controller/api/v1alpha1/ceph_cluster_connection.go +++ b/images/controller/api/v1alpha1/ceph_cluster_connection.go @@ -34,13 +34,30 @@ type CephClusterConnectionList struct { } type CephClusterConnectionSpec struct { - ClusterID string `json:"clusterID"` - UserID string `json:"userID"` - UserKey string `json:"userKey"` - Monitors []string `json:"monitors"` + ClusterID string `json:"clusterID"` + UserID string `json:"userID"` + UserKey string `json:"userKey"` + Monitors []string `json:"monitors"` + CephFS CephClusterConnectionSpecCephFS `json:"cephFS"` +} + +type CephClusterConnectionSpecCephFS struct { + SubvolumeGroup string `json:"subvolumeGroup"` } type CephClusterConnectionStatus struct { Phase string `json:"phase,omitempty"` Reason string `json:"reason,omitempty"` } + +type ClusterConfig struct { + CephFS map[string]string `json:"cephFS"` + ClusterID string `json:"clusterID"` + Monitors []string `json:"monitors"` +} + +type ClusterConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []ClusterConfig `json:"items"` +} diff --git a/images/controller/pkg/controller/ceph_cluster_connection_watcher.go b/images/controller/pkg/controller/ceph_cluster_connection_watcher.go index 04c42c7..7e89a46 100644 --- 
a/images/controller/pkg/controller/ceph_cluster_connection_watcher.go +++ b/images/controller/pkg/controller/ceph_cluster_connection_watcher.go @@ -43,8 +43,8 @@ import ( const ( // This value used as a name for the controller AND the value for managed-by label. - CephClusterConnectionCtrlName = "d8-ceph-cluster-controller" - CephClusterConnectionControllerFinalizerName = "storage.deckhouse.io/ceph-cluster-controller" + CephClusterConnectionCtrlName = "d8-ceph-cluster-connection-controller" + CephClusterConnectionControllerFinalizerName = "storage.deckhouse.io/ceph-cluster-connection-controller" ) func RunCephClusterConnectionWatcherController( @@ -190,14 +190,38 @@ func RunCephClusterConnectionEventReconcile(ctx context.Context, cl client.Clien return shouldRequeue, msg, err } - confgigMap := &corev1.ConfigMap{} - err = cl.Get(ctx, types.NamespacedName{Name: internal.CSICephConfigMapName, Namespace: controllerNamespace}, confgigMap) + configMapList := &corev1.ConfigMapList{} + err = cl.List(ctx, configMapList, client.InNamespace(controllerNamespace)) if err != nil { - err = fmt.Errorf("[RunCephClusterConnectionEventReconcile] unable to get ConfigMap %s in namespace %s: %w", internal.CSICephConfigMapName, controllerNamespace, err) + err = fmt.Errorf("[RunCephClusterConnectionEventReconcile] unable to list ConfigMaps in namespace %s: %w", controllerNamespace, err) return true, err.Error(), err } - // TODO: Implement the reconcile for the ConfigMap + configMapName := internal.CSICephConfigMapName + + reconcileTypeForConfigMap, err := IdentifyReconcileFuncForConfigMap(log, configMapList, cephClusterConnection, controllerNamespace, configMapName) + if err != nil { + err = fmt.Errorf("[RunCephClusterConnectionEventReconcile] error occurred while identifying the reconcile function for CephClusterConnection %s on ConfigMap %s: %w", cephClusterConnection.Name, internal.CSICephConfigMapName, err) + return true, err.Error(), err + } + + log.Debug(fmt.Sprintf("[RunCephClusterConnectionEventReconcile] successfully identified the reconcile type for CephClusterConnection %s to be performed on ConfigMap %s: %s", cephClusterConnection.Name, internal.CSICephConfigMapName, reconcileTypeForConfigMap)) + switch reconcileTypeForConfigMap { + case internal.CreateReconcile: + shouldRequeue, msg, err = reconcileConfigMapCreateFunc(ctx, cl, log, cephClusterConnection, controllerNamespace, configMapName) + case internal.UpdateReconcile: + shouldRequeue, msg, err = reconcileConfigMapUpdateFunc(ctx, cl, log, configMapList, cephClusterConnection, configMapName) + case internal.DeleteReconcile: + shouldRequeue, msg, err = reconcileConfigMapDeleteFunc(ctx, cl, log, configMapList, cephClusterConnection, configMapName) + default: + log.Debug(fmt.Sprintf("[RunCephClusterConnectionEventReconcile] no reconcile action required for CephClusterConnection %s on ConfigMap %s. 
No changes will be made.", cephClusterConnection.Name, internal.CSICephConfigMapName)) + msg = "Successfully reconciled" + } + log.Debug(fmt.Sprintf("[RunCephClusterConnectionEventReconcile] completed reconcile operation for CephClusterConnection %s on ConfigMap %s.", cephClusterConnection.Name, internal.CSICephConfigMapName)) + + if err != nil || shouldRequeue { + return shouldRequeue, msg, err + } log.Debug(fmt.Sprintf("[RunCephClusterConnectionEventReconcile] finish all reconciliations for CephClusterConnection %q.", cephClusterConnection.Name)) return false, msg, nil diff --git a/images/controller/pkg/controller/ceph_cluster_connection_watcher_func.go b/images/controller/pkg/controller/ceph_cluster_connection_watcher_func.go index ab95051..9a3b623 100644 --- a/images/controller/pkg/controller/ceph_cluster_connection_watcher_func.go +++ b/images/controller/pkg/controller/ceph_cluster_connection_watcher_func.go @@ -21,8 +21,10 @@ import ( v1alpha1 "d8-controller/api/v1alpha1" "d8-controller/pkg/internal" "d8-controller/pkg/logger" + "encoding/json" "fmt" "reflect" + "slices" "strings" corev1 "k8s.io/api/core/v1" @@ -105,25 +107,17 @@ func shouldReconcileSecretByUpdateFunc(log logger.Logger, secretList *corev1.Sec return false, nil } - secretSelector := labels.Set(map[string]string{ - internal.StorageManagedLabelKey: CephClusterConnectionCtrlName, - }) - for _, oldSecret := range secretList.Items { if oldSecret.Name == secretName { newSecret := configureSecret(cephClusterConnection, controllerNamespace, secretName) equal := areSecretsEqual(&oldSecret, newSecret) + + log.Trace(fmt.Sprintf("[shouldReconcileSecretByUpdateFunc] old secret: %+v", oldSecret)) + log.Trace(fmt.Sprintf("[shouldReconcileSecretByUpdateFunc] new secret: %+v", newSecret)) + log.Trace(fmt.Sprintf("[shouldReconcileSecretByUpdateFunc] are secrets equal: %t", equal)) + if !equal { log.Debug(fmt.Sprintf("[shouldReconcileSecretByUpdateFunc] a secret %s should be updated", secretName)) - if !labels.Set(oldSecret.Labels).AsSelector().Matches(secretSelector) { - err := fmt.Errorf("a secret %q does not have a label %s=%s", oldSecret.Name, internal.StorageManagedLabelKey, CephClusterConnectionCtrlName) - return false, err - } - return true, nil - } - - if !labels.Set(oldSecret.Labels).AsSelector().Matches(secretSelector) { - log.Debug(fmt.Sprintf("[shouldReconcileSecretByUpdateFunc] a secret %s should be updated. 
The label %s=%s is missing", oldSecret.Name, internal.StorageManagedLabelKey, CephClusterConnectionCtrlName)) return true, nil } @@ -276,3 +270,264 @@ func deleteSecret(ctx context.Context, cl client.Client, secret *corev1.Secret) return nil } + +// ConfigMap +func IdentifyReconcileFuncForConfigMap(log logger.Logger, configMapList *corev1.ConfigMapList, cephClusterConnection *v1alpha1.CephClusterConnection, controllerNamespace, configMapName string) (reconcileType string, err error) { + if shouldReconcileByDeleteFunc(cephClusterConnection) { + return internal.DeleteReconcile, nil + } + + if shouldReconcileConfigMapByCreateFunc(configMapList, cephClusterConnection, configMapName) { + return internal.CreateReconcile, nil + } + + should, err := shouldReconcileConfigMapByUpdateFunc(log, configMapList, cephClusterConnection, controllerNamespace, configMapName) + if err != nil { + return "", err + } + if should { + return internal.UpdateReconcile, nil + } + + return "", nil +} + +func shouldReconcileConfigMapByCreateFunc(configMapList *corev1.ConfigMapList, cephClusterConnection *v1alpha1.CephClusterConnection, configMapName string) bool { + if cephClusterConnection.DeletionTimestamp != nil { + return false + } + + for _, cm := range configMapList.Items { + if cm.Name == configMapName { + if cm.Data["config.json"] == "" { + return true + } + + return false + } + } + + return true +} + +func shouldReconcileConfigMapByUpdateFunc(log logger.Logger, configMapList *corev1.ConfigMapList, cephClusterConnection *v1alpha1.CephClusterConnection, controllerNamespace, configMapName string) (bool, error) { + if cephClusterConnection.DeletionTimestamp != nil { + return false, nil + } + + configMapSelector := labels.Set(map[string]string{ + internal.StorageManagedLabelKey: CephClusterConnectionCtrlName, + }) + + for _, oldConfigMap := range configMapList.Items { + if oldConfigMap.Name == configMapName { + oldClusterConfigs, err := getClusterConfigsFromConfigMap(oldConfigMap) + if err != nil { + return false, err + } + + equal := false + clusterConfigExists := false + for _, oldClusterConfig := range oldClusterConfigs.Items { + if oldClusterConfig.ClusterID == cephClusterConnection.Spec.ClusterID { + clusterConfigExists = true + newClusterConfig := configureClusterConfig(cephClusterConnection) + equal = reflect.DeepEqual(oldClusterConfig, newClusterConfig) + + log.Trace(fmt.Sprintf("[shouldReconcileConfigMapByUpdateFunc] old cluster config: %+v", oldClusterConfig)) + log.Trace(fmt.Sprintf("[shouldReconcileConfigMapByUpdateFunc] new cluster config: %+v", newClusterConfig)) + log.Trace(fmt.Sprintf("[shouldReconcileConfigMapByUpdateFunc] are cluster configs equal: %t", equal)) + break + } + } + + if !equal || !labels.Set(oldConfigMap.Labels).AsSelector().Matches(configMapSelector) { + if !clusterConfigExists { + log.Trace(fmt.Sprintf("[shouldReconcileConfigMapByUpdateFunc] a cluster config for the cluster %s does not exist in the ConfigMap %+v", cephClusterConnection.Spec.ClusterID, oldConfigMap)) + } + if !labels.Set(oldConfigMap.Labels).AsSelector().Matches(configMapSelector) { + log.Trace(fmt.Sprintf("[shouldReconcileConfigMapByUpdateFunc] a configMap %s labels %+v does not match the selector %+v", oldConfigMap.Name, oldConfigMap.Labels, configMapSelector)) + } + + log.Debug(fmt.Sprintf("[shouldReconcileConfigMapByUpdateFunc] a configMap %s should be updated", configMapName)) + return true, nil + } + + return false, nil + } + } + + err := fmt.Errorf("[shouldReconcileConfigMapByUpdateFunc] a configMap %s not 
found in the list: %+v. It should be created", configMapName, configMapList.Items) + return false, err +} + +func getClusterConfigsFromConfigMap(configMap corev1.ConfigMap) (v1alpha1.ClusterConfigList, error) { + jsonData, ok := configMap.Data["config.json"] + if !ok { + return v1alpha1.ClusterConfigList{}, fmt.Errorf("[getClusterConfigsFromConfigMap] config.json key not found in the ConfigMap %s", configMap.Name) + } + + clusterConfigs := v1alpha1.ClusterConfigList{} + err := json.Unmarshal([]byte(jsonData), &clusterConfigs) + if err != nil { + return v1alpha1.ClusterConfigList{}, fmt.Errorf("[getClusterConfigsFromConfigMap] unable to unmarshal data from the ConfigMap %s: %w", configMap.Name, err) + } + + return clusterConfigs, nil +} + +func configureClusterConfig(cephClusterConnection *v1alpha1.CephClusterConnection) v1alpha1.ClusterConfig { + cephFs := map[string]string{} + if cephClusterConnection.Spec.CephFS.SubvolumeGroup != "" { + cephFs = map[string]string{ + "subvolumeGroup": cephClusterConnection.Spec.CephFS.SubvolumeGroup, + } + } + + clusterConfig := v1alpha1.ClusterConfig{ + ClusterID: cephClusterConnection.Spec.ClusterID, + Monitors: cephClusterConnection.Spec.Monitors, + CephFS: cephFs, + } + + return clusterConfig +} + +func reconcileConfigMapCreateFunc(ctx context.Context, cl client.Client, log logger.Logger, cephClusterConnection *v1alpha1.CephClusterConnection, controllerNamespace, configMapName string) (shouldRequeue bool, msg string, err error) { + log.Debug(fmt.Sprintf("[reconcileConfigMapCreateFunc] starts reconciliation of ConfigMap %s for CephClusterConnection %s", configMapName, cephClusterConnection.Name)) + + newClusterConfig := configureClusterConfig(cephClusterConnection) + newConfigMap := createConfigMap(newClusterConfig, controllerNamespace, configMapName) + log.Debug(fmt.Sprintf("[reconcileConfigMapCreateFunc] successfully configurated ConfigMap %s for the CephClusterConnection %s", configMapName, cephClusterConnection.Name)) + log.Trace(fmt.Sprintf("[reconcileConfigMapCreateFunc] configMap: %+v", newConfigMap)) + + err = cl.Create(ctx, newConfigMap) + if err != nil { + err = fmt.Errorf("[reconcileConfigMapCreateFunc] unable to create a ConfigMap %s for CephClusterConnection %s: %w", newConfigMap.Name, cephClusterConnection.Name, err) + return true, err.Error(), err + } + + return false, "Successfully created", nil +} + +func reconcileConfigMapUpdateFunc(ctx context.Context, cl client.Client, log logger.Logger, configMapList *corev1.ConfigMapList, cephClusterConnection *v1alpha1.CephClusterConnection, configMapName string) (shouldRequeue bool, msg string, err error) { + log.Debug(fmt.Sprintf("[reconcileConfigMapUpdateFunc] starts reconciliation of ConfigMap %s for CephClusterConnection %s", configMapName, cephClusterConnection.Name)) + + var oldConfigMap *corev1.ConfigMap + for _, cm := range configMapList.Items { + if cm.Name == configMapName { + oldConfigMap = &cm + break + } + } + + if oldConfigMap == nil { + err := fmt.Errorf("[reconcileConfigMapUpdateFunc] unable to find a ConfigMap %s for the CephClusterConnection %s", configMapName, cephClusterConnection.Name) + return true, err.Error(), err + } + + log.Debug(fmt.Sprintf("[reconcileConfigMapUpdateFunc] ConfigMap %s was found for the CephClusterConnection %s", configMapName, cephClusterConnection.Name)) + + updatedConfigMap := updateConfigMap(oldConfigMap, cephClusterConnection, internal.UpdateConfigMapActionUpdate) + log.Debug(fmt.Sprintf("[reconcileConfigMapUpdateFunc] successfully configurated new 
ConfigMap %s for the CephClusterConnection %s", configMapName, cephClusterConnection.Name)) + log.Trace(fmt.Sprintf("[reconcileConfigMapUpdateFunc] updated ConfigMap: %+v", updatedConfigMap)) + log.Trace(fmt.Sprintf("[reconcileConfigMapUpdateFunc] old ConfigMap: %+v", oldConfigMap)) + + err = cl.Update(ctx, updatedConfigMap) + if err != nil { + err = fmt.Errorf("[reconcileConfigMapUpdateFunc] unable to update the ConfigMap %s for CephClusterConnection %s: %w", updatedConfigMap.Name, cephClusterConnection.Name, err) + return true, err.Error(), err + } + + log.Info(fmt.Sprintf("[reconcileConfigMapUpdateFunc] successfully updated the ConfigMap %s for the CephClusterConnection %s", updatedConfigMap.Name, cephClusterConnection.Name)) + + return false, "Successfully updated", nil +} + +func reconcileConfigMapDeleteFunc(ctx context.Context, cl client.Client, log logger.Logger, configMapList *corev1.ConfigMapList, cephClusterConnection *v1alpha1.CephClusterConnection, configMapName string) (shouldRequeue bool, msg string, err error) { + log.Debug(fmt.Sprintf("[reconcileConfigMapDeleteFunc] starts reconciliation of ConfigMap %s for CephClusterConnection %s", configMapName, cephClusterConnection.Name)) + + var configMap *corev1.ConfigMap + for _, cm := range configMapList.Items { + if cm.Name == configMapName { + configMap = &cm + break + } + } + + if configMap == nil { + log.Info(fmt.Sprintf("[reconcileConfigMapDeleteFunc] no ConfigMap with name %s found for the CephClusterConnection %s", configMapName, cephClusterConnection.Name)) + } + + if configMap != nil { + log.Info(fmt.Sprintf("[reconcileConfigMapDeleteFunc] successfully found a ConfigMap %s for the CephClusterConnection %s", configMapName, cephClusterConnection.Name)) + newConfigMap := updateConfigMap(configMap, cephClusterConnection, internal.UpdateConfigMapActionDelete) + + err := cl.Update(ctx, newConfigMap) + if err != nil { + err = fmt.Errorf("[reconcileConfigMapDeleteFunc] unable to delete cluster config for the CephClusterConnection %s from the ConfigMap %s: %w", cephClusterConnection.Name, configMapName, err) + return true, err.Error(), err + } + } + + _, err = removeFinalizerIfExists(ctx, cl, cephClusterConnection, CephClusterConnectionControllerFinalizerName) + if err != nil { + err = fmt.Errorf("[reconcileConfigMapDeleteFunc] unable to remove finalizer from the CephClusterConnection %s: %w", cephClusterConnection.Name, err) + return true, err.Error(), err + } + + log.Info(fmt.Sprintf("[reconcileConfigMapDeleteFunc] ends reconciliation of ConfigMap %s for CephClusterConnection %s", configMapName, cephClusterConnection.Name)) + + return false, "", nil +} + +func createConfigMap(clusterConfig v1alpha1.ClusterConfig, controllerNamespace, configMapName string) *corev1.ConfigMap { + clusterConfigs := v1alpha1.ClusterConfigList{ + Items: []v1alpha1.ClusterConfig{clusterConfig}, + } + jsonData, _ := json.Marshal(clusterConfigs) + + configMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: configMapName, + Namespace: controllerNamespace, + Labels: map[string]string{ + internal.StorageManagedLabelKey: CephClusterConnectionCtrlName, + }, + Finalizers: []string{CephClusterConnectionControllerFinalizerName}, + }, + Data: map[string]string{ + "config.json": string(jsonData), + }, + } + + return configMap +} + +func updateConfigMap(oldConfigMap *corev1.ConfigMap, cephClusterConnection *v1alpha1.CephClusterConnection, updateAction string) *corev1.ConfigMap { + clusterConfigs, _ := getClusterConfigsFromConfigMap(*oldConfigMap) + + 
for i, clusterConfig := range clusterConfigs.Items { + if clusterConfig.ClusterID == cephClusterConnection.Spec.ClusterID { + clusterConfigs.Items = slices.Delete(clusterConfigs.Items, i, i+1) + } + } + + if updateAction == internal.UpdateConfigMapActionUpdate { + newClusterConfig := configureClusterConfig(cephClusterConnection) + clusterConfigs.Items = append(clusterConfigs.Items, newClusterConfig) + } + + newJsonData, _ := json.Marshal(clusterConfigs) + + configMap := oldConfigMap.DeepCopy() + configMap.Data["config.json"] = string(newJsonData) + + if configMap.Labels == nil { + configMap.Labels = map[string]string{} + } + + configMap.Labels[internal.StorageManagedLabelKey] = CephClusterConnectionCtrlName + + return configMap +} diff --git a/images/controller/pkg/internal/const.go b/images/controller/pkg/internal/const.go index fc25093..cd2ed60 100644 --- a/images/controller/pkg/internal/const.go +++ b/images/controller/pkg/internal/const.go @@ -23,4 +23,7 @@ const ( CreateReconcile = "Create" UpdateReconcile = "Update" DeleteReconcile = "Delete" + + UpdateConfigMapActionUpdate = "update" + UpdateConfigMapActionDelete = "delete" ) From e326cf6686d1694d925b2971fa06cc152de0a55c Mon Sep 17 00:00:00 2001 From: Aleksandr Zimin Date: Mon, 17 Jun 2024 00:40:43 +0300 Subject: [PATCH 07/21] Some fixes Signed-off-by: Aleksandr Zimin --- .../ceph_cluster_connection_watcher_func.go | 9 +- .../ceph_storage_class_watcher_func.go | 6 +- .../ceph_storage_class_watcher_test.go | 319 ++++++++++++++++++ .../pkg/controller/controller_suite_test.go | 2 +- templates/controller/rbac-for-us.yaml | 5 +- 5 files changed, 334 insertions(+), 7 deletions(-) create mode 100644 images/controller/pkg/controller/ceph_storage_class_watcher_test.go diff --git a/images/controller/pkg/controller/ceph_cluster_connection_watcher_func.go b/images/controller/pkg/controller/ceph_cluster_connection_watcher_func.go index 9a3b623..4828421 100644 --- a/images/controller/pkg/controller/ceph_cluster_connection_watcher_func.go +++ b/images/controller/pkg/controller/ceph_cluster_connection_watcher_func.go @@ -526,8 +526,15 @@ func updateConfigMap(oldConfigMap *corev1.ConfigMap, cephClusterConnection *v1al if configMap.Labels == nil { configMap.Labels = map[string]string{} } - configMap.Labels[internal.StorageManagedLabelKey] = CephClusterConnectionCtrlName + if configMap.Finalizers == nil { + configMap.Finalizers = []string{} + } + + if !slices.Contains(configMap.Finalizers, CephClusterConnectionControllerFinalizerName) { + configMap.Finalizers = append(configMap.Finalizers, CephClusterConnectionControllerFinalizerName) + } + return configMap } diff --git a/images/controller/pkg/controller/ceph_storage_class_watcher_func.go b/images/controller/pkg/controller/ceph_storage_class_watcher_func.go index 8360923..c4032d6 100644 --- a/images/controller/pkg/controller/ceph_storage_class_watcher_func.go +++ b/images/controller/pkg/controller/ceph_storage_class_watcher_func.go @@ -241,7 +241,7 @@ func reconcileStorageClassDeleteFunc( } func ConfigureStorageClass(cephSC *storagev1alpha1.CephStorageClass, controllerNamespace, clusterID string) (*v1.StorageClass, error) { - provisioner := GetStorageClassProvisioner(cephSC) + provisioner := GetStorageClassProvisioner(cephSC.Spec.Type) allowVolumeExpansion := true reclaimPolicy := corev1.PersistentVolumeReclaimPolicy(cephSC.Spec.ReclaimPolicy) volumeBindingMode := v1.VolumeBindingImmediate @@ -278,9 +278,9 @@ func ConfigureStorageClass(cephSC *storagev1alpha1.CephStorageClass, controllerN return sc, 
nil } -func GetStorageClassProvisioner(cephSC *storagev1alpha1.CephStorageClass) string { +func GetStorageClassProvisioner(cephStorageClasstype string) string { provisioner := "" - switch cephSC.Spec.Type { + switch cephStorageClasstype { case storagev1alpha1.CephStorageClassTypeRBD: provisioner = CephStorageClassRBDProvisioner case storagev1alpha1.CephStorageClassTypeCephFS: diff --git a/images/controller/pkg/controller/ceph_storage_class_watcher_test.go b/images/controller/pkg/controller/ceph_storage_class_watcher_test.go new file mode 100644 index 0000000..546885a --- /dev/null +++ b/images/controller/pkg/controller/ceph_storage_class_watcher_test.go @@ -0,0 +1,319 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller_test + +import ( + "context" + v1alpha1 "d8-controller/api/v1alpha1" + "d8-controller/pkg/controller" + "d8-controller/pkg/internal" + "d8-controller/pkg/logger" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/storage/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe(controller.CephStorageClassCtrlName, func() { + const ( + controllerNamespace = "test-namespace" + nameForTestResource = "example-ceph" + ) + var ( + ctx = context.Background() + cl = NewFakeClient() + log = logger.Logger{} + + clusterConnectionName = "ceph-connection" + clusterID1 = "clusterID1" + reclaimPolicy = "Delete" + storageType = "cephfs" + fsName = "myfs" + pool = "mypool" + // defaultFSType = "ext4" + ) + + It("Create_ceph_sc_with_not_existing_ceph_connection", func() { + cephSCtemplate := generateCephStorageClass(CephStorageClassConfig{ + Name: nameForTestResource, + ClusterConnectionName: "not-existing", + ReclaimPolicy: reclaimPolicy, + Type: storageType, + CephFS: &CephFSConfig{ + FSName: fsName, + Pool: pool, + }, + }) + + err := cl.Create(ctx, cephSCtemplate) + Expect(err).NotTo(HaveOccurred()) + + csc := &v1alpha1.CephStorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForTestResource}, csc) + Expect(err).NotTo(HaveOccurred()) + + Expect(csc).NotTo(BeNil()) + Expect(csc.Name).To(Equal(nameForTestResource)) + Expect(csc.Finalizers).To(HaveLen(0)) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + + shouldRequeue, _, err := controller.RunStorageClassEventReconcile(ctx, cl, log, scList, csc, controllerNamespace) + Expect(err).To(HaveOccurred()) + Expect(shouldRequeue).To(BeTrue()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForTestResource}, csc) + Expect(err).NotTo(HaveOccurred()) + Expect(csc.Finalizers).To(HaveLen(1)) + Expect(csc.Finalizers).To(ContainElement(controller.CephStorageClassControllerFinalizerName)) + + sc := &v1.StorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForTestResource}, sc) + Expect(k8serrors.IsNotFound(err)).To(BeTrue()) + + csc.Finalizers = nil + err = cl.Update(ctx, csc) + 
Expect(err).NotTo(HaveOccurred()) + err = cl.Delete(ctx, csc) + Expect(err).NotTo(HaveOccurred()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForTestResource}, csc) + Expect(k8serrors.IsNotFound(err)).To(BeTrue()) + }) + + It("Create_ceph_cluster_connection", func() { + cephClusterConnection := &v1alpha1.CephClusterConnection{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterConnectionName, + }, + Spec: v1alpha1.CephClusterConnectionSpec{ + ClusterID: clusterID1, + Monitors: []string{"mon1", "mon2", "mon3"}, + UserID: "admin", + UserKey: "key", + }, + } + + err := cl.Create(ctx, cephClusterConnection) + Expect(err).NotTo(HaveOccurred()) + + }) + + It("Create_ceph_sc_with_cephfs", func() { + cephSCtemplate := generateCephStorageClass(CephStorageClassConfig{ + Name: nameForTestResource, + ClusterConnectionName: clusterConnectionName, + ReclaimPolicy: reclaimPolicy, + Type: storageType, + CephFS: &CephFSConfig{ + FSName: fsName, + Pool: pool, + }, + }) + + err := cl.Create(ctx, cephSCtemplate) + Expect(err).NotTo(HaveOccurred()) + + csc := &v1alpha1.CephStorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForTestResource}, csc) + Expect(err).NotTo(HaveOccurred()) + + Expect(csc).NotTo(BeNil()) + Expect(csc.Name).To(Equal(nameForTestResource)) + Expect(csc.Finalizers).To(HaveLen(0)) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + + shouldRequeue, _, err := controller.RunStorageClassEventReconcile(ctx, cl, log, scList, csc, controllerNamespace) + Expect(err).NotTo(HaveOccurred()) + Expect(shouldRequeue).To(BeFalse()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForTestResource}, csc) + Expect(err).NotTo(HaveOccurred()) + Expect(csc.Finalizers).To(HaveLen(1)) + Expect(csc.Finalizers).To(ContainElement(controller.CephStorageClassControllerFinalizerName)) + + sc := &v1.StorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForTestResource}, sc) + Expect(err).NotTo(HaveOccurred()) + performStandardChecksForCephSc(sc, nameForTestResource, controllerNamespace, CephStorageClassConfig{ + ClusterConnectionName: clusterConnectionName, + ReclaimPolicy: reclaimPolicy, + Type: storageType, + CephFS: &CephFSConfig{ + FSName: fsName, + Pool: pool, + }, + }) + }) + + It("Update_ceph_sc", func() { + csc := &v1alpha1.CephStorageClass{} + err := cl.Get(ctx, client.ObjectKey{Name: nameForTestResource}, csc) + Expect(err).NotTo(HaveOccurred()) + + csc.Spec.ReclaimPolicy = "Retain" + + err = cl.Update(ctx, csc) + Expect(err).NotTo(HaveOccurred()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForTestResource}, csc) + Expect(err).NotTo(HaveOccurred()) + + Expect(csc).NotTo(BeNil()) + Expect(csc.Name).To(Equal(nameForTestResource)) + Expect(csc.Finalizers).To(HaveLen(1)) + Expect(csc.Finalizers).To(ContainElement(controller.CephStorageClassControllerFinalizerName)) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + + shouldRequeue, _, err := controller.RunStorageClassEventReconcile(ctx, cl, log, scList, csc, controllerNamespace) + Expect(err).NotTo(HaveOccurred()) + Expect(shouldRequeue).To(BeFalse()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForTestResource}, csc) + Expect(err).NotTo(HaveOccurred()) + Expect(csc.Finalizers).To(HaveLen(1)) + Expect(csc.Finalizers).To(ContainElement(controller.CephStorageClassControllerFinalizerName)) + + sc := &v1.StorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForTestResource}, sc) + Expect(err).NotTo(HaveOccurred()) + 
performStandardChecksForCephSc(sc, nameForTestResource, controllerNamespace, CephStorageClassConfig{ + ClusterConnectionName: clusterConnectionName, + ReclaimPolicy: "Retain", + Type: storageType, + CephFS: &CephFSConfig{ + FSName: fsName, + Pool: pool, + }, + }) + }) + + It("Remove_ceph_sc", func() { + csc := &v1alpha1.CephStorageClass{} + err := cl.Get(ctx, client.ObjectKey{Name: nameForTestResource}, csc) + Expect(err).NotTo(HaveOccurred()) + + err = cl.Delete(ctx, csc) + Expect(err).NotTo(HaveOccurred()) + + csc = &v1alpha1.CephStorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForTestResource}, csc) + Expect(err).NotTo(HaveOccurred()) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + + shouldRequeue, _, err := controller.RunStorageClassEventReconcile(ctx, cl, log, scList, csc, controllerNamespace) + Expect(err).NotTo(HaveOccurred()) + Expect(shouldRequeue).To(BeFalse()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForTestResource}, csc) + Expect(k8serrors.IsNotFound(err)).To(BeTrue()) + + sc := &v1.StorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForTestResource}, sc) + Expect(k8serrors.IsNotFound(err)).To(BeTrue()) + }) + + // Дополнительные тесты можно добавить здесь +}) + +type CephStorageClassConfig struct { + Name string + ClusterConnectionName string + ReclaimPolicy string + Type string + CephFS *CephFSConfig + RBD *RBDConfig +} + +type CephFSConfig struct { + FSName string + Pool string +} + +type RBDConfig struct { + DefaultFSType string + Pool string +} + +func generateCephStorageClass(cfg CephStorageClassConfig) *v1alpha1.CephStorageClass { + var cephFS *v1alpha1.CephStorageClassCephFS + var rbd *v1alpha1.CephStorageClassRBD + + if cfg.CephFS != nil { + cephFS = &v1alpha1.CephStorageClassCephFS{ + FSName: cfg.CephFS.FSName, + Pool: cfg.CephFS.Pool, + } + } + + if cfg.RBD != nil { + rbd = &v1alpha1.CephStorageClassRBD{ + DefaultFSType: cfg.RBD.DefaultFSType, + Pool: cfg.RBD.Pool, + } + } + + return &v1alpha1.CephStorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: cfg.Name, + }, + Spec: v1alpha1.CephStorageClassSpec{ + ClusterConnectionName: cfg.ClusterConnectionName, + ReclaimPolicy: cfg.ReclaimPolicy, + Type: cfg.Type, + CephFS: cephFS, + RBD: rbd, + }, + } +} + +func performStandardChecksForCephSc(sc *v1.StorageClass, nameForTestResource, controllerNamespace string, cfg CephStorageClassConfig) { + Expect(sc).NotTo(BeNil()) + Expect(sc.Name).To(Equal(nameForTestResource)) + Expect(sc.Finalizers).To(HaveLen(1)) + Expect(sc.Finalizers).To(ContainElement(controller.CephStorageClassControllerFinalizerName)) + Expect(sc.Provisioner).To(Equal(controller.GetStorageClassProvisioner(cfg.Type))) + Expect(*sc.ReclaimPolicy).To(Equal(corev1.PersistentVolumeReclaimPolicy(cfg.ReclaimPolicy))) + Expect(*sc.VolumeBindingMode).To(Equal(v1.VolumeBindingImmediate)) + Expect(*sc.AllowVolumeExpansion).To(BeTrue()) + Expect(sc.Parameters).To(HaveKeyWithValue("csi.storage.k8s.io/provisioner-secret-name", internal.CephClusterConnectionSecretPrefix+cfg.ClusterConnectionName)) + Expect(sc.Parameters).To(HaveKeyWithValue("csi.storage.k8s.io/provisioner-secret-namespace", controllerNamespace)) + + if cfg.Type == "cephfs" { + Expect(sc.Parameters).To(HaveKeyWithValue("fsName", cfg.CephFS.FSName)) + Expect(sc.Parameters).To(HaveKeyWithValue("pool", cfg.CephFS.Pool)) + } else if cfg.Type == "rbd" { + Expect(sc.Parameters).To(HaveKeyWithValue("pool", cfg.RBD.Pool)) + 
Expect(sc.Parameters).To(HaveKeyWithValue("csi.storage.k8s.io/fstype", cfg.RBD.DefaultFSType)) + } +} diff --git a/images/controller/pkg/controller/controller_suite_test.go b/images/controller/pkg/controller/controller_suite_test.go index a5c07a7..7574aa5 100644 --- a/images/controller/pkg/controller/controller_suite_test.go +++ b/images/controller/pkg/controller/controller_suite_test.go @@ -59,7 +59,7 @@ func NewFakeClient() client.Client { } // See https://github.com/kubernetes-sigs/controller-runtime/issues/2362#issuecomment-1837270195 - builder := fake.NewClientBuilder().WithScheme(scheme).WithStatusSubresource(&v1alpha1.CephStorageClass{}) + builder := fake.NewClientBuilder().WithScheme(scheme).WithStatusSubresource(&v1alpha1.CephStorageClass{}, &v1alpha1.CephClusterConnection{}) cl := builder.Build() return cl diff --git a/templates/controller/rbac-for-us.yaml b/templates/controller/rbac-for-us.yaml index 763f1e1..3cf96a4 100644 --- a/templates/controller/rbac-for-us.yaml +++ b/templates/controller/rbac-for-us.yaml @@ -18,6 +18,7 @@ rules: - "" resources: - secrets + - configmaps verbs: - get - list @@ -56,8 +57,8 @@ rules: resources: - cephstorageclasses - cephstorageclasses/status - - cephclusters - - cephclusters/status + - cephclusterconnections + - cephclusterconnections/status verbs: - get - list From f7de86ef2f57ee1e2741eae261d3b18490d69ac1 Mon Sep 17 00:00:00 2001 From: Aleksandr Zimin Date: Mon, 17 Jun 2024 01:20:43 +0300 Subject: [PATCH 08/21] Some fixes Signed-off-by: Aleksandr Zimin --- .../ceph_storage_class_watcher_func.go | 19 +- .../ceph_storage_class_watcher_test.go | 303 +++++++++++++++--- 2 files changed, 276 insertions(+), 46 deletions(-) diff --git a/images/controller/pkg/controller/ceph_storage_class_watcher_func.go b/images/controller/pkg/controller/ceph_storage_class_watcher_func.go index c4032d6..dd5b0a2 100644 --- a/images/controller/pkg/controller/ceph_storage_class_watcher_func.go +++ b/images/controller/pkg/controller/ceph_storage_class_watcher_func.go @@ -216,18 +216,19 @@ func reconcileStorageClassDeleteFunc( log.Info(fmt.Sprintf("[reconcileStorageClassDeleteFunc] successfully found a storage class for the CephStorageClass %s", cephSC.Name)) log.Debug(fmt.Sprintf("[reconcileStorageClassDeleteFunc] starts identifying a provisioner for the storage class %s", sc.Name)) - if !slices.Contains(allowedProvisioners, sc.Provisioner) { - err = fmt.Errorf("[reconcileStorageClassDeleteFunc] a storage class %s with provisioner %s does not belong to allowed provisioners: %v", sc.Name, sc.Provisioner, allowedProvisioners) - return true, err.Error(), err + if slices.Contains(allowedProvisioners, sc.Provisioner) { + log.Info(fmt.Sprintf("[reconcileStorageClassDeleteFunc] the storage class %s provisioner %s belongs to allowed provisioners: %v", sc.Name, sc.Provisioner, allowedProvisioners)) + err := deleteStorageClass(ctx, cl, sc) + if err != nil { + err = fmt.Errorf("[reconcileStorageClassDeleteFunc] unable to delete a storage class %s: %w", sc.Name, err) + return true, err.Error(), err + } + log.Info(fmt.Sprintf("[reconcileStorageClassDeleteFunc] successfully deleted a storage class, name: %s", sc.Name)) } - log.Info(fmt.Sprintf("[reconcileStorageClassDeleteFunc] the storage class %s provisioner %s belongs to allowed provisioners: %v", sc.Name, sc.Provisioner, allowedProvisioners)) - err := deleteStorageClass(ctx, cl, sc) - if err != nil { - err = fmt.Errorf("[reconcileStorageClassDeleteFunc] unable to delete a storage class %s: %w", sc.Name, err) - return true, 
err.Error(), err + if !slices.Contains(allowedProvisioners, sc.Provisioner) { + log.Info(fmt.Sprintf("[reconcileStorageClassDeleteFunc] a storage class %s with provisioner %s does not belong to allowed provisioners: %v. Skip deletion of storage class", sc.Name, sc.Provisioner, allowedProvisioners)) } - log.Info(fmt.Sprintf("[reconcileStorageClassDeleteFunc] successfully deleted a storage class, name: %s", sc.Name)) } _, err = removeFinalizerIfExists(ctx, cl, cephSC, CephStorageClassControllerFinalizerName) diff --git a/images/controller/pkg/controller/ceph_storage_class_watcher_test.go b/images/controller/pkg/controller/ceph_storage_class_watcher_test.go index 546885a..7a1aa9a 100644 --- a/images/controller/pkg/controller/ceph_storage_class_watcher_test.go +++ b/images/controller/pkg/controller/ceph_storage_class_watcher_test.go @@ -36,7 +36,8 @@ import ( var _ = Describe(controller.CephStorageClassCtrlName, func() { const ( controllerNamespace = "test-namespace" - nameForTestResource = "example-ceph" + nameForCephSC = "example-ceph-fs" + nameForRBDSC = "example-rbd" ) var ( ctx = context.Background() @@ -45,8 +46,10 @@ var _ = Describe(controller.CephStorageClassCtrlName, func() { clusterConnectionName = "ceph-connection" clusterID1 = "clusterID1" - reclaimPolicy = "Delete" - storageType = "cephfs" + reclaimPolicyDelete = "Delete" + reclaimPolicyRetain = "Retain" + storageTypeCephFS = "cephfs" + storageTypeRBD = "rbd" fsName = "myfs" pool = "mypool" // defaultFSType = "ext4" @@ -54,10 +57,10 @@ var _ = Describe(controller.CephStorageClassCtrlName, func() { It("Create_ceph_sc_with_not_existing_ceph_connection", func() { cephSCtemplate := generateCephStorageClass(CephStorageClassConfig{ - Name: nameForTestResource, + Name: nameForCephSC, ClusterConnectionName: "not-existing", - ReclaimPolicy: reclaimPolicy, - Type: storageType, + ReclaimPolicy: reclaimPolicyDelete, + Type: storageTypeCephFS, CephFS: &CephFSConfig{ FSName: fsName, Pool: pool, @@ -68,11 +71,11 @@ var _ = Describe(controller.CephStorageClassCtrlName, func() { Expect(err).NotTo(HaveOccurred()) csc := &v1alpha1.CephStorageClass{} - err = cl.Get(ctx, client.ObjectKey{Name: nameForTestResource}, csc) + err = cl.Get(ctx, client.ObjectKey{Name: nameForCephSC}, csc) Expect(err).NotTo(HaveOccurred()) Expect(csc).NotTo(BeNil()) - Expect(csc.Name).To(Equal(nameForTestResource)) + Expect(csc.Name).To(Equal(nameForCephSC)) Expect(csc.Finalizers).To(HaveLen(0)) scList := &v1.StorageClassList{} @@ -83,13 +86,13 @@ var _ = Describe(controller.CephStorageClassCtrlName, func() { Expect(err).To(HaveOccurred()) Expect(shouldRequeue).To(BeTrue()) - err = cl.Get(ctx, client.ObjectKey{Name: nameForTestResource}, csc) + err = cl.Get(ctx, client.ObjectKey{Name: nameForCephSC}, csc) Expect(err).NotTo(HaveOccurred()) Expect(csc.Finalizers).To(HaveLen(1)) Expect(csc.Finalizers).To(ContainElement(controller.CephStorageClassControllerFinalizerName)) sc := &v1.StorageClass{} - err = cl.Get(ctx, client.ObjectKey{Name: nameForTestResource}, sc) + err = cl.Get(ctx, client.ObjectKey{Name: nameForCephSC}, sc) Expect(k8serrors.IsNotFound(err)).To(BeTrue()) csc.Finalizers = nil @@ -98,7 +101,7 @@ var _ = Describe(controller.CephStorageClassCtrlName, func() { err = cl.Delete(ctx, csc) Expect(err).NotTo(HaveOccurred()) - err = cl.Get(ctx, client.ObjectKey{Name: nameForTestResource}, csc) + err = cl.Get(ctx, client.ObjectKey{Name: nameForCephSC}, csc) Expect(k8serrors.IsNotFound(err)).To(BeTrue()) }) @@ -122,10 +125,10 @@ var _ = 
Describe(controller.CephStorageClassCtrlName, func() { It("Create_ceph_sc_with_cephfs", func() { cephSCtemplate := generateCephStorageClass(CephStorageClassConfig{ - Name: nameForTestResource, + Name: nameForCephSC, ClusterConnectionName: clusterConnectionName, - ReclaimPolicy: reclaimPolicy, - Type: storageType, + ReclaimPolicy: reclaimPolicyDelete, + Type: storageTypeCephFS, CephFS: &CephFSConfig{ FSName: fsName, Pool: pool, @@ -136,11 +139,11 @@ var _ = Describe(controller.CephStorageClassCtrlName, func() { Expect(err).NotTo(HaveOccurred()) csc := &v1alpha1.CephStorageClass{} - err = cl.Get(ctx, client.ObjectKey{Name: nameForTestResource}, csc) + err = cl.Get(ctx, client.ObjectKey{Name: nameForCephSC}, csc) Expect(err).NotTo(HaveOccurred()) Expect(csc).NotTo(BeNil()) - Expect(csc.Name).To(Equal(nameForTestResource)) + Expect(csc.Name).To(Equal(nameForCephSC)) Expect(csc.Finalizers).To(HaveLen(0)) scList := &v1.StorageClassList{} @@ -151,18 +154,18 @@ var _ = Describe(controller.CephStorageClassCtrlName, func() { Expect(err).NotTo(HaveOccurred()) Expect(shouldRequeue).To(BeFalse()) - err = cl.Get(ctx, client.ObjectKey{Name: nameForTestResource}, csc) + err = cl.Get(ctx, client.ObjectKey{Name: nameForCephSC}, csc) Expect(err).NotTo(HaveOccurred()) Expect(csc.Finalizers).To(HaveLen(1)) Expect(csc.Finalizers).To(ContainElement(controller.CephStorageClassControllerFinalizerName)) sc := &v1.StorageClass{} - err = cl.Get(ctx, client.ObjectKey{Name: nameForTestResource}, sc) + err = cl.Get(ctx, client.ObjectKey{Name: nameForCephSC}, sc) Expect(err).NotTo(HaveOccurred()) - performStandardChecksForCephSc(sc, nameForTestResource, controllerNamespace, CephStorageClassConfig{ + performStandardChecksForCephSc(sc, nameForCephSC, controllerNamespace, CephStorageClassConfig{ ClusterConnectionName: clusterConnectionName, - ReclaimPolicy: reclaimPolicy, - Type: storageType, + ReclaimPolicy: reclaimPolicyDelete, + Type: storageTypeCephFS, CephFS: &CephFSConfig{ FSName: fsName, Pool: pool, @@ -170,21 +173,21 @@ var _ = Describe(controller.CephStorageClassCtrlName, func() { }) }) - It("Update_ceph_sc", func() { + It("Update_ceph_sc_with_cephfs", func() { csc := &v1alpha1.CephStorageClass{} - err := cl.Get(ctx, client.ObjectKey{Name: nameForTestResource}, csc) + err := cl.Get(ctx, client.ObjectKey{Name: nameForCephSC}, csc) Expect(err).NotTo(HaveOccurred()) - csc.Spec.ReclaimPolicy = "Retain" + csc.Spec.ReclaimPolicy = reclaimPolicyRetain err = cl.Update(ctx, csc) Expect(err).NotTo(HaveOccurred()) - err = cl.Get(ctx, client.ObjectKey{Name: nameForTestResource}, csc) + err = cl.Get(ctx, client.ObjectKey{Name: nameForCephSC}, csc) Expect(err).NotTo(HaveOccurred()) Expect(csc).NotTo(BeNil()) - Expect(csc.Name).To(Equal(nameForTestResource)) + Expect(csc.Name).To(Equal(nameForCephSC)) Expect(csc.Finalizers).To(HaveLen(1)) Expect(csc.Finalizers).To(ContainElement(controller.CephStorageClassControllerFinalizerName)) @@ -196,18 +199,18 @@ var _ = Describe(controller.CephStorageClassCtrlName, func() { Expect(err).NotTo(HaveOccurred()) Expect(shouldRequeue).To(BeFalse()) - err = cl.Get(ctx, client.ObjectKey{Name: nameForTestResource}, csc) + err = cl.Get(ctx, client.ObjectKey{Name: nameForCephSC}, csc) Expect(err).NotTo(HaveOccurred()) Expect(csc.Finalizers).To(HaveLen(1)) Expect(csc.Finalizers).To(ContainElement(controller.CephStorageClassControllerFinalizerName)) sc := &v1.StorageClass{} - err = cl.Get(ctx, client.ObjectKey{Name: nameForTestResource}, sc) + err = cl.Get(ctx, client.ObjectKey{Name: nameForCephSC}, 
sc) Expect(err).NotTo(HaveOccurred()) - performStandardChecksForCephSc(sc, nameForTestResource, controllerNamespace, CephStorageClassConfig{ + performStandardChecksForCephSc(sc, nameForCephSC, controllerNamespace, CephStorageClassConfig{ ClusterConnectionName: clusterConnectionName, - ReclaimPolicy: "Retain", - Type: storageType, + ReclaimPolicy: reclaimPolicyRetain, + Type: storageTypeCephFS, CephFS: &CephFSConfig{ FSName: fsName, Pool: pool, @@ -215,16 +218,16 @@ var _ = Describe(controller.CephStorageClassCtrlName, func() { }) }) - It("Remove_ceph_sc", func() { + It("Remove_ceph_sc_with_cephfs", func() { csc := &v1alpha1.CephStorageClass{} - err := cl.Get(ctx, client.ObjectKey{Name: nameForTestResource}, csc) + err := cl.Get(ctx, client.ObjectKey{Name: nameForCephSC}, csc) Expect(err).NotTo(HaveOccurred()) err = cl.Delete(ctx, csc) Expect(err).NotTo(HaveOccurred()) csc = &v1alpha1.CephStorageClass{} - err = cl.Get(ctx, client.ObjectKey{Name: nameForTestResource}, csc) + err = cl.Get(ctx, client.ObjectKey{Name: nameForCephSC}, csc) Expect(err).NotTo(HaveOccurred()) scList := &v1.StorageClassList{} @@ -235,15 +238,241 @@ var _ = Describe(controller.CephStorageClassCtrlName, func() { Expect(err).NotTo(HaveOccurred()) Expect(shouldRequeue).To(BeFalse()) - err = cl.Get(ctx, client.ObjectKey{Name: nameForTestResource}, csc) + err = cl.Get(ctx, client.ObjectKey{Name: nameForCephSC}, csc) Expect(k8serrors.IsNotFound(err)).To(BeTrue()) sc := &v1.StorageClass{} - err = cl.Get(ctx, client.ObjectKey{Name: nameForTestResource}, sc) + err = cl.Get(ctx, client.ObjectKey{Name: nameForCephSC}, sc) Expect(k8serrors.IsNotFound(err)).To(BeTrue()) }) - // Дополнительные тесты можно добавить здесь + It("Create_ceph_sc_with_rbd", func() { + cephSCtemplate := generateCephStorageClass(CephStorageClassConfig{ + Name: nameForRBDSC, + ClusterConnectionName: clusterConnectionName, + ReclaimPolicy: reclaimPolicyDelete, + Type: storageTypeRBD, + RBD: &RBDConfig{ + DefaultFSType: "ext4", + Pool: pool, + }, + }) + + err := cl.Create(ctx, cephSCtemplate) + Expect(err).NotTo(HaveOccurred()) + + csc := &v1alpha1.CephStorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForRBDSC}, csc) + Expect(err).NotTo(HaveOccurred()) + + Expect(csc).NotTo(BeNil()) + Expect(csc.Name).To(Equal(nameForRBDSC)) + Expect(csc.Finalizers).To(HaveLen(0)) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + + shouldRequeue, _, err := controller.RunStorageClassEventReconcile(ctx, cl, log, scList, csc, controllerNamespace) + Expect(err).NotTo(HaveOccurred()) + Expect(shouldRequeue).To(BeFalse()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForRBDSC}, csc) + Expect(err).NotTo(HaveOccurred()) + Expect(csc.Finalizers).To(HaveLen(1)) + Expect(csc.Finalizers).To(ContainElement(controller.CephStorageClassControllerFinalizerName)) + + sc := &v1.StorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForRBDSC}, sc) + Expect(err).NotTo(HaveOccurred()) + performStandardChecksForCephSc(sc, nameForRBDSC, controllerNamespace, CephStorageClassConfig{ + ClusterConnectionName: clusterConnectionName, + ReclaimPolicy: reclaimPolicyDelete, + Type: storageTypeRBD, + RBD: &RBDConfig{ + DefaultFSType: "ext4", + Pool: pool, + }, + }) + }) + + It("Update_ceph_sc_with_rbd", func() { + csc := &v1alpha1.CephStorageClass{} + err := cl.Get(ctx, client.ObjectKey{Name: nameForRBDSC}, csc) + Expect(err).NotTo(HaveOccurred()) + + csc.Spec.ReclaimPolicy = reclaimPolicyRetain + + err = cl.Update(ctx, csc) + 
Expect(err).NotTo(HaveOccurred()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForRBDSC}, csc) + Expect(err).NotTo(HaveOccurred()) + + Expect(csc).NotTo(BeNil()) + Expect(csc.Name).To(Equal(nameForRBDSC)) + Expect(csc.Finalizers).To(HaveLen(1)) + Expect(csc.Finalizers).To(ContainElement(controller.CephStorageClassControllerFinalizerName)) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + + shouldRequeue, _, err := controller.RunStorageClassEventReconcile(ctx, cl, log, scList, csc, controllerNamespace) + Expect(err).NotTo(HaveOccurred()) + Expect(shouldRequeue).To(BeFalse()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForRBDSC}, csc) + Expect(err).NotTo(HaveOccurred()) + Expect(csc.Finalizers).To(HaveLen(1)) + Expect(csc.Finalizers).To(ContainElement(controller.CephStorageClassControllerFinalizerName)) + + sc := &v1.StorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForRBDSC}, sc) + Expect(err).NotTo(HaveOccurred()) + performStandardChecksForCephSc(sc, nameForRBDSC, controllerNamespace, CephStorageClassConfig{ + ClusterConnectionName: clusterConnectionName, + ReclaimPolicy: reclaimPolicyRetain, + Type: storageTypeRBD, + RBD: &RBDConfig{ + DefaultFSType: "ext4", + Pool: pool, + }, + }) + }) + + It("Remove_ceph_sc_with_rbd", func() { + csc := &v1alpha1.CephStorageClass{} + err := cl.Get(ctx, client.ObjectKey{Name: nameForRBDSC}, csc) + Expect(err).NotTo(HaveOccurred()) + + err = cl.Delete(ctx, csc) + Expect(err).NotTo(HaveOccurred()) + + csc = &v1alpha1.CephStorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForRBDSC}, csc) + Expect(err).NotTo(HaveOccurred()) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + + shouldRequeue, _, err := controller.RunStorageClassEventReconcile(ctx, cl, log, scList, csc, controllerNamespace) + Expect(err).NotTo(HaveOccurred()) + Expect(shouldRequeue).To(BeFalse()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForRBDSC}, csc) + Expect(k8serrors.IsNotFound(err)).To(BeTrue()) + + sc := &v1.StorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForRBDSC}, sc) + Expect(k8serrors.IsNotFound(err)).To(BeTrue()) + }) + + It("Create_ceph_sc_when_sc_with_another_provisioner_exists", func() { + sc := &v1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: nameForRBDSC, + }, + Provisioner: "test-provisioner", + } + + err := cl.Create(ctx, sc) + Expect(err).NotTo(HaveOccurred()) + + cephSCtemplate := generateCephStorageClass(CephStorageClassConfig{ + Name: nameForRBDSC, + ClusterConnectionName: clusterConnectionName, + ReclaimPolicy: reclaimPolicyDelete, + Type: storageTypeCephFS, + CephFS: &CephFSConfig{ + FSName: fsName, + Pool: pool, + }, + }) + + err = cl.Create(ctx, cephSCtemplate) + Expect(err).NotTo(HaveOccurred()) + + csc := &v1alpha1.CephStorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForRBDSC}, csc) + Expect(err).NotTo(HaveOccurred()) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + + shouldRequeue, _, err := controller.RunStorageClassEventReconcile(ctx, cl, log, scList, csc, controllerNamespace) + Expect(err).To(HaveOccurred()) + Expect(shouldRequeue).To(BeTrue()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForRBDSC}, sc) + Expect(err).NotTo(HaveOccurred()) + Expect(sc.Provisioner).To(Equal("test-provisioner")) + Expect(sc.Finalizers).To(HaveLen(0)) + Expect(sc.Labels).To(HaveLen(0)) + }) + + 
It("Update_ceph_sc_when_sc_with_another_provisioner_exists", func() { + csc := &v1alpha1.CephStorageClass{} + err := cl.Get(ctx, client.ObjectKey{Name: nameForRBDSC}, csc) + Expect(err).NotTo(HaveOccurred()) + + csc.Spec.ReclaimPolicy = reclaimPolicyRetain + + err = cl.Update(ctx, csc) + Expect(err).NotTo(HaveOccurred()) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + + shouldRequeue, _, err := controller.RunStorageClassEventReconcile(ctx, cl, log, scList, csc, controllerNamespace) + Expect(err).To(HaveOccurred()) + Expect(shouldRequeue).To(BeTrue()) + + sc := &v1.StorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForRBDSC}, sc) + Expect(err).NotTo(HaveOccurred()) + Expect(sc.Provisioner).To(Equal("test-provisioner")) + Expect(sc.Finalizers).To(HaveLen(0)) + Expect(sc.Labels).To(HaveLen(0)) + }) + + It("Remove_ceph_sc_when_sc_with_another_provisioner_exists", func() { + csc := &v1alpha1.CephStorageClass{} + err := cl.Get(ctx, client.ObjectKey{Name: nameForRBDSC}, csc) + Expect(err).NotTo(HaveOccurred()) + + err = cl.Delete(ctx, csc) + Expect(err).NotTo(HaveOccurred()) + + csc = &v1alpha1.CephStorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForRBDSC}, csc) + Expect(err).NotTo(HaveOccurred()) + Expect(csc.Finalizers).To(HaveLen(1)) + Expect(csc.Finalizers).To(ContainElement(controller.CephStorageClassControllerFinalizerName)) + Expect(csc.DeletionTimestamp).NotTo(BeNil()) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + + shouldRequeue, _, err := controller.RunStorageClassEventReconcile(ctx, cl, log, scList, csc, controllerNamespace) + Expect(err).NotTo(HaveOccurred()) + Expect(shouldRequeue).To(BeFalse()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForRBDSC}, csc) + Expect(k8serrors.IsNotFound(err)).To(BeTrue()) + + sc := &v1.StorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForRBDSC}, sc) + Expect(err).NotTo(HaveOccurred()) + Expect(sc.Provisioner).To(Equal("test-provisioner")) + Expect(sc.Finalizers).To(HaveLen(0)) + Expect(sc.Labels).To(HaveLen(0)) + }) + }) type CephStorageClassConfig struct { From d09603da8869b783d92717cecef3f776beabe4c3 Mon Sep 17 00:00:00 2001 From: Aleksandr Zimin Date: Mon, 17 Jun 2024 02:09:59 +0300 Subject: [PATCH 09/21] finish tests Signed-off-by: Aleksandr Zimin --- .../ceph_cluster_connection_watcher_func.go | 2 +- .../ceph_cluster_connection_watcher_test.go | 286 ++++++++++++++++++ .../ceph_storage_class_watcher_test.go | 14 + 3 files changed, 301 insertions(+), 1 deletion(-) create mode 100644 images/controller/pkg/controller/ceph_cluster_connection_watcher_test.go diff --git a/images/controller/pkg/controller/ceph_cluster_connection_watcher_func.go b/images/controller/pkg/controller/ceph_cluster_connection_watcher_func.go index 4828421..2d5da21 100644 --- a/images/controller/pkg/controller/ceph_cluster_connection_watcher_func.go +++ b/images/controller/pkg/controller/ceph_cluster_connection_watcher_func.go @@ -50,7 +50,7 @@ func validateCephClusterConnectionSpec(cephClusterConnection *v1alpha1.CephClust failedMsgBuilder.WriteString("the spec.clusterID field is empty; ") } - if cephClusterConnection.Spec.Monitors == nil { + if len(cephClusterConnection.Spec.Monitors) == 0 { validationPassed = false failedMsgBuilder.WriteString("the spec.monitors field is empty; ") } diff --git a/images/controller/pkg/controller/ceph_cluster_connection_watcher_test.go 
b/images/controller/pkg/controller/ceph_cluster_connection_watcher_test.go new file mode 100644 index 0000000..2f21364 --- /dev/null +++ b/images/controller/pkg/controller/ceph_cluster_connection_watcher_test.go @@ -0,0 +1,286 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller_test + +import ( + "context" + "encoding/json" + + v1alpha1 "d8-controller/api/v1alpha1" + "d8-controller/pkg/controller" + "d8-controller/pkg/internal" + "d8-controller/pkg/logger" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe(controller.CephClusterConnectionCtrlName, func() { + const ( + controllerNamespace = "test-namespace" + nameForClusterConnection = "example-ceph-connection" + clusterID = "clusterID1" + userID = "admin" + userKey = "key" + configMapName = internal.CSICephConfigMapName + secretNamePrefix = internal.CephClusterConnectionSecretPrefix + ) + + var ( + ctx = context.Background() + cl = NewFakeClient() + log = logger.Logger{} + monitors = []string{"mon1", "mon2", "mon3"} + ) + + It("CephClusterConnection positive operations", func() { + cephClusterConnection := &v1alpha1.CephClusterConnection{ + ObjectMeta: metav1.ObjectMeta{ + Name: nameForClusterConnection, + }, + Spec: v1alpha1.CephClusterConnectionSpec{ + ClusterID: clusterID, + Monitors: monitors, + UserID: userID, + UserKey: userKey, + }, + } + + By("Creating CephClusterConnection") + err := cl.Create(ctx, cephClusterConnection) + Expect(err).NotTo(HaveOccurred()) + + createdCephClusterConnection := &v1alpha1.CephClusterConnection{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForClusterConnection}, createdCephClusterConnection) + Expect(err).NotTo(HaveOccurred()) + Expect(createdCephClusterConnection).NotTo(BeNil()) + Expect(createdCephClusterConnection.Name).To(Equal(nameForClusterConnection)) + Expect(createdCephClusterConnection.Spec.ClusterID).To(Equal(clusterID)) + Expect(createdCephClusterConnection.Spec.UserID).To(Equal(userID)) + Expect(createdCephClusterConnection.Spec.UserKey).To(Equal(userKey)) + Expect(createdCephClusterConnection.Spec.Monitors).To(ConsistOf(monitors)) + Expect(createdCephClusterConnection.Finalizers).To(HaveLen(0)) + + By("Running reconcile for CephClusterConnection creation") + secretList := &corev1.SecretList{} + err = cl.List(ctx, secretList) + Expect(err).NotTo(HaveOccurred()) + + shouldReconcile, _, err := controller.RunCephClusterConnectionEventReconcile(ctx, cl, log, secretList, createdCephClusterConnection, controllerNamespace) + Expect(err).NotTo(HaveOccurred()) + Expect(shouldReconcile).To(BeFalse()) + + By("Verifying dependent Secret") + verifySecret(ctx, cl, cephClusterConnection, controllerNamespace) + + By("Verifying dependent ConfigMap") + verifyConfigMap(ctx, cl, cephClusterConnection, controllerNamespace) + + By("Verifying CephClusterConnection after 
create reconcile") + createdCephClusterConnection = &v1alpha1.CephClusterConnection{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForClusterConnection}, createdCephClusterConnection) + Expect(err).NotTo(HaveOccurred()) + Expect(createdCephClusterConnection).NotTo(BeNil()) + Expect(createdCephClusterConnection.Finalizers).To(HaveLen(1)) + Expect(createdCephClusterConnection.Finalizers).To(ContainElement(controller.CephClusterConnectionControllerFinalizerName)) + // Expect(createdCephClusterConnection.Status).NotTo(BeNil()) + // Expect(createdCephClusterConnection.Status.Phase).To(Equal(v1alpha1.PhaseCreated)) + + By("Updating CephClusterConnection") + newMonitors := []string{"mon4", "mon5", "mon6"} + createdCephClusterConnection.Spec.Monitors = newMonitors + err = cl.Update(ctx, createdCephClusterConnection) + Expect(err).NotTo(HaveOccurred()) + + updatedCephClusterConnection := &v1alpha1.CephClusterConnection{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForClusterConnection}, updatedCephClusterConnection) + Expect(err).NotTo(HaveOccurred()) + Expect(updatedCephClusterConnection).NotTo(BeNil()) + Expect(updatedCephClusterConnection.Spec.Monitors).To(ConsistOf(newMonitors)) + + By("Running reconcile for CephClusterConnection update") + secretList = &corev1.SecretList{} + err = cl.List(ctx, secretList) + Expect(err).NotTo(HaveOccurred()) + + shouldReconcile, _, err = controller.RunCephClusterConnectionEventReconcile(ctx, cl, log, secretList, updatedCephClusterConnection, controllerNamespace) + Expect(err).NotTo(HaveOccurred()) + Expect(shouldReconcile).To(BeFalse()) + + By("Verifying updated Secret") + verifySecret(ctx, cl, updatedCephClusterConnection, controllerNamespace) + + By("Verifying updated ConfigMap") + verifyConfigMap(ctx, cl, updatedCephClusterConnection, controllerNamespace) + + By("Verifying CephClusterConnection after update reconcile") + updatedCephClusterConnection = &v1alpha1.CephClusterConnection{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForClusterConnection}, updatedCephClusterConnection) + Expect(err).NotTo(HaveOccurred()) + Expect(updatedCephClusterConnection).NotTo(BeNil()) + Expect(updatedCephClusterConnection.Finalizers).To(HaveLen(1)) + Expect(updatedCephClusterConnection.Finalizers).To(ContainElement(controller.CephClusterConnectionControllerFinalizerName)) + // Expect(updatedCephClusterConnection.Status).NotTo(BeNil()) + // Expect(updatedCephClusterConnection.Status.Phase).To(Equal(v1alpha1.PhaseCreated)) + + By("Deleting CephClusterConnection") + err = cl.Delete(ctx, cephClusterConnection) + Expect(err).NotTo(HaveOccurred()) + + By("Running reconcile for CephClusterConnection deletion") + secretList = &corev1.SecretList{} + err = cl.List(ctx, secretList) + Expect(err).NotTo(HaveOccurred()) + + deletedCephClusterConnection := &v1alpha1.CephClusterConnection{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForClusterConnection}, deletedCephClusterConnection) + Expect(err).NotTo(HaveOccurred()) + Expect(deletedCephClusterConnection).NotTo(BeNil()) + Expect(deletedCephClusterConnection.Finalizers).To(HaveLen(1)) + Expect(deletedCephClusterConnection.Finalizers).To(ContainElement(controller.CephClusterConnectionControllerFinalizerName)) + + shouldReconcile, _, err = controller.RunCephClusterConnectionEventReconcile(ctx, cl, log, secretList, deletedCephClusterConnection, controllerNamespace) + Expect(err).NotTo(HaveOccurred()) + Expect(shouldReconcile).To(BeFalse()) + + By("Verifying ConfigMap update after deletion") + 
verifyConfigMapWithoutClusterConnection(ctx, cl, cephClusterConnection, controllerNamespace) + + By("Verifying Secret deletion") + verifySecretNotExists(ctx, cl, cephClusterConnection, controllerNamespace) + + By("Verifying CephClusterConnection after delete reconcile") + deletedCephClusterConnection = &v1alpha1.CephClusterConnection{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForClusterConnection}, deletedCephClusterConnection) + Expect(k8serrors.IsNotFound(err)).To(BeTrue()) + }) + + It("handles invalid CephClusterConnection spec", func() { + By("Creating CephClusterConnection with empty ClusterID") + cephClusterConnection := &v1alpha1.CephClusterConnection{ + ObjectMeta: metav1.ObjectMeta{ + Name: nameForClusterConnection, + }, + Spec: v1alpha1.CephClusterConnectionSpec{ + ClusterID: "", + Monitors: []string{"mon1", "mon2", "mon3"}, + UserID: userID, + UserKey: userKey, + }, + } + + err := cl.Create(ctx, cephClusterConnection) + Expect(err).NotTo(HaveOccurred()) + + By("Running reconcile for invalid CephClusterConnection") + secretList := &corev1.SecretList{} + err = cl.List(ctx, secretList) + Expect(err).NotTo(HaveOccurred()) + + shouldReconcile, _, err := controller.RunCephClusterConnectionEventReconcile(ctx, cl, log, secretList, cephClusterConnection, controllerNamespace) + Expect(err).To(HaveOccurred()) + Expect(shouldReconcile).To(BeFalse()) + + By("Verifying no Secret created for invalid CephClusterConnection") + verifySecretNotExists(ctx, cl, cephClusterConnection, controllerNamespace) + + By("Verifying no ConfigMap entry created for invalid CephClusterConnection") + verifyConfigMapWithoutClusterConnection(ctx, cl, cephClusterConnection, controllerNamespace) + + By("Creating CephClusterConnection with empty Monitors") + cephClusterConnection.Spec.ClusterID = clusterID + cephClusterConnection.Spec.Monitors = []string{} + + err = cl.Update(ctx, cephClusterConnection) + Expect(err).NotTo(HaveOccurred()) + Expect(cephClusterConnection.Spec.Monitors).To(HaveLen(0)) + + By("Running reconcile for CephClusterConnection with empty Monitors") + shouldReconcile, _, err = controller.RunCephClusterConnectionEventReconcile(ctx, cl, log, secretList, cephClusterConnection, controllerNamespace) + Expect(err).To(HaveOccurred()) + Expect(shouldReconcile).To(BeFalse()) + + By("Verifying no Secret created for CephClusterConnection with empty Monitors") + verifySecretNotExists(ctx, cl, cephClusterConnection, controllerNamespace) + + By("Verifying no ConfigMap entry created for CephClusterConnection with empty Monitors") + verifyConfigMapWithoutClusterConnection(ctx, cl, cephClusterConnection, controllerNamespace) + }) +}) + +func verifySecret(ctx context.Context, cl client.Client, cephClusterConnection *v1alpha1.CephClusterConnection, controllerNamespace string) { + secretName := internal.CephClusterConnectionSecretPrefix + cephClusterConnection.Name + secret := &corev1.Secret{} + err := cl.Get(ctx, client.ObjectKey{Name: secretName, Namespace: controllerNamespace}, secret) + Expect(err).NotTo(HaveOccurred()) + Expect(secret).NotTo(BeNil()) + Expect(secret.Finalizers).To(HaveLen(1)) + Expect(secret.Finalizers).To(ContainElement(controller.CephClusterConnectionControllerFinalizerName)) + Expect(secret.StringData).To(HaveKeyWithValue("userID", cephClusterConnection.Spec.UserID)) + Expect(secret.StringData).To(HaveKeyWithValue("userKey", cephClusterConnection.Spec.UserKey)) + Expect(secret.StringData).To(HaveKeyWithValue("adminID", cephClusterConnection.Spec.UserID)) + 
Expect(secret.StringData).To(HaveKeyWithValue("adminKey", cephClusterConnection.Spec.UserKey)) +} + +func verifyConfigMap(ctx context.Context, cl client.Client, cephClusterConnection *v1alpha1.CephClusterConnection, controllerNamespace string) { + configMap := &corev1.ConfigMap{} + err := cl.Get(ctx, client.ObjectKey{Name: internal.CSICephConfigMapName, Namespace: controllerNamespace}, configMap) + Expect(err).NotTo(HaveOccurred()) + Expect(configMap).NotTo(BeNil()) + Expect(configMap.Finalizers).To(HaveLen(1)) + Expect(configMap.Finalizers).To(ContainElement(controller.CephClusterConnectionControllerFinalizerName)) + + clusterConfigs := v1alpha1.ClusterConfigList{} + err = json.Unmarshal([]byte(configMap.Data["config.json"]), &clusterConfigs) + Expect(err).NotTo(HaveOccurred()) + found := false + for _, cfg := range clusterConfigs.Items { + if cfg.ClusterID == cephClusterConnection.Spec.ClusterID { + Expect(cfg.Monitors).To(ConsistOf(cephClusterConnection.Spec.Monitors)) + found = true + break + } + } + Expect(found).To(BeTrue(), "Cluster config not found in ConfigMap") +} + +func verifyConfigMapWithoutClusterConnection(ctx context.Context, cl client.Client, cephClusterConnection *v1alpha1.CephClusterConnection, controllerNamespace string) { + configMap := &corev1.ConfigMap{} + err := cl.Get(ctx, client.ObjectKey{Name: internal.CSICephConfigMapName, Namespace: controllerNamespace}, configMap) + Expect(err).NotTo(HaveOccurred()) + Expect(configMap).NotTo(BeNil()) + Expect(configMap.Finalizers).To(HaveLen(1)) + Expect(configMap.Finalizers).To(ContainElement(controller.CephClusterConnectionControllerFinalizerName)) + + clusterConfigs := v1alpha1.ClusterConfigList{} + err = json.Unmarshal([]byte(configMap.Data["config.json"]), &clusterConfigs) + Expect(err).NotTo(HaveOccurred()) + for _, cfg := range clusterConfigs.Items { + Expect(cfg.ClusterID).NotTo(Equal(cephClusterConnection.Spec.ClusterID)) + } +} + +func verifySecretNotExists(ctx context.Context, cl client.Client, cephClusterConnection *v1alpha1.CephClusterConnection, controllerNamespace string) { + secretName := internal.CephClusterConnectionSecretPrefix + cephClusterConnection.Name + secret := &corev1.Secret{} + err := cl.Get(ctx, client.ObjectKey{Name: secretName, Namespace: controllerNamespace}, secret) + Expect(k8serrors.IsNotFound(err)).To(BeTrue()) +} diff --git a/images/controller/pkg/controller/ceph_storage_class_watcher_test.go b/images/controller/pkg/controller/ceph_storage_class_watcher_test.go index 7a1aa9a..1c334e6 100644 --- a/images/controller/pkg/controller/ceph_storage_class_watcher_test.go +++ b/images/controller/pkg/controller/ceph_storage_class_watcher_test.go @@ -473,6 +473,20 @@ var _ = Describe(controller.CephStorageClassCtrlName, func() { Expect(sc.Labels).To(HaveLen(0)) }) + It("Remove_ceph_cluster_connection", func() { + + cephClusterConnection := &v1alpha1.CephClusterConnection{} + err := cl.Get(ctx, client.ObjectKey{Name: clusterConnectionName}, cephClusterConnection) + Expect(err).NotTo(HaveOccurred()) + + err = cl.Delete(ctx, cephClusterConnection) + Expect(err).NotTo(HaveOccurred()) + + cephClusterConnection = &v1alpha1.CephClusterConnection{} + err = cl.Get(ctx, client.ObjectKey{Name: clusterConnectionName}, cephClusterConnection) + Expect(k8serrors.IsNotFound(err)).To(BeTrue()) + }) + }) type CephStorageClassConfig struct { From 7aeb42511f899f8cd13d1f052cfb8267df4b6ddc Mon Sep 17 00:00:00 2001 From: Aleksandr Zimin Date: Mon, 17 Jun 2024 02:12:47 +0300 Subject: [PATCH 10/21] fix enabled Signed-off-by: 
Aleksandr Zimin --- enabled | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/enabled b/enabled index 81a65ba..f24185d 100755 --- a/enabled +++ b/enabled @@ -17,7 +17,15 @@ source /deckhouse/shell_lib.sh function __main__() { - echo "true" > $MODULE_ENABLED_RESULT + enabled::disable_module_if_cluster_is_not_bootstraped + enabled::disable_module_in_kubernetes_versions_less_than 1.23.0 + + if ! values::array_has global.enabledModules ceph-csi" ; then + echo "You must disable the ceph-csi module for the csi-ceph module to work." + echo "false" > "$MODULE_ENABLED_RESULT" + else + echo "true" > "$MODULE_ENABLED_RESULT" + fi } enabled::run $@ From 502942d2f568f87d680395e5057d7a36c00bfb4c Mon Sep 17 00:00:00 2001 From: Aleksandr Zimin Date: Tue, 18 Jun 2024 23:08:50 +0300 Subject: [PATCH 11/21] fix enabled script Signed-off-by: Aleksandr Zimin --- enabled | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/enabled b/enabled index f24185d..a79917e 100755 --- a/enabled +++ b/enabled @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2021 Flant JSC +# Copyright 2024 Flant JSC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -20,7 +20,7 @@ function __main__() { enabled::disable_module_if_cluster_is_not_bootstraped enabled::disable_module_in_kubernetes_versions_less_than 1.23.0 - if ! values::array_has global.enabledModules ceph-csi" ; then + if ! values::array_has global.enabledModules "ceph-csi" ; then echo "You must disable the ceph-csi module for the csi-ceph module to work." echo "false" > "$MODULE_ENABLED_RESULT" else From 4283134c8a4147c4d8fb5fbdd80ab3b3a4c0708d Mon Sep 17 00:00:00 2001 From: Aleksandr Zimin Date: Tue, 18 Jun 2024 23:29:39 +0300 Subject: [PATCH 12/21] fix workflow Signed-off-by: Aleksandr Zimin --- .github/workflows/deploy_dev.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/deploy_dev.yml b/.github/workflows/deploy_dev.yml index ba1a5bd..d1a4400 100644 --- a/.github/workflows/deploy_dev.yml +++ b/.github/workflows/deploy_dev.yml @@ -9,6 +9,7 @@ env: MODULES_REGISTRY_PASSWORD: ${{ secrets.DEV_MODULES_REGISTRY_PASSWORD }} RELEASE_CHANNEL: ${{ github.event.inputs.channel }} MODULES_MODULE_TAG: ${{ github.event.inputs.tag }} + SOURCE_REPO: ${{ secrets.SOURCE_REPO }} on: workflow_dispatch: From fa74e4cae57f5caca66ca2e9aab48d7ab08a1ae1 Mon Sep 17 00:00:00 2001 From: Aleksandr Zimin Date: Tue, 18 Jun 2024 23:31:03 +0300 Subject: [PATCH 13/21] fix workflow2 Signed-off-by: Aleksandr Zimin --- .github/workflows/deploy_dev.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/deploy_dev.yml b/.github/workflows/deploy_dev.yml index d1a4400..fadd54a 100644 --- a/.github/workflows/deploy_dev.yml +++ b/.github/workflows/deploy_dev.yml @@ -9,6 +9,7 @@ env: MODULES_REGISTRY_PASSWORD: ${{ secrets.DEV_MODULES_REGISTRY_PASSWORD }} RELEASE_CHANNEL: ${{ github.event.inputs.channel }} MODULES_MODULE_TAG: ${{ github.event.inputs.tag }} + GOPROXY: ${{ secrets.GOPROXY }} SOURCE_REPO: ${{ secrets.SOURCE_REPO }} on: From ab4c8f72d1c3e9e15f27696ef9977665736f42e6 Mon Sep 17 00:00:00 2001 From: Aleksandr Zimin Date: Wed, 19 Jun 2024 00:06:26 +0300 Subject: [PATCH 14/21] fix enabled Signed-off-by: Aleksandr Zimin --- enabled | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/enabled b/enabled index a79917e..734674c 100755 --- a/enabled +++ b/enabled @@ -20,7 +20,7 @@ function __main__() { 
enabled::disable_module_if_cluster_is_not_bootstraped enabled::disable_module_in_kubernetes_versions_less_than 1.23.0 - if ! values::array_has global.enabledModules "ceph-csi" ; then + if values::array_has global.enabledModules "ceph-csi" ; then echo "You must disable the ceph-csi module for the csi-ceph module to work." echo "false" > "$MODULE_ENABLED_RESULT" else From 1dff6c549d104cd521b139c39481493b7b266606 Mon Sep 17 00:00:00 2001 From: Aleksandr Zimin Date: Wed, 19 Jun 2024 00:32:11 +0300 Subject: [PATCH 15/21] fix registry secret Signed-off-by: Aleksandr Zimin --- templates/registry-secret.yaml | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/templates/registry-secret.yaml b/templates/registry-secret.yaml index 151b76a..c1a4f75 100644 --- a/templates/registry-secret.yaml +++ b/templates/registry-secret.yaml @@ -2,9 +2,13 @@ apiVersion: v1 kind: Secret metadata: - name: deckhouse-registry - namespace: d8-{{ $.Chart.Name }} + name: {{ .Chart.Name }}-module-registry + namespace: d8-{{ .Chart.Name }} {{- include "helm_lib_module_labels" (list .) | nindent 2 }} type: kubernetes.io/dockerconfigjson data: - .dockerconfigjson: {{ $.Values.global.modulesImages.registry.dockercfg }} +{{- if dig "registry" "dockercfg" false .Values.csiNfs }} + .dockerconfigjson: {{ .Values.csiNfs.registry.dockercfg }} +{{- else }} + .dockerconfigjson: "eyJhdXRocyI6IHsgInJlZ2lzdHJ5LmRlY2tob3VzZS5pbyI6IHt9fX0=" +{{- end }} From aa16ff7a944003d0d8d4532e41ff6c3abd1d35c3 Mon Sep 17 00:00:00 2001 From: Aleksandr Zimin Date: Wed, 19 Jun 2024 00:47:39 +0300 Subject: [PATCH 16/21] fix Signed-off-by: Aleksandr Zimin --- templates/registry-secret.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/templates/registry-secret.yaml b/templates/registry-secret.yaml index c1a4f75..6a1ac52 100644 --- a/templates/registry-secret.yaml +++ b/templates/registry-secret.yaml @@ -7,8 +7,8 @@ metadata: {{- include "helm_lib_module_labels" (list .) 
| nindent 2 }} type: kubernetes.io/dockerconfigjson data: -{{- if dig "registry" "dockercfg" false .Values.csiNfs }} - .dockerconfigjson: {{ .Values.csiNfs.registry.dockercfg }} +{{- if dig "registry" "dockercfg" false .Values.csiCeph }} + .dockerconfigjson: {{ .Values.csiCeph.registry.dockercfg }} {{- else }} .dockerconfigjson: "eyJhdXRocyI6IHsgInJlZ2lzdHJ5LmRlY2tob3VzZS5pbyI6IHt9fX0=" {{- end }} From f4e0e6d7569b48f781c16a72a3b895255f374573 Mon Sep 17 00:00:00 2001 From: Aleksandr Zimin Date: Wed, 19 Jun 2024 01:33:26 +0300 Subject: [PATCH 17/21] update lib helm Signed-off-by: Aleksandr Zimin --- charts/deckhouse_lib_helm-1.21.0.tgz | Bin 24064 -> 0 bytes charts/deckhouse_lib_helm-1.22.0.tgz | Bin 0 -> 24121 bytes 2 files changed, 0 insertions(+), 0 deletions(-) delete mode 100644 charts/deckhouse_lib_helm-1.21.0.tgz create mode 100644 charts/deckhouse_lib_helm-1.22.0.tgz diff --git a/charts/deckhouse_lib_helm-1.21.0.tgz b/charts/deckhouse_lib_helm-1.21.0.tgz deleted file mode 100644 index d00bd9f9445d4794ba4eae22cdf655de876d8626..0000000000000000000000000000000000000000 GIT binary patch [base85-encoded binary payload of the deckhouse_lib_helm chart archives (1.21.0 deleted, 1.22.0 added) omitted]
z9#`}KA3uKbkpF)lzenghlFU#-M*^EVVCd9!340%x+}e8d2)zNb$H&F$_tuB{L-b+m z!(cG@u=N3b`Q;hAk&v}@DfqkjmtX3a=;!>*5n(sxR1}nU`epl3%bOi!zz9R=TKBZ96*IBMajTCFO=VMK&=!Xz1zB+^%(b~mqzzf^%8@YP6B%k(9g=rS;0ELizo}}b z^*cQkfRB4&$y-;L{nl<8p%XZ80QIli_;njhp*fro>Zq;(c~i3r*6)-`WgD7I&`?I2 zfd=5k%N9XFL2Ug;)7Qnz&aZVxwAEgPyOKVH^g_|bbiWTfu~IhC(MqTi*)$jzwe9AJ zr*l_jp=Ch!Y2S%Ml1LEMEg-K=rAwU8Iz}4RFB^cu^A@7uUC4Mi#x!aB*1d0mVS8{J zdjE9~2pz;UHLD89s2st~g{^|J{Q{<4O-%dmYY3=9dF}Fc3{-jL38N{$Wg+7u9?!!Y zo@9;6U;I&<5}Q6OBDUoaR=?|ulSFUBgtF{i_?8Gp8avaXZz{4j)y3LmY>J9CpfN8h zur48?6P@xIrmPL5$pZ~zXw(3?^uB`NnnW4g*m+`3X}99wu^!-=xq8&J4MoO6EW=FZ zWnP-F_xrL%VGF-DJ#7`d=F7X`HO^g6jkTQ)$4!b|71XMf3`Phd7L$>0bpE(I7#?zm7$zilB30T$TXzT}bjkK-GGks=&_Adc&_mm6S~qQnd8}Ro^v8=u+?*QB#shbd@Ef zE}t}S5DML(StRYPs`OS{c0uX*)?B3(%1GdpggV~gnbIAXv$zdr=QmC^7d$4&RgbPO zpfW`S!-<{3F&>l9m9WmsT9wvS`+QL4v((CmR(etWO7Vn<`mpgH+b_c69)W~samYnD zosR^K-I~-d+m9Busba4%>S668m!{@5YB8G|36weQZcu}AOOrU#gQ^WmQ-d0mSw4hX z;WRg?K^hC#8fd4dsX+~j^>uA9>aXk4%`+@Y+Q=AK9-PpBLbSHr2Ys=C)>6So37N@0 zSV|xC#o}DYGKi|*^yesyB<;tFZrK;E8ICqVRV=TbuJ4@VU5LXq>;<*ucEN>6TE6Rrv+>@%g%}&Zs^aUs?#M^c*{}+= z$3;9o>>kZs2ft291bf4acXiut04a^4mNr#}mcCQLN3!(Cy|DMF{=JZh9xys+SpoGT zOk0m0q4M^>e)I_Cu*zHfTU+Qjj?s5--du*4SFiu;Z@cjCYx4)>&Rx8=e-L&<1!s_= zcY_6_aLdZoQ(Gt|DPgfhoS~o1g;SC!McG95y)`(0J8*Tt$iNmju3i{y^CSi~T>)5q zWJ=p^ZT;Fov<2W|Cx+#{D#Lg5H-vaYYAh{utJO z4^l4l`2K1mHWOLsCE-HmTeCEa)V5rPHvbgNoq2@<9lHhE!oj~}zmTJbVtpN_k(VOT z&g&2GP_tylg5|A&{KjCR26>G!LkACwQu;jpif~cCHHTi7(M~^v_oBIm{6ddW#+3~X zrMcIhj*&{FJ7Z)%u%!r0(%NcTZlL0xl-`{F=*5O6k)Pl7##_!}hju*z{n1-f zqJg-yv^?;IfbKykjbbP~45O?Rs#)c{Z2N|RduAcGv{(@ zo0+6zH0BwLm*&=i{rtH06N;OehPp+jS%)ibW8a7Wd2`>b`UWYi(i@r(ep0b`8p^Yl`?Apl&G*fLU^I}<~znrT{`dgo~464MgM-0`o0}Dk!B2$0^nf66teKWsiZpP)ECx^Pjvl}98 zsX0uUx|2AnM0=w6>IHf-{LAp*`36FsCKgcXkVUi56Z_7)D9@ewv{$+>Aq?|H?Mhyp zrU42Rkt#qze!-r7MA^h%&AUc(9Udt(6_*b{$oX%8n3gOOe?|+46LOV(0bCh;We67PYbPAZ17S;Cs5l* z!oJrG>ZCgdyOb4Om8(xJ?A&v#N4z<>^>$1T>}Mjb4RueC(R=Tq?Yw@Y z@hk_}mPOLF`m!SJg-n+gO20FOD+@=1H@n3eJSxDOGJvVdFJ-+xd#N$OQ?%bRDymz72?d!xH^;LM#q6t|B#eS$qLyO_C}A-eHh-A17TCza}ZBFkf(OmP5BD zX>>Dk2MP5#&bm%HZF+$9;pa0nSj`78I|RS9%YZv{JS0dXNnOU?CVfRCYrh*ExYN4p zcPZvxBc}$i8hqw*^h&DwBpPyz2dG%rg#Qh`KY#f$K*9CjuEWbWSH~x3>tD!xsS)vU zRSe7OdNUfmU$y-kDyMT1brYj+*Si$MOnA>vH?F?VX))GY`}U9XH|w zM!1L3elaj#FgrOEe1`rC-9Dz`t39-Zpr$*6zY&#o8i^A)#*)eWsu+t3?o62T3`Llk zd|eZFq*y2l_YT=4Tg_iiL_g*BQvI$YHEL_oY}hd!2mw+l?JM zE6$bgS#X0wx_fsQ@1|m}YBdUA*Q85gWL_40_hO?8(c2p=1Za|CgE3EHA_DY0YcvZ(#qRH)HN$mt;SK2V0)E~hGAbcM$q`42fV@AM~m%TdBZ!;ApPpzhI)}j z3ds?Ilo%TzqoT4IIPnC!?ZmlL8wypHd#leI`$dBRH0U?-tC$-_e7}-xmrZBE7GGxZ zKQgJ1ZD5ZrDUQsCa2cMs!3kPgM4be9qO)(-F1R;-m<8w+Y9wv}cVGXC=Vv5|V?ti{MW2F$!G8 zlmG?AfC*6G)^`GPDm*{|X7d4q!yk{$LT&y+h&HgJ_n)}9bE!jh~Y?PeJd5K8Ps(YxEnLZ?Iy;pUw<&JmjC)Dmek z*o{PjT|L@`NvvGlUJEN2IyEn>bhNdFPV^Yc5AdjYN`}bq`VHq@rc`Q|3NR5D$iU2D z)W%hhf=I<^EonlN@(remgzLc^_nIim%q|*2`|$HipfVP55pzaj zGukP0?5wAMm-8UI3o_M+G`H)U1_a#borubNY8z3_)twfRrTMhyWP_N*aE>G%`iFpy zkUxkpi`yHz0j4nRePFzWT@Pm)!%bzV*1>LJKGwiT4y=ufdLgWJ@UTWrSWs>B_bIw% ziiK26>Kp|X$1x~E{F96GKbd@Js77E45h!k~2Z|U)d48&dlcsT&nA@1Z5SU!`lap5{ zM_?}@+nC#1Qa+_}VQuo!jSBDS%SRfHQ6#9M5gc8}#Q{QK13_>+x1EIx`ww&P6WlVD z39?pkl!;wZ9Mz&`Z4#-46iW#uA2+cW#mK?1cOloE{@k7ng~}l|s|8#bt2jJ&1zKUG z*D2Y~P^x!#3nNL}``I9pv?NQ-BMc>w7vt#U`ur$=>WZt}Vu`##VB}=&&@Rl*;pS?_ zQ6wqwgknK&2;5gqwWBtJmZ~*(Mc`*i8Fdw+Ym*#&GjQKjBjHAQx~eoL9xBQ1=)7VrX823U!xg*hccEI8|pW!_2I4c=v29~Uu`z;SMT%Y?CynI zEFb4j2Ih&~Bd|7M*SmS&J7*3z!LM(Ib6F;;SgCI+H(23xarwZevl97BJ+p`2piLc& zQg`Irq>a$Ap3BgFOKOX0af=s!EeI&#`wIP>*A+poPKm8%A{Q^U{djqtvsPi`XYvlu z(xmqpczdvY@8Dp6K&7I5uLQ4l5MFCq@FYzzBZHi~LJm8i>9bbmYSaHE$~CS>!#Z9d 
zJ{es&c`LGyrd-j>Cf(em7_RkfQ;Cj<%^DyQWK0BMJ&PK3Qj(873mT`? z9?APEfA*HLxcCcB1?PX|+sR*bXms)7j{$akd7d8#^d5gtS?vFP;kGp1RA?wRJAVIN zrKNP~)|NY{YfD1i5CIF=QJNuW4+jVG_N(`h76JnF*BsSX&8d9-3!}DjV_Fvsx%ycv z^3sSrK!5eov@p|Ig0)P)-|5UcY~{mV40Iyg)P87>Vz4z`x%A09bKC>@T7$&)a<bp0C%0sU3+fSE3BfaJA4a!r#wDpin>i4^QzcKsrH$rE9{nz zFuQMy8I^W$?hBx2iv4N4OVi~H8C-T&-$&j{lNxMfl&xA0xLI)i;oDCp#2(bwI<;#Y z6#op>JS9aJZDc zVQyr3R8em|NM&qo0POwyb{jXcD2(sF^%R(PW{s4eNL_rJ(HUp`6iG>RV$1p@C)u+y z`8r@XNMhUsIsjTS$M#z1AtZsDOP^c;Zg+ffCw^N=; z5+-yMPDwJ`JDFmkhI2ei{%bwI{r&y@XHTBM|L*VaSO0ha@Zj*j4xT(a+<*M!@w2DT z|7-u?>HhxX|3dp~!r%GJq{8CA_V0|V%G~edhg13^5t4Ftgl-PEVj?3!Qw4v2N0J%J z6ip;jWR@ma5fqa#WmLbHXv_sVb^3#5JkAm#hg%HK$Pub5)|Q&57=i;^rRDA1!8hHrVehBM6Qm`Ei9w4G3?&=7qQaWTl?b z@J#uL8s2){`fPu{q)g8CGytl;>VRT1k0-Vz~OB;?+D zunzW{3#9J^I@0Thh1_}((3xlqT~Md*Zsfx zX8mn8B7zY`oDb6u;q09vg5l({_#&tiHhhKu#YNL|%DC8R9;Ugytb128!3iTin{gIW z-7sW49Ala|;&PwZ$jpw8kL?5UYXW+;?Kvp2KK59 zl(_SZT~7|Li!biPWfk)pIBjC4#BC`8Ucm25zwMnktz%sSug$FZ+|qG_NdT(y_uDO| zj+n^om01fxAJ7y_6xaYT+#{MUhg@0c)4eiwO5 z;Id832_BI|)&#p9(@3EJr)f}++Jjpz-X-?xiwi*N&x{R)2+b%vP7;1g;v@T{BKwxv zbT0HlNPl%2iqH(dt3ieN<>CG;e^8odHWTd3dv9u+77#W;svD%0xtoPrXT-Ma%}2x9 z!=~7M35~EcLK4KdnD3}mT+Cn68C6H|B@1hRvfv#oML$?$e6xEL6q<;9)Qn`i5LX! zvIfA6afM+k%Nt+-!c2Y7 zCq(Q?Ed{f=8tnMvUrzDi(`Pc9`P_M?({-KxO}SJTMCp*~2o@IY^O#8+n4ps+({a={ zR7mv`RnzaxC$=F($d_4?Tmv~d`vX+jyBi2Hp;C$YR)bB;RWucp3##T}lwhfKRAp&G z3hn|qmx{~=ks)dj2?`Nx;4A2}qGOt|Fdmc8X=DQu7db8iQ}KidqXG>OF`ExGRvo@l zH63IOwmxw}|4rgLBIm!CnHlII%f@cau$@qA1yUY|l0=!2w!ssMj43)JpE(6EUz zl1Oq#EFqR*CW#1h{b<8g=&d#);)X`#IEr}2lyRYvr1ov@TWd<_g}%)`wLbl`pc;rL zI1V{W=AqzRg=3l!qmyL@Jm+f%iLpGj$>0a{TgDafn`1HY%*X%*gMl&jo!+`93{ zk3qTp&5Ng%Ct5vl919}li!Zj1PfxGTuCI3jZ=PD+fbb{+2xEd(Cdgod6_GFA*w!z! z)u`Z-O3+9R96*PptT)|IVLKxKkkzTNDmoZT?G#>+cj`%niiBWox?z}8&YB3VG#TU` z_Qe<57ss#8t}l;I&UPvvnc+8xaEc?U=2bb{#{+)Lh!A8vNXQLIUL5+O?%1oT@(E1` z#^DW|gUfkolg=$Hnh(@etE;T4jayRp)rPPeU+2ugv1*b(N9aadq9e2yNvi*UD0%d@ z_Sxm@(@=}A`rS8z&yKu@2#qO8;(P{P{7Shb2bGubk#>(qxyK)B#5FqdF1%+#K!x(Q zH{K$AUBp^|L=9ppJm53k*Z|%u3|z5jAbm0g`7I+-$ujAk_Kl=dG9v;fp^nq=goFv7 z$WXhc5EL#z2m78iJ2>>>$}wVGq3z|o)53MDR@(coTliF#28ENgTU7YePN2MazPtyh zZe!lb{CkZyWicD9QFW(rQ>N&_1pEO^K+na%*75IwY+dRry2HNz)JCGT1GT)3sAMVp zK1_u4Nq;a652j(EY3R4U-or9<7>xE+bg!d|HlNyX)HZdOHy#bCNxu&RQi?Gel87E` z$@*={AIO^Y-_KgsqPR{d?KgdDOVN-N(a%=&8tGG|(g9&{$|+MXzS#cm_4S+Z{Bq~W zM_r_eZE9#*w5tzHZ|q5hRVIg1E|mxI!G+&x&@HnCyKstMY?Q2N5qtb>%^#}T4t`5idGw}c>`w_!)b!uwS6ryFefEvyCajJ<5AqEc z2s4ziw~XI1BnXzAp_}V;N(4dWEn#%a*zY1sT|y6aeK8c8k8EH#9>JEPpPcb|BF z_~+PGILIbOIMOa~xkGE_tA;eFJpbvsv^Cw3&SUItErsVUsXN0Zb9S~Kp%=w(>rrW6 zy!!4MhA22Md9=4Tp=z3qh7q6b70K3m9VM)MUoyBk~wOkc(alh+J+u&4fG`c zTfwtb9-*Iu;Fm4m7oeYmRC6sAVai${XmUfu2wqQ!3U+nCEa87|v5KbpA7*!0|0^a5 zQ6%_fc~nt?>8vL{lhE7;i%C-I1GWatx-{F{4WP|?NEGTNs2=2!8^Ws8c1YXGVHX8y z3lDZZVYrAH+ugQ#2RG|N0Y0O~Yd59^2qH-tkJr@|{XsoK$5BM2M2cI-ZT5?1nmdd$$Nr|{X3Bi&`|5!R2VM$`d z+14X;tB1B8i$uYvzt%S=)I24>4$)Z-6bpi4k_r-GMPfu58si%-G(|4~S#1P2_#p?a zajfQ^$PHTS^-CVfda0VFjW`!EWq!BU@f)eE0r?|zsV$q7d6l1&>+@@4u}*BJ97xQ! z(J=MYeWrUb%|;28Q*Cx0X&G8`hg_Z=qHN5?%9u& zd-evUo)|_X;kUrGWmwDQ7AJ2t&shp=L-wGY(iBBgOtmgH3vZ|~7!k^BJf;!VgF?5B z^8*LyhGHGjE3C*kORkAR+qt~KZBB?nmptA6x-zN%8l2*2 zOwDue^PL`=p7OXs;x(K5#}!y6SxNF^*gYHoAzWS7q&icI;6E3ke-_?-$stiV@vTRBZms`pdB1L<{7&3@kx%^l_3ABtZ;i{! 
zePB1v6;^`ONM*NE-u&9}=iv75MOi<4uU|;v^KkkFJr6$rzI^^?BJ>4t-*_xHUjlH!W71y!KY!cyX?w~4{B7H(on`+02HgO2wfD8Z7;xJx#fTs>fCy zik>g7zv}8m_AGZ!^l&mCt*_)cCNs{i$)~S)uK606F)_#kirzJo7ltFNmQQ}VRJCX5 zoTX^qFo~4{&(%)X(lL}Tt`z9XPT??3FGA=X~>|H8|$z841 zMi?NT5`neM&^fETMCdJ<9|4k*vD(wpueF7$3^fEA#ff%kXYQNV}P~x7s^`k85gBc78rd9n_SLqJE+R%Z9a;sw*#737K&l zS}cK~Z*gJ4c~cEz1NzRTuiNVlH#)1$m2?R6BT1CVFcv%wagr?Z7^NZ)jnQVmS}=?1 zK%HElhi4b3m#@z+-c(D(=pZ~HDi;!*s7SGzzW8GM^y>V_v#apx?A!4A^~v|)*02niZG=2EEQ1{N%VTG{Zqf$J(NFy(UI_p>{0mOp6VW~ z5_&GIa-)IfD4n4=F>rhV7UgD2CayH}ixr=iab7?3UtS3*S~WH4Dvi;=VKcI2s<`Cs zm&Ckim^fS2uIv@NP-`W4iYM4yh#q`RO<@P1xo#U)8k8})p%Ll$C?+>8WgMCpmRbee zyZXszU;pGj&AKzPc|E(8}M5bY7tTe*v?!=PCXkH8~HPivCi=xiQ7hGKm zBK_Dr-_8xe{uY3QOup1eUxd>Ci-_2Lu1@7Me-G6-ee80T^dw8&a`1E1_ z_db3-He1U|P_M(Z_a!=xWAxpdH<#h%)$9NM$1eQ)+WY}Wk1t-^KM1>_f-`f#84IfY zwK-y*`;iLhn52Zo5^;up1_F>IN>MhEzihiS)Y3Xt%%e=h2V1*$b5Ojs*LuOlH6~z~ zs#)?V#RAWWB0>&~=aBX(Hu)ZKFFVC5BEbM@HDXXXAP^9s;C=j*WOm!?NH zfu+$>-%(JqQBv1y6P4_gm0X*!>UcV&^#$Xm`Oynxwt^21pAYwk`@@3(1qaXmHavX# zWN804Sc)S1@RZLmWy|rx|CJke}Nxiziz0ct1;kO|Eo6^ZNw0EkS>RGFVfQ#b4Cr_VL z05O}Z`s;1;dBwteiA1#>DLf-dge3{JCHAt2cDxiYb9wIfWM0J>=>9qO zhJBFNuyy{d^ue&&K4%g{=Kg>_&fhRzykM~>GHB+}*67yy(Aew`*H3FdyhZk0;gOZ# z$z_5WIhWtTkT`ZD;%-#gzq+z&O-5H;fu=aL%cubwb_{MPJ8aZ<7u`;2G}QxJ!BR|& zMsqabd5Q(X62aY|odep(youS=A8K^jfGuEL*{+aEP&Nrl0n{fQR4b9Vsj;*)F_j@S z_=Cor-SOYsY($`r_MH|07sP)DPoExCamlC3imFVF~Y-0 zs3Y{j8oD-PFs4zl#NnZu@UVRa#OHGeMVg1Pkr|lV=tC|9Jn&lLz~MAHN>I8vIYUx6ja zB-l&1?8#dPiZZGAEKe=oj)+i$8D@AwM0cK6C-wIgy=!a>qjw_>+o-ZQgKJ!w-JF>_1F~SFZup zZT}CR9UNBkKlYzIeOUjwmtTMT?@!ko!boG?k!cKTIi|5$N(m4bc2T7}tD^x~^8)!= zQft4M@tAn)A@{OHTr#O`Tf|0EmEF+B#wPm`1v5CGUA}AB@b{weiy^%8F_f>dFMZ#= zJmkSEReBn<=JR=&)`rKQE)@bMzoFoiN}E^kMTuz*uW9oc@N{eh*Jop2m}J^QVHy%E zycXts`&jT990*xZsjU2E$8fl3^{tK`M->t3l?l!4S^RS;GgD8cKGr^?G2rWr`6@Um zcnO+r#fQ^~og{hE2zE*!43G#b6#U(M`jw^1`^F_Wcp{_{%`l5GTrUoHk5)bh((MI^ zilY(_AJ7=nq{d2-{7_=AgEOB<6x65;8d&YtjFukimPL&Ze;9w>Lash(Ais@thr5$P z-N2ZVxJFTXzKdr#Axq|qnwMDKR#!&w!qL@BSNdS@r#pRw;l{b8+}5uv{7RXX-~^_t zCw^{0@R}v_bN0j4d9brn!VNwA1AEK(EklLaj-0nD^<6W0n8l`&OL-Lb)f%=PoG3wuI*DN-lcOD#F!lN>bncP1JqbW`j!X_k? 
zJh{<>y*8Rw@1hPY8|*g|Tir=KmB8HjB*8O9o<0tAOvbZ#oM557PfRCN;e;psdy zTSE&|l99c{C^Q=(kkHeWr)*jqasO<$pFA8JEIt5$2L`2wai>BkWiAtcO4Yy1t88m$t#xkbv%Pm(d@z6d?8q%Tuk}a;evo&TUHjlvB}FB2Buyn{axIP zRPD}FtT5E{`QMUvY;~?=*nGj{wydB1$|p57lVudYw>>E7!F||(?vZ@(2}g3%GF2PN zgU<78z08AYU8JMdeL>KHx^*QH)>{0Pn^`6m8WB_@Mf?3$rvEd~$^B7(z4rg1I^d@E z|M#CiKX_W(|KC4+*#EzeUw_X8O5i7a<@urmZG<{gzTn&5{>NfkcrNbF9;bBGL)qDU zKuXZragw0mpKxc9LzdUjEU{CH=^V5bBDc~?_e48?!t|W=F=|Az436zQCWd3`j-S`5 zsZ^LR(AT&kLti3u=|BrrfAVK9Kf;wmD8G;GG)e z6q`gvA9Cw$CEj#)T^tKOv!@i6*pM&N@H;e+cefsakwI`Dpjo?|Jq7_rcT059>eo@mpfzQQp7r$ft6< zd^(QY!y0tw|1WyNtvci@278eziwedyq{~)wV62d8&)?Y z7S#ZBcBm6wck*o;`iHH8&+pgM{!>+4a=5euSYZF39voKBe;+bj72$*F2%svJkyoj<0uNH08G}WPHTqd1x+L@&aV@0D|Mr*x0`Z%hneJz!C`( zWkS*$lFWfUxU3U$>tlNr5GB&fyD=F1`KiT>^eABM-kVRngbo)Qn_LWcPnX6ruwr@t zzWf;gE3OAyYF6#t3!9{rt>FVKMoSLoT(r;nc;!GF=7nx%Vn zqJm$xTJA;^89SC2oLzCQj!ZS-;`tAfh$FOLJor}dEHzK67X%Kx1eBUY$~W&d)3{Z; z;;eePS!bfkg8D3#%b`|WByQ5T!qor%$4;G>|A681p)IVYFKv2YIX+klJ2vQtLGP@yA ztw}0@X;I;CsXCdT#$cyM z_Tj=?&2xiQR#IDH)?SU9-!Xl5b#eAGJb8U_{pRY}U5o8qp;MP*DT{t%e9e`Jc(HT{!`4;ricUE&uE3ljjffzx(+0cZ%f%>mk~XX{2MAG!0Oo^8J{U zGs2X8MA;F|rW_8|kkk1KO@Y~Ed&i!JR(Dl%RxWXJKzp4G<@UP&xT_*rPd#iAZdalf`UPSf-pcIP|g$j zfHJ12N=Wsv>8I|Jd;Rqq|E1EB>)-!9d|bW%;qc(#Vf^3AuYdg4SYLFh!ms(g4~^Fe zz|scXql?DB#jH;#SRtt%5yJ1YtLEzJ2}SOEn)u`EpeJts)c7b*x%oW|8%&7OCDdBw=0Tz;1U+UFL`VX zvpvwPQNg~_MDQ%_5NAObPBM3(kk#IjFZ2sk&yS-NPR_E{Y=~;xv_tMp+J)})^u7_Y9UFU(j_s)9q z^zqYbXTh^g_szYHL{GZ+HW<6O{Cv9DACH^tbHTaVQ19-qrMnD)o9!5vN>WKADW{RF zw*wTNkXl^1-+pOnUIjk2q)$O#;$%@&?#P~jLkIVx0epbS&qV}UcXaJD|68z z_BNW_cv!o%1;Me&#L$B}g!H<(RAgo>u_TZPvqq_Fgyvp0jSv+G!AfyBm25t|K6|Ng zLz)s+bVL)X=6CCrzTB?ev=JP?X31Qewr_Hzx%XMr>v}kXEX<*8n{bnm8xe`i>b>4#Bpr?F+kV0Uwq3OzTVT%aF&go;E{p|Fb#%2<-!2`C;3O$2JJM)V z68EakyiO)RGZ)ol0yMGC1{}Y9`N=gS#4HYR90#b#D>QZNzdwKZGC+YgRF`kAj!(`y z6A9N8zU+cBIsE}E&u8xH>5`7!t7;WeTE`Z}OD$hMZsh0FXrXgXX^fS`N z`%$^iPqF@Rnse99&`lVzT~H~1@AUk1W194NZmO=bf7-VEe6Jq)KKB3n<$u_dK%1%m z^7zS&_P8jR|BUYo43v(dG zFO*}C`;vDlW|DFSRrW(Z9w)l{j$YVx`vRgpuHqdwfLrJx@Pdn~2JUX70seOe*I88_ z&#~sT{`^L!Q_}RoO%w;^B9rcN$DD+I7bcbpA}(P41^9_Y=V-iYV%^~UQ$nGZm?Wy) zJAoY<#%`A`s+*+mfEuqsXu4;k;p<{_l%?H= z>GDk>-i8OF*rafLtFTTdq+b44inOtim%Ym zQ6>ao@YgRLQ(rw}Ra1g;90P-c*==UQXCBzS3KS0sqS9_o;{^TB|M|aq&Ktw@IVTkE zCZ6*QMVRT=h?+&`6bnUhlFZSBFe0#`Y*G-^9g6mr(V~cS4aZZom`F?n&v#YmZg})YkThE@NJom(6$fd z1CnV%6)aK(dja}@62f-S!49g4t_sCr?*n4Ylh{;-l?50a#|LHTJ0ATu{!Ou(?pimQ z`+itu7=D}Zk*$bal1#Aoztq5Xu*Ut&<)E z9e^~Sr3W50`u_%j|72Ku62g;i1Tvu!spIJ(7*n2o5q$5eu+m8~@2qU;n;jbyH5hwI z*(4!_9jjjfSVHbP-&38PwyDDxG8>&zA@j*ybpq%QfSU5Pj{r9W$}-Zxu0igtgP{C@ zqwWR5X{ssDpVkkt{a}ioxIQXf>uFTb`5w#o;YQYCsW(ZQjXGBd&CI^uo zVmqz_*4N=drz9J6bJ_lHGZaG<6di1qkpg2R0`x`xrZE&W7w=+ZKo@$_+3j^YdW2ZT zdKeqODpe)wE<+xXw~ARHpA6)2RGq*;Tu*AKS0rzb|3-8pEAZ)!SOjD?{$; z)^Cm4FA$>j)rXbgkO7t#L#k0$q?{}-JR3OLN&e;a7Q`QL+QwfX6_`5t$mt6Y!pu0MtmRr+3?^E2G4uwPoX<)8> z+2F8D=asphuyN6#X)&PU$44k&F!)IqjEYuHj~Wc5WQUMLm@ z!t0Z(ah-c7H;trHs(4KI3?#y5pjbP^a0tO5%hQ;LZ1%2 z{~aDS;GSj4+6aI<3m4Gvc+)gIUQEN2P1EpX$E~gGeW|tO602y6$h%Y!XLD|#yJMSARqmc{TXzrF>F0-8>81sWD$5JJ$o&E ztDP=vgYA?@aP8~iS#hr#@^6{nZfp7hDSi!g;dUDL>`I1BdnsGEvI6YN?Y(=5?ME?* z;ST%w%6+ZD3`Kdj-sj0{KlFDdYdffVqOYykMB9k=?}CP&4R-9-W#ux*+SX{9{6K4t zr?K0AmOGHp_xfUc2W@Z9@Vjk|O&T;H)<3<_j)e=;29G3Rs@iBLs4}1tHC!}V+SzT} z?5$YG^55;ovecLP)@#YmF0>KN^>3c{=dr12P=u;?T@^3|hYNdL{lnCPE0rRK!QZh$0B7!PKIVcIfcst8_G7gF zgyrqUQ_W&UyU?ihFK$=qXq@P-JfHQJX1$eQXwbdHT_@}JH_u#twVbDVPR_+(iDmQE z6e;NFnFUMc%PEz@VnS3XA(RXioe`d?P?CtV*jG0Drc>48e`Qi3+tOST0kt#%Hli+0 z?X6i0XzQbKchxt?Oq;g70TOebDMOaLT2d|WkjubqQLDw7bv>i~%CbdHcMY+Td;R&x zkJ3*xbj=(b@Aal?taq>fG9Hrb5~z@4BD 
zW>_jB0#puz%w*F=!5MqY_$|xnM*j@{eDlu$Dv&pqtsRy!Jv}>qWvy#jAlkRxa`86d zIF=|T5Jz#=Q&1(z+{Y!_xg09@XV+a*&ps?@a?f@VX9V%_5u68NE^QLt#m;JBTDT(NqMi)FLCp=^7>-iN= zNQ0=f2CE&DdtahePdj-`27)J~yk-3UeZ$=Nb)RpN#(Zq9K%zVO z=+jo31v*N(fJHM;FLO(=Q+;FU*?CacKZ?3Gszk+1S-(*3^Zs0+AWAX~;uQb_uKKEU z`M8NjBzQ7N6U@j4hn?ei1ySD@i5syD3zuYE)TMYr1THGf^^Um~%)mXk0oqnsnh-Rs z1SD!Y$QUgC;e`I1#P%W2)WDup8l-}c5;BvuyDn-WUkTwoOq^Ga_&h=zn^^tpuIb;qNqVgmuvc z+O#ZbKba5v(w5_BC$G&YduHIMI1TiZ9lA$oLT`vfDG@UY+u72rG)@T|;)lX`b`il; zwqbLmFl9=j?M&K-5zfppf$kwy*b6@}aLN|u+UDAoZLOaP(lS!^zC_2C+M|MQYVKaP zAw|K4Sy`~maW*>EIiqYM zh?D~x)FnjJRI8cL!tE|1)3xkJGVM^TWIGpyf1xYLrR)-yyt(cxNd zp42=g)d09yx>ShFQZ+~BB1U5YC+2bSk@Xb6WC^*ric={7mnGJkh_z5heq%jeQaW*jATY%CQcQsp& zs8T0n_@%S4SfcMr!fGC?C9Sdgbpt1j`jruB3LGjl1sY;0Q>TUfMHxGNb+5cfP?`!< zmG2u&6RFA12fHSXGE*9h#3s&OVmqOEV$!{t;G*%xYtoJLlTB|iI9Yw|3lglY_RATI zxQIC;aUPIZg!_Myay%Ghup7M&LQ(qpWh{g&Zm z3z?J>AlA<-^b~DnkUmp)fMR96kq$3h$l7|++y7Y9kImux=hllkLmTOVwT~okm#JI& zP1BFhP4-Q?ztuVin(GLrw75Me&dHrzoc~3FRfB1CD)_rOQrv`YC}Px`2Ih-Cuoe%2 z3Lfhj#L3Aklp`>a`=%G@d&Dx$Q?!aG%CSv(Q=`x^o+V9~3EA;!Z<(){z9pI-)2Ou}N3c-RsiG-?>ce8sn(hDp0n7zwIqgcDz(K#kZy z5FF2~UEsq0BZG^m;S5bCY7^cyC{&r`l@+QYwTeQOBVfjrVYZ_U&(wgA2QigzJ6g@i zjEnieiKnBb2GplrO5;AwGA0O)rg|dV#VAp?T)Z7*4BudyK4)Go`X&A8DR10*vN)nkO}D+NDV^BJU!SDlc?hze+B(-?&gQU5VT#-PFPvCX|h+ zf`cCkmum1^#zi)3Ycl2{A_K|?LJF5cGPEKBJK;WG;uG|gAXFI43 zF+}e0HKWZdo9FUtuY;;R3K$%Mn#pa8Lrljmoi5&;|DEzUjHwhEoYoIVSv(=j-u}O^ z{^$Oa{c8UA;p4-H`k(jm>oMHYcofE%%s7*z`My=R<#2iWHJagf$aD4Gr%uX{8xv%P zejWAY6Juuh-46vdN9bVx&;KEnef#|ujDJD7x%*?an;*zHB&2&a@ax0NacGZP5H9<*?3c!*?s#Hj)gb5Z}m7ZpHd5At7mCreQ81%1x ziIB^YxIE5jtx)zed533dLVnqvDwWEky}i=ZJ?2rS>43(2HwSxm3wZC~V1GcRGMnaJ zLd;anl1IfAaW0DtcGL$sBL#yywJPstt$k9h^q%*gTSD-Ygc0)XcO!oFOO@L|4O6Y)c&arqadL( zS{YXL`6P&Ik>7p^|{q2TQ5>h;wTa>=H-V=2Ul zW^in!CO5FAA57BfppxQS4>_8WMwa&dHz^KfN}_xcUj!q@^uhz$``-fOVc4jKd}v7xgO&42GE|04KVV4Y`XBE zHkIlIic?h?Kn$j+V(}ij4c-*h6uo|{POcip`;HBtK7QOX z#YQiALZi7R$^88aY=s8gU`36=$TQI#y9JEAKjD$Rw|Y=KzNnC0sEV~eGyLw@Wd9#o zLl>o48$A#?;CHB;$P8e>8n^OcodteZ{c=XxlIX#)u7th*Wq!6RHcPoj=*l|zV-|yJ zk(Kj_U40n59*={&l{Q>1w)?w|mEldplx<3ql$feW5ob37Bw%eA7I6)YxV)nvc%l(Kj?FQYw50O ziI@IvL(%2>#{Won4**-fXIf}lwOigU?3SVh`e4N0EO$H0^kBbQ2>VqB?1kXJT?qc$ zZtx2+yjh6h%|Z+dkz6iBa@hyTLOf>+@tpO6J@NEK`DQW7H_M~!i~Vvj_RBkA?@P$pVnWW=MMx)2Q1Sc~Tq6+tzd!E}{`SlN z?~6v#7W=KHxux|8V=iV`9eE6SQ^*#V*2AOeNz&VFL_#sbQ3xjr{Pop_R3guB`7;1> z8odIl({U|8U0_^()4vw{!g&de#~hm2st29#8>PgUoXv2p8tM^?mGVaoYAh3h|*;bpdKJ~Wjz5TlP|7A*|aLh$GrITrB)97fj zWHI0c`~L@r`_HQPza2h5eAxfLk6#a?kn%XhOwqg$w9yD$)G06B%%F%==C*>Cg}Ri- z$bm8ob)DH3NpT=BI3O4ks||T_LHNSEk_KA=BA zr!2wo#=EE_(8KkJqM3&GZDk<~{0E!Ezhmf#}^*HO0%OTNLrT@)B3Z%`Iz6$)p zvbBn}FUqZl`HMP&jk>o|jRmM+chaJavV{x@bB7ISpC zzyGB5qp4Eq(ca$e?d=c)njsgHy~LoBdypvm=IrX#H9EdHMJKN>PS4+*zrMIe-@Lv; zKU|;fqN}sZtJkMLoao29(CYO3`pwn(*FWed0N`MVPV?)0<L%Cb_=-daNKFOL zCR6k`#BDKVm#kQ&i;Egz5l`oWPNoX+TSf%p0uiRDnj@U4DHpbyid#@^_yVf$QHdr3 zGsp*0MbCN!kO@xE8Gx=4kul96P)`t!0E(l6#Rw;f1p{Zd%_~ZzL2Z-ab`ciD%Zx*# z`L5G{CJg$RHOru|qG6m5aB=?gT zuBhN+b*t4Y&nJ$_E-EDoTQH`&{7kJ^B}kRGoCpOg04)L}m$$4|7!m ziKdv*2)hBI1ZL9YU08*JCyD)Yj4(3d0dTuz1}%&Vn>vi8P?HgevusWXgL0d3nPK00 znH0hNMh`P~XC$T=Vn2`VpSXBiGYYp{yakHE4rxs*h5%)bjeGzY(OI@;I3}nl7h}f1 zXP0&n6!o%&F|{h;+%7ol#<-#p$<3P$Z)1&Hn-!(BZvhE8YAuLu%#ckcHwOrz=u+%% z0Wo*N(0ArKjh#XYr$o>jtmq9v0mvOU;#wdwsA_=5(JTYYy@h0C;0VtTEa;&aYaCkp z1)mvPrBOk5!JSx8PpdEnB!)`5I6;EkQ0Pjn#f&REP7q1(hzs`@7p|NA(PV*XF9&BS zLHZcoPI&@@0=c7-4Sim>$IecTOT&K`RmEqeXDWl4smU!o2f+w3!!#eCB*g+WMau=) zm=QseIZ7yd3xXR_271EqjO@5hhqCfx9D$SA^?YP5)EZH6T?!u;y?&xix%KglJzgCH z`H=PSCnJReF8dci;>BEyVS@AzT%>l_+4*MC~fsQ!%5=Iw#-H|s;u#F 
zafiK{BuX+M&khAZ1a`a@R=8dUch~7H{6IxpWk9d>eMF`>86!S!4_^9tdlck61$pMa z_V>A2=3@j~-hwk4?do0_;RN*PR_N~-1Roi*(nop}_(CUzptPKnECvh6U+#9gqTF`* zI5_iYLNiSDyc(`bkh{Li%0nSj^;h~PoJyGy?Ls2(k+y~De(m{8w35eKzR-6)Gg8t5 zPuf~;F_lp!!OH{28Q4o3M*ajAuW(J|ofATtSEr8=XELQx#xt4B(F}{XB+iYJj*eW& zMI@atu>F)lAAw*SHBlR@;DRfJkv~?4L462U;_IAkHv$)iJiY+6eVkR0LT#QAAq)u; z5wPT=c?nN32qpO~BTOY4ZNx>&jl0$XuQ&XRAvzqQZ*^Fu(Vpb&yT}S%XU4f&U2UAq zc*D%M$^=JK_#7u{J{Su^b`ceV-0%pB!B)Mwo&T9z`pj&L?CNkZ)p}gBbcH!jvr$5$WUdu8 zP4IlTc#;xfT&;vBHn8yLZGM=a+hPcoYZx2cIoKA{L65x-y2RS({sDSt+m^h=62bLa zHbkO%afTg~8eYBLo8h-)*OXk^L_rn-gS;|v7R9aZ3g_#F=zOgE zKc5RpMU~d+yi*jN*we2QtY3qXvlI92!eQmHwct_?Kwt=xdNUnAnZGGR7$x{tW>jfb z6EZPQ0IQt%A||L<#V!K}KEV(!?Nqyf6BV5^cWk--m_ekfro@C|WleM;lbej%k>VyA z#Tc_L&qYLs#NP4TQ=J@dc@7`g*^AjUeWkwBiAZwM06l$t8tUg(?84)w3bCOKKs5Cf--Ir1G^fFo3w2 zLqMAnq1lA%Az;J_RX8qWBls$#Wue78+GyqdZzq1btAmTzZ_ZE70tB04THbotSscim zm~}sYba^JAVWiX~3;pGRa#K%?1i>-PF$<+5jbe2N6@2kqqXw42Faj*>_K~Xxrdh&` zas_3>3MB+fJ)QDb>Fp=QXoVZ4*F8Wu!m=aYKaZEH$Mf9%v@#dEYQDW_rS`0}F!1 z5WSh&?OIk+Ln{@pzae-qQ|Ww>4=3K_RYxdga_rawi#0E;&F5|d$Fcsm(DNN%BRwdN zbSt4gBVpG_U(&dwIxwHYOd};M&SoxbEoqE1EN1@ay0dE2K)~LW3yp&bR;$p++}5Z> zR@FZvn6`b3MiCXW3J6_D>M+xhfgvWD5hhm7ve>^4H0~?wAZj@WWacP zMSR>q&2BMlToL`|sffQimk%X?r%`*Wc?AKr-Kvro&ZF*#&rB$-^{BKglh0HtGl5cv zJcY@Q-3c&r)glU%L-Yenh?LN2pX5u}HK*CiH_i7w0nsUpRcXd;MF{J9+=Fe4*y93&rNV0d$Z2r|LK>?>909(FI_ z*$};P0f@U&#l_JvC+3<52)Vr-AuV{jp=QDtw~c{kM0yd8)DuE+L!smm{K*BfI&2y{ zrF9hV7OPHnn(EHPfKCX4C5QYL)1{Dbi^UwMp$x@DMuLuPSLBlpI-<SvQ(ozgch4 z-~x=Ko)6I}l`wT8Lbv(}3oVy;=ox5W$aYw=NH9Efz(AUS;=hsfSKb^n%?)49E&`-x#SH~A`&d;vV z>nnd-{`EKL_~IYv`}2#_T|}ta0e%NJ2^E}Buq&~*Oj?W}SSQ8KPR-G+5fvqsaa3$9 zyg7gK@@yAfyuKKmUwm_Qe(~+utFwzYyXe)~)ya3q7jKTgK7V=s<{zL!-<-d>IJ-9K z1IHG|<^OGN59$%r$AFeK6U!NIoYjz70lIXdbOgWPjb{$}6#>@)K>Vnf$@KjJ8 zo&&>Ul)+jl=wD%Rz4cPF+9+iuI?7CY6Ha549mTHH>S;zIdL28>J+Iw33~JG=RA z_ikw^H(&W#sPEe*P>?7l2^~Q|2gFSTmoneCa3~eRk&>`G-aI~x^(?s$F3`wzSwaD^ zT@iq;#WOr9ui@*jT}nWa8~}Ib7mI9^MKsodt=US@0gPGGr8sdworR93Sc`}VgoW8N z*IqpL)iO&|b)pN>%yJu(nJ1Ll&h+f6zhbc6xhL+Z(rhQ(DC~su_?9OA+TmLy6;D$< zA-fP0WSa0Xrb#A@H^Rv{V?}TSPOu>pB;vD~R$gBo1~HL4yP$_~Z**lL&cetS({X%5 zVUu)h(>J7~RtPS+!Gbp<;BP~89BH4RrQj^D#_U~dpm$`(sLIDs*S(aL$A%cFXL_F_w}DJ$&pU>TZ4Acr>@cM#0crOQ4Wx9?ai*+USLiIW0zk z&n`~2Uu{Z*gZIammuDBJ=l^}AyAW0j(lnXd^aVe4Prn0FZu7k)gwUIwAMDyRit=iL z3#&Ozh%mX#W;VB5%;d+EB(X$i~X{wy^EQxi9pVMJy2cFyZ_GTete9jbpmv4l^q}~uP zL_ZOP6UmVvrnOxsbyghO8ohCse898|!pzh;W4ErdZ^_BAU`0XDN%#L@l+m8(5(|d#6p2c2y>=0a{x9 zh`s4jS#x&%Tu`*F8|3MhJO4|Aa8nU&1B~agE9WkPr_9bk!7Ap;lt(w-grARKp$wN+ z>zoc_YxO(Nroo`P52EFteVtk7fOmyUDo*;K4kR&PJRvf|5sa{Qg04n5YT>9_tt!G{M1*z1C3?DBI(+7$ zFLdhfnxG#9tsyoY6_&PF!c0&fWm}sHvf@&hYjcxG8?xG!BVocPGSsd*B=3~K4agUM zQ|+17@AOmvKJJAjZ(U*bTf1q5PT;@+)W2@y*KII`=5Ru&qq+vEeG zHMkAE|GEc+4q}>`RRv_a9Kp?nt%9<80n@G~rv3Ld1XQ8Cc6mDns=V@q(Ujk^kns_Z z=iv=cvc}F|{83vcHhow`Y|A06e%BW#iQa|@W!by%EfI_~D$}BGDzY}!#oAb)Jd16j!x8mTj9^jd|depQHMaDub z!%XI7UYfA?`?96O7Jh9sZ56!c%e&z<&RtK9wVe*fO&z-`s8uP;hr%kf)uU})>fZ=_sCAGC2ko4<{hDTD7S&k*0u#=1}A;l1br*;7&DAcv7C-L z7IB?|iD?qXu1Q#&mXx8;l8_az*YlHV2X=ne8-5k4q->IqqOA|8`mRAjmx9lTnvzVS zt1KaP`J{P+Q0NBDB57|`rMKF$3rfeg<|?gFMgpfK)bS3_lvZ5M;x?F_-#FP^@R%T1 zJ-WVt$`la{Cw207RlG5a~^dg5TC^xG_je$UFU)Pr=%WoXGtNiYiS9z788qmhO*-BTK z`-m2|uG+jM?n+;XsQI&A7~M^)3vsxHy`Z+-F1Qd$%XhtSHr|`J5M$$4ReYV-9r-9a z8&;wAxQNGx-J`iv@avRBus6JTSGVm3kkTk>X;Wor={psCBujtX3ww|1-wTQ80i%PK z6;Ln2wDsr_DsTVmM~_ertGva(wS|u37=8EV&1HCb_4>d6u?zpcHh)0w+{J7A2Vpl< za0V%QH&{Rlx2#+}wS{7m5*ACu8T#2=I3V?5JPhw!x z6@b-8rnK$W)~}7FTFd;~OSOKrUmlAI&?2|C256Zwu^{)_h^mOboC~qFl~>C(H?;(F zA_GLn$VS45N;IXDX)-U$bnTk*!~gSt{%`(qz!kaBA>X?zlrI$H@E6iYJrii2V%_?g(r3yC{LP|HEFo(oJlk 
zI{_RDOL0Vkbt}+?gKW!~&tDfhbFueU;1HW4@L~)XeqJy$$+Oy4rln?0FsBlM5Skv{+s-YZe8j(j$@Onn_?+R>`Xy1-W@RmOAOKg>;ajH zISGq^Dr7Wb35hObG(ZRY?Ka@d!%7*Nkr6d_H&+eW;uNTLN>rG;%t_o&ig11dFx^RV zIV)>*n;j`(%`$Gv^zz28$ocyD>`WN z$FTl;kaD5N_g5RSnaDyf2^TWonx$E!w&gOk`KMs+%qtY=*e%c&4*n(kg&Z{$>riA~1}g5U)0@*Dz1YwsG8*_lTccGzK$t`}6XS*?bHHKBJend`0ul)*kzQ5kvLtz(Ns_$Q0l}rhQRZ-^{O>n{j#P$)WD> z?1l(iY7SGT?j(*X(Vi&2dV!t{|28~$zJZXZi3LD&tQ?Jdf=r+O#Ke6cmllSu>)-(7mH$M1MiZ@rJI;F99RA+OP7*qKk5o; zAMEe%!++K-<{vZ<+L;Xy`WD#Gf2=4|<8@Y(X+@ZUfwj~ET2J~myhgL?X`%Is4PI*K z1Zw+8*!P-2opk45m$IU(a`mZ&oqLY;h&KnfUd8mlekS7Dka~KI-n)jj^Y*EwE;v_f zk?;Om1*XaRM56JJ(5)sJNg^H9p;U0#DZDa3PYuj<$jPU1!(v+bh@U6B1%mS?E_Oc5 zZ!aChvm9Vs7D?CY%ZjuYGF@6I{mvAwEF2Bq>=tYAr~q%u0H!Lxl=b@TrN#u4JFKXe z@N{S4_2co=DP}8u8xZ@3CHm=vSR80vMQZM{_yWqBBvk;s!yxTGPP!(3O;Su@zTny{ zhi*;M=w{>&66$lDb)9nB^Z@I_&u3_`nh#)h2!3go0e9$lNRUR7x{SR|`ie%@em8yK zPV27UrI>q-TsMH#;4_z_SL&)y(nF5%02S++@V~+L=PzFdD7gN|b$I#a>iFbr{R^2d zH6lK)iecHl-b|0)Z@2vic24Ia>Ly0tu6HShned*UZoKVX(UeI7)vJet70rFxHDnS zGZbNF@^wwzkz%1J+&g5GY&Cy55&b&1C%46jC>TLMdu;r&?k+C+2RV$@=e`uHX0MYk zf4i}x&x&*9dluZFknY~y#k;B4t6Gf$*fr^r7@3y^-@VwVLiF|q3jvy>*kH_)n1}#< z;Z?)fkZRZwK0xRjKw5cQh`MG4s?|6u5^S%s(J<_b#t0hU^ME%v`}ATvSKjc>Gf2O> zx1nBSkwS8WASK2I$aGQJ44ilZ-FD*KsSSlH%e}kL8~a6r0W|10@~fB|MSQ=KY?n=E z!4_X;@xL;ukZoX(Eh&!7hj1C5xWNfpT11@$c%rj!)-Jd=fInx9GGuWZ%TcOTQxRjY z!ZTZ2=tpxB5t4x1d*t2^QEknr1PC(TUDCe~K=<=47jF}eV`kMCO9hyS z3uIvCFlytfM?s`ww3d28lkyFwiG=IH9QT?i%FHeriH$I}YAiQZOxoH)U;FU$OQ13q zaS?MyVl&z)bL^}~zsq@$-36IyM4H?6O#=dM^iD+On%YKGb9JXhWNAL_IoTj4F`Ofb zhyEd;BjgVv%;NTjZh$FFdmk8YVb{ai#&A;^s&%kin2$B^kppYvqFxAV9XzbDCoHHo z`uh~!GQ~nFCUuU2isKj*A^yq5`Cm*vG*lxng$NWk)&oV1qC7uU!b#IOOU!LdU|Mxpr$4tRL!okr&1wM`#wrfa zU4d2@>2*rBGnDGBZeb*8yPgdqNlUWSJi<@{c`=SouFsG1r>?llEtbd|1V&EQ4(-D1 z9B!^=97U1>Pbe1jhQNK*R6A-jXsKGWDgr-C%BZUlU7O_Kn}PeL8VNVb(^aK0@lZ*2 zCnvAgDu+(${1PCI_4TULV84i-H|@AQ{Tj{iJCw1!*igS&tq*UlN2kh_{c5v$zj~iH zXLm2$V);0KGB8i<9)Yz9yWY+F-Z^u)34VPuoXav%#Y%lsxxosji^~T#ot4O6>X|)g zgEn`{to<3`3t~UK&qFm#8 zdRWH`r0037C5Q_;I=j^veBK?|r>9)e%cj1$NikgO+14dGA~tJ)NRTlRg!L?H)JaJ` z_AF?eQhOxttNhtp%HrZLI2D}#m2W41)zPDi7k><}K%n>dd&*+}?+dr3@uosU zvDxwaSCy91rCVF>psp-H>`R)|}w*c-sm%H}ds8?7;Q+N0l_D*?x%oKH-IObKck5bi{ zr7P@~jxf7#iy4)6aPAABXNvu4yi3#N3>jQ@R^Lb7OOqOGWR$I14!Bux{^8qCCd3}p z*Ob~d4vK$0RRC1|3IR3 I;{bXB0Gzaq9smFU literal 0 HcmV?d00001 From 62f082e6573bd9d699b4467204085de3308ff67e Mon Sep 17 00:00:00 2001 From: Aleksandr Zimin Date: Wed, 19 Jun 2024 02:27:40 +0300 Subject: [PATCH 18/21] some change in lib Signed-off-by: Aleksandr Zimin --- charts/deckhouse_lib_helm-1.22.0.tgz | Bin 24121 -> 0 bytes charts/deckhouse_lib_helm/Chart.yaml | 5 + charts/deckhouse_lib_helm/LICENSE | 201 +++ charts/deckhouse_lib_helm/README.md | 1105 +++++++++++++++++ .../templates/_csi_controller.tpl | 700 +++++++++++ .../templates/_csi_node.tpl | 193 +++ .../templates/_envs_for_proxy.tpl | 30 + .../templates/_high_availability.tpl | 39 + .../templates/_kube_rbac_proxy.tpl | 21 + .../templates/_module_documentation_uri.tpl | 15 + .../templates/_module_ephemeral_storage.tpl | 15 + .../_module_generate_common_name.tpl | 13 + .../templates/_module_https.tpl | 160 +++ .../templates/_module_image.tpl | 76 ++ .../templates/_module_ingress_class.tpl | 13 + .../templates/_module_init_container.tpl | 56 + .../templates/_module_labels.tpl | 15 + .../templates/_module_name.tpl | 11 + .../templates/_module_public_domain.tpl | 11 + .../templates/_module_security_context.tpl | 183 +++ .../templates/_module_storage_class.tpl | 38 + .../_monitoring_grafana_dashboards.tpl | 68 + .../_monitoring_prometheus_rules.tpl | 96 
++ .../templates/_node_affinity.tpl | 256 ++++ .../templates/_pod_disruption_budget.tpl | 6 + .../templates/_priority_class.tpl | 9 + .../templates/_resources_management.tpl | 160 +++ .../templates/_spec_for_high_availability.tpl | 138 ++ 28 files changed, 3633 insertions(+) delete mode 100644 charts/deckhouse_lib_helm-1.22.0.tgz create mode 100644 charts/deckhouse_lib_helm/Chart.yaml create mode 100644 charts/deckhouse_lib_helm/LICENSE create mode 100644 charts/deckhouse_lib_helm/README.md create mode 100644 charts/deckhouse_lib_helm/templates/_csi_controller.tpl create mode 100644 charts/deckhouse_lib_helm/templates/_csi_node.tpl create mode 100644 charts/deckhouse_lib_helm/templates/_envs_for_proxy.tpl create mode 100644 charts/deckhouse_lib_helm/templates/_high_availability.tpl create mode 100644 charts/deckhouse_lib_helm/templates/_kube_rbac_proxy.tpl create mode 100644 charts/deckhouse_lib_helm/templates/_module_documentation_uri.tpl create mode 100644 charts/deckhouse_lib_helm/templates/_module_ephemeral_storage.tpl create mode 100644 charts/deckhouse_lib_helm/templates/_module_generate_common_name.tpl create mode 100644 charts/deckhouse_lib_helm/templates/_module_https.tpl create mode 100644 charts/deckhouse_lib_helm/templates/_module_image.tpl create mode 100644 charts/deckhouse_lib_helm/templates/_module_ingress_class.tpl create mode 100644 charts/deckhouse_lib_helm/templates/_module_init_container.tpl create mode 100644 charts/deckhouse_lib_helm/templates/_module_labels.tpl create mode 100644 charts/deckhouse_lib_helm/templates/_module_name.tpl create mode 100644 charts/deckhouse_lib_helm/templates/_module_public_domain.tpl create mode 100644 charts/deckhouse_lib_helm/templates/_module_security_context.tpl create mode 100644 charts/deckhouse_lib_helm/templates/_module_storage_class.tpl create mode 100644 charts/deckhouse_lib_helm/templates/_monitoring_grafana_dashboards.tpl create mode 100644 charts/deckhouse_lib_helm/templates/_monitoring_prometheus_rules.tpl create mode 100644 charts/deckhouse_lib_helm/templates/_node_affinity.tpl create mode 100644 charts/deckhouse_lib_helm/templates/_pod_disruption_budget.tpl create mode 100644 charts/deckhouse_lib_helm/templates/_priority_class.tpl create mode 100644 charts/deckhouse_lib_helm/templates/_resources_management.tpl create mode 100644 charts/deckhouse_lib_helm/templates/_spec_for_high_availability.tpl diff --git a/charts/deckhouse_lib_helm-1.22.0.tgz b/charts/deckhouse_lib_helm-1.22.0.tgz deleted file mode 100644 index 7a6b077dc75833f62190350aa20b469f08b953f1..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 24121 zcmV)$K#sp3iwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0POwyb{jXcD2(sF^%R(PW{s4eNL_rJ(HUp`6iG>RV$1p@C)u+y z`8r@XNMhUsIsjTS$M#z1AtZsDOP^c;Zg+ffCw^N=; z5+-yMPDwJ`JDFmkhI2ei{%bwI{r&y@XHTBM|L*VaSO0ha@Zj*j4xT(a+<*M!@w2DT z|7-u?>HhxX|3dp~!r%GJq{8CA_V0|V%G~edhg13^5t4Ftgl-PEVj?3!Qw4v2N0J%J z6ip;jWR@ma5fqa#WmLbHXv_sVb^3#5JkAm#hg%HK$Pub5)|Q&57=i;^rRDA1!8hHrVehBM6Qm`Ei9w4G3?&=7qQaWTl?b z@J#uL8s2){`fPu{q)g8CGytl;>VRT1k0-Vz~OB;?+D zunzW{3#9J^I@0Thh1_}((3xlqT~Md*Zsfx zX8mn8B7zY`oDb6u;q09vg5l({_#&tiHhhKu#YNL|%DC8R9;Ugytb128!3iTin{gIW z-7sW49Ala|;&PwZ$jpw8kL?5UYXW+;?Kvp2KK59 zl(_SZT~7|Li!biPWfk)pIBjC4#BC`8Ucm25zwMnktz%sSug$FZ+|qG_NdT(y_uDO| zj+n^om01fxAJ7y_6xaYT+#{MUhg@0c)4eiwO5 z;Id832_BI|)&#p9(@3EJr)f}++Jjpz-X-?xiwi*N&x{R)2+b%vP7;1g;v@T{BKwxv zbT0HlNPl%2iqH(dt3ieN<>CG;e^8odHWTd3dv9u+77#W;svD%0xtoPrXT-Ma%}2x9 
diff --git a/charts/deckhouse_lib_helm/Chart.yaml b/charts/deckhouse_lib_helm/Chart.yaml
new file mode 100644
index 0000000..ccc0e4e
--- /dev/null
+++ b/charts/deckhouse_lib_helm/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v2
+description: Helm utils template definitions for Deckhouse modules.
+name: deckhouse_lib_helm
+type: library
+version: 1.22.0
diff --git a/charts/deckhouse_lib_helm/LICENSE b/charts/deckhouse_lib_helm/LICENSE
new file mode 100644
index 0000000..13fe0e3
--- /dev/null
+++ b/charts/deckhouse_lib_helm/LICENSE
@@ -0,0 +1,201 @@
+ Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity.
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright The Events Exporter authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
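A minimal usage sketch (not part of this patch): the Chart.yaml above declares deckhouse_lib_helm as a Helm library chart, so a module that vendors it under charts/ can include the helpers listed in the README that follows. The sketch below assumes the helm_lib_module_labels helper and its (list . [extra-labels dict]) argument form documented in that README; the resource name, namespace, and the "app" label value are illustrative assumptions.

    # Hypothetical module template consuming the vendored library chart.
    # name, namespace and the "app" label value below are assumptions.
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: example-config
      namespace: d8-csi-ceph
      {{- include "helm_lib_module_labels" (list . (dict "app" "example")) | nindent 2 }}
    data:
      example.conf: |
        key = value

The helper is expected to render a labels: block with the standard heritage/module labels plus the extra labels passed in the dict, which is why it is included directly under metadata with nindent 2.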
diff --git a/charts/deckhouse_lib_helm/README.md b/charts/deckhouse_lib_helm/README.md new file mode 100644 index 0000000..b120623 --- /dev/null +++ b/charts/deckhouse_lib_helm/README.md @@ -0,0 +1,1105 @@ +# Helm library for Deckhouse modules + +## Table of contents + +| Table of contents | +|---| +| **Envs For Proxy** | +| [helm_lib_envs_for_proxy](#helm_lib_envs_for_proxy) | +| **High Availability** | +| [helm_lib_is_ha_to_value](#helm_lib_is_ha_to_value) | +| [helm_lib_ha_enabled](#helm_lib_ha_enabled) | +| **Kube Rbac Proxy** | +| [helm_lib_kube_rbac_proxy_ca_certificate](#helm_lib_kube_rbac_proxy_ca_certificate) | +| **Module Documentation Uri** | +| [helm_lib_module_documentation_uri](#helm_lib_module_documentation_uri) | +| **Module Ephemeral Storage** | +| [helm_lib_module_ephemeral_storage_logs_with_extra](#helm_lib_module_ephemeral_storage_logs_with_extra) | +| [helm_lib_module_ephemeral_storage_only_logs](#helm_lib_module_ephemeral_storage_only_logs) | +| **Module Generate Common Name** | +| [helm_lib_module_generate_common_name](#helm_lib_module_generate_common_name) | +| **Module Https** | +| [helm_lib_module_uri_scheme](#helm_lib_module_uri_scheme) | +| [helm_lib_module_https_mode](#helm_lib_module_https_mode) | +| [helm_lib_module_https_cert_manager_cluster_issuer_name](#helm_lib_module_https_cert_manager_cluster_issuer_name) | +| [helm_lib_module_https_ingress_tls_enabled](#helm_lib_module_https_ingress_tls_enabled) | +| [helm_lib_module_https_copy_custom_certificate](#helm_lib_module_https_copy_custom_certificate) | +| [helm_lib_module_https_secret_name](#helm_lib_module_https_secret_name) | +| **Module Image** | +| [helm_lib_module_image](#helm_lib_module_image) | +| [helm_lib_module_image_no_fail](#helm_lib_module_image_no_fail) | +| [helm_lib_module_common_image](#helm_lib_module_common_image) | +| [helm_lib_module_common_image_no_fail](#helm_lib_module_common_image_no_fail) | +| **Module Ingress Class** | +| [helm_lib_module_ingress_class](#helm_lib_module_ingress_class) | +| **Module Init Container** | +| [helm_lib_module_init_container_chown_nobody_volume](#helm_lib_module_init_container_chown_nobody_volume) | +| [helm_lib_module_init_container_chown_deckhouse_volume](#helm_lib_module_init_container_chown_deckhouse_volume) | +| [helm_lib_module_init_container_check_linux_kernel](#helm_lib_module_init_container_check_linux_kernel) | +| **Module Labels** | +| [helm_lib_module_labels](#helm_lib_module_labels) | +| **Module Public Domain** | +| [helm_lib_module_public_domain](#helm_lib_module_public_domain) | +| **Module Security Context** | +| [helm_lib_module_pod_security_context_run_as_user_custom](#helm_lib_module_pod_security_context_run_as_user_custom) | +| [helm_lib_module_pod_security_context_run_as_user_nobody](#helm_lib_module_pod_security_context_run_as_user_nobody) | +| [helm_lib_module_pod_security_context_run_as_user_nobody_with_writable_fs](#helm_lib_module_pod_security_context_run_as_user_nobody_with_writable_fs) | +| [helm_lib_module_pod_security_context_run_as_user_deckhouse](#helm_lib_module_pod_security_context_run_as_user_deckhouse) | +| [helm_lib_module_pod_security_context_run_as_user_deckhouse_with_writable_fs](#helm_lib_module_pod_security_context_run_as_user_deckhouse_with_writable_fs) | +| [helm_lib_module_pod_security_context_run_as_user_root](#helm_lib_module_pod_security_context_run_as_user_root) | +| [helm_lib_module_pod_security_context_runtime_default](#helm_lib_module_pod_security_context_runtime_default) | +| 
[helm_lib_module_container_security_context_not_allow_privilege_escalation](#helm_lib_module_container_security_context_not_allow_privilege_escalation) | +| [helm_lib_module_container_security_context_read_only_root_filesystem_with_selinux](#helm_lib_module_container_security_context_read_only_root_filesystem_with_selinux) | +| [helm_lib_module_container_security_context_read_only_root_filesystem](#helm_lib_module_container_security_context_read_only_root_filesystem) | +| [helm_lib_module_container_security_context_privileged](#helm_lib_module_container_security_context_privileged) | +| [helm_lib_module_container_security_context_escalated_sys_admin_privileged](#helm_lib_module_container_security_context_escalated_sys_admin_privileged) | +| [helm_lib_module_container_security_context_privileged_read_only_root_filesystem](#helm_lib_module_container_security_context_privileged_read_only_root_filesystem) | +| [helm_lib_module_container_security_context_read_only_root_filesystem_capabilities_drop_all](#helm_lib_module_container_security_context_read_only_root_filesystem_capabilities_drop_all) | +| [helm_lib_module_container_security_context_read_only_root_filesystem_capabilities_drop_all_and_add](#helm_lib_module_container_security_context_read_only_root_filesystem_capabilities_drop_all_and_add) | +| [helm_lib_module_container_security_context_capabilities_drop_all_and_add](#helm_lib_module_container_security_context_capabilities_drop_all_and_add) | +| [helm_lib_module_container_security_context_capabilities_drop_all_and_run_as_user_custom](#helm_lib_module_container_security_context_capabilities_drop_all_and_run_as_user_custom) | +| **Module Storage Class** | +| [helm_lib_module_storage_class_annotations](#helm_lib_module_storage_class_annotations) | +| **Monitoring Grafana Dashboards** | +| [helm_lib_grafana_dashboard_definitions_recursion](#helm_lib_grafana_dashboard_definitions_recursion) | +| [helm_lib_grafana_dashboard_definitions](#helm_lib_grafana_dashboard_definitions) | +| [helm_lib_single_dashboard](#helm_lib_single_dashboard) | +| **Monitoring Prometheus Rules** | +| [helm_lib_prometheus_rules_recursion](#helm_lib_prometheus_rules_recursion) | +| [helm_lib_prometheus_rules](#helm_lib_prometheus_rules) | +| [helm_lib_prometheus_target_scrape_timeout_seconds](#helm_lib_prometheus_target_scrape_timeout_seconds) | +| **Node Affinity** | +| [helm_lib_internal_check_node_selector_strategy](#helm_lib_internal_check_node_selector_strategy) | +| [helm_lib_node_selector](#helm_lib_node_selector) | +| [helm_lib_tolerations](#helm_lib_tolerations) | +| [_helm_lib_cloud_or_hybrid_cluster](#_helm_lib_cloud_or_hybrid_cluster) | +| [helm_lib_internal_check_tolerations_strategy](#helm_lib_internal_check_tolerations_strategy) | +| [_helm_lib_any_node_tolerations](#_helm_lib_any_node_tolerations) | +| [_helm_lib_wildcard_tolerations](#_helm_lib_wildcard_tolerations) | +| [_helm_lib_monitoring_tolerations](#_helm_lib_monitoring_tolerations) | +| [_helm_lib_frontend_tolerations](#_helm_lib_frontend_tolerations) | +| [_helm_lib_system_tolerations](#_helm_lib_system_tolerations) | +| [_helm_lib_additional_tolerations_uninitialized](#_helm_lib_additional_tolerations_uninitialized) | +| [_helm_lib_additional_tolerations_node_problems](#_helm_lib_additional_tolerations_node_problems) | +| [_helm_lib_additional_tolerations_storage_problems](#_helm_lib_additional_tolerations_storage_problems) | +| [_helm_lib_additional_tolerations_no_csi](#_helm_lib_additional_tolerations_no_csi) | +| 
[_helm_lib_additional_tolerations_cloud_provider_uninitialized](#_helm_lib_additional_tolerations_cloud_provider_uninitialized) | +| **Pod Disruption Budget** | +| [helm_lib_pdb_daemonset](#helm_lib_pdb_daemonset) | +| **Priority Class** | +| [helm_lib_priority_class](#helm_lib_priority_class) | +| **Resources Management** | +| [helm_lib_resources_management_pod_resources](#helm_lib_resources_management_pod_resources) | +| [helm_lib_resources_management_original_pod_resources](#helm_lib_resources_management_original_pod_resources) | +| [helm_lib_resources_management_vpa_spec](#helm_lib_resources_management_vpa_spec) | +| [helm_lib_resources_management_cpu_units_to_millicores](#helm_lib_resources_management_cpu_units_to_millicores) | +| [helm_lib_resources_management_memory_units_to_bytes](#helm_lib_resources_management_memory_units_to_bytes) | +| [helm_lib_vpa_kube_rbac_proxy_resources](#helm_lib_vpa_kube_rbac_proxy_resources) | +| [helm_lib_container_kube_rbac_proxy_resources](#helm_lib_container_kube_rbac_proxy_resources) | +| **Spec For High Availability** | +| [helm_lib_pod_anti_affinity_for_ha](#helm_lib_pod_anti_affinity_for_ha) | +| [helm_lib_deployment_on_master_strategy_and_replicas_for_ha](#helm_lib_deployment_on_master_strategy_and_replicas_for_ha) | +| [helm_lib_deployment_on_master_custom_strategy_and_replicas_for_ha](#helm_lib_deployment_on_master_custom_strategy_and_replicas_for_ha) | +| [helm_lib_deployment_strategy_and_replicas_for_ha](#helm_lib_deployment_strategy_and_replicas_for_ha) | + +## Envs For Proxy + +### helm_lib_envs_for_proxy + + Add HTTP_PROXY, HTTPS_PROXY and NO_PROXY environment variables for container + depends on [proxy settings](https://deckhouse.io/documentation/v1/deckhouse-configure-global.html#parameters-modules-proxy) + +#### Usage + +`{{ include "helm_lib_envs_for_proxy" . }} ` + +#### Arguments + +- Template context with .Values, .Chart, etc + +## High Availability + +### helm_lib_is_ha_to_value + + returns value "yes" if cluster is highly available, else — returns "no" + +#### Usage + +`{{ include "helm_lib_is_ha_to_value" (list . yes no) }} ` + +#### Arguments + +list: +- Template context with .Values, .Chart, etc +- Yes value +- No value + + +### helm_lib_ha_enabled + + returns empty value, which is treated by go template as false + +#### Usage + +`{{- if (include "helm_lib_ha_enabled" .) }} ` + +#### Arguments + +- Template context with .Values, .Chart, etc + +## Kube Rbac Proxy + +### helm_lib_kube_rbac_proxy_ca_certificate + + Renders configmap with kube-rbac-proxy CA certificate which uses to verify the kube-rbac-proxy clients. + +#### Usage + +`{{ include "helm_lib_kube_rbac_proxy_ca_certificate" (list . "namespace") }} ` + +#### Arguments + +list: +- Template context with .Values, .Chart, etc +- Namespace where CA configmap will be created + +## Module Documentation Uri + +### helm_lib_module_documentation_uri + + returns rendered documentation uri using publicDomainTemplate or deckhouse.io domains + +#### Usage + +`{{ include "helm_lib_module_documentation_uri" (list . 
"") }} ` + + +## Module Ephemeral Storage + +### helm_lib_module_ephemeral_storage_logs_with_extra + + 50Mi for container logs `log-opts.max-file * log-opts.max-size` would be added to passed value + returns ephemeral-storage size for logs with extra space + +#### Usage + +`{{ include "helm_lib_module_ephemeral_storage_logs_with_extra" 10 }} ` + +#### Arguments + +- Extra space in mebibytes + + +### helm_lib_module_ephemeral_storage_only_logs + + 50Mi for container logs `log-opts.max-file * log-opts.max-size` would be requested + returns ephemeral-storage size for only logs + +#### Usage + +`{{ include "helm_lib_module_ephemeral_storage_only_logs" . }} ` + +#### Arguments + +- Template context with .Values, .Chart, etc + +## Module Generate Common Name + +### helm_lib_module_generate_common_name + + returns the commonName parameter for use in the Certificate custom resource(cert-manager) + +#### Usage + +`{{ include "helm_lib_module_generate_common_name" (list . "") }} ` + +#### Arguments + +list: +- Template context with .Values, .Chart, etc +- Name portion + +## Module Https + +### helm_lib_module_uri_scheme + + return module uri scheme "http" or "https" + +#### Usage + +`{{ include "helm_lib_module_uri_scheme" . }} ` + +#### Arguments + +- Template context with .Values, .Chart, etc + + +### helm_lib_module_https_mode + + returns https mode for module + +#### Usage + +`{{ if (include "helm_lib_module_https_mode" .) }} ` + +#### Arguments + +- Template context with .Values, .Chart, etc + + +### helm_lib_module_https_cert_manager_cluster_issuer_name + + returns cluster issuer name + +#### Usage + +`{{ include "helm_lib_module_https_cert_manager_cluster_issuer_name" . }} ` + +#### Arguments + +- Template context with .Values, .Chart, etc + + +### helm_lib_module_https_ingress_tls_enabled + + returns not empty string if tls should enable for ingress + +#### Usage + +`{{ if (include "helm_lib_module_https_ingress_tls_enabled" .) }} ` + +#### Arguments + +- Template context with .Values, .Chart, etc + + +### helm_lib_module_https_copy_custom_certificate + + Renders secret with [custom certificate](https://deckhouse.io/documentation/v1/deckhouse-configure-global.html#parameters-modules-https-customcertificate) + in passed namespace with passed prefix + +#### Usage + +`{{ include "helm_lib_module_https_copy_custom_certificate" (list . "namespace" "secret_name_prefix") }} ` + +#### Arguments + +list: +- Template context with .Values, .Chart, etc +- Namespace +- Secret name prefix + + +### helm_lib_module_https_secret_name + + returns custom certificate name + +#### Usage + +`{{ include "helm_lib_module_https_secret_name (list . "secret_name_prefix") }} ` + +#### Arguments + +list: +- Template context with .Values, .Chart, etc +- Secret name prefix + +## Module Image + +### helm_lib_module_image + + returns image name + +#### Usage + +`{{ include "helm_lib_module_image" (list . "") }} ` + +#### Arguments + +list: +- Template context with .Values, .Chart, etc +- Container name + + +### helm_lib_module_image_no_fail + + returns image name if found + +#### Usage + +`{{ include "helm_lib_module_image_no_fail" (list . "") }} ` + +#### Arguments + +list: +- Template context with .Values, .Chart, etc +- Container name + + +### helm_lib_module_common_image + + returns image name from common module + +#### Usage + +`{{ include "helm_lib_module_common_image" (list . 
"") }} ` + +#### Arguments + +list: +- Template context with .Values, .Chart, etc +- Container name + + +### helm_lib_module_common_image_no_fail + + returns image name from common module if found + +#### Usage + +`{{ include "helm_lib_module_common_image_no_fail" (list . "") }} ` + +#### Arguments + +list: +- Template context with .Values, .Chart, etc +- Container name + +## Module Ingress Class + +### helm_lib_module_ingress_class + + returns ingress class from module settings or if not exists from global config + +#### Usage + +`{{ include "helm_lib_module_ingress_class" . }} ` + +#### Arguments + +- Template context with .Values, .Chart, etc + +## Module Init Container + +### helm_lib_module_init_container_chown_nobody_volume + + ### Migration 11.12.2020: Remove this helper with all its usages after this commit reached RockSolid + returns initContainer which chowns recursively all files and directories in passed volume + +#### Usage + +`{{ include "helm_lib_module_init_container_chown_nobody_volume" (list . "volume-name") }} ` + + + +### helm_lib_module_init_container_chown_deckhouse_volume + + returns initContainer which chowns recursively all files and directories in passed volume + +#### Usage + +`{{ include "helm_lib_module_init_container_chown_deckhouse_volume" (list . "volume-name") }} ` + + + +### helm_lib_module_init_container_check_linux_kernel + + returns initContainer which checks the kernel version on the node for compliance to semver constraint + +#### Usage + +`{{ include "helm_lib_module_init_container_check_linux_kernel" (list . ">= 4.9.17") }} ` + +#### Arguments + +list: +- Template context with .Values, .Chart, etc +- Semver constraint + +## Module Labels + +### helm_lib_module_labels + + returns deckhouse labels + +#### Usage + +`{{ include "helm_lib_module_labels" (list . (dict "app" "test" "component" "testing")) }} ` + +#### Arguments + +list: +- Template context with .Values, .Chart, etc +- Additional labels dict + +## Module Public Domain + +### helm_lib_module_public_domain + + returns rendered publicDomainTemplate to service fqdn + +#### Usage + +`{{ include "helm_lib_module_public_domain" (list . "") }} ` + +#### Arguments + +list: +- Template context with .Values, .Chart, etc +- Name portion + +## Module Security Context + +### helm_lib_module_pod_security_context_run_as_user_custom + + returns PodSecurityContext parameters for Pod with custom user and group + +#### Usage + +`{{ include "helm_lib_module_pod_security_context_run_as_user_custom" (list . 1000 1000) }} ` + +#### Arguments + +list: +- Template context with .Values, .Chart, etc +- User id +- Group id + + +### helm_lib_module_pod_security_context_run_as_user_nobody + + returns PodSecurityContext parameters for Pod with user and group "nobody" + +#### Usage + +`{{ include "helm_lib_module_pod_security_context_run_as_user_nobody" . }} ` + +#### Arguments + +- Template context with .Values, .Chart, etc + + +### helm_lib_module_pod_security_context_run_as_user_nobody_with_writable_fs + + returns PodSecurityContext parameters for Pod with user and group "nobody" with write access to mounted volumes + +#### Usage + +`{{ include "helm_lib_module_pod_security_context_run_as_user_nobody_with_writable_fs" . 
}} ` + +#### Arguments + +- Template context with .Values, .Chart, etc + + +### helm_lib_module_pod_security_context_run_as_user_deckhouse + + returns PodSecurityContext parameters for Pod with user and group "deckhouse" + +#### Usage + +`{{ include "helm_lib_module_pod_security_context_run_as_user_deckhouse" . }} ` + +#### Arguments + +- Template context with .Values, .Chart, etc + + +### helm_lib_module_pod_security_context_run_as_user_deckhouse_with_writable_fs + + returns PodSecurityContext parameters for Pod with user and group "deckhouse" with write access to mounted volumes + +#### Usage + +`{{ include "helm_lib_module_pod_security_context_run_as_user_deckhouse_with_writable_fs" . }} ` + +#### Arguments + +- Template context with .Values, .Chart, etc + + +### helm_lib_module_pod_security_context_run_as_user_root + + returns PodSecurityContext parameters for Pod with user and group 0 + +#### Usage + +`{{ include "helm_lib_module_pod_security_context_run_as_user_root" . }} ` + +#### Arguments + +- Template context with .Values, .Chart, etc + + +### helm_lib_module_pod_security_context_runtime_default + + returns PodSecurityContext parameters for Pod with seccomp profile RuntimeDefault + +#### Usage + +`{{ include "helm_lib_module_pod_security_context_runtime_default" . }} ` + +#### Arguments + +- Template context with .Values, .Chart, etc + + +### helm_lib_module_container_security_context_not_allow_privilege_escalation + + returns SecurityContext parameters for Container with allowPrivilegeEscalation false + +#### Usage + +`{{ include "helm_lib_module_container_security_context_not_allow_privilege_escalation" . }} ` + + + +### helm_lib_module_container_security_context_read_only_root_filesystem_with_selinux + + returns SecurityContext parameters for Container with read only root filesystem and options for SELinux compatibility + +#### Usage + +`{{ include "helm_lib_module_container_security_context_read_only_root_filesystem_with_selinux" . }} ` + +#### Arguments + +- Template context with .Values, .Chart, etc + + +### helm_lib_module_container_security_context_read_only_root_filesystem + + returns SecurityContext parameters for Container with read only root filesystem + +#### Usage + +`{{ include "helm_lib_module_container_security_context_read_only_root_filesystem" . }} ` + +#### Arguments + +- Template context with .Values, .Chart, etc + + +### helm_lib_module_container_security_context_privileged + + returns SecurityContext parameters for Container running privileged + +#### Usage + +`{{ include "helm_lib_module_container_security_context_privileged" . }} ` + + + +### helm_lib_module_container_security_context_escalated_sys_admin_privileged + + returns SecurityContext parameters for Container running privileged with escalation and sys_admin + +#### Usage + +`{{ include "helm_lib_module_container_security_context_escalated_sys_admin_privileged" . }} ` + + + +### helm_lib_module_container_security_context_privileged_read_only_root_filesystem + + returns SecurityContext parameters for Container running privileged with read only root filesystem + +#### Usage + +`{{ include "helm_lib_module_container_security_context_privileged_read_only_root_filesystem" . 
}} ` + +#### Arguments + +- Template context with .Values, .Chart, etc + + +### helm_lib_module_container_security_context_read_only_root_filesystem_capabilities_drop_all + + returns SecurityContext for Container with read only root filesystem and all capabilities dropped + +#### Usage + +`{{ include "helm_lib_module_container_security_context_read_only_root_filesystem_capabilities_drop_all" . }} ` + +#### Arguments + +- Template context with .Values, .Chart, etc + + +### helm_lib_module_container_security_context_read_only_root_filesystem_capabilities_drop_all_and_add + + returns SecurityContext parameters for Container with read only root filesystem, all dropped and some added capabilities + +#### Usage + +`{{ include "helm_lib_module_container_security_context_read_only_root_filesystem_capabilities_drop_all_and_add" (list . (list "KILL" "SYS_PTRACE")) }} ` + +#### Arguments + +list: +- Template context with .Values, .Chart, etc +- List of capabilities + + +### helm_lib_module_container_security_context_capabilities_drop_all_and_add + + returns SecurityContext parameters for Container with all dropped and some added capabilities + +#### Usage + +`{{ include "helm_lib_module_container_security_context_capabilities_drop_all_and_add" (list . (list "KILL" "SYS_PTRACE")) }} ` + +#### Arguments + +list: +- Template context with .Values, .Chart, etc +- List of capabilities + + +### helm_lib_module_container_security_context_capabilities_drop_all_and_run_as_user_custom + + returns SecurityContext parameters for Container with read only root filesystem, all dropped, and custom user ID + +#### Usage + +`{{ include "helm_lib_module_container_security_context_capabilities_drop_all_and_run_as_user_custom" (list . 1000 1000) }} ` + +#### Arguments + +list: +- Template context with .Values, .Chart, etc +- User id +- Group id + +## Module Storage Class + +### helm_lib_module_storage_class_annotations + + return module StorageClass annotations + +#### Usage + +`{{ include "helm_lib_module_storage_class_annotations" (list $ $index $storageClass.name) }} ` + +#### Arguments + +list: +- Template context with .Values, .Chart, etc +- Storage class index +- Storage class name + +## Monitoring Grafana Dashboards + +### helm_lib_grafana_dashboard_definitions_recursion + + returns all the dashboard-definintions from / + current dir is optional — used for recursion but you can use it for partially generating dashboards + +#### Usage + +`{{ include "helm_lib_grafana_dashboard_definitions_recursion" (list . [current dir]) }} ` + +#### Arguments + +list: +- Template context with .Values, .Chart, etc +- Dashboards root dir +- Dashboards current dir + + +### helm_lib_grafana_dashboard_definitions + + returns dashboard-definintions from monitoring/grafana-dashboards/ + +#### Usage + +`{{ include "helm_lib_grafana_dashboard_definitions" . }} ` + +#### Arguments + +- Template context with .Values, .Chart, etc + + +### helm_lib_single_dashboard + + renders a single dashboard + +#### Usage + +`{{ include "helm_lib_single_dashboard" (list . "dashboard-name" "folder" $dashboard) }} ` + +#### Arguments + +list: +- Template context with .Values, .Chart, etc +- Dashboard name +- Folder +- Dashboard definition + +## Monitoring Prometheus Rules + +### helm_lib_prometheus_rules_recursion + + returns all the prometheus rules from / + current dir is optional — used for recursion but you can use it for partially generating rules + +#### Usage + +`{{ include "helm_lib_prometheus_rules_recursion" (list . 
[current dir]) }} ` + +#### Arguments + +list: +- Template context with .Values, .Chart, etc +- Namespace for creating rules +- Rules root dir +- Current dir (optional) + + +### helm_lib_prometheus_rules + + returns all the prometheus rules from monitoring/prometheus-rules/ + +#### Usage + +`{{ include "helm_lib_prometheus_rules" (list . ) }} ` + +#### Arguments + +list: +- Template context with .Values, .Chart, etc +- Namespace for creating rules + + +### helm_lib_prometheus_target_scrape_timeout_seconds + + returns adjust timeout value to scrape interval / + +#### Usage + +`{{ include "helm_lib_prometheus_target_scrape_timeout_seconds" (list . ) }} ` + +#### Arguments + +list: +- Template context with .Values, .Chart, etc +- Target timeout in seconds + +## Node Affinity + +### helm_lib_internal_check_node_selector_strategy + + Verify node selector strategy. + + + +### helm_lib_node_selector + + Returns node selector for workloads depend on strategy. + +#### Arguments + +list: +- Template context with .Values, .Chart, etc +- strategy, one of "frontend" "monitoring" "system" "master" "any-node" "wildcard" + + +### helm_lib_tolerations + + Returns tolerations for workloads depend on strategy. + +#### Usage + +`{{ include "helm_lib_tolerations" (tuple . "any-node" "with-uninitialized" "without-storage-problems") }} ` + +#### Arguments + +list: +- Template context with .Values, .Chart, etc +- base strategy, one of "frontend" "monitoring" "system" any-node" "wildcard" +- list of additional strategies. To add strategy list it with prefix "with-", to remove strategy list it with prefix "without-". + + +### _helm_lib_cloud_or_hybrid_cluster + + Check cluster type. + Returns not empty string if this is cloud or hybrid cluster + + + +### helm_lib_internal_check_tolerations_strategy + + Verify base strategy. + Fails if strategy not in allowed list + + + +### _helm_lib_any_node_tolerations + + Base strategy for any uncordoned node in cluster. + +#### Usage + +`{{ include "helm_lib_tolerations" (tuple . "any-node") }} ` + + + +### _helm_lib_wildcard_tolerations + + Base strategy that tolerates all. + +#### Usage + +`{{ include "helm_lib_tolerations" (tuple . "wildcard") }} ` + + + +### _helm_lib_monitoring_tolerations + + Base strategy that tolerates nodes with "dedicated.deckhouse.io: monitoring" and "dedicated.deckhouse.io: system" taints. + +#### Usage + +`{{ include "helm_lib_tolerations" (tuple . "monitoring") }} ` + + + +### _helm_lib_frontend_tolerations + + Base strategy that tolerates nodes with "dedicated.deckhouse.io: frontend" taints. + +#### Usage + +`{{ include "helm_lib_tolerations" (tuple . "frontend") }} ` + + + +### _helm_lib_system_tolerations + + Base strategy that tolerates nodes with "dedicated.deckhouse.io: system" taints. + +#### Usage + +`{{ include "helm_lib_tolerations" (tuple . "system") }} ` + + + +### _helm_lib_additional_tolerations_uninitialized + + Additional strategy "uninitialized" - used for CNI's and kube-proxy to allow cni components scheduled on node after CCM initialization. + +#### Usage + +`{{ include "helm_lib_tolerations" (tuple . "any-node" "with-uninitialized") }} ` + + + +### _helm_lib_additional_tolerations_node_problems + + Additional strategy "node-problems" - used for shedule critical components on non-ready nodes or nodes under pressure. + +#### Usage + +`{{ include "helm_lib_tolerations" (tuple . 
"any-node" "with-node-problems") }} ` + + + +### _helm_lib_additional_tolerations_storage_problems + + Additional strategy "storage-problems" - used for shedule critical components on nodes with drbd problems. This additional strategy enabled by default in any base strategy except "wildcard". + +#### Usage + +`{{ include "helm_lib_tolerations" (tuple . "any-node" "without-storage-problems") }} ` + + + +### _helm_lib_additional_tolerations_no_csi + + Additional strategy "no-csi" - used for any node with no CSI: any node, which was initialized by deckhouse, but have no csi-node driver registered on it. + +#### Usage + +`{{ include "helm_lib_tolerations" (tuple . "any-node" "with-no-csi") }} ` + + + +### _helm_lib_additional_tolerations_cloud_provider_uninitialized + + Additional strategy "cloud-provider-uninitialized" - used for any node which is not initialized by CCM. + +#### Usage + +`{{ include "helm_lib_tolerations" (tuple . "any-node" "with-cloud-provider-uninitialized") }} ` + + +## Pod Disruption Budget + +### helm_lib_pdb_daemonset + + Returns PDB max unavailable + +#### Usage + +`{{ include "helm_lib_pdb_daemonset" . }} ` + +#### Arguments + +- Template context with .Values, .Chart, etc + +## Priority Class + +### helm_lib_priority_class + + returns priority class if priority-class module enabled, otherwise returns nothing + +#### Arguments + +list: +- Template context with .Values, .Chart, etc +- Priority class name + +## Resources Management + +### helm_lib_resources_management_pod_resources + + returns rendered resources section based on configuration if it is + +#### Usage + +`{{ include "helm_lib_resources_management_pod_resources" (list [ephemeral storage requests]) }} ` + +#### Arguments + +list: +- VPA resource configuration [example](https://deckhouse.io/documentation/v1/modules/110-istio/configuration.html#parameters-controlplane-resourcesmanagement) +- Ephemeral storage requests + + +### helm_lib_resources_management_original_pod_resources + + returns rendered resources section based on configuration if it is present + +#### Usage + +`{{ include "helm_lib_resources_management_original_pod_resources" }} ` + +#### Arguments + +- VPA resource configuration [example](https://deckhouse.io/documentation/v1/modules/110-istio/configuration.html#parameters-controlplane-resourcesmanagement) + + +### helm_lib_resources_management_vpa_spec + + returns rendered vpa spec based on configuration and target reference + +#### Usage + +`{{ include "helm_lib_resources_management_vpa_spec" (list ) }} ` + +#### Arguments + +list: +- Target API version +- Target Kind +- Target Name +- Target container name +- VPA resource configuration [example](https://deckhouse.io/documentation/v1/modules/110-istio/configuration.html#parameters-controlplane-resourcesmanagement) + + +### helm_lib_resources_management_cpu_units_to_millicores + + helper for converting cpu units to millicores + +#### Usage + +`{{ include "helm_lib_resources_management_cpu_units_to_millicores" }} ` + + + +### helm_lib_resources_management_memory_units_to_bytes + + helper for converting memory units to bytes + +#### Usage + +`{{ include "helm_lib_resources_management_memory_units_to_bytes" }} ` + + + +### helm_lib_vpa_kube_rbac_proxy_resources + + helper for VPA resources for kube_rbac_proxy + +#### Usage + +`{{ include "helm_lib_vpa_kube_rbac_proxy_resources" . 
}} ` + +#### Arguments + +- Template context with .Values, .Chart, etc + + +### helm_lib_container_kube_rbac_proxy_resources + + helper for container resources for kube_rbac_proxy + +#### Usage + +`{{ include "helm_lib_container_kube_rbac_proxy_resources" . }} ` + +#### Arguments + +- Template context with .Values, .Chart, etc + +## Spec For High Availability + +### helm_lib_pod_anti_affinity_for_ha + + returns pod affinity spec + +#### Usage + +`{{ include "helm_lib_pod_anti_affinity_for_ha" (list . (dict "app" "test")) }} ` + +#### Arguments + +list: +- Template context with .Values, .Chart, etc +- Match labels for podAntiAffinity label selector + + +### helm_lib_deployment_on_master_strategy_and_replicas_for_ha + + returns deployment strategy and replicas for ha components running on master nodes + +#### Usage + +`{{ include "helm_lib_deployment_on_master_strategy_and_replicas_for_ha" }} ` + +#### Arguments + +- Template context with .Values, .Chart, etc + + +### helm_lib_deployment_on_master_custom_strategy_and_replicas_for_ha + + returns deployment with custom strategy and replicas for ha components running on master nodes + +#### Usage + +`{{ include "helm_lib_deployment_on_master_custom_strategy_and_replicas_for_ha" (list . (dict "strategy" "strategy_type")) }} ` + + + +### helm_lib_deployment_strategy_and_replicas_for_ha + + returns deployment strategy and replicas for ha components running not on master nodes + +#### Usage + +`{{ include "helm_lib_deployment_strategy_and_replicas_for_ha" }} ` + +#### Arguments + +- Template context with .Values, .Chart, etc diff --git a/charts/deckhouse_lib_helm/templates/_csi_controller.tpl b/charts/deckhouse_lib_helm/templates/_csi_controller.tpl new file mode 100644 index 0000000..094b365 --- /dev/null +++ b/charts/deckhouse_lib_helm/templates/_csi_controller.tpl @@ -0,0 +1,700 @@ +{{- define "attacher_resources" }} +cpu: 10m +memory: 25Mi +{{- end }} + +{{- define "provisioner_resources" }} +cpu: 10m +memory: 25Mi +{{- end }} + +{{- define "resizer_resources" }} +cpu: 10m +memory: 25Mi +{{- end }} + +{{- define "snapshotter_resources" }} +cpu: 10m +memory: 25Mi +{{- end }} + +{{- define "livenessprobe_resources" }} +cpu: 10m +memory: 25Mi +{{- end }} + +{{- define "controller_resources" }} +cpu: 10m +memory: 50Mi +{{- end }} + +{{- /* Usage: {{ include "helm_lib_csi_controller_manifests" (list . $config) }} */ -}} +{{- define "helm_lib_csi_controller_manifests" }} + {{- $context := index . 0 }} + + {{- $config := index . 
1 }} + {{- $fullname := $config.fullname | default "csi-controller" }} + {{- $snapshotterEnabled := dig "snapshotterEnabled" true $config }} + {{- $resizerEnabled := dig "resizerEnabled" true $config }} + {{- $topologyEnabled := dig "topologyEnabled" true $config }} + {{- $extraCreateMetadataEnabled := dig "extraCreateMetadataEnabled" false $config }} + {{- $controllerImage := $config.controllerImage | required "$config.controllerImage is required" }} + {{- $provisionerTimeout := $config.provisionerTimeout | default "600s" }} + {{- $attacherTimeout := $config.attacherTimeout | default "600s" }} + {{- $resizerTimeout := $config.resizerTimeout | default "600s" }} + {{- $snapshotterTimeout := $config.snapshotterTimeout | default "600s" }} + {{- $provisionerWorkers := $config.provisionerWorkers | default "10" }} + {{- $attacherWorkers := $config.attacherWorkers | default "10" }} + {{- $resizerWorkers := $config.resizerWorkers | default "10" }} + {{- $snapshotterWorkers := $config.snapshotterWorkers | default "10" }} + {{- $additionalControllerEnvs := $config.additionalControllerEnvs }} + {{- $additionalControllerArgs := $config.additionalControllerArgs }} + {{- $additionalControllerVolumes := $config.additionalControllerVolumes }} + {{- $additionalControllerVolumeMounts := $config.additionalControllerVolumeMounts }} + {{- $additionalContainers := $config.additionalContainers }} + {{- $livenessProbePort := $config.livenessProbePort | default 9808 }} + + {{- $kubernetesSemVer := semver $context.Values.global.discovery.kubernetesVersion }} + + {{- $provisionerImageName := join "" (list "csiExternalProvisioner" $kubernetesSemVer.Major $kubernetesSemVer.Minor) }} + {{- $provisionerImage := include "helm_lib_module_common_image_no_fail" (list $context $provisionerImageName) }} + + {{- $attacherImageName := join "" (list "csiExternalAttacher" $kubernetesSemVer.Major $kubernetesSemVer.Minor) }} + {{- $attacherImage := include "helm_lib_module_common_image_no_fail" (list $context $attacherImageName) }} + + {{- $resizerImageName := join "" (list "csiExternalResizer" $kubernetesSemVer.Major $kubernetesSemVer.Minor) }} + {{- $resizerImage := include "helm_lib_module_common_image_no_fail" (list $context $resizerImageName) }} + + {{- $snapshotterImageName := join "" (list "csiExternalSnapshotter" $kubernetesSemVer.Major $kubernetesSemVer.Minor) }} + {{- $snapshotterImage := include "helm_lib_module_common_image_no_fail" (list $context $snapshotterImageName) }} + + {{- $livenessprobeImageName := join "" (list "csiLivenessprobe" $kubernetesSemVer.Major $kubernetesSemVer.Minor) }} + {{- $livenessprobeImage := include "helm_lib_module_common_image_no_fail" (list $context $livenessprobeImageName) }} + + {{- if $provisionerImage }} + {{- if ($context.Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} +--- +apiVersion: autoscaling.k8s.io/v1 +kind: VerticalPodAutoscaler +metadata: + name: {{ $fullname }} + namespace: d8-{{ $context.Chart.Name }} + {{- include "helm_lib_module_labels" (list $context (dict "app" "csi-controller" "workload-resource-policy.deckhouse.io" "master")) | nindent 2 }} +spec: + targetRef: + apiVersion: "apps/v1" + kind: Deployment + name: {{ $fullname }} + updatePolicy: + updateMode: "Auto" + resourcePolicy: + containerPolicies: + - containerName: "provisioner" + minAllowed: + {{- include "provisioner_resources" $context | nindent 8 }} + maxAllowed: + cpu: 20m + memory: 50Mi + - containerName: "attacher" + minAllowed: + {{- include "attacher_resources" $context | nindent 8 
}} + maxAllowed: + cpu: 20m + memory: 50Mi + {{- if $resizerEnabled }} + - containerName: "resizer" + minAllowed: + {{- include "resizer_resources" $context | nindent 8 }} + maxAllowed: + cpu: 20m + memory: 50Mi + {{- end }} + {{- if $snapshotterEnabled }} + - containerName: "snapshotter" + minAllowed: + {{- include "snapshotter_resources" $context | nindent 8 }} + maxAllowed: + cpu: 20m + memory: 50Mi + {{- end }} + - containerName: "livenessprobe" + minAllowed: + {{- include "livenessprobe_resources" $context | nindent 8 }} + maxAllowed: + cpu: 20m + memory: 50Mi + - containerName: "controller" + minAllowed: + {{- include "controller_resources" $context | nindent 8 }} + maxAllowed: + cpu: 20m + memory: 100Mi + {{- end }} +--- +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: {{ $fullname }} + namespace: d8-{{ $context.Chart.Name }} + {{- include "helm_lib_module_labels" (list $context (dict "app" "csi-controller")) | nindent 2 }} +spec: + maxUnavailable: 1 + selector: + matchLabels: + app: {{ $fullname }} +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: {{ $fullname }} + namespace: d8-{{ $context.Chart.Name }} + {{- include "helm_lib_module_labels" (list $context (dict "app" "csi-controller")) | nindent 2 }} +spec: + replicas: 1 + revisionHistoryLimit: 2 + selector: + matchLabels: + app: {{ $fullname }} + strategy: + type: Recreate + template: + metadata: + labels: + app: {{ $fullname }} + {{- if hasPrefix "cloud-provider-" $context.Chart.Name }} + annotations: + cloud-config-checksum: {{ include (print $context.Template.BasePath "/cloud-controller-manager/secret.yaml") $context | sha256sum }} + {{- end }} + spec: + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + imagePullSecrets: + - name: deckhouse-registry + {{- include "helm_lib_priority_class" (tuple $context "system-cluster-critical") | nindent 6 }} + {{- include "helm_lib_node_selector" (tuple $context "master") | nindent 6 }} + {{- include "helm_lib_tolerations" (tuple $context "any-node" "with-uninitialized") | nindent 6 }} +{{- if $context.Values.global.enabledModules | has "csi-nfs" }} + {{- include "helm_lib_module_pod_security_context_runtime_default" . | nindent 6 }} +{{- else }} + {{- include "helm_lib_module_pod_security_context_run_as_user_deckhouse" . | nindent 6 }} +{{- end }} + serviceAccountName: csi + containers: + - name: provisioner + {{- include "helm_lib_module_container_security_context_read_only_root_filesystem" . 
| nindent 8 }} + image: {{ $provisionerImage | quote }} + args: + - "--timeout={{ $provisionerTimeout }}" + - "--v=5" + - "--csi-address=$(ADDRESS)" + {{- if $topologyEnabled }} + - "--feature-gates=Topology=true" + - "--strict-topology" + {{- else }} + - "--feature-gates=Topology=false" + {{- end }} + - "--default-fstype=ext4" + - "--leader-election=true" + - "--leader-election-namespace=$(NAMESPACE)" + - "--enable-capacity" + - "--capacity-ownerref-level=2" + {{- if $extraCreateMetadataEnabled }} + - "--extra-create-metadata=true" + {{- end }} + - "--worker-threads={{ $provisionerWorkers }}" + env: + - name: ADDRESS + value: /csi/csi.sock + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: + requests: + {{- include "helm_lib_module_ephemeral_storage_logs_with_extra" 10 | nindent 12 }} + {{- if not ( $context.Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} + {{- include "provisioner_resources" $context | nindent 12 }} + {{- end }} + - name: attacher + {{- include "helm_lib_module_container_security_context_read_only_root_filesystem" . | nindent 8 }} + image: {{ $attacherImage | quote }} + args: + - "--timeout={{ $attacherTimeout }}" + - "--v=5" + - "--csi-address=$(ADDRESS)" + - "--leader-election=true" + - "--leader-election-namespace=$(NAMESPACE)" + - "--worker-threads={{ $attacherWorkers }}" + env: + - name: ADDRESS + value: /csi/csi.sock + - name: NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: + requests: + {{- include "helm_lib_module_ephemeral_storage_logs_with_extra" 10 | nindent 12 }} + {{- if not ( $context.Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} + {{- include "attacher_resources" $context | nindent 12 }} + {{- end }} + {{- if $resizerEnabled }} + - name: resizer + {{- include "helm_lib_module_container_security_context_read_only_root_filesystem" . | nindent 8 }} + image: {{ $resizerImage | quote }} + args: + - "--timeout={{ $resizerTimeout }}" + - "--v=5" + - "--csi-address=$(ADDRESS)" + - "--leader-election=true" + - "--leader-election-namespace=$(NAMESPACE)" + - "--workers={{ $resizerWorkers }}" + env: + - name: ADDRESS + value: /csi/csi.sock + - name: NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: + requests: + {{- include "helm_lib_module_ephemeral_storage_logs_with_extra" 10 | nindent 12 }} + {{- if not ( $context.Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} + {{- include "resizer_resources" $context | nindent 12 }} + {{- end }} + {{- end }} + {{- if $snapshotterEnabled }} + - name: snapshotter + {{- include "helm_lib_module_container_security_context_read_only_root_filesystem" . 
| nindent 8 }} + image: {{ $snapshotterImage | quote }} + args: + - "--timeout={{ $snapshotterTimeout }}" + - "--v=5" + - "--csi-address=$(ADDRESS)" + - "--leader-election=true" + - "--leader-election-namespace=$(NAMESPACE)" + - "--worker-threads={{ $snapshotterWorkers }}" + env: + - name: ADDRESS + value: /csi/csi.sock + - name: NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: + requests: + {{- include "helm_lib_module_ephemeral_storage_logs_with_extra" 10 | nindent 12 }} + {{- if not ( $context.Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} + {{- include "snapshotter_resources" $context | nindent 12 }} + {{- end }} + {{- end }} + - name: livenessprobe + {{- include "helm_lib_module_container_security_context_read_only_root_filesystem" . | nindent 8 }} + image: {{ $livenessprobeImage | quote }} + args: + - "--csi-address=$(ADDRESS)" + - "--http-endpoint=$(HOST_IP):{{ $livenessProbePort }}" + env: + - name: ADDRESS + value: /csi/csi.sock + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: + requests: + {{- include "helm_lib_module_ephemeral_storage_logs_with_extra" 10 | nindent 12 }} + {{- if not ( $context.Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} + {{- include "livenessprobe_resources" $context | nindent 12 }} + {{- end }} + - name: controller +{{- if $context.Values.global.enabledModules | has "csi-nfs" }} + {{- include "helm_lib_module_container_security_context_escalated_sys_admin_privileged" . | nindent 8 }} +{{- else }} + {{- include "helm_lib_module_container_security_context_read_only_root_filesystem" . | nindent 8 }} +{{- end }} + image: {{ $controllerImage | quote }} + args: + {{- if $additionalControllerArgs }} + {{- $additionalControllerArgs | toYaml | nindent 8 }} + {{- end }} + {{- if $additionalControllerEnvs }} + env: + {{- $additionalControllerEnvs | toYaml | nindent 8 }} + {{- end }} + livenessProbe: + httpGet: + path: /healthz + port: {{ $livenessProbePort }} + volumeMounts: + - name: socket-dir + mountPath: /csi + {{- /* For an unknown reason vSphere csi-controller won't start without `/tmp` directory */ -}} + {{- if eq $context.Chart.Name "cloud-provider-vsphere" }} + - name: tmp + mountPath: /tmp + {{- end }} + {{- if $additionalControllerVolumeMounts }} + {{- $additionalControllerVolumeMounts | toYaml | nindent 8 }} + {{- end }} + resources: + requests: + {{- include "helm_lib_module_ephemeral_storage_logs_with_extra" 10 | nindent 12 }} + {{- if not ( $context.Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} + {{- include "controller_resources" $context | nindent 12 }} + {{- end }} + {{- if $additionalContainers }} + {{- $additionalContainers | toYaml | nindent 6 }} + {{- end }} + volumes: + - name: socket-dir + emptyDir: {} + {{- /* For an unknown reason vSphere csi-controller won't start without `/tmp` directory */ -}} + {{- if eq $context.Chart.Name "cloud-provider-vsphere" }} + - name: tmp + emptyDir: {} + {{- end }} + {{- if $additionalControllerVolumes }} + {{- $additionalControllerVolumes | toYaml | nindent 6 }} + {{- end }} + {{- end }} +{{- end }} + + +{{- /* Usage: {{ include "helm_lib_csi_controller_rbac" . 
}} */ -}} +{{- define "helm_lib_csi_controller_rbac" }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi + namespace: d8-{{ .Chart.Name }} + {{- include "helm_lib_module_labels" (list . (dict "app" "csi-controller")) | nindent 2 }} + +# =========== +# provisioner +# =========== +# Source https://github.com/kubernetes-csi/external-provisioner/blob/master/deploy/kubernetes/rbac.yaml +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: d8:{{ .Chart.Name }}:csi:controller:external-provisioner + {{- include "helm_lib_module_labels" (list . (dict "app" "csi-controller")) | nindent 2 }} +rules: +- apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] +- apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] +- apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] +- apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] +- apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] +- apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["get", "list"] +- apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] +# Access to volumeattachments is only needed when the CSI driver +# has the PUBLISH_UNPUBLISH_VOLUME controller capability. +# In that case, external-provisioner will watch volumeattachments +# to determine when it is safe to delete a volume. +- apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: d8:{{ .Chart.Name }}:csi:controller:external-provisioner + {{- include "helm_lib_module_labels" (list . (dict "app" "csi-controller")) | nindent 2 }} +subjects: +- kind: ServiceAccount + name: csi + namespace: d8-{{ .Chart.Name }} +roleRef: + kind: ClusterRole + name: d8:{{ .Chart.Name }}:csi:controller:external-provisioner + apiGroup: rbac.authorization.k8s.io +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi:controller:external-provisioner + namespace: d8-{{ .Chart.Name }} + {{- include "helm_lib_module_labels" (list . (dict "app" "csi-controller")) | nindent 2 }} +rules: +# Only one of the following rules for endpoints or leases is required based on +# what is set for `--leader-election-type`. Endpoints are deprecated in favor of Leases. +- apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "watch", "list", "delete", "update", "create"] +- apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] +# Permissions for CSIStorageCapacity are only needed enabling the publishing +# of storage capacity information. +- apiGroups: ["storage.k8s.io"] + resources: ["csistoragecapacities"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] +# The GET permissions below are needed for walking up the ownership chain +# for CSIStorageCapacity. They are sufficient for deployment via +# StatefulSet (only needs to get Pod) and Deployment (needs to get +# Pod and then ReplicaSet to find the Deployment). 
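+# In this library the provisioner runs as a Deployment and is started with
+# "--enable-capacity" and "--capacity-ownerref-level=2" (see the provisioner
+# container args above), so it needs to get the Pod and its ReplicaSet to
+# resolve the owning Deployment for CSIStorageCapacity owner references.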
+- apiGroups: [""] + resources: ["pods"] + verbs: ["get"] +- apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get"] +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi:controller:external-provisioner + namespace: d8-{{ .Chart.Name }} + {{- include "helm_lib_module_labels" (list . (dict "app" "csi-controller")) | nindent 2 }} +subjects: +- kind: ServiceAccount + name: csi + namespace: d8-{{ .Chart.Name }} +roleRef: + kind: Role + name: csi:controller:external-provisioner + apiGroup: rbac.authorization.k8s.io + +# ======== +# attacher +# ======== +# Source https://github.com/kubernetes-csi/external-attacher/blob/master/deploy/kubernetes/rbac.yaml +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: d8:{{ .Chart.Name }}:csi:controller:external-attacher + {{- include "helm_lib_module_labels" (list . (dict "app" "csi-controller")) | nindent 2 }} +rules: +- apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] +- apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] +- apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] +- apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["patch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: d8:{{ .Chart.Name }}:csi:controller:external-attacher + {{- include "helm_lib_module_labels" (list . (dict "app" "csi-controller")) | nindent 2 }} +subjects: +- kind: ServiceAccount + name: csi + namespace: d8-{{ .Chart.Name }} +roleRef: + kind: ClusterRole + name: d8:{{ .Chart.Name }}:csi:controller:external-attacher + apiGroup: rbac.authorization.k8s.io +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi:controller:external-attacher + namespace: d8-{{ .Chart.Name }} + {{- include "helm_lib_module_labels" (list . (dict "app" "csi-controller")) | nindent 2 }} +rules: +- apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi:controller:external-attacher + namespace: d8-{{ .Chart.Name }} + {{- include "helm_lib_module_labels" (list . (dict "app" "csi-controller")) | nindent 2 }} +subjects: +- kind: ServiceAccount + name: csi + namespace: d8-{{ .Chart.Name }} +roleRef: + kind: Role + name: csi:controller:external-attacher + apiGroup: rbac.authorization.k8s.io + +# ======= +# resizer +# ======= +# Source https://github.com/kubernetes-csi/external-resizer/blob/master/deploy/kubernetes/rbac.yaml +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: d8:{{ .Chart.Name }}:csi:controller:external-resizer + {{- include "helm_lib_module_labels" (list . 
(dict "app" "csi-controller")) | nindent 2 }} +rules: +- apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "patch"] +- apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["patch"] +- apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: d8:{{ .Chart.Name }}:csi:controller:external-resizer + {{- include "helm_lib_module_labels" (list . (dict "app" "csi-controller")) | nindent 2 }} +subjects: +- kind: ServiceAccount + name: csi + namespace: d8-{{ .Chart.Name }} +roleRef: + kind: ClusterRole + name: d8:{{ .Chart.Name }}:csi:controller:external-resizer + apiGroup: rbac.authorization.k8s.io +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi:controller:external-resizer + namespace: d8-{{ .Chart.Name }} + {{- include "helm_lib_module_labels" (list . (dict "app" "csi-controller")) | nindent 2 }} +rules: +- apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi:controller:external-resizer + namespace: d8-{{ .Chart.Name }} + {{- include "helm_lib_module_labels" (list . (dict "app" "csi-controller")) | nindent 2 }} +subjects: +- kind: ServiceAccount + name: csi + namespace: d8-{{ .Chart.Name }} +roleRef: + kind: Role + name: csi:controller:external-resizer + apiGroup: rbac.authorization.k8s.io +# ======== +# snapshotter +# ======== +# Source https://github.com/kubernetes-csi/external-snapshotter/blob/master/deploy/kubernetes/csi-snapshotter/rbac-csi-snapshotter.yaml +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: d8:{{ .Chart.Name }}:csi:controller:external-snapshotter + {{- include "helm_lib_module_labels" (list . (dict "app" "csi-controller")) | nindent 2 }} +rules: +- apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] +- apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] +- apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] +- apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] +- apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update", "patch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: d8:{{ .Chart.Name }}:csi:controller:external-snapshotter + {{- include "helm_lib_module_labels" (list . (dict "app" "csi-controller")) | nindent 2 }} +subjects: +- kind: ServiceAccount + name: csi + namespace: d8-{{ .Chart.Name }} +roleRef: + kind: ClusterRole + name: d8:{{ .Chart.Name }}:csi:controller:external-snapshotter + apiGroup: rbac.authorization.k8s.io +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi:controller:external-snapshotter + namespace: d8-{{ .Chart.Name }} + {{- include "helm_lib_module_labels" (list . 
(dict "app" "csi-controller")) | nindent 2 }} +rules: +- apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi:controller:external-snapshotter + namespace: d8-{{ .Chart.Name }} + {{- include "helm_lib_module_labels" (list . (dict "app" "csi-controller")) | nindent 2 }} +subjects: +- kind: ServiceAccount + name: csi + namespace: d8-{{ .Chart.Name }} +roleRef: + kind: Role + name: csi:controller:external-snapshotter + apiGroup: rbac.authorization.k8s.io +{{- end }} diff --git a/charts/deckhouse_lib_helm/templates/_csi_node.tpl b/charts/deckhouse_lib_helm/templates/_csi_node.tpl new file mode 100644 index 0000000..0b5ba04 --- /dev/null +++ b/charts/deckhouse_lib_helm/templates/_csi_node.tpl @@ -0,0 +1,193 @@ +{{- define "node_driver_registrar_resources" }} +cpu: 12m +memory: 25Mi +{{- end }} + +{{- define "node_resources" }} +cpu: 12m +memory: 25Mi +{{- end }} + +{{- /* Usage: {{ include "helm_lib_csi_node_manifests" (list . $config) }} */ -}} +{{- define "helm_lib_csi_node_manifests" }} + {{- $context := index . 0 }} + + {{- $config := index . 1 }} + {{- $fullname := $config.fullname | default "csi-node" }} + {{- $nodeImage := $config.nodeImage | required "$config.nodeImage is required" }} + {{- $driverFQDN := $config.driverFQDN | required "$config.driverFQDN is required" }} + {{- $serviceAccount := $config.serviceAccount | default "" }} + {{- $additionalNodeEnvs := $config.additionalNodeEnvs }} + {{- $additionalNodeArgs := $config.additionalNodeArgs }} + {{- $additionalNodeVolumes := $config.additionalNodeVolumes }} + {{- $additionalNodeVolumeMounts := $config.additionalNodeVolumeMounts }} + {{- $initContainerCommand := $config.initContainerCommand }} + {{- $initContainerImage := $config.initContainerImage }} + + {{- $kubernetesSemVer := semver $context.Values.global.discovery.kubernetesVersion }} + {{- $driverRegistrarImageName := join "" (list "csiNodeDriverRegistrar" $kubernetesSemVer.Major $kubernetesSemVer.Minor) }} + {{- $driverRegistrarImage := include "helm_lib_module_common_image_no_fail" (list $context $driverRegistrarImageName) }} + {{- if $driverRegistrarImage }} + {{- if or (include "_helm_lib_cloud_or_hybrid_cluster" $context) ($context.Values.global.enabledModules | has "ceph-csi") ($context.Values.global.enabledModules | has "csi-nfs") ($context.Values.global.enabledModules | has "csi-cep") }} + {{- if ($context.Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} +--- +apiVersion: autoscaling.k8s.io/v1 +kind: VerticalPodAutoscaler +metadata: + name: {{ $fullname }} + namespace: d8-{{ $context.Chart.Name }} + {{- include "helm_lib_module_labels" (list $context (dict "app" "csi-node" "workload-resource-policy.deckhouse.io" "every-node")) | nindent 2 }} +spec: + targetRef: + apiVersion: "apps/v1" + kind: DaemonSet + name: {{ $fullname }} + updatePolicy: + updateMode: "Auto" + resourcePolicy: + containerPolicies: + - containerName: "node-driver-registrar" + minAllowed: + {{- include "node_driver_registrar_resources" $context | nindent 8 }} + maxAllowed: + cpu: 25m + memory: 50Mi + - containerName: "node" + minAllowed: + {{- include "node_resources" $context | nindent 8 }} + maxAllowed: + cpu: 25m + memory: 50Mi + {{- end }} +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: {{ $fullname }} + namespace: d8-{{ $context.Chart.Name }} + {{- include "helm_lib_module_labels" (list $context (dict "app" 
"csi-node")) | nindent 2 }} +spec: + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app: {{ $fullname }} + template: + metadata: + labels: + app: {{ $fullname }} + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - operator: In + key: node.deckhouse.io/type + values: + - CloudEphemeral + - CloudPermanent + - CloudStatic + {{- if or (eq $fullname "csi-node-rbd") (eq $fullname "csi-node-cephfs") (eq $fullname "csi-nfs") }} + - Static + {{- end }} + imagePullSecrets: + - name: deckhouse-registry + {{- include "helm_lib_priority_class" (tuple $context "system-node-critical") | nindent 6 }} + {{- include "helm_lib_tolerations" (tuple $context "any-node" "with-no-csi") | nindent 6 }} + {{- include "helm_lib_module_pod_security_context_run_as_user_root" . | nindent 6 }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + containers: + - name: node-driver-registrar + {{- include "helm_lib_module_container_security_context_read_only_root_filesystem_capabilities_drop_all" $context | nindent 8 }} + image: {{ $driverRegistrarImage | quote }} + args: + - "--v=5" + - "--csi-address=$(CSI_ENDPOINT)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + env: + - name: CSI_ENDPOINT + value: "/csi/csi.sock" + - name: DRIVER_REG_SOCK_PATH + value: "/var/lib/kubelet/csi-plugins/{{ $driverFQDN }}/csi.sock" + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - name: plugin-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + resources: + requests: + {{- include "helm_lib_module_ephemeral_storage_only_logs" 10 | nindent 12 }} + {{- if not ($context.Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} + {{- include "node_driver_registrar_resources" $context | nindent 12 }} + {{- end }} + - name: node + securityContext: + privileged: true + image: {{ $nodeImage }} + args: + {{- if $additionalNodeArgs }} + {{- $additionalNodeArgs | toYaml | nindent 8 }} + {{- end }} + {{- if $additionalNodeEnvs }} + env: + {{- $additionalNodeEnvs | toYaml | nindent 8 }} + {{- end }} + volumeMounts: + - name: kubelet-dir + mountPath: /var/lib/kubelet + mountPropagation: "Bidirectional" + - name: plugin-dir + mountPath: /csi + - name: device-dir + mountPath: /dev + {{- if $additionalNodeVolumeMounts }} + {{- $additionalNodeVolumeMounts | toYaml | nindent 8 }} + {{- end }} + resources: + requests: + {{- include "helm_lib_module_ephemeral_storage_logs_with_extra" 10 | nindent 12 }} + {{- if not ($context.Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} + {{- include "node_resources" $context | nindent 12 }} + {{- end }} + {{- if $initContainerCommand }} + initContainers: + - command: + {{- $initContainerCommand | toYaml | nindent 8 }} + image: {{ $initContainerImage }} + imagePullPolicy: IfNotPresent + name: csi-node-init-container + resources: + requests: + {{- include "helm_lib_module_ephemeral_storage_logs_with_extra" 10 | nindent 12 }} + {{- end }} + serviceAccount: {{ $serviceAccount | quote }} + serviceAccountName: {{ $serviceAccount | quote }} + volumes: + - name: registration-dir + hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: Directory + - name: kubelet-dir + hostPath: + path: /var/lib/kubelet + type: Directory + - name: plugin-dir + hostPath: + path: /var/lib/kubelet/csi-plugins/{{ $driverFQDN }}/ + type: DirectoryOrCreate + - name: device-dir + hostPath: + path: /dev + type: Directory + {{- if 
$additionalNodeVolumes }} + {{- $additionalNodeVolumes | toYaml | nindent 6 }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/charts/deckhouse_lib_helm/templates/_envs_for_proxy.tpl b/charts/deckhouse_lib_helm/templates/_envs_for_proxy.tpl new file mode 100644 index 0000000..177bb1c --- /dev/null +++ b/charts/deckhouse_lib_helm/templates/_envs_for_proxy.tpl @@ -0,0 +1,30 @@ +{{- /* Usage: {{ include "helm_lib_envs_for_proxy" . }} */ -}} +{{- /* Add HTTP_PROXY, HTTPS_PROXY and NO_PROXY environment variables for container */ -}} +{{- /* depends on [proxy settings](https://deckhouse.io/documentation/v1/deckhouse-configure-global.html#parameters-modules-proxy) */ -}} +{{- define "helm_lib_envs_for_proxy" }} + {{- $context := . -}} {{- /* Template context with .Values, .Chart, etc */ -}} + {{- if $context.Values.global.clusterConfiguration }} + {{- if $context.Values.global.clusterConfiguration.proxy }} + {{- if $context.Values.global.clusterConfiguration.proxy.httpProxy }} +- name: HTTP_PROXY + value: {{ $context.Values.global.clusterConfiguration.proxy.httpProxy | quote }} +- name: http_proxy + value: {{ $context.Values.global.clusterConfiguration.proxy.httpProxy | quote }} + {{- end }} + {{- if $context.Values.global.clusterConfiguration.proxy.httpsProxy }} +- name: HTTPS_PROXY + value: {{ $context.Values.global.clusterConfiguration.proxy.httpsProxy | quote }} +- name: https_proxy + value: {{ $context.Values.global.clusterConfiguration.proxy.httpsProxy | quote }} + {{- end }} + {{- $noProxy := list "127.0.0.1" "169.254.169.254" $context.Values.global.clusterConfiguration.clusterDomain $context.Values.global.clusterConfiguration.podSubnetCIDR $context.Values.global.clusterConfiguration.serviceSubnetCIDR }} + {{- if $context.Values.global.clusterConfiguration.proxy.noProxy }} + {{- $noProxy = concat $noProxy $context.Values.global.clusterConfiguration.proxy.noProxy }} + {{- end }} +- name: NO_PROXY + value: {{ $noProxy | join "," | quote }} +- name: no_proxy + value: {{ $noProxy | join "," | quote }} + {{- end }} + {{- end }} +{{- end }} diff --git a/charts/deckhouse_lib_helm/templates/_high_availability.tpl b/charts/deckhouse_lib_helm/templates/_high_availability.tpl new file mode 100644 index 0000000..8c7da23 --- /dev/null +++ b/charts/deckhouse_lib_helm/templates/_high_availability.tpl @@ -0,0 +1,39 @@ +{{- /* Usage: {{ include "helm_lib_is_ha_to_value" (list . yes no) }} */ -}} +{{- /* returns value "yes" if cluster is highly available, else — returns "no" */ -}} +{{- define "helm_lib_is_ha_to_value" }} + {{- $context := index . 0 -}} {{- /* Template context with .Values, .Chart, etc */ -}} + {{- $yes := index . 1 -}} {{- /* Yes value */ -}} + {{- $no := index . 2 -}} {{- /* No value */ -}} + + {{- $module_values := (index $context.Values (include "helm_lib_module_camelcase_name" $context)) }} + + {{- if hasKey $module_values "highAvailability" -}} + {{- if $module_values.highAvailability -}} {{- $yes -}} {{- else -}} {{- $no -}} {{- end -}} + {{- else if hasKey $context.Values.global "highAvailability" -}} + {{- if $context.Values.global.highAvailability -}} {{- $yes -}} {{- else -}} {{- $no -}} {{- end -}} + {{- else -}} + {{- if $context.Values.global.discovery.clusterControlPlaneIsHighlyAvailable -}} {{- $yes -}} {{- else -}} {{- $no -}} {{- end -}} + {{- end -}} +{{- end }} + +{{- /* Usage: {{- if (include "helm_lib_ha_enabled" .) 
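+{{- /* Illustrative sketch (hypothetical container spec): the proxy helper is appended to an existing env list, e.g. */ -}}
+{{- /*   env: */ -}}
+{{- /*   - name: LOG_LEVEL */ -}}
+{{- /*     value: "4" */ -}}
+{{- /*   {{- include "helm_lib_envs_for_proxy" . | nindent 2 }} */ -}}
+{{- /* When global proxy settings exist, this renders HTTP_PROXY/HTTPS_PROXY (and their lowercase variants) plus a NO_PROXY list that always includes 127.0.0.1, 169.254.169.254, the cluster domain and the pod/service CIDRs. */ -}}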
}} */ -}} +{{- /* returns empty value, which is treated by go template as false */ -}} +{{- define "helm_lib_ha_enabled" }} + {{- $context := . -}} {{- /* Template context with .Values, .Chart, etc */ -}} + + {{- $module_values := (index $context.Values (include "helm_lib_module_camelcase_name" $context)) }} + + {{- if hasKey $module_values "highAvailability" -}} + {{- if $module_values.highAvailability -}} + "not empty string" + {{- end -}} + {{- else if hasKey $context.Values.global "highAvailability" -}} + {{- if $context.Values.global.highAvailability -}} + "not empty string" + {{- end -}} + {{- else -}} + {{- if $context.Values.global.discovery.clusterControlPlaneIsHighlyAvailable -}} + "not empty string" + {{- end -}} + {{- end -}} +{{- end -}} diff --git a/charts/deckhouse_lib_helm/templates/_kube_rbac_proxy.tpl b/charts/deckhouse_lib_helm/templates/_kube_rbac_proxy.tpl new file mode 100644 index 0000000..af9f7a4 --- /dev/null +++ b/charts/deckhouse_lib_helm/templates/_kube_rbac_proxy.tpl @@ -0,0 +1,21 @@ +{{- /* Usage: {{ include "helm_lib_kube_rbac_proxy_ca_certificate" (list . "namespace") }} */ -}} +{{- /* Renders configmap with kube-rbac-proxy CA certificate which uses to verify the kube-rbac-proxy clients. */ -}} +{{- define "helm_lib_kube_rbac_proxy_ca_certificate" -}} +{{- /* Template context with .Values, .Chart, etc */ -}} +{{- /* Namespace where CA configmap will be created */ -}} + {{- $context := index . 0 }} + {{- $namespace := index . 1 }} +--- +apiVersion: v1 +data: + ca.crt: | + {{ $context.Values.global.internal.modules.kubeRBACProxyCA.cert | nindent 4 }} +kind: ConfigMap +metadata: + annotations: + kubernetes.io/description: | + Contains a CA bundle that can be used to verify the kube-rbac-proxy clients. + {{- include "helm_lib_module_labels" (list $context) | nindent 2 }} + name: kube-rbac-proxy-ca.crt + namespace: {{ $namespace }} +{{- end }} diff --git a/charts/deckhouse_lib_helm/templates/_module_documentation_uri.tpl b/charts/deckhouse_lib_helm/templates/_module_documentation_uri.tpl new file mode 100644 index 0000000..a02cf45 --- /dev/null +++ b/charts/deckhouse_lib_helm/templates/_module_documentation_uri.tpl @@ -0,0 +1,15 @@ +{{- /* Usage: {{ include "helm_lib_module_documentation_uri" (list . "") }} */ -}} +{{- /* returns rendered documentation uri using publicDomainTemplate or deckhouse.io domains*/ -}} +{{- define "helm_lib_module_documentation_uri" }} + {{- $default_doc_prefix := "https://deckhouse.io/documentation/v1" -}} + {{- $context := index . 0 -}} {{- /* Template context with .Values, .Chart, etc */ -}} + {{- $path_portion := index . 
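+{{- /* Illustrative sketch: a Deployment can derive its replica count and HA-only blocks from these helpers (the 2/1 values are arbitrary): */ -}}
+{{- /*   replicas: {{ include "helm_lib_is_ha_to_value" (list . 2 1) }} */ -}}
+{{- /*   {{- if (include "helm_lib_ha_enabled" .) }} ... anti-affinity / PodDisruptionBudget rendered only in HA mode ... {{- end }} */ -}}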
1 -}} {{- /* Path to the document */ -}} + {{- $uri := "" -}} + {{- if $context.Values.global.modules.publicDomainTemplate }} + {{- $uri = printf "%s://%s%s" (include "helm_lib_module_uri_scheme" $context) (include "helm_lib_module_public_domain" (list $context "documentation")) $path_portion -}} + {{- else }} + {{- $uri = printf "%s%s" $default_doc_prefix $path_portion -}} + {{- end -}} + + {{ $uri }} +{{- end }} diff --git a/charts/deckhouse_lib_helm/templates/_module_ephemeral_storage.tpl b/charts/deckhouse_lib_helm/templates/_module_ephemeral_storage.tpl new file mode 100644 index 0000000..4b2dd02 --- /dev/null +++ b/charts/deckhouse_lib_helm/templates/_module_ephemeral_storage.tpl @@ -0,0 +1,15 @@ +{{- /* Usage: {{ include "helm_lib_module_ephemeral_storage_logs_with_extra" 10 }} */ -}} +{{- /* 50Mi for container logs `log-opts.max-file * log-opts.max-size` would be added to passed value */ -}} +{{- /* returns ephemeral-storage size for logs with extra space */ -}} +{{- define "helm_lib_module_ephemeral_storage_logs_with_extra" -}} +{{- /* Extra space in mebibytes */ -}} +ephemeral-storage: {{ add . 50 }}Mi +{{- end }} + +{{- /* Usage: {{ include "helm_lib_module_ephemeral_storage_only_logs" . }} */ -}} +{{- /* 50Mi for container logs `log-opts.max-file * log-opts.max-size` would be requested */ -}} +{{- /* returns ephemeral-storage size for only logs */ -}} +{{- define "helm_lib_module_ephemeral_storage_only_logs" -}} +{{- /* Template context with .Values, .Chart, etc */ -}} +ephemeral-storage: 50Mi +{{- end }} diff --git a/charts/deckhouse_lib_helm/templates/_module_generate_common_name.tpl b/charts/deckhouse_lib_helm/templates/_module_generate_common_name.tpl new file mode 100644 index 0000000..fb142f8 --- /dev/null +++ b/charts/deckhouse_lib_helm/templates/_module_generate_common_name.tpl @@ -0,0 +1,13 @@ +{{- /* Usage: {{ include "helm_lib_module_generate_common_name" (list . "") }} */ -}} +{{- /* returns the commonName parameter for use in the Certificate custom resource(cert-manager) */ -}} +{{- define "helm_lib_module_generate_common_name" }} + {{- $context := index . 0 -}} {{- /* Template context with .Values, .Chart, etc */ -}} + {{- $name_portion := index . 1 -}} {{- /* Name portion */ -}} + + {{- $domain := include "helm_lib_module_public_domain" (list $context $name_portion) -}} + + {{- $domain_length := len $domain -}} + {{- if le $domain_length 64 -}} +commonName: {{ $domain }} + {{- end -}} +{{- end }} diff --git a/charts/deckhouse_lib_helm/templates/_module_https.tpl b/charts/deckhouse_lib_helm/templates/_module_https.tpl new file mode 100644 index 0000000..8ee41ef --- /dev/null +++ b/charts/deckhouse_lib_helm/templates/_module_https.tpl @@ -0,0 +1,160 @@ +{{- /* Usage: {{ include "helm_lib_module_uri_scheme" . }} */ -}} +{{- /* return module uri scheme "http" or "https" */ -}} +{{- define "helm_lib_module_uri_scheme" -}} + {{- $context := . 
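+{{- /* Illustrative sketch: the ephemeral-storage helpers slot into resources.requests, e.g. */ -}}
+{{- /*   resources: */ -}}
+{{- /*     requests: */ -}}
+{{- /*       {{- include "helm_lib_module_ephemeral_storage_logs_with_extra" 10 | nindent 6 }}  # renders "ephemeral-storage: 60Mi" (10Mi extra + 50Mi for logs) */ -}}
+{{- /* and a documentation link can be built with {{ include "helm_lib_module_documentation_uri" (list . "/modules/csi-ceph/") }} (the path portion here is only an example). */ -}}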
-}} {{- /* Template context with .Values, .Chart, etc */ -}} + {{- $mode := "" -}} + + {{- $module_values := (index $context.Values (include "helm_lib_module_camelcase_name" $context)) -}} + {{- if hasKey $module_values "https" -}} + {{- if hasKey $module_values.https "mode" -}} + {{- $mode = $module_values.https.mode -}} + {{- else }} + {{- $mode = $context.Values.global.modules.https.mode | default "" -}} + {{- end }} + {{- else }} + {{- $mode = $context.Values.global.modules.https.mode | default "" -}} + {{- end }} + + + {{- if eq "Disabled" $mode -}} + http + {{- else -}} + https + {{- end -}} +{{- end -}} + +{{- /* Usage: {{ $https_values := include "helm_lib_https_values" . | fromYaml }} */ -}} +{{- define "helm_lib_https_values" -}} + {{- $context := . -}} + {{- $module_values := (index $context.Values (include "helm_lib_module_camelcase_name" $context)) -}} + {{- $mode := "" -}} + {{- $certManagerClusterIssuerName := "" -}} + + {{- if hasKey $module_values "https" -}} + {{- if hasKey $module_values.https "mode" -}} + {{- $mode = $module_values.https.mode -}} + {{- if eq $mode "CertManager" -}} + {{- if not (hasKey $module_values.https "certManager") -}} + {{- cat ".https.certManager.clusterIssuerName is mandatory when .https.mode is set to CertManager" | fail -}} + {{- end -}} + {{- if hasKey $module_values.https.certManager "clusterIssuerName" -}} + {{- $certManagerClusterIssuerName = $module_values.https.certManager.clusterIssuerName -}} + {{- else -}} + {{- cat ".https.certManager.clusterIssuerName is mandatory when .https.mode is set to CertManager" | fail -}} + {{- end -}} + {{- end -}} + {{- else -}} + {{- cat ".https.mode is mandatory when .https is defined" | fail -}} + {{- end -}} + {{- end -}} + + {{- if empty $mode -}} + {{- $mode = $context.Values.global.modules.https.mode -}} + {{- if eq $mode "CertManager" -}} + {{- $certManagerClusterIssuerName = $context.Values.global.modules.https.certManager.clusterIssuerName -}} + {{- end -}} + {{- end -}} + + {{- if not (has $mode (list "Disabled" "CertManager" "CustomCertificate" "OnlyInURI")) -}} + {{- cat "Unknown https.mode:" $mode | fail -}} + {{- end -}} + + {{- if and (eq $mode "CertManager") (not ($context.Values.global.enabledModules | has "cert-manager")) -}} + {{- cat "https.mode has value CertManager but cert-manager module not enabled" | fail -}} + {{- end -}} + +mode: {{ $mode }} + {{- if eq $mode "CertManager" }} +certManager: + clusterIssuerName: {{ $certManagerClusterIssuerName }} + {{- end -}} + +{{- end -}} + +{{- /* Usage: {{ if (include "helm_lib_module_https_mode" .) }} */ -}} +{{- /* returns https mode for module */ -}} +{{- define "helm_lib_module_https_mode" -}} + {{- $context := . -}} {{- /* Template context with .Values, .Chart, etc */ -}} + {{- $https_values := include "helm_lib_https_values" $context | fromYaml -}} + {{- $https_values.mode -}} +{{- end -}} + +{{- /* Usage: {{ include "helm_lib_module_https_cert_manager_cluster_issuer_name" . }} */ -}} +{{- /* returns cluster issuer name */ -}} +{{- define "helm_lib_module_https_cert_manager_cluster_issuer_name" -}} + {{- $context := . -}} {{- /* Template context with .Values, .Chart, etc */ -}} + {{- $https_values := include "helm_lib_https_values" $context | fromYaml -}} + {{- $https_values.certManager.clusterIssuerName -}} +{{- end -}} + +{{- /* Usage: {{ if (include "helm_lib_module_https_cert_manager_cluster_issuer_is_dns01_challenge_solver" .) 
}} */ -}} +{{- define "helm_lib_module_https_cert_manager_cluster_issuer_is_dns01_challenge_solver" -}} + {{- $context := . -}} {{- /* Template context with .Values, .Chart, etc */ -}} + {{- if has (include "helm_lib_module_https_cert_manager_cluster_issuer_name" $context) (list "route53" "cloudflare" "digitalocean" "clouddns") }} + "not empty string" + {{- end -}} +{{- end -}} + +{{- /* Usage: {{ include "helm_lib_module_https_cert_manager_acme_solver_challenge_settings" . | nindent 4 }} */ -}} +{{- define "helm_lib_module_https_cert_manager_acme_solver_challenge_settings" -}} + {{- $context := . -}} {{- /* Template context with .Values, .Chart, etc */ -}} + {{- if (include "helm_lib_module_https_cert_manager_cluster_issuer_is_dns01_challenge_solver" $context) }} +- dns01: + provider: {{ include "helm_lib_module_https_cert_manager_cluster_issuer_name" $context }} + {{- else }} +- http01: + ingressClass: {{ include "helm_lib_module_ingress_class" $context | quote }} + {{- end }} +{{- end -}} + +{{- /* Usage: {{ if (include "helm_lib_module_https_ingress_tls_enabled" .) }} */ -}} +{{- /* returns not empty string if tls should enable for ingress */ -}} +{{- define "helm_lib_module_https_ingress_tls_enabled" -}} + {{- $context := . -}} {{- /* Template context with .Values, .Chart, etc */ -}} + + {{- $mode := include "helm_lib_module_https_mode" $context -}} + + {{- if or (eq "CertManager" $mode) (eq "CustomCertificate" $mode) -}} + not empty string + {{- end -}} +{{- end -}} + +{{- /* Usage: {{ include "helm_lib_module_https_copy_custom_certificate" (list . "namespace" "secret_name_prefix") }} */ -}} +{{- /* Renders secret with [custom certificate](https://deckhouse.io/documentation/v1/deckhouse-configure-global.html#parameters-modules-https-customcertificate) */ -}} +{{- /* in passed namespace with passed prefix */ -}} +{{- define "helm_lib_module_https_copy_custom_certificate" -}} + {{- $context := index . 0 -}} {{- /* Template context with .Values, .Chart, etc */ -}} + {{- $namespace := index . 1 -}} {{- /* Namespace */ -}} + {{- $secret_name_prefix := index . 2 -}} {{- /* Secret name prefix */ -}} + {{- $mode := include "helm_lib_module_https_mode" $context -}} + {{- if eq $mode "CustomCertificate" -}} + {{- $module_values := (index $context.Values (include "helm_lib_module_camelcase_name" $context)) -}} + {{- $secret_name := include "helm_lib_module_https_secret_name" (list $context $secret_name_prefix) -}} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secret_name }} + namespace: {{ $namespace }} + {{- include "helm_lib_module_labels" (list $context) | nindent 2 }} +type: kubernetes.io/tls +data: {{ $module_values.internal.customCertificateData | toJson }} + {{- end -}} +{{- end -}} + +{{- /* Usage: {{ include "helm_lib_module_https_secret_name (list . "secret_name_prefix") }} */ -}} +{{- /* returns custom certificate name */ -}} +{{- define "helm_lib_module_https_secret_name" -}} + {{- $context := index . 0 -}} {{- /* Template context with .Values, .Chart, etc */ -}} + {{- $secret_name_prefix := index . 
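+{{- /* Illustrative sketch (hypothetical namespace and secret prefix): an Ingress-serving module typically ties these helpers together as */ -}}
+{{- /*   {{- if (include "helm_lib_module_https_ingress_tls_enabled" .) }} */ -}}
+{{- /*   tls: */ -}}
+{{- /*   - hosts: [ {{ include "helm_lib_module_public_domain" (list . "myapp") | quote }} ] */ -}}
+{{- /*     secretName: {{ include "helm_lib_module_https_secret_name" (list . "ingress-tls") }} */ -}}
+{{- /*   {{- end }} */ -}}
+{{- /*   {{- include "helm_lib_module_https_copy_custom_certificate" (list . "d8-my-module" "ingress-tls") }} */ -}}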
1 -}} {{- /* Secret name prefix */ -}} + {{- $mode := include "helm_lib_module_https_mode" $context -}} + {{- if eq $mode "CertManager" -}} + {{- $secret_name_prefix -}} + {{- else -}} + {{- if eq $mode "CustomCertificate" -}} + {{- printf "%s-customcertificate" $secret_name_prefix -}} + {{- else -}} + {{- fail "https.mode must be CustomCertificate or CertManager" -}} + {{- end -}} + {{- end -}} +{{- end -}} diff --git a/charts/deckhouse_lib_helm/templates/_module_image.tpl b/charts/deckhouse_lib_helm/templates/_module_image.tpl new file mode 100644 index 0000000..bdf29f0 --- /dev/null +++ b/charts/deckhouse_lib_helm/templates/_module_image.tpl @@ -0,0 +1,76 @@ +{{- /* Usage: {{ include "helm_lib_module_image" (list . "") }} */ -}} +{{- /* returns image name */ -}} +{{- define "helm_lib_module_image" }} + {{- $context := index . 0 }} {{- /* Template context with .Values, .Chart, etc */ -}} + {{- $containerName := index . 1 | trimAll "\"" }} {{- /* Container name */ -}} + {{- $moduleName := (include "helm_lib_module_camelcase_name" $context) }} + {{- if ge (len .) 3 }} + {{- $moduleName = (include "helm_lib_module_camelcase_name" (index . 2)) }} {{- /* Optional module name */ -}} + {{- end }} + {{- $imageDigest := index $context.Values.global.modulesImages.digests $moduleName $containerName }} + {{- if not $imageDigest }} + {{- $error := (printf "Image %s.%s has no digest" $moduleName $containerName ) }} + {{- fail $error }} + {{- end }} + {{- $registryBase := $context.Values.global.modulesImages.registry.base }} + {{- /* handle external modules registry */}} + {{- if index $context.Values $moduleName }} + {{- if index $context.Values $moduleName "registry" }} + {{- if index $context.Values $moduleName "registry" "base" }} + {{- $host := trimAll "/" (index $context.Values $moduleName "registry" "base") }} + {{- $path := trimAll "/" $context.Chart.Name }} + {{- $registryBase = join "/" (list $host $path) }} + {{- end }} + {{- end }} + {{- end }} + {{- printf "%s@%s" $registryBase $imageDigest }} +{{- end }} + +{{- /* Usage: {{ include "helm_lib_module_image_no_fail" (list . "") }} */ -}} +{{- /* returns image name if found */ -}} +{{- define "helm_lib_module_image_no_fail" }} + {{- $context := index . 0 }} {{- /* Template context with .Values, .Chart, etc */ -}} + {{- $containerName := index . 1 | trimAll "\"" }} {{- /* Container name */ -}} + {{- $moduleName := (include "helm_lib_module_camelcase_name" $context) }} + {{- if ge (len .) 3 }} + {{- $moduleName = (include "helm_lib_module_camelcase_name" (index . 2)) }} {{- /* Optional module name */ -}} + {{- end }} + {{- $imageDigest := index $context.Values.global.modulesImages.digests $moduleName $containerName }} + {{- if $imageDigest }} + {{- $registryBase := $context.Values.global.modulesImages.registry.base }} + {{- if index $context.Values $moduleName }} + {{- if index $context.Values $moduleName "registry" }} + {{- if index $context.Values $moduleName "registry" "base" }} + {{- $host := trimAll "/" (index $context.Values $moduleName "registry" "base") }} + {{- $path := trimAll "/" $context.Chart.Name }} + {{- $registryBase = join "/" (list $host $path) }} + {{- end }} + {{- end }} + {{- end }} + {{- printf "%s@%s" $registryBase $imageDigest }} + {{- end }} +{{- end }} + +{{- /* Usage: {{ include "helm_lib_module_common_image" (list . "") }} */ -}} +{{- /* returns image name from common module */ -}} +{{- define "helm_lib_module_common_image" }} + {{- $context := index . 
0 }} {{- /* Template context with .Values, .Chart, etc */ -}} + {{- $containerName := index . 1 | trimAll "\"" }} {{- /* Container name */ -}} + {{- $imageDigest := index $context.Values.global.modulesImages.digests "common" $containerName }} + {{- if not $imageDigest }} + {{- $error := (printf "Image %s.%s has no digest" "common" $containerName ) }} + {{- fail $error }} + {{- end }} + {{- printf "%s@%s" $context.Values.global.modulesImages.registry.base $imageDigest }} +{{- end }} + +{{- /* Usage: {{ include "helm_lib_module_common_image_no_fail" (list . "") }} */ -}} +{{- /* returns image name from common module if found */ -}} +{{- define "helm_lib_module_common_image_no_fail" }} + {{- $context := index . 0 }} {{- /* Template context with .Values, .Chart, etc */ -}} + {{- $containerName := index . 1 | trimAll "\"" }} {{- /* Container name */ -}} + {{- $imageDigest := index $context.Values.global.modulesImages.digests "common" $containerName }} + {{- if $imageDigest }} + {{- printf "%s@%s" $context.Values.global.modulesImages.registry.base $imageDigest }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/deckhouse_lib_helm/templates/_module_ingress_class.tpl b/charts/deckhouse_lib_helm/templates/_module_ingress_class.tpl new file mode 100644 index 0000000..db7f50b --- /dev/null +++ b/charts/deckhouse_lib_helm/templates/_module_ingress_class.tpl @@ -0,0 +1,13 @@ +{{- /* Usage: {{ include "helm_lib_module_ingress_class" . }} */ -}} +{{- /* returns ingress class from module settings or if not exists from global config */ -}} +{{- define "helm_lib_module_ingress_class" -}} + {{- $context := . -}} {{- /* Template context with .Values, .Chart, etc */ -}} + + {{- $module_values := (index $context.Values (include "helm_lib_module_camelcase_name" $context)) -}} + + {{- if hasKey $module_values "ingressClass" -}} + {{- $module_values.ingressClass -}} + {{- else if hasKey $context.Values.global.modules "ingressClass" -}} + {{- $context.Values.global.modules.ingressClass -}} + {{- end -}} +{{- end -}} diff --git a/charts/deckhouse_lib_helm/templates/_module_init_container.tpl b/charts/deckhouse_lib_helm/templates/_module_init_container.tpl new file mode 100644 index 0000000..9b3fe00 --- /dev/null +++ b/charts/deckhouse_lib_helm/templates/_module_init_container.tpl @@ -0,0 +1,56 @@ +{{- /* ### Migration 11.12.2020: Remove this helper with all its usages after this commit reached RockSolid */ -}} +{{- /* Usage: {{ include "helm_lib_module_init_container_chown_nobody_volume" (list . "volume-name") }} */ -}} +{{- /* returns initContainer which chowns recursively all files and directories in passed volume */ -}} +{{- define "helm_lib_module_init_container_chown_nobody_volume" }} + {{- $context := index . 0 -}} + {{- $volume_name := index . 1 -}} +- name: chown-volume-{{ $volume_name }} + image: {{ include "helm_lib_module_common_image" (list $context "alpine") }} + command: ["sh", "-c", "chown -R 65534:65534 /tmp/{{ $volume_name }}"] + securityContext: + runAsNonRoot: false + runAsUser: 0 + runAsGroup: 0 + volumeMounts: + - name: {{ $volume_name }} + mountPath: /tmp/{{ $volume_name }} + resources: + requests: + {{- include "helm_lib_module_ephemeral_storage_only_logs" . | nindent 6 }} +{{- end }} + +{{- /* Usage: {{ include "helm_lib_module_init_container_chown_deckhouse_volume" (list . 
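+{{- /* Illustrative sketch: images are always referenced by digest through these helpers ("controller" is a hypothetical image name of the current module; "alpine" lives in the common module): */ -}}
+{{- /*   image: {{ include "helm_lib_module_image" (list . "controller") }} */ -}}
+{{- /*   image: {{ include "helm_lib_module_common_image" (list . "alpine") }} */ -}}
+{{- /* The plain variants fail the render when the digest is missing; the *_no_fail variants render an empty string instead, so optional containers can be wrapped in an `if`. */ -}}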
"volume-name") }} */ -}} +{{- /* returns initContainer which chowns recursively all files and directories in passed volume */ -}} +{{- define "helm_lib_module_init_container_chown_deckhouse_volume" }} + {{- $context := index . 0 -}} + {{- $volume_name := index . 1 -}} +- name: chown-volume-{{ $volume_name }} + image: {{ include "helm_lib_module_common_image" (list $context "alpine") }} + command: ["sh", "-c", "chown -R 64535:64535 /tmp/{{ $volume_name }}"] + securityContext: + runAsNonRoot: false + runAsUser: 0 + runAsGroup: 0 + volumeMounts: + - name: {{ $volume_name }} + mountPath: /tmp/{{ $volume_name }} + resources: + requests: + {{- include "helm_lib_module_ephemeral_storage_only_logs" . | nindent 6 }} +{{- end }} + +{{- /* Usage: {{ include "helm_lib_module_init_container_check_linux_kernel" (list . ">= 4.9.17") }} */ -}} +{{- /* returns initContainer which checks the kernel version on the node for compliance to semver constraint */ -}} +{{- define "helm_lib_module_init_container_check_linux_kernel" }} + {{- $context := index . 0 -}} {{- /* Template context with .Values, .Chart, etc */ -}} + {{- $semver_constraint := index . 1 -}} {{- /* Semver constraint */ -}} +- name: check-linux-kernel + image: {{ include "helm_lib_module_common_image" (list $context "checkKernelVersion") }} + {{- include "helm_lib_module_pod_security_context_run_as_user_deckhouse" . | nindent 2 }} + env: + - name: KERNEL_CONSTRAINT + value: {{ $semver_constraint | quote }} + resources: + requests: + {{- include "helm_lib_module_ephemeral_storage_only_logs" $context | nindent 6 }} +{{- end }} diff --git a/charts/deckhouse_lib_helm/templates/_module_labels.tpl b/charts/deckhouse_lib_helm/templates/_module_labels.tpl new file mode 100644 index 0000000..228dcf3 --- /dev/null +++ b/charts/deckhouse_lib_helm/templates/_module_labels.tpl @@ -0,0 +1,15 @@ +{{- /* Usage: {{ include "helm_lib_module_labels" (list . (dict "app" "test" "component" "testing")) }} */ -}} +{{- /* returns deckhouse labels */ -}} +{{- define "helm_lib_module_labels" }} + {{- $context := index . 0 -}} {{- /* Template context with .Values, .Chart, etc */ -}} + {{- /* Additional labels dict */ -}} +labels: + heritage: deckhouse + module: {{ $context.Chart.Name }} + {{- if eq (len .) 2 }} + {{- $deckhouse_additional_labels := index . 1 }} + {{- range $key, $value := $deckhouse_additional_labels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} +{{- end }} diff --git a/charts/deckhouse_lib_helm/templates/_module_name.tpl b/charts/deckhouse_lib_helm/templates/_module_name.tpl new file mode 100644 index 0000000..0fecf05 --- /dev/null +++ b/charts/deckhouse_lib_helm/templates/_module_name.tpl @@ -0,0 +1,11 @@ +{{- define "helm_lib_module_camelcase_name" -}} + +{{- $moduleName := "" -}} +{{- if (kindIs "string" .) -}} +{{- $moduleName = . | trimAll "\"" -}} +{{- else -}} +{{- $moduleName = .Chart.Name -}} +{{- end -}} + +{{ $moduleName | replace "-" "_" | camelcase | untitle }} +{{- end -}} diff --git a/charts/deckhouse_lib_helm/templates/_module_public_domain.tpl b/charts/deckhouse_lib_helm/templates/_module_public_domain.tpl new file mode 100644 index 0000000..bfbaae7 --- /dev/null +++ b/charts/deckhouse_lib_helm/templates/_module_public_domain.tpl @@ -0,0 +1,11 @@ +{{- /* Usage: {{ include "helm_lib_module_public_domain" (list . "") }} */ -}} +{{- /* returns rendered publicDomainTemplate to service fqdn */ -}} +{{- define "helm_lib_module_public_domain" }} + {{- $context := index . 
0 -}} {{- /* Template context with .Values, .Chart, etc */ -}} + {{- $name_portion := index . 1 -}} {{- /* Name portion */ -}} + + {{- if not (contains "%s" $context.Values.global.modules.publicDomainTemplate) }} + {{ fail "Error!!! global.modules.publicDomainTemplate must contain \"%s\" pattern to render service fqdn!" }} + {{- end }} + {{- printf $context.Values.global.modules.publicDomainTemplate $name_portion }} +{{- end }} diff --git a/charts/deckhouse_lib_helm/templates/_module_security_context.tpl b/charts/deckhouse_lib_helm/templates/_module_security_context.tpl new file mode 100644 index 0000000..8c5fcb8 --- /dev/null +++ b/charts/deckhouse_lib_helm/templates/_module_security_context.tpl @@ -0,0 +1,183 @@ +{{- /* Usage: {{ include "helm_lib_module_pod_security_context_run_as_user_custom" (list . 1000 1000) }} */ -}} +{{- /* returns PodSecurityContext parameters for Pod with custom user and group */ -}} +{{- define "helm_lib_module_pod_security_context_run_as_user_custom" -}} +{{- /* Template context with .Values, .Chart, etc */ -}} +{{- /* User id */ -}} +{{- /* Group id */ -}} +securityContext: + runAsNonRoot: true + runAsUser: {{ index . 1 }} + runAsGroup: {{ index . 2 }} +{{- end }} + +{{- /* Usage: {{ include "helm_lib_module_pod_security_context_run_as_user_nobody" . }} */ -}} +{{- /* returns PodSecurityContext parameters for Pod with user and group "nobody" */ -}} +{{- define "helm_lib_module_pod_security_context_run_as_user_nobody" -}} +{{- /* Template context with .Values, .Chart, etc */ -}} +securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 +{{- end }} + +{{- /* Usage: {{ include "helm_lib_module_pod_security_context_run_as_user_nobody_with_writable_fs" . }} */ -}} +{{- /* returns PodSecurityContext parameters for Pod with user and group "nobody" with write access to mounted volumes */ -}} +{{- define "helm_lib_module_pod_security_context_run_as_user_nobody_with_writable_fs" -}} +{{- /* Template context with .Values, .Chart, etc */ -}} +securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + fsGroup: 65534 +{{- end }} + +{{- /* Usage: {{ include "helm_lib_module_pod_security_context_run_as_user_deckhouse" . }} */ -}} +{{- /* returns PodSecurityContext parameters for Pod with user and group "deckhouse" */ -}} +{{- define "helm_lib_module_pod_security_context_run_as_user_deckhouse" -}} +{{- /* Template context with .Values, .Chart, etc */ -}} +securityContext: + runAsNonRoot: true + runAsUser: 64535 + runAsGroup: 64535 +{{- end }} + +{{- /* Usage: {{ include "helm_lib_module_pod_security_context_run_as_user_deckhouse_with_writable_fs" . }} */ -}} +{{- /* returns PodSecurityContext parameters for Pod with user and group "deckhouse" with write access to mounted volumes */ -}} +{{- define "helm_lib_module_pod_security_context_run_as_user_deckhouse_with_writable_fs" -}} +{{- /* Template context with .Values, .Chart, etc */ -}} +securityContext: + runAsNonRoot: true + runAsUser: 64535 + runAsGroup: 64535 + fsGroup: 64535 +{{- end }} + +{{- /* Usage: {{ include "helm_lib_module_pod_security_context_run_as_user_root" . }} */ -}} +{{- /* returns PodSecurityContext parameters for Pod with user and group 0 */ -}} +{{- define "helm_lib_module_pod_security_context_run_as_user_root" -}} +{{- /* Template context with .Values, .Chart, etc */ -}} +securityContext: + runAsNonRoot: false + runAsUser: 0 + runAsGroup: 0 +{{- end }} + +{{- /* Usage: {{ include "helm_lib_module_pod_security_context_runtime_default" . 
}} */ -}} +{{- /* returns PodSecurityContext parameters for Pod with seccomp profile RuntimeDefault */ -}} +{{- define "helm_lib_module_pod_security_context_runtime_default" -}} +{{- /* Template context with .Values, .Chart, etc */ -}} +securityContext: + seccompProfile: + type: RuntimeDefault +{{- end }} + +{{- /* Usage: {{ include "helm_lib_module_container_security_context_not_allow_privilege_escalation" . }} */ -}} +{{- /* returns SecurityContext parameters for Container with allowPrivilegeEscalation false */ -}} +{{- define "helm_lib_module_container_security_context_not_allow_privilege_escalation" -}} +securityContext: + allowPrivilegeEscalation: false +{{- end }} + +{{- /* Usage: {{ include "helm_lib_module_container_security_context_read_only_root_filesystem_with_selinux" . }} */ -}} +{{- /* returns SecurityContext parameters for Container with read only root filesystem and options for SELinux compatibility*/ -}} +{{- define "helm_lib_module_container_security_context_read_only_root_filesystem_with_selinux" -}} +{{- /* Template context with .Values, .Chart, etc */ -}} +securityContext: + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + seLinuxOptions: + level: 's0' + type: 'spc_t' +{{- end }} + +{{- /* Usage: {{ include "helm_lib_module_container_security_context_read_only_root_filesystem" . }} */ -}} +{{- /* returns SecurityContext parameters for Container with read only root filesystem */ -}} +{{- define "helm_lib_module_container_security_context_read_only_root_filesystem" -}} +{{- /* Template context with .Values, .Chart, etc */ -}} +securityContext: + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false +{{- end }} + +{{- /* Usage: {{ include "helm_lib_module_container_security_context_privileged" . }} */ -}} +{{- /* returns SecurityContext parameters for Container running privileged */ -}} +{{- define "helm_lib_module_container_security_context_privileged" -}} +securityContext: + privileged: true +{{- end }} + +{{- /* Usage: {{ include "helm_lib_module_container_security_context_escalated_sys_admin_privileged" . }} */ -}} +{{- /* returns SecurityContext parameters for Container running privileged with escalation and sys_admin */ -}} +{{- define "helm_lib_module_container_security_context_escalated_sys_admin_privileged" -}} +securityContext: + allowPrivilegeEscalation: true + capabilities: + add: + - SYS_ADMIN + privileged: true +{{- end }} + +{{- /* Usage: {{ include "helm_lib_module_container_security_context_privileged_read_only_root_filesystem" . }} */ -}} +{{- /* returns SecurityContext parameters for Container running privileged with read only root filesystem */ -}} +{{- define "helm_lib_module_container_security_context_privileged_read_only_root_filesystem" -}} +{{- /* Template context with .Values, .Chart, etc */ -}} +securityContext: + privileged: true + readOnlyRootFilesystem: true +{{- end }} + +{{- /* Usage: {{ include "helm_lib_module_container_security_context_read_only_root_filesystem_capabilities_drop_all" . 
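+{{- /* Illustrative sketch (container name is hypothetical): pod-level and container-level helpers are combined inside one pod template, e.g. */ -}}
+{{- /*   spec: */ -}}
+{{- /*     {{- include "helm_lib_module_pod_security_context_run_as_user_deckhouse" . | nindent 6 }} */ -}}
+{{- /*     containers: */ -}}
+{{- /*     - name: controller */ -}}
+{{- /*       {{- include "helm_lib_module_container_security_context_read_only_root_filesystem_capabilities_drop_all" . | nindent 8 }} */ -}}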
}} */ -}} +{{- /* returns SecurityContext for Container with read only root filesystem and all capabilities dropped */ -}} +{{- define "helm_lib_module_container_security_context_read_only_root_filesystem_capabilities_drop_all" -}} +{{- /* Template context with .Values, .Chart, etc */ -}} +securityContext: + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL +{{- end }} + +{{- /* Usage: {{ include "helm_lib_module_container_security_context_read_only_root_filesystem_capabilities_drop_all_and_add" (list . (list "KILL" "SYS_PTRACE")) }} */ -}} +{{- /* returns SecurityContext parameters for Container with read only root filesystem, all dropped and some added capabilities */ -}} +{{- define "helm_lib_module_container_security_context_read_only_root_filesystem_capabilities_drop_all_and_add" -}} +{{- /* Template context with .Values, .Chart, etc */ -}} +{{- /* List of capabilities */ -}} +securityContext: + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + add: {{ index . 1 | toJson }} +{{- end }} + +{{- /* Usage: {{ include "helm_lib_module_container_security_context_capabilities_drop_all_and_add" (list . (list "KILL" "SYS_PTRACE")) }} */ -}} +{{- /* returns SecurityContext parameters for Container with all dropped and some added capabilities */ -}} +{{- define "helm_lib_module_container_security_context_capabilities_drop_all_and_add" -}} +{{- /* Template context with .Values, .Chart, etc */ -}} +{{- /* List of capabilities */ -}} +securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + add: {{ index . 1 | toJson }} +{{- end }} + +{{- /* Usage: {{ include "helm_lib_module_container_security_context_capabilities_drop_all_and_run_as_user_custom" (list . 1000 1000) }} */ -}} +{{- /* returns SecurityContext parameters for Container with read only root filesystem, all dropped, and custom user ID */ -}} +{{- define "helm_lib_module_container_security_context_capabilities_drop_all_and_run_as_user_custom" -}} +{{- /* Template context with .Values, .Chart, etc */ -}} +{{- /* User id */ -}} +{{- /* Group id */ -}} +securityContext: + runAsUser: {{ index . 1 }} + runAsGroup: {{ index . 2 }} + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL +{{- end }} diff --git a/charts/deckhouse_lib_helm/templates/_module_storage_class.tpl b/charts/deckhouse_lib_helm/templates/_module_storage_class.tpl new file mode 100644 index 0000000..cf761a5 --- /dev/null +++ b/charts/deckhouse_lib_helm/templates/_module_storage_class.tpl @@ -0,0 +1,38 @@ +{{- /* Usage: {{ include "helm_lib_module_storage_class_annotations" (list $ $index $storageClass.name) }} */ -}} +{{- /* return module StorageClass annotations */ -}} +{{- define "helm_lib_module_storage_class_annotations" -}} + {{- $context := index . 0 -}} {{- /* Template context with .Values, .Chart, etc */ -}} + {{- $sc_index := index . 1 -}} {{- /* Storage class index */ -}} + {{- $sc_name := index . 
2 -}} {{- /* Storage class name */ -}} + {{- $module_values := (index $context.Values (include "helm_lib_module_camelcase_name" $context)) -}} + {{- $annotations := dict -}} + + {{- $volume_expansion_mode_offline := false -}} + {{- range $module_name := list "cloud-provider-azure" "cloud-provider-yandex" "cloud-provider-vsphere" "cloud-provider-vcd"}} + {{- if has $module_name $context.Values.global.enabledModules }} + {{- $volume_expansion_mode_offline = true }} + {{- end }} + {{- end }} + + {{- if $volume_expansion_mode_offline }} + {{- $_ := set $annotations "storageclass.deckhouse.io/volume-expansion-mode" "offline" }} + {{- end }} + + {{- if hasKey $module_values.internal "defaultStorageClass" }} + {{- if eq $module_values.internal.defaultStorageClass $sc_name }} + {{- $_ := set $annotations "storageclass.kubernetes.io/is-default-class" "true" }} + {{- end }} + {{- else }} + {{- if eq $sc_index 0 }} + {{- if $context.Values.global.discovery.defaultStorageClass }} + {{- if eq $context.Values.global.discovery.defaultStorageClass $sc_name }} + {{- $_ := set $annotations "storageclass.kubernetes.io/is-default-class" "true" }} + {{- end }} + {{- else }} + {{- $_ := set $annotations "storageclass.kubernetes.io/is-default-class" "true" }} + {{- end }} + {{- end }} + {{- end }} + +{{- (dict "annotations" $annotations) | toYaml -}} +{{- end -}} diff --git a/charts/deckhouse_lib_helm/templates/_monitoring_grafana_dashboards.tpl b/charts/deckhouse_lib_helm/templates/_monitoring_grafana_dashboards.tpl new file mode 100644 index 0000000..ebbcefb --- /dev/null +++ b/charts/deckhouse_lib_helm/templates/_monitoring_grafana_dashboards.tpl @@ -0,0 +1,68 @@ +{{- /* Usage: {{ include "helm_lib_grafana_dashboard_definitions_recursion" (list . [current dir]) }} */ -}} +{{- /* returns all the dashboard-definintions from / */ -}} +{{- /* current dir is optional — used for recursion but you can use it for partially generating dashboards */ -}} +{{- define "helm_lib_grafana_dashboard_definitions_recursion" -}} + {{- $context := index . 0 }} {{- /* Template context with .Values, .Chart, etc */ -}} + {{- $rootDir := index . 1 }} {{- /* Dashboards root dir */ -}} + {{- /* Dashboards current dir */ -}} + + {{- $currentDir := "" }} + {{- if gt (len .) 2 }} {{- $currentDir = index . 2 }} {{- else }} {{- $currentDir = $rootDir }} {{- end }} + + {{- $currentDirIndex := (sub ($currentDir | splitList "/" | len) 1) }} + {{- $rootDirIndex := (sub ($rootDir | splitList "/" | len) 1) }} + {{- $folderNamesIndex := (add1 $rootDirIndex) }} + + {{- range $path, $_ := $context.Files.Glob (print $currentDir "/*.json") }} + {{- $fileName := ($path | splitList "/" | last ) }} + {{- $definition := ($context.Files.Get $path) }} + + {{- $folder := (index ($currentDir | splitList "/") $folderNamesIndex | replace "-" " " | title) }} + {{- $resourceName := (regexReplaceAllLiteral "\\.json$" $path "") }} + {{- $resourceName = ($resourceName | replace " " "-" | replace "." 
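+{{- /* Illustrative sketch: a module's StorageClass template consumes the annotations helper roughly like this ($index and $sc come from ranging over the module's storage classes): */ -}}
+{{- /*   apiVersion: storage.k8s.io/v1 */ -}}
+{{- /*   kind: StorageClass */ -}}
+{{- /*   metadata: */ -}}
+{{- /*     name: {{ $sc.name }} */ -}}
+{{- /*     {{- include "helm_lib_module_storage_class_annotations" (list $ $index $sc.name) | nindent 4 }} */ -}}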
"-" | replace "_" "-") }} + {{- $resourceName = (slice ($resourceName | splitList "/") $folderNamesIndex | join "-") }} + {{- $resourceName = (printf "%s-%s" $context.Chart.Name $resourceName) }} + +{{ include "helm_lib_single_dashboard" (list $context $resourceName $folder $definition) }} + {{- end }} + + {{- $subDirs := list }} + {{- range $path, $_ := ($context.Files.Glob (print $currentDir "/**.json")) }} + {{- $pathSlice := ($path | splitList "/") }} + {{- $subDirs = append $subDirs (slice $pathSlice 0 (add $currentDirIndex 2) | join "/") }} + {{- end }} + + {{- range $subDir := ($subDirs | uniq) }} +{{ include "helm_lib_grafana_dashboard_definitions_recursion" (list $context $rootDir $subDir) }} + {{- end }} +{{- end }} + + +{{- /* Usage: {{ include "helm_lib_grafana_dashboard_definitions" . }} */ -}} +{{- /* returns dashboard-definintions from monitoring/grafana-dashboards/ */ -}} +{{- define "helm_lib_grafana_dashboard_definitions" -}} + {{- $context := . }} {{- /* Template context with .Values, .Chart, etc */ -}} + {{- if ( $context.Values.global.enabledModules | has "prometheus-crd" ) }} +{{- include "helm_lib_grafana_dashboard_definitions_recursion" (list $context "monitoring/grafana-dashboards") }} + {{- end }} +{{- end }} + + +{{- /* Usage: {{ include "helm_lib_single_dashboard" (list . "dashboard-name" "folder" $dashboard) }} */ -}} +{{- /* renders a single dashboard */ -}} +{{- define "helm_lib_single_dashboard" -}} + {{- $context := index . 0 }} {{- /* Template context with .Values, .Chart, etc */ -}} + {{- $resourceName := index . 1 }} {{- /* Dashboard name */ -}} + {{- $folder := index . 2 }} {{- /* Folder */ -}} + {{- $definition := index . 3 }} {{/* Dashboard definition */}} +--- +apiVersion: deckhouse.io/v1 +kind: GrafanaDashboardDefinition +metadata: + name: d8-{{ $resourceName }} + {{- include "helm_lib_module_labels" (list $context (dict "prometheus.deckhouse.io/grafana-dashboard" "")) | nindent 2 }} +spec: + folder: "{{ $folder }}" + definition: | + {{- $definition | nindent 4 }} +{{- end }} diff --git a/charts/deckhouse_lib_helm/templates/_monitoring_prometheus_rules.tpl b/charts/deckhouse_lib_helm/templates/_monitoring_prometheus_rules.tpl new file mode 100644 index 0000000..794fe30 --- /dev/null +++ b/charts/deckhouse_lib_helm/templates/_monitoring_prometheus_rules.tpl @@ -0,0 +1,96 @@ +{{- /* Usage: {{ include "helm_lib_prometheus_rules_recursion" (list . [current dir]) }} */ -}} +{{- /* returns all the prometheus rules from / */ -}} +{{- /* current dir is optional — used for recursion but you can use it for partially generating rules */ -}} +{{- define "helm_lib_prometheus_rules_recursion" -}} + {{- $context := index . 0 }} {{- /* Template context with .Values, .Chart, etc */ -}} + {{- $namespace := index . 1 }} {{- /* Namespace for creating rules */ -}} + {{- $rootDir := index . 2 }} {{- /* Rules root dir */ -}} + {{- $currentDir := "" }} {{- /* Current dir (optional) */ -}} + {{- if gt (len .) 3 }} {{- $currentDir = index . 3 }} {{- else }} {{- $currentDir = $rootDir }} {{- end }} + {{- $currentDirIndex := (sub ($currentDir | splitList "/" | len) 1) }} + {{- $rootDirIndex := (sub ($rootDir | splitList "/" | len) 1) }} + {{- $folderNamesIndex := (add1 $rootDirIndex) }} + + {{- range $path, $_ := $context.Files.Glob (print $currentDir "/*.{yaml,tpl}") }} + {{- $fileName := ($path | splitList "/" | last ) }} + {{- $definition := "" }} + {{- if eq ($path | splitList "." 
| last) "tpl" -}} + {{- $definition = tpl ($context.Files.Get $path) $context }} + {{- else }} + {{- $definition = $context.Files.Get $path }} + {{- end }} + + {{- $definition = $definition | replace "__SCRAPE_INTERVAL__" (printf "%ds" ($context.Values.global.discovery.prometheusScrapeInterval | default 30)) | replace "__SCRAPE_INTERVAL_X_2__" (printf "%ds" (mul ($context.Values.global.discovery.prometheusScrapeInterval | default 30) 2)) | replace "__SCRAPE_INTERVAL_X_3__" (printf "%ds" (mul ($context.Values.global.discovery.prometheusScrapeInterval | default 30) 3)) | replace "__SCRAPE_INTERVAL_X_4__" (printf "%ds" (mul ($context.Values.global.discovery.prometheusScrapeInterval | default 30) 4)) }} + +{{/* Patch expression based on `d8_ignore_on_update` annotation*/}} + + + {{ $definition = printf "Rules:\n%s" ($definition | nindent 2) }} + {{- $definitionStruct := ( $definition | fromYaml )}} + {{- if $definitionStruct.Error }} + {{- fail ($definitionStruct.Error | toString) }} + {{- end }} + {{- range $rule := $definitionStruct.Rules }} + + {{- range $dedicatedRule := $rule.rules }} + {{- if $dedicatedRule.annotations }} + {{- if (eq (get $dedicatedRule.annotations "d8_ignore_on_update") "true") }} + {{- $_ := set $dedicatedRule "expr" (printf "(%s) and ON() ((max(d8_is_updating) != 1) or ON() absent(d8_is_updating))" $dedicatedRule.expr) }} + {{- end }} + {{- end }} + {{- end }} + + {{- end }} + + {{ $definition = $definitionStruct.Rules | toYaml }} + + {{- $resourceName := (regexReplaceAllLiteral "\\.(yaml|tpl)$" $path "") }} + {{- $resourceName = ($resourceName | replace " " "-" | replace "." "-" | replace "_" "-") }} + {{- $resourceName = (slice ($resourceName | splitList "/") $folderNamesIndex | join "-") }} + {{- $resourceName = (printf "%s-%s" $context.Chart.Name $resourceName) }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ $resourceName }} + namespace: {{ $namespace }} + {{- include "helm_lib_module_labels" (list $context (dict "app" "prometheus" "prometheus" "main" "component" "rules")) | nindent 2 }} +spec: + groups: + {{- $definition | nindent 4 }} + {{- end }} + + {{- $subDirs := list }} + {{- range $path, $_ := ($context.Files.Glob (print $currentDir "/**.{yaml,tpl}")) }} + {{- $pathSlice := ($path | splitList "/") }} + {{- $subDirs = append $subDirs (slice $pathSlice 0 (add $currentDirIndex 2) | join "/") }} + {{- end }} + + {{- range $subDir := ($subDirs | uniq) }} +{{ include "helm_lib_prometheus_rules_recursion" (list $context $namespace $rootDir $subDir) }} + {{- end }} +{{- end }} + + +{{- /* Usage: {{ include "helm_lib_prometheus_rules" (list . ) }} */ -}} +{{- /* returns all the prometheus rules from monitoring/prometheus-rules/ */ -}} +{{- define "helm_lib_prometheus_rules" -}} + {{- $context := index . 0 }} {{- /* Template context with .Values, .Chart, etc */ -}} + {{- $namespace := index . 1 }} {{- /* Namespace for creating rules */ -}} + {{- if ( $context.Values.global.enabledModules | has "operator-prometheus-crd" ) }} +{{- include "helm_lib_prometheus_rules_recursion" (list $context $namespace "monitoring/prometheus-rules") }} + {{- end }} +{{- end }} + +{{- /* Usage: {{ include "helm_lib_prometheus_target_scrape_timeout_seconds" (list . ) }} */ -}} +{{- /* returns adjust timeout value to scrape interval / */ -}} +{{- define "helm_lib_prometheus_target_scrape_timeout_seconds" -}} + {{- $context := index . 0 }} {{- /* Template context with .Values, .Chart, etc */ -}} + {{- $timeout := index . 
1 }} {{- /* Target timeout in seconds */ -}} + {{- $scrape_interval := (int $context.Values.global.discovery.prometheusScrapeInterval | default 30) }} + {{- if gt $timeout $scrape_interval -}} +{{ $scrape_interval }}s + {{- else -}} +{{ $timeout }}s + {{- end }} +{{- end }} diff --git a/charts/deckhouse_lib_helm/templates/_node_affinity.tpl b/charts/deckhouse_lib_helm/templates/_node_affinity.tpl new file mode 100644 index 0000000..cbdd0f9 --- /dev/null +++ b/charts/deckhouse_lib_helm/templates/_node_affinity.tpl @@ -0,0 +1,256 @@ +{{- /* Verify node selector strategy. */ -}} +{{- define "helm_lib_internal_check_node_selector_strategy" -}} + {{ if not (has . (list "frontend" "monitoring" "system" "master" )) }} + {{- fail (printf "unknown strategy \"%v\"" .) }} + {{- end }} + {{- . -}} +{{- end }} + +{{- /* Returns node selector for workloads depend on strategy. */ -}} +{{- define "helm_lib_node_selector" }} + {{- $context := index . 0 }} {{- /* Template context with .Values, .Chart, etc */ -}} + {{- $strategy := index . 1 | include "helm_lib_internal_check_node_selector_strategy" }} {{- /* strategy, one of "frontend" "monitoring" "system" "master" "any-node" "wildcard" */ -}} + {{- $module_values := dict }} + {{- if lt (len .) 3 }} + {{- $module_values = (index $context.Values (include "helm_lib_module_camelcase_name" $context)) }} + {{- else }} + {{- $module_values = index . 2 }} + {{- end }} + {{- $camel_chart_name := (include "helm_lib_module_camelcase_name" $context) }} + + {{- if eq $strategy "monitoring" }} + {{- if $module_values.nodeSelector }} +nodeSelector: {{ $module_values.nodeSelector | toJson }} + {{- else if gt (index $context.Values.global.discovery.d8SpecificNodeCountByRole $camel_chart_name | int) 0 }} +nodeSelector: + node-role.deckhouse.io/{{$context.Chart.Name}}: "" + {{- else if gt (index $context.Values.global.discovery.d8SpecificNodeCountByRole $strategy | int) 0 }} +nodeSelector: + node-role.deckhouse.io/{{$strategy}}: "" + {{- else if gt (index $context.Values.global.discovery.d8SpecificNodeCountByRole "system" | int) 0 }} +nodeSelector: + node-role.deckhouse.io/system: "" + {{- end }} + + {{- else if or (eq $strategy "frontend") (eq $strategy "system") }} + {{- if $module_values.nodeSelector }} +nodeSelector: {{ $module_values.nodeSelector | toJson }} + {{- else if gt (index $context.Values.global.discovery.d8SpecificNodeCountByRole $camel_chart_name | int) 0 }} +nodeSelector: + node-role.deckhouse.io/{{$context.Chart.Name}}: "" + {{- else if gt (index $context.Values.global.discovery.d8SpecificNodeCountByRole $strategy | int) 0 }} +nodeSelector: + node-role.deckhouse.io/{{$strategy}}: "" + {{- end }} + + {{- else if eq $strategy "master" }} + {{- if gt (index $context.Values.global.discovery "clusterMasterCount" | int) 0 }} +nodeSelector: + node-role.kubernetes.io/control-plane: "" + {{- else if gt (index $context.Values.global.discovery.d8SpecificNodeCountByRole "master" | int) 0 }} +nodeSelector: + node-role.deckhouse.io/control-plane: "" + {{- else if gt (index $context.Values.global.discovery.d8SpecificNodeCountByRole "system" | int) 0 }} +nodeSelector: + node-role.deckhouse.io/system: "" + {{- end }} + {{- end }} +{{- end }} + + +{{- /* Returns tolerations for workloads depend on strategy. */ -}} +{{- /* Usage: {{ include "helm_lib_tolerations" (tuple . "any-node" "with-uninitialized" "without-storage-problems") }} */ -}} +{{- define "helm_lib_tolerations" }} + {{- $context := index . 
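+{{- /* Illustrative sketch: {{ include "helm_lib_prometheus_target_scrape_timeout_seconds" (list . 20) }} renders "20s" with the default 30s scrape interval, and is clamped down to the interval itself when the requested timeout exceeds it. */ -}}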
0 }} {{- /* Template context with .Values, .Chart, etc */ -}} + {{- $strategy := index . 1 | include "helm_lib_internal_check_tolerations_strategy" }} {{- /* base strategy, one of "frontend" "monitoring" "system" any-node" "wildcard" */ -}} + {{- $additionalStrategies := tuple }} {{- /* list of additional strategies. To add strategy list it with prefix "with-", to remove strategy list it with prefix "without-". */ -}} + {{- if eq $strategy "custom" }} + {{ if lt (len .) 3 }} + {{- fail (print "additional strategies is required") }} + {{- end }} + {{- else }} + {{- $additionalStrategies = tuple "storage-problems" }} + {{- end }} + {{- $module_values := (index $context.Values (include "helm_lib_module_camelcase_name" $context)) }} + {{- if gt (len .) 2 }} + {{- range $as := slice . 2 (len .) }} + {{- if hasPrefix "with-" $as }} + {{- $additionalStrategies = mustAppend $additionalStrategies (trimPrefix "with-" $as) }} + {{- end }} + {{- if hasPrefix "without-" $as }} + {{- $additionalStrategies = mustWithout $additionalStrategies (trimPrefix "without-" $as) }} + {{- end }} + {{- end }} + {{- end }} +tolerations: + {{- /* Wildcard: gives permissions to schedule on any node with any taints (use with caution) */ -}} + {{- if eq $strategy "wildcard" }} + {{- include "_helm_lib_wildcard_tolerations" $context }} + + {{- else }} + {{- /* Any node: any node in the cluster with any known taints */ -}} + {{- if eq $strategy "any-node" }} + {{- include "_helm_lib_any_node_tolerations" $context }} + + {{- /* Tolerations from module config: overrides below strategies, if there is any toleration specified */ -}} + {{- else if $module_values.tolerations }} + {{- $module_values.tolerations | toYaml | nindent 0 }} + + {{- /* Monitoring: Nodes for monitoring components: prometheus, grafana, kube-state-metrics, etc. */ -}} + {{- else if eq $strategy "monitoring" }} + {{- include "_helm_lib_monitoring_tolerations" $context }} + + {{- /* Frontend: Nodes for ingress-controllers */ -}} + {{- else if eq $strategy "frontend" }} + {{- include "_helm_lib_frontend_tolerations" $context }} + + {{- /* System: Nodes for system components: prometheus, dns, cert-manager */ -}} + {{- else if eq $strategy "system" }} + {{- include "_helm_lib_system_tolerations" $context }} + {{- end }} + + {{- /* Additional strategies */ -}} + {{- range $additionalStrategies -}} + {{- include (printf "_helm_lib_additional_tolerations_%s" (. | replace "-" "_")) $context }} + {{- end }} + {{- end }} +{{- end }} + + +{{- /* Check cluster type. */ -}} +{{- /* Returns not empty string if this is cloud or hybrid cluster */ -}} +{{- define "_helm_lib_cloud_or_hybrid_cluster" }} + {{- if .Values.global.clusterConfiguration }} + {{- if eq .Values.global.clusterConfiguration.clusterType "Cloud" }} + "not empty string" + {{- /* We consider non-cloud clusters with enabled cloud-provider-.* module as Hybrid clusters */ -}} + {{- else }} + {{- range $v := .Values.global.enabledModules }} + {{- if hasPrefix "cloud-provider-" $v }} + "not empty string" + {{- end }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} + +{{- /* Verify base strategy. */ -}} +{{- /* Fails if strategy not in allowed list */ -}} +{{- define "helm_lib_internal_check_tolerations_strategy" -}} + {{ if not (has . (list "custom" "frontend" "monitoring" "system" "any-node" "wildcard" )) }} + {{- fail (printf "unknown strategy \"%v\"" .) }} + {{- end }} + {{- . -}} +{{- end }} + + +{{- /* Base strategy for any uncordoned node in cluster. 
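+{{- /* Illustrative sketch: a system component's pod spec usually pairs the two placement helpers with the same strategy, e.g. */ -}}
+{{- /*   spec: */ -}}
+{{- /*     {{- include "helm_lib_node_selector" (tuple . "system") | nindent 6 }} */ -}}
+{{- /*     {{- include "helm_lib_tolerations" (tuple . "system") | nindent 6 }} */ -}}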
*/ -}} +{{- /* Usage: {{ include "helm_lib_tolerations" (tuple . "any-node") }} */ -}} +{{- define "_helm_lib_any_node_tolerations" }} +- key: node-role.kubernetes.io/master +- key: node-role.kubernetes.io/control-plane +- key: dedicated.deckhouse.io + operator: "Exists" +- key: dedicated + operator: "Exists" +- key: DeletionCandidateOfClusterAutoscaler +- key: ToBeDeletedByClusterAutoscaler + {{- if .Values.global.modules.placement.customTolerationKeys }} + {{- range $key := .Values.global.modules.placement.customTolerationKeys }} +- key: {{ $key | quote }} + operator: "Exists" + {{- end }} + {{- end }} +{{- end }} + +{{- /* Base strategy that tolerates all. */ -}} +{{- /* Usage: {{ include "helm_lib_tolerations" (tuple . "wildcard") }} */ -}} +{{- define "_helm_lib_wildcard_tolerations" }} +- operator: "Exists" +{{- end }} + +{{- /* Base strategy that tolerates nodes with "dedicated.deckhouse.io: monitoring" and "dedicated.deckhouse.io: system" taints. */ -}} +{{- /* Usage: {{ include "helm_lib_tolerations" (tuple . "monitoring") }} */ -}} +{{- define "_helm_lib_monitoring_tolerations" }} +- key: dedicated.deckhouse.io + operator: Equal + value: {{ .Chart.Name | quote }} +- key: dedicated.deckhouse.io + operator: Equal + value: "monitoring" +- key: dedicated.deckhouse.io + operator: Equal + value: "system" +{{- end }} + +{{- /* Base strategy that tolerates nodes with "dedicated.deckhouse.io: frontend" taints. */ -}} +{{- /* Usage: {{ include "helm_lib_tolerations" (tuple . "frontend") }} */ -}} +{{- define "_helm_lib_frontend_tolerations" }} +- key: dedicated.deckhouse.io + operator: Equal + value: {{ .Chart.Name | quote }} +- key: dedicated.deckhouse.io + operator: Equal + value: "frontend" +{{- end }} + +{{- /* Base strategy that tolerates nodes with "dedicated.deckhouse.io: system" taints. */ -}} +{{- /* Usage: {{ include "helm_lib_tolerations" (tuple . "system") }} */ -}} +{{- define "_helm_lib_system_tolerations" }} +- key: dedicated.deckhouse.io + operator: Equal + value: {{ .Chart.Name | quote }} +- key: dedicated.deckhouse.io + operator: Equal + value: "system" +{{- end }} + + +{{- /* Additional strategy "uninitialized" - used for CNI's and kube-proxy to allow cni components scheduled on node after CCM initialization. */ -}} +{{- /* Usage: {{ include "helm_lib_tolerations" (tuple . "any-node" "with-uninitialized") }} */ -}} +{{- define "_helm_lib_additional_tolerations_uninitialized" }} +- key: node.deckhouse.io/uninitialized + operator: "Exists" + effect: "NoSchedule" + {{- if include "_helm_lib_cloud_or_hybrid_cluster" . }} + {{- include "_helm_lib_additional_tolerations_no_csi" . }} + {{- end }} + {{- include "_helm_lib_additional_tolerations_node_problems" . }} +{{- end }} + +{{- /* Additional strategy "node-problems" - used for shedule critical components on non-ready nodes or nodes under pressure. */ -}} +{{- /* Usage: {{ include "helm_lib_tolerations" (tuple . "any-node" "with-node-problems") }} */ -}} +{{- define "_helm_lib_additional_tolerations_node_problems" }} +- key: node.kubernetes.io/not-ready +- key: node.kubernetes.io/out-of-disk +- key: node.kubernetes.io/memory-pressure +- key: node.kubernetes.io/disk-pressure +- key: node.kubernetes.io/pid-pressure +- key: node.kubernetes.io/unreachable +- key: node.kubernetes.io/network-unavailable +{{- end }} + +{{- /* Additional strategy "storage-problems" - used for shedule critical components on nodes with drbd problems. This additional strategy enabled by default in any base strategy except "wildcard". 
*/ -}}
+{{- /* Usage: {{ include "helm_lib_tolerations" (tuple . "any-node" "without-storage-problems") }} */ -}}
+{{- define "_helm_lib_additional_tolerations_storage_problems" }}
+- key: drbd.linbit.com/lost-quorum
+- key: drbd.linbit.com/force-io-error
+- key: drbd.linbit.com/ignore-fail-over
+{{- end }}
+
+{{- /* Additional strategy "no-csi" - used for any node with no CSI: any node that was initialized by Deckhouse but has no csi-node driver registered on it. */ -}}
+{{- /* Usage: {{ include "helm_lib_tolerations" (tuple . "any-node" "with-no-csi") }} */ -}}
+{{- define "_helm_lib_additional_tolerations_no_csi" }}
+- key: node.deckhouse.io/csi-not-bootstrapped
+  operator: "Exists"
+  effect: "NoSchedule"
+{{- end }}
+
+{{- /* Additional strategy "cloud-provider-uninitialized" - used for any node which is not initialized by CCM. */ -}}
+{{- /* Usage: {{ include "helm_lib_tolerations" (tuple . "any-node" "with-cloud-provider-uninitialized") }} */ -}}
+{{- define "_helm_lib_additional_tolerations_cloud_provider_uninitialized" }}
+  {{- if not .Values.global.clusterIsBootstrapped }}
+- key: node.cloudprovider.kubernetes.io/uninitialized
+  operator: Exists
+  {{- end }}
+{{- end }}
diff --git a/charts/deckhouse_lib_helm/templates/_pod_disruption_budget.tpl b/charts/deckhouse_lib_helm/templates/_pod_disruption_budget.tpl
new file mode 100644
index 0000000..ccd4f21
--- /dev/null
+++ b/charts/deckhouse_lib_helm/templates/_pod_disruption_budget.tpl
@@ -0,0 +1,6 @@
+{{- /* Usage: {{ include "helm_lib_pdb_daemonset" . }} */ -}}
+{{- /* Returns PDB max unavailable */ -}}
+{{- define "helm_lib_pdb_daemonset" }}
+  {{- $context := . -}} {{- /* Template context with .Values, .Chart, etc */ -}}
+maxUnavailable: 10%
+{{- end -}}
diff --git a/charts/deckhouse_lib_helm/templates/_priority_class.tpl b/charts/deckhouse_lib_helm/templates/_priority_class.tpl
new file mode 100644
index 0000000..5935445
--- /dev/null
+++ b/charts/deckhouse_lib_helm/templates/_priority_class.tpl
@@ -0,0 +1,9 @@
+{{- /* Usage: {{ include "helm_lib_priority_class" (tuple . "priority-class-name") }} */ -}}
+{{- /* returns priority class if the priority-class module is enabled, otherwise returns nothing */ -}}
+{{- define "helm_lib_priority_class" }}
+  {{- $context := index . 0 -}} {{- /* Template context with .Values, .Chart, etc */ -}}
+  {{- $priorityClassName := index . 1 }} {{- /* Priority class name */ -}}
+  {{- if ( $context.Values.global.enabledModules | has "priority-class") }}
+priorityClassName: {{ $priorityClassName }}
+  {{- end }}
+{{- end -}}
diff --git a/charts/deckhouse_lib_helm/templates/_resources_management.tpl b/charts/deckhouse_lib_helm/templates/_resources_management.tpl
new file mode 100644
index 0000000..dff75c1
--- /dev/null
+++ b/charts/deckhouse_lib_helm/templates/_resources_management.tpl
@@ -0,0 +1,160 @@
+{{- /* Usage: {{ include "helm_lib_resources_management_pod_resources" (list [ephemeral storage requests]) }} */ -}}
+{{- /* returns rendered resources section based on configuration if it is present */ -}}
+{{- define "helm_lib_resources_management_pod_resources" -}}
+  {{- $configuration := index . 0 -}} {{- /* VPA resource configuration [example](https://deckhouse.io/documentation/v1/modules/110-istio/configuration.html#parameters-controlplane-resourcesmanagement) */ -}}
+  {{- /* Ephemeral storage requests */ -}}
+
+  {{- $ephemeral_storage := "50Mi" -}}
+  {{- if eq (len .) 2 -}}
+    {{- $ephemeral_storage = index . 
1 -}} + {{- end -}} + + {{- $pod_resources := (include "helm_lib_resources_management_original_pod_resources" $configuration | fromYaml) -}} + {{- if not (hasKey $pod_resources "requests") -}} + {{- $_ := set $pod_resources "requests" (dict) -}} + {{- end -}} + {{- $_ := set $pod_resources.requests "ephemeral-storage" $ephemeral_storage -}} + + {{- $pod_resources | toYaml -}} +{{- end -}} + + +{{- /* Usage: {{ include "helm_lib_resources_management_original_pod_resources" }} */ -}} +{{- /* returns rendered resources section based on configuration if it is present */ -}} +{{- define "helm_lib_resources_management_original_pod_resources" -}} + {{- $configuration := . -}} {{- /* VPA resource configuration [example](https://deckhouse.io/documentation/v1/modules/110-istio/configuration.html#parameters-controlplane-resourcesmanagement) */ -}} + + {{- if $configuration -}} + {{- if eq $configuration.mode "Static" -}} +{{- $configuration.static | toYaml -}} + + {{- else if eq $configuration.mode "VPA" -}} + {{- $resources := dict "requests" (dict) "limits" (dict) -}} + + {{- if $configuration.vpa.cpu -}} + {{- if $configuration.vpa.cpu.min -}} + {{- $_ := set $resources.requests "cpu" ($configuration.vpa.cpu.min | toString) -}} + {{- end -}} + {{- if $configuration.vpa.cpu.limitRatio -}} + {{- $cpuLimitMillicores := round (mulf (include "helm_lib_resources_management_cpu_units_to_millicores" $configuration.vpa.cpu.min) $configuration.vpa.cpu.limitRatio) 0 | int64 -}} + {{- $_ := set $resources.limits "cpu" (printf "%dm" $cpuLimitMillicores) -}} + {{- end -}} + {{- end -}} + + {{- if $configuration.vpa.memory -}} + {{- if $configuration.vpa.memory.min -}} + {{- $_ := set $resources.requests "memory" ($configuration.vpa.memory.min | toString) -}} + {{- end -}} + {{- if $configuration.vpa.memory.limitRatio -}} + {{- $memoryLimitBytes := round (mulf (include "helm_lib_resources_management_memory_units_to_bytes" $configuration.vpa.memory.min) $configuration.vpa.memory.limitRatio) 0 | int64 -}} + {{- $_ := set $resources.limits "memory" (printf "%d" $memoryLimitBytes) -}} + {{- end -}} + {{- end -}} +{{- $resources | toYaml -}} + + {{- else -}} + {{- cat "ERROR: unknown resource management mode: " $configuration.mode | fail -}} + {{- end -}} + {{- end -}} +{{- end }} + + +{{- /* Usage: {{ include "helm_lib_resources_management_vpa_spec" (list ) }} */ -}} +{{- /* returns rendered vpa spec based on configuration and target reference */ -}} +{{- define "helm_lib_resources_management_vpa_spec" -}} + {{- $targetAPIVersion := index . 0 -}} {{- /* Target API version */ -}} + {{- $targetKind := index . 1 -}} {{- /* Target Kind */ -}} + {{- $targetName := index . 2 -}} {{- /* Target Name */ -}} + {{- $targetContainer := index . 3 -}} {{- /* Target container name */ -}} + {{- $configuration := index . 
4 -}} {{- /* VPA resource configuration [example](https://deckhouse.io/documentation/v1/modules/110-istio/configuration.html#parameters-controlplane-resourcesmanagement) */ -}} + +targetRef: + apiVersion: {{ $targetAPIVersion }} + kind: {{ $targetKind }} + name: {{ $targetName }} + {{- if eq ($configuration.mode) "VPA" }} +updatePolicy: + updateMode: {{ $configuration.vpa.mode | quote }} +resourcePolicy: + containerPolicies: + - containerName: {{ $targetContainer }} + maxAllowed: + cpu: {{ $configuration.vpa.cpu.max | quote }} + memory: {{ $configuration.vpa.memory.max | quote }} + minAllowed: + cpu: {{ $configuration.vpa.cpu.min | quote }} + memory: {{ $configuration.vpa.memory.min | quote }} + controlledValues: RequestsAndLimits + {{- else }} +updatePolicy: + updateMode: "Off" + {{- end }} +{{- end }} + + +{{- /* Usage: {{ include "helm_lib_resources_management_cpu_units_to_millicores" }} */ -}} +{{- /* helper for converting cpu units to millicores */ -}} +{{- define "helm_lib_resources_management_cpu_units_to_millicores" -}} + {{- $units := . | toString -}} + {{- if hasSuffix "m" $units -}} + {{- trimSuffix "m" $units -}} + {{- else -}} + {{- atoi $units | mul 1000 -}} + {{- end }} +{{- end }} + + +{{- /* Usage: {{ include "helm_lib_resources_management_memory_units_to_bytes" }} */ -}} +{{- /* helper for converting memory units to bytes */ -}} +{{- define "helm_lib_resources_management_memory_units_to_bytes" }} + {{- $units := . | toString -}} + {{- if hasSuffix "k" $units -}} + {{- trimSuffix "k" $units | atoi | mul 1000 -}} + {{- else if hasSuffix "M" $units -}} + {{- trimSuffix "M" $units | atoi | mul 1000000 -}} + {{- else if hasSuffix "G" $units -}} + {{- trimSuffix "G" $units | atoi | mul 1000000000 -}} + {{- else if hasSuffix "T" $units -}} + {{- trimSuffix "T" $units | atoi | mul 1000000000000 -}} + {{- else if hasSuffix "P" $units -}} + {{- trimSuffix "P" $units | atoi | mul 1000000000000000 -}} + {{- else if hasSuffix "E" $units -}} + {{- trimSuffix "E" $units | atoi | mul 1000000000000000000 -}} + {{- else if hasSuffix "Ki" $units -}} + {{- trimSuffix "Ki" $units | atoi | mul 1024 -}} + {{- else if hasSuffix "Mi" $units -}} + {{- trimSuffix "Mi" $units | atoi | mul 1024 | mul 1024 -}} + {{- else if hasSuffix "Gi" $units -}} + {{- trimSuffix "Gi" $units | atoi | mul 1024 | mul 1024 | mul 1024 -}} + {{- else if hasSuffix "Ti" $units -}} + {{- trimSuffix "Ti" $units | atoi | mul 1024 | mul 1024 | mul 1024 | mul 1024 -}} + {{- else if hasSuffix "Pi" $units -}} + {{- trimSuffix "Pi" $units | atoi | mul 1024 | mul 1024 | mul 1024 | mul 1024 | mul 1024 -}} + {{- else if hasSuffix "Ei" $units -}} + {{- trimSuffix "Ei" $units | atoi | mul 1024 | mul 1024 | mul 1024 | mul 1024 | mul 1024 | mul 1024 -}} + {{- else if regexMatch "^[0-9]+$" $units -}} + {{- $units -}} + {{- else -}} + {{- cat "ERROR: unknown memory format:" $units | fail -}} + {{- end }} +{{- end }} + +{{- /* Usage: {{ include "helm_lib_vpa_kube_rbac_proxy_resources" . }} */ -}} +{{- /* helper for VPA resources for kube_rbac_proxy */ -}} +{{- define "helm_lib_vpa_kube_rbac_proxy_resources" }} +{{- /* Template context with .Values, .Chart, etc */ -}} +- containerName: kube-rbac-proxy + minAllowed: + {{- include "helm_lib_container_kube_rbac_proxy_resources" . | nindent 4 }} + maxAllowed: + cpu: 20m + memory: 25Mi +{{- end }} + +{{- /* Usage: {{ include "helm_lib_container_kube_rbac_proxy_resources" . 
}} */ -}} +{{- /* helper for container resources for kube_rbac_proxy */ -}} +{{- define "helm_lib_container_kube_rbac_proxy_resources" }} +{{- /* Template context with .Values, .Chart, etc */ -}} +cpu: 10m +memory: 25Mi +{{- end }} diff --git a/charts/deckhouse_lib_helm/templates/_spec_for_high_availability.tpl b/charts/deckhouse_lib_helm/templates/_spec_for_high_availability.tpl new file mode 100644 index 0000000..8bfbf9e --- /dev/null +++ b/charts/deckhouse_lib_helm/templates/_spec_for_high_availability.tpl @@ -0,0 +1,138 @@ +{{- /* Usage: {{ include "helm_lib_pod_anti_affinity_for_ha" (list . (dict "app" "test")) }} */ -}} +{{- /* returns pod affinity spec */ -}} +{{- define "helm_lib_pod_anti_affinity_for_ha" }} +{{- $context := index . 0 -}} {{- /* Template context with .Values, .Chart, etc */ -}} +{{- $labels := index . 1 }} {{- /* Match labels for podAntiAffinity label selector */ -}} + {{- if (include "helm_lib_ha_enabled" $context) }} +affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + {{- range $key, $value := $labels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: kubernetes.io/hostname + {{- end }} +{{- end }} + +{{- /* Usage: {{ include "helm_lib_deployment_on_master_strategy_and_replicas_for_ha" }} */ -}} +{{- /* returns deployment strategy and replicas for ha components running on master nodes */ -}} +{{- define "helm_lib_deployment_on_master_strategy_and_replicas_for_ha" }} +{{- /* Template context with .Values, .Chart, etc */ -}} + {{- if (include "helm_lib_ha_enabled" .) }} + {{- if gt (index .Values.global.discovery "clusterMasterCount" | int) 0 }} +replicas: {{ index .Values.global.discovery "clusterMasterCount" }} +strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + {{- if gt (index .Values.global.discovery "clusterMasterCount" | int) 2 }} + maxUnavailable: 2 + {{- else }} + maxUnavailable: 1 + {{- end }} + {{- else if gt (index .Values.global.discovery.d8SpecificNodeCountByRole "master" | int) 0 }} +replicas: {{ index .Values.global.discovery.d8SpecificNodeCountByRole "master" }} +strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + {{- if gt (index .Values.global.discovery.d8SpecificNodeCountByRole "master" | int) 2 }} + maxUnavailable: 2 + {{- else }} + maxUnavailable: 1 + {{- end }} + {{- else }} +replicas: 2 +strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + {{- end }} + {{- else }} +replicas: 1 +strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + {{- end }} +{{- end }} + +{{- /* Usage: {{ include "helm_lib_deployment_on_master_custom_strategy_and_replicas_for_ha" (list . (dict "strategy" "strategy_type")) }} */ -}} +{{- /* returns deployment with custom strategy and replicas for ha components running on master nodes */ -}} +{{- define "helm_lib_deployment_on_master_custom_strategy_and_replicas_for_ha" }} +{{- $context := index . 0 }} +{{- $optionalArgs := dict }} +{{- $strategy := "RollingUpdate" }} +{{- if ge (len .) 2 }} + {{- $optionalArgs = index . 
1 }} +{{- end }} +{{- if hasKey $optionalArgs "strategy" }} + {{- $strategy = $optionalArgs.strategy }} +{{- end }} +{{- /* Template context with .Values, .Chart, etc */ -}} + {{- if (include "helm_lib_ha_enabled" $context) }} + {{- if gt (index $context.Values.global.discovery "clusterMasterCount" | int) 0 }} +replicas: {{ index $context.Values.global.discovery "clusterMasterCount" }} +strategy: + type: {{ $strategy }} + {{- if eq $strategy "RollingUpdate" }} + rollingUpdate: + maxSurge: 0 + {{- if gt (index $context.Values.global.discovery "clusterMasterCount" | int) 2 }} + maxUnavailable: 2 + {{- else }} + maxUnavailable: 1 + {{- end }} + {{- end }} + {{- else if gt (index $context.Values.global.discovery.d8SpecificNodeCountByRole "master" | int) 0 }} +replicas: {{ index $context.Values.global.discovery.d8SpecificNodeCountByRole "master" }} +strategy: + type: {{ $strategy }} + {{- if eq $strategy "RollingUpdate" }} + rollingUpdate: + maxSurge: 0 + {{- if gt (index $context.Values.global.discovery.d8SpecificNodeCountByRole "master" | int) 2 }} + maxUnavailable: 2 + {{- else }} + maxUnavailable: 1 + {{- end }} + {{- end }} + {{- else }} +replicas: 2 +strategy: + type: {{ $strategy }} + {{- if eq $strategy "RollingUpdate" }} + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + {{- end }} + {{- end }} + {{- else }} +replicas: 1 +strategy: + type: {{ $strategy }} + {{- if eq $strategy "RollingUpdate" }} + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + {{- end }} + {{- end }} +{{- end }} + +{{- /* Usage: {{ include "helm_lib_deployment_strategy_and_replicas_for_ha" }} */ -}} +{{- /* returns deployment strategy and replicas for ha components running not on master nodes */ -}} +{{- define "helm_lib_deployment_strategy_and_replicas_for_ha" }} +{{- /* Template context with .Values, .Chart, etc */ -}} +replicas: {{ include "helm_lib_is_ha_to_value" (list . 2 1) }} +{{- if (include "helm_lib_ha_enabled" .) 
}} +strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 +{{- end }} +{{- end }} From 738672c6c205a36b35171796b8bfeeb787d8e119 Mon Sep 17 00:00:00 2001 From: Aleksandr Zimin Date: Wed, 19 Jun 2024 02:33:32 +0300 Subject: [PATCH 19/21] another fix Signed-off-by: Aleksandr Zimin --- charts/deckhouse_lib_helm/templates/_csi_node.tpl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/deckhouse_lib_helm/templates/_csi_node.tpl b/charts/deckhouse_lib_helm/templates/_csi_node.tpl index 0b5ba04..19d494c 100644 --- a/charts/deckhouse_lib_helm/templates/_csi_node.tpl +++ b/charts/deckhouse_lib_helm/templates/_csi_node.tpl @@ -28,7 +28,7 @@ memory: 25Mi {{- $driverRegistrarImageName := join "" (list "csiNodeDriverRegistrar" $kubernetesSemVer.Major $kubernetesSemVer.Minor) }} {{- $driverRegistrarImage := include "helm_lib_module_common_image_no_fail" (list $context $driverRegistrarImageName) }} {{- if $driverRegistrarImage }} - {{- if or (include "_helm_lib_cloud_or_hybrid_cluster" $context) ($context.Values.global.enabledModules | has "ceph-csi") ($context.Values.global.enabledModules | has "csi-nfs") ($context.Values.global.enabledModules | has "csi-cep") }} + {{- if or (include "_helm_lib_cloud_or_hybrid_cluster" $context) ($context.Values.global.enabledModules | has "ceph-csi") ($context.Values.global.enabledModules | has "csi-nfs") ($context.Values.global.enabledModules | has "csi-ceph") }} {{- if ($context.Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} --- apiVersion: autoscaling.k8s.io/v1 From 0cab2e95f1dc436bce2da554f5b71b27780e7bb9 Mon Sep 17 00:00:00 2001 From: Aleksandr Zimin Date: Fri, 21 Jun 2024 00:03:33 +0300 Subject: [PATCH 20/21] change helm lib Signed-off-by: Aleksandr Zimin --- charts/deckhouse_lib_helm-1.24.0.tgz | Bin 0 -> 24430 bytes charts/deckhouse_lib_helm/Chart.yaml | 5 - charts/deckhouse_lib_helm/LICENSE | 201 --- charts/deckhouse_lib_helm/README.md | 1105 ----------------- .../templates/_csi_controller.tpl | 700 ----------- .../templates/_csi_node.tpl | 193 --- .../templates/_envs_for_proxy.tpl | 30 - .../templates/_high_availability.tpl | 39 - .../templates/_kube_rbac_proxy.tpl | 21 - .../templates/_module_documentation_uri.tpl | 15 - .../templates/_module_ephemeral_storage.tpl | 15 - .../_module_generate_common_name.tpl | 13 - .../templates/_module_https.tpl | 160 --- .../templates/_module_image.tpl | 76 -- .../templates/_module_ingress_class.tpl | 13 - .../templates/_module_init_container.tpl | 56 - .../templates/_module_labels.tpl | 15 - .../templates/_module_name.tpl | 11 - .../templates/_module_public_domain.tpl | 11 - .../templates/_module_security_context.tpl | 183 --- .../templates/_module_storage_class.tpl | 38 - .../_monitoring_grafana_dashboards.tpl | 68 - .../_monitoring_prometheus_rules.tpl | 96 -- .../templates/_node_affinity.tpl | 256 ---- .../templates/_pod_disruption_budget.tpl | 6 - .../templates/_priority_class.tpl | 9 - .../templates/_resources_management.tpl | 160 --- .../templates/_spec_for_high_availability.tpl | 138 -- 28 files changed, 3633 deletions(-) create mode 100644 charts/deckhouse_lib_helm-1.24.0.tgz delete mode 100644 charts/deckhouse_lib_helm/Chart.yaml delete mode 100644 charts/deckhouse_lib_helm/LICENSE delete mode 100644 charts/deckhouse_lib_helm/README.md delete mode 100644 charts/deckhouse_lib_helm/templates/_csi_controller.tpl delete mode 100644 charts/deckhouse_lib_helm/templates/_csi_node.tpl delete mode 100644 
charts/deckhouse_lib_helm/templates/_envs_for_proxy.tpl delete mode 100644 charts/deckhouse_lib_helm/templates/_high_availability.tpl delete mode 100644 charts/deckhouse_lib_helm/templates/_kube_rbac_proxy.tpl delete mode 100644 charts/deckhouse_lib_helm/templates/_module_documentation_uri.tpl delete mode 100644 charts/deckhouse_lib_helm/templates/_module_ephemeral_storage.tpl delete mode 100644 charts/deckhouse_lib_helm/templates/_module_generate_common_name.tpl delete mode 100644 charts/deckhouse_lib_helm/templates/_module_https.tpl delete mode 100644 charts/deckhouse_lib_helm/templates/_module_image.tpl delete mode 100644 charts/deckhouse_lib_helm/templates/_module_ingress_class.tpl delete mode 100644 charts/deckhouse_lib_helm/templates/_module_init_container.tpl delete mode 100644 charts/deckhouse_lib_helm/templates/_module_labels.tpl delete mode 100644 charts/deckhouse_lib_helm/templates/_module_name.tpl delete mode 100644 charts/deckhouse_lib_helm/templates/_module_public_domain.tpl delete mode 100644 charts/deckhouse_lib_helm/templates/_module_security_context.tpl delete mode 100644 charts/deckhouse_lib_helm/templates/_module_storage_class.tpl delete mode 100644 charts/deckhouse_lib_helm/templates/_monitoring_grafana_dashboards.tpl delete mode 100644 charts/deckhouse_lib_helm/templates/_monitoring_prometheus_rules.tpl delete mode 100644 charts/deckhouse_lib_helm/templates/_node_affinity.tpl delete mode 100644 charts/deckhouse_lib_helm/templates/_pod_disruption_budget.tpl delete mode 100644 charts/deckhouse_lib_helm/templates/_priority_class.tpl delete mode 100644 charts/deckhouse_lib_helm/templates/_resources_management.tpl delete mode 100644 charts/deckhouse_lib_helm/templates/_spec_for_high_availability.tpl diff --git a/charts/deckhouse_lib_helm-1.24.0.tgz b/charts/deckhouse_lib_helm-1.24.0.tgz new file mode 100644 index 0000000000000000000000000000000000000000..05bf4fed47cd2838262e0aadf33dffa757f779ac GIT binary patch literal 24430 zcmV)qK$^cFiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0POwyb{w~{D2(sF`4mV?St0%73@@UjgeUU)(Ijp2Sfcombh5J? 
ze{HZEGlQrHXai_c6G>~Ghd58TpX6I80Nv>8^bBVx+2NeC9kCl-Hx#N0K%o$m=-q^8 zl7tDp2`40(?i^3BP{SFXCjYgX-|p`2?u&y1_}|^#-Rl4D?(gmY*WSU4XU}&J_Ff+B z{nzf^!ONE~{tNA{2!H1(=CLv*{p5fd2+nkxAFdy-61 zrf4FOBGWX%ilCT`D5LtlL?bTHiPIl6<#Cn}Iox1)N)Az7u{P8!C5I@XHv)^<#;rrU zH{3rM?rwZ;N&IL28pgltYq=9fl7H~v0<08{CPajSNS=v^$N+u%v=OD*A==xWZcNFPi`gOCfBuGU=nn~tb%PBb zNGkX()dTyUa0HR`-}i(e8BXPdD|Jse3B4tZNSO+LL+%Bs7)K#$AJMV@de8fMlqHGw6#BhwI?SJa)D+mKken(+%cGjOT)Z zHr-(U#*=JH=0bAK`T%>wGp2e%v~AnKV#=y&(Dc6e-1)Ez?ZYm(C~0;5Wzlhe`Sb3d zeY5^9yCH%RMdTHk{*4F?N0R9+5xGkk{tYJ?k;8GqZ*Ve{4Xw=o>RufR`W2;?PcA&nh8!A@#&Pa zkm`mZOU7Sfb#T2t^~D3{oBsaHhD7a6-f&5^(^C2ZOQQ@e$HlvZ<9OhvJ%f}dG@1?b1&AT#0ScyA>hZC) zg+3yt=M7AueNB{1Nu-%mSd59fB%?#<4_|5;x7@ki3*e2B)(J@yKARGz;9;9+vNXnu zTmVT1uzB)^$K((NTF@G*Bi?TqId2OODUs&!pm=c(Nd1|yp%9@dWk*TE??`-TpHyVu zGMmnYUI^*WPD2rz;`cSEFu&a2o#qcp^UP*~oq6v~ZPNn6I!JYcv@&kErCqq)V6b34A?jaJ!qAE%x0e$vF9aFU_b7-Gp2Y+3nK2QA zpk3Agm@%#}Y-M=^3_zG^@gSOz=$*`_rRCPmR8UrQ!SLF0GyDcia)H$Z1v@zu+ZTf= zW_V1*j?_{xo2$W=KmO$e?>~Pbv#HOWXF6Ti>EDD)bxxEHsSaUb(LRrvw1EjaPBI-w zeM^N@f2V5lz4^p8gb4W}OOh)fCue_vDtmVWLB>=nG23XcX}OA~f^tFCEQ}H?wT`MR zO-R9AAZJpM=^!#h4I)7yf(?8HeNl8wGZw~U5;~1+K;k0DMPMqP5MflH0U~CzfyS!C zSE?q1jKS6?PUyc$Tu0>m_aZX`J!IL)tr@lxYOO%Z<4}?)GtxGALXj~=rzErsHUSzo zQAQF;?ujMDGR!0qVXhx-xC*`1Mnv4wh#W-`&zLeUG?LW5&3$W43BAy_*{9a0e-=~& z@dU>qXUQxSoU3p|6Jm6-%z)>7?I1Chr#2be{w` zrL9Hve`G>4djaAXXNNT6>io90`6)F;fwdsaoPC08Lw9;gd zd)QY`H_wmWoL*fV9iMJhJ~G2^5aAR@RL!b#wvPw=ju9cqXpoRwlDyjYMcuJiQ{@wy z4vfPaI0u*W(k7idSTrA~iB?xxRU5aY?yC)9x4zDqfn(Jqe~!?twnT?$Cz4eE-%#@C zUG1}rw^RQJM9}uCuB+lPC^~0;V}sl zK9-?&O(7^;fcADhYqq!V#g!w(xI&wYd8fJSR;{%6U$^k7EDZ`LYqzNIxt&0H@qBR) zP~FD7m-+V^t;=FISflDr689w8XirUDZ;izruE^a&;Qj>lg2BZ{YG$au{ z+LG1VlHZXv>A#<~s6}y|P}*<$+?JvtDWadP=rz(ON~HtB;*?XSUOnCX{_WLucy_UM z=%X&u#MU)5E!yP=rZ@Ja!YY%)375))_~^p#HRu-Ef}K0XFE&b6w1~d^;3QfCSdloC zGa2G|N?Di+dP@^BCUL_EZCWFJ2+Po5!#pEV&+_iD5KiXZiLwQ2?F~O7#s7h)Ns}3C z$#&>Y@@=pz-RZ*%P@sP$i{=kiZ3n+2syupAGxjG0Cu;KV@+&S>**^P5avfI3o(K7s z3xpZU*gMAW7!m|a&d}{uIw69f@|G~V)|X zO>gNg+oYk3UbJ?R8C$h|#D+mdrm32pP;rPp_%y74G~IVBHI1YeW)>Pk-JMZx*}G4? zKm2oSOB`eqH#pKRaIr&c=BtJ@s67Aas-6@i-^*msQ*qro%2W{sj;Ws-bjbbN;%qQ=&9W72s zT=DXwY?{d>Pe@6^&=zNE!Ug>|de}DE_W;g#LkpdIl=+77P;({l{E+o~R+X^J%`oBZ^ zUI*USrS_xgvqm=7zdk{4^-$-GAU=WtJ>p5i?)e3T%d$tNu+-)9o=9_ zV#L|T6LhDCwjPT_!Kc5}Hz(9ACBF>OX$}+%f?|>i5@AJRL>U_4TP`$3uK`(Y1UL90 z2d#0e=AOt6TI=;o9?5#Cnx&067cpgix7YC-sjL9`6Lg_1o0NH#pW~~uD`T;aZKfPZ z%(u}n_0)Z)doayz5-KO!>^#vjwB`=EJUc|$h>NM=X9<_(!UzM7E*^4Kg8)AYdxEYf z1bugUjZ#kxHzeVAz_n#q%jFIy?=;U@3T#96pq$VYMH5W5E;b8qs4*B3%4{^E5!Hi2 zw~g}y2k4e!9nmYS$S6y$h(epWyuodbi9#1V-a?qg-bn?t>9^3G$zy1uw41s#ss9?B z;%G$8bMN!59-5x=xIyAIt;K07>mR$r2aOTt&lMB60_XJ`u!QUOWF;52I&bAoKR2Ef zd3vR%i_b_MoZrZF)JsqN?aoE#&xzJJjaWx4yb7Lbcz4WBVi|qx;ZlU}}+ z%gTLVH_jE7g49T5w^LsK+VPj*_8&x9zj&`-Na4$H`UO3YKL4S7{ud(iFT&+7HXcf| zc0V!xp3`{`)Ew8ZrRulVv<@l{zl6}Bd}`ne2oF}h0lLJF@XB^Lm){s(>7HsKt@+Cf zh2>lAhO(oqF~a#mBasIi8((}hQkKM*5s{Q>dW=dQDk}McIK7_x$CHgUbCsFP}d z8mWJz=)+^R^uC2#U7^XIb<&9*hvuWz)k?=?%Gnk9{H4-0Ujs9k26-gXyV&yFIA+

*nqxw&FuC<#f{EteYGIM{45hCGK>XJL!2b@JVvQH zL}QrRuZYaN0#e6UXW{Aj$;I2V^XqDD86CYRMCC$)qZlbxlUGkSPcF~?c6u3Jo_-fz zy*>URyg0i4errx`86Q`@z)Vo8FcZ|iZo=3E5}>S3EQO|mg%a;Fd`Op`x- z>cIEIkKdez=WkC=?^$KnuWXZ&2qypMej(lN8WV=}o;4WW^EwSVe2dw{wLFgg-G zmOUyaJW#=eWkS!n<#9C79Hlc9#|Dlsz@kn~$;6e0ez8>4GS2J80?JD$MXRPJT{Sft zIBZ6?Ncoq%{eqb14HIXp+LgUx7iz5pPw^O=o797EsHq46G}mpzQiC!kw=^OhAI0Rh zrT#=i7lM($pXadF=B=DN4uH6}C`kb!EE}H8ov#(dWCC>NTyNR|m8( z&V2{aXqk3wwBj7nY{&b=OpQ-mxta?0CM(z8iNXSnOv`2_Ap{uY3R3*u^lGp%w~k$S<)I+`a$1yIb4;-QRou z{N>~R??e20Y_@v69~O6J)$1|se1(qUSfXFtJx_(p^fru!l<(NfSYkPFfPe9B<3frf zb6-}mIEh?CbTc!RKnxC4Eb0#{v|LXpln9{HlnaF!9C%IzBufH_93&rkh*BO)6vHwa zvWy_Nxqv!1l-uOwK6q-_K6PjhUlqHe=H?uVC;v!51e?MC`3uMmtf42W}w>tiJ_je!1|3mzGjQ_F{w2gc8{q^-lcyammzyGlf z|GqMRKA!8DxRzWhrdJYE=hRXq>Ap#k2a-eJ804{#+%fu``abvFpt0G zf8FulgpMbny;Ip#&s!!0oEQHcJg?3F4)&fs#(xj->kdw-`I6blOv(Ow)4V@h4&H(Ya7dWN|Fdm z5^78AWf5+9DWK-^>>tRiiZRgrbL0*CAg^KV{8{OPVYPkEB#6xY0ezgmVZ3<3Vozky z%%iQ*t@WX?*&nW+)_!=4?AgL2E5VbC1T%6bzlR}lp-@ELFKpp*iEdb7m|Ms3g->b%ddwY-P zzaHe*BmS!>K^WmMB0|v-jj;0K!5~ir4jTN%C1EiU5}7r)DNc>S*1!i)>A|c%9UmbN zxOFUP6LhjWK^4HR}7F26ZChROQz(NIgu z+krgLcR~46Vvb<0{Xck7vH#C@ zpFQsXKFF_!{clqO&+`8@#cHDG;;zxxO)kJq9R#=n5~C7GlnIJ5N!i$x50A_eZf=O7 zI(D(ogJO6syX<{C<=%lovjwXgg{dGT`d(Ytpx4$?&}6*fxALV25OB(c+c|DrgSfzI zqOGGgi;mU`94%zcU(joV$@zlD4sk(s4)+yG1!Zc4f6|{Xck7 z-T&Wz`SNl9|3QBJ?SH?l%9_!CzWat&Hu&{-=$C|#2Rv1BIK}S=BbpHO6)HWG^xxzc zbjPzK)}bGcV-x$OSn4RSv~pTqoN}}d+|3gj7?1(T!Ri9h$i!+ryhsoJJxWeJ;FO^$ zxuG{RMWn-2qY#9T;}{Lmb1lZji(>RSC()3*v9b=bnYtuiyi?#}3&6TmM|ce0YYNxU zDR6l9H(|+_`}MN_W5S5QDle9x=QsGU1OUY zy&GxRMwLAa6ZCc#GU*PFOC)SHzz!3_#%cl?IfUgHOS6Q8R9n9|fEGn}+5@KMp8HUw`}WPuCm5NMqfRX$)&Qrm)^~M5(PCXg9cW+HKV16x@A%0!ym?9 zwvek&8pv-W-Qn(}P&Y89B(71^p6}urPRNpZTJsW%+v>^)o;$jF=}I5${dA{~Fx)t| zl-v51g53|@5 zq^iqx3vDi5`M{rEHJbO{S0!#RZ0tf910L+BpLOfMW$5e#F$GK6(~f z{0*58)gHH}Te=jhPD!`lZ_c6gr%j>pNnNRwX1D@RBc4mDN0jU%SZ9>HXmI0|yU1gl z7I1}ym?P+JA?r>Jt*W`&sUd}awGm9~ltp@p>AF26babEeT@dwewQJ^^&wCG!F5yub z#!T++h0z2j31MRrN}k;6!Co6p%Xd)+mNoX9iLLG>o=RZue3IaqBF~=%Iws>;JW8<8 z-Y2GGs&K+1f|+|4Gp}A@V`gms3D6>p;ArZE>`UDq>n)e?J6OJ>=b>3|_pV9vi~fdl zcF@eE*TMbWF%hdVm$^u4bM^uFD~FY`u^>_&Cs@jE%(kT~Md=wL@9f`)`eDDNZHq&w z64~!aLv;jbBr1_ePGF1EzyKF*K`-DX8i`qp239BMy~2|me>twSWm3zL7C_#zFY1A= zeeFWVs!&&IRd_lJ&DPM|lw@QtF$>K`2qg4$=%I;VxSHpf;@731hcBZDTiSNqNx%ZH1 znw`{#Ms*s!?nSz~hwp{Afq;s1JUFB4C zl}M8=?DF{k)r*6E7Sj( z=j48`zh3+QP~>x6`~SNyU+z7x?*Bh~`TTMJ{~><;JrgK_pYWCEiw?9A>P-29Z+rV6 zi)rq;xO;n?(v2-;XY&y$L8nJaf`Wg-on;PLUJAFwPAR5y(3XhYN-N!C?fePTbJl05 z5y>Jr^4fy?a7^9t^ENdFNAm^x8ds!z<(TfA6FMf+>yPf;7$_PnhcSF4OAJ)Hz~^31 z981^=8i)`a?kn4LPSVsR{-YfJQNqF+3y3l#z2Ipu2w{Y&t=YE4_e$Cokrrk%w z8!SEkcD_Prg5h|hyA{=TfOI8)XXvXPUmne~1E0Hcs4>5N;4lTXdQ}U39BAHxTBWK9 zH!6}3q@AE`j?)Qvs|Gp6CQ;Fc+|HW*Gg8JnwH8M}kl7DTM_#tw&&F5IjU^*M@7MxUQCFcMA)_?r1+r`&`=AZw2Sv~)~cktqI z{pTTm3rsx9`}ZCBRBm@1S{e&vO^-{yKwLOKK;Bb5x}cSW0L;8i39Yb)xG|zHLMQxK;4w{d(Ges*1Zx7j^)1?Emw<{p$Je z{b#$6>;Dh(vu5$hlPBm69os7;_V$K*`@{X+{oO-!Nv8akAT^;9O-PawVN51Y5=50m znf4qK;gKT3w9;`BRY(vVO-PI`dGzjzC$w4?qL<6@^)8Gi{Emf;-|%=An)}kc0NFf% z;J7n3_OG+E^#uX2M1n+_ko1-$Ghh$y+=blw*xu+xiS+Vr490$bYCa=93Rt`M<`XZW z!^OrX7sFk*r?CvISU!9xe+IycTmBZBReRSvCn;t53Wd9W5d9R$NwAHAL8Si+bfCc{ zdhz`Evx7tUFWOPlbf->K@bgB?6{jL&NAjGrOU~7ysU}=J|4|Zgh<1wy-wB?j=1KLQ z&w-bKQj5;h(V(%P^Bf6b7rTq_)JYz2Q2)D*p8H{PcBr{Pz6n z`tr!#Ztq>8QM;A<0qx^&p+s$UyJo${Zc*cUR_OOM4-+ zqY${Z&CDZhRwqPI1tQE@+CX;IvwW#ub#wMZo*3+M3HD37>BRsoE{C=svDH01wKNbKH2yVK7G8L?RWg^W&anf_gMYSvH!aV`_HQOfB*6R z--r2iZvIyGEApc(l{~7TD7#8WX!9Lq@tH({Jw~ientNT&jT%<>a`W8*cX*-U8_#|= z9J494{)hxg6C4o~3<4B{0s4q?p3p~>F-28Es)tQKcb7cquh;l5m6lxn{_p;?>irM< 
zdwY-L|3QBJjeqUTQS65Fca<|r@CX{_uxCpgtEk!1*sr1o~(2NBPeHrC2EL(rW> zwahgVWpO?Rk`MdqW&f)yTMN2?Zu`IY^2M`i{n!1M`;YZsALO@y{hznQX-=sYo?&ly zcNhNC;Sw%*d{y4ADC&VrSp2@=u`$f{K(j^#`$}WMv$U6&=u7xu1=qj*sL8ACb;Ev& z_qbt;ro2I;L`F7ByED)=rMbSpQ{%neUw+?XuB6PCb!YO|0zF#k%D_QiMV~rY_v+In zx(Rl-E1hGWt+h+cH6(X}DtwcWaMYckw}L)`ml_{IIEsjb`x#;LFW=nlwVJm)e_u|| zA~brCt}g8%t)p{21gq#e58Q)y*1_{<&#RpU&(_^H4>l4#=|0$C?Bepv>0*C8uCvbt z=W0#8yStL^G6b%-V_YgpC6T0@MzY!tP;^Q{TmF4j@|Prv_%ubS;BeK|r9pFIi_G7n zH_Ftmx-%@@7lv%J!*6OFl{r}YP?Ad-sjmL_COLLwuamt<4ssjnU$6A#cIBpx;P@>|X4{cv#L98yQnIF*#G#zY5XV!>9zt^FzR<$+XcL$da8J3;V`*Uz zWHmU#DF$0iT^=BgWAl#zy86decy#jS?EFD`t-!Eu>(thF>oQpVI9s*ts`c0$bM}DI zh_7{FEO@G;tA+Y@;aCJGNlDp}Mw^njS9Ru9GWmtMs3sGjiFG#M==JN*t{EX_afstM zKt*1msbl}c+3VK<3bdiRxV}6(KJ82-Tv7P44a(&72dq4wx~r#4I`*KdRY++STNJOg zeEF!6pUHeK$kbVZ^pUrTo3qvy-)H(wDiZ zy3GD*+w${+dgO=L|L>RoVNU|Br~b>cgM)hh-@)Ev{>MZ7`kTa(5`>sBZf?0KYVM<_ zwjzpc3I#xh3{Od`(Q}1iuDmah2fL4{>L9GSAWS(M5Z(V&V-y;Y?S~l+iMAq+X;Xty zJ&(L^Ks3W>EpEB~TVMU|IecXf#Q24B>~UZ6KE+H@&Y;SE$Va0@ci-VFyKY}Vw8vGv z!v=5*Jp^8GQPsfRZ8X6D&fq$$%HtW3qUGA8Z(C@;;QbEKy ztiJ+3G4C9WS52%NoPSCv)Dn|Km3t?!L&MnZ(nU4)Tzd|BV6H+4Ebw}s=bY+9`9&Eh zuzOwpkPcAeH3&`jY&3jbjE=Ij`!HR;>4VCFg+H(*qXOL}L3^JhwQ8iA_I$+e)=H1I zs7*{|#BYh1H8NN)L8`GV1F0^mHHxdfGHB^0@ipTA(62f(_z!nr(RUC&DO)ga$Ztga z@E>s`WM2HosN#aMaX1!ugc%NFEGIV{i@1LvI4AylaqzNQ|LI_N_woGigZz5Lf3=gFnsEDG zqoHZelWSRuNxZgaE)L(6*$vwCp?pL#O{juJs$eHTA5lWs7TViFHPKa}IP85ujCc~8 z%CNEkgX4Iw41LR^-^RZwR+DY(CUf5ps|>^M5`JSVB9|l+?0hx+mn>?OI2x!|xL!O6_6N?mzV)gd+Q)5f8eNlg>af`%JV0-|2&YpFr@2-XMZd2vz?$-5SCp7 z-jL@|eMA|fziyd@>-+m+uh*;zvzY3%`DFH4DDR}CbHlgG#}@(TjzusiJn+u)F1Wom z@MwwjDc+e@y2Hjk=|Jw}35CugCn`NZH7>NKq z&EGVJg685~j11^PPddB3PDhUrtC&3AE6}Pqq*An13#FQt>uDfYk6hJUETVfQJWmhNe-pqI@kEmP$RJBP$!a#U+e0g+n8lIhBpI-j$=yezd-c&u-Vb9zxp2Egl zMFOYfj42{+ae_V~yEB5G?Lz3&f%m_|{RZ6AELj-=aA)Bh8lJ72hG+9>I9N9g2U~7! 
zW#=ocEf-it6GYypf=J_nuy#wFp09k|VH<3#Jc4Uq4^NAG)sTP7{AOFzk4W(=s0+8% zxMx=~Y}!lN!j%g^lY$Yw=OG}Io7sD%j5@Ib3BdR{G;50gub`un_FmebBf<@YHZS=0kQt!m3Ay# zm^S!E5~iw+wt^}H8d1YVlck;Aw$0v(bu9ngZY)cEnQy(8?Cc^-zMI;geN<_(^|64Z`%hTjoH{6Urv#Nj-HvbWWJbEDJ;fBg%U!^P|+#jnF=L|IE#H{ zvu`?8E&f*~6|ybOB@s|d6JR6i;?&-nHHWr78h2lPbIi19+cl7w^Gq4CW%;CQdsRb#z-{g?5Wgm?tbaJHuVANHTW ze7;+||KY{6$MxTb_}LNlHzMe0X09BPB!LUNkW>OIGM)`PO^3_ewU-Kk$~X|A15wzG zy!x%S?fLn-kMMQwgVUU?@x+pkf?-;*hIo;@=!5?q`89)W{ z=CZZJQl_V8$1kmQEek~ZwmUA~B^<{R#RTFg&Uy-}B$RVXYh{Ygl^L zSi(qmU4G(W37X28`H-6KI=$2lm4%#g?!E4eE2rb1K5h7ao7r1Od;hFuY0cFx<#*n+ z?&Y%o`6?w59nt8V$K;r2Ono!EiT<8*G847 zm?`TQ%6;CSD-=XYra`;`K)_XBl`bDQ(TD_325EvBS>v#C94{g2hazz;mSOIajElMy zPl&)pg}L4_*PI!+2RA^QDoYcBhLwOsO$Hf*#Xp?Tf0NigE3P8DGT^C+_)PB<6ePB|MA6>SLx~)sYp&lE zp+7XIR&uEzBl_OG_r)Kgo`_= zAaT&VS=JbdHpTYS3K!^9ouM5=p$7?7J4Y8_6yw$)>o6D%)Ey$qjLk07=P zXPQZMWLL!-n`|mUr*-&SjT>QIbb&T4O4{Gehka?wakP`yW|TcMa9ErM`nw&vhiFW1 zi9{(8QwrPJ(yTO22pr;v!gzKO!Bndf^KT=UbZ1c!G>8`u*~F?x6odgy|OygsmE+)X4_rV7kA_efM0> ztZ8qT@9!4-X}eYS-sCRi5b21-IRC=2pnOkH9(r3V+sGyaZzCPg4Wz<~45ma08cFby z!#2j|PMBLe&dZGL)pg%mPw8_;*;o)M2R5inh^VPnGogjsokyl?*^kI88@d%F+A@1> zf{&TpMiCKeFvSdy{Veq^`dxT1k9u1Zt(Eq+421Hh^0)O7WU_5?DW;W@*Y8HDo|CvZ!t}zCO;qSnl#ExX)F?(ID3KZgyxA!_hy2N z#^5ONs+hbTCGPoJ?j*g8?k21w& zgp-2eYyJ&^_9XshR&Uj2=<+KLwXL5LriMlfi;(RHGLsDxoOfi_U>^E|vt+xgyGxgK zH%Mf`F<;hEHNnaSjs&t?R-lvlz?-j=&9OcRp@G#!*rtSg7%6cswUbv8z^`y7| zF|Qw+!}l+(7juTz(gQ0WN!~6~xAdE)A77g6n{{xVh5bhc7g56*noQItyem+sGRaFTR7GkP zg(^qDj4Q)zM;o500Ur%wD&KXqnvy9Ovw;&&M@tQ;PrH=HeVS!V5FAbPM7E1jqVBkO zH^>;i#WaD|eO0NxU9pHZ<-R2rH!*TBhUj`iWy3zwEMEk;ndNDo)U0WjCcTKfk4UP# z&~^1HxzK*&T*Y)Ha+`Eh3ul;6c0&~${7AS|gI_Z)vT0kB5f>2|P(C0+aM9MvoPiqX z3G{&960v{*8Gsq{`ntTYz&JX-IxAM~x6$2%MiX?0CGzaF=|^kDx8d~n1m6<7$7Ipl|L4~K+&$Q>=6~-$+kdS8`5?a@!!3<(!Wfe&XOcAE zx9YYWE>6BdQ~VxzuD<)!Nf~lug3QpbqrP}z%oM->v7qJp!MK`GHlA~qtxWHnyt{ju7k3)hqJ$dko#o&z$}*_egj=SX zpf5y|D^?@O>=4xHY}*5Pw2l86zi#{Qj`hpXdH3wL7qulW<3l4g@PzD7ThbV8<3tHZ8EKyoXQN_N$SGc`d;j0(V#M5T9uw=c6o?C9F@;Gdl>Yueu%<$40hB9IU@ywTeT|hr>%Wbt@NJv zo?A(R3SZB6-_W3z!&zkW`pGd3AbbxwDET2Db*WogPRTX)Cl_=~_3XXUv6b#D${lT$ zS#fzKaMw+lionS!*eWww)bVNiz*I5WYGAk<#4zugAeW?DAPkzM+b+Y_TTXgaF<3~} z?FL~TLp~Km64sqwv=I5NHXxsu{+7nQcd=BN!iHStGRi5}DsZJ~jEWl_D~%+)E^Aj~ z9%vKnzg-;FD*u&6y{Y|E8%9Awr?fJx>i02u=x$RyjMB`*HK*lpN?ECOZJaiZ(J0Mq zdA}Bjej&6fErmtsLmx=sQa@`VGfK19y1^ToBsAi}NK^2P#ZdZg)Z3m$X%=RTDj6ys zPIGWogX(j+HJ|1!v}+ETy*TJAWz*$O#zir|Y0$*(Jn>e1PPe?xR$I@`F^!kgAMqeT{Aw<4KFAlz?eYI!v>a0*Sj(ZCv&sNl0vk7`ukLzQxI7upomSzbhyDi*B6 zDwe;GutLG<<>lMUL*$Z8bH`GM5l!LPN=F?Pf77te&g`h9brSCxx#IV;YdqcyntDZEKWg2L=YBr zFH_H9+b>)MV2>`&Y;nEziGnR*baZistdy!Xkf6a2l*Lxx4lD6;6%;gWf&uG1H5mI% zFwW~&6c`xA-P7gk1{z@G{n&KjL2WAKK|A>EMN~#Omt+L@nR_)Ap^8&g89)rCsABOR zx((hG)fByct4^*O#`~5HpFVxsFvUhMctWF@CdvH$25f}}++anG!N@bw9J>XKyg%WQ zy|;Q$JU*|GU8st+KU4hv$YlQ?T0h5Hhv?Ee`6CvCYmt@niCujdyd8~#`;|6aEVlcrj+Nm}!<218l9ZUL zND*hZL_p2I{0pQwDqt;cVOxpv?1@1H)3G8JD$nC5SYBl#I7p)-MDvC38&Fz+=1^Q^j z-z;}K%k*HsnG5?(2kg1vzncsGyKeAvF)CI=HH~nkDFPxXqc+8=Rje5}ep|MJF z4yId3wG!mjh#CUg{oUz?AF1v?e?u4Tubu?Wi8gHbzkJ{IBI7vM0* zKG?5&|6isg3P)Uo6FQ!RHjR!Z3l;;Ov;V)hzx$$k|J(kH=a2jU5Ao|^6jC0Cm?@eU zf;Jj~i#p|{n;8_5%G_4avQU@u7&%a8p{_HVs};Mq4YlH|E&yw8p=i5B-FRf%1}>4~ z@exz>$T0@5iqgTg9Iv{ZCpfeTUX=o`j&p77D`<@UcXh%}GCj|-+5D7^&)@`xeSG?! 
zL{NFjd^0c<`UU;$N{8m`H86JAa9DjCJvBAv4`1answ}pfljxs5nbi{0y0R?tp<0$g z@suZgJo|ynj6W@M-X~lthNt9Skw{F^gwM=wFAGhPg1n$ah*=y8k|s35($lZD3ygDE z#fb>aV&ouzK1{HG9!X@3=_xR0$l$iele#p2DUz4>iG5XbSS`<5RGeJCM5p7#_TJ*a zK7HD7qS0EbW+^#DmpswK;73?N+dxS8e;>NV$*W9gwb?E2{@s(SeQ))%lz_hPZx(qK zV`_=&9rapjr9M|xg>->#=mYvQbjku8uf2;(0^MJYD4J>b&{h_*z`wUX{42J~rqanA zOIHa!iqb&&KSRx9XCUI)aZ{NP5I?+4@5OLuwBug7TUC$5xKuX(L$x?;Q0n>|x*lSr zT9301xf~L0RQlg6q(ItS>8rpmEL*Es`>Nb}n7^naSgU(0MP7}j=l%t$8_eu-KpPD# zTr}fJuVg{Xh4|`P(9<5TS}b#Iaer&v5M1{1p{uUwy+kK3?Nh(dE?=cHUW)?E_0a1V z>>@$M;vT+<{`FH|C-%P4!Nbqg7+1@B_VvxtDmJB7)Lm*zf{x#wpPXHvy*KVF?~qs!Ba%eN;#9_z>3(CXyu>iY8Rn;-QP0I)Yi zC;4^0a_Ex01Itl>Lyp__=-e(NKFLK#uM}x#BDKVm#kQ&i;Egz5l?4=jwcH7J4OWJ0uiRD znjxI22^Y4Sid#@^_yVf$QHjO^Gsp*0MbCN!kTFisDS)mKkul96P)`t!0E(l6#Rw;f z1p{Zd%_~ZzL2Z-awhPr9D{I(zU9JPvzv*O zOCk%Y<{j?(D6mihU`3)$x@A7)cSLNXm>FNXjRt@_gc$Y@<@5umxipY`gLAWgU;;y6&9x zw>SSHk*br90SO=W;F-y$K!V6Df$?FkY9P@BGa6wxK$O5tn!F3EQ1B$Ne~u7FMmzv+ zyUd`4QDIYuu@q`D0&$kjF=0?{GcGgiTQ8F$nBVAO#_p8F6hrLivHf>0-qno49T)F_ zVz5J6(~2QLnPVd#07i6{ttpNPD$2!}@$cECZ3IQVY++2TN;tO*&bl$KXhd@JX2aW9 z4gjw-mZkYcb=> zjuS)@e8YwNiwoDy{%Eqmw3maklpuYK?j}5eL4n*+$%a0!+hb>^My27ujjH0a(leF8 z%+%x#o`YZnnPQrcPm*E*nxf?bY)pwD$qXfwy#vABPzHL!@RV%1PKUDcBOHN~*!Fy6 zF4P)Pa9s)?6}^6}O}X{)jXho+1No5k@Z|z@$3Cnt&FRpSb}e7n@h2EO8= zmH2IMP$+HnIm1cf%(l#Krc_zu;o=T^HA$3YK%N~6fC%h(Ev#_84DPPeTlj&Bw#tBB z>-!Cv;ADjOs6BY;=j~CD^AzNn``X{l*O(-q~m%g4c)M-!T2s^`^kRf631T~;0nnX13iH{n#ujA$1U zfseE;O!sTgXQGun*7AkE?U|917I@Oua*L^qG6`NDFiyc<+A#9(VDSpqMBY0glzDaf z7;z?38f83_$qY@gct_&gDCy|Pg)pvZph;cP}|381u4|#xgmrhK_UW{{AO0dQw%~$e$5C|iAEc7k#gg%b-?QlKVyjY zhv+*UR%x`yIr}cMLRXn_u2xqYCo|qK^Q|($(FAz{LfVAf%uEyqalt=$hOmzCQlc`P zIQ6^ZB8l&)jx`u(1L%24Z{g2DG{Iu5Cr*5Z6Ez!*1R>jq3PEmp1jS&h-rUar%q@Lp zwneseIGAcZu35UmoTu4MLZf7+6*W!pY`b`p5@B4egeNwz@aJuQn4jBX2$pLY8{9eA z7Sln`ybij++UWicdS}y?yu}j1^;$MWqIq$KEtDEwz22MRcVyd?T-8yYBM9cvd^FN= z9!HWS$+rD3ou*tU(=kQuFB2%+s1VGI;|%UIgFbRtahfK2y2)8GGg8*J%aRr)m`oIpTGeCUl=5~{jh)9VAh0!?@l#ShthEV7G{DHqIw-8R?hDVo^#jL{ znc-S*tGEsF%EVa|ce*Q_uN$JXk?#L|E+iFITBq|)QFLrizmBnf4MxsR+&2q{mB-eC zOF006AxP@Ybo^xgrVL?};5(U7rCCkL*f;^Ka^j1apkfuf3>^3bL%6h4?E+3zbk5AN z<@#d^k*b;y6N;5J(S=NIGHOSPn`jhc%(^@mRT=M~hp6s7rz=>xfGx%myTh$6hJf$wSDcPfrD9=@?)-$$jx^47`o7)R{?vcyWX5+$J zEZKH36#{`2sx>9V^zw)&3BNP`-Px|gjeK6UWjM{Gf=eTTJTu=AB%&!bRl2>xr2?N)J7hGI1)cVd|;- z7=%F#oHFLf!z_rxlx@4X*_*<{?10T`%z21&goQ$E>k(FX$8D?K+uAI~M8{3rUN8YF zs*2%ec{5884fIq^YB_Q<*3-cN;$jW~ZAye@6RwAV5hqmPxR8zDtBjU~7H?^zmG{4$ z`01_=&fi|29iIjWHpR5O^{}%zkU25ye*Wn4OhCg(sYw?4%LC=6o)`&&W0+$WN=F*S z>JBRS;b6j}9I(#m+>r%!p*7H`m?A)M1N~6k1fdJx` z2-8_=LPb2#OwRJYVO&i0gjoj`1dAcMp4jbLR#HPN6|cV`cra7xe31_)-sDwBC}nc& z*aC|+E3M7vZUe`${0YSTc#-jxfDg9%ov(2cpRQHiXoe?~BE`xcENDrOZBw!&h)2>>b!UtU*eAeIlG zWG+xlM^G=si*g$x_+n|VVav#X@%D=NsDYa8V%WGM`pr`je|0V&N&ruz_Ez%>0&2Te zB`=&u-4UOfP+IFzX;~(psZ?eHr4D%xlO4MgVCJet6ex%2N0tyNq0`9wG@%jI(_et- zZII;a8?$N@=`Ch?%UG>TS4E_+F;*9HOsIF`uRbp_HM3zBkm)Im0bxSd*zG==@6Wl? 
zALY9@;9qXInUt9d$J#V$Hv*)|EG0scn3#MKn@R2*oXhl1K0-7s%?cY3!8NQM_HOI@xKeI}-ysAqbWn@>@)oLc%Q;GoXet z6cZTZRU?lZ&h)$@4sS^>p)!(tua+&3WASd-^W+t#O z^VUK5ws${uyx6CE(C^))8XIBCG=IH9| z$_e-HXV>4q{qY+8{pj-Y==}QZ^a{Pb^ta{Tev6LI|ABruJ3rY*gqj`T_i&R?!3hPs z5_`*}#R!6RQta&14BZ(~QBoO4#m2(*+4bwwZFK(jd~kOD?d93|cc*Vo&#$-9o72nV z?~l%}kG?s3eRlm1P@!+nuFp@eO!~l)g>i9od3|>L<894uVL}o; zSCc7clESV7?97;1VOd>pnhKr@s>5?&c!V-oD+T>4EUvd+YE~Pi%%(6`apsyzuV>f60K6aYDz0MPzF`-|gNmE#>AbKXdhc(*z0<#U!CO5YPc}W5K1&_bnVs zg>a-K?2b2&4`V$`?t=?-%i7* zCFlUgtm#smIH1l#M-!|?LQtDISw;hzT-H_z2S^6UH0iWR$TYxB(~FkO>m;=~OGP zFAsy5$gOSAL%27(vJhusSkf*cAJYrqY39`6%gALze(&W6vQn%+U)4a*KD;=sq%9!TC+;qJ!FW!=XZK0WTuk2 zz(AfJ;DSHMXkm%B8BDD)**gQ zhn*dGZrj_Ng^ckTQ}}(p5ek!fL%d4+E%4N@A2ksc$SRJD)cBMUzRer1^flm7Lpm&6j72MGh>Y3B7fe zve-=2Qainc724DH+7xM5Wzrg;rPYtvn=X|#XWP#OMVq=oo^HAIzcdIp711`pcrLqg z?jm@~>$yLt@{BMY7`0n@{Gn*g_7)t%Ss|3&G#dYTyaGH3zStdXx{g{KkR%o{lHz$SZSH zgIGRd2*BCMY;(bL`1;WKxMp%Z^& z2K^{#4YARMu(X2{W`g=C+uBr+75C0unafJrkkxKA2@^h+p^gSZ@?HttfPC&Z)t+hn zPR};r<6c}sP6F$YX9>Ul!O{!@aL!$=BrS}yCS0u{dQqW^_5WE!!kM#i0%TR}p2u0E8UV&sh?9ihg{h#oG$CUW5*gv7Hodto zti@>)1ji;?)zSU;nBJ8Htmyu({4YjXIlZ41N<4imF6`cB{xW*D7dIl19j#B~bB zrb!sPCSh?PQ-(rILRP$9&rhly*!fv+_*JNqvT;I+wmzWhy9Nne2tFlhLNbXivxL;; zljaRVp&K-dq`g&@-fGJ(C>`IL+qyy-37nEp$2&YzT5&mx+hBHn<79KrV}e|P>iPmI zQ$#SF*eM+2F$rCj?7VVTXdNO%^9T!fR^ zji9kxllpn{$=o(o>=i~mtbOFt)VxM5W-}v!GN;`QYEbTI5=VMawLxiWP=hkfhfph= z<|Z{rBLTY)?esJ?s6ny5t_?>0bzQo75=W^wG6t3hC-mPCtu6LJUo6mTso*yWnaVy` zN+0ya;#|ifh^pW8=O~OM?Z=94*%z)Ejy4BYEZ#j`-#Z(4!DDnnrN~UO$2VC#CiP{V zG`-y4U`Hcg2nP1QHCNE@E1KN1~Tj`c`AJP2Q zRhzfKb?kEyHGkF%qr1#?E)LhQ7u1#u4Cf+g`K}kv#;f_}Vr=}Xim&rVB_Bm+!z$Ds zcm4RVdo*_newC64_J-&0>b6}nQW`}qZK@0{d~HK%w|mi#dtvWUV0rr6?sGww zc%*?+HXb4b%s(+TbvHJCX)dbUC=q+B*Do7ZtbSHSMWp4-2_s7M{q^-lcyammzyGlf z|GqMRK-S~=Tl)uLw^VQj$(6TQK=Qq;TpG54Vv-UTOT-!a$y^mCiBgn}W#9Xr^S1+6 z!Ho=TY3k}#&o;wlV6#B_2(Zk5AvpN*NQ{9Nxv?=oixl_;xz`L;MfBwqlZ}nM&at^; zCzuf#AUeu5gGw}^<4H0r3ao9L!qNZpfBtX&alm-ci}SYf!ZSGTwbddA-RN*pX$%nh z2XTDqzs|Y)qau?cg~pQynWk!HKG?QJG^j*M5O+X$%#pk13}J~zSPP{lgWlDWSJeNN zkTfxr1yA#b~}g$(R$Lk<|&)0d3h(m7ZwJbMiD$KwngAE}~3Odn(NA z2b(?RoH00DR+u(qc8g^nfw$4*iTbZ8RuesbU8ApanS0FFva_0*pPOg3v4NiNzM&QS zt{b6W5Lr8`wm(esM(y zZT=Y6e-CubNICm@EjANb=q2GoLSeHsi;TfshBk8;%$<3K0v)>r+QPxVWWSK3hGKP! z)5uGazv$@$Jk%_iv0!;?AiptKs6k#~%+SHZyi7@tzam`JZ_RO~MS7z7Ws@2&GXBh3A31G&-}#80;O zv0j-c5Kq_51JLtgS){+5t7jiJzGN9xiCv8tsz);Bihx9>bP6)5vuZK_sCm%NY=F>rz=r-~Ntqh2vz$yT!VC8K8+g2PVXr2%?kV6H+=K8oHuc? 
z^I?8>;UJ#o0Nb)ix>jG7q}@x`T2v_gj%qF~91Y&=7HjaR0B;I7rYgTq_v-Yu#srf) ztf-gpbZ_DHT^nXm2z750PDlg7ih4W&$V_4eqomZw;y>(kVcZajJa2FH-OdPQZ;GuLyqwP73-Stzrhb@uU`i!xcbLccyWDsbbPw{ zh0NC)5g%2>uxMYer$-;Q+x`PPr*jc?9iwmCyUxT+c+XDO-hE%vVyw6J?N>=??$g$~ z2erIK3k+cSm#8ly+g5B&T2Ika%3|^!J(V81p()+B;{7Ho9J3*Au-2dZTkstPAS%M` zS}Zog4~2MSlggkE!^22W=pz#?RnyM5UcZ;slPdWHP_x$D)GU zE@nJK5oRV|*Tfwu7K*}+Q8vj|^OqCRuXB5HTa1W;5%iPC#?Pzn;-Y_$!&rUpOR;M9 zI{EUq8$0@}IIh2A!3_%O?%ZFz>x#XqMLvLClP-yod0Ft?i;XHoZ*Q;=ph=1iMm&j$ z2+&in$jF*h!;bI)Lf-LBEk-#oQ?3hm~Zz zY&z@U_%e(Cl}Uwc1AA;qab!M(YYfFLPSC<4>LkDuoqe-%!CeFXoH5Ff#ceD{sRU3( zjJ@jlY;2&vnUjc+1mxZ$_kM_KYepqNkn!%4{(S(tpYOPMmv9_Qd)63nRsy^#Avu`a z2yTNPp}-ZQ2~bcBm;eQCeJ3!7?E@5GHXATFEE2eCIzde%y{aT0!`{>W>~<=i2QfgK zDoYchU0I1nH5rsDq1cB!Q|>g`z!iX5c|yz$mShQOH_Lc{P+}*F-mOd)Iwf)lcP7Pg zj?grxmPnhyZX^(MSuV&(kyT3Er*b@SW;QX3oSSdXFn0FRobWQcs#Z#eH_3l9KH z#04@ia~QR8)uYZ-FZjO6R6lG=?jl@P68#R`j3Q=urpl^Km`So2H zi@1n6Be5CnlsR@*qu=E`$nH87Up9GeB{8|xTx0`TLlj*>J%+dke~^RE4cgKDtriJ$?B|!!e2kRWyR53%NKz2y7q-j%QX`xUl~) zH+I4OV3{B*6-Sxa1;tUV71t(_T1c^wQ1WpTi<=lZ81@$Hy3?Q8lc7*K#AdaC3u6_B z=Ps`+jPxoc+ZjsrR<|&cv|Y~zk)$PAY93)IfxH+;$5&^E`BPV1*$_@Xf$|Q;mch<>{)@n0Tlp zyW`_GE0sehb$$tu#~yo6bt)FZImc z)HB1x+oX-qk)F%YeoJceX>p4ee=P_o;rkl>l-CtOu1<-qW+LY=wf*hlC}*w0 z$WP=wo~B9fGw}9c`_A6p?tn@~`A!L5?I67FX~B~;!Hf)Y?g}|>g(oIzW()QTe&f< z3x-_%tR;DAL>{2O`e<61>8xy9rr%dOvkqJNuonZJ2-meA+M~v9t*>1A9{IYS1Qoz?e|_rjzGYZ+zBR#|QqoPYTCvk9>W z^);n-jf3K!p_-+nQ1N!TtzWvA&+?R+y{vLmizu)BV{|f*B|Nkc{brAr5 F0sx8XFmM0> literal 0 HcmV?d00001 diff --git a/charts/deckhouse_lib_helm/Chart.yaml b/charts/deckhouse_lib_helm/Chart.yaml deleted file mode 100644 index ccc0e4e..0000000 --- a/charts/deckhouse_lib_helm/Chart.yaml +++ /dev/null @@ -1,5 +0,0 @@ -apiVersion: v2 -description: Helm utils template definitions for Deckhouse modules. -name: deckhouse_lib_helm -type: library -version: 1.22.0 diff --git a/charts/deckhouse_lib_helm/LICENSE b/charts/deckhouse_lib_helm/LICENSE deleted file mode 100644 index 13fe0e3..0000000 --- a/charts/deckhouse_lib_helm/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright The Events Exporter authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
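Note: module templates keep consuming the packed library exactly as they consumed the vendored copy removed in this commit; only the chart packaging changes. A minimal sketch of how a pod template typically composes the helpers shown in this patch (the container name, strategy, and priority class value are illustrative, not taken from this repository):

      {{- include "helm_lib_tolerations" (tuple . "system") | nindent 6 }}
      {{- include "helm_lib_priority_class" (tuple . "priority-class-name") | nindent 6 }}
      containers:
      - name: controller
        image: {{ include "helm_lib_module_image" (list . "controller") }}
        resources:
          requests:
            {{- include "helm_lib_module_ephemeral_storage_only_logs" . | nindent 12 }}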
diff --git a/charts/deckhouse_lib_helm/README.md b/charts/deckhouse_lib_helm/README.md deleted file mode 100644 index b120623..0000000 --- a/charts/deckhouse_lib_helm/README.md +++ /dev/null @@ -1,1105 +0,0 @@ -# Helm library for Deckhouse modules - -## Table of contents - -| Table of contents | -|---| -| **Envs For Proxy** | -| [helm_lib_envs_for_proxy](#helm_lib_envs_for_proxy) | -| **High Availability** | -| [helm_lib_is_ha_to_value](#helm_lib_is_ha_to_value) | -| [helm_lib_ha_enabled](#helm_lib_ha_enabled) | -| **Kube Rbac Proxy** | -| [helm_lib_kube_rbac_proxy_ca_certificate](#helm_lib_kube_rbac_proxy_ca_certificate) | -| **Module Documentation Uri** | -| [helm_lib_module_documentation_uri](#helm_lib_module_documentation_uri) | -| **Module Ephemeral Storage** | -| [helm_lib_module_ephemeral_storage_logs_with_extra](#helm_lib_module_ephemeral_storage_logs_with_extra) | -| [helm_lib_module_ephemeral_storage_only_logs](#helm_lib_module_ephemeral_storage_only_logs) | -| **Module Generate Common Name** | -| [helm_lib_module_generate_common_name](#helm_lib_module_generate_common_name) | -| **Module Https** | -| [helm_lib_module_uri_scheme](#helm_lib_module_uri_scheme) | -| [helm_lib_module_https_mode](#helm_lib_module_https_mode) | -| [helm_lib_module_https_cert_manager_cluster_issuer_name](#helm_lib_module_https_cert_manager_cluster_issuer_name) | -| [helm_lib_module_https_ingress_tls_enabled](#helm_lib_module_https_ingress_tls_enabled) | -| [helm_lib_module_https_copy_custom_certificate](#helm_lib_module_https_copy_custom_certificate) | -| [helm_lib_module_https_secret_name](#helm_lib_module_https_secret_name) | -| **Module Image** | -| [helm_lib_module_image](#helm_lib_module_image) | -| [helm_lib_module_image_no_fail](#helm_lib_module_image_no_fail) | -| [helm_lib_module_common_image](#helm_lib_module_common_image) | -| [helm_lib_module_common_image_no_fail](#helm_lib_module_common_image_no_fail) | -| **Module Ingress Class** | -| [helm_lib_module_ingress_class](#helm_lib_module_ingress_class) | -| **Module Init Container** | -| [helm_lib_module_init_container_chown_nobody_volume](#helm_lib_module_init_container_chown_nobody_volume) | -| [helm_lib_module_init_container_chown_deckhouse_volume](#helm_lib_module_init_container_chown_deckhouse_volume) | -| [helm_lib_module_init_container_check_linux_kernel](#helm_lib_module_init_container_check_linux_kernel) | -| **Module Labels** | -| [helm_lib_module_labels](#helm_lib_module_labels) | -| **Module Public Domain** | -| [helm_lib_module_public_domain](#helm_lib_module_public_domain) | -| **Module Security Context** | -| [helm_lib_module_pod_security_context_run_as_user_custom](#helm_lib_module_pod_security_context_run_as_user_custom) | -| [helm_lib_module_pod_security_context_run_as_user_nobody](#helm_lib_module_pod_security_context_run_as_user_nobody) | -| [helm_lib_module_pod_security_context_run_as_user_nobody_with_writable_fs](#helm_lib_module_pod_security_context_run_as_user_nobody_with_writable_fs) | -| [helm_lib_module_pod_security_context_run_as_user_deckhouse](#helm_lib_module_pod_security_context_run_as_user_deckhouse) | -| [helm_lib_module_pod_security_context_run_as_user_deckhouse_with_writable_fs](#helm_lib_module_pod_security_context_run_as_user_deckhouse_with_writable_fs) | -| [helm_lib_module_pod_security_context_run_as_user_root](#helm_lib_module_pod_security_context_run_as_user_root) | -| [helm_lib_module_pod_security_context_runtime_default](#helm_lib_module_pod_security_context_runtime_default) | -| 
[helm_lib_module_container_security_context_not_allow_privilege_escalation](#helm_lib_module_container_security_context_not_allow_privilege_escalation) | -| [helm_lib_module_container_security_context_read_only_root_filesystem_with_selinux](#helm_lib_module_container_security_context_read_only_root_filesystem_with_selinux) | -| [helm_lib_module_container_security_context_read_only_root_filesystem](#helm_lib_module_container_security_context_read_only_root_filesystem) | -| [helm_lib_module_container_security_context_privileged](#helm_lib_module_container_security_context_privileged) | -| [helm_lib_module_container_security_context_escalated_sys_admin_privileged](#helm_lib_module_container_security_context_escalated_sys_admin_privileged) | -| [helm_lib_module_container_security_context_privileged_read_only_root_filesystem](#helm_lib_module_container_security_context_privileged_read_only_root_filesystem) | -| [helm_lib_module_container_security_context_read_only_root_filesystem_capabilities_drop_all](#helm_lib_module_container_security_context_read_only_root_filesystem_capabilities_drop_all) | -| [helm_lib_module_container_security_context_read_only_root_filesystem_capabilities_drop_all_and_add](#helm_lib_module_container_security_context_read_only_root_filesystem_capabilities_drop_all_and_add) | -| [helm_lib_module_container_security_context_capabilities_drop_all_and_add](#helm_lib_module_container_security_context_capabilities_drop_all_and_add) | -| [helm_lib_module_container_security_context_capabilities_drop_all_and_run_as_user_custom](#helm_lib_module_container_security_context_capabilities_drop_all_and_run_as_user_custom) | -| **Module Storage Class** | -| [helm_lib_module_storage_class_annotations](#helm_lib_module_storage_class_annotations) | -| **Monitoring Grafana Dashboards** | -| [helm_lib_grafana_dashboard_definitions_recursion](#helm_lib_grafana_dashboard_definitions_recursion) | -| [helm_lib_grafana_dashboard_definitions](#helm_lib_grafana_dashboard_definitions) | -| [helm_lib_single_dashboard](#helm_lib_single_dashboard) | -| **Monitoring Prometheus Rules** | -| [helm_lib_prometheus_rules_recursion](#helm_lib_prometheus_rules_recursion) | -| [helm_lib_prometheus_rules](#helm_lib_prometheus_rules) | -| [helm_lib_prometheus_target_scrape_timeout_seconds](#helm_lib_prometheus_target_scrape_timeout_seconds) | -| **Node Affinity** | -| [helm_lib_internal_check_node_selector_strategy](#helm_lib_internal_check_node_selector_strategy) | -| [helm_lib_node_selector](#helm_lib_node_selector) | -| [helm_lib_tolerations](#helm_lib_tolerations) | -| [_helm_lib_cloud_or_hybrid_cluster](#_helm_lib_cloud_or_hybrid_cluster) | -| [helm_lib_internal_check_tolerations_strategy](#helm_lib_internal_check_tolerations_strategy) | -| [_helm_lib_any_node_tolerations](#_helm_lib_any_node_tolerations) | -| [_helm_lib_wildcard_tolerations](#_helm_lib_wildcard_tolerations) | -| [_helm_lib_monitoring_tolerations](#_helm_lib_monitoring_tolerations) | -| [_helm_lib_frontend_tolerations](#_helm_lib_frontend_tolerations) | -| [_helm_lib_system_tolerations](#_helm_lib_system_tolerations) | -| [_helm_lib_additional_tolerations_uninitialized](#_helm_lib_additional_tolerations_uninitialized) | -| [_helm_lib_additional_tolerations_node_problems](#_helm_lib_additional_tolerations_node_problems) | -| [_helm_lib_additional_tolerations_storage_problems](#_helm_lib_additional_tolerations_storage_problems) | -| [_helm_lib_additional_tolerations_no_csi](#_helm_lib_additional_tolerations_no_csi) | -| 
[_helm_lib_additional_tolerations_cloud_provider_uninitialized](#_helm_lib_additional_tolerations_cloud_provider_uninitialized) | -| **Pod Disruption Budget** | -| [helm_lib_pdb_daemonset](#helm_lib_pdb_daemonset) | -| **Priority Class** | -| [helm_lib_priority_class](#helm_lib_priority_class) | -| **Resources Management** | -| [helm_lib_resources_management_pod_resources](#helm_lib_resources_management_pod_resources) | -| [helm_lib_resources_management_original_pod_resources](#helm_lib_resources_management_original_pod_resources) | -| [helm_lib_resources_management_vpa_spec](#helm_lib_resources_management_vpa_spec) | -| [helm_lib_resources_management_cpu_units_to_millicores](#helm_lib_resources_management_cpu_units_to_millicores) | -| [helm_lib_resources_management_memory_units_to_bytes](#helm_lib_resources_management_memory_units_to_bytes) | -| [helm_lib_vpa_kube_rbac_proxy_resources](#helm_lib_vpa_kube_rbac_proxy_resources) | -| [helm_lib_container_kube_rbac_proxy_resources](#helm_lib_container_kube_rbac_proxy_resources) | -| **Spec For High Availability** | -| [helm_lib_pod_anti_affinity_for_ha](#helm_lib_pod_anti_affinity_for_ha) | -| [helm_lib_deployment_on_master_strategy_and_replicas_for_ha](#helm_lib_deployment_on_master_strategy_and_replicas_for_ha) | -| [helm_lib_deployment_on_master_custom_strategy_and_replicas_for_ha](#helm_lib_deployment_on_master_custom_strategy_and_replicas_for_ha) | -| [helm_lib_deployment_strategy_and_replicas_for_ha](#helm_lib_deployment_strategy_and_replicas_for_ha) | - -## Envs For Proxy - -### helm_lib_envs_for_proxy - - Add HTTP_PROXY, HTTPS_PROXY and NO_PROXY environment variables for container - depends on [proxy settings](https://deckhouse.io/documentation/v1/deckhouse-configure-global.html#parameters-modules-proxy) - -#### Usage - -`{{ include "helm_lib_envs_for_proxy" . }} ` - -#### Arguments - -- Template context with .Values, .Chart, etc - -## High Availability - -### helm_lib_is_ha_to_value - - returns value "yes" if cluster is highly available, else — returns "no" - -#### Usage - -`{{ include "helm_lib_is_ha_to_value" (list . yes no) }} ` - -#### Arguments - -list: -- Template context with .Values, .Chart, etc -- Yes value -- No value - - -### helm_lib_ha_enabled - - returns empty value, which is treated by go template as false - -#### Usage - -`{{- if (include "helm_lib_ha_enabled" .) }} ` - -#### Arguments - -- Template context with .Values, .Chart, etc - -## Kube Rbac Proxy - -### helm_lib_kube_rbac_proxy_ca_certificate - - Renders configmap with kube-rbac-proxy CA certificate which uses to verify the kube-rbac-proxy clients. - -#### Usage - -`{{ include "helm_lib_kube_rbac_proxy_ca_certificate" (list . "namespace") }} ` - -#### Arguments - -list: -- Template context with .Values, .Chart, etc -- Namespace where CA configmap will be created - -## Module Documentation Uri - -### helm_lib_module_documentation_uri - - returns rendered documentation uri using publicDomainTemplate or deckhouse.io domains - -#### Usage - -`{{ include "helm_lib_module_documentation_uri" (list . 
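For example, a module workload template might combine the proxy and high-availability helpers roughly as follows; this is a minimal sketch, assuming a hypothetical Deployment named `example` and using replica counts `2`/`1` as the yes/no values:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: example            # illustrative name
spec:
  # 2 replicas when the cluster is highly available, 1 otherwise.
  replicas: {{ include "helm_lib_is_ha_to_value" (list . 2 1) }}
  template:
    spec:
      containers:
      - name: example
        image: example:latest   # illustrative image
        env:
        # Renders HTTP_PROXY/HTTPS_PROXY/NO_PROXY from the global proxy settings, if any are configured.
        {{- include "helm_lib_envs_for_proxy" . | nindent 8 }}
```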
"") }} ` - - -## Module Ephemeral Storage - -### helm_lib_module_ephemeral_storage_logs_with_extra - - 50Mi for container logs `log-opts.max-file * log-opts.max-size` would be added to passed value - returns ephemeral-storage size for logs with extra space - -#### Usage - -`{{ include "helm_lib_module_ephemeral_storage_logs_with_extra" 10 }} ` - -#### Arguments - -- Extra space in mebibytes - - -### helm_lib_module_ephemeral_storage_only_logs - - 50Mi for container logs `log-opts.max-file * log-opts.max-size` would be requested - returns ephemeral-storage size for only logs - -#### Usage - -`{{ include "helm_lib_module_ephemeral_storage_only_logs" . }} ` - -#### Arguments - -- Template context with .Values, .Chart, etc - -## Module Generate Common Name - -### helm_lib_module_generate_common_name - - returns the commonName parameter for use in the Certificate custom resource(cert-manager) - -#### Usage - -`{{ include "helm_lib_module_generate_common_name" (list . "") }} ` - -#### Arguments - -list: -- Template context with .Values, .Chart, etc -- Name portion - -## Module Https - -### helm_lib_module_uri_scheme - - return module uri scheme "http" or "https" - -#### Usage - -`{{ include "helm_lib_module_uri_scheme" . }} ` - -#### Arguments - -- Template context with .Values, .Chart, etc - - -### helm_lib_module_https_mode - - returns https mode for module - -#### Usage - -`{{ if (include "helm_lib_module_https_mode" .) }} ` - -#### Arguments - -- Template context with .Values, .Chart, etc - - -### helm_lib_module_https_cert_manager_cluster_issuer_name - - returns cluster issuer name - -#### Usage - -`{{ include "helm_lib_module_https_cert_manager_cluster_issuer_name" . }} ` - -#### Arguments - -- Template context with .Values, .Chart, etc - - -### helm_lib_module_https_ingress_tls_enabled - - returns not empty string if tls should enable for ingress - -#### Usage - -`{{ if (include "helm_lib_module_https_ingress_tls_enabled" .) }} ` - -#### Arguments - -- Template context with .Values, .Chart, etc - - -### helm_lib_module_https_copy_custom_certificate - - Renders secret with [custom certificate](https://deckhouse.io/documentation/v1/deckhouse-configure-global.html#parameters-modules-https-customcertificate) - in passed namespace with passed prefix - -#### Usage - -`{{ include "helm_lib_module_https_copy_custom_certificate" (list . "namespace" "secret_name_prefix") }} ` - -#### Arguments - -list: -- Template context with .Values, .Chart, etc -- Namespace -- Secret name prefix - - -### helm_lib_module_https_secret_name - - returns custom certificate name - -#### Usage - -`{{ include "helm_lib_module_https_secret_name (list . "secret_name_prefix") }} ` - -#### Arguments - -list: -- Template context with .Values, .Chart, etc -- Secret name prefix - -## Module Image - -### helm_lib_module_image - - returns image name - -#### Usage - -`{{ include "helm_lib_module_image" (list . "") }} ` - -#### Arguments - -list: -- Template context with .Values, .Chart, etc -- Container name - - -### helm_lib_module_image_no_fail - - returns image name if found - -#### Usage - -`{{ include "helm_lib_module_image_no_fail" (list . "") }} ` - -#### Arguments - -list: -- Template context with .Values, .Chart, etc -- Container name - - -### helm_lib_module_common_image - - returns image name from common module - -#### Usage - -`{{ include "helm_lib_module_common_image" (list . 
"") }} ` - -#### Arguments - -list: -- Template context with .Values, .Chart, etc -- Container name - - -### helm_lib_module_common_image_no_fail - - returns image name from common module if found - -#### Usage - -`{{ include "helm_lib_module_common_image_no_fail" (list . "") }} ` - -#### Arguments - -list: -- Template context with .Values, .Chart, etc -- Container name - -## Module Ingress Class - -### helm_lib_module_ingress_class - - returns ingress class from module settings or if not exists from global config - -#### Usage - -`{{ include "helm_lib_module_ingress_class" . }} ` - -#### Arguments - -- Template context with .Values, .Chart, etc - -## Module Init Container - -### helm_lib_module_init_container_chown_nobody_volume - - ### Migration 11.12.2020: Remove this helper with all its usages after this commit reached RockSolid - returns initContainer which chowns recursively all files and directories in passed volume - -#### Usage - -`{{ include "helm_lib_module_init_container_chown_nobody_volume" (list . "volume-name") }} ` - - - -### helm_lib_module_init_container_chown_deckhouse_volume - - returns initContainer which chowns recursively all files and directories in passed volume - -#### Usage - -`{{ include "helm_lib_module_init_container_chown_deckhouse_volume" (list . "volume-name") }} ` - - - -### helm_lib_module_init_container_check_linux_kernel - - returns initContainer which checks the kernel version on the node for compliance to semver constraint - -#### Usage - -`{{ include "helm_lib_module_init_container_check_linux_kernel" (list . ">= 4.9.17") }} ` - -#### Arguments - -list: -- Template context with .Values, .Chart, etc -- Semver constraint - -## Module Labels - -### helm_lib_module_labels - - returns deckhouse labels - -#### Usage - -`{{ include "helm_lib_module_labels" (list . (dict "app" "test" "component" "testing")) }} ` - -#### Arguments - -list: -- Template context with .Values, .Chart, etc -- Additional labels dict - -## Module Public Domain - -### helm_lib_module_public_domain - - returns rendered publicDomainTemplate to service fqdn - -#### Usage - -`{{ include "helm_lib_module_public_domain" (list . "") }} ` - -#### Arguments - -list: -- Template context with .Values, .Chart, etc -- Name portion - -## Module Security Context - -### helm_lib_module_pod_security_context_run_as_user_custom - - returns PodSecurityContext parameters for Pod with custom user and group - -#### Usage - -`{{ include "helm_lib_module_pod_security_context_run_as_user_custom" (list . 1000 1000) }} ` - -#### Arguments - -list: -- Template context with .Values, .Chart, etc -- User id -- Group id - - -### helm_lib_module_pod_security_context_run_as_user_nobody - - returns PodSecurityContext parameters for Pod with user and group "nobody" - -#### Usage - -`{{ include "helm_lib_module_pod_security_context_run_as_user_nobody" . }} ` - -#### Arguments - -- Template context with .Values, .Chart, etc - - -### helm_lib_module_pod_security_context_run_as_user_nobody_with_writable_fs - - returns PodSecurityContext parameters for Pod with user and group "nobody" with write access to mounted volumes - -#### Usage - -`{{ include "helm_lib_module_pod_security_context_run_as_user_nobody_with_writable_fs" . 
}} ` - -#### Arguments - -- Template context with .Values, .Chart, etc - - -### helm_lib_module_pod_security_context_run_as_user_deckhouse - - returns PodSecurityContext parameters for Pod with user and group "deckhouse" - -#### Usage - -`{{ include "helm_lib_module_pod_security_context_run_as_user_deckhouse" . }} ` - -#### Arguments - -- Template context with .Values, .Chart, etc - - -### helm_lib_module_pod_security_context_run_as_user_deckhouse_with_writable_fs - - returns PodSecurityContext parameters for Pod with user and group "deckhouse" with write access to mounted volumes - -#### Usage - -`{{ include "helm_lib_module_pod_security_context_run_as_user_deckhouse_with_writable_fs" . }} ` - -#### Arguments - -- Template context with .Values, .Chart, etc - - -### helm_lib_module_pod_security_context_run_as_user_root - - returns PodSecurityContext parameters for Pod with user and group 0 - -#### Usage - -`{{ include "helm_lib_module_pod_security_context_run_as_user_root" . }} ` - -#### Arguments - -- Template context with .Values, .Chart, etc - - -### helm_lib_module_pod_security_context_runtime_default - - returns PodSecurityContext parameters for Pod with seccomp profile RuntimeDefault - -#### Usage - -`{{ include "helm_lib_module_pod_security_context_runtime_default" . }} ` - -#### Arguments - -- Template context with .Values, .Chart, etc - - -### helm_lib_module_container_security_context_not_allow_privilege_escalation - - returns SecurityContext parameters for Container with allowPrivilegeEscalation false - -#### Usage - -`{{ include "helm_lib_module_container_security_context_not_allow_privilege_escalation" . }} ` - - - -### helm_lib_module_container_security_context_read_only_root_filesystem_with_selinux - - returns SecurityContext parameters for Container with read only root filesystem and options for SELinux compatibility - -#### Usage - -`{{ include "helm_lib_module_container_security_context_read_only_root_filesystem_with_selinux" . }} ` - -#### Arguments - -- Template context with .Values, .Chart, etc - - -### helm_lib_module_container_security_context_read_only_root_filesystem - - returns SecurityContext parameters for Container with read only root filesystem - -#### Usage - -`{{ include "helm_lib_module_container_security_context_read_only_root_filesystem" . }} ` - -#### Arguments - -- Template context with .Values, .Chart, etc - - -### helm_lib_module_container_security_context_privileged - - returns SecurityContext parameters for Container running privileged - -#### Usage - -`{{ include "helm_lib_module_container_security_context_privileged" . }} ` - - - -### helm_lib_module_container_security_context_escalated_sys_admin_privileged - - returns SecurityContext parameters for Container running privileged with escalation and sys_admin - -#### Usage - -`{{ include "helm_lib_module_container_security_context_escalated_sys_admin_privileged" . }} ` - - - -### helm_lib_module_container_security_context_privileged_read_only_root_filesystem - - returns SecurityContext parameters for Container running privileged with read only root filesystem - -#### Usage - -`{{ include "helm_lib_module_container_security_context_privileged_read_only_root_filesystem" . 
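A minimal sketch showing how a pod-level and a container-level security-context helper are usually placed in a pod spec; the container name and image are illustrative:

```yaml
spec:
  # Pod-level securityContext with user and group "deckhouse".
  {{- include "helm_lib_module_pod_security_context_run_as_user_deckhouse" . | nindent 2 }}
  containers:
  - name: example
    # Container-level securityContext with a read-only root filesystem.
    {{- include "helm_lib_module_container_security_context_read_only_root_filesystem" . | nindent 4 }}
    image: example:latest
```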
}} ` - -#### Arguments - -- Template context with .Values, .Chart, etc - - -### helm_lib_module_container_security_context_read_only_root_filesystem_capabilities_drop_all - - returns SecurityContext for Container with read only root filesystem and all capabilities dropped - -#### Usage - -`{{ include "helm_lib_module_container_security_context_read_only_root_filesystem_capabilities_drop_all" . }} ` - -#### Arguments - -- Template context with .Values, .Chart, etc - - -### helm_lib_module_container_security_context_read_only_root_filesystem_capabilities_drop_all_and_add - - returns SecurityContext parameters for Container with read only root filesystem, all dropped and some added capabilities - -#### Usage - -`{{ include "helm_lib_module_container_security_context_read_only_root_filesystem_capabilities_drop_all_and_add" (list . (list "KILL" "SYS_PTRACE")) }} ` - -#### Arguments - -list: -- Template context with .Values, .Chart, etc -- List of capabilities - - -### helm_lib_module_container_security_context_capabilities_drop_all_and_add - - returns SecurityContext parameters for Container with all dropped and some added capabilities - -#### Usage - -`{{ include "helm_lib_module_container_security_context_capabilities_drop_all_and_add" (list . (list "KILL" "SYS_PTRACE")) }} ` - -#### Arguments - -list: -- Template context with .Values, .Chart, etc -- List of capabilities - - -### helm_lib_module_container_security_context_capabilities_drop_all_and_run_as_user_custom - - returns SecurityContext parameters for Container with read only root filesystem, all dropped, and custom user ID - -#### Usage - -`{{ include "helm_lib_module_container_security_context_capabilities_drop_all_and_run_as_user_custom" (list . 1000 1000) }} ` - -#### Arguments - -list: -- Template context with .Values, .Chart, etc -- User id -- Group id - -## Module Storage Class - -### helm_lib_module_storage_class_annotations - - return module StorageClass annotations - -#### Usage - -`{{ include "helm_lib_module_storage_class_annotations" (list $ $index $storageClass.name) }} ` - -#### Arguments - -list: -- Template context with .Values, .Chart, etc -- Storage class index -- Storage class name - -## Monitoring Grafana Dashboards - -### helm_lib_grafana_dashboard_definitions_recursion - - returns all the dashboard-definintions from / - current dir is optional — used for recursion but you can use it for partially generating dashboards - -#### Usage - -`{{ include "helm_lib_grafana_dashboard_definitions_recursion" (list . [current dir]) }} ` - -#### Arguments - -list: -- Template context with .Values, .Chart, etc -- Dashboards root dir -- Dashboards current dir - - -### helm_lib_grafana_dashboard_definitions - - returns dashboard-definintions from monitoring/grafana-dashboards/ - -#### Usage - -`{{ include "helm_lib_grafana_dashboard_definitions" . }} ` - -#### Arguments - -- Template context with .Values, .Chart, etc - - -### helm_lib_single_dashboard - - renders a single dashboard - -#### Usage - -`{{ include "helm_lib_single_dashboard" (list . "dashboard-name" "folder" $dashboard) }} ` - -#### Arguments - -list: -- Template context with .Values, .Chart, etc -- Dashboard name -- Folder -- Dashboard definition - -## Monitoring Prometheus Rules - -### helm_lib_prometheus_rules_recursion - - returns all the prometheus rules from / - current dir is optional — used for recursion but you can use it for partially generating rules - -#### Usage - -`{{ include "helm_lib_prometheus_rules_recursion" (list . 
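A minimal sketch of the storage-class annotations helper inside a range loop; the values path `.Values.exampleModule.storageClasses` and the provisioner name are illustrative only:

```yaml
{{- range $index, $storageClass := .Values.exampleModule.storageClasses }}
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: {{ $storageClass.name }}
  {{- include "helm_lib_module_storage_class_annotations" (list $ $index $storageClass.name) | nindent 2 }}
provisioner: example.csi.driver
{{- end }}
```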
[current dir]) }} ` - -#### Arguments - -list: -- Template context with .Values, .Chart, etc -- Namespace for creating rules -- Rules root dir -- Current dir (optional) - - -### helm_lib_prometheus_rules - - returns all the prometheus rules from monitoring/prometheus-rules/ - -#### Usage - -`{{ include "helm_lib_prometheus_rules" (list . ) }} ` - -#### Arguments - -list: -- Template context with .Values, .Chart, etc -- Namespace for creating rules - - -### helm_lib_prometheus_target_scrape_timeout_seconds - - returns adjust timeout value to scrape interval / - -#### Usage - -`{{ include "helm_lib_prometheus_target_scrape_timeout_seconds" (list . ) }} ` - -#### Arguments - -list: -- Template context with .Values, .Chart, etc -- Target timeout in seconds - -## Node Affinity - -### helm_lib_internal_check_node_selector_strategy - - Verify node selector strategy. - - - -### helm_lib_node_selector - - Returns node selector for workloads depend on strategy. - -#### Arguments - -list: -- Template context with .Values, .Chart, etc -- strategy, one of "frontend" "monitoring" "system" "master" "any-node" "wildcard" - - -### helm_lib_tolerations - - Returns tolerations for workloads depend on strategy. - -#### Usage - -`{{ include "helm_lib_tolerations" (tuple . "any-node" "with-uninitialized" "without-storage-problems") }} ` - -#### Arguments - -list: -- Template context with .Values, .Chart, etc -- base strategy, one of "frontend" "monitoring" "system" any-node" "wildcard" -- list of additional strategies. To add strategy list it with prefix "with-", to remove strategy list it with prefix "without-". - - -### _helm_lib_cloud_or_hybrid_cluster - - Check cluster type. - Returns not empty string if this is cloud or hybrid cluster - - - -### helm_lib_internal_check_tolerations_strategy - - Verify base strategy. - Fails if strategy not in allowed list - - - -### _helm_lib_any_node_tolerations - - Base strategy for any uncordoned node in cluster. - -#### Usage - -`{{ include "helm_lib_tolerations" (tuple . "any-node") }} ` - - - -### _helm_lib_wildcard_tolerations - - Base strategy that tolerates all. - -#### Usage - -`{{ include "helm_lib_tolerations" (tuple . "wildcard") }} ` - - - -### _helm_lib_monitoring_tolerations - - Base strategy that tolerates nodes with "dedicated.deckhouse.io: monitoring" and "dedicated.deckhouse.io: system" taints. - -#### Usage - -`{{ include "helm_lib_tolerations" (tuple . "monitoring") }} ` - - - -### _helm_lib_frontend_tolerations - - Base strategy that tolerates nodes with "dedicated.deckhouse.io: frontend" taints. - -#### Usage - -`{{ include "helm_lib_tolerations" (tuple . "frontend") }} ` - - - -### _helm_lib_system_tolerations - - Base strategy that tolerates nodes with "dedicated.deckhouse.io: system" taints. - -#### Usage - -`{{ include "helm_lib_tolerations" (tuple . "system") }} ` - - - -### _helm_lib_additional_tolerations_uninitialized - - Additional strategy "uninitialized" - used for CNI's and kube-proxy to allow cni components scheduled on node after CCM initialization. - -#### Usage - -`{{ include "helm_lib_tolerations" (tuple . "any-node" "with-uninitialized") }} ` - - - -### _helm_lib_additional_tolerations_node_problems - - Additional strategy "node-problems" - used for shedule critical components on non-ready nodes or nodes under pressure. - -#### Usage - -`{{ include "helm_lib_tolerations" (tuple . 
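The node-selector and tolerations helpers are normally called side by side at the pod spec level. A minimal sketch using the documented "system" strategy:

```yaml
spec:
  template:
    spec:
      # nodeSelector for the "system" strategy.
      {{- include "helm_lib_node_selector" (tuple . "system") | nindent 6 }}
      # tolerations for the "system" strategy.
      {{- include "helm_lib_tolerations" (tuple . "system") | nindent 6 }}
```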
"any-node" "with-node-problems") }} ` - - - -### _helm_lib_additional_tolerations_storage_problems - - Additional strategy "storage-problems" - used for shedule critical components on nodes with drbd problems. This additional strategy enabled by default in any base strategy except "wildcard". - -#### Usage - -`{{ include "helm_lib_tolerations" (tuple . "any-node" "without-storage-problems") }} ` - - - -### _helm_lib_additional_tolerations_no_csi - - Additional strategy "no-csi" - used for any node with no CSI: any node, which was initialized by deckhouse, but have no csi-node driver registered on it. - -#### Usage - -`{{ include "helm_lib_tolerations" (tuple . "any-node" "with-no-csi") }} ` - - - -### _helm_lib_additional_tolerations_cloud_provider_uninitialized - - Additional strategy "cloud-provider-uninitialized" - used for any node which is not initialized by CCM. - -#### Usage - -`{{ include "helm_lib_tolerations" (tuple . "any-node" "with-cloud-provider-uninitialized") }} ` - - -## Pod Disruption Budget - -### helm_lib_pdb_daemonset - - Returns PDB max unavailable - -#### Usage - -`{{ include "helm_lib_pdb_daemonset" . }} ` - -#### Arguments - -- Template context with .Values, .Chart, etc - -## Priority Class - -### helm_lib_priority_class - - returns priority class if priority-class module enabled, otherwise returns nothing - -#### Arguments - -list: -- Template context with .Values, .Chart, etc -- Priority class name - -## Resources Management - -### helm_lib_resources_management_pod_resources - - returns rendered resources section based on configuration if it is - -#### Usage - -`{{ include "helm_lib_resources_management_pod_resources" (list [ephemeral storage requests]) }} ` - -#### Arguments - -list: -- VPA resource configuration [example](https://deckhouse.io/documentation/v1/modules/110-istio/configuration.html#parameters-controlplane-resourcesmanagement) -- Ephemeral storage requests - - -### helm_lib_resources_management_original_pod_resources - - returns rendered resources section based on configuration if it is present - -#### Usage - -`{{ include "helm_lib_resources_management_original_pod_resources" }} ` - -#### Arguments - -- VPA resource configuration [example](https://deckhouse.io/documentation/v1/modules/110-istio/configuration.html#parameters-controlplane-resourcesmanagement) - - -### helm_lib_resources_management_vpa_spec - - returns rendered vpa spec based on configuration and target reference - -#### Usage - -`{{ include "helm_lib_resources_management_vpa_spec" (list ) }} ` - -#### Arguments - -list: -- Target API version -- Target Kind -- Target Name -- Target container name -- VPA resource configuration [example](https://deckhouse.io/documentation/v1/modules/110-istio/configuration.html#parameters-controlplane-resourcesmanagement) - - -### helm_lib_resources_management_cpu_units_to_millicores - - helper for converting cpu units to millicores - -#### Usage - -`{{ include "helm_lib_resources_management_cpu_units_to_millicores" }} ` - - - -### helm_lib_resources_management_memory_units_to_bytes - - helper for converting memory units to bytes - -#### Usage - -`{{ include "helm_lib_resources_management_memory_units_to_bytes" }} ` - - - -### helm_lib_vpa_kube_rbac_proxy_resources - - helper for VPA resources for kube_rbac_proxy - -#### Usage - -`{{ include "helm_lib_vpa_kube_rbac_proxy_resources" . 
}} ` - -#### Arguments - -- Template context with .Values, .Chart, etc - - -### helm_lib_container_kube_rbac_proxy_resources - - helper for container resources for kube_rbac_proxy - -#### Usage - -`{{ include "helm_lib_container_kube_rbac_proxy_resources" . }} ` - -#### Arguments - -- Template context with .Values, .Chart, etc - -## Spec For High Availability - -### helm_lib_pod_anti_affinity_for_ha - - returns pod affinity spec - -#### Usage - -`{{ include "helm_lib_pod_anti_affinity_for_ha" (list . (dict "app" "test")) }} ` - -#### Arguments - -list: -- Template context with .Values, .Chart, etc -- Match labels for podAntiAffinity label selector - - -### helm_lib_deployment_on_master_strategy_and_replicas_for_ha - - returns deployment strategy and replicas for ha components running on master nodes - -#### Usage - -`{{ include "helm_lib_deployment_on_master_strategy_and_replicas_for_ha" }} ` - -#### Arguments - -- Template context with .Values, .Chart, etc - - -### helm_lib_deployment_on_master_custom_strategy_and_replicas_for_ha - - returns deployment with custom strategy and replicas for ha components running on master nodes - -#### Usage - -`{{ include "helm_lib_deployment_on_master_custom_strategy_and_replicas_for_ha" (list . (dict "strategy" "strategy_type")) }} ` - - - -### helm_lib_deployment_strategy_and_replicas_for_ha - - returns deployment strategy and replicas for ha components running not on master nodes - -#### Usage - -`{{ include "helm_lib_deployment_strategy_and_replicas_for_ha" }} ` - -#### Arguments - -- Template context with .Values, .Chart, etc diff --git a/charts/deckhouse_lib_helm/templates/_csi_controller.tpl b/charts/deckhouse_lib_helm/templates/_csi_controller.tpl deleted file mode 100644 index 094b365..0000000 --- a/charts/deckhouse_lib_helm/templates/_csi_controller.tpl +++ /dev/null @@ -1,700 +0,0 @@ -{{- define "attacher_resources" }} -cpu: 10m -memory: 25Mi -{{- end }} - -{{- define "provisioner_resources" }} -cpu: 10m -memory: 25Mi -{{- end }} - -{{- define "resizer_resources" }} -cpu: 10m -memory: 25Mi -{{- end }} - -{{- define "snapshotter_resources" }} -cpu: 10m -memory: 25Mi -{{- end }} - -{{- define "livenessprobe_resources" }} -cpu: 10m -memory: 25Mi -{{- end }} - -{{- define "controller_resources" }} -cpu: 10m -memory: 50Mi -{{- end }} - -{{- /* Usage: {{ include "helm_lib_csi_controller_manifests" (list . $config) }} */ -}} -{{- define "helm_lib_csi_controller_manifests" }} - {{- $context := index . 0 }} - - {{- $config := index . 
1 }} - {{- $fullname := $config.fullname | default "csi-controller" }} - {{- $snapshotterEnabled := dig "snapshotterEnabled" true $config }} - {{- $resizerEnabled := dig "resizerEnabled" true $config }} - {{- $topologyEnabled := dig "topologyEnabled" true $config }} - {{- $extraCreateMetadataEnabled := dig "extraCreateMetadataEnabled" false $config }} - {{- $controllerImage := $config.controllerImage | required "$config.controllerImage is required" }} - {{- $provisionerTimeout := $config.provisionerTimeout | default "600s" }} - {{- $attacherTimeout := $config.attacherTimeout | default "600s" }} - {{- $resizerTimeout := $config.resizerTimeout | default "600s" }} - {{- $snapshotterTimeout := $config.snapshotterTimeout | default "600s" }} - {{- $provisionerWorkers := $config.provisionerWorkers | default "10" }} - {{- $attacherWorkers := $config.attacherWorkers | default "10" }} - {{- $resizerWorkers := $config.resizerWorkers | default "10" }} - {{- $snapshotterWorkers := $config.snapshotterWorkers | default "10" }} - {{- $additionalControllerEnvs := $config.additionalControllerEnvs }} - {{- $additionalControllerArgs := $config.additionalControllerArgs }} - {{- $additionalControllerVolumes := $config.additionalControllerVolumes }} - {{- $additionalControllerVolumeMounts := $config.additionalControllerVolumeMounts }} - {{- $additionalContainers := $config.additionalContainers }} - {{- $livenessProbePort := $config.livenessProbePort | default 9808 }} - - {{- $kubernetesSemVer := semver $context.Values.global.discovery.kubernetesVersion }} - - {{- $provisionerImageName := join "" (list "csiExternalProvisioner" $kubernetesSemVer.Major $kubernetesSemVer.Minor) }} - {{- $provisionerImage := include "helm_lib_module_common_image_no_fail" (list $context $provisionerImageName) }} - - {{- $attacherImageName := join "" (list "csiExternalAttacher" $kubernetesSemVer.Major $kubernetesSemVer.Minor) }} - {{- $attacherImage := include "helm_lib_module_common_image_no_fail" (list $context $attacherImageName) }} - - {{- $resizerImageName := join "" (list "csiExternalResizer" $kubernetesSemVer.Major $kubernetesSemVer.Minor) }} - {{- $resizerImage := include "helm_lib_module_common_image_no_fail" (list $context $resizerImageName) }} - - {{- $snapshotterImageName := join "" (list "csiExternalSnapshotter" $kubernetesSemVer.Major $kubernetesSemVer.Minor) }} - {{- $snapshotterImage := include "helm_lib_module_common_image_no_fail" (list $context $snapshotterImageName) }} - - {{- $livenessprobeImageName := join "" (list "csiLivenessprobe" $kubernetesSemVer.Major $kubernetesSemVer.Minor) }} - {{- $livenessprobeImage := include "helm_lib_module_common_image_no_fail" (list $context $livenessprobeImageName) }} - - {{- if $provisionerImage }} - {{- if ($context.Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} ---- -apiVersion: autoscaling.k8s.io/v1 -kind: VerticalPodAutoscaler -metadata: - name: {{ $fullname }} - namespace: d8-{{ $context.Chart.Name }} - {{- include "helm_lib_module_labels" (list $context (dict "app" "csi-controller" "workload-resource-policy.deckhouse.io" "master")) | nindent 2 }} -spec: - targetRef: - apiVersion: "apps/v1" - kind: Deployment - name: {{ $fullname }} - updatePolicy: - updateMode: "Auto" - resourcePolicy: - containerPolicies: - - containerName: "provisioner" - minAllowed: - {{- include "provisioner_resources" $context | nindent 8 }} - maxAllowed: - cpu: 20m - memory: 50Mi - - containerName: "attacher" - minAllowed: - {{- include "attacher_resources" $context | nindent 8 
}} - maxAllowed: - cpu: 20m - memory: 50Mi - {{- if $resizerEnabled }} - - containerName: "resizer" - minAllowed: - {{- include "resizer_resources" $context | nindent 8 }} - maxAllowed: - cpu: 20m - memory: 50Mi - {{- end }} - {{- if $snapshotterEnabled }} - - containerName: "snapshotter" - minAllowed: - {{- include "snapshotter_resources" $context | nindent 8 }} - maxAllowed: - cpu: 20m - memory: 50Mi - {{- end }} - - containerName: "livenessprobe" - minAllowed: - {{- include "livenessprobe_resources" $context | nindent 8 }} - maxAllowed: - cpu: 20m - memory: 50Mi - - containerName: "controller" - minAllowed: - {{- include "controller_resources" $context | nindent 8 }} - maxAllowed: - cpu: 20m - memory: 100Mi - {{- end }} ---- -apiVersion: policy/v1 -kind: PodDisruptionBudget -metadata: - name: {{ $fullname }} - namespace: d8-{{ $context.Chart.Name }} - {{- include "helm_lib_module_labels" (list $context (dict "app" "csi-controller")) | nindent 2 }} -spec: - maxUnavailable: 1 - selector: - matchLabels: - app: {{ $fullname }} ---- -kind: Deployment -apiVersion: apps/v1 -metadata: - name: {{ $fullname }} - namespace: d8-{{ $context.Chart.Name }} - {{- include "helm_lib_module_labels" (list $context (dict "app" "csi-controller")) | nindent 2 }} -spec: - replicas: 1 - revisionHistoryLimit: 2 - selector: - matchLabels: - app: {{ $fullname }} - strategy: - type: Recreate - template: - metadata: - labels: - app: {{ $fullname }} - {{- if hasPrefix "cloud-provider-" $context.Chart.Name }} - annotations: - cloud-config-checksum: {{ include (print $context.Template.BasePath "/cloud-controller-manager/secret.yaml") $context | sha256sum }} - {{- end }} - spec: - hostNetwork: true - dnsPolicy: ClusterFirstWithHostNet - imagePullSecrets: - - name: deckhouse-registry - {{- include "helm_lib_priority_class" (tuple $context "system-cluster-critical") | nindent 6 }} - {{- include "helm_lib_node_selector" (tuple $context "master") | nindent 6 }} - {{- include "helm_lib_tolerations" (tuple $context "any-node" "with-uninitialized") | nindent 6 }} -{{- if $context.Values.global.enabledModules | has "csi-nfs" }} - {{- include "helm_lib_module_pod_security_context_runtime_default" . | nindent 6 }} -{{- else }} - {{- include "helm_lib_module_pod_security_context_run_as_user_deckhouse" . | nindent 6 }} -{{- end }} - serviceAccountName: csi - containers: - - name: provisioner - {{- include "helm_lib_module_container_security_context_read_only_root_filesystem" . 
| nindent 8 }} - image: {{ $provisionerImage | quote }} - args: - - "--timeout={{ $provisionerTimeout }}" - - "--v=5" - - "--csi-address=$(ADDRESS)" - {{- if $topologyEnabled }} - - "--feature-gates=Topology=true" - - "--strict-topology" - {{- else }} - - "--feature-gates=Topology=false" - {{- end }} - - "--default-fstype=ext4" - - "--leader-election=true" - - "--leader-election-namespace=$(NAMESPACE)" - - "--enable-capacity" - - "--capacity-ownerref-level=2" - {{- if $extraCreateMetadataEnabled }} - - "--extra-create-metadata=true" - {{- end }} - - "--worker-threads={{ $provisionerWorkers }}" - env: - - name: ADDRESS - value: /csi/csi.sock - - name: POD_NAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.name - - name: NAMESPACE - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.namespace - volumeMounts: - - name: socket-dir - mountPath: /csi - resources: - requests: - {{- include "helm_lib_module_ephemeral_storage_logs_with_extra" 10 | nindent 12 }} - {{- if not ( $context.Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} - {{- include "provisioner_resources" $context | nindent 12 }} - {{- end }} - - name: attacher - {{- include "helm_lib_module_container_security_context_read_only_root_filesystem" . | nindent 8 }} - image: {{ $attacherImage | quote }} - args: - - "--timeout={{ $attacherTimeout }}" - - "--v=5" - - "--csi-address=$(ADDRESS)" - - "--leader-election=true" - - "--leader-election-namespace=$(NAMESPACE)" - - "--worker-threads={{ $attacherWorkers }}" - env: - - name: ADDRESS - value: /csi/csi.sock - - name: NAMESPACE - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.namespace - volumeMounts: - - name: socket-dir - mountPath: /csi - resources: - requests: - {{- include "helm_lib_module_ephemeral_storage_logs_with_extra" 10 | nindent 12 }} - {{- if not ( $context.Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} - {{- include "attacher_resources" $context | nindent 12 }} - {{- end }} - {{- if $resizerEnabled }} - - name: resizer - {{- include "helm_lib_module_container_security_context_read_only_root_filesystem" . | nindent 8 }} - image: {{ $resizerImage | quote }} - args: - - "--timeout={{ $resizerTimeout }}" - - "--v=5" - - "--csi-address=$(ADDRESS)" - - "--leader-election=true" - - "--leader-election-namespace=$(NAMESPACE)" - - "--workers={{ $resizerWorkers }}" - env: - - name: ADDRESS - value: /csi/csi.sock - - name: NAMESPACE - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.namespace - volumeMounts: - - name: socket-dir - mountPath: /csi - resources: - requests: - {{- include "helm_lib_module_ephemeral_storage_logs_with_extra" 10 | nindent 12 }} - {{- if not ( $context.Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} - {{- include "resizer_resources" $context | nindent 12 }} - {{- end }} - {{- end }} - {{- if $snapshotterEnabled }} - - name: snapshotter - {{- include "helm_lib_module_container_security_context_read_only_root_filesystem" . 
| nindent 8 }} - image: {{ $snapshotterImage | quote }} - args: - - "--timeout={{ $snapshotterTimeout }}" - - "--v=5" - - "--csi-address=$(ADDRESS)" - - "--leader-election=true" - - "--leader-election-namespace=$(NAMESPACE)" - - "--worker-threads={{ $snapshotterWorkers }}" - env: - - name: ADDRESS - value: /csi/csi.sock - - name: NAMESPACE - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.namespace - volumeMounts: - - name: socket-dir - mountPath: /csi - resources: - requests: - {{- include "helm_lib_module_ephemeral_storage_logs_with_extra" 10 | nindent 12 }} - {{- if not ( $context.Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} - {{- include "snapshotter_resources" $context | nindent 12 }} - {{- end }} - {{- end }} - - name: livenessprobe - {{- include "helm_lib_module_container_security_context_read_only_root_filesystem" . | nindent 8 }} - image: {{ $livenessprobeImage | quote }} - args: - - "--csi-address=$(ADDRESS)" - - "--http-endpoint=$(HOST_IP):{{ $livenessProbePort }}" - env: - - name: ADDRESS - value: /csi/csi.sock - - name: HOST_IP - valueFrom: - fieldRef: - fieldPath: status.hostIP - volumeMounts: - - name: socket-dir - mountPath: /csi - resources: - requests: - {{- include "helm_lib_module_ephemeral_storage_logs_with_extra" 10 | nindent 12 }} - {{- if not ( $context.Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} - {{- include "livenessprobe_resources" $context | nindent 12 }} - {{- end }} - - name: controller -{{- if $context.Values.global.enabledModules | has "csi-nfs" }} - {{- include "helm_lib_module_container_security_context_escalated_sys_admin_privileged" . | nindent 8 }} -{{- else }} - {{- include "helm_lib_module_container_security_context_read_only_root_filesystem" . | nindent 8 }} -{{- end }} - image: {{ $controllerImage | quote }} - args: - {{- if $additionalControllerArgs }} - {{- $additionalControllerArgs | toYaml | nindent 8 }} - {{- end }} - {{- if $additionalControllerEnvs }} - env: - {{- $additionalControllerEnvs | toYaml | nindent 8 }} - {{- end }} - livenessProbe: - httpGet: - path: /healthz - port: {{ $livenessProbePort }} - volumeMounts: - - name: socket-dir - mountPath: /csi - {{- /* For an unknown reason vSphere csi-controller won't start without `/tmp` directory */ -}} - {{- if eq $context.Chart.Name "cloud-provider-vsphere" }} - - name: tmp - mountPath: /tmp - {{- end }} - {{- if $additionalControllerVolumeMounts }} - {{- $additionalControllerVolumeMounts | toYaml | nindent 8 }} - {{- end }} - resources: - requests: - {{- include "helm_lib_module_ephemeral_storage_logs_with_extra" 10 | nindent 12 }} - {{- if not ( $context.Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} - {{- include "controller_resources" $context | nindent 12 }} - {{- end }} - {{- if $additionalContainers }} - {{- $additionalContainers | toYaml | nindent 6 }} - {{- end }} - volumes: - - name: socket-dir - emptyDir: {} - {{- /* For an unknown reason vSphere csi-controller won't start without `/tmp` directory */ -}} - {{- if eq $context.Chart.Name "cloud-provider-vsphere" }} - - name: tmp - emptyDir: {} - {{- end }} - {{- if $additionalControllerVolumes }} - {{- $additionalControllerVolumes | toYaml | nindent 6 }} - {{- end }} - {{- end }} -{{- end }} - - -{{- /* Usage: {{ include "helm_lib_csi_controller_rbac" . 
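{{- /* A hypothetical caller of the csi-controller manifests helper defined above, e.g. from a
       module's own templates. The image helper argument "cephCsi" and the option values are
       illustrative only; only "controllerImage" is required. */ -}}
{{- $csiControllerConfig := dict "fullname" "csi-controller" }}
{{- $_ := set $csiControllerConfig "controllerImage" (include "helm_lib_module_image" (list . "cephCsi")) }}
{{- $_ := set $csiControllerConfig "snapshotterEnabled" true }}
{{- $_ := set $csiControllerConfig "topologyEnabled" false }}
{{- include "helm_lib_csi_controller_manifests" (list . $csiControllerConfig) }}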
}} */ -}} -{{- define "helm_lib_csi_controller_rbac" }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: csi - namespace: d8-{{ .Chart.Name }} - {{- include "helm_lib_module_labels" (list . (dict "app" "csi-controller")) | nindent 2 }} - -# =========== -# provisioner -# =========== -# Source https://github.com/kubernetes-csi/external-provisioner/blob/master/deploy/kubernetes/rbac.yaml ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: d8:{{ .Chart.Name }}:csi:controller:external-provisioner - {{- include "helm_lib_module_labels" (list . (dict "app" "csi-controller")) | nindent 2 }} -rules: -- apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "create", "delete"] -- apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "update"] -- apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list", "watch"] -- apiGroups: ["storage.k8s.io"] - resources: ["storageclasses"] - verbs: ["get", "list", "watch"] -- apiGroups: [""] - resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] -- apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshots"] - verbs: ["get", "list"] -- apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshotcontents"] - verbs: ["get", "list"] -- apiGroups: ["storage.k8s.io"] - resources: ["csinodes"] - verbs: ["get", "list", "watch"] -- apiGroups: [""] - resources: ["nodes"] - verbs: ["get", "list", "watch"] -# Access to volumeattachments is only needed when the CSI driver -# has the PUBLISH_UNPUBLISH_VOLUME controller capability. -# In that case, external-provisioner will watch volumeattachments -# to determine when it is safe to delete a volume. -- apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: d8:{{ .Chart.Name }}:csi:controller:external-provisioner - {{- include "helm_lib_module_labels" (list . (dict "app" "csi-controller")) | nindent 2 }} -subjects: -- kind: ServiceAccount - name: csi - namespace: d8-{{ .Chart.Name }} -roleRef: - kind: ClusterRole - name: d8:{{ .Chart.Name }}:csi:controller:external-provisioner - apiGroup: rbac.authorization.k8s.io ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi:controller:external-provisioner - namespace: d8-{{ .Chart.Name }} - {{- include "helm_lib_module_labels" (list . (dict "app" "csi-controller")) | nindent 2 }} -rules: -# Only one of the following rules for endpoints or leases is required based on -# what is set for `--leader-election-type`. Endpoints are deprecated in favor of Leases. -- apiGroups: [""] - resources: ["endpoints"] - verbs: ["get", "watch", "list", "delete", "update", "create"] -- apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "watch", "list", "delete", "update", "create"] -# Permissions for CSIStorageCapacity are only needed enabling the publishing -# of storage capacity information. -- apiGroups: ["storage.k8s.io"] - resources: ["csistoragecapacities"] - verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] -# The GET permissions below are needed for walking up the ownership chain -# for CSIStorageCapacity. They are sufficient for deployment via -# StatefulSet (only needs to get Pod) and Deployment (needs to get -# Pod and then ReplicaSet to find the Deployment). 
-- apiGroups: [""] - resources: ["pods"] - verbs: ["get"] -- apiGroups: ["apps"] - resources: ["replicasets"] - verbs: ["get"] ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi:controller:external-provisioner - namespace: d8-{{ .Chart.Name }} - {{- include "helm_lib_module_labels" (list . (dict "app" "csi-controller")) | nindent 2 }} -subjects: -- kind: ServiceAccount - name: csi - namespace: d8-{{ .Chart.Name }} -roleRef: - kind: Role - name: csi:controller:external-provisioner - apiGroup: rbac.authorization.k8s.io - -# ======== -# attacher -# ======== -# Source https://github.com/kubernetes-csi/external-attacher/blob/master/deploy/kubernetes/rbac.yaml ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: d8:{{ .Chart.Name }}:csi:controller:external-attacher - {{- include "helm_lib_module_labels" (list . (dict "app" "csi-controller")) | nindent 2 }} -rules: -- apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "update", "patch"] -- apiGroups: ["storage.k8s.io"] - resources: ["csinodes"] - verbs: ["get", "list", "watch"] -- apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments"] - verbs: ["get", "list", "watch", "update", "patch"] -- apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments/status"] - verbs: ["patch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: d8:{{ .Chart.Name }}:csi:controller:external-attacher - {{- include "helm_lib_module_labels" (list . (dict "app" "csi-controller")) | nindent 2 }} -subjects: -- kind: ServiceAccount - name: csi - namespace: d8-{{ .Chart.Name }} -roleRef: - kind: ClusterRole - name: d8:{{ .Chart.Name }}:csi:controller:external-attacher - apiGroup: rbac.authorization.k8s.io ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi:controller:external-attacher - namespace: d8-{{ .Chart.Name }} - {{- include "helm_lib_module_labels" (list . (dict "app" "csi-controller")) | nindent 2 }} -rules: -- apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "watch", "list", "delete", "update", "create"] ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi:controller:external-attacher - namespace: d8-{{ .Chart.Name }} - {{- include "helm_lib_module_labels" (list . (dict "app" "csi-controller")) | nindent 2 }} -subjects: -- kind: ServiceAccount - name: csi - namespace: d8-{{ .Chart.Name }} -roleRef: - kind: Role - name: csi:controller:external-attacher - apiGroup: rbac.authorization.k8s.io - -# ======= -# resizer -# ======= -# Source https://github.com/kubernetes-csi/external-resizer/blob/master/deploy/kubernetes/rbac.yaml ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: d8:{{ .Chart.Name }}:csi:controller:external-resizer - {{- include "helm_lib_module_labels" (list . 
(dict "app" "csi-controller")) | nindent 2 }} -rules: -- apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "patch"] -- apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch"] -- apiGroups: [""] - resources: ["pods"] - verbs: ["get", "list", "watch"] -- apiGroups: [""] - resources: ["persistentvolumeclaims/status"] - verbs: ["patch"] -- apiGroups: [""] - resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: d8:{{ .Chart.Name }}:csi:controller:external-resizer - {{- include "helm_lib_module_labels" (list . (dict "app" "csi-controller")) | nindent 2 }} -subjects: -- kind: ServiceAccount - name: csi - namespace: d8-{{ .Chart.Name }} -roleRef: - kind: ClusterRole - name: d8:{{ .Chart.Name }}:csi:controller:external-resizer - apiGroup: rbac.authorization.k8s.io ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi:controller:external-resizer - namespace: d8-{{ .Chart.Name }} - {{- include "helm_lib_module_labels" (list . (dict "app" "csi-controller")) | nindent 2 }} -rules: -- apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "watch", "list", "delete", "update", "create"] ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi:controller:external-resizer - namespace: d8-{{ .Chart.Name }} - {{- include "helm_lib_module_labels" (list . (dict "app" "csi-controller")) | nindent 2 }} -subjects: -- kind: ServiceAccount - name: csi - namespace: d8-{{ .Chart.Name }} -roleRef: - kind: Role - name: csi:controller:external-resizer - apiGroup: rbac.authorization.k8s.io -# ======== -# snapshotter -# ======== -# Source https://github.com/kubernetes-csi/external-snapshotter/blob/master/deploy/kubernetes/csi-snapshotter/rbac-csi-snapshotter.yaml ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: d8:{{ .Chart.Name }}:csi:controller:external-snapshotter - {{- include "helm_lib_module_labels" (list . (dict "app" "csi-controller")) | nindent 2 }} -rules: -- apiGroups: [""] - resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] -- apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list"] -- apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshotclasses"] - verbs: ["get", "list", "watch"] -- apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshotcontents"] - verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] -- apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshotcontents/status"] - verbs: ["update", "patch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: d8:{{ .Chart.Name }}:csi:controller:external-snapshotter - {{- include "helm_lib_module_labels" (list . (dict "app" "csi-controller")) | nindent 2 }} -subjects: -- kind: ServiceAccount - name: csi - namespace: d8-{{ .Chart.Name }} -roleRef: - kind: ClusterRole - name: d8:{{ .Chart.Name }}:csi:controller:external-snapshotter - apiGroup: rbac.authorization.k8s.io ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi:controller:external-snapshotter - namespace: d8-{{ .Chart.Name }} - {{- include "helm_lib_module_labels" (list . 
(dict "app" "csi-controller")) | nindent 2 }} -rules: -- apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "watch", "list", "delete", "update", "create"] ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi:controller:external-snapshotter - namespace: d8-{{ .Chart.Name }} - {{- include "helm_lib_module_labels" (list . (dict "app" "csi-controller")) | nindent 2 }} -subjects: -- kind: ServiceAccount - name: csi - namespace: d8-{{ .Chart.Name }} -roleRef: - kind: Role - name: csi:controller:external-snapshotter - apiGroup: rbac.authorization.k8s.io -{{- end }} diff --git a/charts/deckhouse_lib_helm/templates/_csi_node.tpl b/charts/deckhouse_lib_helm/templates/_csi_node.tpl deleted file mode 100644 index 19d494c..0000000 --- a/charts/deckhouse_lib_helm/templates/_csi_node.tpl +++ /dev/null @@ -1,193 +0,0 @@ -{{- define "node_driver_registrar_resources" }} -cpu: 12m -memory: 25Mi -{{- end }} - -{{- define "node_resources" }} -cpu: 12m -memory: 25Mi -{{- end }} - -{{- /* Usage: {{ include "helm_lib_csi_node_manifests" (list . $config) }} */ -}} -{{- define "helm_lib_csi_node_manifests" }} - {{- $context := index . 0 }} - - {{- $config := index . 1 }} - {{- $fullname := $config.fullname | default "csi-node" }} - {{- $nodeImage := $config.nodeImage | required "$config.nodeImage is required" }} - {{- $driverFQDN := $config.driverFQDN | required "$config.driverFQDN is required" }} - {{- $serviceAccount := $config.serviceAccount | default "" }} - {{- $additionalNodeEnvs := $config.additionalNodeEnvs }} - {{- $additionalNodeArgs := $config.additionalNodeArgs }} - {{- $additionalNodeVolumes := $config.additionalNodeVolumes }} - {{- $additionalNodeVolumeMounts := $config.additionalNodeVolumeMounts }} - {{- $initContainerCommand := $config.initContainerCommand }} - {{- $initContainerImage := $config.initContainerImage }} - - {{- $kubernetesSemVer := semver $context.Values.global.discovery.kubernetesVersion }} - {{- $driverRegistrarImageName := join "" (list "csiNodeDriverRegistrar" $kubernetesSemVer.Major $kubernetesSemVer.Minor) }} - {{- $driverRegistrarImage := include "helm_lib_module_common_image_no_fail" (list $context $driverRegistrarImageName) }} - {{- if $driverRegistrarImage }} - {{- if or (include "_helm_lib_cloud_or_hybrid_cluster" $context) ($context.Values.global.enabledModules | has "ceph-csi") ($context.Values.global.enabledModules | has "csi-nfs") ($context.Values.global.enabledModules | has "csi-ceph") }} - {{- if ($context.Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} ---- -apiVersion: autoscaling.k8s.io/v1 -kind: VerticalPodAutoscaler -metadata: - name: {{ $fullname }} - namespace: d8-{{ $context.Chart.Name }} - {{- include "helm_lib_module_labels" (list $context (dict "app" "csi-node" "workload-resource-policy.deckhouse.io" "every-node")) | nindent 2 }} -spec: - targetRef: - apiVersion: "apps/v1" - kind: DaemonSet - name: {{ $fullname }} - updatePolicy: - updateMode: "Auto" - resourcePolicy: - containerPolicies: - - containerName: "node-driver-registrar" - minAllowed: - {{- include "node_driver_registrar_resources" $context | nindent 8 }} - maxAllowed: - cpu: 25m - memory: 50Mi - - containerName: "node" - minAllowed: - {{- include "node_resources" $context | nindent 8 }} - maxAllowed: - cpu: 25m - memory: 50Mi - {{- end }} ---- -kind: DaemonSet -apiVersion: apps/v1 -metadata: - name: {{ $fullname }} - namespace: d8-{{ $context.Chart.Name }} - {{- include "helm_lib_module_labels" (list $context (dict 
"app" "csi-node")) | nindent 2 }} -spec: - updateStrategy: - type: RollingUpdate - selector: - matchLabels: - app: {{ $fullname }} - template: - metadata: - labels: - app: {{ $fullname }} - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - operator: In - key: node.deckhouse.io/type - values: - - CloudEphemeral - - CloudPermanent - - CloudStatic - {{- if or (eq $fullname "csi-node-rbd") (eq $fullname "csi-node-cephfs") (eq $fullname "csi-nfs") }} - - Static - {{- end }} - imagePullSecrets: - - name: deckhouse-registry - {{- include "helm_lib_priority_class" (tuple $context "system-node-critical") | nindent 6 }} - {{- include "helm_lib_tolerations" (tuple $context "any-node" "with-no-csi") | nindent 6 }} - {{- include "helm_lib_module_pod_security_context_run_as_user_root" . | nindent 6 }} - hostNetwork: true - dnsPolicy: ClusterFirstWithHostNet - containers: - - name: node-driver-registrar - {{- include "helm_lib_module_container_security_context_read_only_root_filesystem_capabilities_drop_all" $context | nindent 8 }} - image: {{ $driverRegistrarImage | quote }} - args: - - "--v=5" - - "--csi-address=$(CSI_ENDPOINT)" - - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" - env: - - name: CSI_ENDPOINT - value: "/csi/csi.sock" - - name: DRIVER_REG_SOCK_PATH - value: "/var/lib/kubelet/csi-plugins/{{ $driverFQDN }}/csi.sock" - - name: KUBE_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - volumeMounts: - - name: plugin-dir - mountPath: /csi - - name: registration-dir - mountPath: /registration - resources: - requests: - {{- include "helm_lib_module_ephemeral_storage_only_logs" 10 | nindent 12 }} - {{- if not ($context.Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} - {{- include "node_driver_registrar_resources" $context | nindent 12 }} - {{- end }} - - name: node - securityContext: - privileged: true - image: {{ $nodeImage }} - args: - {{- if $additionalNodeArgs }} - {{- $additionalNodeArgs | toYaml | nindent 8 }} - {{- end }} - {{- if $additionalNodeEnvs }} - env: - {{- $additionalNodeEnvs | toYaml | nindent 8 }} - {{- end }} - volumeMounts: - - name: kubelet-dir - mountPath: /var/lib/kubelet - mountPropagation: "Bidirectional" - - name: plugin-dir - mountPath: /csi - - name: device-dir - mountPath: /dev - {{- if $additionalNodeVolumeMounts }} - {{- $additionalNodeVolumeMounts | toYaml | nindent 8 }} - {{- end }} - resources: - requests: - {{- include "helm_lib_module_ephemeral_storage_logs_with_extra" 10 | nindent 12 }} - {{- if not ($context.Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} - {{- include "node_resources" $context | nindent 12 }} - {{- end }} - {{- if $initContainerCommand }} - initContainers: - - command: - {{- $initContainerCommand | toYaml | nindent 8 }} - image: {{ $initContainerImage }} - imagePullPolicy: IfNotPresent - name: csi-node-init-container - resources: - requests: - {{- include "helm_lib_module_ephemeral_storage_logs_with_extra" 10 | nindent 12 }} - {{- end }} - serviceAccount: {{ $serviceAccount | quote }} - serviceAccountName: {{ $serviceAccount | quote }} - volumes: - - name: registration-dir - hostPath: - path: /var/lib/kubelet/plugins_registry/ - type: Directory - - name: kubelet-dir - hostPath: - path: /var/lib/kubelet - type: Directory - - name: plugin-dir - hostPath: - path: /var/lib/kubelet/csi-plugins/{{ $driverFQDN }}/ - type: DirectoryOrCreate - - name: device-dir - hostPath: - path: /dev - type: Directory - {{- 
if $additionalNodeVolumes }} - {{- $additionalNodeVolumes | toYaml | nindent 6 }} - {{- end }} - {{- end }} - {{- end }} -{{- end }} diff --git a/charts/deckhouse_lib_helm/templates/_envs_for_proxy.tpl b/charts/deckhouse_lib_helm/templates/_envs_for_proxy.tpl deleted file mode 100644 index 177bb1c..0000000 --- a/charts/deckhouse_lib_helm/templates/_envs_for_proxy.tpl +++ /dev/null @@ -1,30 +0,0 @@ -{{- /* Usage: {{ include "helm_lib_envs_for_proxy" . }} */ -}} -{{- /* Add HTTP_PROXY, HTTPS_PROXY and NO_PROXY environment variables for container */ -}} -{{- /* depends on [proxy settings](https://deckhouse.io/documentation/v1/deckhouse-configure-global.html#parameters-modules-proxy) */ -}} -{{- define "helm_lib_envs_for_proxy" }} - {{- $context := . -}} {{- /* Template context with .Values, .Chart, etc */ -}} - {{- if $context.Values.global.clusterConfiguration }} - {{- if $context.Values.global.clusterConfiguration.proxy }} - {{- if $context.Values.global.clusterConfiguration.proxy.httpProxy }} -- name: HTTP_PROXY - value: {{ $context.Values.global.clusterConfiguration.proxy.httpProxy | quote }} -- name: http_proxy - value: {{ $context.Values.global.clusterConfiguration.proxy.httpProxy | quote }} - {{- end }} - {{- if $context.Values.global.clusterConfiguration.proxy.httpsProxy }} -- name: HTTPS_PROXY - value: {{ $context.Values.global.clusterConfiguration.proxy.httpsProxy | quote }} -- name: https_proxy - value: {{ $context.Values.global.clusterConfiguration.proxy.httpsProxy | quote }} - {{- end }} - {{- $noProxy := list "127.0.0.1" "169.254.169.254" $context.Values.global.clusterConfiguration.clusterDomain $context.Values.global.clusterConfiguration.podSubnetCIDR $context.Values.global.clusterConfiguration.serviceSubnetCIDR }} - {{- if $context.Values.global.clusterConfiguration.proxy.noProxy }} - {{- $noProxy = concat $noProxy $context.Values.global.clusterConfiguration.proxy.noProxy }} - {{- end }} -- name: NO_PROXY - value: {{ $noProxy | join "," | quote }} -- name: no_proxy - value: {{ $noProxy | join "," | quote }} - {{- end }} - {{- end }} -{{- end }} diff --git a/charts/deckhouse_lib_helm/templates/_high_availability.tpl b/charts/deckhouse_lib_helm/templates/_high_availability.tpl deleted file mode 100644 index 8c7da23..0000000 --- a/charts/deckhouse_lib_helm/templates/_high_availability.tpl +++ /dev/null @@ -1,39 +0,0 @@ -{{- /* Usage: {{ include "helm_lib_is_ha_to_value" (list . yes no) }} */ -}} -{{- /* returns value "yes" if cluster is highly available, else — returns "no" */ -}} -{{- define "helm_lib_is_ha_to_value" }} - {{- $context := index . 0 -}} {{- /* Template context with .Values, .Chart, etc */ -}} - {{- $yes := index . 1 -}} {{- /* Yes value */ -}} - {{- $no := index . 2 -}} {{- /* No value */ -}} - - {{- $module_values := (index $context.Values (include "helm_lib_module_camelcase_name" $context)) }} - - {{- if hasKey $module_values "highAvailability" -}} - {{- if $module_values.highAvailability -}} {{- $yes -}} {{- else -}} {{- $no -}} {{- end -}} - {{- else if hasKey $context.Values.global "highAvailability" -}} - {{- if $context.Values.global.highAvailability -}} {{- $yes -}} {{- else -}} {{- $no -}} {{- end -}} - {{- else -}} - {{- if $context.Values.global.discovery.clusterControlPlaneIsHighlyAvailable -}} {{- $yes -}} {{- else -}} {{- $no -}} {{- end -}} - {{- end -}} -{{- end }} - -{{- /* Usage: {{- if (include "helm_lib_ha_enabled" .) 
}} */ -}} -{{- /* returns empty value, which is treated by go template as false */ -}} -{{- define "helm_lib_ha_enabled" }} - {{- $context := . -}} {{- /* Template context with .Values, .Chart, etc */ -}} - - {{- $module_values := (index $context.Values (include "helm_lib_module_camelcase_name" $context)) }} - - {{- if hasKey $module_values "highAvailability" -}} - {{- if $module_values.highAvailability -}} - "not empty string" - {{- end -}} - {{- else if hasKey $context.Values.global "highAvailability" -}} - {{- if $context.Values.global.highAvailability -}} - "not empty string" - {{- end -}} - {{- else -}} - {{- if $context.Values.global.discovery.clusterControlPlaneIsHighlyAvailable -}} - "not empty string" - {{- end -}} - {{- end -}} -{{- end -}} diff --git a/charts/deckhouse_lib_helm/templates/_kube_rbac_proxy.tpl b/charts/deckhouse_lib_helm/templates/_kube_rbac_proxy.tpl deleted file mode 100644 index af9f7a4..0000000 --- a/charts/deckhouse_lib_helm/templates/_kube_rbac_proxy.tpl +++ /dev/null @@ -1,21 +0,0 @@ -{{- /* Usage: {{ include "helm_lib_kube_rbac_proxy_ca_certificate" (list . "namespace") }} */ -}} -{{- /* Renders configmap with kube-rbac-proxy CA certificate which uses to verify the kube-rbac-proxy clients. */ -}} -{{- define "helm_lib_kube_rbac_proxy_ca_certificate" -}} -{{- /* Template context with .Values, .Chart, etc */ -}} -{{- /* Namespace where CA configmap will be created */ -}} - {{- $context := index . 0 }} - {{- $namespace := index . 1 }} ---- -apiVersion: v1 -data: - ca.crt: | - {{ $context.Values.global.internal.modules.kubeRBACProxyCA.cert | nindent 4 }} -kind: ConfigMap -metadata: - annotations: - kubernetes.io/description: | - Contains a CA bundle that can be used to verify the kube-rbac-proxy clients. - {{- include "helm_lib_module_labels" (list $context) | nindent 2 }} - name: kube-rbac-proxy-ca.crt - namespace: {{ $namespace }} -{{- end }} diff --git a/charts/deckhouse_lib_helm/templates/_module_documentation_uri.tpl b/charts/deckhouse_lib_helm/templates/_module_documentation_uri.tpl deleted file mode 100644 index a02cf45..0000000 --- a/charts/deckhouse_lib_helm/templates/_module_documentation_uri.tpl +++ /dev/null @@ -1,15 +0,0 @@ -{{- /* Usage: {{ include "helm_lib_module_documentation_uri" (list . "") }} */ -}} -{{- /* returns rendered documentation uri using publicDomainTemplate or deckhouse.io domains*/ -}} -{{- define "helm_lib_module_documentation_uri" }} - {{- $default_doc_prefix := "https://deckhouse.io/documentation/v1" -}} - {{- $context := index . 0 -}} {{- /* Template context with .Values, .Chart, etc */ -}} - {{- $path_portion := index . 
1 -}} {{- /* Path to the document */ -}} - {{- $uri := "" -}} - {{- if $context.Values.global.modules.publicDomainTemplate }} - {{- $uri = printf "%s://%s%s" (include "helm_lib_module_uri_scheme" $context) (include "helm_lib_module_public_domain" (list $context "documentation")) $path_portion -}} - {{- else }} - {{- $uri = printf "%s%s" $default_doc_prefix $path_portion -}} - {{- end -}} - - {{ $uri }} -{{- end }} diff --git a/charts/deckhouse_lib_helm/templates/_module_ephemeral_storage.tpl b/charts/deckhouse_lib_helm/templates/_module_ephemeral_storage.tpl deleted file mode 100644 index 4b2dd02..0000000 --- a/charts/deckhouse_lib_helm/templates/_module_ephemeral_storage.tpl +++ /dev/null @@ -1,15 +0,0 @@ -{{- /* Usage: {{ include "helm_lib_module_ephemeral_storage_logs_with_extra" 10 }} */ -}} -{{- /* 50Mi for container logs `log-opts.max-file * log-opts.max-size` would be added to passed value */ -}} -{{- /* returns ephemeral-storage size for logs with extra space */ -}} -{{- define "helm_lib_module_ephemeral_storage_logs_with_extra" -}} -{{- /* Extra space in mebibytes */ -}} -ephemeral-storage: {{ add . 50 }}Mi -{{- end }} - -{{- /* Usage: {{ include "helm_lib_module_ephemeral_storage_only_logs" . }} */ -}} -{{- /* 50Mi for container logs `log-opts.max-file * log-opts.max-size` would be requested */ -}} -{{- /* returns ephemeral-storage size for only logs */ -}} -{{- define "helm_lib_module_ephemeral_storage_only_logs" -}} -{{- /* Template context with .Values, .Chart, etc */ -}} -ephemeral-storage: 50Mi -{{- end }} diff --git a/charts/deckhouse_lib_helm/templates/_module_generate_common_name.tpl b/charts/deckhouse_lib_helm/templates/_module_generate_common_name.tpl deleted file mode 100644 index fb142f8..0000000 --- a/charts/deckhouse_lib_helm/templates/_module_generate_common_name.tpl +++ /dev/null @@ -1,13 +0,0 @@ -{{- /* Usage: {{ include "helm_lib_module_generate_common_name" (list . "") }} */ -}} -{{- /* returns the commonName parameter for use in the Certificate custom resource(cert-manager) */ -}} -{{- define "helm_lib_module_generate_common_name" }} - {{- $context := index . 0 -}} {{- /* Template context with .Values, .Chart, etc */ -}} - {{- $name_portion := index . 1 -}} {{- /* Name portion */ -}} - - {{- $domain := include "helm_lib_module_public_domain" (list $context $name_portion) -}} - - {{- $domain_length := len $domain -}} - {{- if le $domain_length 64 -}} -commonName: {{ $domain }} - {{- end -}} -{{- end }} diff --git a/charts/deckhouse_lib_helm/templates/_module_https.tpl b/charts/deckhouse_lib_helm/templates/_module_https.tpl deleted file mode 100644 index 8ee41ef..0000000 --- a/charts/deckhouse_lib_helm/templates/_module_https.tpl +++ /dev/null @@ -1,160 +0,0 @@ -{{- /* Usage: {{ include "helm_lib_module_uri_scheme" . }} */ -}} -{{- /* return module uri scheme "http" or "https" */ -}} -{{- define "helm_lib_module_uri_scheme" -}} - {{- $context := . 
-}} {{- /* Template context with .Values, .Chart, etc */ -}} - {{- $mode := "" -}} - - {{- $module_values := (index $context.Values (include "helm_lib_module_camelcase_name" $context)) -}} - {{- if hasKey $module_values "https" -}} - {{- if hasKey $module_values.https "mode" -}} - {{- $mode = $module_values.https.mode -}} - {{- else }} - {{- $mode = $context.Values.global.modules.https.mode | default "" -}} - {{- end }} - {{- else }} - {{- $mode = $context.Values.global.modules.https.mode | default "" -}} - {{- end }} - - - {{- if eq "Disabled" $mode -}} - http - {{- else -}} - https - {{- end -}} -{{- end -}} - -{{- /* Usage: {{ $https_values := include "helm_lib_https_values" . | fromYaml }} */ -}} -{{- define "helm_lib_https_values" -}} - {{- $context := . -}} - {{- $module_values := (index $context.Values (include "helm_lib_module_camelcase_name" $context)) -}} - {{- $mode := "" -}} - {{- $certManagerClusterIssuerName := "" -}} - - {{- if hasKey $module_values "https" -}} - {{- if hasKey $module_values.https "mode" -}} - {{- $mode = $module_values.https.mode -}} - {{- if eq $mode "CertManager" -}} - {{- if not (hasKey $module_values.https "certManager") -}} - {{- cat ".https.certManager.clusterIssuerName is mandatory when .https.mode is set to CertManager" | fail -}} - {{- end -}} - {{- if hasKey $module_values.https.certManager "clusterIssuerName" -}} - {{- $certManagerClusterIssuerName = $module_values.https.certManager.clusterIssuerName -}} - {{- else -}} - {{- cat ".https.certManager.clusterIssuerName is mandatory when .https.mode is set to CertManager" | fail -}} - {{- end -}} - {{- end -}} - {{- else -}} - {{- cat ".https.mode is mandatory when .https is defined" | fail -}} - {{- end -}} - {{- end -}} - - {{- if empty $mode -}} - {{- $mode = $context.Values.global.modules.https.mode -}} - {{- if eq $mode "CertManager" -}} - {{- $certManagerClusterIssuerName = $context.Values.global.modules.https.certManager.clusterIssuerName -}} - {{- end -}} - {{- end -}} - - {{- if not (has $mode (list "Disabled" "CertManager" "CustomCertificate" "OnlyInURI")) -}} - {{- cat "Unknown https.mode:" $mode | fail -}} - {{- end -}} - - {{- if and (eq $mode "CertManager") (not ($context.Values.global.enabledModules | has "cert-manager")) -}} - {{- cat "https.mode has value CertManager but cert-manager module not enabled" | fail -}} - {{- end -}} - -mode: {{ $mode }} - {{- if eq $mode "CertManager" }} -certManager: - clusterIssuerName: {{ $certManagerClusterIssuerName }} - {{- end -}} - -{{- end -}} - -{{- /* Usage: {{ if (include "helm_lib_module_https_mode" .) }} */ -}} -{{- /* returns https mode for module */ -}} -{{- define "helm_lib_module_https_mode" -}} - {{- $context := . -}} {{- /* Template context with .Values, .Chart, etc */ -}} - {{- $https_values := include "helm_lib_https_values" $context | fromYaml -}} - {{- $https_values.mode -}} -{{- end -}} - -{{- /* Usage: {{ include "helm_lib_module_https_cert_manager_cluster_issuer_name" . }} */ -}} -{{- /* returns cluster issuer name */ -}} -{{- define "helm_lib_module_https_cert_manager_cluster_issuer_name" -}} - {{- $context := . -}} {{- /* Template context with .Values, .Chart, etc */ -}} - {{- $https_values := include "helm_lib_https_values" $context | fromYaml -}} - {{- $https_values.certManager.clusterIssuerName -}} -{{- end -}} - -{{- /* Usage: {{ if (include "helm_lib_module_https_cert_manager_cluster_issuer_is_dns01_challenge_solver" .) 
}} */ -}} -{{- define "helm_lib_module_https_cert_manager_cluster_issuer_is_dns01_challenge_solver" -}} - {{- $context := . -}} {{- /* Template context with .Values, .Chart, etc */ -}} - {{- if has (include "helm_lib_module_https_cert_manager_cluster_issuer_name" $context) (list "route53" "cloudflare" "digitalocean" "clouddns") }} - "not empty string" - {{- end -}} -{{- end -}} - -{{- /* Usage: {{ include "helm_lib_module_https_cert_manager_acme_solver_challenge_settings" . | nindent 4 }} */ -}} -{{- define "helm_lib_module_https_cert_manager_acme_solver_challenge_settings" -}} - {{- $context := . -}} {{- /* Template context with .Values, .Chart, etc */ -}} - {{- if (include "helm_lib_module_https_cert_manager_cluster_issuer_is_dns01_challenge_solver" $context) }} -- dns01: - provider: {{ include "helm_lib_module_https_cert_manager_cluster_issuer_name" $context }} - {{- else }} -- http01: - ingressClass: {{ include "helm_lib_module_ingress_class" $context | quote }} - {{- end }} -{{- end -}} - -{{- /* Usage: {{ if (include "helm_lib_module_https_ingress_tls_enabled" .) }} */ -}} -{{- /* returns not empty string if tls should enable for ingress */ -}} -{{- define "helm_lib_module_https_ingress_tls_enabled" -}} - {{- $context := . -}} {{- /* Template context with .Values, .Chart, etc */ -}} - - {{- $mode := include "helm_lib_module_https_mode" $context -}} - - {{- if or (eq "CertManager" $mode) (eq "CustomCertificate" $mode) -}} - not empty string - {{- end -}} -{{- end -}} - -{{- /* Usage: {{ include "helm_lib_module_https_copy_custom_certificate" (list . "namespace" "secret_name_prefix") }} */ -}} -{{- /* Renders secret with [custom certificate](https://deckhouse.io/documentation/v1/deckhouse-configure-global.html#parameters-modules-https-customcertificate) */ -}} -{{- /* in passed namespace with passed prefix */ -}} -{{- define "helm_lib_module_https_copy_custom_certificate" -}} - {{- $context := index . 0 -}} {{- /* Template context with .Values, .Chart, etc */ -}} - {{- $namespace := index . 1 -}} {{- /* Namespace */ -}} - {{- $secret_name_prefix := index . 2 -}} {{- /* Secret name prefix */ -}} - {{- $mode := include "helm_lib_module_https_mode" $context -}} - {{- if eq $mode "CustomCertificate" -}} - {{- $module_values := (index $context.Values (include "helm_lib_module_camelcase_name" $context)) -}} - {{- $secret_name := include "helm_lib_module_https_secret_name" (list $context $secret_name_prefix) -}} ---- -apiVersion: v1 -kind: Secret -metadata: - name: {{ $secret_name }} - namespace: {{ $namespace }} - {{- include "helm_lib_module_labels" (list $context) | nindent 2 }} -type: kubernetes.io/tls -data: {{ $module_values.internal.customCertificateData | toJson }} - {{- end -}} -{{- end -}} - -{{- /* Usage: {{ include "helm_lib_module_https_secret_name (list . "secret_name_prefix") }} */ -}} -{{- /* returns custom certificate name */ -}} -{{- define "helm_lib_module_https_secret_name" -}} - {{- $context := index . 0 -}} {{- /* Template context with .Values, .Chart, etc */ -}} - {{- $secret_name_prefix := index . 
1 -}} {{- /* Secret name prefix */ -}} - {{- $mode := include "helm_lib_module_https_mode" $context -}} - {{- if eq $mode "CertManager" -}} - {{- $secret_name_prefix -}} - {{- else -}} - {{- if eq $mode "CustomCertificate" -}} - {{- printf "%s-customcertificate" $secret_name_prefix -}} - {{- else -}} - {{- fail "https.mode must be CustomCertificate or CertManager" -}} - {{- end -}} - {{- end -}} -{{- end -}} diff --git a/charts/deckhouse_lib_helm/templates/_module_image.tpl b/charts/deckhouse_lib_helm/templates/_module_image.tpl deleted file mode 100644 index bdf29f0..0000000 --- a/charts/deckhouse_lib_helm/templates/_module_image.tpl +++ /dev/null @@ -1,76 +0,0 @@ -{{- /* Usage: {{ include "helm_lib_module_image" (list . "") }} */ -}} -{{- /* returns image name */ -}} -{{- define "helm_lib_module_image" }} - {{- $context := index . 0 }} {{- /* Template context with .Values, .Chart, etc */ -}} - {{- $containerName := index . 1 | trimAll "\"" }} {{- /* Container name */ -}} - {{- $moduleName := (include "helm_lib_module_camelcase_name" $context) }} - {{- if ge (len .) 3 }} - {{- $moduleName = (include "helm_lib_module_camelcase_name" (index . 2)) }} {{- /* Optional module name */ -}} - {{- end }} - {{- $imageDigest := index $context.Values.global.modulesImages.digests $moduleName $containerName }} - {{- if not $imageDigest }} - {{- $error := (printf "Image %s.%s has no digest" $moduleName $containerName ) }} - {{- fail $error }} - {{- end }} - {{- $registryBase := $context.Values.global.modulesImages.registry.base }} - {{- /* handle external modules registry */}} - {{- if index $context.Values $moduleName }} - {{- if index $context.Values $moduleName "registry" }} - {{- if index $context.Values $moduleName "registry" "base" }} - {{- $host := trimAll "/" (index $context.Values $moduleName "registry" "base") }} - {{- $path := trimAll "/" $context.Chart.Name }} - {{- $registryBase = join "/" (list $host $path) }} - {{- end }} - {{- end }} - {{- end }} - {{- printf "%s@%s" $registryBase $imageDigest }} -{{- end }} - -{{- /* Usage: {{ include "helm_lib_module_image_no_fail" (list . "") }} */ -}} -{{- /* returns image name if found */ -}} -{{- define "helm_lib_module_image_no_fail" }} - {{- $context := index . 0 }} {{- /* Template context with .Values, .Chart, etc */ -}} - {{- $containerName := index . 1 | trimAll "\"" }} {{- /* Container name */ -}} - {{- $moduleName := (include "helm_lib_module_camelcase_name" $context) }} - {{- if ge (len .) 3 }} - {{- $moduleName = (include "helm_lib_module_camelcase_name" (index . 2)) }} {{- /* Optional module name */ -}} - {{- end }} - {{- $imageDigest := index $context.Values.global.modulesImages.digests $moduleName $containerName }} - {{- if $imageDigest }} - {{- $registryBase := $context.Values.global.modulesImages.registry.base }} - {{- if index $context.Values $moduleName }} - {{- if index $context.Values $moduleName "registry" }} - {{- if index $context.Values $moduleName "registry" "base" }} - {{- $host := trimAll "/" (index $context.Values $moduleName "registry" "base") }} - {{- $path := trimAll "/" $context.Chart.Name }} - {{- $registryBase = join "/" (list $host $path) }} - {{- end }} - {{- end }} - {{- end }} - {{- printf "%s@%s" $registryBase $imageDigest }} - {{- end }} -{{- end }} - -{{- /* Usage: {{ include "helm_lib_module_common_image" (list . "") }} */ -}} -{{- /* returns image name from common module */ -}} -{{- define "helm_lib_module_common_image" }} - {{- $context := index . 
0 }} {{- /* Template context with .Values, .Chart, etc */ -}} - {{- $containerName := index . 1 | trimAll "\"" }} {{- /* Container name */ -}} - {{- $imageDigest := index $context.Values.global.modulesImages.digests "common" $containerName }} - {{- if not $imageDigest }} - {{- $error := (printf "Image %s.%s has no digest" "common" $containerName ) }} - {{- fail $error }} - {{- end }} - {{- printf "%s@%s" $context.Values.global.modulesImages.registry.base $imageDigest }} -{{- end }} - -{{- /* Usage: {{ include "helm_lib_module_common_image_no_fail" (list . "") }} */ -}} -{{- /* returns image name from common module if found */ -}} -{{- define "helm_lib_module_common_image_no_fail" }} - {{- $context := index . 0 }} {{- /* Template context with .Values, .Chart, etc */ -}} - {{- $containerName := index . 1 | trimAll "\"" }} {{- /* Container name */ -}} - {{- $imageDigest := index $context.Values.global.modulesImages.digests "common" $containerName }} - {{- if $imageDigest }} - {{- printf "%s@%s" $context.Values.global.modulesImages.registry.base $imageDigest }} - {{- end }} -{{- end }} \ No newline at end of file diff --git a/charts/deckhouse_lib_helm/templates/_module_ingress_class.tpl b/charts/deckhouse_lib_helm/templates/_module_ingress_class.tpl deleted file mode 100644 index db7f50b..0000000 --- a/charts/deckhouse_lib_helm/templates/_module_ingress_class.tpl +++ /dev/null @@ -1,13 +0,0 @@ -{{- /* Usage: {{ include "helm_lib_module_ingress_class" . }} */ -}} -{{- /* returns ingress class from module settings or if not exists from global config */ -}} -{{- define "helm_lib_module_ingress_class" -}} - {{- $context := . -}} {{- /* Template context with .Values, .Chart, etc */ -}} - - {{- $module_values := (index $context.Values (include "helm_lib_module_camelcase_name" $context)) -}} - - {{- if hasKey $module_values "ingressClass" -}} - {{- $module_values.ingressClass -}} - {{- else if hasKey $context.Values.global.modules "ingressClass" -}} - {{- $context.Values.global.modules.ingressClass -}} - {{- end -}} -{{- end -}} diff --git a/charts/deckhouse_lib_helm/templates/_module_init_container.tpl b/charts/deckhouse_lib_helm/templates/_module_init_container.tpl deleted file mode 100644 index 9b3fe00..0000000 --- a/charts/deckhouse_lib_helm/templates/_module_init_container.tpl +++ /dev/null @@ -1,56 +0,0 @@ -{{- /* ### Migration 11.12.2020: Remove this helper with all its usages after this commit reached RockSolid */ -}} -{{- /* Usage: {{ include "helm_lib_module_init_container_chown_nobody_volume" (list . "volume-name") }} */ -}} -{{- /* returns initContainer which chowns recursively all files and directories in passed volume */ -}} -{{- define "helm_lib_module_init_container_chown_nobody_volume" }} - {{- $context := index . 0 -}} - {{- $volume_name := index . 1 -}} -- name: chown-volume-{{ $volume_name }} - image: {{ include "helm_lib_module_common_image" (list $context "alpine") }} - command: ["sh", "-c", "chown -R 65534:65534 /tmp/{{ $volume_name }}"] - securityContext: - runAsNonRoot: false - runAsUser: 0 - runAsGroup: 0 - volumeMounts: - - name: {{ $volume_name }} - mountPath: /tmp/{{ $volume_name }} - resources: - requests: - {{- include "helm_lib_module_ephemeral_storage_only_logs" . | nindent 6 }} -{{- end }} - -{{- /* Usage: {{ include "helm_lib_module_init_container_chown_deckhouse_volume" (list . 
"volume-name") }} */ -}} -{{- /* returns initContainer which chowns recursively all files and directories in passed volume */ -}} -{{- define "helm_lib_module_init_container_chown_deckhouse_volume" }} - {{- $context := index . 0 -}} - {{- $volume_name := index . 1 -}} -- name: chown-volume-{{ $volume_name }} - image: {{ include "helm_lib_module_common_image" (list $context "alpine") }} - command: ["sh", "-c", "chown -R 64535:64535 /tmp/{{ $volume_name }}"] - securityContext: - runAsNonRoot: false - runAsUser: 0 - runAsGroup: 0 - volumeMounts: - - name: {{ $volume_name }} - mountPath: /tmp/{{ $volume_name }} - resources: - requests: - {{- include "helm_lib_module_ephemeral_storage_only_logs" . | nindent 6 }} -{{- end }} - -{{- /* Usage: {{ include "helm_lib_module_init_container_check_linux_kernel" (list . ">= 4.9.17") }} */ -}} -{{- /* returns initContainer which checks the kernel version on the node for compliance to semver constraint */ -}} -{{- define "helm_lib_module_init_container_check_linux_kernel" }} - {{- $context := index . 0 -}} {{- /* Template context with .Values, .Chart, etc */ -}} - {{- $semver_constraint := index . 1 -}} {{- /* Semver constraint */ -}} -- name: check-linux-kernel - image: {{ include "helm_lib_module_common_image" (list $context "checkKernelVersion") }} - {{- include "helm_lib_module_pod_security_context_run_as_user_deckhouse" . | nindent 2 }} - env: - - name: KERNEL_CONSTRAINT - value: {{ $semver_constraint | quote }} - resources: - requests: - {{- include "helm_lib_module_ephemeral_storage_only_logs" $context | nindent 6 }} -{{- end }} diff --git a/charts/deckhouse_lib_helm/templates/_module_labels.tpl b/charts/deckhouse_lib_helm/templates/_module_labels.tpl deleted file mode 100644 index 228dcf3..0000000 --- a/charts/deckhouse_lib_helm/templates/_module_labels.tpl +++ /dev/null @@ -1,15 +0,0 @@ -{{- /* Usage: {{ include "helm_lib_module_labels" (list . (dict "app" "test" "component" "testing")) }} */ -}} -{{- /* returns deckhouse labels */ -}} -{{- define "helm_lib_module_labels" }} - {{- $context := index . 0 -}} {{- /* Template context with .Values, .Chart, etc */ -}} - {{- /* Additional labels dict */ -}} -labels: - heritage: deckhouse - module: {{ $context.Chart.Name }} - {{- if eq (len .) 2 }} - {{- $deckhouse_additional_labels := index . 1 }} - {{- range $key, $value := $deckhouse_additional_labels }} - {{ $key }}: {{ $value | quote }} - {{- end }} - {{- end }} -{{- end }} diff --git a/charts/deckhouse_lib_helm/templates/_module_name.tpl b/charts/deckhouse_lib_helm/templates/_module_name.tpl deleted file mode 100644 index 0fecf05..0000000 --- a/charts/deckhouse_lib_helm/templates/_module_name.tpl +++ /dev/null @@ -1,11 +0,0 @@ -{{- define "helm_lib_module_camelcase_name" -}} - -{{- $moduleName := "" -}} -{{- if (kindIs "string" .) -}} -{{- $moduleName = . | trimAll "\"" -}} -{{- else -}} -{{- $moduleName = .Chart.Name -}} -{{- end -}} - -{{ $moduleName | replace "-" "_" | camelcase | untitle }} -{{- end -}} diff --git a/charts/deckhouse_lib_helm/templates/_module_public_domain.tpl b/charts/deckhouse_lib_helm/templates/_module_public_domain.tpl deleted file mode 100644 index bfbaae7..0000000 --- a/charts/deckhouse_lib_helm/templates/_module_public_domain.tpl +++ /dev/null @@ -1,11 +0,0 @@ -{{- /* Usage: {{ include "helm_lib_module_public_domain" (list . "") }} */ -}} -{{- /* returns rendered publicDomainTemplate to service fqdn */ -}} -{{- define "helm_lib_module_public_domain" }} - {{- $context := index . 
0 -}} {{- /* Template context with .Values, .Chart, etc */ -}} - {{- $name_portion := index . 1 -}} {{- /* Name portion */ -}} - - {{- if not (contains "%s" $context.Values.global.modules.publicDomainTemplate) }} - {{ fail "Error!!! global.modules.publicDomainTemplate must contain \"%s\" pattern to render service fqdn!" }} - {{- end }} - {{- printf $context.Values.global.modules.publicDomainTemplate $name_portion }} -{{- end }} diff --git a/charts/deckhouse_lib_helm/templates/_module_security_context.tpl b/charts/deckhouse_lib_helm/templates/_module_security_context.tpl deleted file mode 100644 index 8c5fcb8..0000000 --- a/charts/deckhouse_lib_helm/templates/_module_security_context.tpl +++ /dev/null @@ -1,183 +0,0 @@ -{{- /* Usage: {{ include "helm_lib_module_pod_security_context_run_as_user_custom" (list . 1000 1000) }} */ -}} -{{- /* returns PodSecurityContext parameters for Pod with custom user and group */ -}} -{{- define "helm_lib_module_pod_security_context_run_as_user_custom" -}} -{{- /* Template context with .Values, .Chart, etc */ -}} -{{- /* User id */ -}} -{{- /* Group id */ -}} -securityContext: - runAsNonRoot: true - runAsUser: {{ index . 1 }} - runAsGroup: {{ index . 2 }} -{{- end }} - -{{- /* Usage: {{ include "helm_lib_module_pod_security_context_run_as_user_nobody" . }} */ -}} -{{- /* returns PodSecurityContext parameters for Pod with user and group "nobody" */ -}} -{{- define "helm_lib_module_pod_security_context_run_as_user_nobody" -}} -{{- /* Template context with .Values, .Chart, etc */ -}} -securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 -{{- end }} - -{{- /* Usage: {{ include "helm_lib_module_pod_security_context_run_as_user_nobody_with_writable_fs" . }} */ -}} -{{- /* returns PodSecurityContext parameters for Pod with user and group "nobody" with write access to mounted volumes */ -}} -{{- define "helm_lib_module_pod_security_context_run_as_user_nobody_with_writable_fs" -}} -{{- /* Template context with .Values, .Chart, etc */ -}} -securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - fsGroup: 65534 -{{- end }} - -{{- /* Usage: {{ include "helm_lib_module_pod_security_context_run_as_user_deckhouse" . }} */ -}} -{{- /* returns PodSecurityContext parameters for Pod with user and group "deckhouse" */ -}} -{{- define "helm_lib_module_pod_security_context_run_as_user_deckhouse" -}} -{{- /* Template context with .Values, .Chart, etc */ -}} -securityContext: - runAsNonRoot: true - runAsUser: 64535 - runAsGroup: 64535 -{{- end }} - -{{- /* Usage: {{ include "helm_lib_module_pod_security_context_run_as_user_deckhouse_with_writable_fs" . }} */ -}} -{{- /* returns PodSecurityContext parameters for Pod with user and group "deckhouse" with write access to mounted volumes */ -}} -{{- define "helm_lib_module_pod_security_context_run_as_user_deckhouse_with_writable_fs" -}} -{{- /* Template context with .Values, .Chart, etc */ -}} -securityContext: - runAsNonRoot: true - runAsUser: 64535 - runAsGroup: 64535 - fsGroup: 64535 -{{- end }} - -{{- /* Usage: {{ include "helm_lib_module_pod_security_context_run_as_user_root" . }} */ -}} -{{- /* returns PodSecurityContext parameters for Pod with user and group 0 */ -}} -{{- define "helm_lib_module_pod_security_context_run_as_user_root" -}} -{{- /* Template context with .Values, .Chart, etc */ -}} -securityContext: - runAsNonRoot: false - runAsUser: 0 - runAsGroup: 0 -{{- end }} - -{{- /* Usage: {{ include "helm_lib_module_pod_security_context_runtime_default" . 
}} */ -}} -{{- /* returns PodSecurityContext parameters for Pod with seccomp profile RuntimeDefault */ -}} -{{- define "helm_lib_module_pod_security_context_runtime_default" -}} -{{- /* Template context with .Values, .Chart, etc */ -}} -securityContext: - seccompProfile: - type: RuntimeDefault -{{- end }} - -{{- /* Usage: {{ include "helm_lib_module_container_security_context_not_allow_privilege_escalation" . }} */ -}} -{{- /* returns SecurityContext parameters for Container with allowPrivilegeEscalation false */ -}} -{{- define "helm_lib_module_container_security_context_not_allow_privilege_escalation" -}} -securityContext: - allowPrivilegeEscalation: false -{{- end }} - -{{- /* Usage: {{ include "helm_lib_module_container_security_context_read_only_root_filesystem_with_selinux" . }} */ -}} -{{- /* returns SecurityContext parameters for Container with read only root filesystem and options for SELinux compatibility*/ -}} -{{- define "helm_lib_module_container_security_context_read_only_root_filesystem_with_selinux" -}} -{{- /* Template context with .Values, .Chart, etc */ -}} -securityContext: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - seLinuxOptions: - level: 's0' - type: 'spc_t' -{{- end }} - -{{- /* Usage: {{ include "helm_lib_module_container_security_context_read_only_root_filesystem" . }} */ -}} -{{- /* returns SecurityContext parameters for Container with read only root filesystem */ -}} -{{- define "helm_lib_module_container_security_context_read_only_root_filesystem" -}} -{{- /* Template context with .Values, .Chart, etc */ -}} -securityContext: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false -{{- end }} - -{{- /* Usage: {{ include "helm_lib_module_container_security_context_privileged" . }} */ -}} -{{- /* returns SecurityContext parameters for Container running privileged */ -}} -{{- define "helm_lib_module_container_security_context_privileged" -}} -securityContext: - privileged: true -{{- end }} - -{{- /* Usage: {{ include "helm_lib_module_container_security_context_escalated_sys_admin_privileged" . }} */ -}} -{{- /* returns SecurityContext parameters for Container running privileged with escalation and sys_admin */ -}} -{{- define "helm_lib_module_container_security_context_escalated_sys_admin_privileged" -}} -securityContext: - allowPrivilegeEscalation: true - capabilities: - add: - - SYS_ADMIN - privileged: true -{{- end }} - -{{- /* Usage: {{ include "helm_lib_module_container_security_context_privileged_read_only_root_filesystem" . }} */ -}} -{{- /* returns SecurityContext parameters for Container running privileged with read only root filesystem */ -}} -{{- define "helm_lib_module_container_security_context_privileged_read_only_root_filesystem" -}} -{{- /* Template context with .Values, .Chart, etc */ -}} -securityContext: - privileged: true - readOnlyRootFilesystem: true -{{- end }} - -{{- /* Usage: {{ include "helm_lib_module_container_security_context_read_only_root_filesystem_capabilities_drop_all" . 
}} */ -}} -{{- /* returns SecurityContext for Container with read only root filesystem and all capabilities dropped */ -}} -{{- define "helm_lib_module_container_security_context_read_only_root_filesystem_capabilities_drop_all" -}} -{{- /* Template context with .Values, .Chart, etc */ -}} -securityContext: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL -{{- end }} - -{{- /* Usage: {{ include "helm_lib_module_container_security_context_read_only_root_filesystem_capabilities_drop_all_and_add" (list . (list "KILL" "SYS_PTRACE")) }} */ -}} -{{- /* returns SecurityContext parameters for Container with read only root filesystem, all dropped and some added capabilities */ -}} -{{- define "helm_lib_module_container_security_context_read_only_root_filesystem_capabilities_drop_all_and_add" -}} -{{- /* Template context with .Values, .Chart, etc */ -}} -{{- /* List of capabilities */ -}} -securityContext: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - add: {{ index . 1 | toJson }} -{{- end }} - -{{- /* Usage: {{ include "helm_lib_module_container_security_context_capabilities_drop_all_and_add" (list . (list "KILL" "SYS_PTRACE")) }} */ -}} -{{- /* returns SecurityContext parameters for Container with all dropped and some added capabilities */ -}} -{{- define "helm_lib_module_container_security_context_capabilities_drop_all_and_add" -}} -{{- /* Template context with .Values, .Chart, etc */ -}} -{{- /* List of capabilities */ -}} -securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - add: {{ index . 1 | toJson }} -{{- end }} - -{{- /* Usage: {{ include "helm_lib_module_container_security_context_capabilities_drop_all_and_run_as_user_custom" (list . 1000 1000) }} */ -}} -{{- /* returns SecurityContext parameters for Container with read only root filesystem, all dropped, and custom user ID */ -}} -{{- define "helm_lib_module_container_security_context_capabilities_drop_all_and_run_as_user_custom" -}} -{{- /* Template context with .Values, .Chart, etc */ -}} -{{- /* User id */ -}} -{{- /* Group id */ -}} -securityContext: - runAsUser: {{ index . 1 }} - runAsGroup: {{ index . 2 }} - runAsNonRoot: true - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL -{{- end }} diff --git a/charts/deckhouse_lib_helm/templates/_module_storage_class.tpl b/charts/deckhouse_lib_helm/templates/_module_storage_class.tpl deleted file mode 100644 index cf761a5..0000000 --- a/charts/deckhouse_lib_helm/templates/_module_storage_class.tpl +++ /dev/null @@ -1,38 +0,0 @@ -{{- /* Usage: {{ include "helm_lib_module_storage_class_annotations" (list $ $index $storageClass.name) }} */ -}} -{{- /* return module StorageClass annotations */ -}} -{{- define "helm_lib_module_storage_class_annotations" -}} - {{- $context := index . 0 -}} {{- /* Template context with .Values, .Chart, etc */ -}} - {{- $sc_index := index . 1 -}} {{- /* Storage class index */ -}} - {{- $sc_name := index . 
2 -}} {{- /* Storage class name */ -}} - {{- $module_values := (index $context.Values (include "helm_lib_module_camelcase_name" $context)) -}} - {{- $annotations := dict -}} - - {{- $volume_expansion_mode_offline := false -}} - {{- range $module_name := list "cloud-provider-azure" "cloud-provider-yandex" "cloud-provider-vsphere" "cloud-provider-vcd"}} - {{- if has $module_name $context.Values.global.enabledModules }} - {{- $volume_expansion_mode_offline = true }} - {{- end }} - {{- end }} - - {{- if $volume_expansion_mode_offline }} - {{- $_ := set $annotations "storageclass.deckhouse.io/volume-expansion-mode" "offline" }} - {{- end }} - - {{- if hasKey $module_values.internal "defaultStorageClass" }} - {{- if eq $module_values.internal.defaultStorageClass $sc_name }} - {{- $_ := set $annotations "storageclass.kubernetes.io/is-default-class" "true" }} - {{- end }} - {{- else }} - {{- if eq $sc_index 0 }} - {{- if $context.Values.global.discovery.defaultStorageClass }} - {{- if eq $context.Values.global.discovery.defaultStorageClass $sc_name }} - {{- $_ := set $annotations "storageclass.kubernetes.io/is-default-class" "true" }} - {{- end }} - {{- else }} - {{- $_ := set $annotations "storageclass.kubernetes.io/is-default-class" "true" }} - {{- end }} - {{- end }} - {{- end }} - -{{- (dict "annotations" $annotations) | toYaml -}} -{{- end -}} diff --git a/charts/deckhouse_lib_helm/templates/_monitoring_grafana_dashboards.tpl b/charts/deckhouse_lib_helm/templates/_monitoring_grafana_dashboards.tpl deleted file mode 100644 index ebbcefb..0000000 --- a/charts/deckhouse_lib_helm/templates/_monitoring_grafana_dashboards.tpl +++ /dev/null @@ -1,68 +0,0 @@ -{{- /* Usage: {{ include "helm_lib_grafana_dashboard_definitions_recursion" (list . [current dir]) }} */ -}} -{{- /* returns all the dashboard-definintions from / */ -}} -{{- /* current dir is optional — used for recursion but you can use it for partially generating dashboards */ -}} -{{- define "helm_lib_grafana_dashboard_definitions_recursion" -}} - {{- $context := index . 0 }} {{- /* Template context with .Values, .Chart, etc */ -}} - {{- $rootDir := index . 1 }} {{- /* Dashboards root dir */ -}} - {{- /* Dashboards current dir */ -}} - - {{- $currentDir := "" }} - {{- if gt (len .) 2 }} {{- $currentDir = index . 2 }} {{- else }} {{- $currentDir = $rootDir }} {{- end }} - - {{- $currentDirIndex := (sub ($currentDir | splitList "/" | len) 1) }} - {{- $rootDirIndex := (sub ($rootDir | splitList "/" | len) 1) }} - {{- $folderNamesIndex := (add1 $rootDirIndex) }} - - {{- range $path, $_ := $context.Files.Glob (print $currentDir "/*.json") }} - {{- $fileName := ($path | splitList "/" | last ) }} - {{- $definition := ($context.Files.Get $path) }} - - {{- $folder := (index ($currentDir | splitList "/") $folderNamesIndex | replace "-" " " | title) }} - {{- $resourceName := (regexReplaceAllLiteral "\\.json$" $path "") }} - {{- $resourceName = ($resourceName | replace " " "-" | replace "." 
"-" | replace "_" "-") }} - {{- $resourceName = (slice ($resourceName | splitList "/") $folderNamesIndex | join "-") }} - {{- $resourceName = (printf "%s-%s" $context.Chart.Name $resourceName) }} - -{{ include "helm_lib_single_dashboard" (list $context $resourceName $folder $definition) }} - {{- end }} - - {{- $subDirs := list }} - {{- range $path, $_ := ($context.Files.Glob (print $currentDir "/**.json")) }} - {{- $pathSlice := ($path | splitList "/") }} - {{- $subDirs = append $subDirs (slice $pathSlice 0 (add $currentDirIndex 2) | join "/") }} - {{- end }} - - {{- range $subDir := ($subDirs | uniq) }} -{{ include "helm_lib_grafana_dashboard_definitions_recursion" (list $context $rootDir $subDir) }} - {{- end }} -{{- end }} - - -{{- /* Usage: {{ include "helm_lib_grafana_dashboard_definitions" . }} */ -}} -{{- /* returns dashboard-definintions from monitoring/grafana-dashboards/ */ -}} -{{- define "helm_lib_grafana_dashboard_definitions" -}} - {{- $context := . }} {{- /* Template context with .Values, .Chart, etc */ -}} - {{- if ( $context.Values.global.enabledModules | has "prometheus-crd" ) }} -{{- include "helm_lib_grafana_dashboard_definitions_recursion" (list $context "monitoring/grafana-dashboards") }} - {{- end }} -{{- end }} - - -{{- /* Usage: {{ include "helm_lib_single_dashboard" (list . "dashboard-name" "folder" $dashboard) }} */ -}} -{{- /* renders a single dashboard */ -}} -{{- define "helm_lib_single_dashboard" -}} - {{- $context := index . 0 }} {{- /* Template context with .Values, .Chart, etc */ -}} - {{- $resourceName := index . 1 }} {{- /* Dashboard name */ -}} - {{- $folder := index . 2 }} {{- /* Folder */ -}} - {{- $definition := index . 3 }} {{/* Dashboard definition */}} ---- -apiVersion: deckhouse.io/v1 -kind: GrafanaDashboardDefinition -metadata: - name: d8-{{ $resourceName }} - {{- include "helm_lib_module_labels" (list $context (dict "prometheus.deckhouse.io/grafana-dashboard" "")) | nindent 2 }} -spec: - folder: "{{ $folder }}" - definition: | - {{- $definition | nindent 4 }} -{{- end }} diff --git a/charts/deckhouse_lib_helm/templates/_monitoring_prometheus_rules.tpl b/charts/deckhouse_lib_helm/templates/_monitoring_prometheus_rules.tpl deleted file mode 100644 index 794fe30..0000000 --- a/charts/deckhouse_lib_helm/templates/_monitoring_prometheus_rules.tpl +++ /dev/null @@ -1,96 +0,0 @@ -{{- /* Usage: {{ include "helm_lib_prometheus_rules_recursion" (list . [current dir]) }} */ -}} -{{- /* returns all the prometheus rules from / */ -}} -{{- /* current dir is optional — used for recursion but you can use it for partially generating rules */ -}} -{{- define "helm_lib_prometheus_rules_recursion" -}} - {{- $context := index . 0 }} {{- /* Template context with .Values, .Chart, etc */ -}} - {{- $namespace := index . 1 }} {{- /* Namespace for creating rules */ -}} - {{- $rootDir := index . 2 }} {{- /* Rules root dir */ -}} - {{- $currentDir := "" }} {{- /* Current dir (optional) */ -}} - {{- if gt (len .) 3 }} {{- $currentDir = index . 3 }} {{- else }} {{- $currentDir = $rootDir }} {{- end }} - {{- $currentDirIndex := (sub ($currentDir | splitList "/" | len) 1) }} - {{- $rootDirIndex := (sub ($rootDir | splitList "/" | len) 1) }} - {{- $folderNamesIndex := (add1 $rootDirIndex) }} - - {{- range $path, $_ := $context.Files.Glob (print $currentDir "/*.{yaml,tpl}") }} - {{- $fileName := ($path | splitList "/" | last ) }} - {{- $definition := "" }} - {{- if eq ($path | splitList "." 
| last) "tpl" -}} - {{- $definition = tpl ($context.Files.Get $path) $context }} - {{- else }} - {{- $definition = $context.Files.Get $path }} - {{- end }} - - {{- $definition = $definition | replace "__SCRAPE_INTERVAL__" (printf "%ds" ($context.Values.global.discovery.prometheusScrapeInterval | default 30)) | replace "__SCRAPE_INTERVAL_X_2__" (printf "%ds" (mul ($context.Values.global.discovery.prometheusScrapeInterval | default 30) 2)) | replace "__SCRAPE_INTERVAL_X_3__" (printf "%ds" (mul ($context.Values.global.discovery.prometheusScrapeInterval | default 30) 3)) | replace "__SCRAPE_INTERVAL_X_4__" (printf "%ds" (mul ($context.Values.global.discovery.prometheusScrapeInterval | default 30) 4)) }} - -{{/* Patch expression based on `d8_ignore_on_update` annotation*/}} - - - {{ $definition = printf "Rules:\n%s" ($definition | nindent 2) }} - {{- $definitionStruct := ( $definition | fromYaml )}} - {{- if $definitionStruct.Error }} - {{- fail ($definitionStruct.Error | toString) }} - {{- end }} - {{- range $rule := $definitionStruct.Rules }} - - {{- range $dedicatedRule := $rule.rules }} - {{- if $dedicatedRule.annotations }} - {{- if (eq (get $dedicatedRule.annotations "d8_ignore_on_update") "true") }} - {{- $_ := set $dedicatedRule "expr" (printf "(%s) and ON() ((max(d8_is_updating) != 1) or ON() absent(d8_is_updating))" $dedicatedRule.expr) }} - {{- end }} - {{- end }} - {{- end }} - - {{- end }} - - {{ $definition = $definitionStruct.Rules | toYaml }} - - {{- $resourceName := (regexReplaceAllLiteral "\\.(yaml|tpl)$" $path "") }} - {{- $resourceName = ($resourceName | replace " " "-" | replace "." "-" | replace "_" "-") }} - {{- $resourceName = (slice ($resourceName | splitList "/") $folderNamesIndex | join "-") }} - {{- $resourceName = (printf "%s-%s" $context.Chart.Name $resourceName) }} ---- -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: {{ $resourceName }} - namespace: {{ $namespace }} - {{- include "helm_lib_module_labels" (list $context (dict "app" "prometheus" "prometheus" "main" "component" "rules")) | nindent 2 }} -spec: - groups: - {{- $definition | nindent 4 }} - {{- end }} - - {{- $subDirs := list }} - {{- range $path, $_ := ($context.Files.Glob (print $currentDir "/**.{yaml,tpl}")) }} - {{- $pathSlice := ($path | splitList "/") }} - {{- $subDirs = append $subDirs (slice $pathSlice 0 (add $currentDirIndex 2) | join "/") }} - {{- end }} - - {{- range $subDir := ($subDirs | uniq) }} -{{ include "helm_lib_prometheus_rules_recursion" (list $context $namespace $rootDir $subDir) }} - {{- end }} -{{- end }} - - -{{- /* Usage: {{ include "helm_lib_prometheus_rules" (list . ) }} */ -}} -{{- /* returns all the prometheus rules from monitoring/prometheus-rules/ */ -}} -{{- define "helm_lib_prometheus_rules" -}} - {{- $context := index . 0 }} {{- /* Template context with .Values, .Chart, etc */ -}} - {{- $namespace := index . 1 }} {{- /* Namespace for creating rules */ -}} - {{- if ( $context.Values.global.enabledModules | has "operator-prometheus-crd" ) }} -{{- include "helm_lib_prometheus_rules_recursion" (list $context $namespace "monitoring/prometheus-rules") }} - {{- end }} -{{- end }} - -{{- /* Usage: {{ include "helm_lib_prometheus_target_scrape_timeout_seconds" (list . ) }} */ -}} -{{- /* returns adjust timeout value to scrape interval / */ -}} -{{- define "helm_lib_prometheus_target_scrape_timeout_seconds" -}} - {{- $context := index . 0 }} {{- /* Template context with .Values, .Chart, etc */ -}} - {{- $timeout := index . 
1 }} {{- /* Target timeout in seconds */ -}} - {{- $scrape_interval := (int $context.Values.global.discovery.prometheusScrapeInterval | default 30) }} - {{- if gt $timeout $scrape_interval -}} -{{ $scrape_interval }}s - {{- else -}} -{{ $timeout }}s - {{- end }} -{{- end }} diff --git a/charts/deckhouse_lib_helm/templates/_node_affinity.tpl b/charts/deckhouse_lib_helm/templates/_node_affinity.tpl deleted file mode 100644 index cbdd0f9..0000000 --- a/charts/deckhouse_lib_helm/templates/_node_affinity.tpl +++ /dev/null @@ -1,256 +0,0 @@ -{{- /* Verify node selector strategy. */ -}} -{{- define "helm_lib_internal_check_node_selector_strategy" -}} - {{ if not (has . (list "frontend" "monitoring" "system" "master" )) }} - {{- fail (printf "unknown strategy \"%v\"" .) }} - {{- end }} - {{- . -}} -{{- end }} - -{{- /* Returns node selector for workloads depend on strategy. */ -}} -{{- define "helm_lib_node_selector" }} - {{- $context := index . 0 }} {{- /* Template context with .Values, .Chart, etc */ -}} - {{- $strategy := index . 1 | include "helm_lib_internal_check_node_selector_strategy" }} {{- /* strategy, one of "frontend" "monitoring" "system" "master" "any-node" "wildcard" */ -}} - {{- $module_values := dict }} - {{- if lt (len .) 3 }} - {{- $module_values = (index $context.Values (include "helm_lib_module_camelcase_name" $context)) }} - {{- else }} - {{- $module_values = index . 2 }} - {{- end }} - {{- $camel_chart_name := (include "helm_lib_module_camelcase_name" $context) }} - - {{- if eq $strategy "monitoring" }} - {{- if $module_values.nodeSelector }} -nodeSelector: {{ $module_values.nodeSelector | toJson }} - {{- else if gt (index $context.Values.global.discovery.d8SpecificNodeCountByRole $camel_chart_name | int) 0 }} -nodeSelector: - node-role.deckhouse.io/{{$context.Chart.Name}}: "" - {{- else if gt (index $context.Values.global.discovery.d8SpecificNodeCountByRole $strategy | int) 0 }} -nodeSelector: - node-role.deckhouse.io/{{$strategy}}: "" - {{- else if gt (index $context.Values.global.discovery.d8SpecificNodeCountByRole "system" | int) 0 }} -nodeSelector: - node-role.deckhouse.io/system: "" - {{- end }} - - {{- else if or (eq $strategy "frontend") (eq $strategy "system") }} - {{- if $module_values.nodeSelector }} -nodeSelector: {{ $module_values.nodeSelector | toJson }} - {{- else if gt (index $context.Values.global.discovery.d8SpecificNodeCountByRole $camel_chart_name | int) 0 }} -nodeSelector: - node-role.deckhouse.io/{{$context.Chart.Name}}: "" - {{- else if gt (index $context.Values.global.discovery.d8SpecificNodeCountByRole $strategy | int) 0 }} -nodeSelector: - node-role.deckhouse.io/{{$strategy}}: "" - {{- end }} - - {{- else if eq $strategy "master" }} - {{- if gt (index $context.Values.global.discovery "clusterMasterCount" | int) 0 }} -nodeSelector: - node-role.kubernetes.io/control-plane: "" - {{- else if gt (index $context.Values.global.discovery.d8SpecificNodeCountByRole "master" | int) 0 }} -nodeSelector: - node-role.deckhouse.io/control-plane: "" - {{- else if gt (index $context.Values.global.discovery.d8SpecificNodeCountByRole "system" | int) 0 }} -nodeSelector: - node-role.deckhouse.io/system: "" - {{- end }} - {{- end }} -{{- end }} - - -{{- /* Returns tolerations for workloads depend on strategy. */ -}} -{{- /* Usage: {{ include "helm_lib_tolerations" (tuple . "any-node" "with-uninitialized" "without-storage-problems") }} */ -}} -{{- define "helm_lib_tolerations" }} - {{- $context := index . 
0 }} {{- /* Template context with .Values, .Chart, etc */ -}} - {{- $strategy := index . 1 | include "helm_lib_internal_check_tolerations_strategy" }} {{- /* base strategy, one of "frontend" "monitoring" "system" any-node" "wildcard" */ -}} - {{- $additionalStrategies := tuple }} {{- /* list of additional strategies. To add strategy list it with prefix "with-", to remove strategy list it with prefix "without-". */ -}} - {{- if eq $strategy "custom" }} - {{ if lt (len .) 3 }} - {{- fail (print "additional strategies is required") }} - {{- end }} - {{- else }} - {{- $additionalStrategies = tuple "storage-problems" }} - {{- end }} - {{- $module_values := (index $context.Values (include "helm_lib_module_camelcase_name" $context)) }} - {{- if gt (len .) 2 }} - {{- range $as := slice . 2 (len .) }} - {{- if hasPrefix "with-" $as }} - {{- $additionalStrategies = mustAppend $additionalStrategies (trimPrefix "with-" $as) }} - {{- end }} - {{- if hasPrefix "without-" $as }} - {{- $additionalStrategies = mustWithout $additionalStrategies (trimPrefix "without-" $as) }} - {{- end }} - {{- end }} - {{- end }} -tolerations: - {{- /* Wildcard: gives permissions to schedule on any node with any taints (use with caution) */ -}} - {{- if eq $strategy "wildcard" }} - {{- include "_helm_lib_wildcard_tolerations" $context }} - - {{- else }} - {{- /* Any node: any node in the cluster with any known taints */ -}} - {{- if eq $strategy "any-node" }} - {{- include "_helm_lib_any_node_tolerations" $context }} - - {{- /* Tolerations from module config: overrides below strategies, if there is any toleration specified */ -}} - {{- else if $module_values.tolerations }} - {{- $module_values.tolerations | toYaml | nindent 0 }} - - {{- /* Monitoring: Nodes for monitoring components: prometheus, grafana, kube-state-metrics, etc. */ -}} - {{- else if eq $strategy "monitoring" }} - {{- include "_helm_lib_monitoring_tolerations" $context }} - - {{- /* Frontend: Nodes for ingress-controllers */ -}} - {{- else if eq $strategy "frontend" }} - {{- include "_helm_lib_frontend_tolerations" $context }} - - {{- /* System: Nodes for system components: prometheus, dns, cert-manager */ -}} - {{- else if eq $strategy "system" }} - {{- include "_helm_lib_system_tolerations" $context }} - {{- end }} - - {{- /* Additional strategies */ -}} - {{- range $additionalStrategies -}} - {{- include (printf "_helm_lib_additional_tolerations_%s" (. | replace "-" "_")) $context }} - {{- end }} - {{- end }} -{{- end }} - - -{{- /* Check cluster type. */ -}} -{{- /* Returns not empty string if this is cloud or hybrid cluster */ -}} -{{- define "_helm_lib_cloud_or_hybrid_cluster" }} - {{- if .Values.global.clusterConfiguration }} - {{- if eq .Values.global.clusterConfiguration.clusterType "Cloud" }} - "not empty string" - {{- /* We consider non-cloud clusters with enabled cloud-provider-.* module as Hybrid clusters */ -}} - {{- else }} - {{- range $v := .Values.global.enabledModules }} - {{- if hasPrefix "cloud-provider-" $v }} - "not empty string" - {{- end }} - {{- end }} - {{- end }} - {{- end }} -{{- end }} - -{{- /* Verify base strategy. */ -}} -{{- /* Fails if strategy not in allowed list */ -}} -{{- define "helm_lib_internal_check_tolerations_strategy" -}} - {{ if not (has . (list "custom" "frontend" "monitoring" "system" "any-node" "wildcard" )) }} - {{- fail (printf "unknown strategy \"%v\"" .) }} - {{- end }} - {{- . -}} -{{- end }} - - -{{- /* Base strategy for any uncordoned node in cluster. 
*/ -}} -{{- /* Usage: {{ include "helm_lib_tolerations" (tuple . "any-node") }} */ -}} -{{- define "_helm_lib_any_node_tolerations" }} -- key: node-role.kubernetes.io/master -- key: node-role.kubernetes.io/control-plane -- key: dedicated.deckhouse.io - operator: "Exists" -- key: dedicated - operator: "Exists" -- key: DeletionCandidateOfClusterAutoscaler -- key: ToBeDeletedByClusterAutoscaler - {{- if .Values.global.modules.placement.customTolerationKeys }} - {{- range $key := .Values.global.modules.placement.customTolerationKeys }} -- key: {{ $key | quote }} - operator: "Exists" - {{- end }} - {{- end }} -{{- end }} - -{{- /* Base strategy that tolerates all. */ -}} -{{- /* Usage: {{ include "helm_lib_tolerations" (tuple . "wildcard") }} */ -}} -{{- define "_helm_lib_wildcard_tolerations" }} -- operator: "Exists" -{{- end }} - -{{- /* Base strategy that tolerates nodes with "dedicated.deckhouse.io: monitoring" and "dedicated.deckhouse.io: system" taints. */ -}} -{{- /* Usage: {{ include "helm_lib_tolerations" (tuple . "monitoring") }} */ -}} -{{- define "_helm_lib_monitoring_tolerations" }} -- key: dedicated.deckhouse.io - operator: Equal - value: {{ .Chart.Name | quote }} -- key: dedicated.deckhouse.io - operator: Equal - value: "monitoring" -- key: dedicated.deckhouse.io - operator: Equal - value: "system" -{{- end }} - -{{- /* Base strategy that tolerates nodes with "dedicated.deckhouse.io: frontend" taints. */ -}} -{{- /* Usage: {{ include "helm_lib_tolerations" (tuple . "frontend") }} */ -}} -{{- define "_helm_lib_frontend_tolerations" }} -- key: dedicated.deckhouse.io - operator: Equal - value: {{ .Chart.Name | quote }} -- key: dedicated.deckhouse.io - operator: Equal - value: "frontend" -{{- end }} - -{{- /* Base strategy that tolerates nodes with "dedicated.deckhouse.io: system" taints. */ -}} -{{- /* Usage: {{ include "helm_lib_tolerations" (tuple . "system") }} */ -}} -{{- define "_helm_lib_system_tolerations" }} -- key: dedicated.deckhouse.io - operator: Equal - value: {{ .Chart.Name | quote }} -- key: dedicated.deckhouse.io - operator: Equal - value: "system" -{{- end }} - - -{{- /* Additional strategy "uninitialized" - used for CNI's and kube-proxy to allow cni components scheduled on node after CCM initialization. */ -}} -{{- /* Usage: {{ include "helm_lib_tolerations" (tuple . "any-node" "with-uninitialized") }} */ -}} -{{- define "_helm_lib_additional_tolerations_uninitialized" }} -- key: node.deckhouse.io/uninitialized - operator: "Exists" - effect: "NoSchedule" - {{- if include "_helm_lib_cloud_or_hybrid_cluster" . }} - {{- include "_helm_lib_additional_tolerations_no_csi" . }} - {{- end }} - {{- include "_helm_lib_additional_tolerations_node_problems" . }} -{{- end }} - -{{- /* Additional strategy "node-problems" - used for shedule critical components on non-ready nodes or nodes under pressure. */ -}} -{{- /* Usage: {{ include "helm_lib_tolerations" (tuple . "any-node" "with-node-problems") }} */ -}} -{{- define "_helm_lib_additional_tolerations_node_problems" }} -- key: node.kubernetes.io/not-ready -- key: node.kubernetes.io/out-of-disk -- key: node.kubernetes.io/memory-pressure -- key: node.kubernetes.io/disk-pressure -- key: node.kubernetes.io/pid-pressure -- key: node.kubernetes.io/unreachable -- key: node.kubernetes.io/network-unavailable -{{- end }} - -{{- /* Additional strategy "storage-problems" - used for shedule critical components on nodes with drbd problems. This additional strategy enabled by default in any base strategy except "wildcard". 
*/ -}} -{{- /* Usage: {{ include "helm_lib_tolerations" (tuple . "any-node" "without-storage-problems") }} */ -}} -{{- define "_helm_lib_additional_tolerations_storage_problems" }} -- key: drbd.linbit.com/lost-quorum -- key: drbd.linbit.com/force-io-error -- key: drbd.linbit.com/ignore-fail-over -{{- end }} - -{{- /* Additional strategy "no-csi" - used for any node with no CSI: any node, which was initialized by deckhouse, but have no csi-node driver registered on it. */ -}} -{{- /* Usage: {{ include "helm_lib_tolerations" (tuple . "any-node" "with-no-csi") }} */ -}} -{{- define "_helm_lib_additional_tolerations_no_csi" }} -- key: node.deckhouse.io/csi-not-bootstrapped - operator: "Exists" - effect: "NoSchedule" -{{- end }} - -{{- /* Additional strategy "cloud-provider-uninitialized" - used for any node which is not initialized by CCM. */ -}} -{{- /* Usage: {{ include "helm_lib_tolerations" (tuple . "any-node" "with-cloud-provider-uninitialized") }} */ -}} -{{- define "_helm_lib_additional_tolerations_cloud_provider_uninitialized" }} - {{- if not .Values.global.clusterIsBootstrapped }} -- key: node.cloudprovider.kubernetes.io/uninitialized - operator: Exists - {{- end }} -{{- end }} diff --git a/charts/deckhouse_lib_helm/templates/_pod_disruption_budget.tpl b/charts/deckhouse_lib_helm/templates/_pod_disruption_budget.tpl deleted file mode 100644 index ccd4f21..0000000 --- a/charts/deckhouse_lib_helm/templates/_pod_disruption_budget.tpl +++ /dev/null @@ -1,6 +0,0 @@ -{{- /* Usage: {{ include "helm_lib_pdb_daemonset" . }} */ -}} -{{- /* Returns PDB max unavailable */ -}} -{{- define "helm_lib_pdb_daemonset" }} - {{- $context := . -}} {{- /* Template context with .Values, .Chart, etc */ -}} -maxUnavailable: 10% -{{- end -}} diff --git a/charts/deckhouse_lib_helm/templates/_priority_class.tpl b/charts/deckhouse_lib_helm/templates/_priority_class.tpl deleted file mode 100644 index 5935445..0000000 --- a/charts/deckhouse_lib_helm/templates/_priority_class.tpl +++ /dev/null @@ -1,9 +0,0 @@ -{{- /* Usage: {{ include "helm_lib_priority_class" (tuple . "priority-class-name") }} /* -}} -{{- /* returns priority class if priority-class module enabled, otherwise returns nothing */ -}} -{{- define "helm_lib_priority_class" }} - {{- $context := index . 0 -}} {{- /* Template context with .Values, .Chart, etc */ -}} - {{- $priorityClassName := index . 1 }} {{- /* Priority class name */ -}} - {{- if ( $context.Values.global.enabledModules | has "priority-class") }} -priorityClassName: {{ $priorityClassName }} - {{- end }} -{{- end -}} diff --git a/charts/deckhouse_lib_helm/templates/_resources_management.tpl b/charts/deckhouse_lib_helm/templates/_resources_management.tpl deleted file mode 100644 index dff75c1..0000000 --- a/charts/deckhouse_lib_helm/templates/_resources_management.tpl +++ /dev/null @@ -1,160 +0,0 @@ -{{- /* Usage: {{ include "helm_lib_resources_management_pod_resources" (list [ephemeral storage requests]) }} */ -}} -{{- /* returns rendered resources section based on configuration if it is */ -}} -{{- define "helm_lib_resources_management_pod_resources" -}} - {{- $configuration := index . 0 -}} {{- /* VPA resource configuration [example](https://deckhouse.io/documentation/v1/modules/110-istio/configuration.html#parameters-controlplane-resourcesmanagement) */ -}} - {{- /* Ephemeral storage requests */ -}} - - {{- $ephemeral_storage := "50Mi" -}} - {{- if eq (len .) 2 -}} - {{- $ephemeral_storage = index . 
1 -}} - {{- end -}} - - {{- $pod_resources := (include "helm_lib_resources_management_original_pod_resources" $configuration | fromYaml) -}} - {{- if not (hasKey $pod_resources "requests") -}} - {{- $_ := set $pod_resources "requests" (dict) -}} - {{- end -}} - {{- $_ := set $pod_resources.requests "ephemeral-storage" $ephemeral_storage -}} - - {{- $pod_resources | toYaml -}} -{{- end -}} - - -{{- /* Usage: {{ include "helm_lib_resources_management_original_pod_resources" }} */ -}} -{{- /* returns rendered resources section based on configuration if it is present */ -}} -{{- define "helm_lib_resources_management_original_pod_resources" -}} - {{- $configuration := . -}} {{- /* VPA resource configuration [example](https://deckhouse.io/documentation/v1/modules/110-istio/configuration.html#parameters-controlplane-resourcesmanagement) */ -}} - - {{- if $configuration -}} - {{- if eq $configuration.mode "Static" -}} -{{- $configuration.static | toYaml -}} - - {{- else if eq $configuration.mode "VPA" -}} - {{- $resources := dict "requests" (dict) "limits" (dict) -}} - - {{- if $configuration.vpa.cpu -}} - {{- if $configuration.vpa.cpu.min -}} - {{- $_ := set $resources.requests "cpu" ($configuration.vpa.cpu.min | toString) -}} - {{- end -}} - {{- if $configuration.vpa.cpu.limitRatio -}} - {{- $cpuLimitMillicores := round (mulf (include "helm_lib_resources_management_cpu_units_to_millicores" $configuration.vpa.cpu.min) $configuration.vpa.cpu.limitRatio) 0 | int64 -}} - {{- $_ := set $resources.limits "cpu" (printf "%dm" $cpuLimitMillicores) -}} - {{- end -}} - {{- end -}} - - {{- if $configuration.vpa.memory -}} - {{- if $configuration.vpa.memory.min -}} - {{- $_ := set $resources.requests "memory" ($configuration.vpa.memory.min | toString) -}} - {{- end -}} - {{- if $configuration.vpa.memory.limitRatio -}} - {{- $memoryLimitBytes := round (mulf (include "helm_lib_resources_management_memory_units_to_bytes" $configuration.vpa.memory.min) $configuration.vpa.memory.limitRatio) 0 | int64 -}} - {{- $_ := set $resources.limits "memory" (printf "%d" $memoryLimitBytes) -}} - {{- end -}} - {{- end -}} -{{- $resources | toYaml -}} - - {{- else -}} - {{- cat "ERROR: unknown resource management mode: " $configuration.mode | fail -}} - {{- end -}} - {{- end -}} -{{- end }} - - -{{- /* Usage: {{ include "helm_lib_resources_management_vpa_spec" (list ) }} */ -}} -{{- /* returns rendered vpa spec based on configuration and target reference */ -}} -{{- define "helm_lib_resources_management_vpa_spec" -}} - {{- $targetAPIVersion := index . 0 -}} {{- /* Target API version */ -}} - {{- $targetKind := index . 1 -}} {{- /* Target Kind */ -}} - {{- $targetName := index . 2 -}} {{- /* Target Name */ -}} - {{- $targetContainer := index . 3 -}} {{- /* Target container name */ -}} - {{- $configuration := index . 
4 -}} {{- /* VPA resource configuration [example](https://deckhouse.io/documentation/v1/modules/110-istio/configuration.html#parameters-controlplane-resourcesmanagement) */ -}} - -targetRef: - apiVersion: {{ $targetAPIVersion }} - kind: {{ $targetKind }} - name: {{ $targetName }} - {{- if eq ($configuration.mode) "VPA" }} -updatePolicy: - updateMode: {{ $configuration.vpa.mode | quote }} -resourcePolicy: - containerPolicies: - - containerName: {{ $targetContainer }} - maxAllowed: - cpu: {{ $configuration.vpa.cpu.max | quote }} - memory: {{ $configuration.vpa.memory.max | quote }} - minAllowed: - cpu: {{ $configuration.vpa.cpu.min | quote }} - memory: {{ $configuration.vpa.memory.min | quote }} - controlledValues: RequestsAndLimits - {{- else }} -updatePolicy: - updateMode: "Off" - {{- end }} -{{- end }} - - -{{- /* Usage: {{ include "helm_lib_resources_management_cpu_units_to_millicores" }} */ -}} -{{- /* helper for converting cpu units to millicores */ -}} -{{- define "helm_lib_resources_management_cpu_units_to_millicores" -}} - {{- $units := . | toString -}} - {{- if hasSuffix "m" $units -}} - {{- trimSuffix "m" $units -}} - {{- else -}} - {{- atoi $units | mul 1000 -}} - {{- end }} -{{- end }} - - -{{- /* Usage: {{ include "helm_lib_resources_management_memory_units_to_bytes" }} */ -}} -{{- /* helper for converting memory units to bytes */ -}} -{{- define "helm_lib_resources_management_memory_units_to_bytes" }} - {{- $units := . | toString -}} - {{- if hasSuffix "k" $units -}} - {{- trimSuffix "k" $units | atoi | mul 1000 -}} - {{- else if hasSuffix "M" $units -}} - {{- trimSuffix "M" $units | atoi | mul 1000000 -}} - {{- else if hasSuffix "G" $units -}} - {{- trimSuffix "G" $units | atoi | mul 1000000000 -}} - {{- else if hasSuffix "T" $units -}} - {{- trimSuffix "T" $units | atoi | mul 1000000000000 -}} - {{- else if hasSuffix "P" $units -}} - {{- trimSuffix "P" $units | atoi | mul 1000000000000000 -}} - {{- else if hasSuffix "E" $units -}} - {{- trimSuffix "E" $units | atoi | mul 1000000000000000000 -}} - {{- else if hasSuffix "Ki" $units -}} - {{- trimSuffix "Ki" $units | atoi | mul 1024 -}} - {{- else if hasSuffix "Mi" $units -}} - {{- trimSuffix "Mi" $units | atoi | mul 1024 | mul 1024 -}} - {{- else if hasSuffix "Gi" $units -}} - {{- trimSuffix "Gi" $units | atoi | mul 1024 | mul 1024 | mul 1024 -}} - {{- else if hasSuffix "Ti" $units -}} - {{- trimSuffix "Ti" $units | atoi | mul 1024 | mul 1024 | mul 1024 | mul 1024 -}} - {{- else if hasSuffix "Pi" $units -}} - {{- trimSuffix "Pi" $units | atoi | mul 1024 | mul 1024 | mul 1024 | mul 1024 | mul 1024 -}} - {{- else if hasSuffix "Ei" $units -}} - {{- trimSuffix "Ei" $units | atoi | mul 1024 | mul 1024 | mul 1024 | mul 1024 | mul 1024 | mul 1024 -}} - {{- else if regexMatch "^[0-9]+$" $units -}} - {{- $units -}} - {{- else -}} - {{- cat "ERROR: unknown memory format:" $units | fail -}} - {{- end }} -{{- end }} - -{{- /* Usage: {{ include "helm_lib_vpa_kube_rbac_proxy_resources" . }} */ -}} -{{- /* helper for VPA resources for kube_rbac_proxy */ -}} -{{- define "helm_lib_vpa_kube_rbac_proxy_resources" }} -{{- /* Template context with .Values, .Chart, etc */ -}} -- containerName: kube-rbac-proxy - minAllowed: - {{- include "helm_lib_container_kube_rbac_proxy_resources" . | nindent 4 }} - maxAllowed: - cpu: 20m - memory: 25Mi -{{- end }} - -{{- /* Usage: {{ include "helm_lib_container_kube_rbac_proxy_resources" . 
}} */ -}} -{{- /* helper for container resources for kube_rbac_proxy */ -}} -{{- define "helm_lib_container_kube_rbac_proxy_resources" }} -{{- /* Template context with .Values, .Chart, etc */ -}} -cpu: 10m -memory: 25Mi -{{- end }} diff --git a/charts/deckhouse_lib_helm/templates/_spec_for_high_availability.tpl b/charts/deckhouse_lib_helm/templates/_spec_for_high_availability.tpl deleted file mode 100644 index 8bfbf9e..0000000 --- a/charts/deckhouse_lib_helm/templates/_spec_for_high_availability.tpl +++ /dev/null @@ -1,138 +0,0 @@ -{{- /* Usage: {{ include "helm_lib_pod_anti_affinity_for_ha" (list . (dict "app" "test")) }} */ -}} -{{- /* returns pod affinity spec */ -}} -{{- define "helm_lib_pod_anti_affinity_for_ha" }} -{{- $context := index . 0 -}} {{- /* Template context with .Values, .Chart, etc */ -}} -{{- $labels := index . 1 }} {{- /* Match labels for podAntiAffinity label selector */ -}} - {{- if (include "helm_lib_ha_enabled" $context) }} -affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - {{- range $key, $value := $labels }} - {{ $key }}: {{ $value | quote }} - {{- end }} - topologyKey: kubernetes.io/hostname - {{- end }} -{{- end }} - -{{- /* Usage: {{ include "helm_lib_deployment_on_master_strategy_and_replicas_for_ha" }} */ -}} -{{- /* returns deployment strategy and replicas for ha components running on master nodes */ -}} -{{- define "helm_lib_deployment_on_master_strategy_and_replicas_for_ha" }} -{{- /* Template context with .Values, .Chart, etc */ -}} - {{- if (include "helm_lib_ha_enabled" .) }} - {{- if gt (index .Values.global.discovery "clusterMasterCount" | int) 0 }} -replicas: {{ index .Values.global.discovery "clusterMasterCount" }} -strategy: - type: RollingUpdate - rollingUpdate: - maxSurge: 0 - {{- if gt (index .Values.global.discovery "clusterMasterCount" | int) 2 }} - maxUnavailable: 2 - {{- else }} - maxUnavailable: 1 - {{- end }} - {{- else if gt (index .Values.global.discovery.d8SpecificNodeCountByRole "master" | int) 0 }} -replicas: {{ index .Values.global.discovery.d8SpecificNodeCountByRole "master" }} -strategy: - type: RollingUpdate - rollingUpdate: - maxSurge: 0 - {{- if gt (index .Values.global.discovery.d8SpecificNodeCountByRole "master" | int) 2 }} - maxUnavailable: 2 - {{- else }} - maxUnavailable: 1 - {{- end }} - {{- else }} -replicas: 2 -strategy: - type: RollingUpdate - rollingUpdate: - maxSurge: 0 - maxUnavailable: 1 - {{- end }} - {{- else }} -replicas: 1 -strategy: - type: RollingUpdate - rollingUpdate: - maxSurge: 0 - maxUnavailable: 1 - {{- end }} -{{- end }} - -{{- /* Usage: {{ include "helm_lib_deployment_on_master_custom_strategy_and_replicas_for_ha" (list . (dict "strategy" "strategy_type")) }} */ -}} -{{- /* returns deployment with custom strategy and replicas for ha components running on master nodes */ -}} -{{- define "helm_lib_deployment_on_master_custom_strategy_and_replicas_for_ha" }} -{{- $context := index . 0 }} -{{- $optionalArgs := dict }} -{{- $strategy := "RollingUpdate" }} -{{- if ge (len .) 2 }} - {{- $optionalArgs = index . 
1 }} -{{- end }} -{{- if hasKey $optionalArgs "strategy" }} - {{- $strategy = $optionalArgs.strategy }} -{{- end }} -{{- /* Template context with .Values, .Chart, etc */ -}} - {{- if (include "helm_lib_ha_enabled" $context) }} - {{- if gt (index $context.Values.global.discovery "clusterMasterCount" | int) 0 }} -replicas: {{ index $context.Values.global.discovery "clusterMasterCount" }} -strategy: - type: {{ $strategy }} - {{- if eq $strategy "RollingUpdate" }} - rollingUpdate: - maxSurge: 0 - {{- if gt (index $context.Values.global.discovery "clusterMasterCount" | int) 2 }} - maxUnavailable: 2 - {{- else }} - maxUnavailable: 1 - {{- end }} - {{- end }} - {{- else if gt (index $context.Values.global.discovery.d8SpecificNodeCountByRole "master" | int) 0 }} -replicas: {{ index $context.Values.global.discovery.d8SpecificNodeCountByRole "master" }} -strategy: - type: {{ $strategy }} - {{- if eq $strategy "RollingUpdate" }} - rollingUpdate: - maxSurge: 0 - {{- if gt (index $context.Values.global.discovery.d8SpecificNodeCountByRole "master" | int) 2 }} - maxUnavailable: 2 - {{- else }} - maxUnavailable: 1 - {{- end }} - {{- end }} - {{- else }} -replicas: 2 -strategy: - type: {{ $strategy }} - {{- if eq $strategy "RollingUpdate" }} - rollingUpdate: - maxSurge: 0 - maxUnavailable: 1 - {{- end }} - {{- end }} - {{- else }} -replicas: 1 -strategy: - type: {{ $strategy }} - {{- if eq $strategy "RollingUpdate" }} - rollingUpdate: - maxSurge: 0 - maxUnavailable: 1 - {{- end }} - {{- end }} -{{- end }} - -{{- /* Usage: {{ include "helm_lib_deployment_strategy_and_replicas_for_ha" }} */ -}} -{{- /* returns deployment strategy and replicas for ha components running not on master nodes */ -}} -{{- define "helm_lib_deployment_strategy_and_replicas_for_ha" }} -{{- /* Template context with .Values, .Chart, etc */ -}} -replicas: {{ include "helm_lib_is_ha_to_value" (list . 2 1) }} -{{- if (include "helm_lib_ha_enabled" .) 
}} -strategy: - type: RollingUpdate - rollingUpdate: - maxSurge: 0 - maxUnavailable: 1 -{{- end }} -{{- end }} From f34455905fd9a08e383072e385f35c4caf2923dc Mon Sep 17 00:00:00 2001 From: Aleksandr Zimin Date: Fri, 21 Jun 2024 14:08:55 +0300 Subject: [PATCH 21/21] Fix after review Signed-off-by: Aleksandr Zimin --- crds/cephclusterconnection.yaml | 2 +- crds/cephcsi.yaml | 158 ----- crds/cephstorageclass.yaml | 20 +- crds/doc-ru-cephclusterconnection.yaml | 36 ++ crds/doc-ru-cephstorageclass.yaml | 57 ++ docs/CONFIGURATION.md | 2 +- docs/CONFIGURATION_RU.md | 2 +- docs/CR.md | 2 +- docs/CR_RU.md | 2 +- docs/EXAMPLES.md | 80 ++- docs/EXAMPLES_RU.md | 80 ++- docs/FAQ.md | 6 +- docs/FAQ_RU.md | 6 +- docs/README.md | 2 +- docs/README_RU.md | 2 +- docs/internal/INTREE_MIGRATION.md | 550 ------------------ .../api/v1alpha1/ceph_cluster_connection.go | 6 - images/controller/go.mod | 2 +- images/controller/go.sum | 2 - .../ceph_cluster_connection_watcher_func.go | 24 +- .../ceph_cluster_connection_watcher_test.go | 9 +- .../ceph_storage_class_watcher_func.go | 40 +- .../ceph_storage_class_watcher_test.go | 86 +++ openapi/config-values.yaml | 26 +- ...bd-in-tree-to-ceph-csi-migration-helper.sh | 132 ----- 25 files changed, 373 insertions(+), 961 deletions(-) delete mode 100644 crds/cephcsi.yaml create mode 100644 crds/doc-ru-cephclusterconnection.yaml create mode 100644 crds/doc-ru-cephstorageclass.yaml delete mode 100644 docs/internal/INTREE_MIGRATION.md delete mode 100755 tools/rbd-in-tree-to-ceph-csi-migration-helper.sh diff --git a/crds/cephclusterconnection.yaml b/crds/cephclusterconnection.yaml index 4b15e31..da074e2 100644 --- a/crds/cephclusterconnection.yaml +++ b/crds/cephclusterconnection.yaml @@ -63,7 +63,7 @@ spec: description: | The current state of resources managed by the CephClusterConnection custom resource. Might be: - Failed (if the controller received incorrect resource configuration or some errors occurred during the operation) - - Create (if everything went fine) + - Created (if everything went fine) enum: - Failed - Created diff --git a/crds/cephcsi.yaml b/crds/cephcsi.yaml deleted file mode 100644 index a648c61..0000000 --- a/crds/cephcsi.yaml +++ /dev/null @@ -1,158 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: cephcsidriver.deckhouse.io - labels: - heritage: deckhouse - module: ceph-csi - app: ceph-csi -spec: - group: deckhouse.io - scope: Cluster - names: - plural: cephcsidriver - kind: CephCSIDriver - preserveUnknownFields: false - versions: - - name: v1alpha1 - served: true - storage: true - schema: &schema - openAPIV3Schema: - type: object - description: | - Ceph cluster connection parameters and StorageClasses configuration. - required: - - spec - properties: - spec: - type: object - required: - - clusterID - - userID - - userKey - - monitors - properties: - clusterID: - description: | - Ceph cluster FSID/UUID. - - Use `ceph fsid` to get Ceph cluster FSID/UUID. - type: string - userID: - description: | - Username without `client.`. - type: string - userKey: - description: | - Ceph auth key corresponding to the `userID`. - type: string - monitors: - description: | - List of ceph-mon IP addresses in the format `10.0.0.10:6789`. - type: array - items: - type: string - rbd: - type: object - properties: - storageClasses: - description: | - Description of StorageClasses for Rados Block Device (RBD). 
- type: array - items: - type: object - required: - - namePostfix - properties: - namePostfix: - description: | - Part of the StorageClass name after `-`. - - The name from the CustomResource `CephCSIDriver` is used as the first part. - type: string - pool: - description: | - Ceph pool into which the RBD image shall be created. - type: string - reclaimPolicy: - description: | - The reclaim policy for a Persistent Volume. - - [More info...](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#reclaiming) - type: string - enum: - - Delete - - Retain - default: Retain - allowVolumeExpansion: - description: | - Allows the users to resize the volume by editing the corresponding PVC object. - - [More info...](https://kubernetes.io/docs/concepts/storage/storage-classes/#allow-volume-expansion) - type: boolean - default: true - mountOptions: - description: | - List of mount options. - type: array - items: - type: string - defaultFSType: - description: | - Default fstype. - type: string - enum: - - ext4 - - xfs - default: ext4 - cephfs: - type: object - properties: - subvolumeGroup: - description: | - CephFS subvolume group name. - type: string - storageClasses: - type: array - description: | - CephFS StorageClasses. - items: - type: object - required: - - namePostfix - - fsName - properties: - namePostfix: - description: | - Part of the StorageClass name after `-`. - - The name from the CustomResource `CephCSIDriver` is used as the first part. - type: string - pool: - description: | - Ceph pool name into which volume data shall be stored. - type: string - reclaimPolicy: - description: | - The reclaim policy for a Persistent Volume. - type: string - enum: - - Delete - - Retain - default: Retain - allowVolumeExpansion: - description: | - Allows the users to resize the volume by editing the corresponding PVC object. - type: boolean - default: true - mountOptions: - description: | - List of mount options. - type: array - items: - type: string - fsName: - description: | - CephFS filesystem name. - type: string diff --git a/crds/cephstorageclass.yaml b/crds/cephstorageclass.yaml index 840db24..e9b0599 100644 --- a/crds/cephstorageclass.yaml +++ b/crds/cephstorageclass.yaml @@ -32,10 +32,18 @@ spec: - reclaimPolicy - type oneOf: - - required: - - rbd - - required: - - cephfs + - required: ["cephfs"] + properties: + type: + enum: ["cephfs"] + not: + required: ["rbd"] + - required: ["rbd"] + properties: + type: + enum: ["rbd"] + not: + required: ["cephfs"] properties: clusterConnectionName: description: | @@ -111,7 +119,7 @@ spec: minLength: 1 pool: description: | - Name of the Ceph pool. + Name of the RBD pool. type: string x-kubernetes-validations: - rule: self == oldSelf @@ -127,7 +135,7 @@ spec: description: | The Storage class current state. Might be: - Failed (if the controller received incorrect resource configuration or some errors occurred during the operation) - - Create (if everything went fine) + - Created (if everything went fine) enum: - Failed - Created diff --git a/crds/doc-ru-cephclusterconnection.yaml b/crds/doc-ru-cephclusterconnection.yaml new file mode 100644 index 0000000..a96923a --- /dev/null +++ b/crds/doc-ru-cephclusterconnection.yaml @@ -0,0 +1,36 @@ +spec: + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: | + Параметры подключения к кластеру Ceph. + properties: + spec: + properties: + clusterID: + description: | + FSID/UUID кластера Ceph. + + Используйте `ceph fsid`, чтобы получить FSID/UUID кластера Ceph. 
+ userID: + description: | + Имя пользователя без префикса `client.`. + userKey: + description: | + Авторизационный ключ Ceph, соответствующий `userID`. + monitors: + description: | + Список IP-адресов ceph-mon в формате `10.0.0.10:6789`. + status: + description: | + Отображает текущую информацию о ресурсах, управляемых пользовательским ресурсом CephClusterConnection. + properties: + phase: + description: | + Текущее состояние ресурсов, управляемых ресурсом CephClusterConnection. Возможные состояния: + - Failed (если контроллер получил некорректную конфигурацию ресурса или возникли ошибки в процессе выполнения операции) + - Created (если все прошло успешно) + reason: + description: | + Дополнительная информация о ресурсах, управляемых ресурсом CephClusterConnection. diff --git a/crds/doc-ru-cephstorageclass.yaml b/crds/doc-ru-cephstorageclass.yaml new file mode 100644 index 0000000..73e751d --- /dev/null +++ b/crds/doc-ru-cephstorageclass.yaml @@ -0,0 +1,57 @@ +spec: + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: | + Интерфейс управления StorageСlass для CSI-драйверов rbd.csi.ceph.com и cephfs.csi.ceph.com. Ручное создание StorageClass для данных драйверов запрещено. + properties: + spec: + description: | + Определяет конфигурацию StorageClass. + properties: + clusterConnectionName: + description: | + Имя ресурса CephClusterConnection. + reclaimPolicy: + description: | + Режим поведения при удалении PVC. Возможные значения: + - Delete (При удалении PVC будет удален PV и данные) + - Retain (При удалении PVC не будут удалены PV и данные. Для их удаления потребуется ручное вмешательство администратора) + type: + description: | + Тип storage-класса. Возможные значения: + - cephfs (CephFS) + - rbd (Rados Block Device) + cephfs: + description: | + Специфические параметры для CephFS. + properties: + fsName: + description: | + Имя файловой системы CephFS. + pool: + description: | + Имя пула Ceph. + rbd: + description: | + Специфические параметры для Rados Block Device. + properties: + defaultFSType: + description: | + Тип файловой системы по умолчанию для Rados Block Device. + pool: + description: | + Имя пула RBD. + status: + description: | + Отображает текущую информацию о StorageClass. + properties: + phase: + description: | + Текущее состояние StorageClass. Возможные состояния: + - Failed (если контроллер получил некорректную конфигурацию ресурса или возникли ошибки в процессе выполнения операции) + - Created (если все прошло успешно) + reason: + description: | + Дополнительная информация о текущем состоянии StorageClass. 
diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index cc1f6de..16de579 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -1,5 +1,5 @@ --- -title: "The ceph-csi module: configuration" +title: "The csi-ceph module: configuration" --- {% include module-bundle.liquid %} diff --git a/docs/CONFIGURATION_RU.md b/docs/CONFIGURATION_RU.md index 9be6a04..2d784f9 100644 --- a/docs/CONFIGURATION_RU.md +++ b/docs/CONFIGURATION_RU.md @@ -1,5 +1,5 @@ --- -title: "Модуль ceph-csi: настройки" +title: "Модуль csi-ceph: настройки" --- {% include module-bundle.liquid %} diff --git a/docs/CR.md b/docs/CR.md index 88c4bc6..e64d866 100644 --- a/docs/CR.md +++ b/docs/CR.md @@ -1,5 +1,5 @@ --- -title: "The ceph-csi module: Custom Resources" +title: "The csi-ceph module: Custom Resources" --- diff --git a/docs/CR_RU.md b/docs/CR_RU.md index 4101ff6..a2077cd 100644 --- a/docs/CR_RU.md +++ b/docs/CR_RU.md @@ -1,5 +1,5 @@ --- -title: "Модуль ceph-csi: custom resources" +title: "Модуль csi-ceph: custom resources" --- diff --git a/docs/EXAMPLES.md b/docs/EXAMPLES.md index 420c0f2..5a49dd2 100644 --- a/docs/EXAMPLES.md +++ b/docs/EXAMPLES.md @@ -1,34 +1,66 @@ --- -title: "The ceph-csi module: examples" +title: "The csi-ceph module: examples" --- -## An example of the `CephCSIDriver` configuration +## Example of `CephClusterConnection` configuration ```yaml -apiVersion: deckhouse.io/v1alpha1 -kind: CephCSIDriver +apiVersion: storage.deckhouse.io/v1alpha1 +kind: CephClusterConnection metadata: - name: example + name: ceph-cluster-1 spec: - clusterID: 2bf085fc-5119-404f-bb19-820ca6a1b07e + clusterID: 0324bfe8-c36a-4829-bacd-9e28b6480de9 monitors: - - 10.0.0.10:6789 - userID: admin - userKey: AQDbc7phl+eeGRAAaWL9y71mnUiRHKRFOWMPCQ== + - 172.20.1.28:6789 + - 172.20.1.34:6789 + - 172.20.1.37:6789 + userID: user + userKey: AQDiVXVmBJVRLxAAg65PhODrtwbwSWrjJwssUg== +``` + +- To verify the creation of the object, use the following command (Phase should be `Created`): + +```shell +kubectl get cephclusterconnection +``` + +## Example of `CephStorageClass` configuration + +### RBD + +```yaml +apiVersion: storage.deckhouse.io/v1alpha1 +kind: CephStorageClass +metadata: + name: ceph-rbd-sc +spec: + clusterConnectionName: ceph-cluster-1 + reclaimPolicy: Delete + type: rbd rbd: - storageClasses: - - allowVolumeExpansion: true - defaultFSType: ext4 - mountOptions: - - discard - namePostfix: csi-rbd - pool: kubernetes-rbd - reclaimPolicy: Delete - cephfs: - storageClasses: - - allowVolumeExpansion: true - fsName: cephfs - namePostfix: csi-cephfs - pool: cephfs_data - reclaimPolicy: Delete + defaultFSType: ext4 + pool: ceph-rbd-pool +``` + +### CephFS + +```yaml +apiVersion: storage.deckhouse.io/v1alpha1 +kind: CephStorageClass +metadata: + name: ceph-fs-sc +spec: + clusterConnectionName: ceph-cluster-1 + reclaimPolicy: Delete + type: cephfs + cephfs: + fsName: cephfs + pool: cephfs-pool +``` + +### To verify the creation of the object, use the following command (Phase should be `Created`): + +```shell +kubectl get cephstorageclass ``` diff --git a/docs/EXAMPLES_RU.md b/docs/EXAMPLES_RU.md index b5f4dca..e38ff23 100644 --- a/docs/EXAMPLES_RU.md +++ b/docs/EXAMPLES_RU.md @@ -1,34 +1,66 @@ --- -title: "Модуль ceph-csi: примеры" +title: "Модуль csi-ceph: примеры" --- -## Пример описания `CephCSIDriver` +## Пример описания `CephClusterConnection` ```yaml -apiVersion: deckhouse.io/v1alpha1 -kind: CephCSIDriver +apiVersion: storage.deckhouse.io/v1alpha1 +kind: CephClusterConnection metadata: - name: example + name:
ceph-cluster-1 spec: - clusterID: 2bf085fc-5119-404f-bb19-820ca6a1b07e + clusterID: 0324bfe8-c36a-4829-bacd-9e28b6480de9 monitors: - - 10.0.0.10:6789 - userID: admin - userKey: AQDbc7phl+eeGRAAaWL9y71mnUiRHKRFOWMPCQ== + - 172.20.1.28:6789 + - 172.20.1.34:6789 + - 172.20.1.37:6789 + userID: user + userKey: AQDiVXVmBJVRLxAAg65PhODrtwbwSWrjJwssUg== +``` + +- Проверить создание объекта можно командой (Phase должен быть `Created`): + +```shell +kubectl get cephclusterconnection <имя cephclusterconnection> +``` + +## Пример описания `CephStorageClass` + +### RBD + +```yaml +apiVersion: storage.deckhouse.io/v1alpha1 +kind: CephStorageClass +metadata: + name: ceph-rbd-sc +spec: + clusterConnectionName: ceph-cluster-1 + reclaimPolicy: Delete + type: rbd rbd: - storageClasses: - - allowVolumeExpansion: true - defaultFSType: ext4 - mountOptions: - - discard - namePostfix: csi-rbd - pool: kubernetes-rbd - reclaimPolicy: Delete - cephfs: - storageClasses: - - allowVolumeExpansion: true - fsName: cephfs - namePostfix: csi-cephfs - pool: cephfs_data - reclaimPolicy: Delete + defaultFSType: ext4 + pool: ceph-rbd-pool +``` + +### CephFS + +```yaml +apiVersion: storage.deckhouse.io/v1alpha1 +kind: CephStorageClass +metadata: + name: ceph-fs-sc +spec: + clusterConnectionName: ceph-cluster-1 + reclaimPolicy: Delete + type: cephfs + cephfs: + fsName: cephfs + pool: cephfs-pool +``` + +### Проверить создание объекта можно командой (Phase должен быть `Created`): + +```shell +kubectl get cephstorageclass <имя storage class> ``` diff --git a/docs/FAQ.md b/docs/FAQ.md index f4a955c..9843688 100644 --- a/docs/FAQ.md +++ b/docs/FAQ.md @@ -1,10 +1,10 @@ --- -title: "The ceph-csi module: FAQ" +title: "The csi-ceph module: FAQ" --- ## How to get a list of RBD volumes separated by nodes? ```shell -kubectl -n d8-ceph-csi get po -l app=csi-node-rbd -o custom-columns=NAME:.metadata.name,NODE:.spec.nodeName --no-headers \ - | awk '{print "echo "$2"; kubectl -n d8-ceph-csi exec "$1" -c node -- rbd showmapped"}' | bash +kubectl -n d8-csi-ceph get po -l app=csi-node-rbd -o custom-columns=NAME:.metadata.name,NODE:.spec.nodeName --no-headers \ + | awk '{print "echo "$2"; kubectl -n d8-csi-ceph exec "$1" -c node -- rbd showmapped"}' | bash ``` diff --git a/docs/FAQ_RU.md b/docs/FAQ_RU.md index 53ee743..53f8ab7 100644 --- a/docs/FAQ_RU.md +++ b/docs/FAQ_RU.md @@ -1,10 +1,10 @@ --- -title: "Модуль ceph-csi: FAQ" +title: "Модуль csi-ceph: FAQ" --- ## Как получить список томов RBD, разделенный по узлам? ```shell -kubectl -n d8-ceph-csi get po -l app=csi-node-rbd -o custom-columns=NAME:.metadata.name,NODE:.spec.nodeName --no-headers \ - | awk '{print "echo "$2"; kubectl -n d8-ceph-csi exec "$1" -c node -- rbd showmapped"}' | bash +kubectl -n d8-csi-ceph get po -l app=csi-node-rbd -o custom-columns=NAME:.metadata.name,NODE:.spec.nodeName --no-headers \ + | awk '{print "echo "$2"; kubectl -n d8-csi-ceph exec "$1" -c node -- rbd showmapped"}' | bash ``` diff --git a/docs/README.md b/docs/README.md index 8902f95..6ce67df 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,5 +1,5 @@ --- -title: "The ceph-csi module" +title: "The csi-ceph module" --- The module installs and configures the CSI driver for RBD and CephFS. diff --git a/docs/README_RU.md b/docs/README_RU.md index 169fe0f..27e494f 100644 --- a/docs/README_RU.md +++ b/docs/README_RU.md @@ -1,5 +1,5 @@ --- -title: "Модуль ceph-csi" +title: "Модуль csi-ceph" --- Модуль устанавливает и настраивает CSI-драйвер для RBD и CephFS.
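For a quick cross-check of the examples above: once a CephStorageClass reports Phase `Created`, the module is expected to have created a Kubernetes StorageClass with the same name, served by the `rbd.csi.ceph.com` or `cephfs.csi.ceph.com` driver. A minimal verification sketch, assuming the resource names used in the examples (`ceph-rbd-sc`, `ceph-fs-sc`); the names are illustrative only:

```shell
# Phase reported by the module's custom resources (expected: Created)
kubectl get cephstorageclass ceph-rbd-sc -o jsonpath='{.status.phase}'
kubectl get cephstorageclass ceph-fs-sc -o jsonpath='{.status.phase}'

# StorageClass objects generated from them and their provisioners
kubectl get storageclass ceph-rbd-sc ceph-fs-sc
kubectl get storageclass ceph-rbd-sc -o jsonpath='{.provisioner}'
```

If the phase is `Failed`, the `status.reason` field of the CephStorageClass should contain the validation message reported by the controller.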
diff --git a/docs/internal/INTREE_MIGRATION.md b/docs/internal/INTREE_MIGRATION.md deleted file mode 100644 index 4432940..0000000 --- a/docs/internal/INTREE_MIGRATION.md +++ /dev/null @@ -1,550 +0,0 @@ -# Switching from the in-tree RBD driver to CSI (Ceph CSI) - -The [rbd-in-tree-to-ceph-csi-migration-helper.sh](https://github.com/deckhouse/deckhouse/blob/main/modules/031-ceph-csi/tools/rbd-in-tree-to-ceph-csi-migration-helper.sh) script was created to simplify the migration process. -Before running it, delete the Pod (scale the StatefulSet/Deployment down to zero) which uses the PVC. You will have to manually run a command in the Ceph cluster to rename the RBD image (since Ceph CSI uses a different name format) during the migration. - -**Caution!** It is assumed that the `ceph-csi` module is enabled and configured and that the old driver continues to work. - -The script will back up the manifests if the PVCs and PVs to be migrated, delete the old manifests and create the new ones. Note that deleting the PV will not cause the RBD image in the Ceph cluster to be deleted, since the script will rename it beforehand. - -The script requires the PVCs and PVs to work; it will use their manifests to obtain the parameters specific to Ceph CSI. You can use the following manifest to create them: - -```yaml -kubectl create -f - <<"END" -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: sample - namespace: d8-monitoring -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi - storageClassName: ceph-csi-rbd -END -``` - -Below is an example of the output generated by the script: - -```bash -root@kube-master-0:~# ./rbd-in-tree-to-ceph-csi-migration-helper.sh default/sample default/data-test-0 -Rename the rbd image in your ceph cluster using the following command: ->rbd mv kube/kubernetes-dynamic-pvc-162a2c43-568e-40ab-aedb-a4632a613ecd kube/csi-vol-162a2c43-568e-40ab-aedb-a4632a613ecd -After renaming, enter yes to confirm: yes -PersistentVolumeClaim data-test-0 and PersistentVolume pvc-4a77a995-ce1e-463c-9726-d05966d3c5ef will be removed (Type yes to confirm): yes ->kubectl -n default delete pvc data-test-0 -persistentvolumeclaim "data-test-0" deleted ->kubectl delete pv pvc-4a77a995-ce1e-463c-9726-d05966d3c5ef -persistentvolume "pvc-4a77a995-ce1e-463c-9726-d05966d3c5ef" deleted ->kubectl create -f - <<"END" -{ - "apiVersion": "v1", - "kind": "PersistentVolumeClaim", - "metadata": { - "annotations": { - "pv.kubernetes.io/bind-completed": "yes", - "pv.kubernetes.io/bound-by-controller": "yes", - "volume.beta.kubernetes.io/storage-provisioner": "rbd.csi.ceph.com" - }, - "finalizers": [ - "kubernetes.io/pvc-protection" - ], - "labels": { - "app": "test" - }, - "name": "data-test-0", - "namespace": "default" - }, - "spec": { - "accessModes": [ - "ReadWriteOnce" - ], - "resources": { - "requests": { - "storage": "1Gi" - } - }, - "storageClassName": "ceph-csi-rbd", - "volumeMode": "Filesystem", - "volumeName": "pvc-4a77a995-ce1e-463c-9726-d05966d3c5ef" - } -} -END -Apply this manifest in the cluster? 
(Type yes to confirm): yes -persistentvolumeclaim/data-test-0 created ->kubectl create -f - <<"END" -{ - "apiVersion": "v1", - "kind": "PersistentVolume", - "metadata": { - "annotations": { - "pv.kubernetes.io/provisioned-by": "rbd.csi.ceph.com", - "volume.kubernetes.io/provisioner-deletion-secret-name": "csi-new", - "volume.kubernetes.io/provisioner-deletion-secret-namespace": "d8-ceph-csi" - }, - "finalizers": [ - "kubernetes.io/pv-protection" - ], - "name": "pvc-4a77a995-ce1e-463c-9726-d05966d3c5ef" - }, - "spec": { - "accessModes": [ - "ReadWriteOnce" - ], - "capacity": { - "storage": "1Gi" - }, - "claimRef": { - "apiVersion": "v1", - "kind": "PersistentVolumeClaim", - "name": "data-test-0", - "namespace": "default", - "resourceVersion": "14908531", - "uid": "0ac58d43-75f9-4481-96fd-dcf8ca60ad85" - }, - "mountOptions": [ - "discard" - ], - "persistentVolumeReclaimPolicy": "Retain", - "storageClassName": "ceph-csi-rbd", - "volumeMode": "Filesystem", - "csi": { - "controllerExpandSecretRef": { - "name": "csi-new", - "namespace": "d8-ceph-csi" - }, - "driver": "rbd.csi.ceph.com", - "fsType": "ext4", - "nodeStageSecretRef": { - "name": "csi-new", - "namespace": "d8-ceph-csi" - }, - "volumeAttributes": { - "clusterID": "60f356ee-7c2d-4556-81be-c24b34a30b2a", - "imageFeatures": "layering", - "imageName": "csi-vol-162a2c43-568e-40ab-aedb-a4632a613ecd", - "journalPool": "kube", - "pool": "kube", - "storage.kubernetes.io/csiProvisionerIdentity": "1666697721019-8081-rbd.csi.ceph.com" - }, - "volumeHandle": "0001-0024-60f356ee-7c2d-4556-81be-c24b34a30b2a-0000000000000005-162a2c43-568e-40ab-aedb-a4632a613ecd" - } - } -} -END -Apply this manifest in the cluster? (Type yes to confirm): yes -persistentvolume/pvc-4a77a995-ce1e-463c-9726-d05966d3c5ef created -``` - -**Caution!** Before switching to containerd, make sure that no log collectors other than log-shipper are used in the cluster. If there are any, you will either have to discard them in favor of the [log-shipper](https://deckhouse.io/documentation/v1/modules/460-log-shipper/) module or reconfigure them to work with containerd. This is because containerd has a different log format and stores log files under a different path. 
- -| CRI | Log format | Log files path | -| ---------- | ----------- | -----------------------| -| Docker | JSON | `/var/log/containers/` | -| Containerd | Plain Text | `/var/log/pods/` | - -## Additional information about the migration process - -### Table of contents - -- [Switching from the in-tree RBD driver to CSI (Ceph CSI)](#switching-from-the-in-tree-rbd-driver-to-csi-ceph-csi) - - [Additional information about the migration process](#additional-information-about-the-migration-process) - - [Table of contents](#table-of-contents) - - [Manifests of the PVCs and PVs to be migrated](#manifests-of-the-pvcs-and-pvs-to-be-migrated) - - [PVC and PV manifests to use for importing the specific Ceph CSI parameters](#pvc-and-pv-manifests-to-use-for-importing-the-specific-ceph-csi-parameters) - - [Renaming an RBD image in a ceph cluster](#renaming-an-rbd-image-in-a-ceph-cluster) - - [Deleting PVCs and PVs from the cluster](#deleting-pvcs-and-pvs-from-the-cluster) - - [Generating a new PVC manifest and creating an object in the cluster](#generating-a-new-pvc-manifest-and-creating-an-object-in-the-cluster) - - [Generating a new PV manifest and creating an object in the cluster](#generating-a-new-pv-manifest-and-creating-an-object-in-the-cluster) - -### Manifests of the PVCs and PVs to be migrated - -```yaml -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - annotations: - pv.kubernetes.io/bind-completed: "yes" - pv.kubernetes.io/bound-by-controller: "yes" - volume.beta.kubernetes.io/storage-provisioner: kubernetes.io/rbd - creationTimestamp: "2022-11-03T13:15:43Z" - finalizers: - - kubernetes.io/pvc-protection - labels: - app: test - name: data-test-0 - namespace: default - resourceVersion: "8956688" - uid: cd6f7b26-d768-4cab-88a4-baca5b242cc5 -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi - storageClassName: rbd - volumeMode: Filesystem - volumeName: pvc-cd6f7b26-d768-4cab-88a4-baca5b242cc5 -status: - accessModes: - - ReadWriteOnce - capacity: - storage: 1Gi - phase: Bound -``` - -```yaml -apiVersion: v1 -kind: PersistentVolume -metadata: - annotations: - kubernetes.io/createdby: rbd-dynamic-provisioner - pv.kubernetes.io/bound-by-controller: "yes" - pv.kubernetes.io/provisioned-by: kubernetes.io/rbd - creationTimestamp: "2022-11-03T13:15:49Z" - finalizers: - - kubernetes.io/pv-protection - name: pvc-cd6f7b26-d768-4cab-88a4-baca5b242cc5 - resourceVersion: "8956671" - uid: 4ab7fcf4-e8db-426e-a7aa-f5380ef857c7 -spec: - accessModes: - - ReadWriteOnce - capacity: - storage: 1Gi - claimRef: - apiVersion: v1 - kind: PersistentVolumeClaim - name: data-test-0 - namespace: default - resourceVersion: "8956643" - uid: cd6f7b26-d768-4cab-88a4-baca5b242cc5 - mountOptions: - - discard - persistentVolumeReclaimPolicy: Delete - rbd: - image: kubernetes-dynamic-pvc-f32fea79-d658-4ab1-967a-fb6e8f930dec - keyring: /etc/ceph/keyring - monitors: - - 192.168.4.215:6789 - pool: kube - secretRef: - name: ceph-secret - user: kube - storageClassName: rbd - volumeMode: Filesystem -status: - phase: Bound -``` - -### PVC and PV manifests to use for importing the specific Ceph CSI parameters - -The StorageClass created by the ceph-csi module is used. 
- -```yaml -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - annotations: - pv.kubernetes.io/bind-completed: "yes" - pv.kubernetes.io/bound-by-controller: "yes" - volume.beta.kubernetes.io/storage-provisioner: rbd.csi.ceph.com - creationTimestamp: "2022-11-03T12:46:20Z" - finalizers: - - kubernetes.io/pvc-protection - name: sample - namespace: default - resourceVersion: "8950577" - uid: abdbb7ea-5da6-47f3-8b76-b968a93b7bc1 -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi - storageClassName: ceph-csi-rbd - volumeMode: Filesystem - volumeName: pvc-abdbb7ea-5da6-47f3-8b76-b968a93b7bc1 -status: - accessModes: - - ReadWriteOnce - capacity: - storage: 1Gi - phase: Bound -``` - -```yaml -apiVersion: v1 -kind: PersistentVolume -metadata: - annotations: - pv.kubernetes.io/provisioned-by: rbd.csi.ceph.com - volume.kubernetes.io/provisioner-deletion-secret-name: csi-new - volume.kubernetes.io/provisioner-deletion-secret-namespace: d8-ceph-csi - creationTimestamp: "2022-11-03T12:46:27Z" - finalizers: - - kubernetes.io/pv-protection - name: pvc-abdbb7ea-5da6-47f3-8b76-b968a93b7bc1 - resourceVersion: "8950562" - uid: 6200ce15-b6f2-45af-94d0-828913e850d0 -spec: - accessModes: - - ReadWriteOnce - capacity: - storage: 1Gi - claimRef: - apiVersion: v1 - kind: PersistentVolumeClaim - name: sample - namespace: default - resourceVersion: "8950550" - uid: abdbb7ea-5da6-47f3-8b76-b968a93b7bc1 - csi: - controllerExpandSecretRef: - name: csi-new - namespace: d8-ceph-csi - driver: rbd.csi.ceph.com - fsType: ext4 - nodeStageSecretRef: - name: csi-new - namespace: d8-ceph-csi - volumeAttributes: - clusterID: 60f356ee-7c2d-4556-81be-c24b34a30b2a - imageFeatures: layering - imageName: csi-vol-880ec27e-5b75-11ed-a252-fa163ee74632 - journalPool: kube - pool: kube - storage.kubernetes.io/csiProvisionerIdentity: 1666697721019-8081-rbd.csi.ceph.com - volumeHandle: 0001-0024-60f356ee-7c2d-4556-81be-c24b34a30b2a-0000000000000005-880ec27e-5b75-11ed-a252-fa163ee74632 - mountOptions: - - discard - persistentVolumeReclaimPolicy: Delete - storageClassName: ceph-csi-rbd - volumeMode: Filesystem -status: - phase: Bound -``` - -### Renaming an RBD image in a ceph cluster - -Renaming is mandatory because the Ceph CSI driver uses a different RBD image naming format. - -The command is supposed to be run in the Ceph cluster: - -```shell -rbd mv kube/kubernetes-dynamic-pvc- kube/csi-vol- -``` - -* `kube` is the name of the pool in the Ceph cluster; -* `kubernetes-dynamic-pvc-` is the RBD image name format used by the in-tree driver; -* `csi-vol-` is the RBD image name format used by Ceph CSI. - -### Deleting PVCs and PVs from the cluster - -```bash -kubectl -n default delete pvc data-test-0 -kubectl delete pv pvc-cd6f7b26-d768-4cab-88a4-baca5b242cc5 -``` - -Since the RBD image in the Ceph cluster was renamed in the previous step, deleting PersistentVolume will not cause the image to be deleted. 
- -### Generating a new PVC manifest and creating an object in the cluster - -Here is the original manifest with comments: - -```yaml -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - annotations: - pv.kubernetes.io/bind-completed: "yes" - pv.kubernetes.io/bound-by-controller: "yes" - volume.beta.kubernetes.io/storage-provisioner: kubernetes.io/rbd # replace the annotation with a similar one from the PVC sample - creationTimestamp: "2022-11-03T13:15:43Z" # delete - finalizers: - - kubernetes.io/pvc-protection - labels: - app: test - name: data-test-0 - namespace: default - resourceVersion: "8956688" # delete - uid: cd6f7b26-d768-4cab-88a4-baca5b242cc5 # delete -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi - storageClassName: rbd - volumeMode: Filesystem - volumeName: pvc-cd6f7b26-d768-4cab-88a4-baca5b242cc5 -status: # delete - accessModes: - - ReadWriteOnce - capacity: - storage: 1Gi - phase: Bound -``` - -You will end up with the following manifest: - -```yaml -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - annotations: - pv.kubernetes.io/bind-completed: "yes" - pv.kubernetes.io/bound-by-controller: "yes" - volume.beta.kubernetes.io/storage-provisioner: rbd.csi.ceph.com - finalizers: - - kubernetes.io/pvc-protection - labels: - app: test - name: data-test-0 - namespace: default -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi - storageClassName: ceph-csi-rbd - volumeMode: Filesystem - volumeName: pvc-cd6f7b26-d768-4cab-88a4-baca5b242cc5 -``` - -Let's create an object in the cluster using this manifest. - -### Generating a new PV manifest and creating an object in the cluster - -Here is the original manifest with comments: - -```yaml -apiVersion: v1 -kind: PersistentVolume -metadata: - annotations: - kubernetes.io/createdby: rbd-dynamic-provisioner - pv.kubernetes.io/bound-by-controller: "yes" - pv.kubernetes.io/provisioned-by: kubernetes.io/rbd - creationTimestamp: "2022-11-03T13:15:49Z" # delete - finalizers: - - kubernetes.io/pv-protection - name: pvc-cd6f7b26-d768-4cab-88a4-baca5b242cc5 - resourceVersion: "8956671" # delete - uid: 4ab7fcf4-e8db-426e-a7aa-f5380ef857c7 # delete -spec: - accessModes: - - ReadWriteOnce - capacity: - storage: 1Gi - claimRef: - apiVersion: v1 - kind: PersistentVolumeClaim - name: data-test-0 - namespace: default - resourceVersion: "8956643" # delete - uid: cd6f7b26-d768-4cab-88a4-baca5b242cc5 # replace with a new one from the PVC created in the previous step - mountOptions: - - discard - persistentVolumeReclaimPolicy: Delete - rbd: # delete - image: kubernetes-dynamic-pvc-f32fea79-d658-4ab1-967a-fb6e8f930dec - keyring: /etc/ceph/keyring - monitors: - - 192.168.4.215:6789 - pool: kube - secretRef: - name: ceph-secret - user: kube - storageClassName: rbd # replace with ceph-csi-rbd - volumeMode: Filesystem - # add the csi section -status: # delete - phase: Bound -``` - -The `spec.csi` sample can be borrowed from the PV (sample) created earlier: - -```yaml - csi: - controllerExpandSecretRef: - name: csi-new - namespace: d8-ceph-csi - driver: rbd.csi.ceph.com - fsType: ext4 - nodeStageSecretRef: - name: csi-new - namespace: d8-ceph-csi - volumeAttributes: - clusterID: 60f356ee-7c2d-4556-81be-c24b34a30b2a - imageFeatures: layering - imageName: csi-vol-880ec27e-5b75-11ed-a252-fa163ee74632 # replace the uid - journalPool: kube - pool: kube - storage.kubernetes.io/csiProvisionerIdentity: 1666697721019-8081-rbd.csi.ceph.com - volumeHandle: 
0001-0024-60f356ee-7c2d-4556-81be-c24b34a30b2a-0000000000000005-880ec27e-5b75-11ed-a252-fa163ee74632 # replace the uid -``` - -In the `imageName` and `volumeHandle` fields, replace the uid of the rbd image. - -In the sample below, the uid is highlighted with the tags (`here`): - -```yaml -imageName: csi-vol-880ec27e-5b75-11ed-a252-fa163ee74632 -volumeHandle: 0001-0024-60f356ee-7c2d-4556-81be-c24b34a30b2a-0000000000000005-880ec27e-5b75-11ed-a252-fa163ee74632 -``` - -You will end up with the following manifest: - -```yaml -apiVersion: v1 -kind: PersistentVolume -metadata: - annotations: - pv.kubernetes.io/provisioned-by: rbd.csi.ceph.com - volume.kubernetes.io/provisioner-deletion-secret-name: csi-new - volume.kubernetes.io/provisioner-deletion-secret-namespace: d8-ceph-csi - finalizers: - - kubernetes.io/pv-protection - name: pvc-cd6f7b26-d768-4cab-88a4-baca5b242cc5 -spec: - accessModes: - - ReadWriteOnce - capacity: - storage: 1Gi - claimRef: - apiVersion: v1 - kind: PersistentVolumeClaim - name: data-test-0 - namespace: default - uid: cd6f7b26-d768-4cab-88a4-baca5b242cc7 - mountOptions: - - discard - persistentVolumeReclaimPolicy: Delete - csi: - controllerExpandSecretRef: - name: csi-new - namespace: d8-ceph-csi - driver: rbd.csi.ceph.com - fsType: ext4 - nodeStageSecretRef: - name: csi-new - namespace: d8-ceph-csi - volumeAttributes: - clusterID: 60f356ee-7c2d-4556-81be-c24b34a30b2a - imageFeatures: layering - imageName: csi-vol-f32fea79-d658-4ab1-967a-fb6e8f930dec - journalPool: kube - pool: kube - storage.kubernetes.io/csiProvisionerIdentity: 1666697721019-8081-rbd.csi.ceph.com - volumeHandle: 0001-0024-60f356ee-7c2d-4556-81be-c24b34a30b2a-0000000000000005-f32fea79-d658-4ab1-967a-fb6e8f930dec - storageClassName: ceph-csi-rbd - volumeMode: Filesystem -``` - -Create an object in the cluster using this manifest. - -This concludes the migration process. 
diff --git a/images/controller/api/v1alpha1/ceph_cluster_connection.go b/images/controller/api/v1alpha1/ceph_cluster_connection.go index 66c8ad4..066d820 100644 --- a/images/controller/api/v1alpha1/ceph_cluster_connection.go +++ b/images/controller/api/v1alpha1/ceph_cluster_connection.go @@ -55,9 +55,3 @@ type ClusterConfig struct { ClusterID string `json:"clusterID"` Monitors []string `json:"monitors"` } - -type ClusterConfigList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - Items []ClusterConfig `json:"items"` -} diff --git a/images/controller/go.mod b/images/controller/go.mod index 5ce50e4..804a812 100644 --- a/images/controller/go.mod +++ b/images/controller/go.mod @@ -11,7 +11,6 @@ require ( k8s.io/apimachinery v0.29.2 k8s.io/client-go v0.29.2 k8s.io/klog/v2 v2.120.1 - k8s.io/utils v0.0.0-20240102154912-e7106e64919e sigs.k8s.io/controller-runtime v0.17.5 ) @@ -65,6 +64,7 @@ require ( gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/component-base v0.29.2 // indirect k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect + k8s.io/utils v0.0.0-20240102154912-e7106e64919e // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect sigs.k8s.io/yaml v1.4.0 // indirect diff --git a/images/controller/go.sum b/images/controller/go.sum index b1f77a7..2bdb2e9 100644 --- a/images/controller/go.sum +++ b/images/controller/go.sum @@ -194,8 +194,6 @@ k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/A k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ= k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/controller-runtime v0.17.4 h1:AMf1E0+93/jLQ13fb76S6Atwqp24EQFCmNbG84GJxew= -sigs.k8s.io/controller-runtime v0.17.4/go.mod h1:N0jpP5Lo7lMTF9aL56Z/B2oWBJjey6StQM0jRbKQXtY= sigs.k8s.io/controller-runtime v0.17.5 h1:1FI9Lm7NiOOmBsgTV36/s2XrEFXnO2C4sbg/Zme72Rw= sigs.k8s.io/controller-runtime v0.17.5/go.mod h1:N0jpP5Lo7lMTF9aL56Z/B2oWBJjey6StQM0jRbKQXtY= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/images/controller/pkg/controller/ceph_cluster_connection_watcher_func.go b/images/controller/pkg/controller/ceph_cluster_connection_watcher_func.go index 2d5da21..31019d6 100644 --- a/images/controller/pkg/controller/ceph_cluster_connection_watcher_func.go +++ b/images/controller/pkg/controller/ceph_cluster_connection_watcher_func.go @@ -281,7 +281,7 @@ func IdentifyReconcileFuncForConfigMap(log logger.Logger, configMapList *corev1. 
return internal.CreateReconcile, nil } - should, err := shouldReconcileConfigMapByUpdateFunc(log, configMapList, cephClusterConnection, controllerNamespace, configMapName) + should, err := shouldReconcileConfigMapByUpdateFunc(log, configMapList, cephClusterConnection, configMapName) if err != nil { return "", err } @@ -310,7 +310,7 @@ func shouldReconcileConfigMapByCreateFunc(configMapList *corev1.ConfigMapList, c return true } -func shouldReconcileConfigMapByUpdateFunc(log logger.Logger, configMapList *corev1.ConfigMapList, cephClusterConnection *v1alpha1.CephClusterConnection, controllerNamespace, configMapName string) (bool, error) { +func shouldReconcileConfigMapByUpdateFunc(log logger.Logger, configMapList *corev1.ConfigMapList, cephClusterConnection *v1alpha1.CephClusterConnection, configMapName string) (bool, error) { if cephClusterConnection.DeletionTimestamp != nil { return false, nil } @@ -328,7 +328,7 @@ func shouldReconcileConfigMapByUpdateFunc(log logger.Logger, configMapList *core equal := false clusterConfigExists := false - for _, oldClusterConfig := range oldClusterConfigs.Items { + for _, oldClusterConfig := range oldClusterConfigs { if oldClusterConfig.ClusterID == cephClusterConnection.Spec.ClusterID { clusterConfigExists = true newClusterConfig := configureClusterConfig(cephClusterConnection) @@ -361,16 +361,16 @@ func shouldReconcileConfigMapByUpdateFunc(log logger.Logger, configMapList *core return false, err } -func getClusterConfigsFromConfigMap(configMap corev1.ConfigMap) (v1alpha1.ClusterConfigList, error) { +func getClusterConfigsFromConfigMap(configMap corev1.ConfigMap) ([]v1alpha1.ClusterConfig, error) { jsonData, ok := configMap.Data["config.json"] if !ok { - return v1alpha1.ClusterConfigList{}, fmt.Errorf("[getClusterConfigsFromConfigMap] config.json key not found in the ConfigMap %s", configMap.Name) + return nil, fmt.Errorf("[getClusterConfigsFromConfigMap] config.json key not found in the ConfigMap %s", configMap.Name) } - clusterConfigs := v1alpha1.ClusterConfigList{} + var clusterConfigs []v1alpha1.ClusterConfig err := json.Unmarshal([]byte(jsonData), &clusterConfigs) if err != nil { - return v1alpha1.ClusterConfigList{}, fmt.Errorf("[getClusterConfigsFromConfigMap] unable to unmarshal data from the ConfigMap %s: %w", configMap.Name, err) + return nil, fmt.Errorf("[getClusterConfigsFromConfigMap] unable to unmarshal data from the ConfigMap %s: %w", configMap.Name, err) } return clusterConfigs, nil @@ -482,9 +482,7 @@ func reconcileConfigMapDeleteFunc(ctx context.Context, cl client.Client, log log } func createConfigMap(clusterConfig v1alpha1.ClusterConfig, controllerNamespace, configMapName string) *corev1.ConfigMap { - clusterConfigs := v1alpha1.ClusterConfigList{ - Items: []v1alpha1.ClusterConfig{clusterConfig}, - } + clusterConfigs := []v1alpha1.ClusterConfig{clusterConfig} jsonData, _ := json.Marshal(clusterConfigs) configMap := &corev1.ConfigMap{ @@ -507,15 +505,15 @@ func createConfigMap(clusterConfig v1alpha1.ClusterConfig, controllerNamespace, func updateConfigMap(oldConfigMap *corev1.ConfigMap, cephClusterConnection *v1alpha1.CephClusterConnection, updateAction string) *corev1.ConfigMap { clusterConfigs, _ := getClusterConfigsFromConfigMap(*oldConfigMap) - for i, clusterConfig := range clusterConfigs.Items { + for i, clusterConfig := range clusterConfigs { if clusterConfig.ClusterID == cephClusterConnection.Spec.ClusterID { - clusterConfigs.Items = slices.Delete(clusterConfigs.Items, i, i+1) + clusterConfigs = slices.Delete(clusterConfigs, i, 
i+1) } } if updateAction == internal.UpdateConfigMapActionUpdate { newClusterConfig := configureClusterConfig(cephClusterConnection) - clusterConfigs.Items = append(clusterConfigs.Items, newClusterConfig) + clusterConfigs = append(clusterConfigs, newClusterConfig) } newJsonData, _ := json.Marshal(clusterConfigs) diff --git a/images/controller/pkg/controller/ceph_cluster_connection_watcher_test.go b/images/controller/pkg/controller/ceph_cluster_connection_watcher_test.go index 2f21364..dc2a4e2 100644 --- a/images/controller/pkg/controller/ceph_cluster_connection_watcher_test.go +++ b/images/controller/pkg/controller/ceph_cluster_connection_watcher_test.go @@ -248,11 +248,12 @@ func verifyConfigMap(ctx context.Context, cl client.Client, cephClusterConnectio Expect(configMap.Finalizers).To(HaveLen(1)) Expect(configMap.Finalizers).To(ContainElement(controller.CephClusterConnectionControllerFinalizerName)) - clusterConfigs := v1alpha1.ClusterConfigList{} + var clusterConfigs []v1alpha1.ClusterConfig err = json.Unmarshal([]byte(configMap.Data["config.json"]), &clusterConfigs) Expect(err).NotTo(HaveOccurred()) + Expect(clusterConfigs).NotTo(BeNil()) found := false - for _, cfg := range clusterConfigs.Items { + for _, cfg := range clusterConfigs { if cfg.ClusterID == cephClusterConnection.Spec.ClusterID { Expect(cfg.Monitors).To(ConsistOf(cephClusterConnection.Spec.Monitors)) found = true @@ -270,10 +271,10 @@ func verifyConfigMapWithoutClusterConnection(ctx context.Context, cl client.Clie Expect(configMap.Finalizers).To(HaveLen(1)) Expect(configMap.Finalizers).To(ContainElement(controller.CephClusterConnectionControllerFinalizerName)) - clusterConfigs := v1alpha1.ClusterConfigList{} + var clusterConfigs []v1alpha1.ClusterConfig err = json.Unmarshal([]byte(configMap.Data["config.json"]), &clusterConfigs) Expect(err).NotTo(HaveOccurred()) - for _, cfg := range clusterConfigs.Items { + for _, cfg := range clusterConfigs { Expect(cfg.ClusterID).NotTo(Equal(cephClusterConnection.Spec.ClusterID)) } } diff --git a/images/controller/pkg/controller/ceph_storage_class_watcher_func.go b/images/controller/pkg/controller/ceph_storage_class_watcher_func.go index dd5b0a2..b63323a 100644 --- a/images/controller/pkg/controller/ceph_storage_class_watcher_func.go +++ b/images/controller/pkg/controller/ceph_storage_class_watcher_func.go @@ -453,32 +453,32 @@ func validateCephStorageClassSpec(cephSC *storagev1alpha1.CephStorageClass) (boo case storagev1alpha1.CephStorageClassTypeRBD: if cephSC.Spec.RBD == nil { validationPassed = false - failedMsgBuilder.WriteString("the spec.rbd field is empty; ") - } - - if cephSC.Spec.RBD.DefaultFSType == "" { - validationPassed = false - failedMsgBuilder.WriteString("the spec.rbd.defaultFSType field is empty; ") - } + failedMsgBuilder.WriteString(fmt.Sprintf("CephStorageClass type is %s but the spec.rbd field is empty; ", storagev1alpha1.CephStorageClassTypeRBD)) + } else { + if cephSC.Spec.RBD.DefaultFSType == "" { + validationPassed = false + failedMsgBuilder.WriteString("the spec.rbd.defaultFSType field is empty; ") + } - if cephSC.Spec.RBD.Pool == "" { - validationPassed = false - failedMsgBuilder.WriteString("the spec.rbd.pool field is empty; ") + if cephSC.Spec.RBD.Pool == "" { + validationPassed = false + failedMsgBuilder.WriteString("the spec.rbd.pool field is empty; ") + } } case storagev1alpha1.CephStorageClassTypeCephFS: if cephSC.Spec.CephFS == nil { validationPassed = false - failedMsgBuilder.WriteString("the spec.cephfs field is empty; ") - } - - if 
cephSC.Spec.CephFS.FSName == "" { - validationPassed = false - failedMsgBuilder.WriteString("the spec.cephfs.fsName field is empty; ") - } + failedMsgBuilder.WriteString(fmt.Sprintf("CephStorageClass type is %s but the spec.cephfs field is empty; ", storagev1alpha1.CephStorageClassTypeCephFS)) + } else { + if cephSC.Spec.CephFS.FSName == "" { + validationPassed = false + failedMsgBuilder.WriteString("the spec.cephfs.fsName field is empty; ") + } - if cephSC.Spec.CephFS.Pool == "" { - validationPassed = false - failedMsgBuilder.WriteString("the spec.cephfs.pool field is empty; ") + if cephSC.Spec.CephFS.Pool == "" { + validationPassed = false + failedMsgBuilder.WriteString("the spec.cephfs.pool field is empty; ") + } } default: validationPassed = false diff --git a/images/controller/pkg/controller/ceph_storage_class_watcher_test.go b/images/controller/pkg/controller/ceph_storage_class_watcher_test.go index 1c334e6..a6cf365 100644 --- a/images/controller/pkg/controller/ceph_storage_class_watcher_test.go +++ b/images/controller/pkg/controller/ceph_storage_class_watcher_test.go @@ -38,6 +38,7 @@ var _ = Describe(controller.CephStorageClassCtrlName, func() { controllerNamespace = "test-namespace" nameForCephSC = "example-ceph-fs" nameForRBDSC = "example-rbd" + nameForBadSC = "example-bad" ) var ( ctx = context.Background() @@ -473,6 +474,91 @@ var _ = Describe(controller.CephStorageClassCtrlName, func() { Expect(sc.Labels).To(HaveLen(0)) }) + It("Create_ceph_sc_with_invalid_type", func() { + cephSCtemplate := generateCephStorageClass(CephStorageClassConfig{ + Name: nameForBadSC, + ClusterConnectionName: clusterConnectionName, + ReclaimPolicy: reclaimPolicyDelete, + Type: "invalid", + CephFS: &CephFSConfig{ + FSName: fsName, + Pool: pool, + }, + }) + + err := cl.Create(ctx, cephSCtemplate) + Expect(err).NotTo(HaveOccurred()) + + csc := &v1alpha1.CephStorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForBadSC}, csc) + Expect(err).NotTo(HaveOccurred()) + Expect(csc).NotTo(BeNil()) + Expect(csc.Name).To(Equal(nameForBadSC)) + Expect(csc.Finalizers).To(HaveLen(0)) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + + shouldRequeue, _, err := controller.RunStorageClassEventReconcile(ctx, cl, log, scList, csc, controllerNamespace) + Expect(err).To(HaveOccurred()) + Expect(shouldRequeue).To(BeFalse()) + Expect(csc.Finalizers).To(HaveLen(0)) + + sc := &v1.StorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForBadSC}, sc) + Expect(k8serrors.IsNotFound(err)).To(BeTrue()) + + err = cl.Delete(ctx, csc) + Expect(err).NotTo(HaveOccurred()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForBadSC}, csc) + Expect(k8serrors.IsNotFound(err)).To(BeTrue()) + }) + + It("Create_ceph_sc_with_props_for_another_type", func() { + cephSCtemplate := generateCephStorageClass(CephStorageClassConfig{ + Name: nameForBadSC, + ClusterConnectionName: clusterConnectionName, + ReclaimPolicy: reclaimPolicyDelete, + Type: storageTypeCephFS, + RBD: &RBDConfig{ + DefaultFSType: "ext4", + Pool: pool, + }, + }) + + err := cl.Create(ctx, cephSCtemplate) + Expect(err).NotTo(HaveOccurred()) + + csc := &v1alpha1.CephStorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForBadSC}, csc) + Expect(err).NotTo(HaveOccurred()) + Expect(csc).NotTo(BeNil()) + Expect(csc.Name).To(Equal(nameForBadSC)) + Expect(csc.Finalizers).To(HaveLen(0)) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + + shouldRequeue, _, err :=
controller.RunStorageClassEventReconcile(ctx, cl, log, scList, csc, controllerNamespace) + Expect(err).To(HaveOccurred()) + Expect(shouldRequeue).To(BeFalse()) + Expect(csc.Finalizers).To(HaveLen(0)) + + sc := &v1.StorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForBadSC}, sc) + Expect(k8serrors.IsNotFound(err)).To(BeTrue()) + + err = cl.Delete(ctx, csc) + Expect(err).NotTo(HaveOccurred()) + + csc = &v1alpha1.CephStorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForBadSC}, csc) + Expect(k8serrors.IsNotFound(err)).To(BeTrue()) + }) + It("Remove_ceph_cluster_connection", func() { cephClusterConnection := &v1alpha1.CephClusterConnection{} diff --git a/openapi/config-values.yaml b/openapi/config-values.yaml index 556676e..f8005eb 100644 --- a/openapi/config-values.yaml +++ b/openapi/config-values.yaml @@ -3,12 +3,22 @@ required: - cephfsEnabled - rbdEnabled properties: - cephfsEnabled: - type: boolean - default: true - description: Cephfs driver state - rbdEnabled: - type: boolean - default: true - description: RBD driver state + cephfsEnabled: + type: boolean + default: true + description: Cephfs driver state + rbdEnabled: + type: boolean + default: true + description: RBD driver state + logLevel: + type: string + enum: + - ERROR + - WARN + - INFO + - DEBUG + - TRACE + description: Module log level + default: DEBUG diff --git a/tools/rbd-in-tree-to-ceph-csi-migration-helper.sh b/tools/rbd-in-tree-to-ceph-csi-migration-helper.sh deleted file mode 100755 index fe37411..0000000 --- a/tools/rbd-in-tree-to-ceph-csi-migration-helper.sh +++ /dev/null @@ -1,132 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2022 Flant JSC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -if [ "$#" != "2" ] || ! grep -qs "/" <<< "$1" || ! grep -qs "/" <<< "$2"; then - echo "Not enough arguments passed or namespace is not specified." 
- echo "Usage: ./rbd-in-tree-to-ceph-csi-migration-helper.sh / /" - exit 1 -fi - -sample_pvc_namespace="$(echo -n "$1" | awk '{gsub("/"," ");print $1}')" -target_pvc_namespace="$(echo -n "$2" | awk '{gsub("/"," ");print $1}')" - -sample_pvc_name="$(echo -n "$1" | awk '{gsub("/"," ");print $2}')" -target_pvc_name="$(echo -n "$2" | awk '{gsub("/"," ");print $2}')" - - -sample_pvc="$(kubectl -n "$sample_pvc_namespace" get pvc "$sample_pvc_name" -o json)" -target_pvc="$(kubectl -n "$target_pvc_namespace" get pvc "$target_pvc_name" -o json)" - -sample_pv_name="$(jq -r '.spec.volumeName' <<< "$sample_pvc")" -target_pv_name="$(jq -r '.spec.volumeName' <<< "$target_pvc")" - -sample_pv="$(kubectl get pv "$sample_pv_name" -o json)" -target_pv="$(kubectl get pv "$target_pv_name" -o json)" - -echo "Backup PVC $target_pvc_namespace/$target_pvc_name to backup-pvc-$target_pvc_namespace-$target_pvc_name.json" -echo "$target_pvc" > "backup-pvc-$target_pvc_namespace-$target_pvc_name.json" -echo "Backup PV $target_pv_name to backup-pv-$target_pv_name.json" -echo "$target_pv" > "backup-pv-$target_pv_name.json" - -pool_name="$(jq -r '.spec.csi.volumeAttributes.pool' <<< "$sample_pv")" -original_rbd_image_name="$(jq -r '.spec.rbd.image' <<< "$target_pv")" -new_rbd_image_name="$(jq -rn --arg s "$original_rbd_image_name" '$s | sub("kubernetes-dynamic-pvc-"; "csi-vol-")')" -new_rbd_image_uid="$(jq -rn --arg s "$original_rbd_image_name" '$s | sub("kubernetes-dynamic-pvc-"; "")')" -sample_rbd_image_uid="$(jq -r '.spec.csi.volumeAttributes.imageName | sub("csi-vol-"; "")' <<< "$sample_pv")" - -csi_section_for_target_pv="$(jq -r --arg i "$new_rbd_image_name" '.spec.csi.volumeAttributes.imageName = $i | .spec.csi' <<< "$sample_pv")" -new_storage_class_for_target="$(jq -r '.spec.storageClassName' <<< "$sample_pvc")" -new_annotations_for_target_pvc="$(jq -r '.metadata.annotations' <<< "$sample_pvc")" -new_annotations_for_target_pv="$(jq -r '.metadata.annotations' <<< "$sample_pv")" - -new_target_pvc="$(jq --argjson a "$new_annotations_for_target_pvc" --arg sc "$new_storage_class_for_target" ' - .metadata.annotations = $a | - .spec.storageClassName = $sc | - del(.metadata.resourceVersion) | - del(.metadata.uid) | - del(.metadata.creationTimestamp) | - del(.status) - ' <<< "$target_pvc")" - -while true; do - msg="Rename the rbd image in your ceph cluster using the following commands: ---- -rbd mv $pool_name/$original_rbd_image_name $pool_name/$new_rbd_image_name -rbd image-meta set $pool_name/$new_rbd_image_name rbd.csi.ceph.com/thick-provisioned false ---- -After renaming, enter yes to confirm: " - read -p "$msg" confirm - if [ "$confirm" == "yes" ]; then - break - fi -done - -while true; do - read -p "PersistentVolumeClaim $target_pvc_name and PersistentVolume $target_pv_name will be removed (Type yes to confirm): " confirm - if [ "$confirm" == "yes" ]; then - echo ">kubectl -n $target_pvc_namespace delete pvc $target_pvc_name" - kubectl -n "$target_pvc_namespace" delete pvc "$target_pvc_name" - echo ">kubectl delete pv $target_pv_name" - kubectl delete pv "$target_pv_name" - break - fi -done - -echo ">kubectl create -f - <<\"END\" -$new_target_pvc -END" - -while true; do - read -p "Apply this manifest in the cluster? (Type yes to confirm): " confirm - if [ "$confirm" == "yes" ]; then - kubectl create -f - <kubectl create -f - <<\"END\" -$new_target_pv -END" - -while true; do - read -p "Apply this manifest in the cluster? (Type yes to confirm): " confirm - if [ "$confirm" == "yes" ]; then - kubectl create -f - <