diff --git a/.github/workflows/deploy_dev.yml b/.github/workflows/deploy_dev.yml index ba1a5bd..fadd54a 100644 --- a/.github/workflows/deploy_dev.yml +++ b/.github/workflows/deploy_dev.yml @@ -9,6 +9,8 @@ env: MODULES_REGISTRY_PASSWORD: ${{ secrets.DEV_MODULES_REGISTRY_PASSWORD }} RELEASE_CHANNEL: ${{ github.event.inputs.channel }} MODULES_MODULE_TAG: ${{ github.event.inputs.tag }} + GOPROXY: ${{ secrets.GOPROXY }} + SOURCE_REPO: ${{ secrets.SOURCE_REPO }} on: workflow_dispatch: diff --git a/.gitignore b/.gitignore index 4fd25ed..dcabfa3 100644 --- a/.gitignore +++ b/.gitignore @@ -32,3 +32,6 @@ __pycache__/ *.py[cod] *$py.class .pytest_cache/ + +# dev +images/controller/Makefile diff --git a/charts/deckhouse_lib_helm-1.21.0.tgz b/charts/deckhouse_lib_helm-1.21.0.tgz deleted file mode 100644 index d00bd9f..0000000 Binary files a/charts/deckhouse_lib_helm-1.21.0.tgz and /dev/null differ diff --git a/charts/deckhouse_lib_helm-1.24.0.tgz b/charts/deckhouse_lib_helm-1.24.0.tgz new file mode 100644 index 0000000..05bf4fe Binary files /dev/null and b/charts/deckhouse_lib_helm-1.24.0.tgz differ diff --git a/crds/cephclusterconnection.yaml b/crds/cephclusterconnection.yaml new file mode 100644 index 0000000..da074e2 --- /dev/null +++ b/crds/cephclusterconnection.yaml @@ -0,0 +1,87 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: cephclusterconnections.storage.deckhouse.io + labels: + heritage: deckhouse + module: csi-ceph +spec: + group: storage.deckhouse.io + scope: Cluster + names: + plural: cephclusterconnections + singular: cephclusterconnection + kind: CephClusterConnection + preserveUnknownFields: false + versions: + - name: v1alpha1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + description: | + Ceph cluster connection parameters. + required: + - spec + properties: + spec: + type: object + required: + - clusterID + - userID + - userKey + - monitors + properties: + clusterID: + description: | + Ceph cluster FSID/UUID. + + Use `ceph fsid` to get Ceph cluster FSID/UUID. + type: string + userID: + description: | + Username without `client.`. + type: string + userKey: + description: | + Ceph auth key corresponding to the `userID`. + type: string + monitors: + description: | + List of ceph-mon IP addresses in the format `10.0.0.10:6789`. + type: array + items: + type: string + status: + type: object + description: | + Displays current information about the resources managed by the CephClusterConnection custom resource. + properties: + phase: + type: string + description: | + The current state of resources managed by the CephClusterConnection custom resource. Might be: + - Failed (if the controller received incorrect resource configuration or some errors occurred during the operation) + - Created (if everything went fine) + enum: + - Failed + - Created + reason: + type: string + description: | + Additional information about the resources managed by the CephClusterConnection custom resource. 
+ subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .status.reason + name: Reason + type: string + priority: 1 + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + description: The age of this resource diff --git a/crds/cephcsi.yaml b/crds/cephcsi.yaml deleted file mode 100644 index a648c61..0000000 --- a/crds/cephcsi.yaml +++ /dev/null @@ -1,158 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: cephcsidriver.deckhouse.io - labels: - heritage: deckhouse - module: ceph-csi - app: ceph-csi -spec: - group: deckhouse.io - scope: Cluster - names: - plural: cephcsidriver - kind: CephCSIDriver - preserveUnknownFields: false - versions: - - name: v1alpha1 - served: true - storage: true - schema: &schema - openAPIV3Schema: - type: object - description: | - Ceph cluster connection parameters and StorageClasses configuration. - required: - - spec - properties: - spec: - type: object - required: - - clusterID - - userID - - userKey - - monitors - properties: - clusterID: - description: | - Ceph cluster FSID/UUID. - - Use `ceph fsid` to get Ceph cluster FSID/UUID. - type: string - userID: - description: | - Username without `client.`. - type: string - userKey: - description: | - Ceph auth key corresponding to the `userID`. - type: string - monitors: - description: | - List of ceph-mon IP addresses in the format `10.0.0.10:6789`. - type: array - items: - type: string - rbd: - type: object - properties: - storageClasses: - description: | - Description of StorageClasses for Rados Block Device (RBD). - type: array - items: - type: object - required: - - namePostfix - properties: - namePostfix: - description: | - Part of the StorageClass name after `-`. - - The name from the CustomResource `CephCSIDriver` is used as the first part. - type: string - pool: - description: | - Ceph pool into which the RBD image shall be created. - type: string - reclaimPolicy: - description: | - The reclaim policy for a Persistent Volume. - - [More info...](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#reclaiming) - type: string - enum: - - Delete - - Retain - default: Retain - allowVolumeExpansion: - description: | - Allows the users to resize the volume by editing the corresponding PVC object. - - [More info...](https://kubernetes.io/docs/concepts/storage/storage-classes/#allow-volume-expansion) - type: boolean - default: true - mountOptions: - description: | - List of mount options. - type: array - items: - type: string - defaultFSType: - description: | - Default fstype. - type: string - enum: - - ext4 - - xfs - default: ext4 - cephfs: - type: object - properties: - subvolumeGroup: - description: | - CephFS subvolume group name. - type: string - storageClasses: - type: array - description: | - CephFS StorageClasses. - items: - type: object - required: - - namePostfix - - fsName - properties: - namePostfix: - description: | - Part of the StorageClass name after `-`. - - The name from the CustomResource `CephCSIDriver` is used as the first part. - type: string - pool: - description: | - Ceph pool name into which volume data shall be stored. - type: string - reclaimPolicy: - description: | - The reclaim policy for a Persistent Volume. - type: string - enum: - - Delete - - Retain - default: Retain - allowVolumeExpansion: - description: | - Allows the users to resize the volume by editing the corresponding PVC object. 
- type: boolean - default: true - mountOptions: - description: | - List of mount options. - type: array - items: - type: string - fsName: - description: | - CephFS filesystem name. - type: string diff --git a/crds/cephstorageclass.yaml b/crds/cephstorageclass.yaml new file mode 100644 index 0000000..e9b0599 --- /dev/null +++ b/crds/cephstorageclass.yaml @@ -0,0 +1,159 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: cephstorageclasses.storage.deckhouse.io + labels: + heritage: deckhouse + module: csi-ceph +spec: + group: storage.deckhouse.io + scope: Cluster + names: + plural: cephstorageclasses + singular: cephstorageclass + kind: CephStorageClass + preserveUnknownFields: false + versions: + - name: v1alpha1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + description: | + CephStorageClass is a Kubernetes Custom Resource that defines a configuration for a Kubernetes Storage class. + required: + - spec + properties: + spec: + type: object + required: + - clusterConnectionName + - reclaimPolicy + - type + oneOf: + - required: ["cephfs"] + properties: + type: + enum: ["cephfs"] + not: + required: ["rbd"] + - required: ["rbd"] + properties: + type: + enum: ["rbd"] + not: + required: ["cephfs"] + properties: + clusterConnectionName: + description: | + Name of the CephClusterConnection custom resource. + type: string + x-kubernetes-validations: + - rule: self == oldSelf + message: Value is immutable. + minLength: 1 + reclaimPolicy: + description: | + The storage class's reclaim policy. Might be: + - Delete (If the Persistent Volume Claim is deleted, deletes the Persistent Volume and its associated storage as well) + - Retain (If the Persistent Volume Claim is deleted, remains the Persistent Volume and its associated storage) + type: string + x-kubernetes-validations: + - rule: self == oldSelf + message: Value is immutable. + enum: + - Delete + - Retain + type: + description: | + The type of the storage class. Might be: + - cephfs (CephFS) + - rbd (Rados Block Device) + type: string + x-kubernetes-validations: + - rule: self == oldSelf + message: Value is immutable. + enum: + - cephfs + - rbd + cephfs: + type: object + description: | + CephFS specific parameters. + required: + - fsName + - pool + properties: + fsName: + description: | + Name of the CephFS file system. + type: string + x-kubernetes-validations: + - rule: self == oldSelf + message: Value is immutable. + minLength: 1 + pool: + description: | + Name of the Ceph pool. + type: string + x-kubernetes-validations: + - rule: self == oldSelf + message: Value is immutable. + minLength: 1 + rbd: + type: object + description: | + Rados Block Device specific parameters. + required: + - pool + properties: + defaultFSType: + description: | + Default file system type for the Rados Block Device. + type: string + default: ext4 + x-kubernetes-validations: + - rule: self == oldSelf + message: Value is immutable. + minLength: 1 + pool: + description: | + Name of the RBD pool. + type: string + x-kubernetes-validations: + - rule: self == oldSelf + message: Value is immutable. + minLength: 1 + status: + type: object + description: | + Displays current information about the Storage Class. + properties: + phase: + type: string + description: | + The Storage class current state. 
Might be: + - Failed (if the controller received incorrect resource configuration or some errors occurred during the operation) + - Created (if everything went fine) + enum: + - Failed + - Created + reason: + type: string + description: | + Additional information about the current state of the Storage Class. + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .status.reason + name: Reason + type: string + priority: 1 + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + description: The age of this resource diff --git a/crds/doc-ru-cephclusterconnection.yaml b/crds/doc-ru-cephclusterconnection.yaml new file mode 100644 index 0000000..a96923a --- /dev/null +++ b/crds/doc-ru-cephclusterconnection.yaml @@ -0,0 +1,36 @@ +spec: + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: | + Параметры подключения к кластеру Ceph. + properties: + spec: + properties: + clusterID: + description: | + FSID/UUID кластера Ceph. + + Используйте `ceph fsid`, чтобы получить FSID/UUID кластера Ceph. + userID: + description: | + Имя пользователя без префикса `client.`. + userKey: + description: | + Авторизационный ключ Ceph, соответствующий `userID`. + monitors: + description: | + Список IP-адресов ceph-mon в формате `10.0.0.10:6789`. + status: + description: | + Отображает текущую информацию о ресурсах, управляемых пользовательским ресурсом CephClusterConnection. + properties: + phase: + description: | + Текущее состояние ресурсов, управляемых ресурсом CephClusterConnection. Возможные состояния: + - Failed (если контроллер получил некорректную конфигурацию ресурса или возникли ошибки в процессе выполнения операции) + - Created (если все прошло успешно) + reason: + description: | + Дополнительная информация о ресурсах, управляемых ресурсом CephClusterConnection. diff --git a/crds/doc-ru-cephstorageclass.yaml b/crds/doc-ru-cephstorageclass.yaml new file mode 100644 index 0000000..73e751d --- /dev/null +++ b/crds/doc-ru-cephstorageclass.yaml @@ -0,0 +1,57 @@ +spec: + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: | + Интерфейс управления StorageСlass для CSI-драйверов rbd.csi.ceph.com и cephfs.csi.ceph.com. Ручное создание StorageClass для данных драйверов запрещено. + properties: + spec: + description: | + Определяет конфигурацию StorageClass. + properties: + clusterConnectionName: + description: | + Имя ресурса CephClusterConnection. + reclaimPolicy: + description: | + Режим поведения при удалении PVC. Возможные значения: + - Delete (При удалении PVC будет удален PV и данные) + - Retain (При удалении PVC не будут удалены PV и данные. Для их удаления потребуется ручное вмешательство администратора) + type: + description: | + Тип storage-класса. Возможные значения: + - cephfs (CephFS) + - rbd (Rados Block Device) + cephfs: + description: | + Специфические параметры для CephFS. + properties: + fsName: + description: | + Имя файловой системы CephFS. + pool: + description: | + Имя пула Ceph. + rbd: + description: | + Специфические параметры для Rados Block Device. + properties: + defaultFSType: + description: | + Тип файловой системы по умолчанию для Rados Block Device. + pool: + description: | + Имя пула RBD. + status: + description: | + Отображает текущую информацию о StorageClass. + properties: + phase: + description: | + Текущее состояние StorageClass. 
Возможные состояния:
+                  - Failed (если контроллер получил некорректную конфигурацию ресурса или возникли ошибки в процессе выполнения операции)
+                  - Created (если все прошло успешно)
+              reason:
+                description: |
+                  Дополнительная информация о текущем состоянии StorageClass.
diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md
index cc1f6de..16de579 100644
--- a/docs/CONFIGURATION.md
+++ b/docs/CONFIGURATION.md
@@ -1,5 +1,5 @@
 ---
-title: "The ceph-csi module: configuration"
+title: "The csi-ceph module: configuration"
 ---
 
 {% include module-bundle.liquid %}
diff --git a/docs/CONFIGURATION_RU.md b/docs/CONFIGURATION_RU.md
index 9be6a04..2d784f9 100644
--- a/docs/CONFIGURATION_RU.md
+++ b/docs/CONFIGURATION_RU.md
@@ -1,5 +1,5 @@
 ---
-title: "Модуль ceph-csi: настройки"
+title: "Модуль csi-ceph: настройки"
 ---
 
 {% include module-bundle.liquid %}
diff --git a/docs/CR.md b/docs/CR.md
index 88c4bc6..e64d866 100644
--- a/docs/CR.md
+++ b/docs/CR.md
@@ -1,5 +1,5 @@
 ---
-title: "The ceph-csi module: Custom Resources"
+title: "The csi-ceph module: Custom Resources"
 ---
diff --git a/docs/CR_RU.md b/docs/CR_RU.md
index 4101ff6..a2077cd 100644
--- a/docs/CR_RU.md
+++ b/docs/CR_RU.md
@@ -1,5 +1,5 @@
 ---
-title: "Модуль ceph-csi: custom resources"
+title: "Модуль csi-ceph: custom resources"
 ---
diff --git a/docs/EXAMPLES.md b/docs/EXAMPLES.md
index 420c0f2..5a49dd2 100644
--- a/docs/EXAMPLES.md
+++ b/docs/EXAMPLES.md
@@ -1,34 +1,66 @@
 ---
-title: "The ceph-csi module: examples"
+title: "The csi-ceph module: examples"
 ---
 
-## An example of the `CephCSIDriver` configuration
+## Example of `CephClusterConnection` configuration
 
 ```yaml
-apiVersion: deckhouse.io/v1alpha1
-kind: CephCSIDriver
+apiVersion: storage.deckhouse.io/v1alpha1
+kind: CephClusterConnection
 metadata:
-  name: example
+  name: ceph-cluster-1
 spec:
-  clusterID: 2bf085fc-5119-404f-bb19-820ca6a1b07e
+  clusterID: 0324bfe8-c36a-4829-bacd-9e28b6480de9
   monitors:
-    - 10.0.0.10:6789
-  userID: admin
-  userKey: AQDbc7phl+eeGRAAaWL9y71mnUiRHKRFOWMPCQ==
+    - 172.20.1.28:6789
+    - 172.20.1.34:6789
+    - 172.20.1.37:6789
+  userID: user
+  userKey: AQDiVXVmBJVRLxAAg65PhODrtwbwSWrjJwssUg==
+```
+
+To verify that the object has been created (Phase should be `Created`), use the following command:
+
+```shell
+kubectl get cephclusterconnection <cephclusterconnection name>
+```
+
+## Example of `CephStorageClass` configuration
+
+### RBD
+
+```yaml
+apiVersion: storage.deckhouse.io/v1alpha1
+kind: CephStorageClass
+metadata:
+  name: ceph-rbd-sc
+spec:
+  clusterConnectionName: ceph-cluster-1
+  reclaimPolicy: Delete
+  type: rbd
   rbd:
-    storageClasses:
-    - allowVolumeExpansion: true
-      defaultFSType: ext4
-      mountOptions:
-      - discard
-      namePostfix: csi-rbd
-      pool: kubernetes-rbd
-      reclaimPolicy: Delete
-  cephfs:
-    storageClasses:
-    - allowVolumeExpansion: true
-      fsName: cephfs
-      namePostfix: csi-cephfs
-      pool: cephfs_data
-      reclaimPolicy: Delete
+    defaultFSType: ext4
+    pool: ceph-rbd-pool
+```
+
+### CephFS
+
+```yaml
+apiVersion: storage.deckhouse.io/v1alpha1
+kind: CephStorageClass
+metadata:
+  name: ceph-fs-sc
+spec:
+  clusterConnectionName: ceph-cluster-1
+  reclaimPolicy: Delete
+  type: cephfs
+  cephfs:
+    fsName: cephfs
+    pool: cephfs_data
+```
+
+To verify that the object has been created (Phase should be `Created`), use the following command:
+
+```shell
+kubectl get cephstorageclass <cephstorageclass name>
 ```
diff --git a/docs/EXAMPLES_RU.md b/docs/EXAMPLES_RU.md
index b5f4dca..e38ff23 100644
--- a/docs/EXAMPLES_RU.md
+++ b/docs/EXAMPLES_RU.md
@@ -1,34 +1,66 @@
 ---
-title: "Модуль ceph-csi: примеры"
+title: "Модуль csi-ceph: примеры"
 ---
 
-## Пример описания `CephCSIDriver`
+## Пример описания `CephClusterConnection`
 
 ```yaml
-apiVersion: deckhouse.io/v1alpha1
-kind: CephCSIDriver
+apiVersion: storage.deckhouse.io/v1alpha1
+kind: CephClusterConnection
 metadata:
-  name: example
+  name: ceph-cluster-1
 spec:
-  clusterID: 2bf085fc-5119-404f-bb19-820ca6a1b07e
+  clusterID: 0324bfe8-c36a-4829-bacd-9e28b6480de9
   monitors:
-    - 10.0.0.10:6789
-  userID: admin
-  userKey: AQDbc7phl+eeGRAAaWL9y71mnUiRHKRFOWMPCQ==
+    - 172.20.1.28:6789
+    - 172.20.1.34:6789
+    - 172.20.1.37:6789
+  userID: user
+  userKey: AQDiVXVmBJVRLxAAg65PhODrtwbwSWrjJwssUg==
+```
+
+Проверить создание объекта можно командой (Phase должен быть `Created`):
+
+```shell
+kubectl get cephclusterconnection <имя cephclusterconnection>
+```
+
+## Пример описания `CephStorageClass`
+
+### RBD
+
+```yaml
+apiVersion: storage.deckhouse.io/v1alpha1
+kind: CephStorageClass
+metadata:
+  name: ceph-rbd-sc
+spec:
+  clusterConnectionName: ceph-cluster-1
+  reclaimPolicy: Delete
+  type: rbd
   rbd:
-    storageClasses:
-    - allowVolumeExpansion: true
-      defaultFSType: ext4
-      mountOptions:
-      - discard
-      namePostfix: csi-rbd
-      pool: kubernetes-rbd
-      reclaimPolicy: Delete
-  cephfs:
-    storageClasses:
-    - allowVolumeExpansion: true
-      fsName: cephfs
-      namePostfix: csi-cephfs
-      pool: cephfs_data
-      reclaimPolicy: Delete
+    defaultFSType: ext4
+    pool: ceph-rbd-pool
+```
+
+### CephFS
+
+```yaml
+apiVersion: storage.deckhouse.io/v1alpha1
+kind: CephStorageClass
+metadata:
+  name: ceph-fs-sc
+spec:
+  clusterConnectionName: ceph-cluster-1
+  reclaimPolicy: Delete
+  type: cephfs
+  cephfs:
+    fsName: cephfs
+    pool: cephfs_data
+```
+
+Проверить создание объекта можно командой (Phase должен быть `Created`):
+
+```shell
+kubectl get cephstorageclass <имя storage class>
 ```
diff --git a/docs/FAQ.md b/docs/FAQ.md
index f4a955c..9843688 100644
--- a/docs/FAQ.md
+++ b/docs/FAQ.md
@@ -1,10 +1,10 @@
 ---
-title: "The ceph-csi module: FAQ"
+title: "The csi-ceph module: FAQ"
 ---
 
 ## How to get a list of RBD volumes separated by nodes?
 
 ```shell
-kubectl -n d8-ceph-csi get po -l app=csi-node-rbd -o custom-columns=NAME:.metadata.name,NODE:.spec.nodeName --no-headers \
-  | awk '{print "echo "$2"; kubectl -n d8-ceph-csi exec "$1" -c node -- rbd showmapped"}' | bash
+kubectl -n d8-csi-ceph get po -l app=csi-node-rbd -o custom-columns=NAME:.metadata.name,NODE:.spec.nodeName --no-headers \
+  | awk '{print "echo "$2"; kubectl -n d8-csi-ceph exec "$1" -c node -- rbd showmapped"}' | bash
 ```
diff --git a/docs/FAQ_RU.md b/docs/FAQ_RU.md
index 53ee743..53f8ab7 100644
--- a/docs/FAQ_RU.md
+++ b/docs/FAQ_RU.md
@@ -1,10 +1,10 @@
 ---
-title: "Модуль ceph-csi: FAQ"
+title: "Модуль csi-ceph: FAQ"
 ---
 
 ## Как получить список томов RBD, разделенный по узлам?
 
 ```shell
-kubectl -n d8-ceph-csi get po -l app=csi-node-rbd -o custom-columns=NAME:.metadata.name,NODE:.spec.nodeName --no-headers \
-  | awk '{print "echo "$2"; kubectl -n d8-ceph-csi exec "$1" -c node -- rbd showmapped"}' | bash
+kubectl -n d8-csi-ceph get po -l app=csi-node-rbd -o custom-columns=NAME:.metadata.name,NODE:.spec.nodeName --no-headers \
+  | awk '{print "echo "$2"; kubectl -n d8-csi-ceph exec "$1" -c node -- rbd showmapped"}' | bash
 ```
diff --git a/docs/README.md b/docs/README.md
index 8902f95..6ce67df 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -1,5 +1,5 @@
 ---
-title: "The ceph-csi module"
+title: "The csi-ceph module"
 ---
 
 The module installs and configures the CSI driver for RBD and CephFS.
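Once a `CephStorageClass` resource reaches the `Created` phase, the module is expected to create a regular Kubernetes StorageClass for it. Assuming that StorageClass gets the same name as the `CephStorageClass` resource (e.g. `ceph-rbd-sc` from the examples above — an assumption, not stated in this changeset), a PersistentVolumeClaim can reference it directly; a minimal sketch:

```yaml
# Hypothetical PVC consuming the StorageClass assumed to be created
# for the CephStorageClass named "ceph-rbd-sc" from the examples above.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: rbd-pvc-example
  namespace: default
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: ceph-rbd-sc  # assumed to match the CephStorageClass name
```

The claim should turn `Bound` once the rbd.csi.ceph.com provisioner creates the backing RBD image; `kubectl get pvc rbd-pvc-example` shows its current status.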
diff --git a/docs/README_RU.md b/docs/README_RU.md index 169fe0f..27e494f 100644 --- a/docs/README_RU.md +++ b/docs/README_RU.md @@ -1,5 +1,5 @@ --- -title: "Модуль ceph-csi" +title: "Модуль csi-ceph" --- Модуль устанавливает и настраивает CSI-драйвер для RBD и CephFS. diff --git a/docs/internal/INTREE_MIGRATION.md b/docs/internal/INTREE_MIGRATION.md deleted file mode 100644 index 4432940..0000000 --- a/docs/internal/INTREE_MIGRATION.md +++ /dev/null @@ -1,550 +0,0 @@ -# Switching from the in-tree RBD driver to CSI (Ceph CSI) - -The [rbd-in-tree-to-ceph-csi-migration-helper.sh](https://github.com/deckhouse/deckhouse/blob/main/modules/031-ceph-csi/tools/rbd-in-tree-to-ceph-csi-migration-helper.sh) script was created to simplify the migration process. -Before running it, delete the Pod (scale the StatefulSet/Deployment down to zero) which uses the PVC. You will have to manually run a command in the Ceph cluster to rename the RBD image (since Ceph CSI uses a different name format) during the migration. - -**Caution!** It is assumed that the `ceph-csi` module is enabled and configured and that the old driver continues to work. - -The script will back up the manifests if the PVCs and PVs to be migrated, delete the old manifests and create the new ones. Note that deleting the PV will not cause the RBD image in the Ceph cluster to be deleted, since the script will rename it beforehand. - -The script requires the PVCs and PVs to work; it will use their manifests to obtain the parameters specific to Ceph CSI. You can use the following manifest to create them: - -```yaml -kubectl create -f - <<"END" -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: sample - namespace: d8-monitoring -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi - storageClassName: ceph-csi-rbd -END -``` - -Below is an example of the output generated by the script: - -```bash -root@kube-master-0:~# ./rbd-in-tree-to-ceph-csi-migration-helper.sh default/sample default/data-test-0 -Rename the rbd image in your ceph cluster using the following command: ->rbd mv kube/kubernetes-dynamic-pvc-162a2c43-568e-40ab-aedb-a4632a613ecd kube/csi-vol-162a2c43-568e-40ab-aedb-a4632a613ecd -After renaming, enter yes to confirm: yes -PersistentVolumeClaim data-test-0 and PersistentVolume pvc-4a77a995-ce1e-463c-9726-d05966d3c5ef will be removed (Type yes to confirm): yes ->kubectl -n default delete pvc data-test-0 -persistentvolumeclaim "data-test-0" deleted ->kubectl delete pv pvc-4a77a995-ce1e-463c-9726-d05966d3c5ef -persistentvolume "pvc-4a77a995-ce1e-463c-9726-d05966d3c5ef" deleted ->kubectl create -f - <<"END" -{ - "apiVersion": "v1", - "kind": "PersistentVolumeClaim", - "metadata": { - "annotations": { - "pv.kubernetes.io/bind-completed": "yes", - "pv.kubernetes.io/bound-by-controller": "yes", - "volume.beta.kubernetes.io/storage-provisioner": "rbd.csi.ceph.com" - }, - "finalizers": [ - "kubernetes.io/pvc-protection" - ], - "labels": { - "app": "test" - }, - "name": "data-test-0", - "namespace": "default" - }, - "spec": { - "accessModes": [ - "ReadWriteOnce" - ], - "resources": { - "requests": { - "storage": "1Gi" - } - }, - "storageClassName": "ceph-csi-rbd", - "volumeMode": "Filesystem", - "volumeName": "pvc-4a77a995-ce1e-463c-9726-d05966d3c5ef" - } -} -END -Apply this manifest in the cluster? 
(Type yes to confirm): yes -persistentvolumeclaim/data-test-0 created ->kubectl create -f - <<"END" -{ - "apiVersion": "v1", - "kind": "PersistentVolume", - "metadata": { - "annotations": { - "pv.kubernetes.io/provisioned-by": "rbd.csi.ceph.com", - "volume.kubernetes.io/provisioner-deletion-secret-name": "csi-new", - "volume.kubernetes.io/provisioner-deletion-secret-namespace": "d8-ceph-csi" - }, - "finalizers": [ - "kubernetes.io/pv-protection" - ], - "name": "pvc-4a77a995-ce1e-463c-9726-d05966d3c5ef" - }, - "spec": { - "accessModes": [ - "ReadWriteOnce" - ], - "capacity": { - "storage": "1Gi" - }, - "claimRef": { - "apiVersion": "v1", - "kind": "PersistentVolumeClaim", - "name": "data-test-0", - "namespace": "default", - "resourceVersion": "14908531", - "uid": "0ac58d43-75f9-4481-96fd-dcf8ca60ad85" - }, - "mountOptions": [ - "discard" - ], - "persistentVolumeReclaimPolicy": "Retain", - "storageClassName": "ceph-csi-rbd", - "volumeMode": "Filesystem", - "csi": { - "controllerExpandSecretRef": { - "name": "csi-new", - "namespace": "d8-ceph-csi" - }, - "driver": "rbd.csi.ceph.com", - "fsType": "ext4", - "nodeStageSecretRef": { - "name": "csi-new", - "namespace": "d8-ceph-csi" - }, - "volumeAttributes": { - "clusterID": "60f356ee-7c2d-4556-81be-c24b34a30b2a", - "imageFeatures": "layering", - "imageName": "csi-vol-162a2c43-568e-40ab-aedb-a4632a613ecd", - "journalPool": "kube", - "pool": "kube", - "storage.kubernetes.io/csiProvisionerIdentity": "1666697721019-8081-rbd.csi.ceph.com" - }, - "volumeHandle": "0001-0024-60f356ee-7c2d-4556-81be-c24b34a30b2a-0000000000000005-162a2c43-568e-40ab-aedb-a4632a613ecd" - } - } -} -END -Apply this manifest in the cluster? (Type yes to confirm): yes -persistentvolume/pvc-4a77a995-ce1e-463c-9726-d05966d3c5ef created -``` - -**Caution!** Before switching to containerd, make sure that no log collectors other than log-shipper are used in the cluster. If there are any, you will either have to discard them in favor of the [log-shipper](https://deckhouse.io/documentation/v1/modules/460-log-shipper/) module or reconfigure them to work with containerd. This is because containerd has a different log format and stores log files under a different path. 
- -| CRI | Log format | Log files path | -| ---------- | ----------- | -----------------------| -| Docker | JSON | `/var/log/containers/` | -| Containerd | Plain Text | `/var/log/pods/` | - -## Additional information about the migration process - -### Table of contents - -- [Switching from the in-tree RBD driver to CSI (Ceph CSI)](#switching-from-the-in-tree-rbd-driver-to-csi-ceph-csi) - - [Additional information about the migration process](#additional-information-about-the-migration-process) - - [Table of contents](#table-of-contents) - - [Manifests of the PVCs and PVs to be migrated](#manifests-of-the-pvcs-and-pvs-to-be-migrated) - - [PVC and PV manifests to use for importing the specific Ceph CSI parameters](#pvc-and-pv-manifests-to-use-for-importing-the-specific-ceph-csi-parameters) - - [Renaming an RBD image in a ceph cluster](#renaming-an-rbd-image-in-a-ceph-cluster) - - [Deleting PVCs and PVs from the cluster](#deleting-pvcs-and-pvs-from-the-cluster) - - [Generating a new PVC manifest and creating an object in the cluster](#generating-a-new-pvc-manifest-and-creating-an-object-in-the-cluster) - - [Generating a new PV manifest and creating an object in the cluster](#generating-a-new-pv-manifest-and-creating-an-object-in-the-cluster) - -### Manifests of the PVCs and PVs to be migrated - -```yaml -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - annotations: - pv.kubernetes.io/bind-completed: "yes" - pv.kubernetes.io/bound-by-controller: "yes" - volume.beta.kubernetes.io/storage-provisioner: kubernetes.io/rbd - creationTimestamp: "2022-11-03T13:15:43Z" - finalizers: - - kubernetes.io/pvc-protection - labels: - app: test - name: data-test-0 - namespace: default - resourceVersion: "8956688" - uid: cd6f7b26-d768-4cab-88a4-baca5b242cc5 -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi - storageClassName: rbd - volumeMode: Filesystem - volumeName: pvc-cd6f7b26-d768-4cab-88a4-baca5b242cc5 -status: - accessModes: - - ReadWriteOnce - capacity: - storage: 1Gi - phase: Bound -``` - -```yaml -apiVersion: v1 -kind: PersistentVolume -metadata: - annotations: - kubernetes.io/createdby: rbd-dynamic-provisioner - pv.kubernetes.io/bound-by-controller: "yes" - pv.kubernetes.io/provisioned-by: kubernetes.io/rbd - creationTimestamp: "2022-11-03T13:15:49Z" - finalizers: - - kubernetes.io/pv-protection - name: pvc-cd6f7b26-d768-4cab-88a4-baca5b242cc5 - resourceVersion: "8956671" - uid: 4ab7fcf4-e8db-426e-a7aa-f5380ef857c7 -spec: - accessModes: - - ReadWriteOnce - capacity: - storage: 1Gi - claimRef: - apiVersion: v1 - kind: PersistentVolumeClaim - name: data-test-0 - namespace: default - resourceVersion: "8956643" - uid: cd6f7b26-d768-4cab-88a4-baca5b242cc5 - mountOptions: - - discard - persistentVolumeReclaimPolicy: Delete - rbd: - image: kubernetes-dynamic-pvc-f32fea79-d658-4ab1-967a-fb6e8f930dec - keyring: /etc/ceph/keyring - monitors: - - 192.168.4.215:6789 - pool: kube - secretRef: - name: ceph-secret - user: kube - storageClassName: rbd - volumeMode: Filesystem -status: - phase: Bound -``` - -### PVC and PV manifests to use for importing the specific Ceph CSI parameters - -The StorageClass created by the ceph-csi module is used. 
- -```yaml -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - annotations: - pv.kubernetes.io/bind-completed: "yes" - pv.kubernetes.io/bound-by-controller: "yes" - volume.beta.kubernetes.io/storage-provisioner: rbd.csi.ceph.com - creationTimestamp: "2022-11-03T12:46:20Z" - finalizers: - - kubernetes.io/pvc-protection - name: sample - namespace: default - resourceVersion: "8950577" - uid: abdbb7ea-5da6-47f3-8b76-b968a93b7bc1 -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi - storageClassName: ceph-csi-rbd - volumeMode: Filesystem - volumeName: pvc-abdbb7ea-5da6-47f3-8b76-b968a93b7bc1 -status: - accessModes: - - ReadWriteOnce - capacity: - storage: 1Gi - phase: Bound -``` - -```yaml -apiVersion: v1 -kind: PersistentVolume -metadata: - annotations: - pv.kubernetes.io/provisioned-by: rbd.csi.ceph.com - volume.kubernetes.io/provisioner-deletion-secret-name: csi-new - volume.kubernetes.io/provisioner-deletion-secret-namespace: d8-ceph-csi - creationTimestamp: "2022-11-03T12:46:27Z" - finalizers: - - kubernetes.io/pv-protection - name: pvc-abdbb7ea-5da6-47f3-8b76-b968a93b7bc1 - resourceVersion: "8950562" - uid: 6200ce15-b6f2-45af-94d0-828913e850d0 -spec: - accessModes: - - ReadWriteOnce - capacity: - storage: 1Gi - claimRef: - apiVersion: v1 - kind: PersistentVolumeClaim - name: sample - namespace: default - resourceVersion: "8950550" - uid: abdbb7ea-5da6-47f3-8b76-b968a93b7bc1 - csi: - controllerExpandSecretRef: - name: csi-new - namespace: d8-ceph-csi - driver: rbd.csi.ceph.com - fsType: ext4 - nodeStageSecretRef: - name: csi-new - namespace: d8-ceph-csi - volumeAttributes: - clusterID: 60f356ee-7c2d-4556-81be-c24b34a30b2a - imageFeatures: layering - imageName: csi-vol-880ec27e-5b75-11ed-a252-fa163ee74632 - journalPool: kube - pool: kube - storage.kubernetes.io/csiProvisionerIdentity: 1666697721019-8081-rbd.csi.ceph.com - volumeHandle: 0001-0024-60f356ee-7c2d-4556-81be-c24b34a30b2a-0000000000000005-880ec27e-5b75-11ed-a252-fa163ee74632 - mountOptions: - - discard - persistentVolumeReclaimPolicy: Delete - storageClassName: ceph-csi-rbd - volumeMode: Filesystem -status: - phase: Bound -``` - -### Renaming an RBD image in a ceph cluster - -Renaming is mandatory because the Ceph CSI driver uses a different RBD image naming format. - -The command is supposed to be run in the Ceph cluster: - -```shell -rbd mv kube/kubernetes-dynamic-pvc- kube/csi-vol- -``` - -* `kube` is the name of the pool in the Ceph cluster; -* `kubernetes-dynamic-pvc-` is the RBD image name format used by the in-tree driver; -* `csi-vol-` is the RBD image name format used by Ceph CSI. - -### Deleting PVCs and PVs from the cluster - -```bash -kubectl -n default delete pvc data-test-0 -kubectl delete pv pvc-cd6f7b26-d768-4cab-88a4-baca5b242cc5 -``` - -Since the RBD image in the Ceph cluster was renamed in the previous step, deleting PersistentVolume will not cause the image to be deleted. 
- -### Generating a new PVC manifest and creating an object in the cluster - -Here is the original manifest with comments: - -```yaml -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - annotations: - pv.kubernetes.io/bind-completed: "yes" - pv.kubernetes.io/bound-by-controller: "yes" - volume.beta.kubernetes.io/storage-provisioner: kubernetes.io/rbd # replace the annotation with a similar one from the PVC sample - creationTimestamp: "2022-11-03T13:15:43Z" # delete - finalizers: - - kubernetes.io/pvc-protection - labels: - app: test - name: data-test-0 - namespace: default - resourceVersion: "8956688" # delete - uid: cd6f7b26-d768-4cab-88a4-baca5b242cc5 # delete -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi - storageClassName: rbd - volumeMode: Filesystem - volumeName: pvc-cd6f7b26-d768-4cab-88a4-baca5b242cc5 -status: # delete - accessModes: - - ReadWriteOnce - capacity: - storage: 1Gi - phase: Bound -``` - -You will end up with the following manifest: - -```yaml -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - annotations: - pv.kubernetes.io/bind-completed: "yes" - pv.kubernetes.io/bound-by-controller: "yes" - volume.beta.kubernetes.io/storage-provisioner: rbd.csi.ceph.com - finalizers: - - kubernetes.io/pvc-protection - labels: - app: test - name: data-test-0 - namespace: default -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi - storageClassName: ceph-csi-rbd - volumeMode: Filesystem - volumeName: pvc-cd6f7b26-d768-4cab-88a4-baca5b242cc5 -``` - -Let's create an object in the cluster using this manifest. - -### Generating a new PV manifest and creating an object in the cluster - -Here is the original manifest with comments: - -```yaml -apiVersion: v1 -kind: PersistentVolume -metadata: - annotations: - kubernetes.io/createdby: rbd-dynamic-provisioner - pv.kubernetes.io/bound-by-controller: "yes" - pv.kubernetes.io/provisioned-by: kubernetes.io/rbd - creationTimestamp: "2022-11-03T13:15:49Z" # delete - finalizers: - - kubernetes.io/pv-protection - name: pvc-cd6f7b26-d768-4cab-88a4-baca5b242cc5 - resourceVersion: "8956671" # delete - uid: 4ab7fcf4-e8db-426e-a7aa-f5380ef857c7 # delete -spec: - accessModes: - - ReadWriteOnce - capacity: - storage: 1Gi - claimRef: - apiVersion: v1 - kind: PersistentVolumeClaim - name: data-test-0 - namespace: default - resourceVersion: "8956643" # delete - uid: cd6f7b26-d768-4cab-88a4-baca5b242cc5 # replace with a new one from the PVC created in the previous step - mountOptions: - - discard - persistentVolumeReclaimPolicy: Delete - rbd: # delete - image: kubernetes-dynamic-pvc-f32fea79-d658-4ab1-967a-fb6e8f930dec - keyring: /etc/ceph/keyring - monitors: - - 192.168.4.215:6789 - pool: kube - secretRef: - name: ceph-secret - user: kube - storageClassName: rbd # replace with ceph-csi-rbd - volumeMode: Filesystem - # add the csi section -status: # delete - phase: Bound -``` - -The `spec.csi` sample can be borrowed from the PV (sample) created earlier: - -```yaml - csi: - controllerExpandSecretRef: - name: csi-new - namespace: d8-ceph-csi - driver: rbd.csi.ceph.com - fsType: ext4 - nodeStageSecretRef: - name: csi-new - namespace: d8-ceph-csi - volumeAttributes: - clusterID: 60f356ee-7c2d-4556-81be-c24b34a30b2a - imageFeatures: layering - imageName: csi-vol-880ec27e-5b75-11ed-a252-fa163ee74632 # replace the uid - journalPool: kube - pool: kube - storage.kubernetes.io/csiProvisionerIdentity: 1666697721019-8081-rbd.csi.ceph.com - volumeHandle: 
0001-0024-60f356ee-7c2d-4556-81be-c24b34a30b2a-0000000000000005-880ec27e-5b75-11ed-a252-fa163ee74632 # replace the uid -``` - -In the `imageName` and `volumeHandle` fields, replace the uid of the rbd image. - -In the sample below, the uid is highlighted with the tags (`here`): - -```yaml -imageName: csi-vol-880ec27e-5b75-11ed-a252-fa163ee74632 -volumeHandle: 0001-0024-60f356ee-7c2d-4556-81be-c24b34a30b2a-0000000000000005-880ec27e-5b75-11ed-a252-fa163ee74632 -``` - -You will end up with the following manifest: - -```yaml -apiVersion: v1 -kind: PersistentVolume -metadata: - annotations: - pv.kubernetes.io/provisioned-by: rbd.csi.ceph.com - volume.kubernetes.io/provisioner-deletion-secret-name: csi-new - volume.kubernetes.io/provisioner-deletion-secret-namespace: d8-ceph-csi - finalizers: - - kubernetes.io/pv-protection - name: pvc-cd6f7b26-d768-4cab-88a4-baca5b242cc5 -spec: - accessModes: - - ReadWriteOnce - capacity: - storage: 1Gi - claimRef: - apiVersion: v1 - kind: PersistentVolumeClaim - name: data-test-0 - namespace: default - uid: cd6f7b26-d768-4cab-88a4-baca5b242cc7 - mountOptions: - - discard - persistentVolumeReclaimPolicy: Delete - csi: - controllerExpandSecretRef: - name: csi-new - namespace: d8-ceph-csi - driver: rbd.csi.ceph.com - fsType: ext4 - nodeStageSecretRef: - name: csi-new - namespace: d8-ceph-csi - volumeAttributes: - clusterID: 60f356ee-7c2d-4556-81be-c24b34a30b2a - imageFeatures: layering - imageName: csi-vol-f32fea79-d658-4ab1-967a-fb6e8f930dec - journalPool: kube - pool: kube - storage.kubernetes.io/csiProvisionerIdentity: 1666697721019-8081-rbd.csi.ceph.com - volumeHandle: 0001-0024-60f356ee-7c2d-4556-81be-c24b34a30b2a-0000000000000005-f32fea79-d658-4ab1-967a-fb6e8f930dec - storageClassName: ceph-csi-rbd - volumeMode: Filesystem -``` - -Create an object in the cluster using this manifest. - -This concludes the migration process. diff --git a/enabled b/enabled index 81a65ba..734674c 100755 --- a/enabled +++ b/enabled @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2021 Flant JSC +# Copyright 2024 Flant JSC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,7 +17,15 @@ source /deckhouse/shell_lib.sh function __main__() { - echo "true" > $MODULE_ENABLED_RESULT + enabled::disable_module_if_cluster_is_not_bootstraped + enabled::disable_module_in_kubernetes_versions_less_than 1.23.0 + + if values::array_has global.enabledModules "ceph-csi" ; then + echo "You must disable the ceph-csi module for the csi-ceph module to work." + echo "false" > "$MODULE_ENABLED_RESULT" + else + echo "true" > "$MODULE_ENABLED_RESULT" + fi } enabled::run $@ diff --git a/images/controller/Dockerfile b/images/controller/Dockerfile new file mode 100644 index 0000000..a66fefe --- /dev/null +++ b/images/controller/Dockerfile @@ -0,0 +1,17 @@ +ARG BASE_SCRATCH=registry.deckhouse.io/base_images/scratch@sha256:b054705fcc9f2205777d80a558d920c0b4209efdc3163c22b5bfcb5dda1db5fc +ARG BASE_GOLANG_ALPINE_BUILDER=registry.deckhouse.io/base_images/golang:1.22.3-alpine@sha256:dbf216b880b802c22e3f4f2ef0a78396b4a9a6983cb9b767c5efc351ebf946b0 + +FROM $BASE_GOLANG_ALPINE_BUILDER as builder + +WORKDIR /go/src +ADD go.mod . +ADD go.sum . +RUN go mod download +COPY . . 
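+# Build the controller binary from the cmd package for linux/amd64 so it can run in the scratch-based final stage below.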
+WORKDIR /go/src/cmd +RUN GOOS=linux GOARCH=amd64 go build -o controller + +FROM --platform=linux/amd64 $BASE_SCRATCH +COPY --from=builder /go/src/cmd/controller /go/src/cmd/controller + +ENTRYPOINT ["/go/src/cmd/controller"] diff --git a/images/controller/api/v1alpha1/ceph_cluster_connection.go b/images/controller/api/v1alpha1/ceph_cluster_connection.go new file mode 100644 index 0000000..066d820 --- /dev/null +++ b/images/controller/api/v1alpha1/ceph_cluster_connection.go @@ -0,0 +1,57 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type CephClusterConnection struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec CephClusterConnectionSpec `json:"spec"` + Status *CephClusterConnectionStatus `json:"status,omitempty"` +} + +type CephClusterConnectionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []CephClusterConnection `json:"items"` +} + +type CephClusterConnectionSpec struct { + ClusterID string `json:"clusterID"` + UserID string `json:"userID"` + UserKey string `json:"userKey"` + Monitors []string `json:"monitors"` + CephFS CephClusterConnectionSpecCephFS `json:"cephFS"` +} + +type CephClusterConnectionSpecCephFS struct { + SubvolumeGroup string `json:"subvolumeGroup"` +} + +type CephClusterConnectionStatus struct { + Phase string `json:"phase,omitempty"` + Reason string `json:"reason,omitempty"` +} + +type ClusterConfig struct { + CephFS map[string]string `json:"cephFS"` + ClusterID string `json:"clusterID"` + Monitors []string `json:"monitors"` +} diff --git a/images/controller/api/v1alpha1/ceph_storage_class.go b/images/controller/api/v1alpha1/ceph_storage_class.go new file mode 100644 index 0000000..750d400 --- /dev/null +++ b/images/controller/api/v1alpha1/ceph_storage_class.go @@ -0,0 +1,65 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +const ( + CephStorageClassTypeRBD = "rbd" + CephStorageClassTypeCephFS = "cephfs" +) + +var ( + DefaultMountOptions = []string{"discard"} +) + +type CephStorageClass struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec CephStorageClassSpec `json:"spec"` + Status *CephStorageClassStatus `json:"status,omitempty"` +} + +// CephStorageClassList contains a list of empty block device +type CephStorageClassList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []CephStorageClass `json:"items"` +} + +type CephStorageClassSpec struct { + ClusterConnectionName string `json:"clusterConnectionName"` + ReclaimPolicy string `json:"reclaimPolicy"` + Type string `json:"type"` + RBD *CephStorageClassRBD `json:"rbd,omitempty"` + CephFS *CephStorageClassCephFS `json:"cephfs,omitempty"` +} + +type CephStorageClassRBD struct { + DefaultFSType string `json:"defaultFSType"` + Pool string `json:"pool"` +} + +type CephStorageClassCephFS struct { + FSName string `json:"fsName,omitempty"` + Pool string `json:"pool"` +} + +type CephStorageClassStatus struct { + Phase string `json:"phase,omitempty"` + Reason string `json:"reason,omitempty"` +} diff --git a/images/controller/api/v1alpha1/const.go b/images/controller/api/v1alpha1/const.go new file mode 100644 index 0000000..00abd68 --- /dev/null +++ b/images/controller/api/v1alpha1/const.go @@ -0,0 +1,22 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +const ( + PhaseFailed = "Failed" + PhaseCreated = "Created" +) diff --git a/images/controller/api/v1alpha1/register.go b/images/controller/api/v1alpha1/register.go new file mode 100644 index 0000000..f106772 --- /dev/null +++ b/images/controller/api/v1alpha1/register.go @@ -0,0 +1,51 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + CephStorageClassKind = "CephStorageClass" + APIGroup = "storage.deckhouse.io" + APIVersion = "v1alpha1" +) + +// SchemeGroupVersion is group version used to register these objects +var ( + SchemeGroupVersion = schema.GroupVersion{ + Group: APIGroup, + Version: APIVersion, + } + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &CephStorageClass{}, + &CephStorageClassList{}, + &CephClusterConnection{}, + &CephClusterConnectionList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/images/controller/api/v1alpha1/zz_generated.deepcopy.go b/images/controller/api/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..3fe6764 --- /dev/null +++ b/images/controller/api/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,137 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import "k8s.io/apimachinery/pkg/runtime" + +// CephStorageClass + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephStorageClass) DeepCopyInto(out *CephStorageClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmptyBlockDevice. +func (in *CephStorageClass) DeepCopy() *CephStorageClass { + if in == nil { + return nil + } + out := new(CephStorageClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CephStorageClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephStorageClassList) DeepCopyInto(out *CephStorageClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CephStorageClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GuestbookList. +func (in *CephStorageClassList) DeepCopy() *CephStorageClassList { + if in == nil { + return nil + } + out := new(CephStorageClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *CephStorageClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// CephClusterConnection +func (in *CephClusterConnection) DeepCopyInto(out *CephClusterConnection) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmptyBlockDevice. +func (in *CephClusterConnection) DeepCopy() *CephClusterConnection { + if in == nil { + return nil + } + out := new(CephClusterConnection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CephClusterConnection) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephClusterConnectionList) DeepCopyInto(out *CephClusterConnectionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CephClusterConnection, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GuestbookList. +func (in *CephClusterConnectionList) DeepCopy() *CephClusterConnectionList { + if in == nil { + return nil + } + out := new(CephClusterConnectionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CephClusterConnectionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/images/controller/cmd/main.go b/images/controller/cmd/main.go new file mode 100644 index 0000000..3202dd0 --- /dev/null +++ b/images/controller/cmd/main.go @@ -0,0 +1,136 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "context" + "d8-controller/api/v1alpha1" + "d8-controller/pkg/config" + "d8-controller/pkg/controller" + "d8-controller/pkg/kubutils" + "d8-controller/pkg/logger" + "fmt" + "os" + goruntime "runtime" + + "sigs.k8s.io/controller-runtime/pkg/cache" + + v1 "k8s.io/api/core/v1" + sv1 "k8s.io/api/storage/v1" + extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/runtime" + apiruntime "k8s.io/apimachinery/pkg/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/healthz" + "sigs.k8s.io/controller-runtime/pkg/manager" +) + +var ( + resourcesSchemeFuncs = []func(*apiruntime.Scheme) error{ + v1alpha1.AddToScheme, + clientgoscheme.AddToScheme, + extv1.AddToScheme, + v1.AddToScheme, + sv1.AddToScheme, + } +) + +func main() { + ctx := context.Background() + cfgParams := config.NewConfig() + + log, err := logger.NewLogger(cfgParams.Loglevel) + if err != nil { + fmt.Println(fmt.Sprintf("unable to create NewLogger, err: %v", err)) + os.Exit(1) + } + + log.Info(fmt.Sprintf("[main] Go Version:%s ", goruntime.Version())) + log.Info(fmt.Sprintf("[main] OS/Arch:Go OS/Arch:%s/%s ", goruntime.GOOS, goruntime.GOARCH)) + + log.Info("[main] CfgParams has been successfully created") + log.Info(fmt.Sprintf("[main] %s = %s", config.LogLevelEnvName, cfgParams.Loglevel)) + log.Info(fmt.Sprintf("[main] RequeueStorageClassInterval = %d", cfgParams.RequeueStorageClassInterval)) + + kConfig, err := kubutils.KubernetesDefaultConfigCreate() + if err != nil { + log.Error(err, "[main] unable to KubernetesDefaultConfigCreate") + } + log.Info("[main] kubernetes config has been successfully created.") + + scheme := runtime.NewScheme() + for _, f := range resourcesSchemeFuncs { + err := f(scheme) + if err != nil { + log.Error(err, "[main] unable to add scheme to func") + os.Exit(1) + } + } + log.Info("[main] successfully read scheme CR") + + cacheOpt := cache.Options{ + DefaultNamespaces: map[string]cache.Config{ + cfgParams.ControllerNamespace: {}, + }, + } + + managerOpts := manager.Options{ + Scheme: scheme, + Cache: cacheOpt, + //MetricsBindAddress: cfgParams.MetricsPort, + HealthProbeBindAddress: cfgParams.HealthProbeBindAddress, + LeaderElection: true, + LeaderElectionNamespace: cfgParams.ControllerNamespace, + LeaderElectionID: config.ControllerName, + Logger: log.GetLogger(), + } + + mgr, err := manager.New(kConfig, managerOpts) + if err != nil { + log.Error(err, "[main] unable to manager.New") + os.Exit(1) + } + log.Info("[main] successfully created kubernetes manager") + + if _, err = controller.RunCephStorageClassWatcherController(mgr, *cfgParams, *log); err != nil { + log.Error(err, fmt.Sprintf("[main] unable to run %s", controller.CephStorageClassCtrlName)) + os.Exit(1) + } + + if _, err = controller.RunCephClusterConnectionWatcherController(mgr, *cfgParams, *log); err != nil { + log.Error(err, fmt.Sprintf("[main] unable to run %s", controller.CephClusterConnectionCtrlName)) + os.Exit(1) + } + + if err = mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { + log.Error(err, "[main] unable to mgr.AddHealthzCheck") + os.Exit(1) + } + log.Info("[main] successfully AddHealthzCheck") + + if err = mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { + log.Error(err, "[main] unable to mgr.AddReadyzCheck") + os.Exit(1) + } + log.Info("[main] successfully AddReadyzCheck") + + err = mgr.Start(ctx) + if err != nil { + log.Error(err, "[main] unable to mgr.Start") + os.Exit(1) + } +} diff --git 
a/images/controller/go.mod b/images/controller/go.mod new file mode 100644 index 0000000..804a812 --- /dev/null +++ b/images/controller/go.mod @@ -0,0 +1,71 @@ +module d8-controller + +go 1.22 + +require ( + github.com/go-logr/logr v1.4.1 + github.com/onsi/ginkgo/v2 v2.14.0 + github.com/onsi/gomega v1.30.0 + k8s.io/api v0.29.2 + k8s.io/apiextensions-apiserver v0.29.2 + k8s.io/apimachinery v0.29.2 + k8s.io/client-go v0.29.2 + k8s.io/klog/v2 v2.120.1 + sigs.k8s.io/controller-runtime v0.17.5 +) + +require ( + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/evanphx/json-patch v4.12.0+incompatible // indirect + github.com/evanphx/json-patch/v5 v5.8.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/imdario/mergo v0.3.6 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/prometheus/client_golang v1.18.0 // indirect + github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/common v0.45.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect + golang.org/x/net v0.19.0 // indirect + golang.org/x/oauth2 v0.12.0 // indirect + golang.org/x/sys v0.16.0 // indirect + golang.org/x/term v0.15.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.16.1 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/protobuf v1.31.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/component-base v0.29.2 // indirect + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect + k8s.io/utils v0.0.0-20240102154912-e7106e64919e // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect +) diff --git a/images/controller/go.sum b/images/controller/go.sum new file mode 100644 index 0000000..2bdb2e9 --- /dev/null +++ b/images/controller/go.sum @@ -0,0 +1,204 @@ +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= 
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.8.0 h1:lRj6N9Nci7MvzrXuX6HFzU8XjmhPiXPlsKEy1u0KQro= +github.com/evanphx/json-patch/v5 v5.8.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod 
h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= +github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= 
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/onsi/ginkgo/v2 v2.14.0 h1:vSmGj2Z5YPb9JwCWT6z6ihcUvDhuXLc3sJiqd3jMKAY= +github.com/onsi/ginkgo/v2 v2.14.0/go.mod h1:JkUdW7JkN0V6rFvsHcJ478egV3XH9NxpD27Hal/PhZw= +github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= +github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= +github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= +github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= 
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4= +golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools 
v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= +golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.29.2 h1:hBC7B9+MU+ptchxEqTNW2DkUosJpp1P+Wn6YncZ474A= +k8s.io/api v0.29.2/go.mod h1:sdIaaKuU7P44aoyyLlikSLayT6Vb7bvJNCX105xZXY0= +k8s.io/apiextensions-apiserver v0.29.2 h1:UK3xB5lOWSnhaCk0RFZ0LUacPZz9RY4wi/yt2Iu+btg= +k8s.io/apiextensions-apiserver v0.29.2/go.mod h1:aLfYjpA5p3OwtqNXQFkhJ56TB+spV8Gc4wfMhUA3/b8= +k8s.io/apimachinery v0.29.2 h1:EWGpfJ856oj11C52NRCHuU7rFDwxev48z+6DSlGNsV8= +k8s.io/apimachinery v0.29.2/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= +k8s.io/client-go v0.29.2 h1:FEg85el1TeZp+/vYJM7hkDlSTFZ+c5nnK44DJ4FyoRg= +k8s.io/client-go v0.29.2/go.mod h1:knlvFZE58VpqbQpJNbCbctTVXcd35mMyAAwBdpt4jrA= +k8s.io/component-base v0.29.2 h1:lpiLyuvPA9yV1aQwGLENYyK7n/8t6l3nn3zAtFTJYe8= +k8s.io/component-base v0.29.2/go.mod h1:BfB3SLrefbZXiBfbM+2H1dlat21Uewg/5qtKOl8degM= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= 
+k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ= +k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.17.5 h1:1FI9Lm7NiOOmBsgTV36/s2XrEFXnO2C4sbg/Zme72Rw= +sigs.k8s.io/controller-runtime v0.17.5/go.mod h1:N0jpP5Lo7lMTF9aL56Z/B2oWBJjey6StQM0jRbKQXtY= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/images/controller/pkg/config/config.go b/images/controller/pkg/config/config.go new file mode 100644 index 0000000..d72572c --- /dev/null +++ b/images/controller/pkg/config/config.go @@ -0,0 +1,72 @@ +/* +Copyright 2024 Flant JSC +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "d8-controller/pkg/logger" + "log" + "os" + "time" +) + +const ( + LogLevelEnvName = "LOG_LEVEL" + ControllerNamespaceEnv = "CONTROLLER_NAMESPACE" + HardcodedControllerNS = "d8-csi-ceph" + ControllerName = "d8-controller" + DefaultHealthProbeBindAddressEnvName = "HEALTH_PROBE_BIND_ADDRESS" + DefaultHealthProbeBindAddress = ":8081" + DefaultRequeueStorageClassInterval = 10 +) + +type Options struct { + Loglevel logger.Verbosity + RequeueStorageClassInterval time.Duration + HealthProbeBindAddress string + ControllerNamespace string +} + +func NewConfig() *Options { + var opts Options + + loglevel := os.Getenv(LogLevelEnvName) + if loglevel == "" { + opts.Loglevel = logger.DebugLevel + } else { + opts.Loglevel = logger.Verbosity(loglevel) + } + + opts.HealthProbeBindAddress = os.Getenv(DefaultHealthProbeBindAddressEnvName) + if opts.HealthProbeBindAddress == "" { + opts.HealthProbeBindAddress = DefaultHealthProbeBindAddress + } + + opts.ControllerNamespace = os.Getenv(ControllerNamespaceEnv) + if opts.ControllerNamespace == "" { + + namespace, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") + if err != nil { + log.Printf("Failed to get namespace from filesystem: %v", err) + log.Printf("Using hardcoded namespace: %s", HardcodedControllerNS) + opts.ControllerNamespace = HardcodedControllerNS + } else { + log.Printf("Got namespace from filesystem: %s", string(namespace)) + opts.ControllerNamespace = string(namespace) + } + } + + opts.RequeueStorageClassInterval = DefaultRequeueStorageClassInterval + + return &opts +} diff --git a/images/controller/pkg/controller/ceph_cluster_connection_watcher.go b/images/controller/pkg/controller/ceph_cluster_connection_watcher.go new file mode 100644 index 0000000..7e89a46 --- /dev/null +++ 
b/images/controller/pkg/controller/ceph_cluster_connection_watcher.go @@ -0,0 +1,228 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + v1alpha1 "d8-controller/api/v1alpha1" + "d8-controller/pkg/config" + "d8-controller/pkg/internal" + "d8-controller/pkg/logger" + "errors" + "fmt" + "reflect" + "time" + + corev1 "k8s.io/api/core/v1" + k8serr "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + "sigs.k8s.io/controller-runtime/pkg/manager" +) + +const ( + // This value used as a name for the controller AND the value for managed-by label. + CephClusterConnectionCtrlName = "d8-ceph-cluster-connection-controller" + CephClusterConnectionControllerFinalizerName = "storage.deckhouse.io/ceph-cluster-connection-controller" +) + +func RunCephClusterConnectionWatcherController( + mgr manager.Manager, + cfg config.Options, + log logger.Logger, +) (controller.Controller, error) { + cl := mgr.GetClient() + + c, err := controller.New(CephClusterConnectionCtrlName, mgr, controller.Options{ + Reconciler: reconcile.Func(func(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { + log.Info(fmt.Sprintf("[CephClusterConnectionReconciler] starts Reconcile for the CephClusterConnection %q", request.Name)) + cephClusterConnection := &v1alpha1.CephClusterConnection{} + err := cl.Get(ctx, request.NamespacedName, cephClusterConnection) + if err != nil && !k8serr.IsNotFound(err) { + log.Error(err, fmt.Sprintf("[CephClusterConnectionReconciler] unable to get CephClusterConnection, name: %s", request.Name)) + return reconcile.Result{}, err + } + + if cephClusterConnection.Name == "" { + log.Info(fmt.Sprintf("[CephClusterConnectionReconciler] seems like the CephClusterConnection for the request %s was deleted. 
Reconcile retrying will stop.", request.Name))
+				return reconcile.Result{}, nil
+			}
+
+			secretList := &corev1.SecretList{}
+			err = cl.List(ctx, secretList, client.InNamespace(cfg.ControllerNamespace))
+			if err != nil {
+				log.Error(err, "[CephClusterConnectionReconciler] unable to list Secrets")
+				return reconcile.Result{}, err
+			}
+
+			shouldRequeue, msg, err := RunCephClusterConnectionEventReconcile(ctx, cl, log, secretList, cephClusterConnection, cfg.ControllerNamespace)
+			log.Info(fmt.Sprintf("[CephClusterConnectionReconciler] CephClusterConnection %s has been reconciled with message: %s", cephClusterConnection.Name, msg))
+			phase := v1alpha1.PhaseCreated
+			if err != nil {
+				log.Error(err, fmt.Sprintf("[CephClusterConnectionReconciler] an error occurred while reconciling the CephClusterConnection, name: %s", cephClusterConnection.Name))
+				phase = v1alpha1.PhaseFailed
+			}
+
+			if msg != "" {
+				log.Debug(fmt.Sprintf("[CephClusterConnectionReconciler] update the CephClusterConnection %s with the phase %s and message: %s", cephClusterConnection.Name, phase, msg))
+				upErr := updateCephClusterConnectionPhase(ctx, cl, cephClusterConnection, phase, msg)
+				if upErr != nil {
+					log.Error(upErr, fmt.Sprintf("[CephClusterConnectionReconciler] unable to update the CephClusterConnection %s: %s", cephClusterConnection.Name, upErr.Error()))
+					shouldRequeue = true
+				}
+			}
+
+			if shouldRequeue {
+				log.Warning(fmt.Sprintf("[CephClusterConnectionReconciler] Reconciler will requeue the request, name: %s", request.Name))
+				return reconcile.Result{
+					RequeueAfter: cfg.RequeueStorageClassInterval * time.Second,
+				}, nil
+			}
+
+			log.Info(fmt.Sprintf("[CephClusterConnectionReconciler] ends Reconcile for the CephClusterConnection %q", request.Name))
+			return reconcile.Result{}, nil
+		}),
+	})
+	if err != nil {
+		log.Error(err, "[RunCephClusterConnectionWatcherController] unable to create controller")
+		return nil, err
+	}
+
+	err = c.Watch(source.Kind(mgr.GetCache(), &v1alpha1.CephClusterConnection{}), handler.Funcs{
+		CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) {
+			log.Info(fmt.Sprintf("[CreateFunc] get event for CephClusterConnection %q. Add to the queue", e.Object.GetName()))
+			request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.Object.GetNamespace(), Name: e.Object.GetName()}}
+			q.Add(request)
+		},
+		UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) {
+			log.Info(fmt.Sprintf("[UpdateFunc] get event for CephClusterConnection %q. Check if it should be reconciled", e.ObjectNew.GetName()))
+
+			oldCephClusterConnection, ok := e.ObjectOld.(*v1alpha1.CephClusterConnection)
+			if !ok {
+				err = errors.New("unable to cast event object to a given type")
+				log.Error(err, "[UpdateFunc] an error occurred while handling update event")
+				return
+			}
+			newCephClusterConnection, ok := e.ObjectNew.(*v1alpha1.CephClusterConnection)
+			if !ok {
+				err = errors.New("unable to cast event object to a given type")
+				log.Error(err, "[UpdateFunc] an error occurred while handling update event")
+				return
+			}
+
+			if reflect.DeepEqual(oldCephClusterConnection.Spec, newCephClusterConnection.Spec) && newCephClusterConnection.DeletionTimestamp == nil {
+				log.Info(fmt.Sprintf("[UpdateFunc] an update event for the CephClusterConnection %s has no Spec field updates. It will not be reconciled", newCephClusterConnection.Name))
+				return
+			}
+
+			log.Info(fmt.Sprintf("[UpdateFunc] the CephClusterConnection %q will be reconciled. 
Add to the queue", newCephClusterConnection.Name)) + request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: newCephClusterConnection.Namespace, Name: newCephClusterConnection.Name}} + q.Add(request) + }, + }) + if err != nil { + log.Error(err, "[RunCephClusterConnectionWatcherController] unable to watch the events") + return nil, err + } + + return c, nil +} + +func RunCephClusterConnectionEventReconcile(ctx context.Context, cl client.Client, log logger.Logger, secretList *corev1.SecretList, cephClusterConnection *v1alpha1.CephClusterConnection, controllerNamespace string) (shouldRequeue bool, msg string, err error) { + valid, msg := validateCephClusterConnectionSpec(cephClusterConnection) + if !valid { + err = fmt.Errorf("[RunCephClusterConnectionEventReconcile] CephClusterConnection %s has invalid spec: %s", cephClusterConnection.Name, msg) + return false, msg, err + } + log.Debug(fmt.Sprintf("[RunCephClusterConnectionEventReconcile] CephClusterConnection %s has valid spec", cephClusterConnection.Name)) + + added, err := addFinalizerIfNotExists(ctx, cl, cephClusterConnection, CephClusterConnectionControllerFinalizerName) + if err != nil { + err = fmt.Errorf("[RunCephClusterConnectionEventReconcile] unable to add a finalizer %s to the CephClusterConnection %s: %w", CephClusterConnectionControllerFinalizerName, cephClusterConnection.Name, err) + return true, err.Error(), err + } + log.Debug(fmt.Sprintf("[RunCephClusterConnectionEventReconcile] finalizer %s was added to the CephClusterConnection %s: %t", CephClusterConnectionControllerFinalizerName, cephClusterConnection.Name, added)) + + secretName := internal.CephClusterConnectionSecretPrefix + cephClusterConnection.Name + reconcileTypeForSecret, err := IdentifyReconcileFuncForSecret(log, secretList, cephClusterConnection, controllerNamespace, secretName) + if err != nil { + err = fmt.Errorf("[RunCephClusterConnectionEventReconcile] error occurred while identifying the reconcile function for CephClusterConnection %s on Secret %s: %w", cephClusterConnection.Name, secretName, err) + return true, err.Error(), err + } + + shouldRequeue = false + log.Debug(fmt.Sprintf("[RunCephClusterConnectionEventReconcile] successfully identified the reconcile type for CephClusterConnection %s to be performed on Secret %s: %s", cephClusterConnection.Name, secretName, reconcileTypeForSecret)) + switch reconcileTypeForSecret { + case internal.CreateReconcile: + shouldRequeue, msg, err = reconcileSecretCreateFunc(ctx, cl, log, cephClusterConnection, controllerNamespace, secretName) + case internal.UpdateReconcile: + shouldRequeue, msg, err = reconcileSecretUpdateFunc(ctx, cl, log, secretList, cephClusterConnection, controllerNamespace, secretName) + case internal.DeleteReconcile: + shouldRequeue, msg, err = reconcileSecretDeleteFunc(ctx, cl, log, secretList, cephClusterConnection, secretName) + default: + log.Debug(fmt.Sprintf("[RunCephClusterConnectionEventReconcile] no reconcile action required for CephClusterConnection %s on Secret %s. 
No changes will be made.", cephClusterConnection.Name, secretName)) + msg = "Successfully reconciled" + } + log.Debug(fmt.Sprintf("[RunCephClusterConnectionEventReconcile] completed reconcile operation for CephClusterConnection %s on Secret %s.", cephClusterConnection.Name, secretName)) + + if err != nil || shouldRequeue { + return shouldRequeue, msg, err + } + + configMapList := &corev1.ConfigMapList{} + err = cl.List(ctx, configMapList, client.InNamespace(controllerNamespace)) + if err != nil { + err = fmt.Errorf("[RunCephClusterConnectionEventReconcile] unable to list ConfigMaps in namespace %s: %w", controllerNamespace, err) + return true, err.Error(), err + } + + configMapName := internal.CSICephConfigMapName + + reconcileTypeForConfigMap, err := IdentifyReconcileFuncForConfigMap(log, configMapList, cephClusterConnection, controllerNamespace, configMapName) + if err != nil { + err = fmt.Errorf("[RunCephClusterConnectionEventReconcile] error occurred while identifying the reconcile function for CephClusterConnection %s on ConfigMap %s: %w", cephClusterConnection.Name, internal.CSICephConfigMapName, err) + return true, err.Error(), err + } + + log.Debug(fmt.Sprintf("[RunCephClusterConnectionEventReconcile] successfully identified the reconcile type for CephClusterConnection %s to be performed on ConfigMap %s: %s", cephClusterConnection.Name, internal.CSICephConfigMapName, reconcileTypeForConfigMap)) + switch reconcileTypeForConfigMap { + case internal.CreateReconcile: + shouldRequeue, msg, err = reconcileConfigMapCreateFunc(ctx, cl, log, cephClusterConnection, controllerNamespace, configMapName) + case internal.UpdateReconcile: + shouldRequeue, msg, err = reconcileConfigMapUpdateFunc(ctx, cl, log, configMapList, cephClusterConnection, configMapName) + case internal.DeleteReconcile: + shouldRequeue, msg, err = reconcileConfigMapDeleteFunc(ctx, cl, log, configMapList, cephClusterConnection, configMapName) + default: + log.Debug(fmt.Sprintf("[RunCephClusterConnectionEventReconcile] no reconcile action required for CephClusterConnection %s on ConfigMap %s. No changes will be made.", cephClusterConnection.Name, internal.CSICephConfigMapName)) + msg = "Successfully reconciled" + } + log.Debug(fmt.Sprintf("[RunCephClusterConnectionEventReconcile] completed reconcile operation for CephClusterConnection %s on ConfigMap %s.", cephClusterConnection.Name, internal.CSICephConfigMapName)) + + if err != nil || shouldRequeue { + return shouldRequeue, msg, err + } + + log.Debug(fmt.Sprintf("[RunCephClusterConnectionEventReconcile] finish all reconciliations for CephClusterConnection %q.", cephClusterConnection.Name)) + return false, msg, nil +} diff --git a/images/controller/pkg/controller/ceph_cluster_connection_watcher_func.go b/images/controller/pkg/controller/ceph_cluster_connection_watcher_func.go new file mode 100644 index 0000000..31019d6 --- /dev/null +++ b/images/controller/pkg/controller/ceph_cluster_connection_watcher_func.go @@ -0,0 +1,538 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+	"context"
+	v1alpha1 "d8-controller/api/v1alpha1"
+	"d8-controller/pkg/internal"
+	"d8-controller/pkg/logger"
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"slices"
+	"strings"
+
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+func validateCephClusterConnectionSpec(cephClusterConnection *v1alpha1.CephClusterConnection) (bool, string) {
+	if cephClusterConnection.DeletionTimestamp != nil {
+		return true, ""
+	}
+
+	var (
+		failedMsgBuilder strings.Builder
+		validationPassed = true
+	)
+
+	failedMsgBuilder.WriteString("Validation of CephClusterConnection failed: ")
+
+	if cephClusterConnection.Spec.ClusterID == "" {
+		validationPassed = false
+		failedMsgBuilder.WriteString("the spec.clusterID field is empty; ")
+	}
+
+	if len(cephClusterConnection.Spec.Monitors) == 0 {
+		validationPassed = false
+		failedMsgBuilder.WriteString("the spec.monitors field is empty; ")
+	}
+
+	if cephClusterConnection.Spec.UserID == "" {
+		validationPassed = false
+		failedMsgBuilder.WriteString("the spec.userID field is empty; ")
+	}
+
+	if cephClusterConnection.Spec.UserKey == "" {
+		validationPassed = false
+		failedMsgBuilder.WriteString("the spec.userKey field is empty; ")
+	}
+
+	return validationPassed, failedMsgBuilder.String()
+}
+
+func IdentifyReconcileFuncForSecret(log logger.Logger, secretList *corev1.SecretList, cephClusterConnection *v1alpha1.CephClusterConnection, controllerNamespace, secretName string) (reconcileType string, err error) {
+	if shouldReconcileByDeleteFunc(cephClusterConnection) {
+		return internal.DeleteReconcile, nil
+	}
+
+	if shouldReconcileSecretByCreateFunc(secretList, cephClusterConnection, secretName) {
+		return internal.CreateReconcile, nil
+	}
+
+	should, err := shouldReconcileSecretByUpdateFunc(log, secretList, cephClusterConnection, controllerNamespace, secretName)
+	if err != nil {
+		return "", err
+	}
+	if should {
+		return internal.UpdateReconcile, nil
+	}
+
+	return "", nil
+}
+
+func shouldReconcileSecretByCreateFunc(secretList *corev1.SecretList, cephClusterConnection *v1alpha1.CephClusterConnection, secretName string) bool {
+	if cephClusterConnection.DeletionTimestamp != nil {
+		return false
+	}
+
+	for _, s := range secretList.Items {
+		if s.Name == secretName {
+			return false
+		}
+	}
+
+	return true
+}
+
+func shouldReconcileSecretByUpdateFunc(log logger.Logger, secretList *corev1.SecretList, cephClusterConnection *v1alpha1.CephClusterConnection, controllerNamespace, secretName string) (bool, error) {
+	if cephClusterConnection.DeletionTimestamp != nil {
+		return false, nil
+	}
+
+	for _, oldSecret := range secretList.Items {
+		if oldSecret.Name == secretName {
+			newSecret := configureSecret(cephClusterConnection, controllerNamespace, secretName)
+			equal := areSecretsEqual(&oldSecret, newSecret)
+
+			log.Trace(fmt.Sprintf("[shouldReconcileSecretByUpdateFunc] old secret: %+v", oldSecret))
+			log.Trace(fmt.Sprintf("[shouldReconcileSecretByUpdateFunc] new secret: %+v", newSecret))
+			log.Trace(fmt.Sprintf("[shouldReconcileSecretByUpdateFunc] are secrets equal: %t", equal))
+
+			if !equal {
+				log.Debug(fmt.Sprintf("[shouldReconcileSecretByUpdateFunc] a secret %s should be updated", secretName))
+				return true, nil
+			}
+
+			return false, nil
+		}
+	}
+	err := fmt.Errorf("[shouldReconcileSecretByUpdateFunc] a secret %s not found in the list: %+v. It should be created", secretName, secretList.Items)
+	return false, err
+}
+
+func configureSecret(cephClusterConnection *v1alpha1.CephClusterConnection, controllerNamespace, secretName string) *corev1.Secret {
+	userID := cephClusterConnection.Spec.UserID
+	userKey := cephClusterConnection.Spec.UserKey
+	secret := &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      secretName,
+			Namespace: controllerNamespace,
+			Labels: map[string]string{
+				internal.StorageManagedLabelKey: CephClusterConnectionCtrlName,
+			},
+			Finalizers: []string{CephClusterConnectionControllerFinalizerName},
+		},
+		StringData: map[string]string{
+			// Credentials for RBD
+			"userID":  userID,
+			"userKey": userKey,
+
+			// Credentials for CephFS
+			"adminID":  userID,
+			"adminKey": userKey,
+		},
+	}
+
+	return secret
+}
+
+func areSecretsEqual(old, new *corev1.Secret) bool {
+	if reflect.DeepEqual(old.StringData, new.StringData) && reflect.DeepEqual(old.Labels, new.Labels) {
+		return true
+	}
+
+	return false
+}
+
+func reconcileSecretCreateFunc(ctx context.Context, cl client.Client, log logger.Logger, cephClusterConnection *v1alpha1.CephClusterConnection, controllerNamespace, secretName string) (shouldRequeue bool, msg string, err error) {
+	log.Debug(fmt.Sprintf("[reconcileSecretCreateFunc] starts reconciliation of CephClusterConnection %s for Secret %s", cephClusterConnection.Name, secretName))
+
+	newSecret := configureSecret(cephClusterConnection, controllerNamespace, secretName)
+	log.Debug(fmt.Sprintf("[reconcileSecretCreateFunc] successfully configured secret %s for the CephClusterConnection %s", secretName, cephClusterConnection.Name))
+	log.Trace(fmt.Sprintf("[reconcileSecretCreateFunc] secret: %+v", newSecret))
+
+	err = cl.Create(ctx, newSecret)
+	if err != nil {
+		err = fmt.Errorf("[reconcileSecretCreateFunc] unable to create a Secret %s for CephClusterConnection %s: %w", newSecret.Name, cephClusterConnection.Name, err)
+		return true, err.Error(), err
+	}
+
+	return false, "Successfully created", nil
+}
+
+func reconcileSecretUpdateFunc(ctx context.Context, cl client.Client, log logger.Logger, secretList *corev1.SecretList, cephClusterConnection *v1alpha1.CephClusterConnection, controllerNamespace, secretName string) (shouldRequeue bool, msg string, err error) {
+	log.Debug(fmt.Sprintf("[reconcileSecretUpdateFunc] starts reconciliation of CephClusterConnection %s for Secret %s", cephClusterConnection.Name, secretName))
+
+	var oldSecret *corev1.Secret
+	for _, s := range secretList.Items {
+		if s.Name == secretName {
+			oldSecret = &s
+			break
+		}
+	}
+
+	if oldSecret == nil {
+		err := fmt.Errorf("[reconcileSecretUpdateFunc] unable to find a secret %s for the CephClusterConnection %s", secretName, cephClusterConnection.Name)
+		return true, err.Error(), err
+	}
+
+	log.Debug(fmt.Sprintf("[reconcileSecretUpdateFunc] secret %s was found for the CephClusterConnection %s", secretName, cephClusterConnection.Name))
+
+	newSecret := configureSecret(cephClusterConnection, controllerNamespace, secretName)
+	log.Debug(fmt.Sprintf("[reconcileSecretUpdateFunc] successfully configured new secret %s for the CephClusterConnection %s", secretName, cephClusterConnection.Name))
+	log.Trace(fmt.Sprintf("[reconcileSecretUpdateFunc] new secret: %+v", newSecret))
+	log.Trace(fmt.Sprintf("[reconcileSecretUpdateFunc] old secret: %+v", oldSecret))
+
+	err = cl.Update(ctx, newSecret)
+	if err != nil {
+		err = fmt.Errorf("[reconcileSecretUpdateFunc] unable to update the Secret %s for CephClusterConnection %s: %w", 
newSecret.Name, cephClusterConnection.Name, err) + return true, err.Error(), err + } + + log.Info(fmt.Sprintf("[reconcileSecretUpdateFunc] successfully updated the Secret %s for the CephClusterConnection %s", newSecret.Name, cephClusterConnection.Name)) + + return false, "Successfully updated", nil +} + +func reconcileSecretDeleteFunc(ctx context.Context, cl client.Client, log logger.Logger, secretList *corev1.SecretList, cephClusterConnection *v1alpha1.CephClusterConnection, secretName string) (shouldRequeue bool, msg string, err error) { + log.Debug(fmt.Sprintf("[reconcileSecretDeleteFunc] starts reconciliation of CephClusterConnection %s for Secret %s", cephClusterConnection.Name, secretName)) + + var secret *corev1.Secret + for _, s := range secretList.Items { + if s.Name == secretName { + secret = &s + break + } + } + + if secret == nil { + log.Info(fmt.Sprintf("[reconcileSecretDeleteFunc] no secret with name %s found for the CephClusterConnection %s", secretName, cephClusterConnection.Name)) + } + + if secret != nil { + log.Info(fmt.Sprintf("[reconcileSecretDeleteFunc] successfully found a secret %s for the CephClusterConnection %s", secretName, cephClusterConnection.Name)) + err = deleteSecret(ctx, cl, secret) + + if err != nil { + err = fmt.Errorf("[reconcileSecretDeleteFunc] unable to delete the Secret %s for the CephCluster %s: %w", secret.Name, cephClusterConnection.Name, err) + return true, err.Error(), err + } + } + + log.Info(fmt.Sprintf("[reconcileSecretDeleteFunc] ends reconciliation of CephClusterConnection %s for Secret %s", cephClusterConnection.Name, secretName)) + + return false, "", nil +} + +func updateCephClusterConnectionPhase(ctx context.Context, cl client.Client, cephClusterConnection *v1alpha1.CephClusterConnection, phase, reason string) error { + if cephClusterConnection.Status == nil { + cephClusterConnection.Status = &v1alpha1.CephClusterConnectionStatus{} + } + cephClusterConnection.Status.Phase = phase + cephClusterConnection.Status.Reason = reason + + err := cl.Status().Update(ctx, cephClusterConnection) + if err != nil { + return err + } + + return nil +} + +func deleteSecret(ctx context.Context, cl client.Client, secret *corev1.Secret) error { + _, err := removeFinalizerIfExists(ctx, cl, secret, CephClusterConnectionControllerFinalizerName) + if err != nil { + return err + } + + err = cl.Delete(ctx, secret) + if err != nil { + return err + } + + return nil +} + +// ConfigMap +func IdentifyReconcileFuncForConfigMap(log logger.Logger, configMapList *corev1.ConfigMapList, cephClusterConnection *v1alpha1.CephClusterConnection, controllerNamespace, configMapName string) (reconcileType string, err error) { + if shouldReconcileByDeleteFunc(cephClusterConnection) { + return internal.DeleteReconcile, nil + } + + if shouldReconcileConfigMapByCreateFunc(configMapList, cephClusterConnection, configMapName) { + return internal.CreateReconcile, nil + } + + should, err := shouldReconcileConfigMapByUpdateFunc(log, configMapList, cephClusterConnection, configMapName) + if err != nil { + return "", err + } + if should { + return internal.UpdateReconcile, nil + } + + return "", nil +} + +func shouldReconcileConfigMapByCreateFunc(configMapList *corev1.ConfigMapList, cephClusterConnection *v1alpha1.CephClusterConnection, configMapName string) bool { + if cephClusterConnection.DeletionTimestamp != nil { + return false + } + + for _, cm := range configMapList.Items { + if cm.Name == configMapName { + if cm.Data["config.json"] == "" { + return true + } + + return false + } + } 
+ + return true +} + +func shouldReconcileConfigMapByUpdateFunc(log logger.Logger, configMapList *corev1.ConfigMapList, cephClusterConnection *v1alpha1.CephClusterConnection, configMapName string) (bool, error) { + if cephClusterConnection.DeletionTimestamp != nil { + return false, nil + } + + configMapSelector := labels.Set(map[string]string{ + internal.StorageManagedLabelKey: CephClusterConnectionCtrlName, + }) + + for _, oldConfigMap := range configMapList.Items { + if oldConfigMap.Name == configMapName { + oldClusterConfigs, err := getClusterConfigsFromConfigMap(oldConfigMap) + if err != nil { + return false, err + } + + equal := false + clusterConfigExists := false + for _, oldClusterConfig := range oldClusterConfigs { + if oldClusterConfig.ClusterID == cephClusterConnection.Spec.ClusterID { + clusterConfigExists = true + newClusterConfig := configureClusterConfig(cephClusterConnection) + equal = reflect.DeepEqual(oldClusterConfig, newClusterConfig) + + log.Trace(fmt.Sprintf("[shouldReconcileConfigMapByUpdateFunc] old cluster config: %+v", oldClusterConfig)) + log.Trace(fmt.Sprintf("[shouldReconcileConfigMapByUpdateFunc] new cluster config: %+v", newClusterConfig)) + log.Trace(fmt.Sprintf("[shouldReconcileConfigMapByUpdateFunc] are cluster configs equal: %t", equal)) + break + } + } + + if !equal || !labels.Set(oldConfigMap.Labels).AsSelector().Matches(configMapSelector) { + if !clusterConfigExists { + log.Trace(fmt.Sprintf("[shouldReconcileConfigMapByUpdateFunc] a cluster config for the cluster %s does not exist in the ConfigMap %+v", cephClusterConnection.Spec.ClusterID, oldConfigMap)) + } + if !labels.Set(oldConfigMap.Labels).AsSelector().Matches(configMapSelector) { + log.Trace(fmt.Sprintf("[shouldReconcileConfigMapByUpdateFunc] a configMap %s labels %+v does not match the selector %+v", oldConfigMap.Name, oldConfigMap.Labels, configMapSelector)) + } + + log.Debug(fmt.Sprintf("[shouldReconcileConfigMapByUpdateFunc] a configMap %s should be updated", configMapName)) + return true, nil + } + + return false, nil + } + } + + err := fmt.Errorf("[shouldReconcileConfigMapByUpdateFunc] a configMap %s not found in the list: %+v. 
It should be created", configMapName, configMapList.Items)
+	return false, err
+}
+
+func getClusterConfigsFromConfigMap(configMap corev1.ConfigMap) ([]v1alpha1.ClusterConfig, error) {
+	jsonData, ok := configMap.Data["config.json"]
+	if !ok {
+		return nil, fmt.Errorf("[getClusterConfigsFromConfigMap] config.json key not found in the ConfigMap %s", configMap.Name)
+	}
+
+	var clusterConfigs []v1alpha1.ClusterConfig
+	err := json.Unmarshal([]byte(jsonData), &clusterConfigs)
+	if err != nil {
+		return nil, fmt.Errorf("[getClusterConfigsFromConfigMap] unable to unmarshal data from the ConfigMap %s: %w", configMap.Name, err)
+	}
+
+	return clusterConfigs, nil
+}
+
+func configureClusterConfig(cephClusterConnection *v1alpha1.CephClusterConnection) v1alpha1.ClusterConfig {
+	cephFs := map[string]string{}
+	if cephClusterConnection.Spec.CephFS.SubvolumeGroup != "" {
+		cephFs = map[string]string{
+			"subvolumeGroup": cephClusterConnection.Spec.CephFS.SubvolumeGroup,
+		}
+	}
+
+	clusterConfig := v1alpha1.ClusterConfig{
+		ClusterID: cephClusterConnection.Spec.ClusterID,
+		Monitors:  cephClusterConnection.Spec.Monitors,
+		CephFS:    cephFs,
+	}
+
+	return clusterConfig
+}
+
+func reconcileConfigMapCreateFunc(ctx context.Context, cl client.Client, log logger.Logger, cephClusterConnection *v1alpha1.CephClusterConnection, controllerNamespace, configMapName string) (shouldRequeue bool, msg string, err error) {
+	log.Debug(fmt.Sprintf("[reconcileConfigMapCreateFunc] starts reconciliation of ConfigMap %s for CephClusterConnection %s", configMapName, cephClusterConnection.Name))
+
+	newClusterConfig := configureClusterConfig(cephClusterConnection)
+	newConfigMap := createConfigMap(newClusterConfig, controllerNamespace, configMapName)
+	log.Debug(fmt.Sprintf("[reconcileConfigMapCreateFunc] successfully configured ConfigMap %s for the CephClusterConnection %s", configMapName, cephClusterConnection.Name))
+	log.Trace(fmt.Sprintf("[reconcileConfigMapCreateFunc] configMap: %+v", newConfigMap))
+
+	err = cl.Create(ctx, newConfigMap)
+	if err != nil {
+		err = fmt.Errorf("[reconcileConfigMapCreateFunc] unable to create a ConfigMap %s for CephClusterConnection %s: %w", newConfigMap.Name, cephClusterConnection.Name, err)
+		return true, err.Error(), err
+	}
+
+	return false, "Successfully created", nil
+}
+
+func reconcileConfigMapUpdateFunc(ctx context.Context, cl client.Client, log logger.Logger, configMapList *corev1.ConfigMapList, cephClusterConnection *v1alpha1.CephClusterConnection, configMapName string) (shouldRequeue bool, msg string, err error) {
+	log.Debug(fmt.Sprintf("[reconcileConfigMapUpdateFunc] starts reconciliation of ConfigMap %s for CephClusterConnection %s", configMapName, cephClusterConnection.Name))
+
+	var oldConfigMap *corev1.ConfigMap
+	for _, cm := range configMapList.Items {
+		if cm.Name == configMapName {
+			oldConfigMap = &cm
+			break
+		}
+	}
+
+	if oldConfigMap == nil {
+		err := fmt.Errorf("[reconcileConfigMapUpdateFunc] unable to find a ConfigMap %s for the CephClusterConnection %s", configMapName, cephClusterConnection.Name)
+		return true, err.Error(), err
+	}
+
+	log.Debug(fmt.Sprintf("[reconcileConfigMapUpdateFunc] ConfigMap %s was found for the CephClusterConnection %s", configMapName, cephClusterConnection.Name))
+
+	updatedConfigMap := updateConfigMap(oldConfigMap, cephClusterConnection, internal.UpdateConfigMapActionUpdate)
+	log.Debug(fmt.Sprintf("[reconcileConfigMapUpdateFunc] successfully configured new ConfigMap %s for the CephClusterConnection %s", configMapName, 
cephClusterConnection.Name)) + log.Trace(fmt.Sprintf("[reconcileConfigMapUpdateFunc] updated ConfigMap: %+v", updatedConfigMap)) + log.Trace(fmt.Sprintf("[reconcileConfigMapUpdateFunc] old ConfigMap: %+v", oldConfigMap)) + + err = cl.Update(ctx, updatedConfigMap) + if err != nil { + err = fmt.Errorf("[reconcileConfigMapUpdateFunc] unable to update the ConfigMap %s for CephClusterConnection %s: %w", updatedConfigMap.Name, cephClusterConnection.Name, err) + return true, err.Error(), err + } + + log.Info(fmt.Sprintf("[reconcileConfigMapUpdateFunc] successfully updated the ConfigMap %s for the CephClusterConnection %s", updatedConfigMap.Name, cephClusterConnection.Name)) + + return false, "Successfully updated", nil +} + +func reconcileConfigMapDeleteFunc(ctx context.Context, cl client.Client, log logger.Logger, configMapList *corev1.ConfigMapList, cephClusterConnection *v1alpha1.CephClusterConnection, configMapName string) (shouldRequeue bool, msg string, err error) { + log.Debug(fmt.Sprintf("[reconcileConfigMapDeleteFunc] starts reconciliation of ConfigMap %s for CephClusterConnection %s", configMapName, cephClusterConnection.Name)) + + var configMap *corev1.ConfigMap + for _, cm := range configMapList.Items { + if cm.Name == configMapName { + configMap = &cm + break + } + } + + if configMap == nil { + log.Info(fmt.Sprintf("[reconcileConfigMapDeleteFunc] no ConfigMap with name %s found for the CephClusterConnection %s", configMapName, cephClusterConnection.Name)) + } + + if configMap != nil { + log.Info(fmt.Sprintf("[reconcileConfigMapDeleteFunc] successfully found a ConfigMap %s for the CephClusterConnection %s", configMapName, cephClusterConnection.Name)) + newConfigMap := updateConfigMap(configMap, cephClusterConnection, internal.UpdateConfigMapActionDelete) + + err := cl.Update(ctx, newConfigMap) + if err != nil { + err = fmt.Errorf("[reconcileConfigMapDeleteFunc] unable to delete cluster config for the CephClusterConnection %s from the ConfigMap %s: %w", cephClusterConnection.Name, configMapName, err) + return true, err.Error(), err + } + } + + _, err = removeFinalizerIfExists(ctx, cl, cephClusterConnection, CephClusterConnectionControllerFinalizerName) + if err != nil { + err = fmt.Errorf("[reconcileConfigMapDeleteFunc] unable to remove finalizer from the CephClusterConnection %s: %w", cephClusterConnection.Name, err) + return true, err.Error(), err + } + + log.Info(fmt.Sprintf("[reconcileConfigMapDeleteFunc] ends reconciliation of ConfigMap %s for CephClusterConnection %s", configMapName, cephClusterConnection.Name)) + + return false, "", nil +} + +func createConfigMap(clusterConfig v1alpha1.ClusterConfig, controllerNamespace, configMapName string) *corev1.ConfigMap { + clusterConfigs := []v1alpha1.ClusterConfig{clusterConfig} + jsonData, _ := json.Marshal(clusterConfigs) + + configMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: configMapName, + Namespace: controllerNamespace, + Labels: map[string]string{ + internal.StorageManagedLabelKey: CephClusterConnectionCtrlName, + }, + Finalizers: []string{CephClusterConnectionControllerFinalizerName}, + }, + Data: map[string]string{ + "config.json": string(jsonData), + }, + } + + return configMap +} + +func updateConfigMap(oldConfigMap *corev1.ConfigMap, cephClusterConnection *v1alpha1.CephClusterConnection, updateAction string) *corev1.ConfigMap { + clusterConfigs, _ := getClusterConfigsFromConfigMap(*oldConfigMap) + + for i, clusterConfig := range clusterConfigs { + if clusterConfig.ClusterID == 
cephClusterConnection.Spec.ClusterID { + clusterConfigs = slices.Delete(clusterConfigs, i, i+1) + } + } + + if updateAction == internal.UpdateConfigMapActionUpdate { + newClusterConfig := configureClusterConfig(cephClusterConnection) + clusterConfigs = append(clusterConfigs, newClusterConfig) + } + + newJsonData, _ := json.Marshal(clusterConfigs) + + configMap := oldConfigMap.DeepCopy() + configMap.Data["config.json"] = string(newJsonData) + + if configMap.Labels == nil { + configMap.Labels = map[string]string{} + } + configMap.Labels[internal.StorageManagedLabelKey] = CephClusterConnectionCtrlName + + if configMap.Finalizers == nil { + configMap.Finalizers = []string{} + } + + if !slices.Contains(configMap.Finalizers, CephClusterConnectionControllerFinalizerName) { + configMap.Finalizers = append(configMap.Finalizers, CephClusterConnectionControllerFinalizerName) + } + + return configMap +} diff --git a/images/controller/pkg/controller/ceph_cluster_connection_watcher_test.go b/images/controller/pkg/controller/ceph_cluster_connection_watcher_test.go new file mode 100644 index 0000000..dc2a4e2 --- /dev/null +++ b/images/controller/pkg/controller/ceph_cluster_connection_watcher_test.go @@ -0,0 +1,287 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller_test + +import ( + "context" + "encoding/json" + + v1alpha1 "d8-controller/api/v1alpha1" + "d8-controller/pkg/controller" + "d8-controller/pkg/internal" + "d8-controller/pkg/logger" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe(controller.CephClusterConnectionCtrlName, func() { + const ( + controllerNamespace = "test-namespace" + nameForClusterConnection = "example-ceph-connection" + clusterID = "clusterID1" + userID = "admin" + userKey = "key" + configMapName = internal.CSICephConfigMapName + secretNamePrefix = internal.CephClusterConnectionSecretPrefix + ) + + var ( + ctx = context.Background() + cl = NewFakeClient() + log = logger.Logger{} + monitors = []string{"mon1", "mon2", "mon3"} + ) + + It("CephClusterConnection positive operations", func() { + cephClusterConnection := &v1alpha1.CephClusterConnection{ + ObjectMeta: metav1.ObjectMeta{ + Name: nameForClusterConnection, + }, + Spec: v1alpha1.CephClusterConnectionSpec{ + ClusterID: clusterID, + Monitors: monitors, + UserID: userID, + UserKey: userKey, + }, + } + + By("Creating CephClusterConnection") + err := cl.Create(ctx, cephClusterConnection) + Expect(err).NotTo(HaveOccurred()) + + createdCephClusterConnection := &v1alpha1.CephClusterConnection{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForClusterConnection}, createdCephClusterConnection) + Expect(err).NotTo(HaveOccurred()) + Expect(createdCephClusterConnection).NotTo(BeNil()) + Expect(createdCephClusterConnection.Name).To(Equal(nameForClusterConnection)) + Expect(createdCephClusterConnection.Spec.ClusterID).To(Equal(clusterID)) + Expect(createdCephClusterConnection.Spec.UserID).To(Equal(userID)) + Expect(createdCephClusterConnection.Spec.UserKey).To(Equal(userKey)) + Expect(createdCephClusterConnection.Spec.Monitors).To(ConsistOf(monitors)) + Expect(createdCephClusterConnection.Finalizers).To(HaveLen(0)) + + By("Running reconcile for CephClusterConnection creation") + secretList := &corev1.SecretList{} + err = cl.List(ctx, secretList) + Expect(err).NotTo(HaveOccurred()) + + shouldReconcile, _, err := controller.RunCephClusterConnectionEventReconcile(ctx, cl, log, secretList, createdCephClusterConnection, controllerNamespace) + Expect(err).NotTo(HaveOccurred()) + Expect(shouldReconcile).To(BeFalse()) + + By("Verifying dependent Secret") + verifySecret(ctx, cl, cephClusterConnection, controllerNamespace) + + By("Verifying dependent ConfigMap") + verifyConfigMap(ctx, cl, cephClusterConnection, controllerNamespace) + + By("Verifying CephClusterConnection after create reconcile") + createdCephClusterConnection = &v1alpha1.CephClusterConnection{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForClusterConnection}, createdCephClusterConnection) + Expect(err).NotTo(HaveOccurred()) + Expect(createdCephClusterConnection).NotTo(BeNil()) + Expect(createdCephClusterConnection.Finalizers).To(HaveLen(1)) + Expect(createdCephClusterConnection.Finalizers).To(ContainElement(controller.CephClusterConnectionControllerFinalizerName)) + // Expect(createdCephClusterConnection.Status).NotTo(BeNil()) + // Expect(createdCephClusterConnection.Status.Phase).To(Equal(v1alpha1.PhaseCreated)) + + By("Updating CephClusterConnection") + newMonitors := []string{"mon4", "mon5", "mon6"} + createdCephClusterConnection.Spec.Monitors = newMonitors + err = cl.Update(ctx, createdCephClusterConnection) + Expect(err).NotTo(HaveOccurred()) + + updatedCephClusterConnection := &v1alpha1.CephClusterConnection{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForClusterConnection}, updatedCephClusterConnection) + 
Expect(err).NotTo(HaveOccurred()) + Expect(updatedCephClusterConnection).NotTo(BeNil()) + Expect(updatedCephClusterConnection.Spec.Monitors).To(ConsistOf(newMonitors)) + + By("Running reconcile for CephClusterConnection update") + secretList = &corev1.SecretList{} + err = cl.List(ctx, secretList) + Expect(err).NotTo(HaveOccurred()) + + shouldReconcile, _, err = controller.RunCephClusterConnectionEventReconcile(ctx, cl, log, secretList, updatedCephClusterConnection, controllerNamespace) + Expect(err).NotTo(HaveOccurred()) + Expect(shouldReconcile).To(BeFalse()) + + By("Verifying updated Secret") + verifySecret(ctx, cl, updatedCephClusterConnection, controllerNamespace) + + By("Verifying updated ConfigMap") + verifyConfigMap(ctx, cl, updatedCephClusterConnection, controllerNamespace) + + By("Verifying CephClusterConnection after update reconcile") + updatedCephClusterConnection = &v1alpha1.CephClusterConnection{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForClusterConnection}, updatedCephClusterConnection) + Expect(err).NotTo(HaveOccurred()) + Expect(updatedCephClusterConnection).NotTo(BeNil()) + Expect(updatedCephClusterConnection.Finalizers).To(HaveLen(1)) + Expect(updatedCephClusterConnection.Finalizers).To(ContainElement(controller.CephClusterConnectionControllerFinalizerName)) + // Expect(updatedCephClusterConnection.Status).NotTo(BeNil()) + // Expect(updatedCephClusterConnection.Status.Phase).To(Equal(v1alpha1.PhaseCreated)) + + By("Deleting CephClusterConnection") + err = cl.Delete(ctx, cephClusterConnection) + Expect(err).NotTo(HaveOccurred()) + + By("Running reconcile for CephClusterConnection deletion") + secretList = &corev1.SecretList{} + err = cl.List(ctx, secretList) + Expect(err).NotTo(HaveOccurred()) + + deletedCephClusterConnection := &v1alpha1.CephClusterConnection{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForClusterConnection}, deletedCephClusterConnection) + Expect(err).NotTo(HaveOccurred()) + Expect(deletedCephClusterConnection).NotTo(BeNil()) + Expect(deletedCephClusterConnection.Finalizers).To(HaveLen(1)) + Expect(deletedCephClusterConnection.Finalizers).To(ContainElement(controller.CephClusterConnectionControllerFinalizerName)) + + shouldReconcile, _, err = controller.RunCephClusterConnectionEventReconcile(ctx, cl, log, secretList, deletedCephClusterConnection, controllerNamespace) + Expect(err).NotTo(HaveOccurred()) + Expect(shouldReconcile).To(BeFalse()) + + By("Verifying ConfigMap update after deletion") + verifyConfigMapWithoutClusterConnection(ctx, cl, cephClusterConnection, controllerNamespace) + + By("Verifying Secret deletion") + verifySecretNotExists(ctx, cl, cephClusterConnection, controllerNamespace) + + By("Verifying CephClusterConnection after delete reconcile") + deletedCephClusterConnection = &v1alpha1.CephClusterConnection{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForClusterConnection}, deletedCephClusterConnection) + Expect(k8serrors.IsNotFound(err)).To(BeTrue()) + }) + + It("handles invalid CephClusterConnection spec", func() { + By("Creating CephClusterConnection with empty ClusterID") + cephClusterConnection := &v1alpha1.CephClusterConnection{ + ObjectMeta: metav1.ObjectMeta{ + Name: nameForClusterConnection, + }, + Spec: v1alpha1.CephClusterConnectionSpec{ + ClusterID: "", + Monitors: []string{"mon1", "mon2", "mon3"}, + UserID: userID, + UserKey: userKey, + }, + } + + err := cl.Create(ctx, cephClusterConnection) + Expect(err).NotTo(HaveOccurred()) + + By("Running reconcile for invalid CephClusterConnection") + secretList := 
&corev1.SecretList{} + err = cl.List(ctx, secretList) + Expect(err).NotTo(HaveOccurred()) + + shouldReconcile, _, err := controller.RunCephClusterConnectionEventReconcile(ctx, cl, log, secretList, cephClusterConnection, controllerNamespace) + Expect(err).To(HaveOccurred()) + Expect(shouldReconcile).To(BeFalse()) + + By("Verifying no Secret created for invalid CephClusterConnection") + verifySecretNotExists(ctx, cl, cephClusterConnection, controllerNamespace) + + By("Verifying no ConfigMap entry created for invalid CephClusterConnection") + verifyConfigMapWithoutClusterConnection(ctx, cl, cephClusterConnection, controllerNamespace) + + By("Creating CephClusterConnection with empty Monitors") + cephClusterConnection.Spec.ClusterID = clusterID + cephClusterConnection.Spec.Monitors = []string{} + + err = cl.Update(ctx, cephClusterConnection) + Expect(err).NotTo(HaveOccurred()) + Expect(cephClusterConnection.Spec.Monitors).To(HaveLen(0)) + + By("Running reconcile for CephClusterConnection with empty Monitors") + shouldReconcile, _, err = controller.RunCephClusterConnectionEventReconcile(ctx, cl, log, secretList, cephClusterConnection, controllerNamespace) + Expect(err).To(HaveOccurred()) + Expect(shouldReconcile).To(BeFalse()) + + By("Verifying no Secret created for CephClusterConnection with empty Monitors") + verifySecretNotExists(ctx, cl, cephClusterConnection, controllerNamespace) + + By("Verifying no ConfigMap entry created for CephClusterConnection with empty Monitors") + verifyConfigMapWithoutClusterConnection(ctx, cl, cephClusterConnection, controllerNamespace) + }) +}) + +func verifySecret(ctx context.Context, cl client.Client, cephClusterConnection *v1alpha1.CephClusterConnection, controllerNamespace string) { + secretName := internal.CephClusterConnectionSecretPrefix + cephClusterConnection.Name + secret := &corev1.Secret{} + err := cl.Get(ctx, client.ObjectKey{Name: secretName, Namespace: controllerNamespace}, secret) + Expect(err).NotTo(HaveOccurred()) + Expect(secret).NotTo(BeNil()) + Expect(secret.Finalizers).To(HaveLen(1)) + Expect(secret.Finalizers).To(ContainElement(controller.CephClusterConnectionControllerFinalizerName)) + Expect(secret.StringData).To(HaveKeyWithValue("userID", cephClusterConnection.Spec.UserID)) + Expect(secret.StringData).To(HaveKeyWithValue("userKey", cephClusterConnection.Spec.UserKey)) + Expect(secret.StringData).To(HaveKeyWithValue("adminID", cephClusterConnection.Spec.UserID)) + Expect(secret.StringData).To(HaveKeyWithValue("adminKey", cephClusterConnection.Spec.UserKey)) +} + +func verifyConfigMap(ctx context.Context, cl client.Client, cephClusterConnection *v1alpha1.CephClusterConnection, controllerNamespace string) { + configMap := &corev1.ConfigMap{} + err := cl.Get(ctx, client.ObjectKey{Name: internal.CSICephConfigMapName, Namespace: controllerNamespace}, configMap) + Expect(err).NotTo(HaveOccurred()) + Expect(configMap).NotTo(BeNil()) + Expect(configMap.Finalizers).To(HaveLen(1)) + Expect(configMap.Finalizers).To(ContainElement(controller.CephClusterConnectionControllerFinalizerName)) + + var clusterConfigs []v1alpha1.ClusterConfig + err = json.Unmarshal([]byte(configMap.Data["config.json"]), &clusterConfigs) + Expect(err).NotTo(HaveOccurred()) + Expect(clusterConfigs).NotTo(BeNil()) + found := false + for _, cfg := range clusterConfigs { + if cfg.ClusterID == cephClusterConnection.Spec.ClusterID { + Expect(cfg.Monitors).To(ConsistOf(cephClusterConnection.Spec.Monitors)) + found = true + break + } + } + Expect(found).To(BeTrue(), "Cluster config 
not found in ConfigMap") +} + +func verifyConfigMapWithoutClusterConnection(ctx context.Context, cl client.Client, cephClusterConnection *v1alpha1.CephClusterConnection, controllerNamespace string) { + configMap := &corev1.ConfigMap{} + err := cl.Get(ctx, client.ObjectKey{Name: internal.CSICephConfigMapName, Namespace: controllerNamespace}, configMap) + Expect(err).NotTo(HaveOccurred()) + Expect(configMap).NotTo(BeNil()) + Expect(configMap.Finalizers).To(HaveLen(1)) + Expect(configMap.Finalizers).To(ContainElement(controller.CephClusterConnectionControllerFinalizerName)) + + var clusterConfigs []v1alpha1.ClusterConfig + err = json.Unmarshal([]byte(configMap.Data["config.json"]), &clusterConfigs) + Expect(err).NotTo(HaveOccurred()) + for _, cfg := range clusterConfigs { + Expect(cfg.ClusterID).NotTo(Equal(cephClusterConnection.Spec.ClusterID)) + } +} + +func verifySecretNotExists(ctx context.Context, cl client.Client, cephClusterConnection *v1alpha1.CephClusterConnection, controllerNamespace string) { + secretName := internal.CephClusterConnectionSecretPrefix + cephClusterConnection.Name + secret := &corev1.Secret{} + err := cl.Get(ctx, client.ObjectKey{Name: secretName, Namespace: controllerNamespace}, secret) + Expect(k8serrors.IsNotFound(err)).To(BeTrue()) +} diff --git a/images/controller/pkg/controller/ceph_storage_class_watcher.go b/images/controller/pkg/controller/ceph_storage_class_watcher.go new file mode 100644 index 0000000..40dcba7 --- /dev/null +++ b/images/controller/pkg/controller/ceph_storage_class_watcher.go @@ -0,0 +1,214 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + v1alpha1 "d8-controller/api/v1alpha1" + "d8-controller/pkg/config" + "d8-controller/pkg/internal" + "d8-controller/pkg/logger" + "errors" + "fmt" + "reflect" + "time" + + v1 "k8s.io/api/storage/v1" + k8serr "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + "sigs.k8s.io/controller-runtime/pkg/manager" +) + +const ( + // This value used as a name for the controller AND the value for managed-by label. 
+	CephStorageClassCtrlName = "d8-ceph-storage-class-controller"
+
+	StorageClassKind       = "StorageClass"
+	StorageClassAPIVersion = "storage.k8s.io/v1"
+
+	CephStorageClassRBDProvisioner    = "rbd.csi.ceph.com"
+	CephStorageClassCephFSProvisioner = "cephfs.csi.ceph.com"
+
+	CephStorageClassControllerFinalizerName = "storage.deckhouse.io/ceph-storage-class-controller"
+	CephStorageClassManagedLabelKey         = "storage.deckhouse.io/managed-by"
+	CephStorageClassManagedLabelValue       = "ceph-storage-class-controller"
+)
+
+var (
+	allowedProvisioners = []string{CephStorageClassRBDProvisioner, CephStorageClassCephFSProvisioner}
+)
+
+func RunCephStorageClassWatcherController(
+	mgr manager.Manager,
+	cfg config.Options,
+	log logger.Logger,
+) (controller.Controller, error) {
+	cl := mgr.GetClient()
+
+	c, err := controller.New(CephStorageClassCtrlName, mgr, controller.Options{
+		Reconciler: reconcile.Func(func(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
+			log.Info(fmt.Sprintf("[CephStorageClassReconciler] starts Reconcile for the CephStorageClass %q", request.Name))
+			cephSC := &v1alpha1.CephStorageClass{}
+			err := cl.Get(ctx, request.NamespacedName, cephSC)
+			if err != nil && !k8serr.IsNotFound(err) {
+				log.Error(err, fmt.Sprintf("[CephStorageClassReconciler] unable to get CephStorageClass, name: %s", request.Name))
+				return reconcile.Result{}, err
+			}
+
+			if cephSC.Name == "" {
+				log.Info(fmt.Sprintf("[CephStorageClassReconciler] seems like the CephStorageClass for the request %s was deleted. Reconcile retrying will stop.", request.Name))
+				return reconcile.Result{}, nil
+			}
+
+			scList := &v1.StorageClassList{}
+			err = cl.List(ctx, scList)
+			if err != nil {
+				log.Error(err, "[CephStorageClassReconciler] unable to list Storage Classes")
+				return reconcile.Result{}, err
+			}
+
+			shouldRequeue, msg, err := RunStorageClassEventReconcile(ctx, cl, log, scList, cephSC, cfg.ControllerNamespace)
+			log.Info(fmt.Sprintf("[CephStorageClassReconciler] CephStorageClass %s has been reconciled with message: %s", cephSC.Name, msg))
+			phase := v1alpha1.PhaseCreated
+			if err != nil {
+				log.Error(err, fmt.Sprintf("[CephStorageClassReconciler] an error occurred while reconciling the CephStorageClass, name: %s", cephSC.Name))
+				phase = v1alpha1.PhaseFailed
+			}
+
+			if msg != "" {
+				log.Debug(fmt.Sprintf("[CephStorageClassReconciler] Update the CephStorageClass %s with %s status phase and message: %s", cephSC.Name, phase, msg))
+				upErr := updateCephStorageClassPhase(ctx, cl, cephSC, phase, msg)
+				if upErr != nil {
+					log.Error(upErr, fmt.Sprintf("[CephStorageClassReconciler] unable to update the CephStorageClass %s: %s", cephSC.Name, upErr.Error()))
+					shouldRequeue = true
+				}
+			}
+
+			if shouldRequeue {
+				log.Warning(fmt.Sprintf("[CephStorageClassReconciler] Reconciler will requeue the request, name: %s", request.Name))
+				return reconcile.Result{
+					RequeueAfter: cfg.RequeueStorageClassInterval * time.Second,
+				}, nil
+			}
+
+			log.Info(fmt.Sprintf("[CephStorageClassReconciler] ends Reconcile for the CephStorageClass %q", request.Name))
+			return reconcile.Result{}, nil
+		}),
+	})
+	if err != nil {
+		log.Error(err, "[RunCephStorageClassWatcherController] unable to create controller")
+		return nil, err
+	}
+
+	err = c.Watch(source.Kind(mgr.GetCache(), &v1alpha1.CephStorageClass{}), handler.Funcs{
+		CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) {
+			log.Info(fmt.Sprintf("[CreateFunc] get event for CephStorageClass %q. Add to the queue", e.Object.GetName()))
+			request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.Object.GetNamespace(), Name: e.Object.GetName()}}
+			q.Add(request)
+		},
+		UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) {
+			log.Info(fmt.Sprintf("[UpdateFunc] get event for CephStorageClass %q. Check if it should be reconciled", e.ObjectNew.GetName()))
+
+			oldCephSC, ok := e.ObjectOld.(*v1alpha1.CephStorageClass)
+			if !ok {
+				err = errors.New("unable to cast event object to a given type")
+				log.Error(err, "[UpdateFunc] an error occurred while handling update event")
+				return
+			}
+			newCephSC, ok := e.ObjectNew.(*v1alpha1.CephStorageClass)
+			if !ok {
+				err = errors.New("unable to cast event object to a given type")
+				log.Error(err, "[UpdateFunc] an error occurred while handling update event")
+				return
+			}
+
+			if reflect.DeepEqual(oldCephSC.Spec, newCephSC.Spec) && newCephSC.DeletionTimestamp == nil {
+				log.Info(fmt.Sprintf("[UpdateFunc] an update event for the CephStorageClass %s has no Spec field updates. It will not be reconciled", newCephSC.Name))
+				return
+			}
+
+			log.Info(fmt.Sprintf("[UpdateFunc] the CephStorageClass %q will be reconciled. Add to the queue", newCephSC.Name))
+			request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: newCephSC.Namespace, Name: newCephSC.Name}}
+			q.Add(request)
+		},
+	})
+	if err != nil {
+		log.Error(err, "[RunCephStorageClassWatcherController] unable to watch the events")
+		return nil, err
+	}
+
+	return c, nil
+}
+
+func RunStorageClassEventReconcile(ctx context.Context, cl client.Client, log logger.Logger, scList *v1.StorageClassList, cephSC *v1alpha1.CephStorageClass, controllerNamespace string) (shouldRequeue bool, msg string, err error) {
+	log.Debug(fmt.Sprintf("[RunStorageClassEventReconcile] starts reconciliation of CephStorageClass, name: %s", cephSC.Name))
+	valid, msg := validateCephStorageClassSpec(cephSC)
+	if !valid {
+		err = fmt.Errorf("[RunStorageClassEventReconcile] CephStorageClass %s has invalid spec: %s", cephSC.Name, msg)
+		return false, msg, err
+	}
+	log.Debug(fmt.Sprintf("[RunStorageClassEventReconcile] CephStorageClass %s has valid spec", cephSC.Name))
+
+	added, err := addFinalizerIfNotExists(ctx, cl, cephSC, CephStorageClassControllerFinalizerName)
+	if err != nil {
+		err = fmt.Errorf("[RunStorageClassEventReconcile] unable to add a finalizer %s to the CephStorageClass %s: %w", CephStorageClassControllerFinalizerName, cephSC.Name, err)
+		return true, err.Error(), err
+	}
+	log.Debug(fmt.Sprintf("[RunStorageClassEventReconcile] finalizer %s was added to the CephStorageClass %s: %t", CephStorageClassControllerFinalizerName, cephSC.Name, added))
+
+	clusterID, err := getClusterID(ctx, cl, cephSC)
+	if err != nil {
+		err = fmt.Errorf("[RunStorageClassEventReconcile] unable to get clusterID for CephStorageClass %s: %w", cephSC.Name, err)
+		return true, err.Error(), err
+	}
+
+	reconcileTypeForStorageClass, err := IdentifyReconcileFuncForStorageClass(log, scList, cephSC, controllerNamespace, clusterID)
+	if err != nil {
+		err = fmt.Errorf("[RunStorageClassEventReconcile] error occurred while identifying the reconcile function for StorageClass %s: %w", cephSC.Name, err)
+		return true, err.Error(), err
+	}
+
+	shouldRequeue = false
+	log.Debug(fmt.Sprintf("[RunStorageClassEventReconcile] Successfully identified the reconcile type for StorageClass %s: %s", cephSC.Name, reconcileTypeForStorageClass))
+	switch reconcileTypeForStorageClass {
+	case internal.CreateReconcile:
+		shouldRequeue, msg, err = reconcileStorageClassCreateFunc(ctx, cl, log, scList, cephSC, controllerNamespace, clusterID)
+	case internal.UpdateReconcile:
+		shouldRequeue, msg, err = reconcileStorageClassUpdateFunc(ctx, cl, log, scList, cephSC, controllerNamespace, clusterID)
+	case internal.DeleteReconcile:
+		shouldRequeue, msg, err = reconcileStorageClassDeleteFunc(ctx, cl, log, scList, cephSC)
+	default:
+		log.Debug(fmt.Sprintf("[RunStorageClassEventReconcile] StorageClass for CephStorageClass %s should not be reconciled", cephSC.Name))
+		msg = "Successfully reconciled"
+	}
+	log.Debug(fmt.Sprintf("[RunStorageClassEventReconcile] ends reconciliation of StorageClass, name: %s, shouldRequeue: %t, err: %v", cephSC.Name, shouldRequeue, err))
+
+	if err != nil || shouldRequeue {
+		return shouldRequeue, msg, err
+	}
+
+	log.Debug(fmt.Sprintf("[RunStorageClassEventReconcile] Finish all reconciliations for CephStorageClass %q.", cephSC.Name))
+	return false, msg, nil
+}
diff --git a/images/controller/pkg/controller/ceph_storage_class_watcher_func.go b/images/controller/pkg/controller/ceph_storage_class_watcher_func.go
new file mode 100644
index 0000000..b63323a
--- /dev/null
+++ b/images/controller/pkg/controller/ceph_storage_class_watcher_func.go
@@ -0,0 +1,507 @@
+/*
+Copyright 2024 Flant JSC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package controller + +import ( + "context" + "d8-controller/api/v1alpha1" + storagev1alpha1 "d8-controller/api/v1alpha1" + "d8-controller/pkg/internal" + "d8-controller/pkg/logger" + "fmt" + "reflect" + "strings" + + "slices" + + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func IdentifyReconcileFuncForStorageClass(log logger.Logger, scList *v1.StorageClassList, cephSC *storagev1alpha1.CephStorageClass, controllerNamespace, clusterID string) (reconcileType string, err error) { + if shouldReconcileByDeleteFunc(cephSC) { + return internal.DeleteReconcile, nil + } + + if shouldReconcileStorageClassByCreateFunc(scList, cephSC) { + return internal.CreateReconcile, nil + } + + should, err := shouldReconcileStorageClassByUpdateFunc(log, scList, cephSC, controllerNamespace, clusterID) + if err != nil { + return "", err + } + if should { + return internal.UpdateReconcile, nil + } + + return "", nil +} + +func shouldReconcileStorageClassByCreateFunc(scList *v1.StorageClassList, cephSC *storagev1alpha1.CephStorageClass) bool { + if cephSC.DeletionTimestamp != nil { + return false + } + + for _, sc := range scList.Items { + if sc.Name == cephSC.Name { + return false + } + } + + return true +} + +func shouldReconcileStorageClassByUpdateFunc(log logger.Logger, scList *v1.StorageClassList, cephSC *storagev1alpha1.CephStorageClass, controllerNamespace, clusterID string) (bool, error) { + if cephSC.DeletionTimestamp != nil { + return false, nil + } + + for _, oldSC := range scList.Items { + if oldSC.Name == cephSC.Name { + if slices.Contains(allowedProvisioners, oldSC.Provisioner) { + newSC, err := ConfigureStorageClass(cephSC, controllerNamespace, clusterID) + if err != nil { + return false, err + } + + diff, err := GetSCDiff(&oldSC, newSC) + if err != nil { + return false, err + } + + if diff != "" { + log.Debug(fmt.Sprintf("[shouldReconcileStorageClassByUpdateFunc] a storage class %s should be updated. 
Diff: %s", oldSC.Name, diff))
+					return true, nil
+				}
+
+				if cephSC.Status != nil && cephSC.Status.Phase == v1alpha1.PhaseFailed {
+					return true, nil
+				}
+
+				return false, nil
+
+			} else {
+				err := fmt.Errorf("a storage class %s with provisioner %s does not belong to allowed provisioners: %v", oldSC.Name, oldSC.Provisioner, allowedProvisioners)
+				return false, err
+			}
+		}
+	}
+
+	err := fmt.Errorf("a storage class %s does not exist", cephSC.Name)
+	return false, err
+}
+
+func reconcileStorageClassCreateFunc(
+	ctx context.Context,
+	cl client.Client,
+	log logger.Logger,
+	scList *v1.StorageClassList,
+	cephSC *storagev1alpha1.CephStorageClass,
+	controllerNamespace, clusterID string,
+) (shouldRequeue bool, msg string, err error) {
+	log.Debug(fmt.Sprintf("[reconcileStorageClassCreateFunc] starts for CephStorageClass %q", cephSC.Name))
+
+	log.Debug(fmt.Sprintf("[reconcileStorageClassCreateFunc] starts storage class configuration for the CephStorageClass, name: %s", cephSC.Name))
+	newSC, err := ConfigureStorageClass(cephSC, controllerNamespace, clusterID)
+	if err != nil {
+		err = fmt.Errorf("[reconcileStorageClassCreateFunc] unable to configure a Storage Class for the CephStorageClass %s: %w", cephSC.Name, err)
+		return false, err.Error(), err
+	}
+
+	log.Debug(fmt.Sprintf("[reconcileStorageClassCreateFunc] successfully configured storage class for the CephStorageClass, name: %s", cephSC.Name))
+	log.Trace(fmt.Sprintf("[reconcileStorageClassCreateFunc] storage class: %+v", newSC))
+
+	created, err := createStorageClassIfNotExists(ctx, cl, scList, newSC)
+	if err != nil {
+		err = fmt.Errorf("[reconcileStorageClassCreateFunc] unable to create a Storage Class %s: %w", newSC.Name, err)
+		return true, err.Error(), err
+	}
+
+	log.Debug(fmt.Sprintf("[reconcileStorageClassCreateFunc] a storage class %s was created: %t", newSC.Name, created))
+	if created {
+		log.Info(fmt.Sprintf("[reconcileStorageClassCreateFunc] successfully created storage class, name: %s", newSC.Name))
+	} else {
+		err = fmt.Errorf("[reconcileStorageClassCreateFunc] Storage class %s already exists", newSC.Name)
+		return true, err.Error(), err
+	}
+
+	return false, "Successfully created", nil
+}
+
+func reconcileStorageClassUpdateFunc(
+	ctx context.Context,
+	cl client.Client,
+	log logger.Logger,
+	scList *v1.StorageClassList,
+	cephSC *storagev1alpha1.CephStorageClass,
+	controllerNamespace, clusterID string,
+) (shouldRequeue bool, msg string, err error) {
+	log.Debug(fmt.Sprintf("[reconcileStorageClassUpdateFunc] starts for CephStorageClass %q", cephSC.Name))
+
+	var oldSC *v1.StorageClass
+	for _, s := range scList.Items {
+		if s.Name == cephSC.Name {
+			oldSC = &s
+			break
+		}
+	}
+
+	if oldSC == nil {
+		err = fmt.Errorf("[reconcileStorageClassUpdateFunc] unable to find a storage class for the CephStorageClass %s", cephSC.Name)
+		return true, err.Error(), err
+	}
+
+	log.Debug(fmt.Sprintf("[reconcileStorageClassUpdateFunc] successfully found a storage class for the CephStorageClass, name: %s", cephSC.Name))
+	log.Trace(fmt.Sprintf("[reconcileStorageClassUpdateFunc] storage class: %+v", oldSC))
+
+	newSC, err := ConfigureStorageClass(cephSC, controllerNamespace, clusterID)
+	if err != nil {
+		err = fmt.Errorf("[reconcileStorageClassUpdateFunc] unable to configure a Storage Class for the CephStorageClass %s: %w", cephSC.Name, err)
+		return false, err.Error(), err
+	}
+	log.Debug(fmt.Sprintf("[reconcileStorageClassUpdateFunc] successfully configured storage class for the CephStorageClass, name: %s", 
cephSC.Name)) + log.Trace(fmt.Sprintf("[reconcileStorageClassUpdateFunc] new storage class: %+v", newSC)) + log.Trace(fmt.Sprintf("[reconcileStorageClassUpdateFunc] old storage class: %+v", oldSC)) + + err = recreateStorageClass(ctx, cl, oldSC, newSC) + if err != nil { + err = fmt.Errorf("[reconcileStorageClassUpdateFunc] unable to recreate a Storage Class %s: %w", newSC.Name, err) + return true, err.Error(), err + } + + log.Info(fmt.Sprintf("[reconcileStorageClassUpdateFunc] a Storage Class %s was successfully recreated", newSC.Name)) + + return false, "Successfully updated", nil +} + +func reconcileStorageClassDeleteFunc( + ctx context.Context, + cl client.Client, + log logger.Logger, + scList *v1.StorageClassList, + cephSC *storagev1alpha1.CephStorageClass, +) (shouldRequeue bool, msg string, err error) { + log.Debug(fmt.Sprintf("[reconcileStorageClassDeleteFunc] starts for CephStorageClass %q", cephSC.Name)) + + var sc *v1.StorageClass + for _, s := range scList.Items { + if s.Name == cephSC.Name { + sc = &s + break + } + } + + if sc == nil { + log.Info(fmt.Sprintf("[reconcileStorageClassDeleteFunc] no storage class found for the CephStorageClass, name: %s", cephSC.Name)) + } + + if sc != nil { + log.Info(fmt.Sprintf("[reconcileStorageClassDeleteFunc] successfully found a storage class for the CephStorageClass %s", cephSC.Name)) + log.Debug(fmt.Sprintf("[reconcileStorageClassDeleteFunc] starts identifying a provisioner for the storage class %s", sc.Name)) + + if slices.Contains(allowedProvisioners, sc.Provisioner) { + log.Info(fmt.Sprintf("[reconcileStorageClassDeleteFunc] the storage class %s provisioner %s belongs to allowed provisioners: %v", sc.Name, sc.Provisioner, allowedProvisioners)) + err := deleteStorageClass(ctx, cl, sc) + if err != nil { + err = fmt.Errorf("[reconcileStorageClassDeleteFunc] unable to delete a storage class %s: %w", sc.Name, err) + return true, err.Error(), err + } + log.Info(fmt.Sprintf("[reconcileStorageClassDeleteFunc] successfully deleted a storage class, name: %s", sc.Name)) + } + + if !slices.Contains(allowedProvisioners, sc.Provisioner) { + log.Info(fmt.Sprintf("[reconcileStorageClassDeleteFunc] a storage class %s with provisioner %s does not belong to allowed provisioners: %v. 
Skip deletion of storage class", sc.Name, sc.Provisioner, allowedProvisioners)) + } + } + + _, err = removeFinalizerIfExists(ctx, cl, cephSC, CephStorageClassControllerFinalizerName) + if err != nil { + err = fmt.Errorf("[reconcileStorageClassDeleteFunc] unable to remove a finalizer %s from the CephStorageClass %s: %w", CephStorageClassControllerFinalizerName, cephSC.Name, err) + return true, err.Error(), err + } + + log.Debug("[reconcileStorageClassDeleteFunc] ends the reconciliation") + return false, "", nil +} + +func ConfigureStorageClass(cephSC *storagev1alpha1.CephStorageClass, controllerNamespace, clusterID string) (*v1.StorageClass, error) { + provisioner := GetStorageClassProvisioner(cephSC.Spec.Type) + allowVolumeExpansion := true + reclaimPolicy := corev1.PersistentVolumeReclaimPolicy(cephSC.Spec.ReclaimPolicy) + volumeBindingMode := v1.VolumeBindingImmediate + + params, err := GetStoragecClassParams(cephSC, controllerNamespace, clusterID) + if err != nil { + err = fmt.Errorf("CephStorageClass %q: unable to get a storage class parameters: %w", cephSC.Name, err) + return nil, err + } + + mountOpt := storagev1alpha1.DefaultMountOptions + + sc := &v1.StorageClass{ + TypeMeta: metav1.TypeMeta{ + Kind: StorageClassKind, + APIVersion: StorageClassAPIVersion, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: cephSC.Name, + Namespace: cephSC.Namespace, + Finalizers: []string{CephStorageClassControllerFinalizerName}, + Labels: map[string]string{ + internal.StorageManagedLabelKey: CephStorageClassCtrlName, + }, + }, + Parameters: params, + Provisioner: provisioner, + ReclaimPolicy: &reclaimPolicy, + VolumeBindingMode: &volumeBindingMode, + AllowVolumeExpansion: &allowVolumeExpansion, + MountOptions: mountOpt, + } + + return sc, nil +} + +func GetStorageClassProvisioner(cephStorageClasstype string) string { + provisioner := "" + switch cephStorageClasstype { + case storagev1alpha1.CephStorageClassTypeRBD: + provisioner = CephStorageClassRBDProvisioner + case storagev1alpha1.CephStorageClassTypeCephFS: + provisioner = CephStorageClassCephFSProvisioner + } + + return provisioner + +} + +func GetStoragecClassParams(cephSC *storagev1alpha1.CephStorageClass, controllerNamespace, clusterID string) (map[string]string, error) { + secretName := internal.CephClusterConnectionSecretPrefix + cephSC.Spec.ClusterConnectionName + + params := map[string]string{ + "clusterID": clusterID, + "csi.storage.k8s.io/provisioner-secret-name": secretName, + "csi.storage.k8s.io/provisioner-secret-namespace": controllerNamespace, + "csi.storage.k8s.io/controller-expand-secret-name": secretName, + "csi.storage.k8s.io/controller-expand-secret-namespace": controllerNamespace, + "csi.storage.k8s.io/node-stage-secret-name": secretName, + "csi.storage.k8s.io/node-stage-secret-namespace": controllerNamespace, + } + + if cephSC.Spec.Type == storagev1alpha1.CephStorageClassTypeRBD { + params["imageFeatures"] = "layering" + params["csi.storage.k8s.io/fstype"] = cephSC.Spec.RBD.DefaultFSType + params["pool"] = cephSC.Spec.RBD.Pool + } + + if cephSC.Spec.Type == storagev1alpha1.CephStorageClassTypeCephFS { + params["fsName"] = cephSC.Spec.CephFS.FSName + params["pool"] = cephSC.Spec.CephFS.Pool + } + + return params, nil +} + +func updateCephStorageClassPhase(ctx context.Context, cl client.Client, cephSC *storagev1alpha1.CephStorageClass, phase, reason string) error { + if cephSC.Status == nil { + cephSC.Status = &storagev1alpha1.CephStorageClassStatus{} + } + cephSC.Status.Phase = phase + cephSC.Status.Reason = reason + + // TODO: 
add retry logic + err := cl.Status().Update(ctx, cephSC) + if err != nil { + return err + } + + return nil +} + +func createStorageClassIfNotExists(ctx context.Context, cl client.Client, scList *v1.StorageClassList, sc *v1.StorageClass) (bool, error) { + for _, s := range scList.Items { + if s.Name == sc.Name { + return false, nil + } + } + + err := cl.Create(ctx, sc) + if err != nil { + return false, err + } + + return true, err +} + +func GetSCDiff(oldSC, newSC *v1.StorageClass) (string, error) { + + if oldSC.Provisioner != newSC.Provisioner { + err := fmt.Errorf("CephStorageClass %q: the provisioner field is different in the StorageClass %q", newSC.Name, oldSC.Name) + return "", err + } + + if *oldSC.ReclaimPolicy != *newSC.ReclaimPolicy { + diff := fmt.Sprintf("ReclaimPolicy: %q -> %q", *oldSC.ReclaimPolicy, *newSC.ReclaimPolicy) + return diff, nil + } + + if *oldSC.VolumeBindingMode != *newSC.VolumeBindingMode { + diff := fmt.Sprintf("VolumeBindingMode: %q -> %q", *oldSC.VolumeBindingMode, *newSC.VolumeBindingMode) + return diff, nil + } + + if *oldSC.AllowVolumeExpansion != *newSC.AllowVolumeExpansion { + diff := fmt.Sprintf("AllowVolumeExpansion: %t -> %t", *oldSC.AllowVolumeExpansion, *newSC.AllowVolumeExpansion) + return diff, nil + } + + if !reflect.DeepEqual(oldSC.Parameters, newSC.Parameters) { + diff := fmt.Sprintf("Parameters: %+v -> %+v", oldSC.Parameters, newSC.Parameters) + return diff, nil + } + + if !reflect.DeepEqual(oldSC.MountOptions, newSC.MountOptions) { + diff := fmt.Sprintf("MountOptions: %v -> %v", oldSC.MountOptions, newSC.MountOptions) + return diff, nil + } + + return "", nil +} + +func recreateStorageClass(ctx context.Context, cl client.Client, oldSC, newSC *v1.StorageClass) error { + // It is necessary to pass the original StorageClass to the delete operation because + // the deletion will not succeed if the fields in the StorageClass provided to delete + // differ from those currently in the cluster. 
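+	// A plain Update is not used here because several StorageClass spec fields
+	// (for example provisioner and parameters) are immutable in the Kubernetes API,
+	// so spec changes coming from a CephStorageClass are applied by deleting the
+	// existing StorageClass and creating a new one with the desired configuration.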
+	err := deleteStorageClass(ctx, cl, oldSC)
+	if err != nil {
+		err = fmt.Errorf("[recreateStorageClass] unable to delete a storage class %s: %s", oldSC.Name, err.Error())
+		return err
+	}
+
+	err = cl.Create(ctx, newSC)
+	if err != nil {
+		err = fmt.Errorf("[recreateStorageClass] unable to create a storage class %s: %s", newSC.Name, err.Error())
+		return err
+	}
+
+	return nil
+}
+
+func deleteStorageClass(ctx context.Context, cl client.Client, sc *v1.StorageClass) error {
+	if !slices.Contains(allowedProvisioners, sc.Provisioner) {
+		return fmt.Errorf("a storage class %s with provisioner %s does not belong to allowed provisioners: %v", sc.Name, sc.Provisioner, allowedProvisioners)
+	}
+
+	_, err := removeFinalizerIfExists(ctx, cl, sc, CephStorageClassControllerFinalizerName)
+	if err != nil {
+		return err
+	}
+
+	err = cl.Delete(ctx, sc)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func validateCephStorageClassSpec(cephSC *storagev1alpha1.CephStorageClass) (bool, string) {
+	if cephSC.DeletionTimestamp != nil {
+		return true, ""
+	}
+
+	var (
+		failedMsgBuilder strings.Builder
+		validationPassed = true
+	)
+
+	failedMsgBuilder.WriteString("Validation of CephStorageClass failed: ")
+
+	if cephSC.Spec.ClusterConnectionName == "" {
+		validationPassed = false
+		failedMsgBuilder.WriteString("the spec.clusterConnectionName field is empty; ")
+	}
+
+	if cephSC.Spec.ReclaimPolicy == "" {
+		validationPassed = false
+		failedMsgBuilder.WriteString("the spec.reclaimPolicy field is empty; ")
+	}
+
+	if cephSC.Spec.Type == "" {
+		validationPassed = false
+		failedMsgBuilder.WriteString("the spec.type field is empty; ")
+	}
+
+	switch cephSC.Spec.Type {
+	case storagev1alpha1.CephStorageClassTypeRBD:
+		if cephSC.Spec.RBD == nil {
+			validationPassed = false
+			failedMsgBuilder.WriteString(fmt.Sprintf("CephStorageClass type is %s but the spec.rbd field is empty; ", storagev1alpha1.CephStorageClassTypeRBD))
+		} else {
+			if cephSC.Spec.RBD.DefaultFSType == "" {
+				validationPassed = false
+				failedMsgBuilder.WriteString("the spec.rbd.defaultFSType field is empty; ")
+			}
+
+			if cephSC.Spec.RBD.Pool == "" {
+				validationPassed = false
+				failedMsgBuilder.WriteString("the spec.rbd.pool field is empty; ")
+			}
+		}
+	case storagev1alpha1.CephStorageClassTypeCephFS:
+		if cephSC.Spec.CephFS == nil {
+			validationPassed = false
+			failedMsgBuilder.WriteString(fmt.Sprintf("CephStorageClass type is %s but the spec.cephfs field is empty; ", storagev1alpha1.CephStorageClassTypeCephFS))
+		} else {
+			if cephSC.Spec.CephFS.FSName == "" {
+				validationPassed = false
+				failedMsgBuilder.WriteString("the spec.cephfs.fsName field is empty; ")
+			}
+
+			if cephSC.Spec.CephFS.Pool == "" {
+				validationPassed = false
+				failedMsgBuilder.WriteString("the spec.cephfs.pool field is empty; ")
+			}
+		}
+	default:
+		validationPassed = false
+		failedMsgBuilder.WriteString(fmt.Sprintf("the spec.type field is not valid: %s. 
Allowed values: %s, %s", cephSC.Spec.Type, storagev1alpha1.CephStorageClassTypeRBD, storagev1alpha1.CephStorageClassTypeCephFS)) + } + + return validationPassed, failedMsgBuilder.String() +} + +func getClusterID(ctx context.Context, cl client.Client, cephSC *storagev1alpha1.CephStorageClass) (string, error) { + clusterConnectionName := cephSC.Spec.ClusterConnectionName + clusterConnection := &storagev1alpha1.CephClusterConnection{} + err := cl.Get(ctx, client.ObjectKey{Namespace: cephSC.Namespace, Name: clusterConnectionName}, clusterConnection) + if err != nil { + err = fmt.Errorf("[getClusterID] CephStorageClass %q: unable to get a CephClusterConnection %q: %w", cephSC.Name, clusterConnectionName, err) + return "", err + } + + clusterID := clusterConnection.Spec.ClusterID + if clusterID == "" { + err = fmt.Errorf("[getClusterID] CephStorageClass %q: the CephClusterConnection %q has an empty spec.clusterID field", cephSC.Name, clusterConnectionName) + return "", err + } + + return clusterID, nil +} diff --git a/images/controller/pkg/controller/ceph_storage_class_watcher_test.go b/images/controller/pkg/controller/ceph_storage_class_watcher_test.go new file mode 100644 index 0000000..a6cf365 --- /dev/null +++ b/images/controller/pkg/controller/ceph_storage_class_watcher_test.go @@ -0,0 +1,648 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller_test + +import ( + "context" + v1alpha1 "d8-controller/api/v1alpha1" + "d8-controller/pkg/controller" + "d8-controller/pkg/internal" + "d8-controller/pkg/logger" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/storage/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe(controller.CephStorageClassCtrlName, func() { + const ( + controllerNamespace = "test-namespace" + nameForCephSC = "example-ceph-fs" + nameForRBDSC = "example-rbd" + nameForBadSC = "example-bad" + ) + var ( + ctx = context.Background() + cl = NewFakeClient() + log = logger.Logger{} + + clusterConnectionName = "ceph-connection" + clusterID1 = "clusterID1" + reclaimPolicyDelete = "Delete" + reclaimPolicyRetain = "Retain" + storageTypeCephFS = "cephfs" + storageTypeRBD = "rbd" + fsName = "myfs" + pool = "mypool" + // defaultFSType = "ext4" + ) + + It("Create_ceph_sc_with_not_existing_ceph_connection", func() { + cephSCtemplate := generateCephStorageClass(CephStorageClassConfig{ + Name: nameForCephSC, + ClusterConnectionName: "not-existing", + ReclaimPolicy: reclaimPolicyDelete, + Type: storageTypeCephFS, + CephFS: &CephFSConfig{ + FSName: fsName, + Pool: pool, + }, + }) + + err := cl.Create(ctx, cephSCtemplate) + Expect(err).NotTo(HaveOccurred()) + + csc := &v1alpha1.CephStorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForCephSC}, csc) + Expect(err).NotTo(HaveOccurred()) + + Expect(csc).NotTo(BeNil()) + Expect(csc.Name).To(Equal(nameForCephSC)) + Expect(csc.Finalizers).To(HaveLen(0)) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + + shouldRequeue, _, err := controller.RunStorageClassEventReconcile(ctx, cl, log, scList, csc, controllerNamespace) + Expect(err).To(HaveOccurred()) + Expect(shouldRequeue).To(BeTrue()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForCephSC}, csc) + Expect(err).NotTo(HaveOccurred()) + Expect(csc.Finalizers).To(HaveLen(1)) + Expect(csc.Finalizers).To(ContainElement(controller.CephStorageClassControllerFinalizerName)) + + sc := &v1.StorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForCephSC}, sc) + Expect(k8serrors.IsNotFound(err)).To(BeTrue()) + + csc.Finalizers = nil + err = cl.Update(ctx, csc) + Expect(err).NotTo(HaveOccurred()) + err = cl.Delete(ctx, csc) + Expect(err).NotTo(HaveOccurred()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForCephSC}, csc) + Expect(k8serrors.IsNotFound(err)).To(BeTrue()) + }) + + It("Create_ceph_cluster_connection", func() { + cephClusterConnection := &v1alpha1.CephClusterConnection{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterConnectionName, + }, + Spec: v1alpha1.CephClusterConnectionSpec{ + ClusterID: clusterID1, + Monitors: []string{"mon1", "mon2", "mon3"}, + UserID: "admin", + UserKey: "key", + }, + } + + err := cl.Create(ctx, cephClusterConnection) + Expect(err).NotTo(HaveOccurred()) + + }) + + It("Create_ceph_sc_with_cephfs", func() { + cephSCtemplate := generateCephStorageClass(CephStorageClassConfig{ + Name: nameForCephSC, + ClusterConnectionName: clusterConnectionName, + ReclaimPolicy: reclaimPolicyDelete, + Type: storageTypeCephFS, + CephFS: &CephFSConfig{ + FSName: fsName, + Pool: pool, + }, + }) + + err := cl.Create(ctx, cephSCtemplate) + Expect(err).NotTo(HaveOccurred()) + + csc := &v1alpha1.CephStorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForCephSC}, csc) + Expect(err).NotTo(HaveOccurred()) + + Expect(csc).NotTo(BeNil()) + Expect(csc.Name).To(Equal(nameForCephSC)) + Expect(csc.Finalizers).To(HaveLen(0)) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, 
scList) + Expect(err).NotTo(HaveOccurred()) + + shouldRequeue, _, err := controller.RunStorageClassEventReconcile(ctx, cl, log, scList, csc, controllerNamespace) + Expect(err).NotTo(HaveOccurred()) + Expect(shouldRequeue).To(BeFalse()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForCephSC}, csc) + Expect(err).NotTo(HaveOccurred()) + Expect(csc.Finalizers).To(HaveLen(1)) + Expect(csc.Finalizers).To(ContainElement(controller.CephStorageClassControllerFinalizerName)) + + sc := &v1.StorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForCephSC}, sc) + Expect(err).NotTo(HaveOccurred()) + performStandardChecksForCephSc(sc, nameForCephSC, controllerNamespace, CephStorageClassConfig{ + ClusterConnectionName: clusterConnectionName, + ReclaimPolicy: reclaimPolicyDelete, + Type: storageTypeCephFS, + CephFS: &CephFSConfig{ + FSName: fsName, + Pool: pool, + }, + }) + }) + + It("Update_ceph_sc_with_cephfs", func() { + csc := &v1alpha1.CephStorageClass{} + err := cl.Get(ctx, client.ObjectKey{Name: nameForCephSC}, csc) + Expect(err).NotTo(HaveOccurred()) + + csc.Spec.ReclaimPolicy = reclaimPolicyRetain + + err = cl.Update(ctx, csc) + Expect(err).NotTo(HaveOccurred()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForCephSC}, csc) + Expect(err).NotTo(HaveOccurred()) + + Expect(csc).NotTo(BeNil()) + Expect(csc.Name).To(Equal(nameForCephSC)) + Expect(csc.Finalizers).To(HaveLen(1)) + Expect(csc.Finalizers).To(ContainElement(controller.CephStorageClassControllerFinalizerName)) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + + shouldRequeue, _, err := controller.RunStorageClassEventReconcile(ctx, cl, log, scList, csc, controllerNamespace) + Expect(err).NotTo(HaveOccurred()) + Expect(shouldRequeue).To(BeFalse()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForCephSC}, csc) + Expect(err).NotTo(HaveOccurred()) + Expect(csc.Finalizers).To(HaveLen(1)) + Expect(csc.Finalizers).To(ContainElement(controller.CephStorageClassControllerFinalizerName)) + + sc := &v1.StorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForCephSC}, sc) + Expect(err).NotTo(HaveOccurred()) + performStandardChecksForCephSc(sc, nameForCephSC, controllerNamespace, CephStorageClassConfig{ + ClusterConnectionName: clusterConnectionName, + ReclaimPolicy: reclaimPolicyRetain, + Type: storageTypeCephFS, + CephFS: &CephFSConfig{ + FSName: fsName, + Pool: pool, + }, + }) + }) + + It("Remove_ceph_sc_with_cephfs", func() { + csc := &v1alpha1.CephStorageClass{} + err := cl.Get(ctx, client.ObjectKey{Name: nameForCephSC}, csc) + Expect(err).NotTo(HaveOccurred()) + + err = cl.Delete(ctx, csc) + Expect(err).NotTo(HaveOccurred()) + + csc = &v1alpha1.CephStorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForCephSC}, csc) + Expect(err).NotTo(HaveOccurred()) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + + shouldRequeue, _, err := controller.RunStorageClassEventReconcile(ctx, cl, log, scList, csc, controllerNamespace) + Expect(err).NotTo(HaveOccurred()) + Expect(shouldRequeue).To(BeFalse()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForCephSC}, csc) + Expect(k8serrors.IsNotFound(err)).To(BeTrue()) + + sc := &v1.StorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForCephSC}, sc) + Expect(k8serrors.IsNotFound(err)).To(BeTrue()) + }) + + It("Create_ceph_sc_with_rbd", func() { + cephSCtemplate := generateCephStorageClass(CephStorageClassConfig{ + Name: nameForRBDSC, + ClusterConnectionName: 
clusterConnectionName, + ReclaimPolicy: reclaimPolicyDelete, + Type: storageTypeRBD, + RBD: &RBDConfig{ + DefaultFSType: "ext4", + Pool: pool, + }, + }) + + err := cl.Create(ctx, cephSCtemplate) + Expect(err).NotTo(HaveOccurred()) + + csc := &v1alpha1.CephStorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForRBDSC}, csc) + Expect(err).NotTo(HaveOccurred()) + + Expect(csc).NotTo(BeNil()) + Expect(csc.Name).To(Equal(nameForRBDSC)) + Expect(csc.Finalizers).To(HaveLen(0)) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + + shouldRequeue, _, err := controller.RunStorageClassEventReconcile(ctx, cl, log, scList, csc, controllerNamespace) + Expect(err).NotTo(HaveOccurred()) + Expect(shouldRequeue).To(BeFalse()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForRBDSC}, csc) + Expect(err).NotTo(HaveOccurred()) + Expect(csc.Finalizers).To(HaveLen(1)) + Expect(csc.Finalizers).To(ContainElement(controller.CephStorageClassControllerFinalizerName)) + + sc := &v1.StorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForRBDSC}, sc) + Expect(err).NotTo(HaveOccurred()) + performStandardChecksForCephSc(sc, nameForRBDSC, controllerNamespace, CephStorageClassConfig{ + ClusterConnectionName: clusterConnectionName, + ReclaimPolicy: reclaimPolicyDelete, + Type: storageTypeRBD, + RBD: &RBDConfig{ + DefaultFSType: "ext4", + Pool: pool, + }, + }) + }) + + It("Update_ceph_sc_with_rbd", func() { + csc := &v1alpha1.CephStorageClass{} + err := cl.Get(ctx, client.ObjectKey{Name: nameForRBDSC}, csc) + Expect(err).NotTo(HaveOccurred()) + + csc.Spec.ReclaimPolicy = reclaimPolicyRetain + + err = cl.Update(ctx, csc) + Expect(err).NotTo(HaveOccurred()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForRBDSC}, csc) + Expect(err).NotTo(HaveOccurred()) + + Expect(csc).NotTo(BeNil()) + Expect(csc.Name).To(Equal(nameForRBDSC)) + Expect(csc.Finalizers).To(HaveLen(1)) + Expect(csc.Finalizers).To(ContainElement(controller.CephStorageClassControllerFinalizerName)) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + + shouldRequeue, _, err := controller.RunStorageClassEventReconcile(ctx, cl, log, scList, csc, controllerNamespace) + Expect(err).NotTo(HaveOccurred()) + Expect(shouldRequeue).To(BeFalse()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForRBDSC}, csc) + Expect(err).NotTo(HaveOccurred()) + Expect(csc.Finalizers).To(HaveLen(1)) + Expect(csc.Finalizers).To(ContainElement(controller.CephStorageClassControllerFinalizerName)) + + sc := &v1.StorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForRBDSC}, sc) + Expect(err).NotTo(HaveOccurred()) + performStandardChecksForCephSc(sc, nameForRBDSC, controllerNamespace, CephStorageClassConfig{ + ClusterConnectionName: clusterConnectionName, + ReclaimPolicy: reclaimPolicyRetain, + Type: storageTypeRBD, + RBD: &RBDConfig{ + DefaultFSType: "ext4", + Pool: pool, + }, + }) + }) + + It("Remove_ceph_sc_with_rbd", func() { + csc := &v1alpha1.CephStorageClass{} + err := cl.Get(ctx, client.ObjectKey{Name: nameForRBDSC}, csc) + Expect(err).NotTo(HaveOccurred()) + + err = cl.Delete(ctx, csc) + Expect(err).NotTo(HaveOccurred()) + + csc = &v1alpha1.CephStorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForRBDSC}, csc) + Expect(err).NotTo(HaveOccurred()) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + + shouldRequeue, _, err := controller.RunStorageClassEventReconcile(ctx, cl, log, scList, csc, 
controllerNamespace) + Expect(err).NotTo(HaveOccurred()) + Expect(shouldRequeue).To(BeFalse()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForRBDSC}, csc) + Expect(k8serrors.IsNotFound(err)).To(BeTrue()) + + sc := &v1.StorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForRBDSC}, sc) + Expect(k8serrors.IsNotFound(err)).To(BeTrue()) + }) + + It("Create_ceph_sc_when_sc_with_another_provisioner_exists", func() { + sc := &v1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: nameForRBDSC, + }, + Provisioner: "test-provisioner", + } + + err := cl.Create(ctx, sc) + Expect(err).NotTo(HaveOccurred()) + + cephSCtemplate := generateCephStorageClass(CephStorageClassConfig{ + Name: nameForRBDSC, + ClusterConnectionName: clusterConnectionName, + ReclaimPolicy: reclaimPolicyDelete, + Type: storageTypeCephFS, + CephFS: &CephFSConfig{ + FSName: fsName, + Pool: pool, + }, + }) + + err = cl.Create(ctx, cephSCtemplate) + Expect(err).NotTo(HaveOccurred()) + + csc := &v1alpha1.CephStorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForRBDSC}, csc) + Expect(err).NotTo(HaveOccurred()) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + + shouldRequeue, _, err := controller.RunStorageClassEventReconcile(ctx, cl, log, scList, csc, controllerNamespace) + Expect(err).To(HaveOccurred()) + Expect(shouldRequeue).To(BeTrue()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForRBDSC}, sc) + Expect(err).NotTo(HaveOccurred()) + Expect(sc.Provisioner).To(Equal("test-provisioner")) + Expect(sc.Finalizers).To(HaveLen(0)) + Expect(sc.Labels).To(HaveLen(0)) + }) + + It("Update_ceph_sc_when_sc_with_another_provisioner_exists", func() { + csc := &v1alpha1.CephStorageClass{} + err := cl.Get(ctx, client.ObjectKey{Name: nameForRBDSC}, csc) + Expect(err).NotTo(HaveOccurred()) + + csc.Spec.ReclaimPolicy = reclaimPolicyRetain + + err = cl.Update(ctx, csc) + Expect(err).NotTo(HaveOccurred()) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + + shouldRequeue, _, err := controller.RunStorageClassEventReconcile(ctx, cl, log, scList, csc, controllerNamespace) + Expect(err).To(HaveOccurred()) + Expect(shouldRequeue).To(BeTrue()) + + sc := &v1.StorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForRBDSC}, sc) + Expect(err).NotTo(HaveOccurred()) + Expect(sc.Provisioner).To(Equal("test-provisioner")) + Expect(sc.Finalizers).To(HaveLen(0)) + Expect(sc.Labels).To(HaveLen(0)) + }) + + It("Remove_ceph_sc_when_sc_with_another_provisioner_exists", func() { + csc := &v1alpha1.CephStorageClass{} + err := cl.Get(ctx, client.ObjectKey{Name: nameForRBDSC}, csc) + Expect(err).NotTo(HaveOccurred()) + + err = cl.Delete(ctx, csc) + Expect(err).NotTo(HaveOccurred()) + + csc = &v1alpha1.CephStorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForRBDSC}, csc) + Expect(err).NotTo(HaveOccurred()) + Expect(csc.Finalizers).To(HaveLen(1)) + Expect(csc.Finalizers).To(ContainElement(controller.CephStorageClassControllerFinalizerName)) + Expect(csc.DeletionTimestamp).NotTo(BeNil()) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + + shouldRequeue, _, err := controller.RunStorageClassEventReconcile(ctx, cl, log, scList, csc, controllerNamespace) + Expect(err).NotTo(HaveOccurred()) + Expect(shouldRequeue).To(BeFalse()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForRBDSC}, csc) + Expect(k8serrors.IsNotFound(err)).To(BeTrue()) + + sc := &v1.StorageClass{} + err = 
cl.Get(ctx, client.ObjectKey{Name: nameForRBDSC}, sc) + Expect(err).NotTo(HaveOccurred()) + Expect(sc.Provisioner).To(Equal("test-provisioner")) + Expect(sc.Finalizers).To(HaveLen(0)) + Expect(sc.Labels).To(HaveLen(0)) + }) + + It("Create_ceph_sc_with_invalid_type", func() { + cephSCtemplate := generateCephStorageClass(CephStorageClassConfig{ + Name: nameForBadSC, + ClusterConnectionName: clusterConnectionName, + ReclaimPolicy: reclaimPolicyDelete, + Type: "invalid", + CephFS: &CephFSConfig{ + FSName: fsName, + Pool: pool, + }, + }) + + err := cl.Create(ctx, cephSCtemplate) + Expect(err).NotTo(HaveOccurred()) + + csc := &v1alpha1.CephStorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForBadSC}, csc) + Expect(err).NotTo(HaveOccurred()) + Expect(csc).NotTo(BeNil()) + Expect(csc.Name).To(Equal(nameForBadSC)) + Expect(csc.Finalizers).To(HaveLen(0)) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + + shouldRequeue, _, err := controller.RunStorageClassEventReconcile(ctx, cl, log, scList, csc, controllerNamespace) + Expect(err).To(HaveOccurred()) + Expect(shouldRequeue).To(BeFalse()) + Expect(csc.Finalizers).To(HaveLen(0)) + + sc := &v1.StorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForBadSC}, sc) + Expect(k8serrors.IsNotFound(err)).To(BeTrue()) + + err = cl.Delete(ctx, csc) + Expect(err).NotTo(HaveOccurred()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForBadSC}, csc) + Expect(k8serrors.IsNotFound(err)).To(BeTrue()) + }) + + It("Create_ceph_sc_with_props_for_another_type", func() { + cephSCtemplate := generateCephStorageClass(CephStorageClassConfig{ + Name: nameForBadSC, + ClusterConnectionName: clusterConnectionName, + ReclaimPolicy: reclaimPolicyDelete, + Type: storageTypeCephFS, + RBD: &RBDConfig{ + DefaultFSType: "ext4", + Pool: pool, + }, + }) + + err := cl.Create(ctx, cephSCtemplate) + Expect(err).NotTo(HaveOccurred()) + + csc := &v1alpha1.CephStorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForBadSC}, csc) + Expect(err).NotTo(HaveOccurred()) + Expect(csc).NotTo(BeNil()) + Expect(csc.Name).To(Equal(nameForBadSC)) + Expect(csc.Finalizers).To(HaveLen(0)) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + + shouldRequeue, _, err := controller.RunStorageClassEventReconcile(ctx, cl, log, scList, csc, controllerNamespace) + Expect(err).To(HaveOccurred()) + Expect(shouldRequeue).To(BeFalse()) + Expect(csc.Finalizers).To(HaveLen(0)) + + sc := &v1.StorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForBadSC}, sc) + Expect(k8serrors.IsNotFound(err)).To(BeTrue()) + + err = cl.Delete(ctx, csc) + Expect(err).NotTo(HaveOccurred()) + + csc = &v1alpha1.CephStorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForBadSC}, csc) + Expect(k8serrors.IsNotFound(err)).To(BeTrue()) + }) + + It("Remove_ceph_cluster_connection", func() { + + cephClusterConnection := &v1alpha1.CephClusterConnection{} + err := cl.Get(ctx, client.ObjectKey{Name: clusterConnectionName}, cephClusterConnection) + Expect(err).NotTo(HaveOccurred()) + + err = cl.Delete(ctx, cephClusterConnection) + Expect(err).NotTo(HaveOccurred()) + + cephClusterConnection = &v1alpha1.CephClusterConnection{} + err = cl.Get(ctx, client.ObjectKey{Name: clusterConnectionName}, cephClusterConnection) + Expect(k8serrors.IsNotFound(err)).To(BeTrue()) + }) + +}) + +type CephStorageClassConfig struct { + Name string + ClusterConnectionName string + ReclaimPolicy string + Type string + CephFS 
*CephFSConfig + RBD *RBDConfig +} + +type CephFSConfig struct { + FSName string + Pool string +} + +type RBDConfig struct { + DefaultFSType string + Pool string +} + +func generateCephStorageClass(cfg CephStorageClassConfig) *v1alpha1.CephStorageClass { + var cephFS *v1alpha1.CephStorageClassCephFS + var rbd *v1alpha1.CephStorageClassRBD + + if cfg.CephFS != nil { + cephFS = &v1alpha1.CephStorageClassCephFS{ + FSName: cfg.CephFS.FSName, + Pool: cfg.CephFS.Pool, + } + } + + if cfg.RBD != nil { + rbd = &v1alpha1.CephStorageClassRBD{ + DefaultFSType: cfg.RBD.DefaultFSType, + Pool: cfg.RBD.Pool, + } + } + + return &v1alpha1.CephStorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: cfg.Name, + }, + Spec: v1alpha1.CephStorageClassSpec{ + ClusterConnectionName: cfg.ClusterConnectionName, + ReclaimPolicy: cfg.ReclaimPolicy, + Type: cfg.Type, + CephFS: cephFS, + RBD: rbd, + }, + } +} + +func performStandardChecksForCephSc(sc *v1.StorageClass, nameForTestResource, controllerNamespace string, cfg CephStorageClassConfig) { + Expect(sc).NotTo(BeNil()) + Expect(sc.Name).To(Equal(nameForTestResource)) + Expect(sc.Finalizers).To(HaveLen(1)) + Expect(sc.Finalizers).To(ContainElement(controller.CephStorageClassControllerFinalizerName)) + Expect(sc.Provisioner).To(Equal(controller.GetStorageClassProvisioner(cfg.Type))) + Expect(*sc.ReclaimPolicy).To(Equal(corev1.PersistentVolumeReclaimPolicy(cfg.ReclaimPolicy))) + Expect(*sc.VolumeBindingMode).To(Equal(v1.VolumeBindingImmediate)) + Expect(*sc.AllowVolumeExpansion).To(BeTrue()) + Expect(sc.Parameters).To(HaveKeyWithValue("csi.storage.k8s.io/provisioner-secret-name", internal.CephClusterConnectionSecretPrefix+cfg.ClusterConnectionName)) + Expect(sc.Parameters).To(HaveKeyWithValue("csi.storage.k8s.io/provisioner-secret-namespace", controllerNamespace)) + + if cfg.Type == "cephfs" { + Expect(sc.Parameters).To(HaveKeyWithValue("fsName", cfg.CephFS.FSName)) + Expect(sc.Parameters).To(HaveKeyWithValue("pool", cfg.CephFS.Pool)) + } else if cfg.Type == "rbd" { + Expect(sc.Parameters).To(HaveKeyWithValue("pool", cfg.RBD.Pool)) + Expect(sc.Parameters).To(HaveKeyWithValue("csi.storage.k8s.io/fstype", cfg.RBD.DefaultFSType)) + } +} diff --git a/images/controller/pkg/controller/common_func.go b/images/controller/pkg/controller/common_func.go new file mode 100644 index 0000000..57738d2 --- /dev/null +++ b/images/controller/pkg/controller/common_func.go @@ -0,0 +1,73 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "context" + "slices" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func shouldReconcileByDeleteFunc(obj metav1.Object) bool { + if obj.GetDeletionTimestamp() != nil { + return true + } + + return false +} + +func removeFinalizerIfExists(ctx context.Context, cl client.Client, obj metav1.Object, finalizerName string) (bool, error) { + removed := false + finalizers := obj.GetFinalizers() + for i, f := range finalizers { + if f == finalizerName { + finalizers = append(finalizers[:i], finalizers[i+1:]...) + removed = true + break + } + } + + if removed { + obj.SetFinalizers(finalizers) + err := cl.Update(ctx, obj.(client.Object)) + if err != nil { + return false, err + } + } + + return removed, nil +} + +func addFinalizerIfNotExists(ctx context.Context, cl client.Client, obj metav1.Object, finalizerName string) (bool, error) { + added := false + finalizers := obj.GetFinalizers() + if !slices.Contains(finalizers, finalizerName) { + finalizers = append(finalizers, finalizerName) + added = true + } + + if added { + obj.SetFinalizers(finalizers) + err := cl.Update(ctx, obj.(client.Object)) + if err != nil { + return false, err + } + } + return added, nil +} diff --git a/images/controller/pkg/controller/controller_suite_test.go b/images/controller/pkg/controller/controller_suite_test.go new file mode 100644 index 0000000..7574aa5 --- /dev/null +++ b/images/controller/pkg/controller/controller_suite_test.go @@ -0,0 +1,66 @@ +/* +Copyright 2023 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
"github.com/onsi/gomega" + sv1 "k8s.io/api/storage/v1" + extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + + apiruntime "k8s.io/apimachinery/pkg/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func TestController(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Controller Suite") +} + +func NewFakeClient() client.Client { + resourcesSchemeFuncs := []func(*apiruntime.Scheme) error{ + v1alpha1.AddToScheme, + clientgoscheme.AddToScheme, + extv1.AddToScheme, + v1.AddToScheme, + sv1.AddToScheme, + } + scheme := apiruntime.NewScheme() + for _, f := range resourcesSchemeFuncs { + err := f(scheme) + if err != nil { + println(fmt.Sprintf("Error adding scheme: %s", err)) + os.Exit(1) + } + } + + // See https://github.com/kubernetes-sigs/controller-runtime/issues/2362#issuecomment-1837270195 + builder := fake.NewClientBuilder().WithScheme(scheme).WithStatusSubresource(&v1alpha1.CephStorageClass{}, &v1alpha1.CephClusterConnection{}) + + cl := builder.Build() + return cl +} diff --git a/images/controller/pkg/internal/const.go b/images/controller/pkg/internal/const.go new file mode 100644 index 0000000..cd2ed60 --- /dev/null +++ b/images/controller/pkg/internal/const.go @@ -0,0 +1,29 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +const ( + CephClusterConnectionSecretPrefix = "csi-ceph-secret-for-" + StorageManagedLabelKey = "storage.deckhouse.io/managed-by" + CSICephConfigMapName = "ceph-csi-config" + CreateReconcile = "Create" + UpdateReconcile = "Update" + DeleteReconcile = "Delete" + + UpdateConfigMapActionUpdate = "update" + UpdateConfigMapActionDelete = "delete" +) diff --git a/images/controller/pkg/kubutils/kubernetes.go b/images/controller/pkg/kubutils/kubernetes.go new file mode 100644 index 0000000..4714cfe --- /dev/null +++ b/images/controller/pkg/kubutils/kubernetes.go @@ -0,0 +1,35 @@ +/* +Copyright 2024 Flant JSC +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubutils + +import ( + "fmt" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +func KubernetesDefaultConfigCreate() (*rest.Config, error) { + //todo validate empty + clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + clientcmd.NewDefaultClientConfigLoadingRules(), + &clientcmd.ConfigOverrides{}, + ) + + // Get a config to talk to API server + config, err := clientConfig.ClientConfig() + if err != nil { + return nil, fmt.Errorf("config kubernetes error %w", err) + } + return config, nil +} diff --git a/images/controller/pkg/logger/logger.go b/images/controller/pkg/logger/logger.go new file mode 100644 index 0000000..345af2b --- /dev/null +++ b/images/controller/pkg/logger/logger.go @@ -0,0 +1,84 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logger + +import ( + "flag" + "fmt" + "github.com/go-logr/logr" + "k8s.io/klog/v2" + "k8s.io/klog/v2/klogr" +) + +const ( + ErrorLevel Verbosity = "0" + WarningLevel Verbosity = "1" + InfoLevel Verbosity = "2" + DebugLevel Verbosity = "3" + TraceLevel Verbosity = "4" +) + +const ( + warnLvl = iota + 1 + infoLvl + debugLvl + traceLvl +) + +type ( + Verbosity string +) + +type Logger struct { + log logr.Logger +} + +func NewLogger(level Verbosity) (*Logger, error) { + klog.InitFlags(nil) + if err := flag.Set("v", string(level)); err != nil { + return nil, err + } + flag.Parse() + + log := klogr.New().WithCallDepth(1) + + return &Logger{log: log}, nil +} + +func (l Logger) GetLogger() logr.Logger { + return l.log +} + +func (l Logger) Error(err error, message string, keysAndValues ...interface{}) { + l.log.Error(err, fmt.Sprintf("ERROR %s", message), keysAndValues...) +} + +func (l Logger) Warning(message string, keysAndValues ...interface{}) { + l.log.V(warnLvl).Info(fmt.Sprintf("WARNING %s", message), keysAndValues...) +} + +func (l Logger) Info(message string, keysAndValues ...interface{}) { + l.log.V(infoLvl).Info(fmt.Sprintf("INFO %s", message), keysAndValues...) +} + +func (l Logger) Debug(message string, keysAndValues ...interface{}) { + l.log.V(debugLvl).Info(fmt.Sprintf("DEBUG %s", message), keysAndValues...) +} + +func (l Logger) Trace(message string, keysAndValues ...interface{}) { + l.log.V(traceLvl).Info(fmt.Sprintf("TRACE %s", message), keysAndValues...) 
+} diff --git a/openapi/config-values.yaml b/openapi/config-values.yaml index 556676e..f8005eb 100644 --- a/openapi/config-values.yaml +++ b/openapi/config-values.yaml @@ -3,12 +3,22 @@ required: - cephfsEnabled - rbdEnabled properties: - cephfsEnabled: - type: boolean - default: true - description: Cephfs driver state - rbdEnabled: - type: boolean - default: true - description: RBD driver state + cephfsEnabled: + type: boolean + default: true + description: Cephfs driver state + rbdEnabled: + type: boolean + default: true + description: RBD driver state + logLevel: + type: string + enum: + - ERROR + - WARN + - INFO + - DEBUG + - TRACE + description: Module log level + default: DEBUG diff --git a/templates/cephfs/storage-classes.yaml b/templates/cephfs/storage-classes.yaml deleted file mode 100644 index 7350c09..0000000 --- a/templates/cephfs/storage-classes.yaml +++ /dev/null @@ -1,37 +0,0 @@ -{{- range $cr := .Values.csiCeph.internal.crs }} - {{- if $cr.spec.cephfs }} - {{- range $sc := $cr.spec.cephfs.storageClasses }} ---- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: {{ $cr.name }}-{{ $sc.namePostfix }} - namespace: d8-{{ $.Chart.Name }} - annotations: - migration-volume-binding-mode-changed: "" -{{ include "helm_lib_module_labels" (list $ (dict "app" $.Chart.Name)) | indent 2 }} -provisioner: cephfs.csi.ceph.com -reclaimPolicy: {{ $sc.reclaimPolicy }} - {{- if $sc.allowVolumeExpansion }} -allowVolumeExpansion: {{ $sc.allowVolumeExpansion }} - {{- end }} - {{- if $sc.mountOptions }} -mountOptions: - {{- range $option := $sc.mountOptions }} - - {{ $option }} - {{- end }} - {{- end }} -volumeBindingMode: WaitForFirstConsumer -parameters: - csi.storage.k8s.io/provisioner-secret-name: csi-{{ $cr.name }} - csi.storage.k8s.io/provisioner-secret-namespace: d8-{{ $.Chart.Name }} - csi.storage.k8s.io/controller-expand-secret-name: csi-{{ $cr.name }} - csi.storage.k8s.io/controller-expand-secret-namespace: d8-{{ $.Chart.Name }} - csi.storage.k8s.io/node-stage-secret-name: csi-{{ $cr.name }} - csi.storage.k8s.io/node-stage-secret-namespace: d8-{{ $.Chart.Name }} - clusterID: {{ $cr.spec.clusterID }} - fsName: {{ $sc.fsName }} - pool: {{ $sc.pool }} - {{- end }} - {{- end }} -{{- end }} diff --git a/templates/cephfs/volume-snapshot-class.yaml b/templates/cephfs/volume-snapshot-class.yaml deleted file mode 100644 index e547011..0000000 --- a/templates/cephfs/volume-snapshot-class.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# Source https://github.com/ceph/ceph-csi/blob/devel/examples/cephfs/snapshotclass.yaml -{{- if (.Values.global.enabledModules | has "snapshot-controller") }} - {{- range $cr := .Values.csiCeph.internal.crs }} - {{- if $cr.spec.cephfs }} - {{- range $sc := $cr.spec.cephfs.storageClasses }} ---- -apiVersion: snapshot.storage.k8s.io/v1 -kind: VolumeSnapshotClass -metadata: - {{- include "helm_lib_module_labels" (list $ (dict "app" $.Chart.Name)) | nindent 2 }} - name: {{ $cr.name }}-{{ $sc.namePostfix }} -driver: cephfs.csi.ceph.com -parameters: - clusterID: {{ $cr.spec.clusterID }} - csi.storage.k8s.io/snapshotter-secret-name: csi-{{ $cr.name }} - csi.storage.k8s.io/snapshotter-secret-namespace: d8-{{ $.Chart.Name }} -deletionPolicy: Delete - {{- end }} - {{- end }} - {{- end }} -{{- end }} diff --git a/templates/controller/deployment.yaml b/templates/controller/deployment.yaml new file mode 100644 index 0000000..a3e3b43 --- /dev/null +++ b/templates/controller/deployment.yaml @@ -0,0 +1,95 @@ +{{- define "controller_resources" }} +cpu: 10m +memory: 25Mi +{{- end 
}} + +{{- if (.Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} +--- +apiVersion: autoscaling.k8s.io/v1 +kind: VerticalPodAutoscaler +metadata: + name: controller + namespace: d8-{{ .Chart.Name }} + {{- include "helm_lib_module_labels" (list . (dict "app" "controller")) | nindent 2 }} +spec: + targetRef: + apiVersion: "apps/v1" + kind: Deployment + name: controller + updatePolicy: + updateMode: "Auto" + resourcePolicy: + containerPolicies: + - containerName: "controller" + minAllowed: + {{- include "controller_resources" . | nindent 8 }} + maxAllowed: + cpu: 200m + memory: 100Mi +{{- end }} +--- +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: controller + namespace: d8-{{ .Chart.Name }} + {{- include "helm_lib_module_labels" (list . (dict "app" "controller" )) | nindent 2 }} +spec: + minAvailable: {{ include "helm_lib_is_ha_to_value" (list . 1 0) }} + selector: + matchLabels: + app: controller +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller + namespace: d8-{{ .Chart.Name }} + {{- include "helm_lib_module_labels" (list . (dict "app" "controller")) | nindent 2 }} +spec: + {{- include "helm_lib_deployment_on_master_strategy_and_replicas_for_ha" . | nindent 2 }} + revisionHistoryLimit: 2 + selector: + matchLabels: + app: controller + template: + metadata: + labels: + app: controller + spec: + {{- include "helm_lib_priority_class" (tuple . "cluster-medium") | nindent 6 }} + {{- include "helm_lib_node_selector" (tuple . "system") | nindent 6 }} + {{- include "helm_lib_tolerations" (tuple . "system") | nindent 6 }} + {{- include "helm_lib_module_pod_security_context_run_as_user_nobody" . | nindent 6 }} + imagePullSecrets: + - name: {{ .Chart.Name }}-module-registry + serviceAccountName: controller + containers: + - name: controller + image: {{ include "helm_lib_module_image" (list . "controller") }} + imagePullPolicy: IfNotPresent + resources: + requests: + {{- include "helm_lib_module_ephemeral_storage_only_logs" . | nindent 14 }} +{{- if not ( .Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} + {{- include "controller_resources" . | nindent 14 }} +{{- end }} + securityContext: + privileged: false + env: + - name: LOG_LEVEL +{{- if eq .Values.csiCeph.logLevel "ERROR" }} + value: "0" +{{- else if eq .Values.csiCeph.logLevel "WARN" }} + value: "1" +{{- else if eq .Values.csiCeph.logLevel "INFO" }} + value: "2" +{{- else if eq .Values.csiCeph.logLevel "DEBUG" }} + value: "3" +{{- else if eq .Values.csiCeph.logLevel "TRACE" }} + value: "4" +{{- end }} + - name: CONTROLLER_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace diff --git a/templates/controller/rbac-for-us.yaml b/templates/controller/rbac-for-us.yaml new file mode 100644 index 0000000..3cf96a4 --- /dev/null +++ b/templates/controller/rbac-for-us.yaml @@ -0,0 +1,111 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: controller + namespace: d8-{{ .Chart.Name }} + {{- include "helm_lib_module_labels" (list . (dict "app" "controller")) | nindent 2 }} + +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: d8:{{ .Chart.Name }}:controller + namespace: d8-{{ .Chart.Name }} + {{- include "helm_lib_module_labels" (list . 
(dict "app" "controller")) | nindent 2 }} +rules: + - apiGroups: + - "" + resources: + - secrets + - configmaps + verbs: + - get + - list + - watch + - create + - update + - delete + - apiGroups: + - "" + resources: + - events + verbs: + - create + - list + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - watch + - list + - delete + - update + - create + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: d8:{{ .Chart.Name }}:controller + {{- include "helm_lib_module_labels" (list . (dict "app" "controller")) | nindent 2 }} +rules: + - apiGroups: + - storage.deckhouse.io + resources: + - cephstorageclasses + - cephstorageclasses/status + - cephclusterconnections + - cephclusterconnections/status + verbs: + - get + - list + - create + - watch + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - create + - delete + - list + - get + - watch + - update +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: d8:{{ .Chart.Name }}:controller + namespace: d8-{{ .Chart.Name }} + {{- include "helm_lib_module_labels" (list . (dict "app" "controller")) | nindent 2 }} +subjects: + - kind: ServiceAccount + name: controller + namespace: d8-{{ .Chart.Name }} +roleRef: + kind: Role + name: d8:{{ .Chart.Name }}:controller + apiGroup: rbac.authorization.k8s.io + + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: d8:{{ .Chart.Name }}:controller + {{- include "helm_lib_module_labels" (list . (dict "app" "controller")) | nindent 2 }} +subjects: + - kind: ServiceAccount + name: controller + namespace: d8-{{ .Chart.Name }} +roleRef: + kind: ClusterRole + name: d8:{{ .Chart.Name }}:controller + apiGroup: rbac.authorization.k8s.io + + diff --git a/templates/rbd/storage-classes.yaml b/templates/rbd/storage-classes.yaml deleted file mode 100644 index ac1b5ce..0000000 --- a/templates/rbd/storage-classes.yaml +++ /dev/null @@ -1,36 +0,0 @@ -{{- range $cr := .Values.csiCeph.internal.crs }} - {{- if $cr.spec.rbd }} - {{- range $sc := $cr.spec.rbd.storageClasses }} ---- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: {{ $cr.name }}-{{ $sc.namePostfix }} - namespace: d8-{{ $.Chart.Name }} - annotations: - migration-volume-binding-mode-changed: "" -{{ include "helm_lib_module_labels" (list $ (dict "app" $.Chart.Name)) | indent 2 }} -provisioner: rbd.csi.ceph.com -volumeBindingMode: WaitForFirstConsumer -parameters: - clusterID: {{ $cr.spec.clusterID }} - pool: {{ $sc.pool }} - imageFeatures: layering - csi.storage.k8s.io/provisioner-secret-name: csi-{{ $cr.name }} - csi.storage.k8s.io/provisioner-secret-namespace: d8-{{ $.Chart.Name }} - csi.storage.k8s.io/controller-expand-secret-name: csi-{{ $cr.name }} - csi.storage.k8s.io/controller-expand-secret-namespace: d8-{{ $.Chart.Name }} - csi.storage.k8s.io/node-stage-secret-name: csi-{{ $cr.name }} - csi.storage.k8s.io/node-stage-secret-namespace: d8-{{ $.Chart.Name }} - csi.storage.k8s.io/fstype: {{ $sc.defaultFSType }} -reclaimPolicy: {{ $sc.reclaimPolicy }} -allowVolumeExpansion: {{ $sc.allowVolumeExpansion }} - {{- if $sc.mountOptions }} -mountOptions: - {{- range $option := $sc.mountOptions }} - - {{ $option }} - {{- end }} - {{- end }} - {{- end }} - {{- end }} -{{- end }} diff --git a/templates/rbd/volume-snapshot-class.yaml b/templates/rbd/volume-snapshot-class.yaml deleted file mode 100644 index 26f22d3..0000000 --- a/templates/rbd/volume-snapshot-class.yaml +++ /dev/null @@ 
-1,22 +0,0 @@ -# Source https://github.com/ceph/ceph-csi/blob/devel/examples/rbd/snapshotclass.yaml -{{- if (.Values.global.enabledModules | has "snapshot-controller") }} - {{- range $cr := .Values.csiCeph.internal.crs }} - {{- if $cr.spec.rbd }} - {{- range $sc := $cr.spec.rbd.storageClasses }} ---- -apiVersion: snapshot.storage.k8s.io/v1 -kind: VolumeSnapshotClass -metadata: - {{- include "helm_lib_module_labels" (list $ (dict "app" $.Chart.Name)) | nindent 2 }} - name: {{ $cr.name }}-{{ $sc.namePostfix }} -driver: rbd.csi.ceph.com -parameters: - clusterID: {{ $cr.spec.clusterID }} - imageFeatures: layering - csi.storage.k8s.io/snapshotter-secret-name: csi-{{ $cr.name }} - csi.storage.k8s.io/snapshotter-secret-namespace: d8-{{ $.Chart.Name }} -deletionPolicy: Delete - {{- end }} - {{- end }} - {{- end }} -{{- end }} diff --git a/templates/registry-secret.yaml b/templates/registry-secret.yaml index 151b76a..6a1ac52 100644 --- a/templates/registry-secret.yaml +++ b/templates/registry-secret.yaml @@ -2,9 +2,13 @@ apiVersion: v1 kind: Secret metadata: - name: deckhouse-registry - namespace: d8-{{ $.Chart.Name }} + name: {{ .Chart.Name }}-module-registry + namespace: d8-{{ .Chart.Name }} {{- include "helm_lib_module_labels" (list .) | nindent 2 }} type: kubernetes.io/dockerconfigjson data: - .dockerconfigjson: {{ $.Values.global.modulesImages.registry.dockercfg }} +{{- if dig "registry" "dockercfg" false .Values.csiCeph }} + .dockerconfigjson: {{ .Values.csiCeph.registry.dockercfg }} +{{- else }} + .dockerconfigjson: "eyJhdXRocyI6IHsgInJlZ2lzdHJ5LmRlY2tob3VzZS5pbyI6IHt9fX0=" +{{- end }} diff --git a/templates/secret.yaml b/templates/secret.yaml deleted file mode 100644 index ce3da8a..0000000 --- a/templates/secret.yaml +++ /dev/null @@ -1,16 +0,0 @@ -{{- range $cr := .Values.csiCeph.internal.crs }} ---- -apiVersion: v1 -kind: Secret -metadata: - name: csi-{{ $cr.name }} - namespace: d8-{{ $.Chart.Name }} -{{ include "helm_lib_module_labels" (list $ (dict "app" $.Chart.Name)) | indent 2 }} -stringData: - # Credentials for RBD - userID: {{ $cr.spec.userID }} - userKey: {{ $cr.spec.userKey }} - # Credentials for CephFS - adminID: {{ $cr.spec.userID }} - adminKey: {{ $cr.spec.userKey }} -{{- end }} diff --git a/tools/rbd-in-tree-to-ceph-csi-migration-helper.sh b/tools/rbd-in-tree-to-ceph-csi-migration-helper.sh deleted file mode 100755 index fe37411..0000000 --- a/tools/rbd-in-tree-to-ceph-csi-migration-helper.sh +++ /dev/null @@ -1,132 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2022 Flant JSC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -if [ "$#" != "2" ] || ! grep -qs "/" <<< "$1" || ! grep -qs "/" <<< "$2"; then - echo "Not enough arguments passed or namespace is not specified." 
- echo "Usage: ./rbd-in-tree-to-ceph-csi-migration-helper.sh / /" - exit 1 -fi - -sample_pvc_namespace="$(echo -n "$1" | awk '{gsub("/"," ");print $1}')" -target_pvc_namespace="$(echo -n "$2" | awk '{gsub("/"," ");print $1}')" - -sample_pvc_name="$(echo -n "$1" | awk '{gsub("/"," ");print $2}')" -target_pvc_name="$(echo -n "$2" | awk '{gsub("/"," ");print $2}')" - - -sample_pvc="$(kubectl -n "$sample_pvc_namespace" get pvc "$sample_pvc_name" -o json)" -target_pvc="$(kubectl -n "$target_pvc_namespace" get pvc "$target_pvc_name" -o json)" - -sample_pv_name="$(jq -r '.spec.volumeName' <<< "$sample_pvc")" -target_pv_name="$(jq -r '.spec.volumeName' <<< "$target_pvc")" - -sample_pv="$(kubectl get pv "$sample_pv_name" -o json)" -target_pv="$(kubectl get pv "$target_pv_name" -o json)" - -echo "Backup PVC $target_pvc_namespace/$target_pvc_name to backup-pvc-$target_pvc_namespace-$target_pvc_name.json" -echo "$target_pvc" > "backup-pvc-$target_pvc_namespace-$target_pvc_name.json" -echo "Backup PV $target_pv_name to backup-pv-$target_pv_name.json" -echo "$target_pv" > "backup-pv-$target_pv_name.json" - -pool_name="$(jq -r '.spec.csi.volumeAttributes.pool' <<< "$sample_pv")" -original_rbd_image_name="$(jq -r '.spec.rbd.image' <<< "$target_pv")" -new_rbd_image_name="$(jq -rn --arg s "$original_rbd_image_name" '$s | sub("kubernetes-dynamic-pvc-"; "csi-vol-")')" -new_rbd_image_uid="$(jq -rn --arg s "$original_rbd_image_name" '$s | sub("kubernetes-dynamic-pvc-"; "")')" -sample_rbd_image_uid="$(jq -r '.spec.csi.volumeAttributes.imageName | sub("csi-vol-"; "")' <<< "$sample_pv")" - -csi_section_for_target_pv="$(jq -r --arg i "$new_rbd_image_name" '.spec.csi.volumeAttributes.imageName = $i | .spec.csi' <<< "$sample_pv")" -new_storage_class_for_target="$(jq -r '.spec.storageClassName' <<< "$sample_pvc")" -new_annotations_for_target_pvc="$(jq -r '.metadata.annotations' <<< "$sample_pvc")" -new_annotations_for_target_pv="$(jq -r '.metadata.annotations' <<< "$sample_pv")" - -new_target_pvc="$(jq --argjson a "$new_annotations_for_target_pvc" --arg sc "$new_storage_class_for_target" ' - .metadata.annotations = $a | - .spec.storageClassName = $sc | - del(.metadata.resourceVersion) | - del(.metadata.uid) | - del(.metadata.creationTimestamp) | - del(.status) - ' <<< "$target_pvc")" - -while true; do - msg="Rename the rbd image in your ceph cluster using the following commands: ---- -rbd mv $pool_name/$original_rbd_image_name $pool_name/$new_rbd_image_name -rbd image-meta set $pool_name/$new_rbd_image_name rbd.csi.ceph.com/thick-provisioned false ---- -After renaming, enter yes to confirm: " - read -p "$msg" confirm - if [ "$confirm" == "yes" ]; then - break - fi -done - -while true; do - read -p "PersistentVolumeClaim $target_pvc_name and PersistentVolume $target_pv_name will be removed (Type yes to confirm): " confirm - if [ "$confirm" == "yes" ]; then - echo ">kubectl -n $target_pvc_namespace delete pvc $target_pvc_name" - kubectl -n "$target_pvc_namespace" delete pvc "$target_pvc_name" - echo ">kubectl delete pv $target_pv_name" - kubectl delete pv "$target_pv_name" - break - fi -done - -echo ">kubectl create -f - <<\"END\" -$new_target_pvc -END" - -while true; do - read -p "Apply this manifest in the cluster? (Type yes to confirm): " confirm - if [ "$confirm" == "yes" ]; then - kubectl create -f - <kubectl create -f - <<\"END\" -$new_target_pv -END" - -while true; do - read -p "Apply this manifest in the cluster? (Type yes to confirm): " confirm - if [ "$confirm" == "yes" ]; then - kubectl create -f - <