diff --git a/test/e2e/cluster_upgrade_runtimesdk.go b/test/e2e/cluster_upgrade_runtimesdk.go
index 5cbcb933347c..4fdc3ac32564 100644
--- a/test/e2e/cluster_upgrade_runtimesdk.go
+++ b/test/e2e/cluster_upgrade_runtimesdk.go
@@ -112,9 +112,9 @@ func ClusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() Cl
 	)
 
 	var (
-		input                              ClusterUpgradeWithRuntimeSDKSpecInput
-		namespace, infraNamespace          *corev1.Namespace
-		cancelWatches, cancelInfraWatches  context.CancelFunc
+		input                                      ClusterUpgradeWithRuntimeSDKSpecInput
+		namespace, clusterClassNamespace           *corev1.Namespace
+		cancelWatches, cancelClusterClassNamespace context.CancelFunc
 
 		controlPlaneMachineCount int64
 		workerMachineCount       int64
@@ -151,7 +151,7 @@ func ClusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() Cl
 		// Set up a Namespace where to host objects for this spec and create a watcher for the Namespace events.
 		namespace, cancelWatches = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
-		infraNamespace, cancelInfraWatches = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
+		clusterClassNamespace, cancelClusterClassNamespace = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
 		clusterName = fmt.Sprintf("%s-%s", specName, util.RandomString(6))
 		clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
 	})
@@ -166,7 +166,7 @@ func ClusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() Cl
 		By("Deploy Test Extension ExtensionConfig")
 		Expect(input.BootstrapClusterProxy.GetClient().Create(ctx,
-			extensionConfig(specName, input.ExtensionServiceNamespace, input.ExtensionServiceName, namespace.Name, infraNamespace.Name))).
+			extensionConfig(specName, input.ExtensionServiceNamespace, input.ExtensionServiceName, namespace.Name, clusterClassNamespace.Name))).
 			To(Succeed(), "Failed to create the extension config")
 
 		By("Creating a workload cluster; creation waits for BeforeClusterCreateHook to gate the operation")
@@ -183,7 +183,7 @@ func ClusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() Cl
 		variables := map[string]string{}
 		if input.ClassNamespace {
-			variables["CLUSTER_CLASS_NAMESPACE"] = infraNamespace.Name
+			variables["CLUSTER_CLASS_NAMESPACE"] = clusterClassNamespace.Name
 		}
 
 		clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
@@ -337,12 +337,12 @@ func ClusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() Cl
 				Byf("Deleting namespace used for optionally hosting the %q infrastructure spec", specName)
 				framework.DeleteNamespace(ctx, framework.DeleteNamespaceInput{
 					Deleter: input.BootstrapClusterProxy.GetClient(),
-					Name:    infraNamespace.Name,
+					Name:    clusterClassNamespace.Name,
 				})
 			}
 		}
 
 		cancelWatches()
-		cancelInfraWatches()
+		cancelClusterClassNamespace()
 	})
 }
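Note for context on the rename above: the second namespace no longer hosts only infrastructure objects but the ClusterClass itself, and `CLUSTER_CLASS_NAMESPACE` ultimately populates `spec.topology.classNamespace` on the Cluster. A minimal sketch of the resulting object shape (not part of this diff; the cluster name, class name, and version are illustrative):

```go
package e2e

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

// exampleCrossNamespaceCluster sketches what the CLUSTER_CLASS_NAMESPACE
// template variable produces: a Cluster in the spec namespace whose topology
// references a ClusterClass hosted in a different namespace.
func exampleCrossNamespaceCluster(specNamespace, classNamespace string) *clusterv1.Cluster {
	return &clusterv1.Cluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "k8s-upgrade-example", // hypothetical name
			Namespace: specNamespace,
		},
		Spec: clusterv1.ClusterSpec{
			Topology: &clusterv1.Topology{
				Class:          "quick-start",  // hypothetical ClusterClass name
				ClassNamespace: classNamespace, // the separate namespace set up above
				Version:        "v1.31.0",      // hypothetical Kubernetes version
			},
		},
	}
}
```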
diff --git a/test/e2e/clusterclass_changes.go b/test/e2e/clusterclass_changes.go
index 454543f2a707..c3ebf3c32730 100644
--- a/test/e2e/clusterclass_changes.go
+++ b/test/e2e/clusterclass_changes.go
@@ -135,11 +135,11 @@ type ClusterClassChangesSpecInput struct {
 // indirect test coverage of this from other tests as well.
 func ClusterClassChangesSpec(ctx context.Context, inputGetter func() ClusterClassChangesSpecInput) {
 	var (
-		specName         = "clusterclass-changes"
-		input            ClusterClassChangesSpecInput
-		namespace        *corev1.Namespace
-		cancelWatches    context.CancelFunc
-		clusterResources *clusterctl.ApplyClusterTemplateAndWaitResult
+		specName                         = "clusterclass-changes"
+		input                            ClusterClassChangesSpecInput
+		namespace, clusterClassNamespace *corev1.Namespace
+		cancelWatches, cancelCCWatches   context.CancelFunc
+		clusterResources                 *clusterctl.ApplyClusterTemplateAndWaitResult
 	)
 
 	BeforeEach(func() {
@@ -155,6 +155,7 @@ func ClusterClassChangesSpec(ctx context.Context, inputGetter func() ClusterClas
 		// Set up a Namespace where to host objects for this spec and create a watcher for the namespace events.
 		namespace, cancelWatches = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
+		clusterClassNamespace, cancelCCWatches = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, nil)
 		clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
 	})
 
@@ -216,6 +217,7 @@ func ClusterClassChangesSpec(ctx context.Context, inputGetter func() ClusterClas
 		By("Rebasing the Cluster to a ClusterClass with a modified label for MachineDeployments and wait for changes to be applied to the MachineDeployment objects")
 		rebaseClusterClassAndWait(ctx, rebaseClusterClassAndWaitInput{
 			ClusterProxy:              input.BootstrapClusterProxy,
+			ClusterClassNamespace:     clusterClassNamespace.Name,
 			ClusterClass:              clusterResources.ClusterClass,
 			Cluster:                   clusterResources.Cluster,
 			WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
@@ -233,6 +235,14 @@ func ClusterClassChangesSpec(ctx context.Context, inputGetter func() ClusterClas
 	AfterEach(func() {
 		// Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself.
 		framework.DumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
+
+		Byf("Deleting namespace used for hosting the %q test clusterClass", specName)
+		framework.DeleteNamespace(ctx, framework.DeleteNamespaceInput{
+			Deleter: input.BootstrapClusterProxy.GetClient(),
+			Name:    clusterClassNamespace.Name,
+		})
+
+		cancelCCWatches()
 	})
 }
 
@@ -662,6 +672,7 @@ func assertMachinePoolTopologyFields(g Gomega, mp expv1.MachinePool, mpTopology
 type rebaseClusterClassAndWaitInput struct {
 	ClusterProxy              framework.ClusterProxy
 	ClusterClass              *clusterv1.ClusterClass
+	ClusterClassNamespace     string
 	Cluster                   *clusterv1.Cluster
 	WaitForMachineDeployments []interface{}
 }
@@ -674,15 +685,20 @@ func rebaseClusterClassAndWait(ctx context.Context, input rebaseClusterClassAndW
 	Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling RebaseClusterClassAndWait")
 	Expect(input.ClusterClass).ToNot(BeNil(), "Invalid argument. input.ClusterClass can't be nil when calling RebaseClusterClassAndWait")
 	Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling RebaseClusterClassAndWait")
+	Expect(input.ClusterClassNamespace).ToNot(BeEmpty(), "Invalid argument. input.ClusterClassNamespace can't be empty when calling RebaseClusterClassAndWait")
 
 	mgmtClient := input.ClusterProxy.GetClient()
 
 	var testWorkerLabelName = "rebase-diff"
 
+	sourceClusterClass := input.ClusterClass.DeepCopy()
+	Expect(mgmtClient.Get(ctx, client.ObjectKeyFromObject(sourceClusterClass), sourceClusterClass)).To(Succeed())
+
 	// Create a new ClusterClass with a new name and the new worker label set.
-	newClusterClass := input.ClusterClass.DeepCopy()
+	newClusterClass := sourceClusterClass.DeepCopy()
 	newClusterClassName := fmt.Sprintf("%s-%s", input.ClusterClass.Name, util.RandomString(6))
 	newClusterClass.SetName(newClusterClassName)
+	newClusterClass.SetNamespace(input.ClusterClassNamespace)
 	newClusterClass.SetResourceVersion("")
 	for i, mdClass := range newClusterClass.Spec.Workers.MachineDeployments {
 		if mdClass.Template.Metadata.Labels == nil {
@@ -691,6 +707,86 @@ func rebaseClusterClassAndWait(ctx context.Context, input rebaseClusterClassAndW
 		mdClass.Template.Metadata.Labels[testWorkerLabelName] = mdClass.Class
 		newClusterClass.Spec.Workers.MachineDeployments[i] = mdClass
 	}
+
+	// Copy ClusterClass templates to the new namespace
+	for i, mdClass := range newClusterClass.Spec.Workers.MachineDeployments {
+		if mdClass.Template.Infrastructure.Ref != nil {
+			workerInfraTemplate, err := external.Get(ctx, mgmtClient, mdClass.Template.Infrastructure.Ref)
+			Expect(err).ToNot(HaveOccurred())
+			workerInfraTemplate.SetNamespace(input.ClusterClassNamespace)
+			workerInfraTemplate.SetResourceVersion("")
+			workerInfraTemplate.SetOwnerReferences(nil)
+			Expect(mgmtClient.Create(ctx, workerInfraTemplate)).To(Succeed())
+			mdClass.Template.Infrastructure.Ref = external.GetObjectReference(workerInfraTemplate)
+		}
+
+		if mdClass.Template.Bootstrap.Ref != nil {
+			workerBootstrapTemplate, err := external.Get(ctx, mgmtClient, mdClass.Template.Bootstrap.Ref)
+			Expect(err).ToNot(HaveOccurred())
+			workerBootstrapTemplate.SetNamespace(input.ClusterClassNamespace)
+			workerBootstrapTemplate.SetResourceVersion("")
+			workerBootstrapTemplate.SetOwnerReferences(nil)
+			Expect(mgmtClient.Create(ctx, workerBootstrapTemplate)).To(Succeed())
+			mdClass.Template.Bootstrap.Ref = external.GetObjectReference(workerBootstrapTemplate)
+		}
+
+		newClusterClass.Spec.Workers.MachineDeployments[i] = mdClass
+	}
+
+	for i, mpClass := range newClusterClass.Spec.Workers.MachinePools {
+		if mpClass.Template.Infrastructure.Ref != nil {
+			workerInfraTemplate, err := external.Get(ctx, mgmtClient, mpClass.Template.Infrastructure.Ref)
+			Expect(err).ToNot(HaveOccurred())
+			workerInfraTemplate.SetNamespace(input.ClusterClassNamespace)
+			workerInfraTemplate.SetResourceVersion("")
+			workerInfraTemplate.SetOwnerReferences(nil)
+			Expect(mgmtClient.Create(ctx, workerInfraTemplate)).To(Succeed())
+			mpClass.Template.Infrastructure.Ref = external.GetObjectReference(workerInfraTemplate)
+		}
+
+		if mpClass.Template.Bootstrap.Ref != nil {
+			workerBootstrapTemplate, err := external.Get(ctx, mgmtClient, mpClass.Template.Bootstrap.Ref)
+			Expect(err).ToNot(HaveOccurred())
+			workerBootstrapTemplate.SetNamespace(input.ClusterClassNamespace)
+			workerBootstrapTemplate.SetResourceVersion("")
+			workerBootstrapTemplate.SetOwnerReferences(nil)
+			Expect(mgmtClient.Create(ctx, workerBootstrapTemplate)).To(Succeed())
+			mpClass.Template.Bootstrap.Ref = external.GetObjectReference(workerBootstrapTemplate)
+		}
+
+		newClusterClass.Spec.Workers.MachinePools[i] = mpClass
+	}
+
+	if newClusterClass.Spec.ControlPlane.MachineInfrastructure.Ref != nil {
+		machineInfraTemplate, err := external.Get(ctx, mgmtClient, newClusterClass.Spec.ControlPlane.MachineInfrastructure.Ref)
+		Expect(err).ToNot(HaveOccurred())
+		machineInfraTemplate.SetNamespace(input.ClusterClassNamespace)
+		machineInfraTemplate.SetResourceVersion("")
+		machineInfraTemplate.SetOwnerReferences(nil)
+		Expect(mgmtClient.Create(ctx, machineInfraTemplate)).To(Succeed())
+		newClusterClass.Spec.ControlPlane.MachineInfrastructure.Ref = external.GetObjectReference(machineInfraTemplate)
+	}
+
+	if newClusterClass.Spec.ControlPlane.Ref != nil {
+		controlPlaneTemplate, err := external.Get(ctx, mgmtClient, newClusterClass.Spec.ControlPlane.Ref)
+		Expect(err).ToNot(HaveOccurred())
+		controlPlaneTemplate.SetNamespace(input.ClusterClassNamespace)
+		controlPlaneTemplate.SetResourceVersion("")
+		controlPlaneTemplate.SetOwnerReferences(nil)
+		Expect(mgmtClient.Create(ctx, controlPlaneTemplate)).To(Succeed())
+		newClusterClass.Spec.ControlPlane.Ref = external.GetObjectReference(controlPlaneTemplate)
+	}
+
+	if newClusterClass.Spec.Infrastructure.Ref != nil {
+		infrastructureTemplate, err := external.Get(ctx, mgmtClient, newClusterClass.Spec.Infrastructure.Ref)
+		Expect(err).ToNot(HaveOccurred())
+		infrastructureTemplate.SetNamespace(input.ClusterClassNamespace)
+		infrastructureTemplate.SetResourceVersion("")
+		infrastructureTemplate.SetOwnerReferences(nil)
+		Expect(mgmtClient.Create(ctx, infrastructureTemplate)).To(Succeed())
+		newClusterClass.Spec.Infrastructure.Ref = external.GetObjectReference(infrastructureTemplate)
+	}
+
 	Expect(mgmtClient.Create(ctx, newClusterClass)).To(Succeed())
 
 	// Get the current ControlPlane, we will later verify that it has not changed.
@@ -702,6 +798,7 @@ func rebaseClusterClassAndWait(ctx context.Context, input rebaseClusterClassAndW
 	patchHelper, err := patch.NewHelper(input.Cluster, mgmtClient)
 	Expect(err).ToNot(HaveOccurred())
 	input.Cluster.Spec.Topology.Class = newClusterClassName
+	input.Cluster.Spec.Topology.ClassNamespace = input.ClusterClassNamespace
 	// We have to retry the patch. The ClusterClass was just created so the client cache in the
 	// controller/webhook might not be aware of it yet. If the webhook is not aware of the ClusterClass
 	// we get a "Cluster ... can't be validated. ClusterClass ... can not be retrieved" error.
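Review note: the template-copy blocks added above all follow one pattern — fetch the referenced template with external.Get, strip the server-populated fields, re-create it in the target namespace, and re-point the ref at the copy. A sketch of a helper that could fold them together (not part of the diff; it uses only calls already present in the change):

```go
import (
	"context"

	corev1 "k8s.io/api/core/v1"

	"sigs.k8s.io/cluster-api/controllers/external"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// copyTemplateToNamespace captures the pattern repeated above: read the
// referenced template, reset fields owned by the API server, re-create the
// object in the target namespace, and return a reference to the copy.
// A nil ref is passed through unchanged.
func copyTemplateToNamespace(ctx context.Context, c client.Client, ref *corev1.ObjectReference, namespace string) (*corev1.ObjectReference, error) {
	if ref == nil {
		return nil, nil
	}
	tmpl, err := external.Get(ctx, c, ref)
	if err != nil {
		return nil, err
	}
	tmpl.SetNamespace(namespace)
	tmpl.SetResourceVersion("")  // must be empty when creating a new object
	tmpl.SetOwnerReferences(nil) // owners live in the source namespace
	if err := c.Create(ctx, tmpl); err != nil {
		return nil, err
	}
	return external.GetObjectReference(tmpl), nil
}
```

Each block above would then reduce to a single `ref, err = copyTemplateToNamespace(ctx, mgmtClient, ref, input.ClusterClassNamespace)` call plus the usual Expect assertion.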
diff --git a/test/e2e/clusterclass_changes_test.go b/test/e2e/clusterclass_changes_test.go
index 8deb2cddc841..45831f06b65a 100644
--- a/test/e2e/clusterclass_changes_test.go
+++ b/test/e2e/clusterclass_changes_test.go
@@ -24,7 +24,7 @@ import (
 	"k8s.io/utils/ptr"
 )
 
-var _ = Describe("When testing ClusterClass changes [ClusterClass]", func() {
+var _ = Describe("When testing ClusterClass changes [ClusterClass] [PR-Blocking]", func() {
 	ClusterClassChangesSpec(ctx, func() ClusterClassChangesSpecInput {
 		return ClusterClassChangesSpecInput{
 			E2EConfig: e2eConfig,
diff --git a/test/e2e/quick_start.go b/test/e2e/quick_start.go
index 4998060608e1..f5d83aecdd50 100644
--- a/test/e2e/quick_start.go
+++ b/test/e2e/quick_start.go
@@ -144,9 +144,11 @@ func QuickStartSpec(ctx context.Context, inputGetter func() QuickStartSpecInput)
 		variables := map[string]string{}
 		if input.ClassNamespace != nil {
 			variables["CLUSTER_CLASS_NAMESPACE"] = *input.ClassNamespace
+			By("Creating a cluster referencing a clusterClass from another namespace")
+		} else {
+			By("Creating a cluster referencing a clusterClass")
 		}
 
-		By("Creating a cluster referencing a clusterClass from another namespace")
 		clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
 			ClusterProxy: input.BootstrapClusterProxy,
 			ConfigCluster: clusterctl.ConfigClusterInput{
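For reference, a sketch of how a test entry point might exercise the new cross-namespace branch in QuickStartSpec. The Describe text, flavor, and namespace are illustrative, and the remaining input fields are assumed to follow the pattern of the existing specs in this package:

```go
// Hypothetical test wiring (not part of the diff): a non-nil ClassNamespace
// selects the "clusterClass from another namespace" branch added above.
var _ = Describe("When following the quick-start with a ClusterClass from a different namespace [ClusterClass]", func() {
	QuickStartSpec(ctx, func() QuickStartSpecInput {
		return QuickStartSpecInput{
			E2EConfig:             e2eConfig,
			ClusterctlConfigPath:  clusterctlConfigPath,
			BootstrapClusterProxy: bootstrapClusterProxy,
			ArtifactFolder:        artifactFolder,
			SkipCleanup:           skipCleanup,
			Flavor:                ptr.To("topology"),            // hypothetical flavor
			ClassNamespace:        ptr.To("quick-start-classes"), // hypothetical namespace; feeds CLUSTER_CLASS_NAMESPACE
		}
	})
})
```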