Add changes to CC rebase e2e test
Signed-off-by: Danil-Grigorev <danil.grigorev@suse.com>
Danil-Grigorev committed Jan 16, 2025
1 parent 19c3630 commit bb6604e
Showing 4 changed files with 115 additions and 16 deletions.
test/e2e/cluster_upgrade_runtimesdk.go (8 additions, 8 deletions)

@@ -112,9 +112,9 @@ func ClusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() ClusterUpgradeWithRuntimeSDKSpecInput) {
 	)

 	var (
-		input                             ClusterUpgradeWithRuntimeSDKSpecInput
-		namespace, infraNamespace         *corev1.Namespace
-		cancelWatches, cancelInfraWatches context.CancelFunc
+		input                                      ClusterUpgradeWithRuntimeSDKSpecInput
+		namespace, clusterClassNamespace           *corev1.Namespace
+		cancelWatches, cancelClusterClassNamespace context.CancelFunc

 		controlPlaneMachineCount int64
 		workerMachineCount       int64
@@ -151,7 +151,7 @@ func ClusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() ClusterUpgradeWithRuntimeSDKSpecInput) {

 		// Set up a Namespace where to host objects for this spec and create a watcher for the Namespace events.
 		namespace, cancelWatches = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
-		infraNamespace, cancelInfraWatches = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
+		clusterClassNamespace, cancelClusterClassNamespace = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
 		clusterName = fmt.Sprintf("%s-%s", specName, util.RandomString(6))
 		clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
 	})
@@ -166,7 +166,7 @@ func ClusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() ClusterUpgradeWithRuntimeSDKSpecInput) {
 		By("Deploy Test Extension ExtensionConfig")

 		Expect(input.BootstrapClusterProxy.GetClient().Create(ctx,
-			extensionConfig(specName, input.ExtensionServiceNamespace, input.ExtensionServiceName, namespace.Name, infraNamespace.Name))).
+			extensionConfig(specName, input.ExtensionServiceNamespace, input.ExtensionServiceName, namespace.Name, clusterClassNamespace.Name))).
 			To(Succeed(), "Failed to create the extension config")

 		By("Creating a workload cluster; creation waits for BeforeClusterCreateHook to gate the operation")
@@ -183,7 +183,7 @@ func ClusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() ClusterUpgradeWithRuntimeSDKSpecInput) {

 		variables := map[string]string{}
 		if input.ClassNamespace {
-			variables["CLUSTER_CLASS_NAMESPACE"] = infraNamespace.Name
+			variables["CLUSTER_CLASS_NAMESPACE"] = clusterClassNamespace.Name
 		}

 		clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
@@ -337,12 +337,12 @@ func ClusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() ClusterUpgradeWithRuntimeSDKSpecInput) {
 				Byf("Deleting namespace used for optionally hosting the %q infrastructure spec", specName)
 				framework.DeleteNamespace(ctx, framework.DeleteNamespaceInput{
 					Deleter: input.BootstrapClusterProxy.GetClient(),
-					Name:    infraNamespace.Name,
+					Name:    clusterClassNamespace.Name,
 				})
 			}
 		}
 		cancelWatches()
-		cancelInfraWatches()
+		cancelClusterClassNamespace()
 	})
 }
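The rename above reflects that this namespace no longer hosts only infrastructure objects: it hosts the ClusterClass, and its name flows into the cluster template through the CLUSTER_CLASS_NAMESPACE variable. For orientation, here is a minimal sketch of the cross-namespace reference this produces on the Cluster object, assuming the usual clusterv1/metav1 imports; the object name and version are hypothetical, and only the Class/ClassNamespace fields come from this change:

    // Sketch: a Cluster whose topology references a ClusterClass living in a
    // different namespace. Values marked hypothetical are illustrative only.
    cluster := &clusterv1.Cluster{
        ObjectMeta: metav1.ObjectMeta{
            Name:      "my-cluster",   // hypothetical
            Namespace: namespace.Name, // namespace hosting the Cluster
        },
        Spec: clusterv1.ClusterSpec{
            Topology: &clusterv1.Topology{
                Class:          "quick-start",              // hypothetical ClusterClass name
                ClassNamespace: clusterClassNamespace.Name, // namespace hosting the ClusterClass
                Version:        "v1.32.0",                  // hypothetical
            },
        },
    }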
test/e2e/clusterclass_changes.go (103 additions, 6 deletions)

@@ -135,11 +135,11 @@ type ClusterClassChangesSpecInput struct {
 // indirect test coverage of this from other tests as well.
 func ClusterClassChangesSpec(ctx context.Context, inputGetter func() ClusterClassChangesSpecInput) {
 	var (
-		specName         = "clusterclass-changes"
-		input            ClusterClassChangesSpecInput
-		namespace        *corev1.Namespace
-		cancelWatches    context.CancelFunc
-		clusterResources *clusterctl.ApplyClusterTemplateAndWaitResult
+		specName                         = "clusterclass-changes"
+		input                            ClusterClassChangesSpecInput
+		namespace, clusterClassNamespace *corev1.Namespace
+		cancelWatches, cancelCCWatches   context.CancelFunc
+		clusterResources                 *clusterctl.ApplyClusterTemplateAndWaitResult
 	)

 	BeforeEach(func() {
@@ -155,6 +155,7 @@ func ClusterClassChangesSpec(ctx context.Context, inputGetter func() ClusterClassChangesSpecInput) {

 		// Set up a Namespace where to host objects for this spec and create a watcher for the namespace events.
 		namespace, cancelWatches = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
+		clusterClassNamespace, cancelCCWatches = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, nil)
 		clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
 	})
@@ -216,6 +217,7 @@ func ClusterClassChangesSpec(ctx context.Context, inputGetter func() ClusterClassChangesSpecInput) {
 			By("Rebasing the Cluster to a ClusterClass with a modified label for MachineDeployments and wait for changes to be applied to the MachineDeployment objects")
 			rebaseClusterClassAndWait(ctx, rebaseClusterClassAndWaitInput{
 				ClusterProxy:              input.BootstrapClusterProxy,
+				ClusterClassNamespace:     clusterClassNamespace.Name,
 				ClusterClass:              clusterResources.ClusterClass,
 				Cluster:                   clusterResources.Cluster,
 				WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
@@ -233,6 +235,14 @@ func ClusterClassChangesSpec(ctx context.Context, inputGetter func() ClusterClassChangesSpecInput) {
 	AfterEach(func() {
 		// Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself.
 		framework.DumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
+
+		Byf("Deleting namespace used for hosting the %q test clusterClass", specName)
+		framework.DeleteNamespace(ctx, framework.DeleteNamespaceInput{
+			Deleter: input.BootstrapClusterProxy.GetClient(),
+			Name:    clusterClassNamespace.Name,
+		})
+
+		cancelCCWatches()
 	})
 }
@@ -662,6 +672,7 @@ func assertMachinePoolTopologyFields(g Gomega, mp expv1.MachinePool, mpTopology clusterv1.MachinePoolTopology) {
 type rebaseClusterClassAndWaitInput struct {
 	ClusterProxy              framework.ClusterProxy
 	ClusterClass              *clusterv1.ClusterClass
+	ClusterClassNamespace     string
 	Cluster                   *clusterv1.Cluster
 	WaitForMachineDeployments []interface{}
 }
@@ -674,15 +685,20 @@ func rebaseClusterClassAndWait(ctx context.Context, input rebaseClusterClassAndWaitInput) {
 	Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling RebaseClusterClassAndWait")
 	Expect(input.ClusterClass).ToNot(BeNil(), "Invalid argument. input.ClusterClass can't be nil when calling RebaseClusterClassAndWait")
 	Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling RebaseClusterClassAndWait")
+	Expect(input.ClusterClassNamespace).ToNot(BeEmpty(), "Invalid argument. input.ClusterClassNamespace can't be empty when calling RebaseClusterClassAndWait")

 	mgmtClient := input.ClusterProxy.GetClient()

 	var testWorkerLabelName = "rebase-diff"

+	sourceClusterClass := input.ClusterClass.DeepCopy()
+	Expect(mgmtClient.Get(ctx, client.ObjectKeyFromObject(sourceClusterClass), sourceClusterClass)).To(Succeed())
+
 	// Create a new ClusterClass with a new name and the new worker label set.
-	newClusterClass := input.ClusterClass.DeepCopy()
+	newClusterClass := sourceClusterClass.DeepCopy()
 	newClusterClassName := fmt.Sprintf("%s-%s", input.ClusterClass.Name, util.RandomString(6))
 	newClusterClass.SetName(newClusterClassName)
+	newClusterClass.SetNamespace(input.ClusterClassNamespace)
+	newClusterClass.SetResourceVersion("")
 	for i, mdClass := range newClusterClass.Spec.Workers.MachineDeployments {
 		if mdClass.Template.Metadata.Labels == nil {
@@ -691,6 +707,86 @@ func rebaseClusterClassAndWait(ctx context.Context, input rebaseClusterClassAndWaitInput) {
 		mdClass.Template.Metadata.Labels[testWorkerLabelName] = mdClass.Class
 		newClusterClass.Spec.Workers.MachineDeployments[i] = mdClass
 	}
+
+	// Copy ClusterClass templates to the new namespace
+	for i, mdClass := range newClusterClass.Spec.Workers.MachineDeployments {
+		if mdClass.Template.Infrastructure.Ref != nil {
+			workerInfraTemplate, err := external.Get(ctx, mgmtClient, mdClass.Template.Infrastructure.Ref)
+			Expect(err).ToNot(HaveOccurred())
+			workerInfraTemplate.SetNamespace(input.ClusterClassNamespace)
+			workerInfraTemplate.SetResourceVersion("")
+			workerInfraTemplate.SetOwnerReferences(nil)
+			Expect(mgmtClient.Create(ctx, workerInfraTemplate)).To(Succeed())
+			mdClass.Template.Infrastructure.Ref = external.GetObjectReference(workerInfraTemplate)
+		}
+
+		if mdClass.Template.Bootstrap.Ref != nil {
+			workerBootstrapTemplate, err := external.Get(ctx, mgmtClient, mdClass.Template.Bootstrap.Ref)
+			Expect(err).ToNot(HaveOccurred())
+			workerBootstrapTemplate.SetNamespace(input.ClusterClassNamespace)
+			workerBootstrapTemplate.SetResourceVersion("")
+			workerBootstrapTemplate.SetOwnerReferences(nil)
+			Expect(mgmtClient.Create(ctx, workerBootstrapTemplate)).To(Succeed())
+			mdClass.Template.Bootstrap.Ref = external.GetObjectReference(workerBootstrapTemplate)
+		}
+
+		newClusterClass.Spec.Workers.MachineDeployments[i] = mdClass
+	}
+
+	for i, mpClass := range newClusterClass.Spec.Workers.MachinePools {
+		if mpClass.Template.Infrastructure.Ref != nil {
+			workerInfraTemplate, err := external.Get(ctx, mgmtClient, mpClass.Template.Infrastructure.Ref)
+			Expect(err).ToNot(HaveOccurred())
+			workerInfraTemplate.SetNamespace(input.ClusterClassNamespace)
+			workerInfraTemplate.SetResourceVersion("")
+			workerInfraTemplate.SetOwnerReferences(nil)
+			Expect(mgmtClient.Create(ctx, workerInfraTemplate)).To(Succeed())
+			mpClass.Template.Infrastructure.Ref = external.GetObjectReference(workerInfraTemplate)
+		}
+
+		if mpClass.Template.Bootstrap.Ref != nil {
+			workerBootstrapTemplate, err := external.Get(ctx, mgmtClient, mpClass.Template.Bootstrap.Ref)
+			Expect(err).ToNot(HaveOccurred())
+			workerBootstrapTemplate.SetNamespace(input.ClusterClassNamespace)
+			workerBootstrapTemplate.SetResourceVersion("")
+			workerBootstrapTemplate.SetOwnerReferences(nil)
+			Expect(mgmtClient.Create(ctx, workerBootstrapTemplate)).To(Succeed())
+			mpClass.Template.Bootstrap.Ref = external.GetObjectReference(workerBootstrapTemplate)
+		}
+
+		newClusterClass.Spec.Workers.MachinePools[i] = mpClass
+	}
+
+	if newClusterClass.Spec.ControlPlane.MachineInfrastructure.Ref != nil {
+		machineInfraTemplate, err := external.Get(ctx, mgmtClient, newClusterClass.Spec.ControlPlane.MachineInfrastructure.Ref)
+		Expect(err).ToNot(HaveOccurred())
+		machineInfraTemplate.SetNamespace(input.ClusterClassNamespace)
+		machineInfraTemplate.SetResourceVersion("")
+		machineInfraTemplate.SetOwnerReferences(nil)
+		Expect(mgmtClient.Create(ctx, machineInfraTemplate)).To(Succeed())
+		newClusterClass.Spec.ControlPlane.MachineInfrastructure.Ref = external.GetObjectReference(machineInfraTemplate)
+	}
+
+	if newClusterClass.Spec.ControlPlane.Ref != nil {
+		controlPlaneTemplate, err := external.Get(ctx, mgmtClient, newClusterClass.Spec.ControlPlane.Ref)
+		Expect(err).ToNot(HaveOccurred())
+		controlPlaneTemplate.SetNamespace(input.ClusterClassNamespace)
+		controlPlaneTemplate.SetResourceVersion("")
+		controlPlaneTemplate.SetOwnerReferences(nil)
+		Expect(mgmtClient.Create(ctx, controlPlaneTemplate)).To(Succeed())
+		newClusterClass.Spec.ControlPlane.Ref = external.GetObjectReference(controlPlaneTemplate)
+	}
+
+	if newClusterClass.Spec.Infrastructure.Ref != nil {
+		infrastructureTemplate, err := external.Get(ctx, mgmtClient, newClusterClass.Spec.Infrastructure.Ref)
+		Expect(err).ToNot(HaveOccurred())
+		infrastructureTemplate.SetNamespace(input.ClusterClassNamespace)
+		infrastructureTemplate.SetResourceVersion("")
+		infrastructureTemplate.SetOwnerReferences(nil)
+		Expect(mgmtClient.Create(ctx, infrastructureTemplate)).To(Succeed())
+		newClusterClass.Spec.Infrastructure.Ref = external.GetObjectReference(infrastructureTemplate)
+	}
+
+	Expect(mgmtClient.Create(ctx, newClusterClass)).To(Succeed())

 	// Get the current ControlPlane, we will later verify that it has not changed.
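The five copy blocks above repeat a single pattern: read the referenced template, clear the fields the API server owns, re-create the object in the ClusterClass namespace, and point the reference at the copy. A sketch of that pattern as a shared helper, using only the external package calls already shown in the diff; copyTemplateToNamespace itself is hypothetical and not part of this commit:

    // copyTemplateToNamespace (hypothetical) re-creates the template behind ref
    // in targetNamespace and returns a reference to the copy.
    func copyTemplateToNamespace(ctx context.Context, c client.Client, ref *corev1.ObjectReference, targetNamespace string) *corev1.ObjectReference {
        tpl, err := external.Get(ctx, c, ref)
        Expect(err).ToNot(HaveOccurred())
        tpl.SetNamespace(targetNamespace) // place the copy next to the new ClusterClass
        tpl.SetResourceVersion("")        // resourceVersion must be empty on Create
        tpl.SetOwnerReferences(nil)       // owner references must not cross namespaces
        Expect(c.Create(ctx, tpl)).To(Succeed())
        return external.GetObjectReference(tpl)
    }

Each inline block would then collapse to a one-liner such as mdClass.Template.Infrastructure.Ref = copyTemplateToNamespace(ctx, mgmtClient, mdClass.Template.Infrastructure.Ref, input.ClusterClassNamespace).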
@@ -702,6 +798,7 @@ func rebaseClusterClassAndWait(ctx context.Context, input rebaseClusterClassAndWaitInput) {
 	patchHelper, err := patch.NewHelper(input.Cluster, mgmtClient)
 	Expect(err).ToNot(HaveOccurred())
 	input.Cluster.Spec.Topology.Class = newClusterClassName
+	input.Cluster.Spec.Topology.ClassNamespace = input.ClusterClassNamespace
 	// We have to retry the patch. The ClusterClass was just created so the client cache in the
 	// controller/webhook might not be aware of it yet. If the webhook is not aware of the ClusterClass
 	// we get a "Cluster ... can't be validated. ClusterClass ... can not be retrieved" error.
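As the trailing comment explains, the topology patch can race the webhook's informer cache, so the test retries it; the retry itself sits below the shown hunk. A sketch of that pattern with Gomega, with illustrative intervals:

    // Retry until the webhook cache has observed the freshly created ClusterClass.
    Eventually(func() error {
        return patchHelper.Patch(ctx, input.Cluster)
    }, "1m", "5s").Should(Succeed(), "Failed to patch Cluster to rebase it onto the new ClusterClass")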
test/e2e/clusterclass_changes_test.go (1 addition, 1 deletion)

@@ -24,7 +24,7 @@ import (
 	"k8s.io/utils/ptr"
 )

-var _ = Describe("When testing ClusterClass changes [ClusterClass]", func() {
+var _ = Describe("When testing ClusterClass changes [ClusterClass] [PR-Blocking]", func() {
 	ClusterClassChangesSpec(ctx, func() ClusterClassChangesSpecInput {
 		return ClusterClassChangesSpecInput{
 			E2EConfig: e2eConfig,
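Ginkgo matches focus expressions against the full spec text, so the added [PR-Blocking] tag lets CI select this spec with a focus regex such as \[PR-Blocking\] (for example, ginkgo --focus='\[PR-Blocking\]'); how this repository's CI wires that filter is not shown in this commit.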
test/e2e/quick_start.go (3 additions, 1 deletion)

@@ -144,9 +144,11 @@ func QuickStartSpec(ctx context.Context, inputGetter func() QuickStartSpecInput) {
 	variables := map[string]string{}
 	if input.ClassNamespace != nil {
 		variables["CLUSTER_CLASS_NAMESPACE"] = *input.ClassNamespace
+		By("Creating a cluster referencing a clusterClass from another namespace")
+	} else {
+		By("Creating a cluster referencing a clusterClass")
 	}

-	By("Creating a cluster referencing a clusterClass from another namespace")
 	clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
 		ClusterProxy: input.BootstrapClusterProxy,
 		ConfigCluster: clusterctl.ConfigClusterInput{
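The branch above is driven by the optional ClassNamespace field on QuickStartSpecInput (a *string, per the dereference in the diff). A sketch of a test enabling the cross-namespace path, assuming QuickStartSpecInput carries the same common fields as ClusterClassChangesSpecInput above; the namespace value is hypothetical:

    // Sketch: opting the quick-start spec into the cross-namespace ClusterClass path.
    var _ = Describe("When following the quick-start with a ClusterClass from another namespace", func() {
        QuickStartSpec(ctx, func() QuickStartSpecInput {
            return QuickStartSpecInput{
                E2EConfig:             e2eConfig,
                ClusterctlConfigPath:  clusterctlConfigPath,
                BootstrapClusterProxy: bootstrapClusterProxy,
                ArtifactFolder:        artifactFolder,
                SkipCleanup:           skipCleanup,
                ClassNamespace:        ptr.To("cc-namespace"), // hypothetical target namespace
            }
        })
    })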
