Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

🌱 [POC][WIP] Test finalizer rereconcile #7614

Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
61 changes: 61 additions & 0 deletions cmd/clusterctl/client/cluster/ownergraph.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
package cluster

import (
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// OwnerGraph maps the UID of every object discovered in a namespace to a node
// describing that object and the OwnerReferences set on it.
type OwnerGraph map[string]OwnerGraphNode

// OwnerGraphNode pairs an object's reference with the OwnerReferences
// pointing at its owners, as discovered from the object graph.
type OwnerGraphNode struct {
	// Object identifies the discovered Kubernetes object.
	Object corev1.ObjectReference
	// Owners holds the OwnerReferences found on the object.
	Owners []metav1.OwnerReference
}

// nodeToOwnerRef builds a metav1.OwnerReference for the given graph node,
// carrying over BlockOwnerDeletion and Controller when attributes provide them.
func nodeToOwnerRef(n *node, attributes *ownerReferenceAttributes) metav1.OwnerReference {
	ownerRef := metav1.OwnerReference{
		APIVersion: n.identity.APIVersion,
		Kind:       n.identity.Kind,
		Name:       n.identity.Name,
		UID:        n.identity.UID,
	}
	if attributes == nil {
		return ownerRef
	}
	if blockDeletion := attributes.BlockOwnerDeletion; blockDeletion != nil {
		ownerRef.BlockOwnerDeletion = blockDeletion
	}
	if controller := attributes.Controller; controller != nil {
		ownerRef.Controller = controller
	}
	return ownerRef
}

// GetOwnerGraph discovers the objects in the given namespace (the types
// defined by the CRDs installed by clusterctl plus ConfigMaps/Secrets) and
// returns an OwnerGraph keyed by each object's UID.
// It connects to the cluster using the default kubeconfig discovery rules
// (empty path and context).
func GetOwnerGraph(namespace string) (OwnerGraph, error) {
	p := newProxy(Kubeconfig{Path: "", Context: ""})
	invClient := newInventoryClient(p, nil)

	graph := newObjectGraph(p, invClient)

	// Gets all the types defined by the CRDs installed by clusterctl plus the ConfigMap/Secret core types.
	if err := graph.getDiscoveryTypes(); err != nil {
		return OwnerGraph{}, errors.Wrap(err, "failed to retrieve discovery types")
	}

	// Discover the object graph for the selected types:
	// - Nodes are defined by the Kubernetes objects (Clusters, Machines etc.) identified during the discovery process.
	// - Edges are derived by the OwnerReferences between nodes.
	if err := graph.Discovery(namespace); err != nil {
		return OwnerGraph{}, errors.Wrap(err, "failed to discover the object graph")
	}

	owners := make(OwnerGraph, len(graph.uidToNode))
	for _, v := range graph.uidToNode {
		n := OwnerGraphNode{Object: v.identity, Owners: []metav1.OwnerReference{}}
		for owner, attributes := range v.owners {
			// Capture the range variable before taking its address so each
			// iteration passes a pointer to its own copy.
			attributes := attributes
			n.Owners = append(n.Owners, nodeToOwnerRef(owner, &attributes))
		}
		owners[string(v.identity.UID)] = n
	}
	return owners, nil
}
5 changes: 2 additions & 3 deletions internal/controllers/machine/machine_controller_phases.go
Original file line number Diff line number Diff line change
Expand Up @@ -116,9 +116,8 @@ func (r *Reconciler) reconcileExternal(ctx context.Context, cluster *clusterv1.C
return external.ReconcileOutput{}, err
}

// With the migration from v1alpha2 to v1alpha3, Machine controllers should be the owner for the
// infra Machines, hence remove any existing machineset controller owner reference
if controller := metav1.GetControllerOf(obj); controller != nil && controller.Kind == "MachineSet" {
// Ensure the Machine is the controller owner for infrastructure and bootstrap object it references.
if controller := metav1.GetControllerOf(obj); controller != nil && controller.Kind != "Machine" {
gv, err := schema.ParseGroupVersion(controller.APIVersion)
if err != nil {
return external.ReconcileOutput{}, err
Expand Down
13 changes: 9 additions & 4 deletions test/e2e/quick_start.go
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,8 @@ type QuickStartSpecInput struct {
// Flavor, if specified is the template flavor used to create the cluster for testing.
// If not specified, and the e2econfig variable IPFamily is IPV6, then "ipv6" is used,
// otherwise the default flavor is used.
Flavor *string
Flavor *string
PostMachinesProvisioned func(managementClusterProxy framework.ClusterProxy, workloadClusterNamespace, workloadClusterName string)
}

// QuickStartSpec implements a spec that mimics the operation described in the Cluster API quick start, that is
Expand Down Expand Up @@ -82,7 +83,7 @@ func QuickStartSpec(ctx context.Context, inputGetter func() QuickStartSpecInput)
if input.Flavor != nil {
flavor = *input.Flavor
}

clusterName := fmt.Sprintf("%s-%s", specName, util.RandomString(6))
clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
ClusterProxy: input.BootstrapClusterProxy,
ConfigCluster: clusterctl.ConfigClusterInput{
Expand All @@ -92,7 +93,7 @@ func QuickStartSpec(ctx context.Context, inputGetter func() QuickStartSpecInput)
InfrastructureProvider: clusterctl.DefaultInfrastructureProvider,
Flavor: flavor,
Namespace: namespace.Name,
ClusterName: fmt.Sprintf("%s-%s", specName, util.RandomString(6)),
ClusterName: clusterName,
KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersion),
ControlPlaneMachineCount: pointer.Int64(1),
WorkerMachineCount: pointer.Int64(1),
Expand All @@ -101,8 +102,12 @@ func QuickStartSpec(ctx context.Context, inputGetter func() QuickStartSpecInput)
WaitForClusterIntervals: input.E2EConfig.GetIntervals(specName, "wait-cluster"),
WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"),
WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
PostMachinesProvisioned: func() {
if input.PostMachinesProvisioned != nil {
input.PostMachinesProvisioned(input.BootstrapClusterProxy, namespace.Name, clusterName)
}
},
}, clusterResources)

By("PASSED!")
})

Expand Down
33 changes: 33 additions & 0 deletions test/e2e/quick_start_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,8 @@ package e2e
import (
. "github.com/onsi/ginkgo/v2"
"k8s.io/utils/pointer"

"sigs.k8s.io/cluster-api/test/framework"
)

var _ = Describe("When following the Cluster API quick-start [PR-Blocking]", func() {
Expand Down Expand Up @@ -75,3 +77,34 @@ var _ = Describe("When following the Cluster API quick-start with Ignition", fun
}
})
})

// Registers a quick-start spec that, after machines are provisioned, removes
// all finalizers while the Cluster is paused and asserts controllers restore
// them once the Cluster is unpaused.
var _ = Describe("When following the Cluster API quick-start check finalizers are correctly reconciled after deletion", func() {
	QuickStartSpec(ctx, func() QuickStartSpecInput {
		return QuickStartSpecInput{
			E2EConfig:             e2eConfig,
			ClusterctlConfigPath:  clusterctlConfigPath,
			BootstrapClusterProxy: bootstrapClusterProxy,
			ArtifactFolder:        artifactFolder,
			SkipCleanup:           skipCleanup,
			// Hook invoked by QuickStartSpec once all machines are provisioned.
			PostMachinesProvisioned: func(proxy framework.ClusterProxy, namespace, clusterName string) {
				framework.AssertFinalizersAfterDeletion(ctx, proxy.GetClient(), namespace, clusterName)
			},
		}
	})
})

// Same finalizer re-reconciliation check as above, but running the quick-start
// with the "topology" flavor so a ClusterClass-based Cluster is exercised.
var _ = Describe("When following the Cluster API quick-start with ClusterClass check finalizers are correctly reconciled after deletion [ClusterClass]", func() {
	QuickStartSpec(ctx, func() QuickStartSpecInput {
		return QuickStartSpecInput{
			E2EConfig:             e2eConfig,
			ClusterctlConfigPath:  clusterctlConfigPath,
			BootstrapClusterProxy: bootstrapClusterProxy,
			ArtifactFolder:        artifactFolder,
			SkipCleanup:           skipCleanup,
			Flavor:                pointer.String("topology"),
			// Hook invoked by QuickStartSpec once all machines are provisioned.
			PostMachinesProvisioned: func(proxy framework.ClusterProxy, namespace, clusterName string) {
				framework.AssertFinalizersAfterDeletion(ctx, proxy.GetClient(), namespace, clusterName)
			},
		}
	})
})
108 changes: 108 additions & 0 deletions test/framework/finalizer_helpers.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,108 @@
package framework

import (
"context"
"fmt"
"time"

. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
clusterctlcluster "sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster"
"sigs.k8s.io/cluster-api/util/patch"
)

// AssertFinalizersAfterDeletion removes all finalizers from the objects in the
// namespace's owner graph while the Cluster is paused, then unpauses it and
// asserts that controllers re-reconcile every finalizer that was present
// initially.
func AssertFinalizersAfterDeletion(ctx context.Context, cli client.Client, namespace, clusterName string) {
	// Snapshot the finalizers present before making any changes.
	initialFinalizers := getAllFinalizers(ctx, cli, namespace)

	clusterKey := client.ObjectKey{Namespace: namespace, Name: clusterName}

	// Pause the cluster by setting .spec.paused: "true" so controllers stop
	// re-adding finalizers while we remove them.
	setClusterPause(ctx, cli, clusterKey, true)

	removeFinalizers(ctx, cli, namespace)

	// Unpause the cluster by setting .spec.paused: "false"; controllers are now
	// expected to restore the finalizers.
	setClusterPause(ctx, cli, clusterKey, false)

	// Annotate the clusterClass, if one is in use, to speed up reconciliation.
	annotateClusterClass(ctx, cli, clusterKey)

	// Poll until every finalizer observed initially is back on its object.
	// The polled func must return an error and be chained with Should, otherwise
	// Eventually never evaluates the condition.
	Eventually(func() error {
		afterDeleteFinalizers := getAllFinalizers(ctx, cli, namespace)

		missing := map[string][]string{}
		for k, v := range initialFinalizers {
			if _, ok := afterDeleteFinalizers[k]; !ok {
				missing[k] = v
			}
		}
		if len(missing) > 0 {
			missingString := ""
			for k, v := range missing {
				missingString = fmt.Sprintf("%s\n%s %s", missingString, k, v)
			}
			return fmt.Errorf("finalizers not yet restored:%s", missingString)
		}
		return nil
	}).Should(Succeed(), "expected all finalizers to be re-reconciled after unpausing the Cluster")
}

// getAllFinalizers walks the namespace's owner graph and returns a map from
// "Kind/Name" to the finalizers set on each object that has any.
func getAllFinalizers(ctx context.Context, cli client.Client, namespace string) map[string][]string {
	graph, err := clusterctlcluster.GetOwnerGraph(namespace)
	Expect(err).To(BeNil())

	allFinalizers := map[string][]string{}
	for _, graphNode := range graph {
		ref := graphNode.Object
		u := &unstructured.Unstructured{}
		u.SetAPIVersion(ref.APIVersion)
		u.SetKind(ref.Kind)
		Expect(cli.Get(ctx, client.ObjectKey{Namespace: ref.Namespace, Name: ref.Name}, u)).To(Succeed())
		if u.GetFinalizers() != nil {
			allFinalizers[fmt.Sprintf("%s/%s", ref.Kind, ref.Name)] = u.GetFinalizers()
		}
	}
	return allFinalizers
}

// setClusterPause patches the Cluster identified by clusterKey, setting
// .spec.paused to the given value.
func setClusterPause(ctx context.Context, cli client.Client, clusterKey types.NamespacedName, value bool) {
	cluster := &clusterv1.Cluster{}
	Expect(cli.Get(ctx, clusterKey, cluster)).To(Succeed())

	payload := []byte(fmt.Sprintf("{\"spec\":{\"paused\":%v}}", value))
	pausePatch := client.RawPatch(types.MergePatchType, payload)
	Expect(cli.Patch(ctx, cluster, pausePatch)).To(Succeed())
}

// annotateClusterClass stamps a timestamp annotation on the ClusterClass
// referenced by the Cluster's topology, if the Cluster uses one, to trigger
// a fresh reconciliation.
func annotateClusterClass(ctx context.Context, cli client.Client, clusterKey types.NamespacedName) {
	cluster := &clusterv1.Cluster{}
	Expect(cli.Get(ctx, clusterKey, cluster)).To(Succeed())

	// Nothing to do for clusters that do not use a ClusterClass.
	if cluster.Spec.Topology == nil {
		return
	}

	clusterClass := &clusterv1.ClusterClass{}
	classKey := client.ObjectKey{Namespace: clusterKey.Namespace, Name: cluster.Spec.Topology.Class}
	Expect(cli.Get(ctx, classKey, clusterClass)).To(Succeed())

	payload := []byte(fmt.Sprintf("{\"metadata\":{\"annotations\":{\"cluster.x-k8s.io/modifiedAt\":\"%v\"}}}", time.Now().Format(time.RFC3339)))
	Expect(cli.Patch(ctx, clusterClass, client.RawPatch(types.MergePatchType, payload))).To(Succeed())
}

// removeFinalizers strips the finalizers from every object in the namespace's
// owner graph. Callers are expected to pause the Cluster first, so controllers
// do not immediately re-add the finalizers.
func removeFinalizers(ctx context.Context, cli client.Client, namespace string) {
	graph, err := clusterctlcluster.GetOwnerGraph(namespace)
	Expect(err).ToNot(HaveOccurred())
	for _, object := range graph {
		ref := object.Object
		obj := new(unstructured.Unstructured)
		obj.SetAPIVersion(ref.APIVersion)
		obj.SetKind(ref.Kind)

		// Note: Get populates name/namespace on obj, so there is no need to set them beforehand.
		Expect(cli.Get(ctx, client.ObjectKey{Namespace: namespace, Name: ref.Name}, obj)).To(Succeed())
		helper, err := patch.NewHelper(obj, cli)
		Expect(err).ToNot(HaveOccurred())
		// Remove all finalizers from the object.
		obj.SetFinalizers([]string{})
		Expect(helper.Patch(ctx, obj)).To(Succeed())
	}
}