diff --git a/cmd/clusterctl/client/cluster/ownergraph.go b/cmd/clusterctl/client/cluster/ownergraph.go
new file mode 100644
index 000000000000..20c622f086e1
--- /dev/null
+++ b/cmd/clusterctl/client/cluster/ownergraph.go
@@ -0,0 +1,61 @@
+package cluster
+
+import (
+	"github.com/pkg/errors"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// OwnerGraph contains a graph with the objects discovered by clusterctl as nodes and the
+// OwnerReference relationship between those objects as edges, keyed by the UID of the owned object.
+type OwnerGraph map[string]OwnerGraphNode
+
+// OwnerGraphNode is a single node of the OwnerGraph, linking an object to its OwnerReferences.
+type OwnerGraphNode struct {
+	Object corev1.ObjectReference
+	Owners []metav1.OwnerReference
+}
+
+func nodeToOwnerRef(n *node, attributes *ownerReferenceAttributes) metav1.OwnerReference {
+	ref := metav1.OwnerReference{
+		Name:       n.identity.Name,
+		APIVersion: n.identity.APIVersion,
+		Kind:       n.identity.Kind,
+		UID:        n.identity.UID,
+	}
+	if attributes != nil {
+		if attributes.BlockOwnerDeletion != nil {
+			ref.BlockOwnerDeletion = attributes.BlockOwnerDeletion
+		}
+		if attributes.Controller != nil {
+			ref.Controller = attributes.Controller
+		}
+	}
+	return ref
+}
+
+// GetOwnerGraph discovers the objects in the given namespace and returns them as an OwnerGraph.
+func GetOwnerGraph(namespace string) (OwnerGraph, error) {
+	p := newProxy(Kubeconfig{Path: "", Context: ""})
+	invClient := newInventoryClient(p, nil)
+
+	graph := newObjectGraph(p, invClient)
+
+	// Gets all the types defined by the CRDs installed by clusterctl plus the ConfigMap/Secret core types.
+	err := graph.getDiscoveryTypes()
+	if err != nil {
+		return OwnerGraph{}, errors.Wrap(err, "failed to retrieve discovery types")
+	}
+
+	// Discover the object graph for the selected types:
+	// - Nodes are the Kubernetes objects (Clusters, Machines etc.) identified during the discovery process.
+	// - Edges are derived from the OwnerReferences between nodes.
+	if err := graph.Discovery(namespace); err != nil {
+		return OwnerGraph{}, errors.Wrap(err, "failed to discover the object graph")
+	}
+	owners := OwnerGraph{}
+	for _, v := range graph.uidToNode {
+		n := OwnerGraphNode{Object: v.identity, Owners: []metav1.OwnerReference{}}
+		for owner, attributes := range v.owners {
+			n.Owners = append(n.Owners, nodeToOwnerRef(owner, &attributes))
+		}
+		owners[string(v.identity.UID)] = n
+	}
+	return owners, nil
+}
diff --git a/internal/controllers/machine/machine_controller_phases.go b/internal/controllers/machine/machine_controller_phases.go
index 4fa3685e3c3b..382fe3844a65 100644
--- a/internal/controllers/machine/machine_controller_phases.go
+++ b/internal/controllers/machine/machine_controller_phases.go
@@ -116,9 +116,8 @@ func (r *Reconciler) reconcileExternal(ctx context.Context, cluster *clusterv1.C
 		return external.ReconcileOutput{}, err
 	}
 
-	// With the migration from v1alpha2 to v1alpha3, Machine controllers should be the owner for the
-	// infra Machines, hence remove any existing machineset controller owner reference
-	if controller := metav1.GetControllerOf(obj); controller != nil && controller.Kind == "MachineSet" {
+	// Ensure the Machine is the controller owner of the infrastructure and bootstrap objects it references.
+	if controller := metav1.GetControllerOf(obj); controller != nil && controller.Kind != "Machine" {
 		gv, err := schema.ParseGroupVersion(controller.APIVersion)
 		if err != nil {
 			return external.ReconcileOutput{}, err
diff --git a/test/e2e/quick_start.go b/test/e2e/quick_start.go
index 58bb002d2452..5aa42250cba8 100644
--- a/test/e2e/quick_start.go
+++ b/test/e2e/quick_start.go
@@ -44,7 +44,8 @@ type QuickStartSpecInput struct {
 	// Flavor, if specified is the template flavor used to create the cluster for testing.
 	// If not specified, and the e2econfig variable IPFamily is IPV6, then "ipv6" is used,
 	// otherwise the default flavor is used.
-	Flavor *string
+	Flavor *string
+
+	// PostMachinesProvisioned is an optional callback invoked after the workload cluster machines are provisioned.
+	PostMachinesProvisioned func(managementClusterProxy framework.ClusterProxy, workloadClusterNamespace, workloadClusterName string)
 }
 
 // QuickStartSpec implements a spec that mimics the operation described in the Cluster API quick start, that is
@@ -82,7 +83,7 @@ func QuickStartSpec(ctx context.Context, inputGetter func() QuickStartSpecInput)
 		if input.Flavor != nil {
 			flavor = *input.Flavor
 		}
-
+		clusterName := fmt.Sprintf("%s-%s", specName, util.RandomString(6))
 		clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
 			ClusterProxy: input.BootstrapClusterProxy,
 			ConfigCluster: clusterctl.ConfigClusterInput{
@@ -92,7 +93,7 @@ func QuickStartSpec(ctx context.Context, inputGetter func() QuickStartSpecInput)
 				InfrastructureProvider:   clusterctl.DefaultInfrastructureProvider,
 				Flavor:                   flavor,
 				Namespace:                namespace.Name,
-				ClusterName:              fmt.Sprintf("%s-%s", specName, util.RandomString(6)),
+				ClusterName:              clusterName,
 				KubernetesVersion:        input.E2EConfig.GetVariable(KubernetesVersion),
 				ControlPlaneMachineCount: pointer.Int64(1),
 				WorkerMachineCount:       pointer.Int64(1),
@@ -101,8 +102,12 @@ func QuickStartSpec(ctx context.Context, inputGetter func() QuickStartSpecInput)
 			WaitForClusterIntervals:      input.E2EConfig.GetIntervals(specName, "wait-cluster"),
 			WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"),
 			WaitForMachineDeployments:    input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
+			PostMachinesProvisioned: func() {
+				if input.PostMachinesProvisioned != nil {
+					input.PostMachinesProvisioned(input.BootstrapClusterProxy, namespace.Name, clusterName)
+				}
+			},
 		}, clusterResources)
-
 		By("PASSED!")
 	})
diff --git a/test/e2e/quick_start_test.go b/test/e2e/quick_start_test.go
index bb79feee912d..758f346c96f3 100644
--- a/test/e2e/quick_start_test.go
+++ b/test/e2e/quick_start_test.go
@@ -22,6 +22,8 @@ package e2e
 import (
"github.com/onsi/ginkgo/v2" "k8s.io/utils/pointer" + + "sigs.k8s.io/cluster-api/test/framework" ) var _ = Describe("When following the Cluster API quick-start [PR-Blocking]", func() { @@ -75,3 +77,34 @@ var _ = Describe("When following the Cluster API quick-start with Ignition", fun } }) }) + +var _ = Describe("When following the Cluster API quick-start check finalizers are correctly reconciled after deletion", func() { + QuickStartSpec(ctx, func() QuickStartSpecInput { + return QuickStartSpecInput{ + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + PostMachinesProvisioned: func(proxy framework.ClusterProxy, namespace, clusterName string) { + framework.AssertFinalizersAfterDeletion(ctx, proxy.GetClient(), namespace, clusterName) + }, + } + }) +}) + +var _ = Describe("When following the Cluster API quick-start with ClusterClass check finalizers are correctly reconciled after deletion [ClusterClass]", func() { + QuickStartSpec(ctx, func() QuickStartSpecInput { + return QuickStartSpecInput{ + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + Flavor: pointer.String("topology"), + PostMachinesProvisioned: func(proxy framework.ClusterProxy, namespace, clusterName string) { + framework.AssertFinalizersAfterDeletion(ctx, proxy.GetClient(), namespace, clusterName) + }, + } + }) +}) diff --git a/test/framework/finalizer_helpers.go b/test/framework/finalizer_helpers.go new file mode 100644 index 000000000000..30000f781ab9 --- /dev/null +++ b/test/framework/finalizer_helpers.go @@ -0,0 +1,108 @@ +package framework + +import ( + "context" + "fmt" + "time" + + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterctlcluster "sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster" + "sigs.k8s.io/cluster-api/util/patch" +) + +func AssertFinalizersAfterDeletion(ctx context.Context, cli client.Client, namespace, clusterName string) { + // Check that the ownerReferences are as expected on the first iteration. + initialFinalizers := getAllFinalizers(ctx, cli, namespace) + + clusterKey := client.ObjectKey{Namespace: namespace, Name: clusterName} + + // Pause the cluster by setting .spec.paused: "true" + setClusterPause(ctx, cli, clusterKey, + true) + + removeFinalizers(ctx, cli, namespace) + + // Unpause the cluster by setting .spec.paused: "false" + setClusterPause(ctx, cli, clusterKey, + false) + + // Annotate the clusterClass, if one is in use, to speed up reconciliation. 
+	annotateClusterClass(ctx, cli, clusterKey)
+
+	// Check that the finalizers removed above are eventually restored on every object in the graph.
+	// Allow generous time for the controllers to resume reconciliation after the Cluster is unpaused.
+	Eventually(func(g Gomega) {
+		afterDeleteFinalizers := getAllFinalizers(ctx, cli, namespace)
+
+		missing := map[string][]string{}
+		for k, v := range initialFinalizers {
+			if _, ok := afterDeleteFinalizers[k]; !ok {
+				missing[k] = v
+			}
+		}
+		missingString := ""
+		for k, v := range missing {
+			missingString = fmt.Sprintf("%s\n%s %s", missingString, k, v)
+		}
+		g.Expect(missing).To(BeEmpty(), "expected the finalizers on the following objects to be restored:%s", missingString)
+	}, 5*time.Minute, 10*time.Second).Should(Succeed())
+}
+
+// getAllFinalizers returns a map of Kind/Name to the finalizers currently set on each object
+// in the ownership graph of the namespace.
+func getAllFinalizers(ctx context.Context, cli client.Client, namespace string) map[string][]string {
+	finalizers := map[string][]string{}
+	graph, err := clusterctlcluster.GetOwnerGraph(namespace)
+	Expect(err).ToNot(HaveOccurred())
+	for _, v := range graph {
+		obj := &unstructured.Unstructured{}
+		obj.SetAPIVersion(v.Object.APIVersion)
+		obj.SetKind(v.Object.Kind)
+		err := cli.Get(ctx, client.ObjectKey{Namespace: v.Object.Namespace, Name: v.Object.Name}, obj)
+		Expect(err).ToNot(HaveOccurred())
+		if obj.GetFinalizers() != nil {
+			finalizers[fmt.Sprintf("%s/%s", v.Object.Kind, v.Object.Name)] = obj.GetFinalizers()
+		}
+	}
+	return finalizers
+}
+
+// setClusterPause sets .spec.paused on the Cluster to the given value using a merge patch.
+func setClusterPause(ctx context.Context, cli client.Client, clusterKey types.NamespacedName, value bool) {
+	cluster := &clusterv1.Cluster{}
+	Expect(cli.Get(ctx, clusterKey, cluster)).To(Succeed())
+
+	pausePatch := client.RawPatch(types.MergePatchType, []byte(fmt.Sprintf("{\"spec\":{\"paused\":%v}}", value)))
+	Expect(cli.Patch(ctx, cluster, pausePatch)).To(Succeed())
+}
+
+// annotateClusterClass applies an annotation to the ClusterClass referenced by the Cluster, if any,
+// to trigger an immediate reconciliation instead of waiting for the next resync.
+func annotateClusterClass(ctx context.Context, cli client.Client, clusterKey types.NamespacedName) {
+	cluster := &clusterv1.Cluster{}
+	Expect(cli.Get(ctx, clusterKey, cluster)).To(Succeed())
+
+	if cluster.Spec.Topology != nil {
+		class := &clusterv1.ClusterClass{}
+		Expect(cli.Get(ctx, client.ObjectKey{Namespace: clusterKey.Namespace, Name: cluster.Spec.Topology.Class}, class)).To(Succeed())
+		annotationPatch := client.RawPatch(types.MergePatchType, []byte(fmt.Sprintf("{\"metadata\":{\"annotations\":{\"cluster.x-k8s.io/modifiedAt\":\"%v\"}}}", time.Now().Format(time.RFC3339))))
+		Expect(cli.Patch(ctx, class, annotationPatch)).To(Succeed())
+	}
+}
+
+// removeFinalizers removes all finalizers from the objects in the ownership graph of the namespace.
+func removeFinalizers(ctx context.Context, cli client.Client, namespace string) {
+	graph, err := clusterctlcluster.GetOwnerGraph(namespace)
+	Expect(err).ToNot(HaveOccurred())
+	for _, object := range graph {
+		ref := object.Object
+		// With the Cluster paused, strip the finalizers from every object in the graph.
+		obj := new(unstructured.Unstructured)
+		obj.SetAPIVersion(ref.APIVersion)
+		obj.SetKind(ref.Kind)
+		obj.SetName(ref.Name)
+
+		Expect(cli.Get(ctx, client.ObjectKey{Namespace: namespace, Name: object.Object.Name}, obj)).To(Succeed())
+		helper, err := patch.NewHelper(obj, cli)
+		Expect(err).ToNot(HaveOccurred())
+		obj.SetFinalizers([]string{})
+		Expect(helper.Patch(ctx, obj)).To(Succeed())
+	}
+}
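
For context, a minimal sketch of how the two new entry points introduced by this diff, clusterctlcluster.GetOwnerGraph and framework.AssertFinalizersAfterDeletion, can be driven together from a test. The helper name checkFinalizerResilience and its parameters are illustrative assumptions, not part of this change.

package e2e

import (
	"context"

	. "github.com/onsi/gomega"

	clusterctlcluster "sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster"
	"sigs.k8s.io/cluster-api/test/framework"
)

// checkFinalizerResilience is a hypothetical helper (not part of this change) showing the intended flow:
// discover the owner graph for the workload cluster namespace, then exercise the pause/strip/unpause
// cycle implemented by AssertFinalizersAfterDeletion.
func checkFinalizerResilience(ctx context.Context, proxy framework.ClusterProxy, namespace, clusterName string) {
	// GetOwnerGraph discovers every clusterctl-managed object in the namespace,
	// keyed by UID, together with its OwnerReferences.
	graph, err := clusterctlcluster.GetOwnerGraph(namespace)
	Expect(err).ToNot(HaveOccurred())
	Expect(graph).ToNot(BeEmpty())

	// Pause the Cluster, remove all finalizers, unpause, and wait for the controllers to restore them.
	framework.AssertFinalizersAfterDeletion(ctx, proxy.GetClient(), namespace, clusterName)
}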