diff --git a/cluster-autoscaler/simulator/drainability/rules/longterminating/rule.go b/cluster-autoscaler/simulator/drainability/rules/longterminating/rule.go
index 8b7c6aa64ca3..e5c50cf659b6 100644
--- a/cluster-autoscaler/simulator/drainability/rules/longterminating/rule.go
+++ b/cluster-autoscaler/simulator/drainability/rules/longterminating/rule.go
@@ -33,7 +33,7 @@ func New() *Rule {
 // Drainable decides what to do with long terminating pods on node drain.
 func (r *Rule) Drainable(drainCtx *drainability.DrainContext, pod *apiv1.Pod) drainability.Status {
 	if drain.IsPodLongTerminating(pod, drainCtx.Timestamp) {
-		return drainability.NewDrainableStatus()
+		return drainability.NewSkipStatus()
 	}
 	return drainability.NewUndefinedStatus()
 }
diff --git a/cluster-autoscaler/simulator/drainability/rules/longterminating/rule_test.go b/cluster-autoscaler/simulator/drainability/rules/longterminating/rule_test.go
index e9660aa34f6e..be0d87a9da80 100644
--- a/cluster-autoscaler/simulator/drainability/rules/longterminating/rule_test.go
+++ b/cluster-autoscaler/simulator/drainability/rules/longterminating/rule_test.go
@@ -61,7 +61,7 @@ func TestDrainable(t *testing.T) {
 					Phase: apiv1.PodUnknown,
 				},
 			},
-			want: drainability.NewDrainableStatus(),
+			want: drainability.NewSkipStatus(),
 		},
 		"long terminating pod with extended grace period": {
 			pod: &apiv1.Pod{
@@ -78,7 +78,7 @@ func TestDrainable(t *testing.T) {
 					Phase: apiv1.PodUnknown,
 				},
 			},
-			want: drainability.NewDrainableStatus(),
+			want: drainability.NewSkipStatus(),
 		},
 	} {
 		t.Run(desc, func(t *testing.T) {
diff --git a/cluster-autoscaler/simulator/drainability/rules/customcontroller/rule.go b/cluster-autoscaler/simulator/drainability/rules/replicacount/rule.go
similarity index 98%
rename from cluster-autoscaler/simulator/drainability/rules/customcontroller/rule.go
rename to cluster-autoscaler/simulator/drainability/rules/replicacount/rule.go
index 4661f12dc588..0612e11ea654 100644
--- a/cluster-autoscaler/simulator/drainability/rules/customcontroller/rule.go
+++ b/cluster-autoscaler/simulator/drainability/rules/replicacount/rule.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package customcontroller
+package replicacount
 
 import (
 	"fmt"
@@ -66,7 +66,7 @@ func (r *Rule) Drainable(drainCtx *drainability.DrainContext, pod *apiv1.Pod) dr
 			return drainability.NewBlockedStatus(drain.MinReplicasReached, fmt.Errorf("replication controller for %s/%s has too few replicas spec: %d min: %d", pod.Namespace, pod.Name, rc.Spec.Replicas, r.minReplicaCount))
 		}
 	} else if pod_util.IsDaemonSetPod(pod) {
-		if refKind == "DaemonSet" {
+		if refKind != "DaemonSet" {
 			// We don't have a listener for the other DaemonSet kind.
 			// TODO: Use a generic client for checking the reference.
 			return drainability.NewUndefinedStatus()
diff --git a/cluster-autoscaler/simulator/drainability/rules/customcontroller/rule_test.go b/cluster-autoscaler/simulator/drainability/rules/replicacount/rule_test.go
similarity index 70%
rename from cluster-autoscaler/simulator/drainability/rules/customcontroller/rule_test.go
rename to cluster-autoscaler/simulator/drainability/rules/replicacount/rule_test.go
index 59d001878dfb..342a69738bb5 100644
--- a/cluster-autoscaler/simulator/drainability/rules/customcontroller/rule_test.go
+++ b/cluster-autoscaler/simulator/drainability/rules/replicacount/rule_test.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package customcontroller
+package replicacount
 
 import (
 	"testing"
@@ -104,6 +104,21 @@ func TestDrainable(t *testing.T) {
 			},
 			rcs: []*apiv1.ReplicationController{&rc},
 		},
+		"RC-managed pod with missing reference": {
+			pod: &apiv1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:            "bar",
+					Namespace:       "default",
+					OwnerReferences: test.GenerateOwnerReferences("missing", "ReplicationController", "core/v1", ""),
+				},
+				Spec: apiv1.PodSpec{
+					NodeName: "node",
+				},
+			},
+			rcs:        []*apiv1.ReplicationController{&rc},
+			wantReason: drain.ControllerNotFound,
+			wantError:  true,
+		},
 		"DS-managed pod": {
 			pod: &apiv1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
@@ -116,6 +131,20 @@ func TestDrainable(t *testing.T) {
 				},
 			},
 		},
+		"DS-managed pod with missing reference": {
+			pod: &apiv1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:            "bar",
+					Namespace:       "default",
+					OwnerReferences: test.GenerateOwnerReferences("missing", "DaemonSet", "apps/v1", ""),
+				},
+				Spec: apiv1.PodSpec{
+					NodeName: "node",
+				},
+			},
+			wantReason: drain.ControllerNotFound,
+			wantError:  true,
+		},
 		"DS-managed pod by a custom Daemonset": {
 			pod: &apiv1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
@@ -131,6 +160,21 @@ func TestDrainable(t *testing.T) {
 				},
 			},
 		},
+		"DS-managed pod by a custom Daemonset with missing reference": {
+			pod: &apiv1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:            "bar",
+					Namespace:       "default",
+					OwnerReferences: test.GenerateOwnerReferences("missing", "CustomDaemonSet", "crd/v1", ""),
+					Annotations: map[string]string{
+						"cluster-autoscaler.kubernetes.io/daemonset-pod": "true",
+					},
+				},
+				Spec: apiv1.PodSpec{
+					NodeName: "node",
+				},
+			},
+		},
 		"Job-managed pod": {
 			pod: &apiv1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
@@ -141,6 +185,18 @@ func TestDrainable(t *testing.T) {
 			},
 			rcs: []*apiv1.ReplicationController{&rc},
 		},
+		"Job-managed pod with missing reference": {
+			pod: &apiv1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:            "bar",
+					Namespace:       "default",
+					OwnerReferences: test.GenerateOwnerReferences("missing", "Job", "batch/v1", ""),
+				},
+			},
+			rcs:        []*apiv1.ReplicationController{&rc},
+			wantReason: drain.ControllerNotFound,
+			wantError:  true,
+		},
 		"SS-managed pod": {
 			pod: &apiv1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
@@ -151,6 +207,18 @@ func TestDrainable(t *testing.T) {
 			},
 			rcs: []*apiv1.ReplicationController{&rc},
 		},
+		"SS-managed pod with missing reference": {
+			pod: &apiv1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:            "bar",
+					Namespace:       "default",
+					OwnerReferences: test.GenerateOwnerReferences("missing", "StatefulSet", "apps/v1", ""),
+				},
+			},
+			rcs:        []*apiv1.ReplicationController{&rc},
+			wantReason: drain.ControllerNotFound,
+			wantError:  true,
+		},
 		"RS-managed pod": {
 			pod: &apiv1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
@@ -178,6 +246,21 @@ func TestDrainable(t *testing.T) {
 			},
 			rss: []*appsv1.ReplicaSet{&rs},
 		},
+		"RS-managed pod with missing reference": {
+			pod: &apiv1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:            "bar",
+					Namespace:       "default",
+					OwnerReferences: test.GenerateOwnerReferences("missing", "ReplicaSet", "apps/v1", ""),
+				},
+				Spec: apiv1.PodSpec{
+					NodeName: "node",
+				},
+			},
+			rss:        []*appsv1.ReplicaSet{&rs},
+			wantReason: drain.ControllerNotFound,
+			wantError:  true,
+		},
 	} {
 		t.Run(desc, func(t *testing.T) {
 			var err error
diff --git a/cluster-autoscaler/simulator/drainability/rules/replicated/rule.go b/cluster-autoscaler/simulator/drainability/rules/replicated/rule.go
index cca20d11422d..8b652e18d3c1 100644
--- a/cluster-autoscaler/simulator/drainability/rules/replicated/rule.go
+++ b/cluster-autoscaler/simulator/drainability/rules/replicated/rule.go
@@ -27,14 +27,12 @@ import (
 // Rule is a drainability rule on how to handle replicated pods.
 type Rule struct {
 	skipNodesWithCustomControllerPods bool
-	minReplicaCount                   int
 }
 
 // New creates a new Rule.
-func New(skipNodesWithCustomControllerPods bool, minReplicaCount int) *Rule {
+func New(skipNodesWithCustomControllerPods bool) *Rule {
 	return &Rule{
 		skipNodesWithCustomControllerPods: skipNodesWithCustomControllerPods,
-		minReplicaCount:                   minReplicaCount,
 	}
 }
 
diff --git a/cluster-autoscaler/simulator/drainability/rules/replicated/rule_test.go b/cluster-autoscaler/simulator/drainability/rules/replicated/rule_test.go
index cee50dec100d..ce2943eb42c7 100644
--- a/cluster-autoscaler/simulator/drainability/rules/replicated/rule_test.go
+++ b/cluster-autoscaler/simulator/drainability/rules/replicated/rule_test.go
@@ -231,7 +231,7 @@ func TestDrainable(t *testing.T) {
 				Listers:   registry,
 				Timestamp: testTime,
 			}
-			status := New(test.skipNodesWithCustomControllerPods, 0).Drainable(drainCtx, test.pod)
+			status := New(test.skipNodesWithCustomControllerPods).Drainable(drainCtx, test.pod)
 			assert.Equal(t, test.wantReason, status.BlockingReason)
 			assert.Equal(t, test.wantError, status.Error != nil)
 		})
diff --git a/cluster-autoscaler/simulator/drainability/rules/rules.go b/cluster-autoscaler/simulator/drainability/rules/rules.go
index b02fb9aba0f0..facd618304b7 100644
--- a/cluster-autoscaler/simulator/drainability/rules/rules.go
+++ b/cluster-autoscaler/simulator/drainability/rules/rules.go
@@ -20,13 +20,13 @@ import (
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/pdb"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability"
-	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules/customcontroller"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules/daemonset"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules/localstorage"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules/longterminating"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules/mirror"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules/notsafetoevict"
 	pdbrule "k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules/pdb"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules/replicacount"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules/replicated"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules/safetoevict"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules/system"
@@ -52,7 +52,7 @@ func Default(deleteOptions options.NodeDeleteOptions) Rules {
 	}{
 		{rule: mirror.New()},
 		{rule: longterminating.New()},
-		{rule: customcontroller.New(deleteOptions.MinReplicaCount), skip: !deleteOptions.SkipNodesWithCustomControllerPods},
+		{rule: replicacount.New(deleteOptions.MinReplicaCount), skip: !deleteOptions.SkipNodesWithCustomControllerPods},
 
 		// Interrupting checks
 		{rule: daemonset.New()},
@@ -60,7 +60,7 @@ func Default(deleteOptions options.NodeDeleteOptions) Rules {
 		{rule: terminal.New()},
 
 		// Blocking checks
-		{rule: replicated.New(deleteOptions.SkipNodesWithCustomControllerPods, deleteOptions.MinReplicaCount)},
+		{rule: replicated.New(deleteOptions.SkipNodesWithCustomControllerPods)},
 		{rule: system.New(), skip: !deleteOptions.SkipNodesWithSystemPods},
 		{rule: notsafetoevict.New()},
 		{rule: localstorage.New(), skip: !deleteOptions.SkipNodesWithLocalStorage},
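Not part of the patch: a minimal sketch of how the reshuffled constructors fit together after this change, assuming only the signatures visible in the hunks above. The example package name, helper function names, and the options import path are illustrative, not taken from the PR.

```go
package example // hypothetical package, for illustration only

import (
	"time"

	apiv1 "k8s.io/api/core/v1"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules/longterminating"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules/replicacount"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules/replicated"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/options" // assumed import path for NodeDeleteOptions
)

// longTerminatingStatus shows the behavior change in the longterminating rule:
// a pod that is long terminating at "now" is reported with a skip status
// (ignored by the drain simulation) rather than a drainable status.
func longTerminatingStatus(pod *apiv1.Pod, now time.Time) drainability.Status {
	drainCtx := &drainability.DrainContext{Timestamp: now}
	return longterminating.New().Drainable(drainCtx, pod)
}

// newRules shows the constructor split: the replica-count check now lives in
// the renamed replicacount rule, so replicated.New no longer takes a
// minReplicaCount argument.
func newRules(deleteOptions options.NodeDeleteOptions) (*replicacount.Rule, *replicated.Rule) {
	return replicacount.New(deleteOptions.MinReplicaCount),
		replicated.New(deleteOptions.SkipNodesWithCustomControllerPods)
}
```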