Convert replicated, system, not-safe-to-evict, and local storage pods to drainability rules
artemvmin committed Sep 30, 2023
1 parent 1dea12a commit 746bcad
Showing 17 changed files with 1,802 additions and 661 deletions.
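The heart of the change is the drainability rule abstraction. Its interface definition is not part of this diff, but from the new localstorage rule at the bottom, a rule is evidently a small object with a single method, roughly:

// Sketch only: inferred from the localstorage rule below, not copied
// from the rules package itself.
type Rule interface {
	// Drainable decides how a given pod should be treated during node drain.
	Drainable(*drainability.DrainContext, *apiv1.Pod) drainability.Status
}

Each check that was previously hard-coded in GetPodsForDeletionOnNodeDrain (replicated, system, not-safe-to-evict, and local storage pods) becomes one such rule, which returns a blocked or undefined status instead of a (blockingPod, error) pair.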
9 changes: 2 additions & 7 deletions cluster-autoscaler/simulator/cluster_test.go
@@ -152,7 +152,6 @@ func TestFindNodesToRemove(t *testing.T) {
 	tracker := NewUsageTracker()
 
 	tests := []findNodesToRemoveTestConfig{
-		// just an empty node, should be removed
 		{
 			name:        "just an empty node, should be removed",
 			pods:        []*apiv1.Pod{},
@@ -161,7 +160,6 @@ func TestFindNodesToRemove(t *testing.T) {
 			toRemove:    []NodeToBeRemoved{emptyNodeToRemove},
 			unremovable: []*UnremovableNode{},
 		},
-		// just a drainable node, but nowhere for pods to go to
 		{
 			name:        "just a drainable node, but nowhere for pods to go to",
 			pods:        []*apiv1.Pod{pod1, pod2},
@@ -170,7 +168,6 @@ func TestFindNodesToRemove(t *testing.T) {
 			toRemove:    []NodeToBeRemoved{},
 			unremovable: []*UnremovableNode{{Node: drainableNode, Reason: NoPlaceToMovePods}},
 		},
-		// drainable node, and a mostly empty node that can take its pods
 		{
 			name:        "drainable node, and a mostly empty node that can take its pods",
 			pods:        []*apiv1.Pod{pod1, pod2, pod3},
@@ -179,7 +176,6 @@ func TestFindNodesToRemove(t *testing.T) {
 			toRemove:    []NodeToBeRemoved{drainableNodeToRemove},
 			unremovable: []*UnremovableNode{{Node: nonDrainableNode, Reason: BlockedByPod, BlockingPod: &drain.BlockingPod{Pod: pod3, Reason: drain.NotReplicated}}},
 		},
-		// drainable node, and a full node that cannot fit anymore pods
 		{
 			name:        "drainable node, and a full node that cannot fit anymore pods",
 			pods:        []*apiv1.Pod{pod1, pod2, pod4},
@@ -188,7 +184,6 @@ func TestFindNodesToRemove(t *testing.T) {
 			toRemove:    []NodeToBeRemoved{},
 			unremovable: []*UnremovableNode{{Node: drainableNode, Reason: NoPlaceToMovePods}},
 		},
-		// 4 nodes, 1 empty, 1 drainable
 		{
 			name:        "4 nodes, 1 empty, 1 drainable",
 			pods:        []*apiv1.Pod{pod1, pod2, pod3, pod4},
@@ -209,8 +204,8 @@ func TestFindNodesToRemove(t *testing.T) {
 			r := NewRemovalSimulator(registry, clusterSnapshot, predicateChecker, tracker, testDeleteOptions(), nil, false)
 			toRemove, unremovable := r.FindNodesToRemove(test.candidates, destinations, time.Now(), nil)
 			fmt.Printf("Test scenario: %s, found len(toRemove)=%v, expected len(test.toRemove)=%v\n", test.name, len(toRemove), len(test.toRemove))
-			assert.Equal(t, toRemove, test.toRemove)
-			assert.Equal(t, unremovable, test.unremovable)
+			assert.Equal(t, test.toRemove, toRemove)
+			assert.Equal(t, test.unremovable, unremovable)
 		})
 	}
 }
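The assert swap at the end is more than cosmetic: testify's signature is assert.Equal(t, expected, actual, ...), so the expected value belongs first or failure messages label the two sides backwards.

// Before: failure output labels test.toRemove as the actual value.
assert.Equal(t, toRemove, test.toRemove)
// After: expectation first, matching testify's (t, expected, actual) order.
assert.Equal(t, test.toRemove, toRemove)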
6 changes: 3 additions & 3 deletions cluster-autoscaler/simulator/drain.go
@@ -50,6 +50,8 @@ func GetPodsToMove(nodeInfo *schedulerframework.NodeInfo, deleteOptions options.
 	drainCtx := &drainability.DrainContext{
 		RemainingPdbTracker: remainingPdbTracker,
 		DeleteOptions:       deleteOptions,
+		Listers:             listers,
+		Timestamp:           timestamp,
 	}
 	for _, podInfo := range nodeInfo.Pods {
 		pod := podInfo.Pod
@@ -73,14 +75,12 @@ func GetPodsToMove(nodeInfo *schedulerframework.NodeInfo, deleteOptions options.
 		}
 	}
 
-	pods, daemonSetPods, blockingPod, err = drain.GetPodsForDeletionOnNodeDrain(
+	pods, daemonSetPods = drain.GetPodsForDeletionOnNodeDrain(
 		pods,
 		remainingPdbTracker.GetPdbs(),
-		deleteOptions.SkipNodesWithSystemPods,
-		deleteOptions.SkipNodesWithLocalStorage,
 		deleteOptions.SkipNodesWithCustomControllerPods,
 		listers,
 		int32(deleteOptions.MinReplicaCount),
 		timestamp)
 	pods = append(pods, drainPods...)
 	daemonSetPods = append(daemonSetPods, drainDs...)
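The two Skip* flags dropped from the legacy call are not lost; they are now consulted by the corresponding rules through drainCtx.DeleteOptions (the localstorage rule below reads SkipNodesWithLocalStorage this way). A usage sketch of the entry point, mirroring the updated test further down; apart from rules.Default(), the names here stand in for whatever the caller already holds:

// Hypothetical caller of GetPodsToMove with the built-in rule set.
pods, dsPods, blockingPod, err := GetPodsToMove(
	nodeInfo, deleteOptions, rules.Default(), listers, tracker, time.Now())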
5 changes: 3 additions & 2 deletions cluster-autoscaler/simulator/drain_test.go
@@ -182,7 +182,7 @@ func TestGetPodsToMove(t *testing.T) {
 		desc         string
 		pods         []*apiv1.Pod
 		pdbs         []*policyv1.PodDisruptionBudget
-		rules        []rules.Rule
+		rules        rules.Rules
 		wantPods     []*apiv1.Pod
 		wantDs       []*apiv1.Pod
 		wantBlocking *drain.BlockingPod
@@ -312,9 +312,10 @@ func TestGetPodsToMove(t *testing.T) {
 				SkipNodesWithLocalStorage:         true,
 				SkipNodesWithCustomControllerPods: true,
 			}
+			rules := append(tc.rules, rules.Default()...)
 			tracker := pdb.NewBasicRemainingPdbTracker()
 			tracker.SetPdbs(tc.pdbs)
-			p, d, b, err := GetPodsToMove(schedulerframework.NewNodeInfo(tc.pods...), deleteOptions, tc.rules, nil, tracker, testTime)
+			p, d, b, err := GetPodsToMove(schedulerframework.NewNodeInfo(tc.pods...), deleteOptions, rules, nil, tracker, testTime)
 			if tc.wantErr {
 				assert.Error(t, err)
 			} else {
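Appending rules.Default() lets each test case inject a custom rule ahead of the built-in set. How Rules combines them is not shown in this diff; a plausible reading is that the slice is walked in order until some rule returns a decisive status, along these lines:

// Assumed semantics: Rules, Status, Outcome, and UndefinedOutcome are
// sketched here from their usage elsewhere in this commit, not quoted
// from the rules package.
func (rs Rules) Drainable(drainCtx *DrainContext, pod *apiv1.Pod) Status {
	for _, r := range rs {
		if status := r.Drainable(drainCtx, pod); status.Outcome != UndefinedOutcome {
			return status
		}
	}
	return NewUndefinedStatus()
}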
5 changes: 5 additions & 0 deletions cluster-autoscaler/simulator/drainability/context.go
@@ -17,12 +17,17 @@ limitations under the License.
 package drainability
 
 import (
+	"time"
+
 	"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/pdb"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/options"
+	kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
 )
 
 // DrainContext contains parameters for drainability rules.
 type DrainContext struct {
 	RemainingPdbTracker pdb.RemainingPdbTracker
 	DeleteOptions       options.NodeDeleteOptions
+	Listers             kube_util.ListerRegistry
+	Timestamp           time.Time
 }
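With Listers and Timestamp folded in, a DrainContext carries everything a rule may need, so rules can also be evaluated in isolation. A minimal construction, mirroring the one added to simulator/drain.go above (tracker, deleteOptions, and listers are placeholders for values the caller already has):

drainCtx := &drainability.DrainContext{
	RemainingPdbTracker: tracker,
	DeleteOptions:       deleteOptions,
	Listers:             listers,
	Timestamp:           time.Now(),
}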
47 changes: 47 additions & 0 deletions cluster-autoscaler/simulator/drainability/rules/localstorage/rule.go
@@ -0,0 +1,47 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package localstorage
+
+import (
+	"fmt"
+
+	apiv1 "k8s.io/api/core/v1"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability"
+	"k8s.io/autoscaler/cluster-autoscaler/utils/drain"
+	pod_util "k8s.io/autoscaler/cluster-autoscaler/utils/pod"
+)
+
+// Rule is a drainability rule on how to handle local storage pods.
+type Rule struct{}
+
+// New creates a new Rule.
+func New() *Rule {
+	return &Rule{}
+}
+
+// Drainable decides what to do with local storage pods on node drain.
+func (Rule) Drainable(drainCtx *drainability.DrainContext, pod *apiv1.Pod) drainability.Status {
+	if drain.IsPodLongTerminating(pod, drainCtx.Timestamp) || pod_util.IsDaemonSetPod(pod) || drain.HasSafeToEvictAnnotation(pod) || drain.IsPodTerminal(pod) {
+		return drainability.NewUndefinedStatus()
+	}
+
+	if drainCtx.DeleteOptions.SkipNodesWithLocalStorage && drain.HasBlockingLocalStorage(pod) {
+		return drainability.NewBlockedStatus(drain.LocalStorageRequested, fmt.Errorf("pod with local storage present: %s", pod.Name))
+	}
+
+	return drainability.NewUndefinedStatus()
+}
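To see the rule bite, feed it a pod that requests local storage. EmptyDir volumes are the canonical case drain.HasBlockingLocalStorage flags, so, assuming SkipNodesWithLocalStorage is set in the context's DeleteOptions, a sketch like this should come back blocked with reason drain.LocalStorageRequested:

// Illustrative pod; only the EmptyDir volume matters to this rule.
pod := &apiv1.Pod{
	ObjectMeta: metav1.ObjectMeta{Name: "scratch-pod", Namespace: "default"},
	Spec: apiv1.PodSpec{
		Volumes: []apiv1.Volume{{
			Name:         "scratch",
			VolumeSource: apiv1.VolumeSource{EmptyDir: &apiv1.EmptyDirVolumeSource{}},
		}},
	},
}
status := localstorage.New().Drainable(drainCtx, pod)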