
Commit

Merge pull request #6518 from azylinski/oss-mv-estimatorBuilder-from-ctx-to-orchestrator-init

Move estimatorBuilder from AutoscalingContext to Orchestrator Init
k8s-ci-robot authored Feb 9, 2024
2 parents 5e9330c + 399b16e commit 6cb5b5a
Showing 10 changed files with 53 additions and 35 deletions.
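In effect, this moves the node-count estimator dependency off the shared AutoscalingContext and injects it into each scale-up Orchestrator at Initialize time. The shape of the change, condensed from the hunks below (unrelated fields, parameters, and methods elided into comments):

Before, consumers reached the builder through the shared context:

    type AutoscalingContext struct {
        // ...
        EstimatorBuilder estimator.EstimatorBuilder
        // ...
    }

    expansionEstimator := o.autoscalingContext.EstimatorBuilder(/* ... */)

After, the builder is an explicit Initialize parameter that the orchestrator stores itself:

    type Orchestrator interface {
        Initialize(
            autoscalingContext *context.AutoscalingContext,
            processors *ca_processors.AutoscalingProcessors,
            clusterStateRegistry *clusterstate.ClusterStateRegistry,
            estimatorBuilder estimator.EstimatorBuilder,
            taintConfig taints.TaintConfig,
        )
        // ScaleUp and the remaining methods are unchanged.
    }

    expansionEstimator := o.estimatorBuilder(/* ... */)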
5 changes: 0 additions & 5 deletions cluster-autoscaler/context/autoscaling_context.go
@@ -24,7 +24,6 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/core/scaledown"
"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/pdb"
"k8s.io/autoscaler/cluster-autoscaler/debuggingsnapshot"
"k8s.io/autoscaler/cluster-autoscaler/estimator"
"k8s.io/autoscaler/cluster-autoscaler/expander"
processor_callbacks "k8s.io/autoscaler/cluster-autoscaler/processors/callbacks"
"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
@@ -52,8 +51,6 @@ type AutoscalingContext struct {
ClusterSnapshot clustersnapshot.ClusterSnapshot
// ExpanderStrategy is the strategy used to choose which node group to expand when scaling up
ExpanderStrategy expander.Strategy
- // EstimatorBuilder is the builder function for node count estimator to be used.
- EstimatorBuilder estimator.EstimatorBuilder
// ProcessorCallbacks is interface defining extra callback methods which can be called by processors used in extension points.
ProcessorCallbacks processor_callbacks.ProcessorCallbacks
// DebuggingSnapshotter is the interface for capturing the debugging snapshot
@@ -106,7 +103,6 @@ func NewAutoscalingContext(
autoscalingKubeClients *AutoscalingKubeClients,
cloudProvider cloudprovider.CloudProvider,
expanderStrategy expander.Strategy,
- estimatorBuilder estimator.EstimatorBuilder,
processorCallbacks processor_callbacks.ProcessorCallbacks,
debuggingSnapshotter debuggingsnapshot.DebuggingSnapshotter,
remainingPdbTracker pdb.RemainingPdbTracker,
@@ -119,7 +115,6 @@
PredicateChecker: predicateChecker,
ClusterSnapshot: clusterSnapshot,
ExpanderStrategy: expanderStrategy,
- EstimatorBuilder: estimatorBuilder,
ProcessorCallbacks: processorCallbacks,
DebuggingSnapshotter: debuggingSnapshotter,
RemainingPdbTracker: remainingPdbTracker,
5 changes: 4 additions & 1 deletion cluster-autoscaler/core/scaleup/orchestrator/orchestrator.go
@@ -51,6 +51,7 @@ type ScaleUpOrchestrator struct {
resourceManager *resource.Manager
clusterStateRegistry *clusterstate.ClusterStateRegistry
scaleUpExecutor *scaleUpExecutor
+ estimatorBuilder estimator.EstimatorBuilder
taintConfig taints.TaintConfig
initialized bool
}
@@ -67,11 +68,13 @@ func (o *ScaleUpOrchestrator) Initialize(
autoscalingContext *context.AutoscalingContext,
processors *ca_processors.AutoscalingProcessors,
clusterStateRegistry *clusterstate.ClusterStateRegistry,
+ estimatorBuilder estimator.EstimatorBuilder,
taintConfig taints.TaintConfig,
) {
o.autoscalingContext = autoscalingContext
o.processors = processors
o.clusterStateRegistry = clusterStateRegistry
+ o.estimatorBuilder = estimatorBuilder
o.taintConfig = taintConfig
o.resourceManager = resource.NewManager(processors.CustomResourcesProcessor)
o.scaleUpExecutor = newScaleUpExecutor(autoscalingContext, processors.ScaleStateNotifier)
@@ -490,7 +493,7 @@ func (o *ScaleUpOrchestrator) ComputeExpansionOption(
option.SimilarNodeGroups = o.ComputeSimilarNodeGroups(nodeGroup, nodeInfos, schedulablePods, now)

estimateStart := time.Now()
- expansionEstimator := o.autoscalingContext.EstimatorBuilder(
+ expansionEstimator := o.estimatorBuilder(
o.autoscalingContext.PredicateChecker,
o.autoscalingContext.ClusterSnapshot,
estimator.NewEstimationContext(o.autoscalingContext.MaxNodesTotal, option.SimilarNodeGroups, currentNodeCount),
29 changes: 20 additions & 9 deletions cluster-autoscaler/core/scaleup/orchestrator/orchestrator_test.go
@@ -975,7 +975,7 @@ func runSimpleScaleUpTest(t *testing.T, config *ScaleUpTestConfig) *ScaleUpTestR
processors := NewTestProcessors(&context)
processors.ScaleStateNotifier.Register(clusterState)
orchestrator := New()
- orchestrator.Initialize(&context, processors, clusterState, taints.TaintConfig{})
+ orchestrator.Initialize(&context, processors, clusterState, newEstimatorBuilder(), taints.TaintConfig{})
expander := NewMockRepotingStrategy(t, config.ExpansionOptionToChoose)
context.ExpanderStrategy = expander

@@ -1078,7 +1078,7 @@ func TestScaleUpUnhealthy(t *testing.T) {

processors := NewTestProcessors(&context)
suOrchestrator := New()
- suOrchestrator.Initialize(&context, processors, clusterState, taints.TaintConfig{})
+ suOrchestrator.Initialize(&context, processors, clusterState, newEstimatorBuilder(), taints.TaintConfig{})
scaleUpStatus, err := suOrchestrator.ScaleUp([]*apiv1.Pod{p3}, nodes, []*appsv1.DaemonSet{}, nodeInfos)

assert.NoError(t, err)
@@ -1128,7 +1128,7 @@ func TestBinpackingLimiter(t *testing.T) {
processors.BinpackingLimiter = &MockBinpackingLimiter{}

suOrchestrator := New()
- suOrchestrator.Initialize(&context, processors, clusterState, taints.TaintConfig{})
+ suOrchestrator.Initialize(&context, processors, clusterState, newEstimatorBuilder(), taints.TaintConfig{})

expander := NewMockRepotingStrategy(t, nil)
context.ExpanderStrategy = expander
@@ -1178,7 +1178,7 @@ func TestScaleUpNoHelp(t *testing.T) {

processors := NewTestProcessors(&context)
suOrchestrator := New()
- suOrchestrator.Initialize(&context, processors, clusterState, taints.TaintConfig{})
+ suOrchestrator.Initialize(&context, processors, clusterState, newEstimatorBuilder(), taints.TaintConfig{})
scaleUpStatus, err := suOrchestrator.ScaleUp([]*apiv1.Pod{p3}, nodes, []*appsv1.DaemonSet{}, nodeInfos)
processors.ScaleUpStatusProcessor.Process(&context, scaleUpStatus)

@@ -1330,7 +1330,7 @@ func TestComputeSimilarNodeGroups(t *testing.T) {
assert.NoError(t, clusterState.UpdateNodes(nodes, nodeInfos, time.Now()))

suOrchestrator := &ScaleUpOrchestrator{}
- suOrchestrator.Initialize(&ctx, &processors.AutoscalingProcessors{NodeGroupSetProcessor: nodeGroupSetProcessor}, clusterState, taints.TaintConfig{})
+ suOrchestrator.Initialize(&ctx, &processors.AutoscalingProcessors{NodeGroupSetProcessor: nodeGroupSetProcessor}, clusterState, newEstimatorBuilder(), taints.TaintConfig{})
similarNodeGroups := suOrchestrator.ComputeSimilarNodeGroups(provider.GetNodeGroup(tc.nodeGroup), nodeInfos, tc.schedulablePods, now)

var gotSimilarNodeGroups []string
@@ -1400,7 +1400,7 @@ func TestScaleUpBalanceGroups(t *testing.T) {

processors := NewTestProcessors(&context)
suOrchestrator := New()
- suOrchestrator.Initialize(&context, processors, clusterState, taints.TaintConfig{})
+ suOrchestrator.Initialize(&context, processors, clusterState, newEstimatorBuilder(), taints.TaintConfig{})
scaleUpStatus, typedErr := suOrchestrator.ScaleUp(pods, nodes, []*appsv1.DaemonSet{}, nodeInfos)

assert.NoError(t, typedErr)
@@ -1462,7 +1462,7 @@ func TestScaleUpAutoprovisionedNodeGroup(t *testing.T) {
nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())

suOrchestrator := New()
- suOrchestrator.Initialize(&context, processors, clusterState, taints.TaintConfig{})
+ suOrchestrator.Initialize(&context, processors, clusterState, newEstimatorBuilder(), taints.TaintConfig{})
scaleUpStatus, err := suOrchestrator.ScaleUp([]*apiv1.Pod{p1}, nodes, []*appsv1.DaemonSet{}, nodeInfos)
assert.NoError(t, err)
assert.True(t, scaleUpStatus.WasSuccessful())
@@ -1517,7 +1517,7 @@ func TestScaleUpBalanceAutoprovisionedNodeGroups(t *testing.T) {
nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())

suOrchestrator := New()
- suOrchestrator.Initialize(&context, processors, clusterState, taints.TaintConfig{})
+ suOrchestrator.Initialize(&context, processors, clusterState, newEstimatorBuilder(), taints.TaintConfig{})
scaleUpStatus, err := suOrchestrator.ScaleUp([]*apiv1.Pod{p1, p2, p3}, nodes, []*appsv1.DaemonSet{}, nodeInfos)
assert.NoError(t, err)
assert.True(t, scaleUpStatus.WasSuccessful())
@@ -1572,7 +1572,7 @@ func TestScaleUpToMeetNodeGroupMinSize(t *testing.T) {
clusterState.UpdateNodes(nodes, nodeInfos, time.Now())

suOrchestrator := New()
- suOrchestrator.Initialize(&context, processors, clusterState, taints.TaintConfig{})
+ suOrchestrator.Initialize(&context, processors, clusterState, newEstimatorBuilder(), taints.TaintConfig{})
scaleUpStatus, err := suOrchestrator.ScaleUpToNodeGroupMinSize(nodes, nodeInfos)
assert.NoError(t, err)
assert.True(t, scaleUpStatus.WasSuccessful())
@@ -1685,3 +1685,14 @@ func simplifyScaleUpStatus(scaleUpStatus *status.ScaleUpStatus) ScaleUpStatusInf
PodsAwaitEvaluation: ExtractPodNames(scaleUpStatus.PodsAwaitEvaluation),
}
}

+ func newEstimatorBuilder() estimator.EstimatorBuilder {
+     estimatorBuilder, _ := estimator.NewEstimatorBuilder(
+         estimator.BinpackingEstimatorName,
+         estimator.NewThresholdBasedEstimationLimiter(nil),
+         estimator.NewDecreasingPodOrderer(),
+         nil,
+     )
+
+     return estimatorBuilder
+ }
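Both test packages in this commit gain an identical newEstimatorBuilder helper. The error from estimator.NewEstimatorBuilder is deliberately discarded: the estimator name is hard-coded to estimator.BinpackingEstimatorName, a valid name, and, as the comment deleted from core/test/common.go further down explains, a test that somehow ended up with a nil builder would fail on its own anyway.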
@@ -24,6 +24,7 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/clusterstate"
"k8s.io/autoscaler/cluster-autoscaler/context"
"k8s.io/autoscaler/cluster-autoscaler/core/scaleup"
"k8s.io/autoscaler/cluster-autoscaler/estimator"
ca_processors "k8s.io/autoscaler/cluster-autoscaler/processors"
"k8s.io/autoscaler/cluster-autoscaler/processors/provreq"
"k8s.io/autoscaler/cluster-autoscaler/processors/status"
@@ -61,10 +62,11 @@ func (o *WrapperOrchestrator) Initialize(
autoscalingContext *context.AutoscalingContext,
processors *ca_processors.AutoscalingProcessors,
clusterStateRegistry *clusterstate.ClusterStateRegistry,
+ estimatorBuilder estimator.EstimatorBuilder,
taintConfig taints.TaintConfig,
) {
- o.scaleUpOrchestrator.Initialize(autoscalingContext, processors, clusterStateRegistry, taintConfig)
- o.provReqOrchestrator.Initialize(autoscalingContext, processors, clusterStateRegistry, taintConfig)
+ o.scaleUpOrchestrator.Initialize(autoscalingContext, processors, clusterStateRegistry, estimatorBuilder, taintConfig)
+ o.provReqOrchestrator.Initialize(autoscalingContext, processors, clusterStateRegistry, estimatorBuilder, taintConfig)
}

// ScaleUp run scaleUp function for regular pods of pods from ProvisioningRequest.
@@ -24,6 +24,7 @@ import (
apiv1 "k8s.io/api/core/v1"
"k8s.io/autoscaler/cluster-autoscaler/clusterstate"
"k8s.io/autoscaler/cluster-autoscaler/context"
"k8s.io/autoscaler/cluster-autoscaler/estimator"
ca_processors "k8s.io/autoscaler/cluster-autoscaler/processors"
"k8s.io/autoscaler/cluster-autoscaler/processors/provreq"
"k8s.io/autoscaler/cluster-autoscaler/processors/status"
@@ -78,6 +79,7 @@ func (f *fakeScaleUp) Initialize(
autoscalingContext *context.AutoscalingContext,
processors *ca_processors.AutoscalingProcessors,
clusterStateRegistry *clusterstate.ClusterStateRegistry,
+ estimatorBuilder estimator.EstimatorBuilder,
taintConfig taints.TaintConfig,
) {
}
2 changes: 2 additions & 0 deletions cluster-autoscaler/core/scaleup/scaleup.go
@@ -21,6 +21,7 @@ import (
apiv1 "k8s.io/api/core/v1"
"k8s.io/autoscaler/cluster-autoscaler/clusterstate"
"k8s.io/autoscaler/cluster-autoscaler/context"
"k8s.io/autoscaler/cluster-autoscaler/estimator"
ca_processors "k8s.io/autoscaler/cluster-autoscaler/processors"
"k8s.io/autoscaler/cluster-autoscaler/processors/status"
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
@@ -36,6 +37,7 @@ type Orchestrator interface {
autoscalingContext *context.AutoscalingContext,
processors *ca_processors.AutoscalingProcessors,
clusterStateRegistry *clusterstate.ClusterStateRegistry,
+ estimatorBuilder estimator.EstimatorBuilder,
taintConfig taints.TaintConfig,
)
// ScaleUp tries to scale the cluster up. Returns appropriate status or error if
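Since Orchestrator is the injection point NewStaticAutoscaler accepts for custom scale-up logic, any out-of-tree implementation has to adopt the widened Initialize signature even if it never uses the builder; the fakeScaleUp stub later in this diff does exactly that. A minimal conforming sketch (hypothetical type name; the interface's other methods are omitted):

    type noopScaleUp struct{}

    func (n *noopScaleUp) Initialize(
        autoscalingContext *context.AutoscalingContext,
        processors *ca_processors.AutoscalingProcessors,
        clusterStateRegistry *clusterstate.ClusterStateRegistry,
        estimatorBuilder estimator.EstimatorBuilder, // accepted but unused, as in fakeScaleUp
        taintConfig taints.TaintConfig,
    ) {
    }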
3 changes: 1 addition & 2 deletions cluster-autoscaler/core/static_autoscaler.go
@@ -159,7 +159,6 @@ func NewStaticAutoscaler(
autoscalingKubeClients,
cloudProvider,
expanderStrategy,
- estimatorBuilder,
processorCallbacks,
debuggingSnapshotter,
remainingPdbTracker,
@@ -192,7 +191,7 @@
if scaleUpOrchestrator == nil {
scaleUpOrchestrator = orchestrator.New()
}
- scaleUpOrchestrator.Initialize(autoscalingContext, processors, clusterStateRegistry, taintConfig)
+ scaleUpOrchestrator.Initialize(autoscalingContext, processors, clusterStateRegistry, estimatorBuilder, taintConfig)

// Set the initial scale times to be less than the start time so as to
// not start in cooldown mode.
24 changes: 18 additions & 6 deletions cluster-autoscaler/core/static_autoscaler_test.go
@@ -271,7 +271,8 @@ func setupAutoscaler(config *autoscalerSetupConfig) (*StaticAutoscaler, error) {

sdPlanner, sdActuator := newScaleDownPlannerAndActuator(&context, processors, clusterState)
suOrchestrator := orchestrator.New()
- suOrchestrator.Initialize(&context, processors, clusterState, taints.TaintConfig{})
+
+ suOrchestrator.Initialize(&context, processors, clusterState, newEstimatorBuilder(), taints.TaintConfig{})

autoscaler := &StaticAutoscaler{
AutoscalingContext: &context,
@@ -362,7 +363,7 @@ func TestStaticAutoscalerRunOnce(t *testing.T) {
clusterState := clusterstate.NewClusterStateRegistry(provider, clusterStateConfig, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(options.NodeGroupDefaults))
sdPlanner, sdActuator := newScaleDownPlannerAndActuator(&context, processors, clusterState)
suOrchestrator := orchestrator.New()
- suOrchestrator.Initialize(&context, processors, clusterState, taints.TaintConfig{})
+ suOrchestrator.Initialize(&context, processors, clusterState, newEstimatorBuilder(), taints.TaintConfig{})

autoscaler := &StaticAutoscaler{
AutoscalingContext: &context,
@@ -561,7 +562,7 @@ func TestStaticAutoscalerRunOnceWithScaleDownDelayPerNG(t *testing.T) {

sdPlanner, sdActuator := newScaleDownPlannerAndActuator(&context, processors, clusterState)
suOrchestrator := orchestrator.New()
- suOrchestrator.Initialize(&context, processors, clusterState, taints.TaintConfig{})
+ suOrchestrator.Initialize(&context, processors, clusterState, newEstimatorBuilder(), taints.TaintConfig{})

autoscaler := &StaticAutoscaler{
AutoscalingContext: &context,
@@ -786,7 +787,7 @@ func TestStaticAutoscalerRunOnceWithAutoprovisionedEnabled(t *testing.T) {

sdPlanner, sdActuator := newScaleDownPlannerAndActuator(&context, processors, clusterState)
suOrchestrator := orchestrator.New()
- suOrchestrator.Initialize(&context, processors, clusterState, taints.TaintConfig{})
+ suOrchestrator.Initialize(&context, processors, clusterState, newEstimatorBuilder(), taints.TaintConfig{})

autoscaler := &StaticAutoscaler{
AutoscalingContext: &context,
@@ -936,7 +937,7 @@ func TestStaticAutoscalerRunOnceWithALongUnregisteredNode(t *testing.T) {

sdPlanner, sdActuator := newScaleDownPlannerAndActuator(&context, processors, clusterState)
suOrchestrator := orchestrator.New()
- suOrchestrator.Initialize(&context, processors, clusterState, taints.TaintConfig{})
+ suOrchestrator.Initialize(&context, processors, clusterState, newEstimatorBuilder(), taints.TaintConfig{})

autoscaler := &StaticAutoscaler{
AutoscalingContext: &context,
@@ -1084,7 +1085,7 @@ func TestStaticAutoscalerRunOncePodsWithPriorities(t *testing.T) {
clusterState := clusterstate.NewClusterStateRegistry(provider, clusterStateConfig, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(options.NodeGroupDefaults))
sdPlanner, sdActuator := newScaleDownPlannerAndActuator(&context, processors, clusterState)
suOrchestrator := orchestrator.New()
- suOrchestrator.Initialize(&context, processors, clusterState, taints.TaintConfig{})
+ suOrchestrator.Initialize(&context, processors, clusterState, newEstimatorBuilder(), taints.TaintConfig{})

autoscaler := &StaticAutoscaler{
AutoscalingContext: &context,
@@ -2409,3 +2410,14 @@ func newScaleDownPlannerAndActuator(ctx *context.AutoscalingContext, p *ca_proce
wrapper := legacy.NewScaleDownWrapper(sd, actuator)
return wrapper, wrapper
}

+ func newEstimatorBuilder() estimator.EstimatorBuilder {
+     estimatorBuilder, _ := estimator.NewEstimatorBuilder(
+         estimator.BinpackingEstimatorName,
+         estimator.NewThresholdBasedEstimationLimiter(nil),
+         estimator.NewDecreasingPodOrderer(),
+         nil,
+     )
+
+     return estimatorBuilder
+ }
10 changes: 0 additions & 10 deletions cluster-autoscaler/core/test/common.go
@@ -31,7 +31,6 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/deletiontracker"
"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/pdb"
"k8s.io/autoscaler/cluster-autoscaler/debuggingsnapshot"
"k8s.io/autoscaler/cluster-autoscaler/estimator"
"k8s.io/autoscaler/cluster-autoscaler/expander"
"k8s.io/autoscaler/cluster-autoscaler/expander/random"
"k8s.io/autoscaler/cluster-autoscaler/metrics"
@@ -211,14 +210,6 @@ func NewScaleTestAutoscalingContext(
if err != nil {
return context.AutoscalingContext{}, err
}
- // Ignoring error here is safe - if a test doesn't specify valid estimatorName,
- // it either doesn't need one, or should fail when it turns out to be nil.
- estimatorBuilder, _ := estimator.NewEstimatorBuilder(
-     options.EstimatorName,
-     estimator.NewThresholdBasedEstimationLimiter(nil),
-     estimator.NewDecreasingPodOrderer(),
-     /* EstimationAnalyserFunc */ nil,
- )
predicateChecker, err := predicatechecker.NewTestPredicateChecker()
if err != nil {
return context.AutoscalingContext{}, err
@@ -240,7 +231,6 @@
PredicateChecker: predicateChecker,
ClusterSnapshot: clusterSnapshot,
ExpanderStrategy: random.NewStrategy(),
- EstimatorBuilder: estimatorBuilder,
ProcessorCallbacks: processorCallbacks,
DebuggingSnapshotter: debuggingSnapshotter,
RemainingPdbTracker: remainingPdbTracker,
@@ -24,6 +24,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/autoscaler/cluster-autoscaler/clusterstate"
"k8s.io/autoscaler/cluster-autoscaler/context"
"k8s.io/autoscaler/cluster-autoscaler/estimator"
"k8s.io/autoscaler/cluster-autoscaler/processors/status"
"k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/apis/autoscaling.x-k8s.io/v1beta1"
provreq_pods "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/pods"
@@ -66,6 +67,7 @@ func (o *provReqOrchestrator) Initialize(
autoscalingContext *context.AutoscalingContext,
processors *ca_processors.AutoscalingProcessors,
clusterStateRegistry *clusterstate.ClusterStateRegistry,
+ estimatorBuilder estimator.EstimatorBuilder,
taintConfig taints.TaintConfig,
) {
o.initialized = true
