Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

optimize PA messages: const 'HPA' -> actual pa type #354

Merged
merged 1 commit into from
Nov 8, 2024
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
25 changes: 13 additions & 12 deletions pkg/controller/podautoscaler/podautoscaler_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -309,13 +309,14 @@ func (r *PodAutoscalerReconciler) reconcileHPA(ctx context.Context, pa autoscali
// while allowing for customization in the specific stages mentioned above.
func (r *PodAutoscalerReconciler) reconcileCustomPA(ctx context.Context, pa autoscalingv1alpha1.PodAutoscaler) (ctrl.Result, error) {
paStatusOriginal := pa.Status.DeepCopy()
paType := pa.Spec.ScalingStrategy
scaleReference := fmt.Sprintf("%s/%s/%s", pa.Spec.ScaleTargetRef.Kind, pa.Namespace, pa.Spec.ScaleTargetRef.Name)

targetGV, err := schema.ParseGroupVersion(pa.Spec.ScaleTargetRef.APIVersion)
if err != nil {
r.EventRecorder.Event(&pa, corev1.EventTypeWarning, "FailedGetScale", err.Error())
// TODO: convert conditionType to type instead of using string
setCondition(&pa, "AbleToScale", metav1.ConditionFalse, "FailedGetScale", "the PodAutoscaler controller was unable to get the target's current scale: %v", err)
setCondition(&pa, "AbleToScale", metav1.ConditionFalse, "FailedGetScale", "the %s controller was unable to get the target's current scale: %v", paType, err)
if err := r.updateStatusIfNeeded(ctx, paStatusOriginal, &pa); err != nil {
return ctrl.Result{}, err
}
Expand All @@ -329,7 +330,7 @@ func (r *PodAutoscalerReconciler) reconcileCustomPA(ctx context.Context, pa auto
mappings, err := r.Mapper.RESTMappings(targetGK)
if err != nil {
r.EventRecorder.Event(&pa, corev1.EventTypeWarning, "FailedGetScale", err.Error())
setCondition(&pa, "AbleToScale", metav1.ConditionFalse, "FailedGetScale", "the HPA controller was unable to get the target's current scale: %v", err)
setCondition(&pa, "AbleToScale", metav1.ConditionFalse, "FailedGetScale", "the %s controller was unable to get the target's current scale: %v", paType, err)
if err := r.updateStatusIfNeeded(ctx, paStatusOriginal, &pa); err != nil {
return ctrl.Result{}, err
}
Expand All @@ -340,14 +341,14 @@ func (r *PodAutoscalerReconciler) reconcileCustomPA(ctx context.Context, pa auto
scale, targetGR, err := r.scaleForResourceMappings(ctx, pa.Namespace, pa.Spec.ScaleTargetRef.Name, mappings)
if err != nil {
r.EventRecorder.Event(&pa, corev1.EventTypeWarning, "FailedGetScale", err.Error())
setCondition(&pa, "AbleToScale", metav1.ConditionFalse, "FailedGetScale", "the HPA controller was unable to get the target's current scale: %v", err)
setCondition(&pa, "AbleToScale", metav1.ConditionFalse, "FailedGetScale", "the %s controller was unable to get the target's current scale: %v", paType, err)
if err := r.updateStatusIfNeeded(ctx, paStatusOriginal, &pa); err != nil {
return ctrl.Result{}, err
}
return ctrl.Result{}, fmt.Errorf("failed to query scale subresource for %s: %v", scaleReference, err)
}

setCondition(&pa, "AbleToScale", metav1.ConditionTrue, "SucceededGetScale", "the HPA controller was able to get the target's current scale")
setCondition(&pa, "AbleToScale", metav1.ConditionTrue, "SucceededGetScale", "the %s controller was able to get the target's current scale", paType)

// Update the scale required metrics periodically
err = r.updateMetricsForScale(ctx, pa, scale)
Expand Down Expand Up @@ -441,7 +442,7 @@ func (r *PodAutoscalerReconciler) reconcileCustomPA(ctx context.Context, pa auto
if rescale {
if err := r.updateScale(ctx, pa.Namespace, targetGR, scale, desiredReplicas); err != nil {
r.EventRecorder.Eventf(&pa, corev1.EventTypeWarning, "FailedRescale", "New size: %d; reason: %s; error: %v", desiredReplicas, rescaleReason, err)
setCondition(&pa, "AbleToScale", metav1.ConditionFalse, "FailedUpdateScale", "the HPA controller was unable to update the target scale: %v", err)
setCondition(&pa, "AbleToScale", metav1.ConditionFalse, "FailedUpdateScale", "the %s controller was unable to update the target scale: %v", paType, err)
r.setCurrentReplicasAndMetricsInStatus(&pa, currentReplicas)
if err := r.updateStatusIfNeeded(ctx, paStatusOriginal, &pa); err != nil {
utilruntime.HandleError(err)
Expand Down Expand Up @@ -529,19 +530,19 @@ func (r *PodAutoscalerReconciler) updateScale(ctx context.Context, namespace str
return nil
}

// setCondition sets the specific condition type on the given HPA to the specified value with the given reason
// setCondition sets the specific condition type on the given PA to the specified value with the given reason
// and message. The message and args are treated like a format string. The condition will be added if it is
// not present.
func setCondition(hpa *autoscalingv1alpha1.PodAutoscaler, conditionType string, status metav1.ConditionStatus, reason, message string, args ...interface{}) {
hpa.Status.Conditions = podutils.SetConditionInList(hpa.Status.Conditions, conditionType, status, reason, message, args...)
// setCondition records (or updates) the named condition on the given PA's status,
// with the supplied status value, reason, and a printf-style message expanded
// from message/args. An absent condition is appended to the list.
func setCondition(pa *autoscalingv1alpha1.PodAutoscaler, conditionType string, status metav1.ConditionStatus, reason, message string, args ...interface{}) {
	conditions := podutils.SetConditionInList(pa.Status.Conditions, conditionType, status, reason, message, args...)
	pa.Status.Conditions = conditions
}

// setCurrentReplicasAndMetricsInStatus sets the current replica count and metrics in the status of the HPA.
// setCurrentReplicasAndMetricsInStatus refreshes the PA status with the observed
// replica count, preserving the previously recorded desired scale and without
// flagging a rescale.
func (r *PodAutoscalerReconciler) setCurrentReplicasAndMetricsInStatus(pa *autoscalingv1alpha1.PodAutoscaler, currentReplicas int32) {
	desiredReplicas := pa.Status.DesiredScale
	r.setStatus(pa, currentReplicas, desiredReplicas, false)
}

// setStatus recreates the status of the given HPA, updating the current and
// setStatus recreates the status of the given PA, updating the current and
// desired replicas, as well as the metric statuses
func (r *PodAutoscalerReconciler) setStatus(pa *autoscalingv1alpha1.PodAutoscaler, currentReplicas, desiredReplicas int32, rescale bool) {
pa.Status = autoscalingv1alpha1.PodAutoscalerStatus{
Expand All @@ -565,7 +566,7 @@ func (r *PodAutoscalerReconciler) updateStatusIfNeeded(ctx context.Context, oldS
return r.updateStatus(ctx, newPA)
}

// updateStatus actually does the update request for the status of the given HPA
// updateStatus actually does the update request for the status of the given PA
func (r *PodAutoscalerReconciler) updateStatus(ctx context.Context, pa *autoscalingv1alpha1.PodAutoscaler) error {
if err := r.Status().Update(ctx, pa); err != nil {
r.EventRecorder.Event(pa, corev1.EventTypeWarning, "FailedUpdateStatus", err.Error())
Expand All @@ -580,7 +581,7 @@ func (r *PodAutoscalerReconciler) updateStatus(ctx context.Context, pa *autoscal
// returning the maximum of the computed replica counts, a description of the associated metric, and the statuses of
// all metrics computed.
// It may return both valid metricDesiredReplicas and an error,
// when some metrics still work and HPA should perform scaling based on them.
// when some metrics still work and PA should perform scaling based on them.
// If PodAutoscaler cannot do anything due to error, it returns -1 in metricDesiredReplicas as a failure signal.
func (r *PodAutoscalerReconciler) computeReplicasForMetrics(ctx context.Context, pa autoscalingv1alpha1.PodAutoscaler, scale *unstructured.Unstructured) (replicas int32, relatedMetrics string, timestamp time.Time, err error) {
logger := klog.FromContext(ctx)
Expand Down
Loading