From 18e183965858b9efcb47f0e95e8784810ada82a6 Mon Sep 17 00:00:00 2001 From: Oleg Sidorov Date: Thu, 13 Feb 2020 16:01:22 +0100 Subject: [PATCH] Bugfixes in release controller strategy executor This commit addresses multiple issues identified in the strategy executor, among them: * Wrong recipients for patch updates: due to an error in the code, some patches were applied to the wrong generation of release and target objects. * Target object spec checkers used to return an incomplete spec if only some of the clusters were misbehaving: this risked de-scheduling the workload on healthy clusters. Signed-off-by: Oleg Sidorov --- pkg/controller/release/checks.go | 30 +-- pkg/controller/release/release_controller.go | 3 +- .../release/release_controller_test.go | 210 ++++++++++++++++-- pkg/controller/release/strategy_executor.go | 120 ++++++---- pkg/controller/release/strategy_patches.go | 2 +- pkg/testing/util.go | 19 +- 6 files changed, 291 insertions(+), 93 deletions(-) diff --git a/pkg/controller/release/checks.go b/pkg/controller/release/checks.go index 1d811b1cc..f7fa0b4bc 100644 --- a/pkg/controller/release/checks.go +++ b/pkg/controller/release/checks.go @@ -26,20 +26,25 @@ func checkCapacity( clustersNotReadyMap := make(map[string]struct{}) for _, spec := range ct.Spec.Clusters { + t := spec if spec.Percent != stepCapacity { - t := shipper.ClusterCapacityTarget{ + t = shipper.ClusterCapacityTarget{ Name: spec.Name, Percent: stepCapacity, TotalReplicaCount: spec.TotalReplicaCount, } - newSpec.Clusters = append(newSpec.Clusters, t) clustersNotReadyMap[spec.Name] = struct{}{} canProceed = false } + newSpec.Clusters = append(newSpec.Clusters, t) } if canProceed { + // We return an empty new spec if the cluster spec check went fine + // so far. + newSpec = nil + if ct.Status.ObservedGeneration >= ct.Generation { canProceed, reason = targetutil.IsReady(ct.Status.Conditions) } else { @@ -69,11 +74,7 @@ func checkCapacity( reason = fmt.Sprintf("%v", clustersNotReady) } - if len(newSpec.Clusters) > 0 { - return canProceed, newSpec, reason - } else { - return canProceed, nil, reason - } + return canProceed, newSpec, reason } func checkTraffic( @@ -90,19 +91,24 @@ clustersNotReadyMap := make(map[string]struct{}) for _, spec := range tt.Spec.Clusters { + t := spec if spec.Weight != stepTrafficWeight { - t := shipper.ClusterTrafficTarget{ + t = shipper.ClusterTrafficTarget{ Name: spec.Name, Weight: stepTrafficWeight, } - newSpec.Clusters = append(newSpec.Clusters, t) clustersNotReadyMap[spec.Name] = struct{}{} canProceed = false } + newSpec.Clusters = append(newSpec.Clusters, t) } if canProceed { + // We return an empty new spec if the cluster spec check went fine + // so far.
+ newSpec = nil + if tt.Status.ObservedGeneration >= tt.Generation { canProceed, reason = targetutil.IsReady(tt.Status.Conditions) } else { @@ -132,9 +138,5 @@ func checkTraffic( reason = fmt.Sprintf("%v", clustersNotReady) } - if len(newSpec.Clusters) > 0 { - return canProceed, newSpec, reason - } else { - return canProceed, nil, reason - } + return canProceed, newSpec, reason } diff --git a/pkg/controller/release/release_controller.go b/pkg/controller/release/release_controller.go index 8928ad602..bd6655654 100644 --- a/pkg/controller/release/release_controller.go +++ b/pkg/controller/release/release_controller.go @@ -4,10 +4,10 @@ import ( "fmt" "time" + "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/labels" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -476,7 +476,6 @@ func (c *Controller) executeReleaseStrategy(relinfo *releaseInfo, diff *diffutil } else { achievedStep = int32(len(rel.Spec.Environment.Strategy.Steps)) - 1 achievedStepName = rel.Spec.Environment.Strategy.Steps[achievedStep].Name - } if prevStep == nil || achievedStep != prevStep.Step { rel.Status.AchievedStep = &shipper.AchievedStep{ diff --git a/pkg/controller/release/release_controller_test.go b/pkg/controller/release/release_controller_test.go index 91a2ee957..3ee98240a 100644 --- a/pkg/controller/release/release_controller_test.go +++ b/pkg/controller/release/release_controller_test.go @@ -1242,7 +1242,8 @@ func (f *fixture) expectCapacityNotReady(relpair releaseInfoPair, targetStep, ac // } // } - var rel *shipper.Release + rel := relpair.contender.release + if role == Contender { newStatus = map[string]interface{}{ "status": shipper.ReleaseStatus{ @@ -1275,8 +1276,6 @@ func (f *fixture) expectCapacityNotReady(relpair releaseInfoPair, targetStep, ac }, }, } - rel = relpair.contender.release - } else { newStatus = map[string]interface{}{ "status": shipper.ReleaseStatus{ @@ -1324,8 +1323,6 @@ func (f *fixture) expectCapacityNotReady(relpair releaseInfoPair, targetStep, ac }, }, } - - rel = relpair.incumbent.release } patch, _ := json.Marshal(newStatus) @@ -1350,7 +1347,8 @@ func (f *fixture) expectTrafficNotReady(relpair releaseInfoPair, targetStep, ach // } // } - var rel *shipper.Release + rel := relpair.contender.release + if role == Contender { newStatus = map[string]interface{}{ "status": shipper.ReleaseStatus{ @@ -1388,8 +1386,6 @@ func (f *fixture) expectTrafficNotReady(relpair releaseInfoPair, targetStep, ach }, }, } - - rel = relpair.contender.release } else { newStatus = map[string]interface{}{ "status": shipper.ReleaseStatus{ @@ -1432,8 +1428,6 @@ func (f *fixture) expectTrafficNotReady(relpair releaseInfoPair, targetStep, ach }, }, } - - rel = relpair.incumbent.release } patch, _ := json.Marshal(newStatus) @@ -1483,15 +1477,15 @@ func TestContenderReleasePhaseIsWaitingForCommandForInitialStepState(t *testing. 
incumbent.capacityTarget.Spec.Clusters[0].Percent = 100 f.addObjects( - incumbent.release.DeepCopy(), - incumbent.installationTarget.DeepCopy(), - incumbent.capacityTarget.DeepCopy(), - incumbent.trafficTarget.DeepCopy(), - contender.release.DeepCopy(), contender.installationTarget.DeepCopy(), contender.capacityTarget.DeepCopy(), contender.trafficTarget.DeepCopy(), + + incumbent.release.DeepCopy(), + incumbent.installationTarget.DeepCopy(), + incumbent.capacityTarget.DeepCopy(), + incumbent.trafficTarget.DeepCopy(), ) var step int32 = 0 f.expectReleaseWaitingForCommand(contender.release, step) @@ -1950,7 +1944,7 @@ func TestIncumbentTrafficShouldDecrease(t *testing.T) { ) tt := incumbent.trafficTarget.DeepCopy() - r := incumbent.release.DeepCopy() + r := contender.release.DeepCopy() f.expectTrafficStatusPatch(contender.release.Spec.TargetStep, tt, r, 50, Incumbent) f.run() } @@ -1991,7 +1985,7 @@ func TestIncumbentTrafficShouldDecreaseWithRolloutBlockOverride(t *testing.T) { ) tt := incumbent.trafficTarget.DeepCopy() - r := incumbent.release.DeepCopy() + r := contender.release.DeepCopy() f.expectTrafficStatusPatch(contender.release.Spec.TargetStep, tt, r, 50, Incumbent) overrideEvent := fmt.Sprintf("%s RolloutBlockOverridden %s", corev1.EventTypeNormal, rolloutBlockKey) f.expectedEvents = append([]string{overrideEvent}, f.expectedEvents...) @@ -2079,7 +2073,7 @@ func TestIncumbentCapacityShouldDecrease(t *testing.T) { ) tt := incumbent.capacityTarget.DeepCopy() - r := incumbent.release.DeepCopy() + r := contender.release.DeepCopy() f.expectCapacityStatusPatch(contender.release.Spec.TargetStep, tt, r, 50, uint(totalReplicaCount), Incumbent) f.run() } @@ -2122,7 +2116,7 @@ func TestIncumbentCapacityShouldDecreaseWithRolloutBlockOverride(t *testing.T) { ) tt := incumbent.capacityTarget.DeepCopy() - r := incumbent.release.DeepCopy() + r := contender.release.DeepCopy() f.expectCapacityStatusPatch(contender.release.Spec.TargetStep, tt, r, 50, uint(totalReplicaCount), Incumbent) overrideEvent := fmt.Sprintf("%s RolloutBlockOverridden %s", corev1.EventTypeNormal, rolloutBlockKey) f.expectedEvents = append([]string{overrideEvent}, f.expectedEvents...) 
@@ -2829,3 +2823,181 @@ func TestIncumbentOutOfRangeTargetStep(t *testing.T) { f.run() } + +func TestUnhealthyTrafficAndCapacityIncumbentConvergesConsistently(t *testing.T) { + namespace := "test-namespace" + incumbentName, contenderName := "test-incumbent", "test-contender" + app := buildApplication(namespace, "test-app") + cluster := buildCluster("minikube") + brokenCluster := buildCluster("broken-cluster") + + f := newFixture(t, app.DeepCopy(), cluster.DeepCopy()) + replicaCount := int32(4) + + contender := f.buildContender(namespace, contenderName, replicaCount) + incumbent := f.buildIncumbent(namespace, incumbentName, replicaCount) + + addCluster(incumbent, brokenCluster) + + // Mark contender as fully healthy + var step int32 = 2 + contender.release.Spec.TargetStep = step + contender.capacityTarget.Spec.Clusters[0].Percent = 100 + contender.capacityTarget.Spec.Clusters[0].TotalReplicaCount = replicaCount + contender.trafficTarget.Spec.Clusters[0].Weight = 100 + contender.release.Status.AchievedStep = &shipper.AchievedStep{Step: 2} + + incumbent.trafficTarget.Spec.Clusters[0].Weight = 0 + incumbent.trafficTarget.Spec.Clusters[1].Weight = 0 + incumbent.trafficTarget.Status.Clusters = []*shipper.ClusterTrafficStatus{ + { + AchievedTraffic: 50, + }, + { + AchievedTraffic: 0, + }, + } + incumbent.trafficTarget.Status.Conditions, _ = targetutil.SetTargetCondition( + incumbent.trafficTarget.Status.Conditions, + targetutil.NewTargetCondition( + shipper.TargetConditionTypeReady, + corev1.ConditionFalse, + ClustersNotReady, "[broken-cluster]", + ), + ) + incumbent.capacityTarget.Spec.Clusters[0].Name = "broken-cluster" + incumbent.capacityTarget.Spec.Clusters[0].Percent = 0 + incumbent.capacityTarget.Spec.Clusters[0].TotalReplicaCount = replicaCount + incumbent.capacityTarget.Spec.Clusters[1].Name = "minikube" + incumbent.capacityTarget.Spec.Clusters[1].Percent = 0 + incumbent.capacityTarget.Spec.Clusters[1].TotalReplicaCount = 0 + incumbent.capacityTarget.Status.Conditions, _ = targetutil.SetTargetCondition( + incumbent.capacityTarget.Status.Conditions, + targetutil.NewTargetCondition( + shipper.TargetConditionTypeReady, + corev1.ConditionFalse, + ClustersNotReady, "[broken-cluster]")) + incumbent.capacityTarget.Status.Clusters = []shipper.ClusterCapacityStatus{ + { + AvailableReplicas: 42, // anything but spec-matching + AchievedPercent: 42, // anything but spec-matching + }, + { + AvailableReplicas: 0, + AchievedPercent: 0, + }, + } + + expected := contender.release.DeepCopy() + condScheduled := releaseutil.NewReleaseCondition(shipper.ReleaseConditionTypeScheduled, corev1.ConditionTrue, "", "") + releaseutil.SetReleaseCondition(&expected.Status, *condScheduled) + condStrategyExecuted := releaseutil.NewReleaseCondition(shipper.ReleaseConditionTypeStrategyExecuted, corev1.ConditionTrue, "", "") + releaseutil.SetReleaseCondition(&expected.Status, *condStrategyExecuted) + + f.addObjects( + contender.release.DeepCopy(), + contender.installationTarget.DeepCopy(), + contender.capacityTarget.DeepCopy(), + contender.trafficTarget.DeepCopy(), + + incumbent.release.DeepCopy(), + incumbent.installationTarget.DeepCopy(), + incumbent.capacityTarget.DeepCopy(), + incumbent.trafficTarget.DeepCopy(), + ) + + f.actions = append(f.actions, + kubetesting.NewUpdateAction( + shipper.SchemeGroupVersion.WithResource("releases"), + contender.release.GetNamespace(), + expected)) + + var patch []byte + + newContenderStatus := map[string]interface{}{ + "status": shipper.ReleaseStatus{ + Strategy: 
&shipper.ReleaseStrategyStatus{ + State: shipper.ReleaseStrategyState{ + WaitingForInstallation: shipper.StrategyStateFalse, + WaitingForCommand: shipper.StrategyStateFalse, + WaitingForTraffic: shipper.StrategyStateTrue, + WaitingForCapacity: shipper.StrategyStateFalse, + }, + Conditions: []shipper.ReleaseStrategyCondition{ + shipper.ReleaseStrategyCondition{ + Type: shipper.StrategyConditionContenderAchievedCapacity, + Status: corev1.ConditionTrue, + Step: step, + }, + shipper.ReleaseStrategyCondition{ + Type: shipper.StrategyConditionContenderAchievedInstallation, + Status: corev1.ConditionTrue, + Step: step, + }, + shipper.ReleaseStrategyCondition{ + Type: shipper.StrategyConditionContenderAchievedTraffic, + Status: corev1.ConditionTrue, + Step: step, + }, + shipper.ReleaseStrategyCondition{ + Type: shipper.StrategyConditionIncumbentAchievedTraffic, + Status: corev1.ConditionFalse, + Step: step, + Reason: ClustersNotReady, + Message: fmt.Sprintf("release \"test-incumbent\" hasn't achieved traffic in clusters: [broken-cluster]. for more details try `kubectl describe tt test-incumbent`"), + }, + }, + }, + }, + } + patch, _ = json.Marshal(newContenderStatus) + + f.actions = append(f.actions, kubetesting.NewPatchAction( + shipper.SchemeGroupVersion.WithResource("releases"), + contender.release.GetNamespace(), + contender.release.GetName(), + types.MergePatchType, + patch, + )) + + newIncumbentStatus := map[string]interface{}{ + "status": shipper.ReleaseStatus{ + Strategy: &shipper.ReleaseStrategyStatus{ + State: shipper.ReleaseStrategyState{ + WaitingForInstallation: shipper.StrategyStateFalse, + WaitingForCommand: shipper.StrategyStateFalse, + WaitingForTraffic: shipper.StrategyStateFalse, + WaitingForCapacity: shipper.StrategyStateTrue, + }, + Conditions: []shipper.ReleaseStrategyCondition{ + shipper.ReleaseStrategyCondition{ + Type: shipper.StrategyConditionContenderAchievedCapacity, + Status: corev1.ConditionFalse, + Step: step, + Reason: ClustersNotReady, + Message: fmt.Sprintf("release \"test-incumbent\" hasn't achieved capacity in clusters: [broken-cluster]. 
for more details try `kubectl describe ct test-incumbent`"), + }, + shipper.ReleaseStrategyCondition{ + Type: shipper.StrategyConditionContenderAchievedInstallation, + Status: corev1.ConditionTrue, + Step: step, + }, + }, + }, + }, + } + patch, _ = json.Marshal(newIncumbentStatus) + + f.actions = append(f.actions, kubetesting.NewPatchAction( + shipper.SchemeGroupVersion.WithResource("releases"), + incumbent.release.GetNamespace(), + incumbent.release.GetName(), + types.MergePatchType, + patch, + )) + + f.expectedEvents = append(f.expectedEvents, + `Normal ReleaseConditionChanged [] -> [Scheduled True], [] -> [StrategyExecuted True]`) + + f.run() +} diff --git a/pkg/controller/release/strategy_executor.go b/pkg/controller/release/strategy_executor.go index 3bacbcdf9..61b41d26a 100644 --- a/pkg/controller/release/strategy_executor.go +++ b/pkg/controller/release/strategy_executor.go @@ -32,14 +32,15 @@ func (p *Pipeline) Enqueue(step PipelineStep) { } type Extra struct { - IsLastStep bool - HasIncumbent bool + IsLastStep bool + HasTail bool + Initiator *shipper.Release } func (p *Pipeline) Process(strategy *shipper.RolloutStrategy, step int32, extra Extra, cond conditions.StrategyConditionsMap) (bool, []StrategyPatch, []ReleaseStrategyStateTransition) { var patches []StrategyPatch var trans []ReleaseStrategyStateTransition - var complete = true + complete := true for _, stage := range *p { cont, steppatches, steptrans := stage(strategy, step, extra, cond) patches = append(patches, steppatches...) @@ -89,7 +90,6 @@ func NewStrategyExecutor(strategy *shipper.RolloutStrategy, step int32) *Strateg func (e *StrategyExecutor) Execute(prev, curr, succ *releaseInfo) (bool, []StrategyPatch, []ReleaseStrategyStateTransition) { isHead, hasTail := succ == nil, prev != nil - hasIncumbent := prev != nil || succ != nil // There is no really a point in making any changes until the successor // has completed it's transition, therefore we're hoilding off and aborting @@ -104,6 +104,9 @@ func (e *StrategyExecutor) Execute(prev, curr, succ *releaseInfo) (bool, []Strat } } + // This pre-fill is super important, otherwise conditions will be sorted + // in a wrong order and corresponding patches will override wrong + // conditions. var releaseStrategyConditions []shipper.ReleaseStrategyCondition if curr.release.Status.Strategy != nil { releaseStrategyConditions = curr.release.Status.Strategy.Conditions @@ -111,46 +114,31 @@ func (e *StrategyExecutor) Execute(prev, curr, succ *releaseInfo) (bool, []Strat cond := conditions.NewStrategyConditions(releaseStrategyConditions...) // the last step is slightly special from others: at this moment shipper - // no longer waits for a command but marks a release as complete. + // is no longer waiting for a command but marks a release as complete. isLastStep := int(e.step) == len(e.strategy.Steps)-1 // The reason because isHead is not included in the extra set is mainly // because the pipeline is picking up 2 distinct tuples of releases // (curr+succ) and (prev+curr), therefore isHead is supposed to be // calculated by enforcers. 
extra := Extra{ - HasIncumbent: hasIncumbent, - IsLastStep: isLastStep, + Initiator: curr.release, + IsLastStep: isLastStep, + HasTail: hasTail, } pipeline := NewPipeline() - if isHead { - pipeline.Enqueue(genInstallationEnforcer(curr, nil)) - } + pipeline.Enqueue(genInstallationEnforcer(curr, nil)) pipeline.Enqueue(genCapacityEnforcer(curr, succ)) pipeline.Enqueue(genTrafficEnforcer(curr, succ)) - + if hasTail { + pipeline.Enqueue(genTrafficEnforcer(prev, curr)) + pipeline.Enqueue(genCapacityEnforcer(prev, curr)) + } if isHead { - if hasTail { - pipeline.Enqueue(genTrafficEnforcer(prev, curr)) - pipeline.Enqueue(genCapacityEnforcer(prev, curr)) - } pipeline.Enqueue(genReleaseStrategyStateEnforcer(curr, nil)) } - complete, patches, trans := pipeline.Process(e.strategy, e.step, extra, cond) - - alterPatches := make([]StrategyPatch, 0, len(patches)) -Patch: - for _, patch := range patches { - for _, obj := range []interface{}{curr.release, curr.capacityTarget, curr.trafficTarget} { - if patch.Alters(obj) { - alterPatches = append(alterPatches, patch) - continue Patch - } - } - } - - return complete, alterPatches, trans + return pipeline.Process(e.strategy, e.step, extra, cond) } func genInstallationEnforcer(curr, succ *releaseInfo) PipelineStep { @@ -166,15 +154,19 @@ func genInstallationEnforcer(curr, succ *releaseInfo) PipelineStep { }, ) - patch := buildContenderStrategyConditionsPatch( - curr.release.Name, + patches := make([]StrategyPatch, 0, 1) + relPatch := buildContenderStrategyConditionsPatch( + extra.Initiator.Name, cond, targetStep, extra.IsLastStep, - extra.HasIncumbent, + extra.HasTail, ) + if relPatch.Alters(extra.Initiator) { + patches = append(patches, relPatch) + } - return PipelineBreak, []StrategyPatch{patch}, nil + return PipelineBreak, patches, nil } cond.SetTrue( @@ -194,13 +186,17 @@ func genCapacityEnforcer(curr, succ *releaseInfo) PipelineStep { var condType shipper.StrategyConditionType var capacityWeight int32 isHead := succ == nil + isInitiator := releasesIdentical(extra.Initiator, curr.release) + if isInitiator { + condType = shipper.StrategyConditionContenderAchievedCapacity + } else { + condType = shipper.StrategyConditionIncumbentAchievedCapacity + } if isHead { capacityWeight = strategy.Steps[targetStep].Capacity.Contender - condType = shipper.StrategyConditionContenderAchievedCapacity } else { capacityWeight = strategy.Steps[targetStep].Capacity.Incumbent - condType = shipper.StrategyConditionIncumbentAchievedCapacity } if achieved, newSpec, clustersNotReady := checkCapacity(curr.capacityTarget, capacityWeight); !achieved { @@ -222,16 +218,20 @@ func genCapacityEnforcer(curr, succ *releaseInfo) PipelineStep { NewSpec: newSpec, Name: curr.release.Name, } - patches = append(patches, ctPatch) + if ctPatch.Alters(curr.capacityTarget) { + patches = append(patches, ctPatch) + } relPatch := buildContenderStrategyConditionsPatch( - curr.release.Name, + extra.Initiator.Name, cond, targetStep, extra.IsLastStep, - extra.HasIncumbent, + extra.HasTail, ) - patches = append(patches, relPatch) + if relPatch.Alters(extra.Initiator) { + patches = append(patches, relPatch) + } return PipelineBreak, patches, nil } @@ -243,6 +243,8 @@ func genCapacityEnforcer(curr, succ *releaseInfo) PipelineStep { conditions.StrategyConditionsUpdate{ Step: targetStep, LastTransitionTime: time.Now(), + Reason: "", + Message: "", }, ) @@ -255,13 +257,17 @@ func genTrafficEnforcer(curr, succ *releaseInfo) PipelineStep { var condType shipper.StrategyConditionType var trafficWeight int32 isHead := 
succ == nil + isInitiator := releasesIdentical(extra.Initiator, curr.release) + if isInitiator { + condType = shipper.StrategyConditionContenderAchievedTraffic + } else { + condType = shipper.StrategyConditionIncumbentAchievedTraffic + } if isHead { trafficWeight = strategy.Steps[targetStep].Traffic.Contender - condType = shipper.StrategyConditionContenderAchievedTraffic } else { trafficWeight = strategy.Steps[targetStep].Traffic.Incumbent - condType = shipper.StrategyConditionIncumbentAchievedTraffic } if achieved, newSpec, reason := checkTraffic(curr.trafficTarget, uint32(trafficWeight)); !achieved { @@ -283,16 +289,20 @@ func genTrafficEnforcer(curr, succ *releaseInfo) PipelineStep { NewSpec: newSpec, Name: curr.release.Name, } - patches = append(patches, ttPatch) + if ttPatch.Alters(curr.trafficTarget) { + patches = append(patches, ttPatch) + } relPatch := buildContenderStrategyConditionsPatch( - curr.release.Name, + extra.Initiator.Name, cond, targetStep, extra.IsLastStep, - extra.HasIncumbent, + extra.HasTail, ) - patches = append(patches, relPatch) + if relPatch.Alters(extra.Initiator) { + patches = append(patches, relPatch) + } return PipelineBreak, patches, nil } @@ -320,7 +330,7 @@ func genReleaseStrategyStateEnforcer(curr, succ *releaseInfo) PipelineStep { newReleaseStrategyState := cond.AsReleaseStrategyState( targetStep, - extra.HasIncumbent, + extra.HasTail, extra.IsLastStep, ) @@ -336,14 +346,16 @@ func genReleaseStrategyStateEnforcer(curr, succ *releaseInfo) PipelineStep { releaseStrategyStateTransitions) relPatch := buildContenderStrategyConditionsPatch( - curr.release.Name, + extra.Initiator.Name, cond, targetStep, extra.IsLastStep, - extra.HasIncumbent, + extra.HasTail, ) - patches = append(patches, relPatch) + if relPatch.Alters(extra.Initiator) { + patches = append(patches, relPatch) + } return PipelineContinue, patches, releaseStrategyStateTransitions } @@ -392,3 +404,13 @@ func valueOrUnknown(v shipper.StrategyState) shipper.StrategyState { } return v } + +// This method is a super basic helper and should not be re-used as a reliable +// release similarity checker +func releasesIdentical(r1, r2 *shipper.Release) bool { + if r1 == nil || r2 == nil { + return r1 == r2 + } + return r1.Namespace == r2.Namespace && + r1.Name == r2.Name +} diff --git a/pkg/controller/release/strategy_patches.go b/pkg/controller/release/strategy_patches.go index 673ae5f71..74e438873 100644 --- a/pkg/controller/release/strategy_patches.go +++ b/pkg/controller/release/strategy_patches.go @@ -55,7 +55,7 @@ func (p *TrafficTargetSpecPatch) PatchSpec() (string, schema.GroupVersionKind, [ } func (p *TrafficTargetSpecPatch) Alters(o interface{}) bool { - // CapacityTargetSpecPatch is an altering one by it's nature: it's only + // TrafficTargetSpecPatch is an altering one by it's nature: it's only // being created if a capacity target adjustment is required. Therefore // we're saving a few peanuts and moving on with always-apply strategy. 
return !p.IsEmpty() diff --git a/pkg/testing/util.go b/pkg/testing/util.go index 96a63f48f..fd9fc9050 100644 --- a/pkg/testing/util.go +++ b/pkg/testing/util.go @@ -209,18 +209,20 @@ func prettyPrintAction(a kubetesting.Action) string { verb := a.GetVerb() gvk := a.GetResource() ns := a.GetNamespace() + extra := "" - template := fmt.Sprintf("Verb: %s\nGVK: %s\nNamespace: %s\n--------\n%%s", verb, gvk.String(), ns) + template := fmt.Sprintf("Verb: %s\nGVK: %s\nNamespace: %s\n%%s--------\n%%s", verb, gvk.String(), ns) switch action := a.(type) { case kubetesting.CreateActionImpl: + extra := fmt.Sprintf("Name: %s\n", action.Name) obj, err := yaml.Marshal(action.GetObject()) if err != nil { panic(fmt.Sprintf("could not marshal %+v: %q", action.GetObject(), err)) } - return fmt.Sprintf(template, string(obj)) + return fmt.Sprintf(template, extra, string(obj)) case kubetesting.UpdateActionImpl: obj, err := yaml.Marshal(action.GetObject()) @@ -228,30 +230,31 @@ func prettyPrintAction(a kubetesting.Action) string { panic(fmt.Sprintf("could not marshal %+v: %q", action.GetObject(), err)) } - return fmt.Sprintf(template, string(obj)) + return fmt.Sprintf(template, extra, string(obj)) case kubetesting.PatchActionImpl: + extra = fmt.Sprintf("Name: %s\n", action.Name) patch := prettyPrintActionPatch(action) - return fmt.Sprintf(template, patch) + return fmt.Sprintf(template, extra, patch) case kubetesting.GetActionImpl: message := fmt.Sprintf("(no object body: GET %s)", action.GetName()) - return fmt.Sprintf(template, message) + return fmt.Sprintf(template, extra, message) case kubetesting.ListActionImpl: message := fmt.Sprintf("(no object body: GET %s)", action.GetKind()) - return fmt.Sprintf(template, message) + return fmt.Sprintf(template, extra, message) case kubetesting.WatchActionImpl: return fmt.Sprintf(template, "(no object body: WATCH)") case kubetesting.DeleteActionImpl: message := fmt.Sprintf("(no object body: DELETE %s)", action.GetName()) - return fmt.Sprintf(template, message) + return fmt.Sprintf(template, extra, message) case kubetesting.ActionImpl: message := fmt.Sprintf("(no object body: %s %s)", action.GetVerb(), action.GetResource()) - return fmt.Sprintf(template, message) + return fmt.Sprintf(template, extra, message) } panic(fmt.Sprintf("unknown action! patch printAction to support %T %+v", a, a))