Commit 199602b: checkpoint feedback

jmdeal committed Feb 26, 2025
1 parent 61aa5f5
Showing 3 changed files with 11 additions and 60 deletions.
pkg/providers/capacityreservation/types.go (8 changes: 2 additions & 6 deletions)

@@ -37,10 +37,9 @@ type Query struct {
 
 func QueriesFromSelectorTerms(terms ...v1.CapacityReservationSelectorTerm) []*Query {
 	queries := []*Query{}
-	ids := []string{}
 	for i := range terms {
-		if terms[i].ID != "" {
-			ids = append(ids, terms[i].ID)
+		if id := terms[i].ID; id != "" {
+			queries = append(queries, &Query{ID: id})
 		}
 		if len(terms[i].Tags) != 0 {
 			queries = append(queries, &Query{
@@ -49,9 +48,6 @@ func QueriesFromSelectorTerms(terms ...v1.CapacityReservationSelectorTerm) []*Query {
 			})
 		}
 	}
-	queries = append(queries, lo.Map(ids, func(id string, _ int) *Query {
-		return &Query{ID: id}
-	})...)
 	return queries
 }
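Note on the types.go change: besides dropping the lo.Map pass, it changes query ordering, since ID queries are now emitted in term order instead of being batched after all tag queries. A minimal standalone sketch of that ordering difference (toy term/query types, not the real Karpenter definitions; whether any consumer depends on query order isn't visible in this diff):

package main

import "fmt"

// Toy stand-ins for the real selector term and query types.
type term struct {
	ID   string
	Tags map[string]string
}

type query struct {
	ID   string
	Tags map[string]string
}

// queriesFromTerms mirrors the post-commit logic: each term contributes its
// ID query and/or tag query in the order the terms appear.
func queriesFromTerms(terms ...term) []*query {
	queries := []*query{}
	for i := range terms {
		if id := terms[i].ID; id != "" {
			queries = append(queries, &query{ID: id})
		}
		if len(terms[i].Tags) != 0 {
			queries = append(queries, &query{Tags: terms[i].Tags})
		}
	}
	return queries
}

func main() {
	qs := queriesFromTerms(
		term{ID: "cr-123"},
		term{Tags: map[string]string{"team": "a"}},
	)
	for _, q := range qs {
		fmt.Printf("%+v\n", q)
	}
	// Prints the ID query first, then the tag query, in term order.
	// Pre-commit, ID queries were collected and appended after all
	// tag queries instead.
}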
test/suites/drift/suite_test.go (15 changes: 9 additions & 6 deletions)

@@ -991,7 +991,14 @@ var _ = Describe("Drift", Ordered, func() {
 	})
 	It("should drift nodeclaim when the reservation is no longer selected by the nodeclass", func() {
 		nodeClass.Spec.CapacityReservationSelectorTerms = []v1.CapacityReservationSelectorTerm{{ID: largeCapacityReservationID}}
-		pod := coretest.Pod()
+		// Include the do-not-disrupt annotation to prevent replacement NodeClaims from leaking between tests
+		pod := coretest.Pod(coretest.PodOptions{
+			ObjectMeta: metav1.ObjectMeta{
+				Annotations: map[string]string{
+					karpv1.DoNotDisruptAnnotationKey: "true",
+				},
+			},
+		})
 		env.ExpectCreated(nodePool, nodeClass, pod)
 		nc := env.EventuallyExpectNodeClaimCount("==", 1)[0]
 		env.EventuallyExpectNodeClaimsReady(nc)
@@ -1005,7 +1012,6 @@ var _ = Describe("Drift", Ordered, func() {
 		env.EventuallyExpectDrifted(nc)
 	})
 	It("should drift nodeclaim when the nodeclaim is demoted to on-demand", func() {
-		var canceled bool
 		capacityReservationID := aws.ExpectCapacityReservationCreated(
 			env.Context,
 			env.EC2API,
@@ -1016,9 +1022,7 @@ var _ = Describe("Drift", Ordered, func() {
 			nil,
 		)
 		DeferCleanup(func() {
-			if !canceled {
-				aws.ExpectCapacityReservationsCanceled(env.Context, env.EC2API, capacityReservationID)
-			}
+			aws.ExpectCapacityReservationsCanceled(env.Context, env.EC2API, capacityReservationID)
 		})
 
 		nodeClass.Spec.CapacityReservationSelectorTerms = []v1.CapacityReservationSelectorTerm{{ID: capacityReservationID}}
@@ -1042,7 +1046,6 @@ var _ = Describe("Drift", Ordered, func() {
 		n := env.EventuallyExpectNodeCount("==", 1)[0]
 
 		aws.ExpectCapacityReservationsCanceled(env.Context, env.EC2API, capacityReservationID)
-		canceled = true
 
 		// The NodeClaim capacity reservation controller runs once every minute, we'll give a little extra time to avoid
 		// a failure from a small delay, but the capacity type label should be updated and the reservation-id label should
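Note on the drift suite change: per the added comment, the test pod now carries Karpenter's do-not-disrupt annotation so replacement NodeClaims don't leak between tests. For reference, a minimal client-go sketch of a pod with the same annotation; the literal key "karpenter.sh/do-not-disrupt" is assumed here as the value of karpv1.DoNotDisruptAnnotationKey:

package main

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// doNotDisruptPod builds a bare pod carrying the annotation that tells
// Karpenter not to voluntarily disrupt (e.g. consolidate or drift-replace)
// the node this pod lands on.
func doNotDisruptPod(name string) *corev1.Pod {
	return &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
			Annotations: map[string]string{
				// Assumed literal for karpv1.DoNotDisruptAnnotationKey.
				"karpenter.sh/do-not-disrupt": "true",
			},
		},
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{{
				Name:  "pause",
				Image: "registry.k8s.io/pause:3.9",
			}},
		},
	}
}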
test/suites/scheduling/suite_test.go (48 changes: 0 additions & 48 deletions)

@@ -29,7 +29,6 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/sets"
 
-	"sigs.k8s.io/controller-runtime/pkg/client"
 	karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1"
 	"sigs.k8s.io/karpenter/pkg/test"
 
@@ -807,53 +806,6 @@ var _ = Describe("Scheduling", Ordered, ContinueOnFailure, func() {
 		Expect(reservedCount).To(Equal(1))
 		env.EventuallyExpectNodeCount("==", 2)
 	})
-	It("should demote reserved instances when the reservation is canceled", func() {
-		var canceled bool
-		capacityReservationID := environmentaws.ExpectCapacityReservationCreated(
-			env.Context,
-			env.EC2API,
-			ec2types.InstanceTypeM5Large,
-			env.ZoneInfo[0].Zone,
-			1,
-			nil,
-			nil,
-		)
-		DeferCleanup(func() {
-			if !canceled {
-				environmentaws.ExpectCapacityReservationsCanceled(env.Context, env.EC2API, capacityReservationID)
-			}
-		})
-
-		nodeClass.Spec.CapacityReservationSelectorTerms = []v1.CapacityReservationSelectorTerm{{ID: capacityReservationID}}
-		pod := test.Pod()
-		env.ExpectCreated(nodePool, nodeClass, pod)
-
-		nc := env.EventuallyExpectNodeClaimCount("==", 1)[0]
-		req, ok := lo.Find(nc.Spec.Requirements, func(req karpv1.NodeSelectorRequirementWithMinValues) bool {
-			return req.Key == v1.LabelCapacityReservationID
-		})
-		Expect(ok).To(BeTrue())
-		Expect(req.Values).To(ConsistOf(capacityReservationID))
-		n := env.EventuallyExpectNodeCount("==", 1)[0]
-
-		environmentaws.ExpectCapacityReservationsCanceled(env.Context, env.EC2API, capacityReservationID)
-		canceled = true
-
-		// The NodeClaim capacity reservation controller runs once every minute, we'll give a little extra time to avoid
-		// a failure from a small delay, but the capacity type label should be updated and the reservation-id label should
-		// be removed within a minute of the reservation being canceled.
-		Eventually(func(g Gomega) {
-			updatedNodeClaim := &karpv1.NodeClaim{}
-			g.Expect(env.Client.Get(env.Context, client.ObjectKeyFromObject(nc), updatedNodeClaim)).To(BeNil())
-			g.Expect(updatedNodeClaim.Labels).To(HaveKeyWithValue(karpv1.CapacityTypeLabelKey, karpv1.CapacityTypeOnDemand))
-			g.Expect(updatedNodeClaim.Labels).ToNot(HaveKey(v1.LabelCapacityReservationID))
-
-			updatedNode := &corev1.Node{}
-			g.Expect(env.Client.Get(env.Context, client.ObjectKeyFromObject(n), updatedNode)).To(BeNil())
-			g.Expect(updatedNodeClaim.Labels).To(HaveKeyWithValue(karpv1.CapacityTypeLabelKey, karpv1.CapacityTypeOnDemand))
-			g.Expect(updatedNodeClaim.Labels).ToNot(HaveKey(v1.LabelCapacityReservationID))
-		}).WithTimeout(75 * time.Second).Should(Succeed())
-	})
 })
 })
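One observation on the deleted scheduling test, whose coverage appears to move to the drift suite's near-identical demotion test: its final assertions fetched updatedNode but then re-checked updatedNodeClaim's labels, so the Node object itself was never actually verified. Had the block been kept, the node-side checks were presumably meant to read as follows (hedged sketch using the same identifiers, inside the same Eventually closure as the deleted code):

// Presumed intent: assert on the Node fetched just above, not the NodeClaim.
updatedNode := &corev1.Node{}
g.Expect(env.Client.Get(env.Context, client.ObjectKeyFromObject(n), updatedNode)).To(BeNil())
g.Expect(updatedNode.Labels).To(HaveKeyWithValue(karpv1.CapacityTypeLabelKey, karpv1.CapacityTypeOnDemand))
g.Expect(updatedNode.Labels).ToNot(HaveKey(v1.LabelCapacityReservationID))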
