list pod list once to avoid timeout
AliceZhang2016 committed Apr 12, 2021 · 1 parent 1484454 · commit fcca48e
Showing 1 changed file with 30 additions and 16 deletions.
46 changes: 30 additions & 16 deletions test/e2e/scheduling/priorities.go
@@ -462,8 +462,12 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n
 	var maxCPUFraction, maxMemFraction float64 = ratio, ratio
 	var cpuFractionMap = make(map[string]float64)
 	var memFractionMap = make(map[string]float64)
+
+	// For each node, stores its pods info
+	nodeNameToPodList := podListForEachNode(cs)
+
 	for _, node := range nodes {
-		cpuFraction, memFraction := computeCPUMemFraction(cs, node, requestedResource)
+		cpuFraction, memFraction := computeCPUMemFraction(node, requestedResource, nodeNameToPodList[node.Name])
 		cpuFractionMap[node.Name] = cpuFraction
 		memFractionMap[node.Name] = memFraction
 		if cpuFraction > maxCPUFraction {
@@ -521,33 +525,43 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n
 		}
 	}
 
+	nodeNameToPodList = podListForEachNode(cs)
 	for _, node := range nodes {
 		ginkgo.By("Compute Cpu, Mem Fraction after create balanced pods.")
-		computeCPUMemFraction(cs, node, requestedResource)
+		computeCPUMemFraction(node, requestedResource, nodeNameToPodList[node.Name])
 	}
 
 	return cleanUp, nil
 }
 
-func computeCPUMemFraction(cs clientset.Interface, node v1.Node, resource *v1.ResourceRequirements) (float64, float64) {
-	framework.Logf("ComputeCPUMemFraction for node: %v", node.Name)
-	totalRequestedCPUResource := resource.Requests.Cpu().MilliValue()
-	totalRequestedMemResource := resource.Requests.Memory().Value()
-	allpods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{})
+func podListForEachNode(cs clientset.Interface) map[string][]*v1.Pod {
+	nodeNameToPodList := make(map[string][]*v1.Pod)
+	allPods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{})
 	if err != nil {
 		framework.Failf("Expect error of invalid, got : %v", err)
 	}
-	for _, pod := range allpods.Items {
-		if pod.Spec.NodeName == node.Name {
-			framework.Logf("Pod for on the node: %v, Cpu: %v, Mem: %v", pod.Name, getNonZeroRequests(&pod).MilliCPU, getNonZeroRequests(&pod).Memory)
-			// Ignore best effort pods while computing fractions as they won't be taken in account by scheduler.
-			if v1qos.GetPodQOS(&pod) == v1.PodQOSBestEffort {
-				continue
-			}
-			totalRequestedCPUResource += getNonZeroRequests(&pod).MilliCPU
-			totalRequestedMemResource += getNonZeroRequests(&pod).Memory
+	for _, pod := range allPods.Items {
+		nodeName := pod.Spec.NodeName
+		nodeNameToPodList[nodeName] = append(nodeNameToPodList[nodeName], &pod)
+	}
+	return nodeNameToPodList
+}
+
+func computeCPUMemFraction(node v1.Node, resource *v1.ResourceRequirements, pods []*v1.Pod) (float64, float64) {
+	framework.Logf("ComputeCPUMemFraction for node: %v", node.Name)
+	totalRequestedCPUResource := resource.Requests.Cpu().MilliValue()
+	totalRequestedMemResource := resource.Requests.Memory().Value()
+
+	for _, pod := range pods {
+		framework.Logf("Pod for on the node: %v, Cpu: %v, Mem: %v", pod.Name, getNonZeroRequests(pod).MilliCPU, getNonZeroRequests(pod).Memory)
+		// Ignore best effort pods while computing fractions as they won't be taken in account by scheduler.
+		if v1qos.GetPodQOS(pod) == v1.PodQOSBestEffort {
+			continue
 		}
+		totalRequestedCPUResource += getNonZeroRequests(pod).MilliCPU
+		totalRequestedMemResource += getNonZeroRequests(pod).Memory
 	}
 
 	cpuAllocatable, found := node.Status.Allocatable[v1.ResourceCPU]
 	framework.ExpectEqual(found, true)
 	cpuAllocatableMil := cpuAllocatable.MilliValue()
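The heart of the change is a pattern worth calling out: the old computeCPUMemFraction listed every pod in the cluster on every call, once per node, and that repeated listing is what could push the test past its timeout; the new code fetches the pod list once and buckets it by node name. A minimal standalone sketch of that pattern, assuming a plain client-go kubernetes.Interface rather than the e2e framework's clientset (podsByNode and the package name are illustrative, not from the commit):

package podutil

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// podsByNode lists every pod in the cluster once and groups the results by
// the node each pod is scheduled on, so per-node work needs no further API
// round trips.
func podsByNode(ctx context.Context, cs kubernetes.Interface) (map[string][]*v1.Pod, error) {
	pods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(ctx, metav1.ListOptions{})
	if err != nil {
		return nil, err
	}
	byNode := make(map[string][]*v1.Pod, len(pods.Items))
	for i := range pods.Items {
		// Take the address of the slice element rather than of a range
		// variable: before Go 1.22 the range variable is reused on each
		// iteration, so storing &pod would leave every map entry pointing
		// at the same value.
		p := &pods.Items[i]
		byNode[p.Spec.NodeName] = append(byNode[p.Spec.NodeName], p)
	}
	return byNode, nil
}

That last comment also applies to the committed podListForEachNode, whose loop appends &pod taken from the range variable; under the Go versions in use in 2021 that aliases one variable across iterations, so indexing into allPods.Items as in the sketch is the safer spelling of the same grouping step.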