diff --git a/test/e2e/scheduling/predicates.go b/test/e2e/scheduling/predicates.go
index 789e1862e50..22246e3d707 100644
--- a/test/e2e/scheduling/predicates.go
+++ b/test/e2e/scheduling/predicates.go
@@ -151,15 +151,15 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
 	// It assumes that cluster add-on pods stay stable and cannot be run in parallel with any other test that touches Nodes or Pods.
 	// It is so because we need to have precise control on what's running in the cluster.
 	It("validates resource limits of pods that are allowed to run [Conformance]", func() {
-		nodeMaxCapacity := int64(0)
+		nodeMaxAllocatable := int64(0)
 
-		nodeToCapacityMap := make(map[string]int64)
+		nodeToAllocatableMap := make(map[string]int64)
 		for _, node := range nodeList.Items {
-			capacity, found := node.Status.Capacity["cpu"]
+			allocatable, found := node.Status.Allocatable["cpu"]
 			Expect(found).To(Equal(true))
-			nodeToCapacityMap[node.Name] = capacity.MilliValue()
-			if nodeMaxCapacity < capacity.MilliValue() {
-				nodeMaxCapacity = capacity.MilliValue()
+			nodeToAllocatableMap[node.Name] = allocatable.MilliValue()
+			if nodeMaxAllocatable < allocatable.MilliValue() {
+				nodeMaxAllocatable = allocatable.MilliValue()
 			}
 		}
 		framework.WaitForStableCluster(cs, masterNodes)
@@ -167,23 +167,23 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
 		pods, err := cs.Core().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
 		framework.ExpectNoError(err)
 		for _, pod := range pods.Items {
-			_, found := nodeToCapacityMap[pod.Spec.NodeName]
+			_, found := nodeToAllocatableMap[pod.Spec.NodeName]
 			if found && pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed {
 				framework.Logf("Pod %v requesting resource cpu=%vm on Node %v", pod.Name, getRequestedCPU(pod), pod.Spec.NodeName)
-				nodeToCapacityMap[pod.Spec.NodeName] -= getRequestedCPU(pod)
+				nodeToAllocatableMap[pod.Spec.NodeName] -= getRequestedCPU(pod)
 			}
 		}
 
 		var podsNeededForSaturation int
 
-		milliCpuPerPod := nodeMaxCapacity / maxNumberOfPods
+		milliCpuPerPod := nodeMaxAllocatable / maxNumberOfPods
 		if milliCpuPerPod < minPodCPURequest {
 			milliCpuPerPod = minPodCPURequest
 		}
 		framework.Logf("Using pod capacity: %vm", milliCpuPerPod)
-		for name, leftCapacity := range nodeToCapacityMap {
-			framework.Logf("Node: %v has cpu capacity: %vm", name, leftCapacity)
-			podsNeededForSaturation += (int)(leftCapacity / milliCpuPerPod)
+		for name, leftAllocatable := range nodeToAllocatableMap {
+			framework.Logf("Node: %v has cpu allocatable: %vm", name, leftAllocatable)
+			podsNeededForSaturation += (int)(leftAllocatable / milliCpuPerPod)
 		}
 
 		By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster CPU and trying to start another one", podsNeededForSaturation))
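
The substance of this change is the switch from node.Status.Capacity to node.Status.Allocatable: Allocatable is Capacity minus the resources the kubelet reserves for system daemons, so it is the actual upper bound on CPU the scheduler can hand out, and saturating the cluster against Capacity would create pods that can never schedule. A minimal sketch of the distinction, separate from the test itself — the nodeinfo package and PrintCPUHeadroom helper are hypothetical, and the client-go calls (cs.Core().Nodes().List with a bare metav1.ListOptions) match the era of this diff rather than current releases:

// Package nodeinfo is a hypothetical helper package illustrating the
// Capacity vs. Allocatable distinction this diff relies on.
package nodeinfo

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// PrintCPUHeadroom logs capacity vs. allocatable CPU for every node, in
// millicores. The difference is the slice withheld for system daemons and
// the kubelet; only Allocatable is available to scheduled pods.
func PrintCPUHeadroom(cs clientset.Interface) error {
	nodes, err := cs.Core().Nodes().List(metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, node := range nodes.Items {
		// Assign to locals first: map index results are not addressable,
		// and Quantity.MilliValue has a pointer receiver.
		capacity := node.Status.Capacity["cpu"]
		allocatable := node.Status.Allocatable["cpu"]
		fmt.Printf("node %s: capacity=%vm allocatable=%vm reserved=%vm\n",
			node.Name, capacity.MilliValue(), allocatable.MilliValue(),
			capacity.MilliValue()-allocatable.MilliValue())
	}
	return nil
}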