Merge pull request #47628 from bizhao/e2e

Automatic merge from submit-queue (batch tested with PRs 45610, 47628)

Replace capacity with allocatable when calculating pod resources

It is not accurate to use capacity for this calculation, since part of a node's capacity is reserved and not available to pods.
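For reference, a minimal sketch (not part of this PR) of how the test now reads a node's CPU budget; the `k8s.io/api/core/v1` import path and the helper name are assumptions for illustration and may differ from the vendored path used by the e2e suite:

```go
package example

import (
	v1 "k8s.io/api/core/v1"
)

// allocatableCPUMillis returns the CPU the scheduler can actually place pods
// against on this node, in millicores. Status.Capacity would instead return
// the node's total CPU, part of which is reserved for system daemons.
func allocatableCPUMillis(node v1.Node) (int64, bool) {
	cpu, found := node.Status.Allocatable["cpu"]
	if !found {
		return 0, false
	}
	return cpu.MilliValue(), true
}
```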



**What this PR does / why we need it**:
The current CPU resource calculation for pods in the end-to-end test is incorrect: it uses node capacity instead of allocatable.

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*:
fixes #47627

**Special notes for your reviewer**:
More details about capacity and allocatable:
https://github.com/kubernetes/community/blob/master/contributors/design-proposals/node-allocatable.md
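Roughly, per that proposal, `Allocatable = Capacity - kube-reserved - system-reserved - eviction-threshold`; the reservations come from the kubelet flags `--kube-reserved`, `--system-reserved`, and `--eviction-hard`, so allocatable is what is actually left for pods.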

**Release note**:

NONE
Merged by Kubernetes Submit Queue on 2017-06-27 17:46:11 -07:00 (committed by GitHub), commit 11b5956f7a

@@ -151,15 +151,15 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
 	// It assumes that cluster add-on pods stay stable and cannot be run in parallel with any other test that touches Nodes or Pods.
 	// It is so because we need to have precise control on what's running in the cluster.
 	It("validates resource limits of pods that are allowed to run [Conformance]", func() {
-		nodeMaxCapacity := int64(0)
-		nodeToCapacityMap := make(map[string]int64)
+		nodeMaxAllocatable := int64(0)
+		nodeToAllocatableMap := make(map[string]int64)
 		for _, node := range nodeList.Items {
-			capacity, found := node.Status.Capacity["cpu"]
+			allocatable, found := node.Status.Allocatable["cpu"]
 			Expect(found).To(Equal(true))
-			nodeToCapacityMap[node.Name] = capacity.MilliValue()
-			if nodeMaxCapacity < capacity.MilliValue() {
-				nodeMaxCapacity = capacity.MilliValue()
+			nodeToAllocatableMap[node.Name] = allocatable.MilliValue()
+			if nodeMaxAllocatable < allocatable.MilliValue() {
+				nodeMaxAllocatable = allocatable.MilliValue()
 			}
 		}
 		framework.WaitForStableCluster(cs, masterNodes)
@@ -167,23 +167,23 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
 		pods, err := cs.Core().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
 		framework.ExpectNoError(err)
 		for _, pod := range pods.Items {
-			_, found := nodeToCapacityMap[pod.Spec.NodeName]
+			_, found := nodeToAllocatableMap[pod.Spec.NodeName]
 			if found && pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed {
 				framework.Logf("Pod %v requesting resource cpu=%vm on Node %v", pod.Name, getRequestedCPU(pod), pod.Spec.NodeName)
-				nodeToCapacityMap[pod.Spec.NodeName] -= getRequestedCPU(pod)
+				nodeToAllocatableMap[pod.Spec.NodeName] -= getRequestedCPU(pod)
 			}
 		}
 		var podsNeededForSaturation int
-		milliCpuPerPod := nodeMaxCapacity / maxNumberOfPods
+		milliCpuPerPod := nodeMaxAllocatable / maxNumberOfPods
 		if milliCpuPerPod < minPodCPURequest {
 			milliCpuPerPod = minPodCPURequest
 		}
 		framework.Logf("Using pod capacity: %vm", milliCpuPerPod)
-		for name, leftCapacity := range nodeToCapacityMap {
-			framework.Logf("Node: %v has cpu capacity: %vm", name, leftCapacity)
-			podsNeededForSaturation += (int)(leftCapacity / milliCpuPerPod)
+		for name, leftAllocatable := range nodeToAllocatableMap {
+			framework.Logf("Node: %v has cpu allocatable: %vm", name, leftAllocatable)
+			podsNeededForSaturation += (int)(leftAllocatable / milliCpuPerPod)
 		}
 		By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster CPU and trying to start another one", podsNeededForSaturation))
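
For context, a hypothetical worked example of the saturation arithmetic the second hunk performs; all numbers (maxNumberOfPods, minPodCPURequest, the per-node figures) are made up for illustration and are not the test's actual constants:

```go
package main

import "fmt"

func main() {
	// Assumed values for illustration only; the real constants live in the
	// e2e test file.
	const (
		maxNumberOfPods  = int64(110)
		minPodCPURequest = int64(500) // millicores
	)
	nodeMaxAllocatable := int64(2000) // largest node has 2 allocatable cores

	// Spread the largest node's allocatable CPU across maxNumberOfPods,
	// but never request less than the minimum the test allows.
	milliCpuPerPod := nodeMaxAllocatable / maxNumberOfPods // 2000/110 = 18
	if milliCpuPerPod < minPodCPURequest {
		milliCpuPerPod = minPodCPURequest // clamped to 500m
	}

	// Allocatable CPU left per node after subtracting what already-running
	// pods request (hypothetical figures).
	nodeToAllocatableMap := map[string]int64{
		"node-1": 2000 - 300, // 1700m free
		"node-2": 1000 - 100, // 900m free
	}

	podsNeededForSaturation := 0
	for _, left := range nodeToAllocatableMap {
		podsNeededForSaturation += int(left / milliCpuPerPod)
	}
	fmt.Println(podsNeededForSaturation) // 1700/500 + 900/500 = 3 + 1 = 4
}
```

Using allocatable instead of capacity here matters because the reserved slice of each node's CPU would otherwise inflate the filler-pod count and the test would try to schedule pods that can never fit.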