diff --git a/pkg/api/resource/helpers_test.go b/pkg/api/resource/helpers_test.go
index fbcc3688b60..cab00528709 100644
--- a/pkg/api/resource/helpers_test.go
+++ b/pkg/api/resource/helpers_test.go
@@ -30,9 +30,8 @@ func TestResourceHelpers(t *testing.T) {
 	memoryLimit := resource.MustParse("10G")
 	resourceSpec := api.ResourceRequirements{
 		Limits: api.ResourceList{
-			"cpu":             cpuLimit,
-			"memory":          memoryLimit,
-			"kube.io/storage": memoryLimit,
+			api.ResourceCPU:    cpuLimit,
+			api.ResourceMemory: memoryLimit,
 		},
 	}
 	if res := resourceSpec.Limits.Cpu(); res.Cmp(cpuLimit) != 0 {
@@ -43,8 +42,7 @@ func TestResourceHelpers(t *testing.T) {
 	}
 	resourceSpec = api.ResourceRequirements{
 		Limits: api.ResourceList{
-			"memory":          memoryLimit,
-			"kube.io/storage": memoryLimit,
+			api.ResourceMemory: memoryLimit,
 		},
 	}
 	if res := resourceSpec.Limits.Cpu(); res.Value() != 0 {
diff --git a/pkg/api/v1/resource/helpers_test.go b/pkg/api/v1/resource/helpers_test.go
index 0cf00dd1f7b..a0f41253f79 100644
--- a/pkg/api/v1/resource/helpers_test.go
+++ b/pkg/api/v1/resource/helpers_test.go
@@ -30,9 +30,8 @@ func TestResourceHelpers(t *testing.T) {
 	memoryLimit := resource.MustParse("10G")
 	resourceSpec := v1.ResourceRequirements{
 		Limits: v1.ResourceList{
-			"cpu":             cpuLimit,
-			"memory":          memoryLimit,
-			"kube.io/storage": memoryLimit,
+			v1.ResourceCPU:    cpuLimit,
+			v1.ResourceMemory: memoryLimit,
 		},
 	}
 	if res := resourceSpec.Limits.Cpu(); res.Cmp(cpuLimit) != 0 {
@@ -43,8 +42,7 @@ func TestResourceHelpers(t *testing.T) {
 	}
 	resourceSpec = v1.ResourceRequirements{
 		Limits: v1.ResourceList{
-			"memory":          memoryLimit,
-			"kube.io/storage": memoryLimit,
+			v1.ResourceMemory: memoryLimit,
 		},
 	}
 	if res := resourceSpec.Limits.Cpu(); res.Value() != 0 {
diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go
index 657d304f438..b5a1ba0924b 100644
--- a/pkg/kubelet/kubelet_test.go
+++ b/pkg/kubelet/kubelet_test.go
@@ -563,7 +563,7 @@ func TestHandleMemExceeded(t *testing.T) {
 	spec := v1.PodSpec{NodeName: string(kl.nodeName),
 		Containers: []v1.Container{{Resources: v1.ResourceRequirements{
 			Requests: v1.ResourceList{
-				"memory": resource.MustParse("90"),
+				v1.ResourceMemory: resource.MustParse("90"),
 			},
 		}}},
 	}
diff --git a/pkg/kubelet/preemption/preemption_test.go b/pkg/kubelet/preemption/preemption_test.go
index fcd1950c360..331b4f72a57 100644
--- a/pkg/kubelet/preemption/preemption_test.go
+++ b/pkg/kubelet/preemption/preemption_test.go
@@ -338,47 +338,47 @@ func getTestPods() map[string]*v1.Pod {
 	allPods := map[string]*v1.Pod{
 		tinyBurstable: getPodWithResources(tinyBurstable, v1.ResourceRequirements{
 			Requests: v1.ResourceList{
-				"cpu":    resource.MustParse("1m"),
-				"memory": resource.MustParse("1Mi"),
+				v1.ResourceCPU:    resource.MustParse("1m"),
+				v1.ResourceMemory: resource.MustParse("1Mi"),
 			},
 		}),
 		bestEffort: getPodWithResources(bestEffort, v1.ResourceRequirements{}),
 		critical: getPodWithResources(critical, v1.ResourceRequirements{
 			Requests: v1.ResourceList{
-				"cpu":    resource.MustParse("100m"),
-				"memory": resource.MustParse("100Mi"),
+				v1.ResourceCPU:    resource.MustParse("100m"),
+				v1.ResourceMemory: resource.MustParse("100Mi"),
 			},
 		}),
 		burstable: getPodWithResources(burstable, v1.ResourceRequirements{
 			Requests: v1.ResourceList{
-				"cpu":    resource.MustParse("100m"),
-				"memory": resource.MustParse("100Mi"),
+				v1.ResourceCPU:    resource.MustParse("100m"),
+				v1.ResourceMemory: resource.MustParse("100Mi"),
 			},
 		}),
 		guaranteed: getPodWithResources(guaranteed, v1.ResourceRequirements{
 			Requests: v1.ResourceList{
-				"cpu":    resource.MustParse("100m"),
-				"memory": resource.MustParse("100Mi"),
+				v1.ResourceCPU:    resource.MustParse("100m"),
+				v1.ResourceMemory: resource.MustParse("100Mi"),
 			},
 			Limits: v1.ResourceList{
-				"cpu":    resource.MustParse("100m"),
-				"memory": resource.MustParse("100Mi"),
+				v1.ResourceCPU:    resource.MustParse("100m"),
+				v1.ResourceMemory: resource.MustParse("100Mi"),
 			},
 		}),
 		highRequestBurstable: getPodWithResources(highRequestBurstable, v1.ResourceRequirements{
 			Requests: v1.ResourceList{
-				"cpu":    resource.MustParse("300m"),
-				"memory": resource.MustParse("300Mi"),
+				v1.ResourceCPU:    resource.MustParse("300m"),
+				v1.ResourceMemory: resource.MustParse("300Mi"),
 			},
 		}),
 		highRequestGuaranteed: getPodWithResources(highRequestGuaranteed, v1.ResourceRequirements{
 			Requests: v1.ResourceList{
-				"cpu":    resource.MustParse("300m"),
-				"memory": resource.MustParse("300Mi"),
+				v1.ResourceCPU:    resource.MustParse("300m"),
+				v1.ResourceMemory: resource.MustParse("300Mi"),
 			},
 			Limits: v1.ResourceList{
-				"cpu":    resource.MustParse("300m"),
-				"memory": resource.MustParse("300Mi"),
+				v1.ResourceCPU:    resource.MustParse("300m"),
+				v1.ResourceMemory: resource.MustParse("300Mi"),
 			},
 		}),
 	}
diff --git a/pkg/kubelet/rkt/rkt_test.go b/pkg/kubelet/rkt/rkt_test.go
index 9a1a7d75b39..7dda72763bf 100644
--- a/pkg/kubelet/rkt/rkt_test.go
+++ b/pkg/kubelet/rkt/rkt_test.go
@@ -1073,8 +1073,8 @@ func TestSetApp(t *testing.T) {
 				Command:    []string{"/bin/bar", "$(env-bar)"},
 				WorkingDir: tmpDir,
 				Resources: v1.ResourceRequirements{
-					Limits:   v1.ResourceList{"cpu": resource.MustParse("50m"), "memory": resource.MustParse("50M")},
-					Requests: v1.ResourceList{"cpu": resource.MustParse("5m"), "memory": resource.MustParse("5M")},
+					Limits:   v1.ResourceList{v1.ResourceCPU: resource.MustParse("50m"), v1.ResourceMemory: resource.MustParse("50M")},
+					Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("5m"), v1.ResourceMemory: resource.MustParse("5M")},
 				},
 			},
 			mountPoints: []appctypes.MountPoint{
@@ -1137,8 +1137,8 @@ func TestSetApp(t *testing.T) {
 				Args:       []string{"hello", "world", "$(env-bar)"},
 				WorkingDir: tmpDir,
 				Resources: v1.ResourceRequirements{
-					Limits:   v1.ResourceList{"cpu": resource.MustParse("50m")},
-					Requests: v1.ResourceList{"memory": resource.MustParse("5M")},
+					Limits:   v1.ResourceList{v1.ResourceCPU: resource.MustParse("50m")},
+					Requests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("5M")},
 				},
 			},
 			mountPoints: []appctypes.MountPoint{
diff --git a/plugin/pkg/scheduler/algorithm/priorities/balanced_resource_allocation_test.go b/plugin/pkg/scheduler/algorithm/priorities/balanced_resource_allocation_test.go
index 09a4afd9bf5..777be1b1499 100644
--- a/plugin/pkg/scheduler/algorithm/priorities/balanced_resource_allocation_test.go
+++ b/plugin/pkg/scheduler/algorithm/priorities/balanced_resource_allocation_test.go
@@ -51,16 +51,16 @@ func TestBalancedResourceAllocation(t *testing.T) {
 			{
 				Resources: v1.ResourceRequirements{
 					Requests: v1.ResourceList{
-						"cpu":    resource.MustParse("1000m"),
-						"memory": resource.MustParse("0"),
+						v1.ResourceCPU:    resource.MustParse("1000m"),
+						v1.ResourceMemory: resource.MustParse("0"),
 					},
 				},
 			},
 			{
 				Resources: v1.ResourceRequirements{
 					Requests: v1.ResourceList{
-						"cpu":    resource.MustParse("2000m"),
-						"memory": resource.MustParse("0"),
+						v1.ResourceCPU:    resource.MustParse("2000m"),
+						v1.ResourceMemory: resource.MustParse("0"),
 					},
 				},
 			},
@@ -74,16 +74,16 @@ func TestBalancedResourceAllocation(t *testing.T) {
 			{
 				Resources: v1.ResourceRequirements{
 					Requests: v1.ResourceList{
-						"cpu":    resource.MustParse("1000m"),
-						"memory": resource.MustParse("2000"),
+						v1.ResourceCPU:    resource.MustParse("1000m"),
+						v1.ResourceMemory: resource.MustParse("2000"),
 					},
 				},
 			},
 			{
 				Resources: v1.ResourceRequirements{
 					Requests: v1.ResourceList{
-						"cpu":    resource.MustParse("2000m"),
-						"memory": resource.MustParse("3000"),
+						v1.ResourceCPU:    resource.MustParse("2000m"),
+						v1.ResourceMemory: resource.MustParse("3000"),
 					},
 				},
 			},
diff --git a/plugin/pkg/scheduler/algorithm/priorities/least_requested_test.go b/plugin/pkg/scheduler/algorithm/priorities/least_requested_test.go
index 05afb586a1e..08e083361b4 100644
--- a/plugin/pkg/scheduler/algorithm/priorities/least_requested_test.go
+++ b/plugin/pkg/scheduler/algorithm/priorities/least_requested_test.go
@@ -51,16 +51,16 @@ func TestLeastRequested(t *testing.T) {
 			{
 				Resources: v1.ResourceRequirements{
 					Requests: v1.ResourceList{
-						"cpu":    resource.MustParse("1000m"),
-						"memory": resource.MustParse("0"),
+						v1.ResourceCPU:    resource.MustParse("1000m"),
+						v1.ResourceMemory: resource.MustParse("0"),
 					},
 				},
 			},
 			{
 				Resources: v1.ResourceRequirements{
 					Requests: v1.ResourceList{
-						"cpu":    resource.MustParse("2000m"),
-						"memory": resource.MustParse("0"),
+						v1.ResourceCPU:    resource.MustParse("2000m"),
+						v1.ResourceMemory: resource.MustParse("0"),
 					},
 				},
 			},
@@ -74,16 +74,16 @@ func TestLeastRequested(t *testing.T) {
 			{
 				Resources: v1.ResourceRequirements{
 					Requests: v1.ResourceList{
-						"cpu":    resource.MustParse("1000m"),
-						"memory": resource.MustParse("2000"),
+						v1.ResourceCPU:    resource.MustParse("1000m"),
+						v1.ResourceMemory: resource.MustParse("2000"),
 					},
 				},
 			},
 			{
 				Resources: v1.ResourceRequirements{
 					Requests: v1.ResourceList{
-						"cpu":    resource.MustParse("2000m"),
-						"memory": resource.MustParse("3000"),
+						v1.ResourceCPU:    resource.MustParse("2000m"),
+						v1.ResourceMemory: resource.MustParse("3000"),
 					},
 				},
 			},
diff --git a/plugin/pkg/scheduler/algorithm/priorities/metadata_test.go b/plugin/pkg/scheduler/algorithm/priorities/metadata_test.go
index c8bc1e1cdbb..b8fd653ba24 100644
--- a/plugin/pkg/scheduler/algorithm/priorities/metadata_test.go
+++ b/plugin/pkg/scheduler/algorithm/priorities/metadata_test.go
@@ -85,8 +85,8 @@ func TestPriorityMetadata(t *testing.T) {
 			ImagePullPolicy: "Always",
 			Resources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("200m"),
-					"memory": resource.MustParse("2000"),
+					v1.ResourceCPU:    resource.MustParse("200m"),
+					v1.ResourceMemory: resource.MustParse("2000"),
 				},
 			},
 		},
diff --git a/plugin/pkg/scheduler/algorithm/priorities/most_requested_test.go b/plugin/pkg/scheduler/algorithm/priorities/most_requested_test.go
index 5497ae2dd34..a77692b4af9 100644
--- a/plugin/pkg/scheduler/algorithm/priorities/most_requested_test.go
+++ b/plugin/pkg/scheduler/algorithm/priorities/most_requested_test.go
@@ -45,16 +45,16 @@ func TestMostRequested(t *testing.T) {
 			{
 				Resources: v1.ResourceRequirements{
 					Requests: v1.ResourceList{
-						"cpu":    resource.MustParse("1000m"),
-						"memory": resource.MustParse("0"),
+						v1.ResourceCPU:    resource.MustParse("1000m"),
+						v1.ResourceMemory: resource.MustParse("0"),
 					},
 				},
 			},
 			{
 				Resources: v1.ResourceRequirements{
 					Requests: v1.ResourceList{
-						"cpu":    resource.MustParse("2000m"),
-						"memory": resource.MustParse("0"),
+						v1.ResourceCPU:    resource.MustParse("2000m"),
+						v1.ResourceMemory: resource.MustParse("0"),
 					},
 				},
 			},
@@ -68,16 +68,16 @@ func TestMostRequested(t *testing.T) {
 			{
 				Resources: v1.ResourceRequirements{
 					Requests: v1.ResourceList{
-						"cpu":    resource.MustParse("1000m"),
-						"memory": resource.MustParse("2000"),
+						v1.ResourceCPU:    resource.MustParse("1000m"),
+						v1.ResourceMemory: resource.MustParse("2000"),
 					},
 				},
 			},
 			{
 				Resources: v1.ResourceRequirements{
 					Requests: v1.ResourceList{
-						"cpu":    resource.MustParse("2000m"),
-						"memory": resource.MustParse("3000"),
+						v1.ResourceCPU:    resource.MustParse("2000m"),
+						v1.ResourceMemory: resource.MustParse("3000"),
 					},
 				},
 			},
@@ -89,16 +89,16 @@ func TestMostRequested(t *testing.T) {
 			{
 				Resources: v1.ResourceRequirements{
 					Requests: v1.ResourceList{
-						"cpu":    resource.MustParse("2000m"),
-						"memory": resource.MustParse("4000"),
+						v1.ResourceCPU:    resource.MustParse("2000m"),
+						v1.ResourceMemory: resource.MustParse("4000"),
 					},
 				},
 			},
 			{
 				Resources: v1.ResourceRequirements{
 					Requests: v1.ResourceList{
-						"cpu":    resource.MustParse("3000m"),
-						"memory": resource.MustParse("5000"),
+						v1.ResourceCPU:    resource.MustParse("3000m"),
+						v1.ResourceMemory: resource.MustParse("5000"),
 					},
 				},
 			},
diff --git a/plugin/pkg/scheduler/algorithm/priorities/test_util.go b/plugin/pkg/scheduler/algorithm/priorities/test_util.go
index fd21ea8ac24..9eb26f2d93c 100644
--- a/plugin/pkg/scheduler/algorithm/priorities/test_util.go
+++ b/plugin/pkg/scheduler/algorithm/priorities/test_util.go
@@ -30,12 +30,12 @@ func makeNode(node string, milliCPU, memory int64) *v1.Node {
 		ObjectMeta: metav1.ObjectMeta{Name: node},
 		Status: v1.NodeStatus{
 			Capacity: v1.ResourceList{
-				"cpu":    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
-				"memory": *resource.NewQuantity(memory, resource.BinarySI),
+				v1.ResourceCPU:    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
+				v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
 			},
 			Allocatable: v1.ResourceList{
-				"cpu":    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
-				"memory": *resource.NewQuantity(memory, resource.BinarySI),
+				v1.ResourceCPU:    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
+				v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
 			},
 		},
 	}
diff --git a/plugin/pkg/scheduler/core/generic_scheduler_test.go b/plugin/pkg/scheduler/core/generic_scheduler_test.go
index 48828b3bb98..f4abcaa5057 100644
--- a/plugin/pkg/scheduler/core/generic_scheduler_test.go
+++ b/plugin/pkg/scheduler/core/generic_scheduler_test.go
@@ -390,12 +390,12 @@ func makeNode(node string, milliCPU, memory int64) *v1.Node {
 		ObjectMeta: metav1.ObjectMeta{Name: node},
 		Status: v1.NodeStatus{
 			Capacity: v1.ResourceList{
-				"cpu":    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
-				"memory": *resource.NewQuantity(memory, resource.BinarySI),
+				v1.ResourceCPU:    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
+				v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
 			},
 			Allocatable: v1.ResourceList{
-				"cpu":    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
-				"memory": *resource.NewQuantity(memory, resource.BinarySI),
+				v1.ResourceCPU:    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
+				v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
 			},
 		},
 	}
@@ -438,9 +438,9 @@ func TestZeroRequest(t *testing.T) {
 			{
 				Resources: v1.ResourceRequirements{
 					Requests: v1.ResourceList{
-						"cpu": resource.MustParse(
+						v1.ResourceCPU: resource.MustParse(
 							strconv.FormatInt(priorityutil.DefaultMilliCpuRequest, 10) + "m"),
-						"memory": resource.MustParse(
+						v1.ResourceMemory: resource.MustParse(
 							strconv.FormatInt(priorityutil.DefaultMemoryRequest, 10)),
 					},
 				},
@@ -455,9 +455,9 @@ func TestZeroRequest(t *testing.T) {
 			{
 				Resources: v1.ResourceRequirements{
 					Requests: v1.ResourceList{
-						"cpu": resource.MustParse(
+						v1.ResourceCPU: resource.MustParse(
 							strconv.FormatInt(priorityutil.DefaultMilliCpuRequest*3, 10) + "m"),
-						"memory": resource.MustParse(
+						v1.ResourceMemory: resource.MustParse(
 							strconv.FormatInt(priorityutil.DefaultMemoryRequest*3, 10)),
 					},
 				},
diff --git a/test/e2e/pods.go b/test/e2e/pods.go
index 9bd8ea4be5d..380b0bafd9f 100644
--- a/test/e2e/pods.go
+++ b/test/e2e/pods.go
@@ -215,12 +215,12 @@ var _ = framework.KubeDescribe("Pods Extended", func() {
 						Image: "gcr.io/google_containers/nginx-slim:0.7",
 						Resources: v1.ResourceRequirements{
 							Limits: v1.ResourceList{
-								"cpu":    resource.MustParse("100m"),
-								"memory": resource.MustParse("100Mi"),
+								v1.ResourceCPU:    resource.MustParse("100m"),
+								v1.ResourceMemory: resource.MustParse("100Mi"),
 							},
 							Requests: v1.ResourceList{
-								"cpu":    resource.MustParse("100m"),
-								"memory": resource.MustParse("100Mi"),
+								v1.ResourceCPU:    resource.MustParse("100m"),
+								v1.ResourceMemory: resource.MustParse("100Mi"),
 							},
 						},
 					},
diff --git a/test/e2e/scheduling/predicates.go b/test/e2e/scheduling/predicates.go
index 98a38967a5b..030f545c12d 100644
--- a/test/e2e/scheduling/predicates.go
+++ b/test/e2e/scheduling/predicates.go
@@ -157,7 +157,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 
 		nodeToAllocatableMap := make(map[string]int64)
 		for _, node := range nodeList.Items {
-			allocatable, found := node.Status.Allocatable["cpu"]
+			allocatable, found := node.Status.Allocatable[v1.ResourceCPU]
 			Expect(found).To(Equal(true))
 			nodeToAllocatableMap[node.Name] = allocatable.MilliValue()
 			if nodeMaxAllocatable < allocatable.MilliValue() {
@@ -201,10 +201,10 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 				Labels: map[string]string{"name": ""},
 				Resources: &v1.ResourceRequirements{
 					Limits: v1.ResourceList{
-						"cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
+						v1.ResourceCPU: *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
 					},
 					Requests: v1.ResourceList{
-						"cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
+						v1.ResourceCPU: *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
 					},
 				},
 			}), true, framework.Logf))
@@ -215,7 +215,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 			Labels: map[string]string{"name": "additional"},
 			Resources: &v1.ResourceRequirements{
 				Limits: v1.ResourceList{
-					"cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
+					v1.ResourceCPU: *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
 				},
 			},
 		}
diff --git a/test/e2e/scheduling/priorities.go b/test/e2e/scheduling/priorities.go
index 5ceb99c3b05..4d0e27670b4 100644
--- a/test/e2e/scheduling/priorities.go
+++ b/test/e2e/scheduling/priorities.go
@@ -278,20 +278,20 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n
 	// we need the max one to keep the same cpu/mem use rate
 	ratio = math.Max(maxCPUFraction, maxMemFraction)
 	for _, node := range nodes {
-		memAllocatable, found := node.Status.Allocatable["memory"]
+		memAllocatable, found := node.Status.Allocatable[v1.ResourceMemory]
 		Expect(found).To(Equal(true))
 		memAllocatableVal := memAllocatable.Value()
 
-		cpuAllocatable, found := node.Status.Allocatable["cpu"]
+		cpuAllocatable, found := node.Status.Allocatable[v1.ResourceCPU]
 		Expect(found).To(Equal(true))
 		cpuAllocatableMil := cpuAllocatable.MilliValue()
 
 		needCreateResource := v1.ResourceList{}
 		cpuFraction := cpuFractionMap[node.Name]
 		memFraction := memFractionMap[node.Name]
-		needCreateResource["cpu"] = *resource.NewMilliQuantity(int64((ratio-cpuFraction)*float64(cpuAllocatableMil)), resource.DecimalSI)
+		needCreateResource[v1.ResourceCPU] = *resource.NewMilliQuantity(int64((ratio-cpuFraction)*float64(cpuAllocatableMil)), resource.DecimalSI)
 
-		needCreateResource["memory"] = *resource.NewQuantity(int64((ratio-memFraction)*float64(memAllocatableVal)), resource.BinarySI)
+		needCreateResource[v1.ResourceMemory] = *resource.NewQuantity(int64((ratio-memFraction)*float64(memAllocatableVal)), resource.BinarySI)
 
 		err := testutils.StartPods(cs, 1, ns, string(uuid.NewUUID()),
 			*initPausePod(f, pausePodConfig{
@@ -332,12 +332,12 @@ func computeCpuMemFraction(cs clientset.Interface, node v1.Node, resource *v1.Re
 			totalRequestedMemResource += getNonZeroRequests(&pod).Memory
 		}
 	}
-	cpuAllocatable, found := node.Status.Allocatable["cpu"]
+	cpuAllocatable, found := node.Status.Allocatable[v1.ResourceCPU]
 	Expect(found).To(Equal(true))
 	cpuAllocatableMil := cpuAllocatable.MilliValue()
 	cpuFraction := float64(totalRequestedCpuResource) / float64(cpuAllocatableMil)
 
-	memAllocatable, found := node.Status.Allocatable["memory"]
+	memAllocatable, found := node.Status.Allocatable[v1.ResourceMemory]
 	Expect(found).To(Equal(true))
 	memAllocatableVal := memAllocatable.Value()
 	memFraction := float64(totalRequestedMemResource) / float64(memAllocatableVal)
diff --git a/test/e2e_node/allocatable_eviction_test.go b/test/e2e_node/allocatable_eviction_test.go
index 99bbb0adcc7..dda2795301b 100644
--- a/test/e2e_node/allocatable_eviction_test.go
+++ b/test/e2e_node/allocatable_eviction_test.go
@@ -57,7 +57,7 @@ var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disru
 			// The default hard eviction threshold is 250Mb, so Allocatable = Capacity - Reserved - 250Mb
 			// We want Allocatable = 50Mb, so set Reserved = Capacity - Allocatable - 250Mb = Capacity - 300Mb
 			kubeReserved.Sub(resource.MustParse("300Mi"))
-			initialConfig.KubeReserved = kubeletconfig.ConfigurationMap(map[string]string{"memory": kubeReserved.String()})
+			initialConfig.KubeReserved = kubeletconfig.ConfigurationMap(map[string]string{string(v1.ResourceMemory): kubeReserved.String()})
 			initialConfig.EnforceNodeAllocatable = []string{cm.NodeAllocatableEnforcementKey}
 			initialConfig.ExperimentalNodeAllocatableIgnoreEvictionThreshold = false
 			initialConfig.CgroupsPerQOS = true
diff --git a/test/e2e_node/benchmark_util.go b/test/e2e_node/benchmark_util.go
index 6064f287c47..fc4fd9555f7 100644
--- a/test/e2e_node/benchmark_util.go
+++ b/test/e2e_node/benchmark_util.go
@@ -26,6 +26,7 @@ import (
 	"strconv"
 	"time"
 
+	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/perftype"
@@ -156,12 +157,12 @@ func getTestNodeInfo(f *framework.Framework, testName, testDesc string) map[stri
 	node, err := f.ClientSet.Core().Nodes().Get(nodeName, metav1.GetOptions{})
 	Expect(err).NotTo(HaveOccurred())
 
-	cpu, ok := node.Status.Capacity["cpu"]
+	cpu, ok := node.Status.Capacity[v1.ResourceCPU]
 	if !ok {
 		framework.Failf("Fail to fetch CPU capacity value of test node.")
 	}
 
-	memory, ok := node.Status.Capacity["memory"]
+	memory, ok := node.Status.Capacity[v1.ResourceMemory]
 	if !ok {
 		framework.Failf("Fail to fetch Memory capacity value of test node.")
 	}
diff --git a/test/e2e_node/container_manager_test.go b/test/e2e_node/container_manager_test.go
index f12bf90627c..1b304013e17 100644
--- a/test/e2e_node/container_manager_test.go
+++ b/test/e2e_node/container_manager_test.go
@@ -172,8 +172,8 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
 						Name: podName,
 						Resources: v1.ResourceRequirements{
 							Limits: v1.ResourceList{
-								"cpu":    resource.MustParse("100m"),
-								"memory": resource.MustParse("50Mi"),
+								v1.ResourceCPU:    resource.MustParse("100m"),
+								v1.ResourceMemory: resource.MustParse("50Mi"),
 							},
 						},
 					},
@@ -213,8 +213,8 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
 						Name: podName,
 						Resources: v1.ResourceRequirements{
 							Requests: v1.ResourceList{
-								"cpu":    resource.MustParse("100m"),
-								"memory": resource.MustParse("50Mi"),
+								v1.ResourceCPU:    resource.MustParse("100m"),
+								v1.ResourceMemory: resource.MustParse("50Mi"),
 							},
 						},
 					},
diff --git a/test/e2e_node/critical_pod_test.go b/test/e2e_node/critical_pod_test.go
index 5a0edfc9dea..663d6fd7a31 100644
--- a/test/e2e_node/critical_pod_test.go
+++ b/test/e2e_node/critical_pod_test.go
@@ -56,18 +56,18 @@ var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive]", func() {
 			// Define test pods
 			nonCriticalGuaranteed := getTestPod(false, guaranteedPodName, v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("100m"),
-					"memory": resource.MustParse("100Mi"),
+					v1.ResourceCPU:    resource.MustParse("100m"),
+					v1.ResourceMemory: resource.MustParse("100Mi"),
 				},
 				Limits: v1.ResourceList{
-					"cpu":    resource.MustParse("100m"),
-					"memory": resource.MustParse("100Mi"),
+					v1.ResourceCPU:    resource.MustParse("100m"),
+					v1.ResourceMemory: resource.MustParse("100Mi"),
 				},
 			})
 			nonCriticalBurstable := getTestPod(false, burstablePodName, v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("100m"),
-					"memory": resource.MustParse("100Mi"),
+					v1.ResourceCPU:    resource.MustParse("100m"),
+					v1.ResourceMemory: resource.MustParse("100Mi"),
 				},
 			})
 			nonCriticalBestEffort := getTestPod(false, bestEffortPodName, v1.ResourceRequirements{})
diff --git a/test/e2e_node/memory_eviction_test.go b/test/e2e_node/memory_eviction_test.go
index 0dae58b5792..1059b62b2ca 100644
--- a/test/e2e_node/memory_eviction_test.go
+++ b/test/e2e_node/memory_eviction_test.go
@@ -139,12 +139,12 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu
 				// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.
 				guaranteed := getMemhogPod("guaranteed-pod", "guaranteed", v1.ResourceRequirements{
 					Requests: v1.ResourceList{
-						"cpu":    resource.MustParse("100m"),
-						"memory": resource.MustParse("100Mi"),
+						v1.ResourceCPU:    resource.MustParse("100m"),
+						v1.ResourceMemory: resource.MustParse("100Mi"),
 					},
 					Limits: v1.ResourceList{
-						"cpu":    resource.MustParse("100m"),
-						"memory": resource.MustParse("100Mi"),
+						v1.ResourceCPU:    resource.MustParse("100m"),
+						v1.ResourceMemory: resource.MustParse("100Mi"),
 					}})
 				guaranteed = f.PodClient().CreateSync(guaranteed)
 				glog.Infof("pod created with name: %s", guaranteed.Name)
@@ -152,8 +152,8 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu
 				// A pod is burstable if limits and requests do not match across all containers.
 				burstable := getMemhogPod("burstable-pod", "burstable", v1.ResourceRequirements{
 					Requests: v1.ResourceList{
-						"cpu":    resource.MustParse("100m"),
-						"memory": resource.MustParse("100Mi"),
+						v1.ResourceCPU:    resource.MustParse("100m"),
+						v1.ResourceMemory: resource.MustParse("100Mi"),
 					}})
 				burstable = f.PodClient().CreateSync(burstable)
 				glog.Infof("pod created with name: %s", burstable.Name)
@@ -256,7 +256,7 @@ func getMemhogPod(podName string, ctnName string, res v1.ResourceRequirements) *
 	// This helps prevent a guaranteed pod from triggering an OOM kill due to it's low memory limit,
 	// which will cause the test to fail inappropriately.
 	var memLimit string
-	if limit, ok := res.Limits["memory"]; ok {
+	if limit, ok := res.Limits[v1.ResourceMemory]; ok {
 		memLimit = strconv.Itoa(int(
 			float64(limit.Value()) * 0.8))
 	} else {
diff --git a/test/e2e_node/node_container_manager_test.go b/test/e2e_node/node_container_manager_test.go
index 1232cdaa790..c26fb1eefe7 100644
--- a/test/e2e_node/node_container_manager_test.go
+++ b/test/e2e_node/node_container_manager_test.go
@@ -40,12 +40,12 @@ import (
 func setDesiredConfiguration(initialConfig *kubeletconfig.KubeletConfiguration) {
 	initialConfig.EnforceNodeAllocatable = []string{"pods", "kube-reserved", "system-reserved"}
 	initialConfig.SystemReserved = kubeletconfig.ConfigurationMap{
-		"cpu":    "100m",
-		"memory": "100Mi",
+		string(v1.ResourceCPU):    "100m",
+		string(v1.ResourceMemory): "100Mi",
 	}
 	initialConfig.KubeReserved = kubeletconfig.ConfigurationMap{
-		"cpu":    "100m",
-		"memory": "100Mi",
+		string(v1.ResourceCPU):    "100m",
+		string(v1.ResourceMemory): "100Mi",
 	}
 	initialConfig.EvictionHard = "memory.available<100Mi"
 	// Necessary for allocatable cgroup creation.
@@ -210,23 +210,23 @@ func runTest(f *framework.Framework) error {
 		return fmt.Errorf("Expected all resources in capacity to be found in allocatable")
 	}
 	// CPU based evictions are not supported.
-	if allocatableCPU.Cmp(schedulerAllocatable["cpu"]) != 0 {
-		return fmt.Errorf("Unexpected cpu allocatable value exposed by the node. Expected: %v, got: %v, capacity: %v", allocatableCPU, schedulerAllocatable["cpu"], capacity["cpu"])
+	if allocatableCPU.Cmp(schedulerAllocatable[v1.ResourceCPU]) != 0 {
+		return fmt.Errorf("Unexpected cpu allocatable value exposed by the node. Expected: %v, got: %v, capacity: %v", allocatableCPU, schedulerAllocatable[v1.ResourceCPU], capacity[v1.ResourceCPU])
 	}
-	if allocatableMemory.Cmp(schedulerAllocatable["memory"]) != 0 {
-		return fmt.Errorf("Unexpected cpu allocatable value exposed by the node. Expected: %v, got: %v, capacity: %v", allocatableCPU, schedulerAllocatable["cpu"], capacity["memory"])
+	if allocatableMemory.Cmp(schedulerAllocatable[v1.ResourceMemory]) != 0 {
+		return fmt.Errorf("Unexpected cpu allocatable value exposed by the node. Expected: %v, got: %v, capacity: %v", allocatableCPU, schedulerAllocatable[v1.ResourceCPU], capacity[v1.ResourceMemory])
 	}
 
 	if !cgroupManager.Exists(cm.CgroupName(kubeReservedCgroup)) {
 		return fmt.Errorf("Expected kube reserved cgroup Does not exist")
 	}
 	// Expect CPU shares on kube reserved cgroup to equal it's reservation which is `100m`.
-	kubeReservedCPU := resource.MustParse(currentConfig.KubeReserved["cpu"])
+	kubeReservedCPU := resource.MustParse(currentConfig.KubeReserved[string(v1.ResourceCPU)])
 	if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["cpu"], kubeReservedCgroup, "cpu.shares"), cm.MilliCPUToShares(kubeReservedCPU.MilliValue()), 10); err != nil {
 		return err
 	}
 	// Expect Memory limit kube reserved cgroup to equal configured value `100Mi`.
-	kubeReservedMemory := resource.MustParse(currentConfig.KubeReserved["memory"])
+	kubeReservedMemory := resource.MustParse(currentConfig.KubeReserved[string(v1.ResourceMemory)])
 	if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["memory"], kubeReservedCgroup, "memory.limit_in_bytes"), kubeReservedMemory.Value(), 0); err != nil {
 		return err
 	}
@@ -234,12 +234,12 @@ func runTest(f *framework.Framework) error {
 		return fmt.Errorf("Expected system reserved cgroup Does not exist")
 	}
 	// Expect CPU shares on system reserved cgroup to equal it's reservation which is `100m`.
-	systemReservedCPU := resource.MustParse(currentConfig.SystemReserved["cpu"])
+	systemReservedCPU := resource.MustParse(currentConfig.SystemReserved[string(v1.ResourceCPU)])
 	if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["cpu"], systemReservedCgroup, "cpu.shares"), cm.MilliCPUToShares(systemReservedCPU.MilliValue()), 10); err != nil {
 		return err
 	}
 	// Expect Memory limit on node allocatable cgroup to equal allocatable.
-	systemReservedMemory := resource.MustParse(currentConfig.SystemReserved["memory"])
+	systemReservedMemory := resource.MustParse(currentConfig.SystemReserved[string(v1.ResourceMemory)])
 	if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["memory"], systemReservedCgroup, "memory.limit_in_bytes"), systemReservedMemory.Value(), 0); err != nil {
 		return err
 	}
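Every hunk above applies the same pattern: map keys written as bare strings ("cpu", "memory") are replaced with the typed v1.ResourceCPU and v1.ResourceMemory constants, and string(v1.ResourceCPU) / string(v1.ResourceMemory) are used where a plain map[string]string is required (the kubelet KubeReserved/SystemReserved configuration). For reference only, here is a minimal standalone sketch, not part of this patch, of what the typed keys look like in isolation; it assumes the k8s.io/api/core/v1 and k8s.io/apimachinery/pkg/api/resource import paths that this diff itself uses:

// sketch.go - illustrative only; mirrors the pattern applied by the hunks above.
package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// v1.ResourceList is a map keyed by v1.ResourceName, so the typed
	// constants replace the bare "cpu"/"memory" string literals.
	req := v1.ResourceRequirements{
		Requests: v1.ResourceList{
			v1.ResourceCPU:    resource.MustParse("100m"),
			v1.ResourceMemory: resource.MustParse("100Mi"),
		},
		Limits: v1.ResourceList{
			v1.ResourceCPU:    resource.MustParse("100m"),
			v1.ResourceMemory: resource.MustParse("100Mi"),
		},
	}

	// Lookups use the same constants; string(v1.ResourceMemory) recovers the
	// plain "memory" key for APIs that still take map[string]string.
	cpu := req.Requests[v1.ResourceCPU]
	fmt.Println(cpu.MilliValue(), string(v1.ResourceMemory))
}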