Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-24 20:24:09 +00:00

Commit 1c4dbcf5ca (parent ba110e9f08): Replace hard-coded "cpu" and "memory" with consts
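For context, the constants this commit switches to are defined in k8s.io/api/core/v1; below is a minimal sketch of the relevant definitions and the before/after usage (illustrative only, not part of the diff):

    // k8s.io/api/core/v1: resource names are a typed string, and a
    // ResourceList maps them to quantities.
    type ResourceName string

    const (
        ResourceCPU    ResourceName = "cpu"
        ResourceMemory ResourceName = "memory"
    )

    type ResourceList map[ResourceName]resource.Quantity

    // Before: untyped string literals compile because Go converts them to
    // ResourceName implicitly, so a typo like "memroy" silently creates a
    // different resource key.
    limits := v1.ResourceList{"cpu": resource.MustParse("100m")}

    // After: the named constants make the intent explicit, and a typo in
    // the constant name fails at compile time.
    limits = v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")}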
@@ -30,9 +30,8 @@ func TestResourceHelpers(t *testing.T) {
 	memoryLimit := resource.MustParse("10G")
 	resourceSpec := api.ResourceRequirements{
 		Limits: api.ResourceList{
-			"cpu":             cpuLimit,
-			"memory":          memoryLimit,
-			"kube.io/storage": memoryLimit,
+			api.ResourceCPU:    cpuLimit,
+			api.ResourceMemory: memoryLimit,
 		},
 	}
 	if res := resourceSpec.Limits.Cpu(); res.Cmp(cpuLimit) != 0 {
@@ -43,8 +42,7 @@ func TestResourceHelpers(t *testing.T) {
 	}
 	resourceSpec = api.ResourceRequirements{
 		Limits: api.ResourceList{
-			"memory":          memoryLimit,
-			"kube.io/storage": memoryLimit,
+			api.ResourceMemory: memoryLimit,
 		},
 	}
 	if res := resourceSpec.Limits.Cpu(); res.Value() != 0 {
@@ -30,9 +30,8 @@ func TestResourceHelpers(t *testing.T) {
 	memoryLimit := resource.MustParse("10G")
 	resourceSpec := v1.ResourceRequirements{
 		Limits: v1.ResourceList{
-			"cpu":             cpuLimit,
-			"memory":          memoryLimit,
-			"kube.io/storage": memoryLimit,
+			v1.ResourceCPU:    cpuLimit,
+			v1.ResourceMemory: memoryLimit,
 		},
 	}
 	if res := resourceSpec.Limits.Cpu(); res.Cmp(cpuLimit) != 0 {
@@ -43,8 +42,7 @@ func TestResourceHelpers(t *testing.T) {
 	}
 	resourceSpec = v1.ResourceRequirements{
 		Limits: v1.ResourceList{
-			"memory":          memoryLimit,
-			"kube.io/storage": memoryLimit,
+			v1.ResourceMemory: memoryLimit,
 		},
 	}
 	if res := resourceSpec.Limits.Cpu(); res.Value() != 0 {
@@ -563,7 +563,7 @@ func TestHandleMemExceeded(t *testing.T) {
 	spec := v1.PodSpec{NodeName: string(kl.nodeName),
 		Containers: []v1.Container{{Resources: v1.ResourceRequirements{
 			Requests: v1.ResourceList{
-				"memory": resource.MustParse("90"),
+				v1.ResourceMemory: resource.MustParse("90"),
 			},
 		}}},
 	}
@@ -338,47 +338,47 @@ func getTestPods() map[string]*v1.Pod {
 	allPods := map[string]*v1.Pod{
 		tinyBurstable: getPodWithResources(tinyBurstable, v1.ResourceRequirements{
 			Requests: v1.ResourceList{
-				"cpu":    resource.MustParse("1m"),
-				"memory": resource.MustParse("1Mi"),
+				v1.ResourceCPU:    resource.MustParse("1m"),
+				v1.ResourceMemory: resource.MustParse("1Mi"),
 			},
 		}),
 		bestEffort: getPodWithResources(bestEffort, v1.ResourceRequirements{}),
 		critical: getPodWithResources(critical, v1.ResourceRequirements{
 			Requests: v1.ResourceList{
-				"cpu":    resource.MustParse("100m"),
-				"memory": resource.MustParse("100Mi"),
+				v1.ResourceCPU:    resource.MustParse("100m"),
+				v1.ResourceMemory: resource.MustParse("100Mi"),
 			},
 		}),
 		burstable: getPodWithResources(burstable, v1.ResourceRequirements{
 			Requests: v1.ResourceList{
-				"cpu":    resource.MustParse("100m"),
-				"memory": resource.MustParse("100Mi"),
+				v1.ResourceCPU:    resource.MustParse("100m"),
+				v1.ResourceMemory: resource.MustParse("100Mi"),
 			},
 		}),
 		guaranteed: getPodWithResources(guaranteed, v1.ResourceRequirements{
 			Requests: v1.ResourceList{
-				"cpu":    resource.MustParse("100m"),
-				"memory": resource.MustParse("100Mi"),
+				v1.ResourceCPU:    resource.MustParse("100m"),
+				v1.ResourceMemory: resource.MustParse("100Mi"),
 			},
 			Limits: v1.ResourceList{
-				"cpu":    resource.MustParse("100m"),
-				"memory": resource.MustParse("100Mi"),
+				v1.ResourceCPU:    resource.MustParse("100m"),
+				v1.ResourceMemory: resource.MustParse("100Mi"),
 			},
 		}),
 		highRequestBurstable: getPodWithResources(highRequestBurstable, v1.ResourceRequirements{
 			Requests: v1.ResourceList{
-				"cpu":    resource.MustParse("300m"),
-				"memory": resource.MustParse("300Mi"),
+				v1.ResourceCPU:    resource.MustParse("300m"),
+				v1.ResourceMemory: resource.MustParse("300Mi"),
 			},
 		}),
 		highRequestGuaranteed: getPodWithResources(highRequestGuaranteed, v1.ResourceRequirements{
 			Requests: v1.ResourceList{
-				"cpu":    resource.MustParse("300m"),
-				"memory": resource.MustParse("300Mi"),
+				v1.ResourceCPU:    resource.MustParse("300m"),
+				v1.ResourceMemory: resource.MustParse("300Mi"),
 			},
 			Limits: v1.ResourceList{
-				"cpu":    resource.MustParse("300m"),
-				"memory": resource.MustParse("300Mi"),
+				v1.ResourceCPU:    resource.MustParse("300m"),
+				v1.ResourceMemory: resource.MustParse("300Mi"),
 			},
 		}),
 	}
@@ -1073,8 +1073,8 @@ func TestSetApp(t *testing.T) {
 			Command:    []string{"/bin/bar", "$(env-bar)"},
 			WorkingDir: tmpDir,
 			Resources: v1.ResourceRequirements{
-				Limits:   v1.ResourceList{"cpu": resource.MustParse("50m"), "memory": resource.MustParse("50M")},
-				Requests: v1.ResourceList{"cpu": resource.MustParse("5m"), "memory": resource.MustParse("5M")},
+				Limits:   v1.ResourceList{v1.ResourceCPU: resource.MustParse("50m"), v1.ResourceMemory: resource.MustParse("50M")},
+				Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("5m"), v1.ResourceMemory: resource.MustParse("5M")},
 			},
 		},
 		mountPoints: []appctypes.MountPoint{
@@ -1137,8 +1137,8 @@ func TestSetApp(t *testing.T) {
 			Args:       []string{"hello", "world", "$(env-bar)"},
 			WorkingDir: tmpDir,
 			Resources: v1.ResourceRequirements{
-				Limits:   v1.ResourceList{"cpu": resource.MustParse("50m")},
-				Requests: v1.ResourceList{"memory": resource.MustParse("5M")},
+				Limits:   v1.ResourceList{v1.ResourceCPU: resource.MustParse("50m")},
+				Requests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("5M")},
 			},
 		},
 		mountPoints: []appctypes.MountPoint{
@@ -51,16 +51,16 @@ func TestBalancedResourceAllocation(t *testing.T) {
 		{
 			Resources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("1000m"),
-					"memory": resource.MustParse("0"),
+					v1.ResourceCPU:    resource.MustParse("1000m"),
+					v1.ResourceMemory: resource.MustParse("0"),
 				},
 			},
 		},
 		{
 			Resources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("2000m"),
-					"memory": resource.MustParse("0"),
+					v1.ResourceCPU:    resource.MustParse("2000m"),
+					v1.ResourceMemory: resource.MustParse("0"),
 				},
 			},
 		},
@@ -74,16 +74,16 @@ func TestBalancedResourceAllocation(t *testing.T) {
 		{
 			Resources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("1000m"),
-					"memory": resource.MustParse("2000"),
+					v1.ResourceCPU:    resource.MustParse("1000m"),
+					v1.ResourceMemory: resource.MustParse("2000"),
 				},
 			},
 		},
 		{
 			Resources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("2000m"),
-					"memory": resource.MustParse("3000"),
+					v1.ResourceCPU:    resource.MustParse("2000m"),
+					v1.ResourceMemory: resource.MustParse("3000"),
 				},
 			},
 		},
@@ -51,16 +51,16 @@ func TestLeastRequested(t *testing.T) {
 		{
 			Resources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("1000m"),
-					"memory": resource.MustParse("0"),
+					v1.ResourceCPU:    resource.MustParse("1000m"),
+					v1.ResourceMemory: resource.MustParse("0"),
 				},
 			},
 		},
 		{
 			Resources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("2000m"),
-					"memory": resource.MustParse("0"),
+					v1.ResourceCPU:    resource.MustParse("2000m"),
+					v1.ResourceMemory: resource.MustParse("0"),
 				},
 			},
 		},
@@ -74,16 +74,16 @@ func TestLeastRequested(t *testing.T) {
 		{
 			Resources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("1000m"),
-					"memory": resource.MustParse("2000"),
+					v1.ResourceCPU:    resource.MustParse("1000m"),
+					v1.ResourceMemory: resource.MustParse("2000"),
 				},
 			},
 		},
 		{
 			Resources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("2000m"),
-					"memory": resource.MustParse("3000"),
+					v1.ResourceCPU:    resource.MustParse("2000m"),
+					v1.ResourceMemory: resource.MustParse("3000"),
 				},
 			},
 		},
@@ -85,8 +85,8 @@ func TestPriorityMetadata(t *testing.T) {
 		ImagePullPolicy: "Always",
 		Resources: v1.ResourceRequirements{
 			Requests: v1.ResourceList{
-				"cpu":    resource.MustParse("200m"),
-				"memory": resource.MustParse("2000"),
+				v1.ResourceCPU:    resource.MustParse("200m"),
+				v1.ResourceMemory: resource.MustParse("2000"),
 			},
 		},
 	},
@@ -45,16 +45,16 @@ func TestMostRequested(t *testing.T) {
 		{
 			Resources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("1000m"),
-					"memory": resource.MustParse("0"),
+					v1.ResourceCPU:    resource.MustParse("1000m"),
+					v1.ResourceMemory: resource.MustParse("0"),
 				},
 			},
 		},
 		{
 			Resources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("2000m"),
-					"memory": resource.MustParse("0"),
+					v1.ResourceCPU:    resource.MustParse("2000m"),
+					v1.ResourceMemory: resource.MustParse("0"),
 				},
 			},
 		},
@@ -68,16 +68,16 @@ func TestMostRequested(t *testing.T) {
 		{
 			Resources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("1000m"),
-					"memory": resource.MustParse("2000"),
+					v1.ResourceCPU:    resource.MustParse("1000m"),
+					v1.ResourceMemory: resource.MustParse("2000"),
 				},
 			},
 		},
 		{
 			Resources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("2000m"),
-					"memory": resource.MustParse("3000"),
+					v1.ResourceCPU:    resource.MustParse("2000m"),
+					v1.ResourceMemory: resource.MustParse("3000"),
 				},
 			},
 		},
@@ -89,16 +89,16 @@ func TestMostRequested(t *testing.T) {
 		{
 			Resources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("2000m"),
-					"memory": resource.MustParse("4000"),
+					v1.ResourceCPU:    resource.MustParse("2000m"),
+					v1.ResourceMemory: resource.MustParse("4000"),
 				},
 			},
 		},
 		{
 			Resources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("3000m"),
-					"memory": resource.MustParse("5000"),
+					v1.ResourceCPU:    resource.MustParse("3000m"),
+					v1.ResourceMemory: resource.MustParse("5000"),
 				},
 			},
 		},
@@ -30,12 +30,12 @@ func makeNode(node string, milliCPU, memory int64) *v1.Node {
 		ObjectMeta: metav1.ObjectMeta{Name: node},
 		Status: v1.NodeStatus{
 			Capacity: v1.ResourceList{
-				"cpu":    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
-				"memory": *resource.NewQuantity(memory, resource.BinarySI),
+				v1.ResourceCPU:    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
+				v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
 			},
 			Allocatable: v1.ResourceList{
-				"cpu":    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
-				"memory": *resource.NewQuantity(memory, resource.BinarySI),
+				v1.ResourceCPU:    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
+				v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
 			},
 		},
 	}
@@ -390,12 +390,12 @@ func makeNode(node string, milliCPU, memory int64) *v1.Node {
 		ObjectMeta: metav1.ObjectMeta{Name: node},
 		Status: v1.NodeStatus{
 			Capacity: v1.ResourceList{
-				"cpu":    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
-				"memory": *resource.NewQuantity(memory, resource.BinarySI),
+				v1.ResourceCPU:    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
+				v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
 			},
 			Allocatable: v1.ResourceList{
-				"cpu":    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
-				"memory": *resource.NewQuantity(memory, resource.BinarySI),
+				v1.ResourceCPU:    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
+				v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
 			},
 		},
 	}
@@ -438,9 +438,9 @@ func TestZeroRequest(t *testing.T) {
 			{
 				Resources: v1.ResourceRequirements{
 					Requests: v1.ResourceList{
-						"cpu": resource.MustParse(
+						v1.ResourceCPU: resource.MustParse(
 							strconv.FormatInt(priorityutil.DefaultMilliCpuRequest, 10) + "m"),
-						"memory": resource.MustParse(
+						v1.ResourceMemory: resource.MustParse(
 							strconv.FormatInt(priorityutil.DefaultMemoryRequest, 10)),
 					},
 				},
@@ -455,9 +455,9 @@ func TestZeroRequest(t *testing.T) {
 			{
 				Resources: v1.ResourceRequirements{
 					Requests: v1.ResourceList{
-						"cpu": resource.MustParse(
+						v1.ResourceCPU: resource.MustParse(
 							strconv.FormatInt(priorityutil.DefaultMilliCpuRequest*3, 10) + "m"),
-						"memory": resource.MustParse(
+						v1.ResourceMemory: resource.MustParse(
 							strconv.FormatInt(priorityutil.DefaultMemoryRequest*3, 10)),
 					},
 				},
@@ -215,12 +215,12 @@ var _ = framework.KubeDescribe("Pods Extended", func() {
 				Image: "gcr.io/google_containers/nginx-slim:0.7",
 				Resources: v1.ResourceRequirements{
 					Limits: v1.ResourceList{
-						"cpu":    resource.MustParse("100m"),
-						"memory": resource.MustParse("100Mi"),
+						v1.ResourceCPU:    resource.MustParse("100m"),
+						v1.ResourceMemory: resource.MustParse("100Mi"),
 					},
 					Requests: v1.ResourceList{
-						"cpu":    resource.MustParse("100m"),
-						"memory": resource.MustParse("100Mi"),
+						v1.ResourceCPU:    resource.MustParse("100m"),
+						v1.ResourceMemory: resource.MustParse("100Mi"),
 					},
 				},
 			},
@@ -157,7 +157,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 
 		nodeToAllocatableMap := make(map[string]int64)
 		for _, node := range nodeList.Items {
-			allocatable, found := node.Status.Allocatable["cpu"]
+			allocatable, found := node.Status.Allocatable[v1.ResourceCPU]
 			Expect(found).To(Equal(true))
 			nodeToAllocatableMap[node.Name] = allocatable.MilliValue()
 			if nodeMaxAllocatable < allocatable.MilliValue() {
@@ -201,10 +201,10 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 			Labels: map[string]string{"name": ""},
 			Resources: &v1.ResourceRequirements{
 				Limits: v1.ResourceList{
-					"cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
+					v1.ResourceCPU: *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
 				},
 				Requests: v1.ResourceList{
-					"cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
+					v1.ResourceCPU: *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
 				},
 			},
 		}), true, framework.Logf))
@@ -215,7 +215,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 			Labels: map[string]string{"name": "additional"},
 			Resources: &v1.ResourceRequirements{
 				Limits: v1.ResourceList{
-					"cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
+					v1.ResourceCPU: *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
 				},
 			},
 		}
@@ -278,20 +278,20 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n
 	// we need the max one to keep the same cpu/mem use rate
 	ratio = math.Max(maxCPUFraction, maxMemFraction)
 	for _, node := range nodes {
-		memAllocatable, found := node.Status.Allocatable["memory"]
+		memAllocatable, found := node.Status.Allocatable[v1.ResourceMemory]
 		Expect(found).To(Equal(true))
 		memAllocatableVal := memAllocatable.Value()
 
-		cpuAllocatable, found := node.Status.Allocatable["cpu"]
+		cpuAllocatable, found := node.Status.Allocatable[v1.ResourceCPU]
 		Expect(found).To(Equal(true))
 		cpuAllocatableMil := cpuAllocatable.MilliValue()
 
 		needCreateResource := v1.ResourceList{}
 		cpuFraction := cpuFractionMap[node.Name]
 		memFraction := memFractionMap[node.Name]
-		needCreateResource["cpu"] = *resource.NewMilliQuantity(int64((ratio-cpuFraction)*float64(cpuAllocatableMil)), resource.DecimalSI)
+		needCreateResource[v1.ResourceCPU] = *resource.NewMilliQuantity(int64((ratio-cpuFraction)*float64(cpuAllocatableMil)), resource.DecimalSI)
 
-		needCreateResource["memory"] = *resource.NewQuantity(int64((ratio-memFraction)*float64(memAllocatableVal)), resource.BinarySI)
+		needCreateResource[v1.ResourceMemory] = *resource.NewQuantity(int64((ratio-memFraction)*float64(memAllocatableVal)), resource.BinarySI)
 
 		err := testutils.StartPods(cs, 1, ns, string(uuid.NewUUID()),
 			*initPausePod(f, pausePodConfig{
@@ -332,12 +332,12 @@ func computeCpuMemFraction(cs clientset.Interface, node v1.Node, resource *v1.Re
 			totalRequestedMemResource += getNonZeroRequests(&pod).Memory
 		}
 	}
-	cpuAllocatable, found := node.Status.Allocatable["cpu"]
+	cpuAllocatable, found := node.Status.Allocatable[v1.ResourceCPU]
 	Expect(found).To(Equal(true))
 	cpuAllocatableMil := cpuAllocatable.MilliValue()
 
 	cpuFraction := float64(totalRequestedCpuResource) / float64(cpuAllocatableMil)
-	memAllocatable, found := node.Status.Allocatable["memory"]
+	memAllocatable, found := node.Status.Allocatable[v1.ResourceMemory]
 	Expect(found).To(Equal(true))
 	memAllocatableVal := memAllocatable.Value()
 	memFraction := float64(totalRequestedMemResource) / float64(memAllocatableVal)
@@ -57,7 +57,7 @@ var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disru
 		// The default hard eviction threshold is 250Mb, so Allocatable = Capacity - Reserved - 250Mb
 		// We want Allocatable = 50Mb, so set Reserved = Capacity - Allocatable - 250Mb = Capacity - 300Mb
 		kubeReserved.Sub(resource.MustParse("300Mi"))
-		initialConfig.KubeReserved = kubeletconfig.ConfigurationMap(map[string]string{"memory": kubeReserved.String()})
+		initialConfig.KubeReserved = kubeletconfig.ConfigurationMap(map[string]string{string(v1.ResourceMemory): kubeReserved.String()})
 		initialConfig.EnforceNodeAllocatable = []string{cm.NodeAllocatableEnforcementKey}
 		initialConfig.ExperimentalNodeAllocatableIgnoreEvictionThreshold = false
 		initialConfig.CgroupsPerQOS = true
@@ -26,6 +26,7 @@ import (
 	"strconv"
 	"time"
 
+	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/perftype"
@@ -156,12 +157,12 @@ func getTestNodeInfo(f *framework.Framework, testName, testDesc string) map[stri
 	node, err := f.ClientSet.Core().Nodes().Get(nodeName, metav1.GetOptions{})
 	Expect(err).NotTo(HaveOccurred())
 
-	cpu, ok := node.Status.Capacity["cpu"]
+	cpu, ok := node.Status.Capacity[v1.ResourceCPU]
 	if !ok {
 		framework.Failf("Fail to fetch CPU capacity value of test node.")
 	}
 
-	memory, ok := node.Status.Capacity["memory"]
+	memory, ok := node.Status.Capacity[v1.ResourceMemory]
 	if !ok {
 		framework.Failf("Fail to fetch Memory capacity value of test node.")
 	}
@@ -172,8 +172,8 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
 					Name: podName,
 					Resources: v1.ResourceRequirements{
 						Limits: v1.ResourceList{
-							"cpu":    resource.MustParse("100m"),
-							"memory": resource.MustParse("50Mi"),
+							v1.ResourceCPU:    resource.MustParse("100m"),
+							v1.ResourceMemory: resource.MustParse("50Mi"),
 						},
 					},
 				},
@@ -213,8 +213,8 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
 					Name: podName,
 					Resources: v1.ResourceRequirements{
 						Requests: v1.ResourceList{
-							"cpu":    resource.MustParse("100m"),
-							"memory": resource.MustParse("50Mi"),
+							v1.ResourceCPU:    resource.MustParse("100m"),
+							v1.ResourceMemory: resource.MustParse("50Mi"),
 						},
 					},
 				},
@@ -56,18 +56,18 @@ var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive]", func() {
 			// Define test pods
 			nonCriticalGuaranteed := getTestPod(false, guaranteedPodName, v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("100m"),
-					"memory": resource.MustParse("100Mi"),
+					v1.ResourceCPU:    resource.MustParse("100m"),
+					v1.ResourceMemory: resource.MustParse("100Mi"),
 				},
 				Limits: v1.ResourceList{
-					"cpu":    resource.MustParse("100m"),
-					"memory": resource.MustParse("100Mi"),
+					v1.ResourceCPU:    resource.MustParse("100m"),
+					v1.ResourceMemory: resource.MustParse("100Mi"),
 				},
 			})
 			nonCriticalBurstable := getTestPod(false, burstablePodName, v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("100m"),
-					"memory": resource.MustParse("100Mi"),
+					v1.ResourceCPU:    resource.MustParse("100m"),
+					v1.ResourceMemory: resource.MustParse("100Mi"),
 				},
 			})
 			nonCriticalBestEffort := getTestPod(false, bestEffortPodName, v1.ResourceRequirements{})
@@ -139,12 +139,12 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu
 			// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.
 			guaranteed := getMemhogPod("guaranteed-pod", "guaranteed", v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("100m"),
-					"memory": resource.MustParse("100Mi"),
+					v1.ResourceCPU:    resource.MustParse("100m"),
+					v1.ResourceMemory: resource.MustParse("100Mi"),
 				},
 				Limits: v1.ResourceList{
-					"cpu":    resource.MustParse("100m"),
-					"memory": resource.MustParse("100Mi"),
+					v1.ResourceCPU:    resource.MustParse("100m"),
+					v1.ResourceMemory: resource.MustParse("100Mi"),
 				}})
 			guaranteed = f.PodClient().CreateSync(guaranteed)
 			glog.Infof("pod created with name: %s", guaranteed.Name)
@@ -152,8 +152,8 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu
 			// A pod is burstable if limits and requests do not match across all containers.
 			burstable := getMemhogPod("burstable-pod", "burstable", v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("100m"),
-					"memory": resource.MustParse("100Mi"),
+					v1.ResourceCPU:    resource.MustParse("100m"),
+					v1.ResourceMemory: resource.MustParse("100Mi"),
 				}})
 			burstable = f.PodClient().CreateSync(burstable)
 			glog.Infof("pod created with name: %s", burstable.Name)
@@ -256,7 +256,7 @@ func getMemhogPod(podName string, ctnName string, res v1.ResourceRequirements) *
 	// This helps prevent a guaranteed pod from triggering an OOM kill due to it's low memory limit,
 	// which will cause the test to fail inappropriately.
 	var memLimit string
-	if limit, ok := res.Limits["memory"]; ok {
+	if limit, ok := res.Limits[v1.ResourceMemory]; ok {
 		memLimit = strconv.Itoa(int(
 			float64(limit.Value()) * 0.8))
 	} else {
@@ -40,12 +40,12 @@ import (
 func setDesiredConfiguration(initialConfig *kubeletconfig.KubeletConfiguration) {
 	initialConfig.EnforceNodeAllocatable = []string{"pods", "kube-reserved", "system-reserved"}
 	initialConfig.SystemReserved = kubeletconfig.ConfigurationMap{
-		"cpu":    "100m",
-		"memory": "100Mi",
+		string(v1.ResourceCPU):    "100m",
+		string(v1.ResourceMemory): "100Mi",
 	}
 	initialConfig.KubeReserved = kubeletconfig.ConfigurationMap{
-		"cpu":    "100m",
-		"memory": "100Mi",
+		string(v1.ResourceCPU):    "100m",
+		string(v1.ResourceMemory): "100Mi",
 	}
 	initialConfig.EvictionHard = "memory.available<100Mi"
 	// Necessary for allocatable cgroup creation.
@@ -210,23 +210,23 @@ func runTest(f *framework.Framework) error {
 		return fmt.Errorf("Expected all resources in capacity to be found in allocatable")
 	}
 	// CPU based evictions are not supported.
-	if allocatableCPU.Cmp(schedulerAllocatable["cpu"]) != 0 {
-		return fmt.Errorf("Unexpected cpu allocatable value exposed by the node. Expected: %v, got: %v, capacity: %v", allocatableCPU, schedulerAllocatable["cpu"], capacity["cpu"])
+	if allocatableCPU.Cmp(schedulerAllocatable[v1.ResourceCPU]) != 0 {
+		return fmt.Errorf("Unexpected cpu allocatable value exposed by the node. Expected: %v, got: %v, capacity: %v", allocatableCPU, schedulerAllocatable[v1.ResourceCPU], capacity[v1.ResourceCPU])
 	}
-	if allocatableMemory.Cmp(schedulerAllocatable["memory"]) != 0 {
-		return fmt.Errorf("Unexpected cpu allocatable value exposed by the node. Expected: %v, got: %v, capacity: %v", allocatableCPU, schedulerAllocatable["cpu"], capacity["memory"])
+	if allocatableMemory.Cmp(schedulerAllocatable[v1.ResourceMemory]) != 0 {
+		return fmt.Errorf("Unexpected cpu allocatable value exposed by the node. Expected: %v, got: %v, capacity: %v", allocatableCPU, schedulerAllocatable[v1.ResourceCPU], capacity[v1.ResourceMemory])
 	}
 
 	if !cgroupManager.Exists(cm.CgroupName(kubeReservedCgroup)) {
 		return fmt.Errorf("Expected kube reserved cgroup Does not exist")
 	}
 	// Expect CPU shares on kube reserved cgroup to equal it's reservation which is `100m`.
-	kubeReservedCPU := resource.MustParse(currentConfig.KubeReserved["cpu"])
+	kubeReservedCPU := resource.MustParse(currentConfig.KubeReserved[string(v1.ResourceCPU)])
 	if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["cpu"], kubeReservedCgroup, "cpu.shares"), cm.MilliCPUToShares(kubeReservedCPU.MilliValue()), 10); err != nil {
 		return err
 	}
 	// Expect Memory limit kube reserved cgroup to equal configured value `100Mi`.
-	kubeReservedMemory := resource.MustParse(currentConfig.KubeReserved["memory"])
+	kubeReservedMemory := resource.MustParse(currentConfig.KubeReserved[string(v1.ResourceMemory)])
 	if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["memory"], kubeReservedCgroup, "memory.limit_in_bytes"), kubeReservedMemory.Value(), 0); err != nil {
 		return err
 	}
@@ -234,12 +234,12 @@ func runTest(f *framework.Framework) error {
 		return fmt.Errorf("Expected system reserved cgroup Does not exist")
 	}
 	// Expect CPU shares on system reserved cgroup to equal it's reservation which is `100m`.
-	systemReservedCPU := resource.MustParse(currentConfig.SystemReserved["cpu"])
+	systemReservedCPU := resource.MustParse(currentConfig.SystemReserved[string(v1.ResourceCPU)])
 	if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["cpu"], systemReservedCgroup, "cpu.shares"), cm.MilliCPUToShares(systemReservedCPU.MilliValue()), 10); err != nil {
 		return err
 	}
 	// Expect Memory limit on node allocatable cgroup to equal allocatable.
-	systemReservedMemory := resource.MustParse(currentConfig.SystemReserved["memory"])
+	systemReservedMemory := resource.MustParse(currentConfig.SystemReserved[string(v1.ResourceMemory)])
 	if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["memory"], systemReservedCgroup, "memory.limit_in_bytes"), systemReservedMemory.Value(), 0); err != nil {
 		return err
 	}