Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-30 23:57:46 +00:00).
remove unused GetNonzeroRequests function
This commit is contained in:
parent
b1f07bb36c
commit
8b15843d00
@ -16,11 +16,6 @@ limitations under the License.
|
||||
|
||||
package util
|
||||
|
||||
import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
)
|
||||
|
||||
// For each of these resources, a container that doesn't request the resource explicitly
|
||||
// will be treated as having requested the amount indicated below, for the purpose
|
||||
// of computing priority only. This ensures that when scheduling zero-request pods, such
|
||||
@ -35,41 +30,3 @@ const (
|
||||
// DefaultMemoryRequest defines default memory request size.
|
||||
DefaultMemoryRequest int64 = 200 * 1024 * 1024 // 200 MB
|
||||
)
|
||||
|
||||
// GetNonzeroRequests returns the default cpu in milli-cpu and memory in bytes resource requests if none is found or
|
||||
// what is provided on the request.
|
||||
func GetNonzeroRequests(requests *v1.ResourceList) (int64, int64) {
|
||||
cpu := GetRequestForResource(v1.ResourceCPU, requests, true)
|
||||
mem := GetRequestForResource(v1.ResourceMemory, requests, true)
|
||||
return cpu.MilliValue(), mem.Value()
|
||||
|
||||
}
|
||||
|
||||
// GetRequestForResource returns the requested values unless nonZero is true and there is no defined request
|
||||
// for CPU and memory.
|
||||
// If nonZero is true and the resource has no defined request for CPU or memory, it returns a default value.
|
||||
func GetRequestForResource(resourceName v1.ResourceName, requests *v1.ResourceList, nonZero bool) resource.Quantity {
|
||||
if requests == nil {
|
||||
return resource.Quantity{}
|
||||
}
|
||||
switch resourceName {
|
||||
case v1.ResourceCPU:
|
||||
// Override if un-set, but not if explicitly set to zero
|
||||
if _, found := (*requests)[v1.ResourceCPU]; !found && nonZero {
|
||||
return *resource.NewMilliQuantity(DefaultMilliCPURequest, resource.DecimalSI)
|
||||
}
|
||||
return requests.Cpu().DeepCopy()
|
||||
case v1.ResourceMemory:
|
||||
// Override if un-set, but not if explicitly set to zero
|
||||
if _, found := (*requests)[v1.ResourceMemory]; !found && nonZero {
|
||||
return *resource.NewQuantity(DefaultMemoryRequest, resource.DecimalSI)
|
||||
}
|
||||
return requests.Memory().DeepCopy()
|
||||
default:
|
||||
quantity, found := (*requests)[resourceName]
|
||||
if !found {
|
||||
return resource.Quantity{}
|
||||
}
|
||||
return quantity.DeepCopy()
|
||||
}
|
||||
}
|
||||
|
@ -1,177 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package util
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
)
|
||||
|
||||
func TestGetNonZeroRequest(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
requests v1.ResourceList
|
||||
expectedCPU int64
|
||||
expectedMemory int64
|
||||
}{
|
||||
{
|
||||
"cpu_and_memory_not_found",
|
||||
v1.ResourceList{},
|
||||
DefaultMilliCPURequest,
|
||||
DefaultMemoryRequest,
|
||||
},
|
||||
{
|
||||
"only_cpu_exist",
|
||||
v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("200m"),
|
||||
},
|
||||
200,
|
||||
DefaultMemoryRequest,
|
||||
},
|
||||
{
|
||||
"only_memory_exist",
|
||||
v1.ResourceList{
|
||||
v1.ResourceMemory: resource.MustParse("400Mi"),
|
||||
},
|
||||
DefaultMilliCPURequest,
|
||||
400 * 1024 * 1024,
|
||||
},
|
||||
{
|
||||
"cpu_memory_exist",
|
||||
v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("200m"),
|
||||
v1.ResourceMemory: resource.MustParse("400Mi"),
|
||||
},
|
||||
200,
|
||||
400 * 1024 * 1024,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
realCPU, realMemory := GetNonzeroRequests(&test.requests)
|
||||
assert.EqualValuesf(t, test.expectedCPU, realCPU, "Failed to test: %s", test.name)
|
||||
assert.EqualValuesf(t, test.expectedMemory, realMemory, "Failed to test: %s", test.name)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetRequestForResource(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
requests v1.ResourceList
|
||||
resource v1.ResourceName
|
||||
expectedQuantity int64
|
||||
nonZero bool
|
||||
}{
|
||||
{
|
||||
"extended_resource_not_found",
|
||||
v1.ResourceList{},
|
||||
v1.ResourceName("intel.com/foo"),
|
||||
0,
|
||||
true,
|
||||
},
|
||||
{
|
||||
"extended_resource_found",
|
||||
v1.ResourceList{
|
||||
v1.ResourceName("intel.com/foo"): resource.MustParse("4"),
|
||||
},
|
||||
v1.ResourceName("intel.com/foo"),
|
||||
4,
|
||||
true,
|
||||
},
|
||||
{
|
||||
"cpu_not_found",
|
||||
v1.ResourceList{},
|
||||
v1.ResourceCPU,
|
||||
DefaultMilliCPURequest,
|
||||
true,
|
||||
},
|
||||
{
|
||||
"memory_not_found",
|
||||
v1.ResourceList{},
|
||||
v1.ResourceMemory,
|
||||
DefaultMemoryRequest,
|
||||
true,
|
||||
},
|
||||
{
|
||||
"cpu_exist",
|
||||
v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("200m"),
|
||||
},
|
||||
v1.ResourceCPU,
|
||||
200,
|
||||
true,
|
||||
},
|
||||
{
|
||||
"memory_exist",
|
||||
v1.ResourceList{
|
||||
v1.ResourceMemory: resource.MustParse("400Mi"),
|
||||
},
|
||||
v1.ResourceMemory,
|
||||
400 * 1024 * 1024,
|
||||
true,
|
||||
},
|
||||
{
|
||||
"ephemeralStorage_exist",
|
||||
v1.ResourceList{
|
||||
v1.ResourceEphemeralStorage: resource.MustParse("400Mi"),
|
||||
},
|
||||
v1.ResourceEphemeralStorage,
|
||||
400 * 1024 * 1024,
|
||||
true,
|
||||
},
|
||||
{
|
||||
"ephemeralStorage_not_found",
|
||||
v1.ResourceList{},
|
||||
v1.ResourceEphemeralStorage,
|
||||
0,
|
||||
true,
|
||||
},
|
||||
{
|
||||
"cpu_not_found, useRequested is true",
|
||||
v1.ResourceList{},
|
||||
v1.ResourceCPU,
|
||||
0,
|
||||
false,
|
||||
},
|
||||
{
|
||||
"memory_not_found, useRequested is true",
|
||||
v1.ResourceList{},
|
||||
v1.ResourceMemory,
|
||||
0,
|
||||
false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
realQuantity := GetRequestForResource(test.resource, &test.requests, test.nonZero)
|
||||
var realQuantityI64 int64
|
||||
if test.resource == v1.ResourceCPU {
|
||||
realQuantityI64 = realQuantity.MilliValue()
|
||||
} else {
|
||||
realQuantityI64 = realQuantity.Value()
|
||||
}
|
||||
assert.EqualValuesf(t, test.expectedQuantity, realQuantityI64, "Failed to test: %s", test.name)
|
||||
})
|
||||
}
|
||||
}
|
@ -537,9 +537,10 @@ func getNonZeroRequests(pod *v1.Pod) Resource {
|
||||
result := Resource{}
|
||||
for i := range pod.Spec.Containers {
|
||||
container := &pod.Spec.Containers[i]
|
||||
cpu, memory := schedutil.GetNonzeroRequests(&container.Resources.Requests)
|
||||
result.MilliCPU += cpu
|
||||
result.Memory += memory
|
||||
cpu := getNonZeroRequestForResource(v1.ResourceCPU, &container.Resources.Requests)
|
||||
memory := getNonZeroRequestForResource(v1.ResourceMemory, &container.Resources.Requests)
|
||||
result.MilliCPU += cpu.MilliValue()
|
||||
result.Memory += memory.Value()
|
||||
}
|
||||
return result
|
||||
}
|
||||
@ -556,3 +557,31 @@ func addTaintToNode(ctx context.Context, cs clientset.Interface, nodeName string
|
||||
e2enode.AddOrUpdateTaintOnNode(ctx, cs, nodeName, testTaint)
|
||||
e2enode.ExpectNodeHasTaint(ctx, cs, nodeName, &testTaint)
|
||||
}
|
||||
|
||||
// getNonZeroRequestForResource returns the requested values,
|
||||
// if the resource has undefined request for CPU or memory, it returns a default value.
|
||||
func getNonZeroRequestForResource(resourceName v1.ResourceName, requests *v1.ResourceList) resource.Quantity {
|
||||
if requests == nil {
|
||||
return resource.Quantity{}
|
||||
}
|
||||
switch resourceName {
|
||||
case v1.ResourceCPU:
|
||||
// Override if un-set, but not if explicitly set to zero
|
||||
if _, found := (*requests)[v1.ResourceCPU]; !found {
|
||||
return *resource.NewMilliQuantity(schedutil.DefaultMilliCPURequest, resource.DecimalSI)
|
||||
}
|
||||
return requests.Cpu().DeepCopy()
|
||||
case v1.ResourceMemory:
|
||||
// Override if un-set, but not if explicitly set to zero
|
||||
if _, found := (*requests)[v1.ResourceMemory]; !found {
|
||||
return *resource.NewQuantity(schedutil.DefaultMemoryRequest, resource.DecimalSI)
|
||||
}
|
||||
return requests.Memory().DeepCopy()
|
||||
default:
|
||||
quantity, found := (*requests)[resourceName]
|
||||
if !found {
|
||||
return resource.Quantity{}
|
||||
}
|
||||
return quantity.DeepCopy()
|
||||
}
|
||||
}
|
||||
|
@ -153,80 +153,6 @@ func TestNodeResourcesScoring(t *testing.T) {
|
||||
// expectedNodeName is the list of node names. The pod should be scheduled on either of them.
|
||||
expectedNodeName []string
|
||||
}{
|
||||
{
|
||||
name: "with least allocated strategy, pod scheduled to node with more allocatable resources",
|
||||
pod: func(testCtx *testutils.TestContext) *v1.Pod {
|
||||
return st.MakePod().Namespace(testCtx.NS.Name).Name("pod").
|
||||
Res(map[v1.ResourceName]string{
|
||||
v1.ResourceCPU: "2",
|
||||
v1.ResourceMemory: "4G",
|
||||
resourceGPU: "1",
|
||||
}).Obj()
|
||||
},
|
||||
existingPods: func(testCtx *testutils.TestContext) []*v1.Pod {
|
||||
return []*v1.Pod{
|
||||
st.MakePod().Namespace(testCtx.NS.Name).Name("existing-pod").Node("node-1").
|
||||
Res(map[v1.ResourceName]string{
|
||||
v1.ResourceCPU: "2",
|
||||
v1.ResourceMemory: "4G",
|
||||
resourceGPU: "1",
|
||||
}).Obj(),
|
||||
}
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
st.MakeNode().Name("node-1").Capacity(
|
||||
map[v1.ResourceName]string{
|
||||
v1.ResourceCPU: "8",
|
||||
v1.ResourceMemory: "16G",
|
||||
resourceGPU: "4",
|
||||
}).Obj(),
|
||||
st.MakeNode().Name("node-2").Capacity(
|
||||
map[v1.ResourceName]string{
|
||||
v1.ResourceCPU: "8",
|
||||
v1.ResourceMemory: "16G",
|
||||
resourceGPU: "4",
|
||||
}).Obj(),
|
||||
},
|
||||
strategy: configv1.LeastAllocated,
|
||||
expectedNodeName: []string{"node-2"},
|
||||
},
|
||||
{
|
||||
name: "with most allocated strategy, pod scheduled to node with less allocatable resources",
|
||||
pod: func(testCtx *testutils.TestContext) *v1.Pod {
|
||||
return st.MakePod().Namespace(testCtx.NS.Name).Name("pod").
|
||||
Res(map[v1.ResourceName]string{
|
||||
v1.ResourceCPU: "2",
|
||||
v1.ResourceMemory: "4G",
|
||||
resourceGPU: "1",
|
||||
}).Obj()
|
||||
},
|
||||
existingPods: func(testCtx *testutils.TestContext) []*v1.Pod {
|
||||
return []*v1.Pod{
|
||||
st.MakePod().Namespace(testCtx.NS.Name).Name("existing-pod").Node("node-1").
|
||||
Res(map[v1.ResourceName]string{
|
||||
v1.ResourceCPU: "2",
|
||||
v1.ResourceMemory: "4G",
|
||||
resourceGPU: "1",
|
||||
}).Obj(),
|
||||
}
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
st.MakeNode().Name("node-1").Capacity(
|
||||
map[v1.ResourceName]string{
|
||||
v1.ResourceCPU: "8",
|
||||
v1.ResourceMemory: "16G",
|
||||
resourceGPU: "4",
|
||||
}).Obj(),
|
||||
st.MakeNode().Name("node-2").Capacity(
|
||||
map[v1.ResourceName]string{
|
||||
v1.ResourceCPU: "8",
|
||||
v1.ResourceMemory: "16G",
|
||||
resourceGPU: "4",
|
||||
}).Obj(),
|
||||
},
|
||||
strategy: configv1.MostAllocated,
|
||||
expectedNodeName: []string{"node-1"},
|
||||
},
|
||||
{
|
||||
name: "with least allocated strategy, take existing sidecars into consideration",
|
||||
pod: func(testCtx *testutils.TestContext) *v1.Pod {
|
||||
|
Loading…
Reference in New Issue
Block a user