Merge pull request #12718 from HaiyangDING/ChangeLimitToRequest

Use requests instead of limits to account for resource requirements in the priority functions.
Robert Bailey 2015-08-17 13:52:28 -07:00
commit 5d58c2ca77
2 changed files with 43 additions and 42 deletions
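The change is mechanical, but the distinction matters: a container's ResourceRequirements carries both a Requests list and a Limits list, and after this change the scheduler's priority functions read the former. A minimal sketch of the structure involved, assuming the import paths of the repository at this point in time; the literal values are illustrative only:

package main

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/resource"
)

func main() {
	// A container that requests 100m CPU / 200Mi memory but may burst up to
	// larger limits. The priority functions in this change score nodes from
	// Requests rather than Limits.
	_ = api.Container{
		Resources: api.ResourceRequirements{
			Requests: api.ResourceList{
				"cpu":    resource.MustParse("100m"),
				"memory": resource.MustParse("200Mi"),
			},
			Limits: api.ResourceList{
				"cpu":    resource.MustParse("500m"),
				"memory": resource.MustParse("512Mi"),
			},
		},
	}
}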


@@ -43,29 +43,30 @@ func calculateScore(requested int64, capacity int64, node string) int {
// For each of these resources, a pod that doesn't request the resource explicitly
// will be treated as having requested the amount indicated below, for the purpose
// of computing priority only. This ensures that when scheduling zero-limit pods, such
// pods will not all be scheduled to the machine with the smallest in-use limit,
// and that when scheduling regular pods, such pods will not see zero-limit pods as
// of computing priority only. This ensures that when scheduling zero-request pods, such
// pods will not all be scheduled to the machine with the smallest in-use request,
// and that when scheduling regular pods, such pods will not see zero-request pods as
// consuming no resources whatsoever. We chose these values to be similar to the
// resources that we give to cluster addon pods (#10653). But they are pretty arbitrary.
const defaultMilliCpuLimit int64 = 100 // 0.1 core
const defaultMemoryLimit int64 = 200 * 1024 * 1024 // 200 MB
// As described in #11713, we use request instead of limit to deal with resource requirements.
const defaultMilliCpuRequest int64 = 100 // 0.1 core
const defaultMemoryRequest int64 = 200 * 1024 * 1024 // 200 MB
// TODO: Consider setting default as a fixed fraction of machine capacity (take "capacity api.ResourceList"
// as an additional argument here) rather than using constants
func getNonzeroLimits(limits *api.ResourceList) (int64, int64) {
func getNonzeroRequests(requests *api.ResourceList) (int64, int64) {
var out_millicpu, out_memory int64
// Override if un-set, but not if explicitly set to zero
if (*limits.Cpu() == resource.Quantity{}) {
out_millicpu = defaultMilliCpuLimit
if (*requests.Cpu() == resource.Quantity{}) {
out_millicpu = defaultMilliCpuRequest
} else {
out_millicpu = limits.Cpu().MilliValue()
out_millicpu = requests.Cpu().MilliValue()
}
// Override if un-set, but not if explicitly set to zero
if (*limits.Memory() == resource.Quantity{}) {
out_memory = defaultMemoryLimit
if (*requests.Memory() == resource.Quantity{}) {
out_memory = defaultMemoryRequest
} else {
out_memory = limits.Memory().Value()
out_memory = requests.Memory().Value()
}
return out_millicpu, out_memory
}
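A short sketch of how the defaulting above plays out, written as if it sat next to the function (getNonzeroRequests is unexported, and fmt would need importing there); the expected values simply restate the "override if un-set, but not if explicitly set to zero" comments rather than anything outside this hunk:

// Hypothetical in-package helper, for illustration only.
func sketchNonzeroRequests() {
	// No requests set at all: both resources fall back to the defaults.
	empty := api.ResourceList{}
	cpu, mem := getNonzeroRequests(&empty)
	fmt.Println(cpu, mem) // expect 100 and 200*1024*1024

	// CPU explicitly requested as zero: the zero is kept, while the unset
	// memory request is still defaulted.
	zeroCpu := api.ResourceList{"cpu": resource.MustParse("0")}
	cpu, mem = getNonzeroRequests(&zeroCpu)
	fmt.Println(cpu, mem) // expect 0 and 200*1024*1024
}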
@@ -80,7 +81,7 @@ func calculateResourceOccupancy(pod *api.Pod, node api.Node, pods []*api.Pod) al
for _, existingPod := range pods {
for _, container := range existingPod.Spec.Containers {
cpu, memory := getNonzeroLimits(&container.Resources.Limits)
cpu, memory := getNonzeroRequests(&container.Resources.Requests)
totalMilliCPU += cpu
totalMemory += memory
}
@@ -88,7 +89,7 @@ func calculateResourceOccupancy(pod *api.Pod, node api.Node, pods []*api.Pod) al
// Add the resources requested by the current pod being scheduled.
// This also helps differentiate between differently sized, but empty, minions.
for _, container := range pod.Spec.Containers {
cpu, memory := getNonzeroLimits(&container.Resources.Limits)
cpu, memory := getNonzeroRequests(&container.Resources.Requests)
totalMilliCPU += cpu
totalMemory += memory
}
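The hunks above only change what gets summed; calculateScore (whose signature appears in the first hunk header) is what turns the per-node totals into a priority. Its body is not part of this diff, so the following is only a sketch of the usual least-requested formulation, scoring unrequested capacity on a 0-10 scale:

// Assumed formulation, not taken from this diff.
func leastRequestedScoreSketch(requested, capacity int64) int64 {
	if capacity == 0 || requested > capacity {
		return 0
	}
	return ((capacity - requested) * 10) / capacity
}

With the defaults above, a pod that declares no requests still contributes 100m of CPU and 200MB of memory to the requested total, so a node already packed with zero-request pods no longer looks empty to the spreading logic.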
@@ -196,7 +197,7 @@ func calculateBalancedResourceAllocation(pod *api.Pod, node api.Node, pods []*ap
score := int(0)
for _, existingPod := range pods {
for _, container := range existingPod.Spec.Containers {
cpu, memory := getNonzeroLimits(&container.Resources.Limits)
cpu, memory := getNonzeroRequests(&container.Resources.Requests)
totalMilliCPU += cpu
totalMemory += memory
}
@@ -204,7 +205,7 @@ func calculateBalancedResourceAllocation(pod *api.Pod, node api.Node, pods []*ap
// Add the resources requested by the current pod being scheduled.
// This also helps differentiate between differently sized, but empty, minions.
for _, container := range pod.Spec.Containers {
cpu, memory := getNonzeroLimits(&container.Resources.Limits)
cpu, memory := getNonzeroRequests(&container.Resources.Requests)
totalMilliCPU += cpu
totalMemory += memory
}
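calculateBalancedResourceAllocation consumes the same request totals but scores balance rather than headroom. Its scoring body is also outside this diff; assuming the formulation the name suggests, i.e. favoring nodes whose CPU and memory request fractions come out close to each other, a sketch:

// Assumed formulation, not taken from this diff.
func balancedScoreSketch(cpuRequested, cpuCapacity, memRequested, memCapacity int64) int64 {
	if cpuCapacity == 0 || memCapacity == 0 {
		return 0
	}
	cpuFraction := float64(cpuRequested) / float64(cpuCapacity)
	memFraction := float64(memRequested) / float64(memCapacity)
	if cpuFraction >= 1 || memFraction >= 1 {
		// Requests beyond capacity make the node an unattractive fit.
		return 0
	}
	// 10 when CPU and memory are equally utilized, approaching 0 as they diverge.
	return int64(10 - math.Abs(cpuFraction-memFraction)*10)
}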


@@ -40,7 +40,7 @@ func makeMinion(node string, milliCPU, memory int64) api.Node {
}
}
func TestZeroLimit(t *testing.T) {
func TestZeroRequest(t *testing.T) {
// A pod with no resources. We expect spreading to count it as having the default resources.
noResources := api.PodSpec{
Containers: []api.Container{
@@ -49,16 +49,16 @@ func TestZeroLimit(t *testing.T) {
}
noResources1 := noResources
noResources1.NodeName = "machine1"
// A pod with the same resources as a 0-limit pod gets by default as its resources (for spreading).
// A pod with the same resources that a 0-request pod gets by default (for spreading).
small := api.PodSpec{
Containers: []api.Container{
{
Resources: api.ResourceRequirements{
Limits: api.ResourceList{
Requests: api.ResourceList{
"cpu": resource.MustParse(
strconv.FormatInt(defaultMilliCpuLimit, 10) + "m"),
strconv.FormatInt(defaultMilliCpuRequest, 10) + "m"),
"memory": resource.MustParse(
strconv.FormatInt(defaultMemoryLimit, 10)),
strconv.FormatInt(defaultMemoryRequest, 10)),
},
},
},
@@ -71,11 +71,11 @@ func TestZeroLimit(t *testing.T) {
Containers: []api.Container{
{
Resources: api.ResourceRequirements{
Limits: api.ResourceList{
Requests: api.ResourceList{
"cpu": resource.MustParse(
strconv.FormatInt(defaultMilliCpuLimit*3, 10) + "m"),
strconv.FormatInt(defaultMilliCpuRequest*3, 10) + "m"),
"memory": resource.MustParse(
strconv.FormatInt(defaultMemoryLimit*3, 10)),
strconv.FormatInt(defaultMemoryRequest*3, 10)),
},
},
},
@@ -91,13 +91,13 @@ func TestZeroLimit(t *testing.T) {
nodes []api.Node
test string
}{
// The point of these next two tests is to show you get the same priority for a zero-limit pod
// as for a pod with the defaults limits, both when the zero-limit pod is already on the machine
// and when the zero-limit pod is the one being scheduled.
// The point of these next two tests is to show you get the same priority for a zero-request pod
// as for a pod with the default requests, both when the zero-request pod is already on the machine
// and when the zero-request pod is the one being scheduled.
{
pod: &api.Pod{Spec: noResources},
nodes: []api.Node{makeMinion("machine1", 1000, defaultMemoryLimit*10), makeMinion("machine2", 1000, defaultMemoryLimit*10)},
test: "test priority of zero-limit pod with machine with zero-limit pod",
nodes: []api.Node{makeMinion("machine1", 1000, defaultMemoryRequest*10), makeMinion("machine2", 1000, defaultMemoryRequest*10)},
test: "test priority of zero-request pod with machine with zero-request pod",
pods: []*api.Pod{
{Spec: large1}, {Spec: noResources1},
{Spec: large2}, {Spec: small2},
@@ -105,8 +105,8 @@ func TestZeroLimit(t *testing.T) {
},
{
pod: &api.Pod{Spec: small},
nodes: []api.Node{makeMinion("machine1", 1000, defaultMemoryLimit*10), makeMinion("machine2", 1000, defaultMemoryLimit*10)},
test: "test priority of nonzero-limit pod with machine with zero-limit pod",
nodes: []api.Node{makeMinion("machine1", 1000, defaultMemoryRequest*10), makeMinion("machine2", 1000, defaultMemoryRequest*10)},
test: "test priority of nonzero-request pod with machine with zero-request pod",
pods: []*api.Pod{
{Spec: large1}, {Spec: noResources1},
{Spec: large2}, {Spec: small2},
@@ -115,8 +115,8 @@ func TestZeroLimit(t *testing.T) {
// The point of this test is to verify that we're not just getting the same score no matter what we schedule.
{
pod: &api.Pod{Spec: large},
nodes: []api.Node{makeMinion("machine1", 1000, defaultMemoryLimit*10), makeMinion("machine2", 1000, defaultMemoryLimit*10)},
test: "test priority of larger pod with machine with zero-limit pod",
nodes: []api.Node{makeMinion("machine1", 1000, defaultMemoryRequest*10), makeMinion("machine2", 1000, defaultMemoryRequest*10)},
test: "test priority of larger pod with machine with zero-request pod",
pods: []*api.Pod{
{Spec: large1}, {Spec: noResources1},
{Spec: large2}, {Spec: small2},
@@ -138,7 +138,7 @@ func TestZeroLimit(t *testing.T) {
t.Errorf("unexpected error: %v", err)
}
for _, hp := range list {
if test.test == "test priority of larger pod with machine with zero-limit pod" {
if test.test == "test priority of larger pod with machine with zero-request pod" {
if hp.Score == expectedPriority {
t.Errorf("%s: expected non-%d for all priorities, got list %#v", test.test, expectedPriority, list)
}
@@ -174,7 +174,7 @@ func TestLeastRequested(t *testing.T) {
Containers: []api.Container{
{
Resources: api.ResourceRequirements{
Limits: api.ResourceList{
Requests: api.ResourceList{
"cpu": resource.MustParse("1000m"),
"memory": resource.MustParse("0"),
},
@@ -182,7 +182,7 @@ func TestLeastRequested(t *testing.T) {
},
{
Resources: api.ResourceRequirements{
Limits: api.ResourceList{
Requests: api.ResourceList{
"cpu": resource.MustParse("2000m"),
"memory": resource.MustParse("0"),
},
@@ -197,7 +197,7 @@ func TestLeastRequested(t *testing.T) {
Containers: []api.Container{
{
Resources: api.ResourceRequirements{
Limits: api.ResourceList{
Requests: api.ResourceList{
"cpu": resource.MustParse("1000m"),
"memory": resource.MustParse("2000"),
},
@@ -205,7 +205,7 @@ func TestLeastRequested(t *testing.T) {
},
{
Resources: api.ResourceRequirements{
Limits: api.ResourceList{
Requests: api.ResourceList{
"cpu": resource.MustParse("2000m"),
"memory": resource.MustParse("3000"),
},
@@ -506,7 +506,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
Containers: []api.Container{
{
Resources: api.ResourceRequirements{
Limits: api.ResourceList{
Requests: api.ResourceList{
"cpu": resource.MustParse("1000m"),
"memory": resource.MustParse("0"),
},
@@ -514,7 +514,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
},
{
Resources: api.ResourceRequirements{
Limits: api.ResourceList{
Requests: api.ResourceList{
"cpu": resource.MustParse("2000m"),
"memory": resource.MustParse("0"),
},
@@ -529,7 +529,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
Containers: []api.Container{
{
Resources: api.ResourceRequirements{
Limits: api.ResourceList{
Requests: api.ResourceList{
"cpu": resource.MustParse("1000m"),
"memory": resource.MustParse("2000"),
},
@@ -537,7 +537,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
},
{
Resources: api.ResourceRequirements{
Limits: api.ResourceList{
Requests: api.ResourceList{
"cpu": resource.MustParse("2000m"),
"memory": resource.MustParse("3000"),
},