Add resource requests support

Dr. Stefan Schimanski 2015-11-10 14:20:44 +01:00
parent 1a958b0517
commit a3f02cda17
9 changed files with 263 additions and 203 deletions

View File

@@ -22,11 +22,9 @@ import (
"github.com/gogo/protobuf/proto"
mesos "github.com/mesos/mesos-go/mesosproto"
mutil "github.com/mesos/mesos-go/mesosutil"
"github.com/stretchr/testify/assert"
"k8s.io/kubernetes/contrib/mesos/pkg/node"
mresource "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resource"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
)
const (
@@ -43,93 +41,6 @@ func fakePodTask(id string) (*T, error) {
})
}
func TestUnlimitedResources(t *testing.T) {
assert := assert.New(t)
task, _ := fakePodTask("unlimited")
pod := &task.Pod
pod.Spec = api.PodSpec{
Containers: []api.Container{{
Name: "a",
Ports: []api.ContainerPort{{
HostPort: 123,
}},
Resources: api.ResourceRequirements{
Limits: api.ResourceList{
api.ResourceCPU: *resource.NewQuantity(3, resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(768*1024*1024, resource.BinarySI),
},
},
}, {
Name: "b",
}, {
Name: "c",
}},
}
beforeLimitingCPU := mresource.CPUForPod(pod, mresource.DefaultDefaultContainerCPULimit)
beforeLimitingMem := mresource.MemForPod(pod, mresource.DefaultDefaultContainerMemLimit)
unboundedCPU := mresource.LimitPodCPU(pod, mresource.DefaultDefaultContainerCPULimit)
unboundedMem := mresource.LimitPodMem(pod, mresource.DefaultDefaultContainerMemLimit)
cpu := mresource.PodCPULimit(pod)
mem := mresource.PodMemLimit(pod)
assert.True(unboundedCPU, "CPU resources are defined as unlimited")
assert.True(unboundedMem, "mem resources are defined as unlimited")
assert.Equal(2*float64(mresource.DefaultDefaultContainerCPULimit)+3.0, float64(cpu))
assert.Equal(2*float64(mresource.DefaultDefaultContainerMemLimit)+768.0, float64(mem))
assert.Equal(cpu, beforeLimitingCPU)
assert.Equal(mem, beforeLimitingMem)
}
func TestLimitedResources(t *testing.T) {
assert := assert.New(t)
task, _ := fakePodTask("limited")
pod := &task.Pod
pod.Spec = api.PodSpec{
Containers: []api.Container{{
Name: "a",
Resources: api.ResourceRequirements{
Limits: api.ResourceList{
api.ResourceCPU: *resource.NewQuantity(1, resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(256*1024*1024, resource.BinarySI),
},
},
}, {
Name: "b",
Resources: api.ResourceRequirements{
Limits: api.ResourceList{
api.ResourceCPU: *resource.NewQuantity(2, resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(512*1024*1024, resource.BinarySI),
},
},
}},
}
beforeLimitingCPU := mresource.CPUForPod(pod, mresource.DefaultDefaultContainerCPULimit)
beforeLimitingMem := mresource.MemForPod(pod, mresource.DefaultDefaultContainerMemLimit)
unboundedCPU := mresource.LimitPodCPU(pod, mresource.DefaultDefaultContainerCPULimit)
unboundedMem := mresource.LimitPodMem(pod, mresource.DefaultDefaultContainerMemLimit)
cpu := mresource.PodCPULimit(pod)
mem := mresource.PodMemLimit(pod)
assert.False(unboundedCPU, "CPU resources are defined as limited")
assert.False(unboundedMem, "mem resources are defined as limited")
assert.Equal(3.0, float64(cpu))
assert.Equal(768.0, float64(mem))
assert.Equal(cpu, beforeLimitingCPU)
assert.Equal(mem, beforeLimitingMem)
}
func TestEmptyOffer(t *testing.T) {
t.Parallel()
task, err := fakePodTask("foo")

View File

@@ -190,7 +190,7 @@ func TestWildcardHostPortMatching(t *testing.T) {
}},
}},
}
task, err = New(api.NewDefaultContext(), "", *pod, &mesos.ExecutorInfo{})
task, err = New(api.NewDefaultContext(), "", pod)
if err != nil {
t.Fatal(err)
}

View File

@@ -100,8 +100,15 @@ func NewPodFitsResourcesPredicate(c mresource.CPUShares, m mresource.MegaBytes)
// calculate cpu and mem sum over all containers of the pod
// TODO (@sttts): also support pod.spec.resources.limit.request
// TODO (@sttts): take into account the executor resources
cpu := mresource.CPUForPod(&t.Pod, c)
mem := mresource.MemForPod(&t.Pod, m)
_, cpu, _, err := mresource.CPUForPod(&t.Pod, c)
if err != nil {
return false
}
_, mem, _, err := mresource.MemForPod(&t.Pod, m)
if err != nil {
return false
}
log.V(4).Infof("trying to match offer with pod %v/%v: cpus: %.2f mem: %.2f MB", t.Pod.Namespace, t.Pod.Name, cpu, mem)
if (cpu > offeredCpus) || (mem > offeredMem) {
log.V(3).Infof("not enough resources for pod %v/%v: cpus: %.2f mem: %.2f MB", t.Pod.Namespace, t.Pod.Name, cpu, mem)
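
Below, a minimal sketch (not part of this commit) of the same check as a standalone helper, using the new four-value CPUForPod/MemForPod signatures; the fits helper and the offered values are hypothetical:

package main

import (
	"fmt"

	mresource "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resource"
	"k8s.io/kubernetes/pkg/api"
)

// fits mirrors the predicate above: compare the pod's (defaulted) limit sums
// against hypothetical offered cpus/mem, without mutating the pod.
func fits(pod *api.Pod, offeredCpus, offeredMem float64) (bool, error) {
	_, cpu, _, err := mresource.CPUForPod(pod, mresource.DefaultDefaultContainerCPULimit)
	if err != nil {
		return false, err
	}
	_, mem, _, err := mresource.MemForPod(pod, mresource.DefaultDefaultContainerMemLimit)
	if err != nil {
		return false, err
	}
	return float64(cpu) <= offeredCpus && float64(mem) <= offeredMem, nil
}

func main() {
	// a one-container pod with no declared resources falls back to the
	// defaults: 0.25 cpu shares and 64 MB
	pod := &api.Pod{Spec: api.PodSpec{Containers: []api.Container{{Name: "a"}}}}
	ok, err := fits(pod, 1.0, 128.0)
	fmt.Println(ok, err) // true <nil>
}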

View File

@@ -27,15 +27,14 @@ import (
// k8s api.Pod.Spec's that don't declare resources (all containers in k8s-mesos require cpu
// and memory limits).
func NewDefaultProcurement(c mresource.CPUShares, m mresource.MegaBytes) Procurement {
requireSome := &RequireSomePodResources{
resourceProcurer := &RequirePodResources{
defaultContainerCPULimit: c,
defaultContainerMemLimit: m,
}
return AllOrNothingProcurement([]Procurement{
ValidateProcurement,
NodeProcurement,
requireSome.Procure,
PodResourcesProcurement,
resourceProcurer.Procure,
PortsProcurement,
}).Procure
}
@@ -80,37 +79,33 @@ func NodeProcurement(t *T, offer *mesos.Offer) error {
return nil
}
type RequireSomePodResources struct {
type RequirePodResources struct {
defaultContainerCPULimit mresource.CPUShares
defaultContainerMemLimit mresource.MegaBytes
}
func (r *RequireSomePodResources) Procure(t *T, offer *mesos.Offer) error {
func (r *RequirePodResources) Procure(t *T, offer *mesos.Offer) error {
// write resource limits into the pod spec which is transferred to the executor. From here
// on we can expect that the pod spec of a task has proper limits for CPU and memory.
// TODO(sttts): For a later separation of the kubelet and the executor also patch the pod on the apiserver
// TODO(sttts): fall back to requested resources if resource limit cannot be fulfilled by the offer
// TODO(jdef): changing the state of t.Pod here feels dirty, especially since we don't use a kosher
// method to clone the api.Pod state in T.Clone(). This needs some love.
if unlimitedCPU := mresource.LimitPodCPU(&t.Pod, r.defaultContainerCPULimit); unlimitedCPU {
log.V(2).Infof("Pod %s/%s without cpu limits is admitted %.2f cpu shares", t.Pod.Namespace, t.Pod.Name, mresource.PodCPULimit(&t.Pod))
_, cpuLimit, _, err := mresource.LimitPodCPU(&t.Pod, r.defaultContainerCPULimit)
if err != nil {
return err
}
if unlimitedMem := mresource.LimitPodMem(&t.Pod, r.defaultContainerMemLimit); unlimitedMem {
log.V(2).Infof("Pod %s/%s without memory limits is admitted %.2f MB", t.Pod.Namespace, t.Pod.Name, mresource.PodMemLimit(&t.Pod))
_, memLimit, _, err := mresource.LimitPodMem(&t.Pod, r.defaultContainerMemLimit)
if err != nil {
return err
}
return nil
}
// PodResourcesProcurement converts k8s pod cpu and memory resource requirements into
// mesos resource allocations.
func PodResourcesProcurement(t *T, offer *mesos.Offer) error {
// compute used resources
cpu := mresource.PodCPULimit(&t.Pod)
mem := mresource.PodMemLimit(&t.Pod)
log.V(3).Infof("Recording offer(s) %s/%s against pod %v: cpu: %.2f, mem: %.2f MB", offer.Id, t.Pod.Namespace, t.Pod.Name, cpuLimit, memLimit)
log.V(3).Infof("Recording offer(s) %s/%s against pod %v: cpu: %.2f, mem: %.2f MB", offer.Id, t.Pod.Namespace, t.Pod.Name, cpu, mem)
t.Spec.CPU = cpuLimit
t.Spec.Memory = memLimit
t.Spec.CPU = cpu
t.Spec.Memory = mem
return nil
}
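
A rough sketch (not from the commit) of driving the default procurement chain end-to-end, written as if it lived next to the tests in the podtask package: fakePodTask is the helper from the test file above, and the offer fields (Hostname, SlaveId, scalar resources) and mesosutil constructors are assumptions about what NodeProcurement and PortsProcurement expect:

package podtask

import (
	"testing"

	"github.com/gogo/protobuf/proto"
	mesos "github.com/mesos/mesos-go/mesosproto"
	mutil "github.com/mesos/mesos-go/mesosutil"
	mresource "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resource"
)

func TestDefaultProcurementSketch(t *testing.T) {
	task, err := fakePodTask("procure")
	if err != nil {
		t.Fatal(err)
	}

	// an offer comfortably larger than the defaults; Hostname and SlaveId
	// are assumptions about what NodeProcurement records on the task
	offer := &mesos.Offer{
		Id:       mutil.NewOfferID("offer-1"),
		Hostname: proto.String("some.host"),
		SlaveId:  mutil.NewSlaveID("slave-1"),
		Resources: []*mesos.Resource{
			mutil.NewScalarResource("cpus", 4),
			mutil.NewScalarResource("mem", 1024),
		},
	}

	procure := NewDefaultProcurement(
		mresource.DefaultDefaultContainerCPULimit,
		mresource.DefaultDefaultContainerMemLimit,
	)
	if err := procure(task, offer); err != nil {
		t.Fatal(err)
	}

	// RequirePodResources wrote the defaults into the pod spec and recorded
	// the pod-wide limits on the task's Spec
	t.Logf("cpu=%.2f mem=%.2f MB", float64(task.Spec.CPU), float64(task.Spec.Memory))
}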

View File

@@ -19,87 +19,126 @@ package resource
import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
resourcequotacontroller "k8s.io/kubernetes/pkg/controller/resourcequota"
)
const (
DefaultDefaultContainerCPULimit = CPUShares(0.25) // CPUs allocated for pods without CPU limit
DefaultDefaultContainerMemLimit = MegaBytes(64.0) // memory allocated for pods without memory limit
MinimumContainerCPU = CPUShares(0.01) // minimum CPUs allowed by Mesos
MinimumContainerMem = MegaBytes(32.0) // minimum memory allowed by Mesos
)
// CPUFromPodSpec computes the cpu shares that the pod is admitted to use. Containers
// without CPU limit are NOT taken into account.
func PodCPULimit(pod *api.Pod) CPUShares {
cpuQuantity := resourcequotacontroller.PodCPU(pod)
return CPUShares(float64(cpuQuantity.MilliValue()) / 1000.0)
}
var (
zero = *resource.NewQuantity(0, resource.BinarySI)
)
// MemFromPodSpec computes the amount of memory that the pod is admitted to use. Containers
// without memory limit are NOT taken into account.
func PodMemLimit(pod *api.Pod) MegaBytes {
memQuantity := resourcequotacontroller.PodMemory(pod)
return MegaBytes(float64(memQuantity.Value()) / 1024.0 / 1024.0)
}
// podResources computes the request and limit sums of a pod for the given
// resource. If write is true, it also writes missing requests and limits back
// into the pod spec.
func podResources(pod *api.Pod, resourceName api.ResourceName, def, min resource.Quantity, write bool) (
requestSum *resource.Quantity,
limitSum *resource.Quantity,
modified bool,
err error,
) {
requestSum = (&zero).Copy()
limitSum = (&zero).Copy()
modified = false
err = nil
// limitPodResource sets the given default resource limit for each container that
// does not limit the given resource yet. limitPodResource returns true if and only if
// at least one container had no limit for that resource.
func limitPodResource(pod *api.Pod, resourceName api.ResourceName, defaultLimit resource.Quantity) bool {
unlimited := false
for j := range pod.Spec.Containers {
container := &pod.Spec.Containers[j]
// create maps
if container.Resources.Limits == nil {
container.Resources.Limits = api.ResourceList{}
}
_, ok := container.Resources.Limits[resourceName]
if !ok {
container.Resources.Limits[resourceName] = defaultLimit
unlimited = true
if container.Resources.Requests == nil {
container.Resources.Requests = api.ResourceList{}
}
// request and limit defined?
request, requestFound := container.Resources.Requests[resourceName]
limit, limitFound := container.Resources.Limits[resourceName]
// fill-in missing request and/or limit
if !requestFound && !limitFound {
limit = def
request = def
modified = true
} else if requestFound && !limitFound {
limit = request
modified = true
} else if !requestFound && limitFound {
// TODO(sttts): possibly use the default here?
request = limit
modified = true
}
// make request and limit at least as big as min
if (&request).Cmp(min) < 0 {
request = *(&min).Copy()
modified = true
}
if (&limit).Cmp(min) < 0 {
limit = *(&min).Copy()
modified = true
}
// add up the request and limit sum for all containers
err = requestSum.Add(request)
if err != nil {
return
}
err = limitSum.Add(limit)
if err != nil {
return
}
// optionally write request and limit back
if write {
container.Resources.Requests[resourceName] = request
container.Resources.Limits[resourceName] = limit
}
}
return unlimited
return
}
// unlimitedPodResources counts how many containers in the pod have no limit for the given resource
func unlimitedCountainerNum(pod *api.Pod, resourceName api.ResourceName) int {
unlimited := 0
for j := range pod.Spec.Containers {
container := &pod.Spec.Containers[j]
if container.Resources.Limits == nil {
unlimited += 1
continue
}
if _, ok := container.Resources.Limits[resourceName]; !ok {
unlimited += 1
}
// LimitPodCPU sets default CPU requests and limits for each container that
// does not declare them yet. It returns the pod-wide request and limit sums
// and whether the pod was modified.
func LimitPodCPU(pod *api.Pod, defaultLimit CPUShares) (request, limit CPUShares, modified bool, err error) {
r, l, m, err := podResources(pod, api.ResourceCPU, *defaultLimit.Quantity(), *MinimumContainerCPU.Quantity(), true)
if err != nil {
return 0.0, 0.0, false, err
}
return unlimited
return NewCPUShares(*r), NewCPUShares(*l), m, nil
}
// limitPodCPU sets DefaultContainerCPUs for the CPU limit of each container that
// does not limit its CPU resource yet. limitPodCPU returns true if and only if
// at least one container had no CPU limit set.
func LimitPodCPU(pod *api.Pod, defaultLimit CPUShares) bool {
defaultCPUQuantity := resource.NewMilliQuantity(int64(float64(defaultLimit)*1000.0), resource.DecimalSI)
return limitPodResource(pod, api.ResourceCPU, *defaultCPUQuantity)
// LimitPodMem sets default memory requests and limits for each container that
// does not declare them yet. It returns the pod-wide request and limit sums
// and whether the pod was modified.
func LimitPodMem(pod *api.Pod, defaultLimit MegaBytes) (request, limit MegaBytes, modified bool, err error) {
r, l, m, err := podResources(pod, api.ResourceMemory, *defaultLimit.Quantity(), *MinimumContainerMem.Quantity(), true)
if err != nil {
return 0.0, 0.0, false, err
}
return NewMegaBytes(*r), NewMegaBytes(*l), m, nil
}
// limitPodMem sets DefaultContainerMem for the memory limit of each container that
// does not limit its memory resource yet. limitPodMem returns true if and only if
// at least one container had no memory limit set.
func LimitPodMem(pod *api.Pod, defaultLimit MegaBytes) bool {
defaultMemQuantity := resource.NewQuantity(int64(float64(defaultLimit)*1024.0*1024.0), resource.BinarySI)
return limitPodResource(pod, api.ResourceMemory, *defaultMemQuantity)
// CPUForPod computes the CPU request and limit sums of a pod, substituting the
// default limit for containers that declare none. The pod is left unmodified.
func CPUForPod(pod *api.Pod, defaultLimit CPUShares) (request, limit CPUShares, modified bool, err error) {
r, l, m, err := podResources(pod, api.ResourceCPU, *defaultLimit.Quantity(), *MinimumContainerCPU.Quantity(), false)
if err != nil {
return 0.0, 0.0, false, err
}
return NewCPUShares(*r), NewCPUShares(*l), m, nil
}
// CPUForPod computes the limits from the spec plus the default CPU limit for unlimited containers
func CPUForPod(pod *api.Pod, defaultLimit CPUShares) CPUShares {
return PodCPULimit(pod) + CPUShares(unlimitedCountainerNum(pod, api.ResourceCPU))*defaultLimit
}
// MemForPod computes the limits from the spec plus the default memory limit for unlimited containers
func MemForPod(pod *api.Pod, defaultLimit MegaBytes) MegaBytes {
return PodMemLimit(pod) + MegaBytes(unlimitedCountainerNum(pod, api.ResourceMemory))*defaultLimit
// MemForPod computes the memory request and limit sums of a pod, substituting
// the default limit for containers that declare none. The pod is left unmodified.
func MemForPod(pod *api.Pod, defaultLimit MegaBytes) (request, limit MegaBytes, modified bool, err error) {
r, l, m, err := podResources(pod, api.ResourceMemory, *defaultLimit.Quantity(), *MinimumContainerMem.Quantity(), false)
if err != nil {
return 0.0, 0.0, false, err
}
return NewMegaBytes(*r), NewMegaBytes(*l), m, nil
}
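
The defaulting rules implemented by podResources are: a container with neither request nor limit gets the default for both; a lone request is copied to the limit; a lone limit is copied to the request; both values are then raised to the Mesos minimums. A small sketch (not from the commit) exercising those rules through LimitPodCPU:

package main

import (
	"fmt"

	mresource "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resource"
	"k8s.io/kubernetes/pkg/api"
)

func main() {
	pod := &api.Pod{Spec: api.PodSpec{Containers: []api.Container{
		// no request, no limit: both default to 0.25 shares
		{Name: "neither"},
		// request only: the limit is copied from the request (2.0/2.0)
		{Name: "request-only", Resources: api.ResourceRequirements{
			Requests: api.ResourceList{api.ResourceCPU: *mresource.CPUShares(2.0).Quantity()},
		}},
		// limit only: the request is copied from the limit (3.0/3.0)
		{Name: "limit-only", Resources: api.ResourceRequirements{
			Limits: api.ResourceList{api.ResourceCPU: *mresource.CPUShares(3.0).Quantity()},
		}},
	}}}

	request, limit, modified, err := mresource.LimitPodCPU(pod, mresource.DefaultDefaultContainerCPULimit)
	if err != nil {
		panic(err)
	}
	// pod-wide sums: 0.25 + 2.0 + 3.0 = 5.25 for both request and limit;
	// modified is true because defaults were written back into the pod
	fmt.Println(float64(request), float64(limit), modified)
}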

View File

@@ -0,0 +1,113 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resource
import (
"math"
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/controller/resourcequota"
)
type resources struct {
cpuR, cpuL, memR, memL float64
}
func TestResources(tst *testing.T) {
assert := assert.New(tst)
const (
defCpu = float64(DefaultDefaultContainerCPULimit)
defMem = float64(DefaultDefaultContainerMemLimit)
minCpu = float64(MinimumContainerCPU)
minMem = float64(MinimumContainerMem)
)
undef := math.NaN()
defined := func(f float64) bool { return !math.IsNaN(f) }
for _, t := range []struct {
input, want resources
}{
{resources{undef, 3.0, undef, 768.0}, resources{defCpu + 3.0, defCpu + 3.0, defMem + 768.0, defMem + 768.0}},
{resources{0.0, 3.0, 0.0, 768.0}, resources{defCpu + minCpu, defCpu + 3.0, defMem + minMem, defMem + 768.0}},
{resources{undef, undef, undef, undef}, resources{2 * defCpu, 2 * defCpu, 2 * defMem, 2 * defMem}},
{resources{0.0, 0.0, 0.0, 0.0}, resources{minCpu + defCpu, minCpu + defCpu, minMem + defMem, minMem + defMem}},
{resources{2.0, 3.0, undef, 768.0}, resources{defCpu + 2.0, defCpu + 3.0, defMem + 768.0, defMem + 768.0}},
{resources{2.0, 3.0, 256.0, 768.0}, resources{defCpu + 2.0, defCpu + 3.0, defMem + 256.0, defMem + 768.0}},
} {
pod := &api.Pod{
Spec: api.PodSpec{
Containers: []api.Container{{
Name: "a",
}, {
Name: "b",
}},
},
}
if defined(t.input.cpuR) || defined(t.input.memR) {
pod.Spec.Containers[0].Resources.Requests = api.ResourceList{}
if defined(t.input.cpuR) {
pod.Spec.Containers[0].Resources.Requests[api.ResourceCPU] = *CPUShares(t.input.cpuR).Quantity()
}
if defined(t.input.memR) {
pod.Spec.Containers[0].Resources.Requests[api.ResourceMemory] = *MegaBytes(t.input.memR).Quantity()
}
}
if defined(t.input.cpuL) || defined(t.input.memL) {
pod.Spec.Containers[0].Resources.Limits = api.ResourceList{}
if defined(t.input.cpuL) {
pod.Spec.Containers[0].Resources.Limits[api.ResourceCPU] = *CPUShares(t.input.cpuL).Quantity()
}
if defined(t.input.memL) {
pod.Spec.Containers[0].Resources.Limits[api.ResourceMemory] = *MegaBytes(t.input.memL).Quantity()
}
}
tst.Logf("Testing resource computation for %v => request=%v limit=%v", t, pod.Spec.Containers[0].Resources.Requests, pod.Spec.Containers[0].Resources.Limits)
tst.Logf("hasRequests: cpu => %v, mem => %v", resourcequota.PodHasRequests(pod, api.ResourceCPU), resourcequota.PodHasRequests(pod, api.ResourceMemory))
beforeCpuR, beforeCpuL, _, err := CPUForPod(pod, DefaultDefaultContainerCPULimit)
assert.NoError(err, "CPUForPod should not return an error")
beforeMemR, beforeMemL, _, err := MemForPod(pod, DefaultDefaultContainerMemLimit)
assert.NoError(err, "MemForPod should not return an error")
cpuR, cpuL, _, err := LimitPodCPU(pod, DefaultDefaultContainerCPULimit)
assert.NoError(err, "LimitPodCPU should not return an error")
memR, memL, _, err := LimitPodMem(pod, DefaultDefaultContainerMemLimit)
assert.NoError(err, "LimitPodMem should not return an error")
tst.Logf("New resources container 0: request=%v limit=%v", pod.Spec.Containers[0].Resources.Requests, pod.Spec.Containers[0].Resources.Limits)
tst.Logf("New resources container 1: request=%v limit=%v", pod.Spec.Containers[1].Resources.Requests, pod.Spec.Containers[1].Resources.Limits)
assert.Equal(t.want.cpuR, float64(beforeCpuR), "cpu request before modification is wrong")
assert.Equal(t.want.cpuL, float64(beforeCpuL), "cpu limit before modification is wrong")
assert.Equal(t.want.memR, float64(beforeMemR), "mem request before modification is wrong")
assert.Equal(t.want.memL, float64(beforeMemL), "mem limit before modification is wrong")
assert.Equal(t.want.cpuR, float64(cpuR), "cpu request is wrong")
assert.Equal(t.want.cpuL, float64(cpuL), "cpu limit is wrong")
assert.Equal(t.want.memR, float64(memR), "mem request is wrong")
assert.Equal(t.want.memL, float64(memL), "mem limit is wrong")
}
}

View File

@@ -19,6 +19,8 @@ package resource
import (
"fmt"
"strconv"
"k8s.io/kubernetes/pkg/api/resource"
)
type MegaBytes float64
@@ -30,11 +32,11 @@ func (f *CPUShares) Set(s string) error {
return err
}
func (f *CPUShares) Type() string {
func (f CPUShares) Type() string {
return "float64"
}
func (f *CPUShares) String() string { return fmt.Sprintf("%v", *f) }
func (f CPUShares) String() string { return fmt.Sprintf("%v", float64(f)) }
func (f *MegaBytes) Set(s string) error {
v, err := strconv.ParseFloat(s, 64)
@@ -42,8 +44,24 @@ func (f *MegaBytes) Set(s string) error {
return err
}
func (f *MegaBytes) Type() string {
func (f MegaBytes) Type() string {
return "float64"
}
func (f *MegaBytes) String() string { return fmt.Sprintf("%v", *f) }
func (f MegaBytes) String() string { return fmt.Sprintf("%v", float64(f)) }
func (f MegaBytes) Quantity() *resource.Quantity {
return resource.NewQuantity(int64(float64(f)*1024.0*1024.0), resource.BinarySI)
}
func (f CPUShares) Quantity() *resource.Quantity {
return resource.NewMilliQuantity(int64(float64(f)*1000.0), resource.DecimalSI)
}
func NewCPUShares(q resource.Quantity) CPUShares {
return CPUShares(float64(q.MilliValue()) / 1000.0)
}
func NewMegaBytes(q resource.Quantity) MegaBytes {
return MegaBytes(float64(q.Value()) / 1024.0 / 1024.0)
}
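
A small sketch (not from the commit) of the Quantity round-trips these helpers provide, assuming the package is imported as mresource as elsewhere in this diff:

package main

import (
	"fmt"

	mresource "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resource"
)

func main() {
	// CPUShares round-trips through a milli-CPU quantity: 0.25 shares <-> 250m
	q := mresource.CPUShares(0.25).Quantity()
	fmt.Println(q.String(), float64(mresource.NewCPUShares(*q))) // 250m 0.25

	// MegaBytes round-trips through a binary byte quantity: 64 MB <-> 64*1024*1024 bytes
	m := mresource.MegaBytes(64).Quantity()
	fmt.Println(m.Value(), float64(mresource.NewMegaBytes(*m))) // 67108864 64
}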

View File

@@ -453,18 +453,15 @@ func (s *SchedulerServer) prepareExecutorInfo(hks hyperkube.Interface) (*mesos.E
return nil, nil, fmt.Errorf("error parsing static pod spec at %v: %v", podPath, err)
}
// TODO(sttts): allow unlimited static pods as well and patch in the default resource limits
unlimitedCPU := mresource.LimitPodCPU(&pod, s.defaultContainerCPULimit)
unlimitedMem := mresource.LimitPodMem(&pod, s.defaultContainerMemLimit)
if unlimitedCPU {
return nil, nil, fmt.Errorf("found static pod without limit on cpu resources: %v", podPath)
_, cpu, _, err := mresource.LimitPodCPU(&pod, s.defaultContainerCPULimit)
if err != nil {
return nil, nil, fmt.Errorf("cannot derive cpu limit for static pod: %v", podPath)
}
if unlimitedMem {
return nil, nil, fmt.Errorf("found static pod without limit on memory resources: %v", podPath)
_, mem, _, err := mresource.LimitPodMem(&pod, s.defaultContainerMemLimit)
if err != nil {
return nil, nil, fmt.Errorf("cannot derive memory limit for static pod: %v", podPath)
}
cpu := mresource.PodCPULimit(&pod)
mem := mresource.PodMemLimit(&pod)
log.V(2).Infof("reserving %.2f cpu shares and %.2f MB of memory to static pod %s", cpu, mem, pod.Name)
staticPodCPUs += float64(cpu)

View File

@@ -281,23 +281,3 @@ func PodHasRequests(pod *api.Pod, resourceName api.ResourceName) bool {
}
return true
}
// PodCPU computes total cpu limit across all containers in pod
// TODO: Remove this once the mesos scheduler becomes request aware
func PodCPU(pod *api.Pod) *resource.Quantity {
val := int64(0)
for j := range pod.Spec.Containers {
val = val + pod.Spec.Containers[j].Resources.Limits.Cpu().MilliValue()
}
return resource.NewMilliQuantity(int64(val), resource.DecimalSI)
}
// PodMemory computes total memory limit across all containers in a pod
// TODO: Remove this once the mesos scheduler becomes request aware
func PodMemory(pod *api.Pod) *resource.Quantity {
val := int64(0)
for j := range pod.Spec.Containers {
val = val + pod.Spec.Containers[j].Resources.Limits.Memory().Value()
}
return resource.NewQuantity(int64(val), resource.DecimalSI)
}