Consolidate extended resources and hugepages in Scheduler.

Avesh Agarwal 2017-09-15 10:33:41 -04:00
parent 3e8a8286d8
commit ae05a6da34
6 changed files with 81 additions and 116 deletions
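The patch collapses the scheduler's separate ExtendedResources and HugePages bookkeeping into a single ScalarResources map keyed by resource name. The following self-contained sketch (plain strings stand in for v1.ResourceName so it builds without Kubernetes imports; it is an illustration, not the scheduler's actual type) shows the consolidated accounting model the diffs below implement:

package main

import "fmt"

// Resource mirrors the consolidated scheduler cache type: a single map now
// carries both extended resources and hugepages, keyed by resource name.
type Resource struct {
	MilliCPU        int64
	Memory          int64
	ScalarResources map[string]int64
}

// SetScalar lazily allocates the scalar resource map, as the patch does.
func (r *Resource) SetScalar(name string, quantity int64) {
	if r.ScalarResources == nil {
		r.ScalarResources = map[string]int64{}
	}
	r.ScalarResources[name] = quantity
}

// AddScalar accumulates a quantity for a scalar resource.
func (r *Resource) AddScalar(name string, quantity int64) {
	r.SetScalar(name, r.ScalarResources[name]+quantity)
}

func main() {
	var r Resource
	r.AddScalar("example.com/foo", 3)           // extended resource
	r.AddScalar("hugepages-2Mi", 512*1024*1024) // hugepages request, in bytes
	fmt.Println(r.ScalarResources)
}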

View File

@@ -265,6 +265,11 @@ func IsIntegerResourceName(str string) bool {
 	return integerResources.Has(str) || IsExtendedResourceName(api.ResourceName(str))
 }
 
+// Extended and HugePages resources
+func IsScalarResourceName(name api.ResourceName) bool {
+	return IsExtendedResourceName(name) || IsHugePageResourceName(name)
+}
+
 // this function aims to check if the service's ClusterIP is set or not
 // the objective is not to perform validation here
 func IsServiceIPSet(service *api.Service) bool {

View File

@@ -95,6 +95,11 @@ func IsOvercommitAllowed(name v1.ResourceName) bool {
 		!overcommitBlacklist.Has(string(name))
 }
 
+// Extended and Hugepages resources
+func IsScalarResourceName(name v1.ResourceName) bool {
+	return IsExtendedResourceName(name) || IsHugePageResourceName(name)
+}
+
 // this function aims to check if the service's ClusterIP is set or not
 // the objective is not to perform validation here
 func IsServiceIPSet(service *v1.Service) bool {
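Both helper packages gain the same IsScalarResourceName predicate, so callers no longer test for extended resources and hugepages separately. The sketch below is a rough, hypothetical approximation of what the unified check accepts; the real IsExtendedResourceName and IsHugePageResourceName helpers apply stricter rules than these stand-ins:

package main

import (
	"fmt"
	"strings"
)

// Stand-ins for the real helpers; the actual validation logic is more involved.
func isExtendedResourceName(name string) bool {
	return strings.Contains(name, "/") && !strings.HasPrefix(name, "kubernetes.io/")
}

func isHugePageResourceName(name string) bool {
	return strings.HasPrefix(name, "hugepages-")
}

// isScalarResourceName mirrors the helper added here: any resource that is
// either extended or hugepages is handled as a generic scalar.
func isScalarResourceName(name string) bool {
	return isExtendedResourceName(name) || isHugePageResourceName(name)
}

func main() {
	for _, name := range []string{"example.com/foo", "hugepages-2Mi", "cpu"} {
		fmt.Printf("%s -> %v\n", name, isScalarResourceName(name))
	}
}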

View File

@@ -514,16 +514,10 @@ func GetResourceRequest(pod *v1.Pod) *schedulercache.Resource {
 				result.NvidiaGPU = gpu
 			}
 		default:
-			if v1helper.IsExtendedResourceName(rName) {
+			if v1helper.IsScalarResourceName(rName) {
 				value := rQuantity.Value()
-				if value > result.ExtendedResources[rName] {
-					result.SetExtended(rName, value)
-				}
-			}
-			if v1helper.IsHugePageResourceName(rName) {
-				value := rQuantity.Value()
-				if value > result.HugePages[rName] {
-					result.SetHugePages(rName, value)
+				if value > result.ScalarResources[rName] {
+					result.SetScalar(rName, value)
 				}
 			}
 		}
@@ -563,8 +557,7 @@ func PodFitsResources(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *s
 		podRequest.Memory == 0 &&
 		podRequest.NvidiaGPU == 0 &&
 		podRequest.EphemeralStorage == 0 &&
-		len(podRequest.ExtendedResources) == 0 &&
-		len(podRequest.HugePages) == 0 {
+		len(podRequest.ScalarResources) == 0 {
 		return len(predicateFails) == 0, predicateFails, nil
 	}
@@ -583,15 +576,9 @@ func PodFitsResources(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *s
 		predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceEphemeralStorage, podRequest.EphemeralStorage, nodeInfo.RequestedResource().EphemeralStorage, allocatable.EphemeralStorage))
 	}
 
-	for rName, rQuant := range podRequest.ExtendedResources {
-		if allocatable.ExtendedResources[rName] < rQuant+nodeInfo.RequestedResource().ExtendedResources[rName] {
-			predicateFails = append(predicateFails, NewInsufficientResourceError(rName, podRequest.ExtendedResources[rName], nodeInfo.RequestedResource().ExtendedResources[rName], allocatable.ExtendedResources[rName]))
-		}
-	}
-
-	for rName, rQuant := range podRequest.HugePages {
-		if allocatable.HugePages[rName] < rQuant+nodeInfo.RequestedResource().HugePages[rName] {
-			predicateFails = append(predicateFails, NewInsufficientResourceError(rName, podRequest.HugePages[rName], nodeInfo.RequestedResource().HugePages[rName], allocatable.HugePages[rName]))
+	for rName, rQuant := range podRequest.ScalarResources {
+		if allocatable.ScalarResources[rName] < rQuant+nodeInfo.RequestedResource().ScalarResources[rName] {
+			predicateFails = append(predicateFails, NewInsufficientResourceError(rName, podRequest.ScalarResources[rName], nodeInfo.RequestedResource().ScalarResources[rName], allocatable.ScalarResources[rName]))
 		}
 	}
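With one map to walk, the fit check in PodFitsResources reduces to a single loop: a pod fits only if, for every scalar resource it requests, the request plus what is already reserved on the node stays within the node's allocatable amount. A simplified sketch of that loop (map[string]int64 stands in for the real schedulercache.Resource fields):

package main

import "fmt"

// fitsScalarResources sketches the consolidated loop in PodFitsResources:
// every scalar resource the pod requests must fit alongside what the node has
// already promised to other pods.
func fitsScalarResources(podRequest, nodeRequested, allocatable map[string]int64) []string {
	var insufficient []string
	for name, quantity := range podRequest {
		if allocatable[name] < quantity+nodeRequested[name] {
			insufficient = append(insufficient, name)
		}
	}
	return insufficient
}

func main() {
	podRequest := map[string]int64{"example.com/foo": 3, "hugepages-2Mi": 4}
	nodeRequested := map[string]int64{"example.com/foo": 2}
	allocatable := map[string]int64{"example.com/foo": 5, "hugepages-2Mi": 8}
	fmt.Println(fitsScalarResources(podRequest, nodeRequested, allocatable)) // prints []
}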

View File

@@ -257,85 +257,85 @@ func TestPodFitsResources(t *testing.T) {
 			test: "equal edge case for init container",
 		},
 		{
-			pod: newResourcePod(schedulercache.Resource{ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 1}}),
+			pod: newResourcePod(schedulercache.Resource{ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 1}}),
 			nodeInfo: schedulercache.NewNodeInfo(newResourcePod(schedulercache.Resource{})),
 			fits: true,
 			test: "opaque resource fits",
 		},
 		{
-			pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}), schedulercache.Resource{ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 1}}),
+			pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}), schedulercache.Resource{ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 1}}),
 			nodeInfo: schedulercache.NewNodeInfo(newResourcePod(schedulercache.Resource{})),
 			fits: true,
 			test: "opaque resource fits for init container",
 		},
 		{
 			pod: newResourcePod(
-				schedulercache.Resource{MilliCPU: 1, Memory: 1, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 10}}),
+				schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 10}}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 0}})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 0}})),
 			fits: false,
 			test: "opaque resource capacity enforced",
 			reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(opaqueResourceA, 10, 0, 5)},
 		},
 		{
 			pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}),
-				schedulercache.Resource{MilliCPU: 1, Memory: 1, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 10}}),
+				schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 10}}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 0}})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 0}})),
 			fits: false,
 			test: "opaque resource capacity enforced for init container",
 			reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(opaqueResourceA, 10, 0, 5)},
 		},
 		{
 			pod: newResourcePod(
-				schedulercache.Resource{MilliCPU: 1, Memory: 1, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 1}}),
+				schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 1}}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 5}})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 5}})),
 			fits: false,
 			test: "opaque resource allocatable enforced",
 			reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(opaqueResourceA, 1, 5, 5)},
 		},
 		{
 			pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}),
-				schedulercache.Resource{MilliCPU: 1, Memory: 1, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 1}}),
+				schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 1}}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 5}})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 5}})),
 			fits: false,
 			test: "opaque resource allocatable enforced for init container",
 			reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(opaqueResourceA, 1, 5, 5)},
 		},
 		{
 			pod: newResourcePod(
-				schedulercache.Resource{MilliCPU: 1, Memory: 1, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 3}},
-				schedulercache.Resource{MilliCPU: 1, Memory: 1, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 3}}),
+				schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 3}},
+				schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 3}}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 2}})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 2}})),
 			fits: false,
 			test: "opaque resource allocatable enforced for multiple containers",
 			reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(opaqueResourceA, 6, 2, 5)},
 		},
 		{
 			pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}),
-				schedulercache.Resource{MilliCPU: 1, Memory: 1, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 3}},
-				schedulercache.Resource{MilliCPU: 1, Memory: 1, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 3}}),
+				schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 3}},
+				schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 3}}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 2}})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 2}})),
 			fits: true,
 			test: "opaque resource allocatable admits multiple init containers",
 		},
 		{
 			pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}),
-				schedulercache.Resource{MilliCPU: 1, Memory: 1, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 6}},
-				schedulercache.Resource{MilliCPU: 1, Memory: 1, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 3}}),
+				schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 6}},
+				schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 3}}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 2}})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 2}})),
 			fits: false,
 			test: "opaque resource allocatable enforced for multiple init containers",
 			reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(opaqueResourceA, 6, 2, 5)},
 		},
 		{
 			pod: newResourcePod(
-				schedulercache.Resource{MilliCPU: 1, Memory: 1, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceB: 1}}),
+				schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{opaqueResourceB: 1}}),
 			nodeInfo: schedulercache.NewNodeInfo(
 				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0})),
 			fits: false,
@@ -344,7 +344,7 @@ func TestPodFitsResources(t *testing.T) {
 		},
 		{
 			pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}),
-				schedulercache.Resource{MilliCPU: 1, Memory: 1, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceB: 1}}),
+				schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{opaqueResourceB: 1}}),
 			nodeInfo: schedulercache.NewNodeInfo(
 				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0})),
 			fits: false,
@@ -353,28 +353,28 @@ func TestPodFitsResources(t *testing.T) {
 		},
 		{
 			pod: newResourcePod(
-				schedulercache.Resource{MilliCPU: 1, Memory: 1, HugePages: map[v1.ResourceName]int64{hugePageResourceA: 10}}),
+				schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, HugePages: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
 			fits: false,
 			test: "hugepages resource capacity enforced",
 			reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 10, 0, 5)},
 		},
 		{
 			pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}),
-				schedulercache.Resource{MilliCPU: 1, Memory: 1, HugePages: map[v1.ResourceName]int64{hugePageResourceA: 10}}),
+				schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, HugePages: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
 			fits: false,
 			test: "hugepages resource capacity enforced for init container",
 			reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 10, 0, 5)},
 		},
 		{
 			pod: newResourcePod(
-				schedulercache.Resource{MilliCPU: 1, Memory: 1, HugePages: map[v1.ResourceName]int64{hugePageResourceA: 3}},
-				schedulercache.Resource{MilliCPU: 1, Memory: 1, HugePages: map[v1.ResourceName]int64{hugePageResourceA: 3}}),
+				schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}},
+				schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, HugePages: map[v1.ResourceName]int64{hugePageResourceA: 2}})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 2}})),
 			fits: false,
 			test: "hugepages resource allocatable enforced for multiple containers",
 			reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 6, 2, 5)},

View File

@@ -111,7 +111,7 @@ func TestAssumePodScheduled(t *testing.T) {
 			requestedResource: &Resource{
 				MilliCPU: 100,
 				Memory: 500,
-				ExtendedResources: map[v1.ResourceName]int64{"pod.alpha.kubernetes.io/opaque-int-resource-oir-foo": 3},
+				ScalarResources: map[v1.ResourceName]int64{"pod.alpha.kubernetes.io/opaque-int-resource-oir-foo": 3},
 			},
 			nonzeroRequest: &Resource{
 				MilliCPU: 100,
@@ -127,7 +127,7 @@ func TestAssumePodScheduled(t *testing.T) {
 			requestedResource: &Resource{
 				MilliCPU: 300,
 				Memory: 1524,
-				ExtendedResources: map[v1.ResourceName]int64{"pod.alpha.kubernetes.io/opaque-int-resource-oir-foo": 8},
+				ScalarResources: map[v1.ResourceName]int64{"pod.alpha.kubernetes.io/opaque-int-resource-oir-foo": 8},
 			},
 			nonzeroRequest: &Resource{
 				MilliCPU: 300,

View File

@@ -71,8 +71,8 @@ type Resource struct {
 	// We store allowedPodNumber (which is Node.Status.Allocatable.Pods().Value())
 	// explicitly as int, to avoid conversions and improve performance.
 	AllowedPodNumber int
-	ExtendedResources map[v1.ResourceName]int64
-	HugePages map[v1.ResourceName]int64
+	// ScalarResources
+	ScalarResources map[v1.ResourceName]int64
 }
 
 // New creates a Resource from ResourceList
@@ -101,11 +101,8 @@ func (r *Resource) Add(rl v1.ResourceList) {
 		case v1.ResourceEphemeralStorage:
 			r.EphemeralStorage += rQuant.Value()
 		default:
-			if v1helper.IsExtendedResourceName(rName) {
-				r.AddExtended(rName, rQuant.Value())
-			}
-			if v1helper.IsHugePageResourceName(rName) {
-				r.AddHugePages(rName, rQuant.Value())
+			if v1helper.IsScalarResourceName(rName) {
+				r.AddScalar(rName, rQuant.Value())
 			}
 		}
 	}
@@ -119,11 +116,12 @@ func (r *Resource) ResourceList() v1.ResourceList {
 		v1.ResourcePods: *resource.NewQuantity(int64(r.AllowedPodNumber), resource.BinarySI),
 		v1.ResourceEphemeralStorage: *resource.NewQuantity(r.EphemeralStorage, resource.BinarySI),
 	}
-	for rName, rQuant := range r.ExtendedResources {
+	for rName, rQuant := range r.ScalarResources {
+		if v1helper.IsHugePageResourceName(rName) {
+			result[rName] = *resource.NewQuantity(rQuant, resource.BinarySI)
+		} else {
 			result[rName] = *resource.NewQuantity(rQuant, resource.DecimalSI)
 		}
-	}
-	for rName, rQuant := range r.HugePages {
-		result[rName] = *resource.NewQuantity(rQuant, resource.BinarySI)
 	}
 	return result
 }
@@ -136,43 +134,25 @@ func (r *Resource) Clone() *Resource {
 		AllowedPodNumber: r.AllowedPodNumber,
 		EphemeralStorage: r.EphemeralStorage,
 	}
-	if r.ExtendedResources != nil {
-		res.ExtendedResources = make(map[v1.ResourceName]int64)
-		for k, v := range r.ExtendedResources {
-			res.ExtendedResources[k] = v
-		}
-	}
-	if r.HugePages != nil {
-		res.HugePages = make(map[v1.ResourceName]int64)
-		for k, v := range r.HugePages {
-			res.HugePages[k] = v
+	if r.ScalarResources != nil {
+		res.ScalarResources = make(map[v1.ResourceName]int64)
+		for k, v := range r.ScalarResources {
+			res.ScalarResources[k] = v
 		}
 	}
 	return res
 }
 
-func (r *Resource) AddExtended(name v1.ResourceName, quantity int64) {
-	r.SetExtended(name, r.ExtendedResources[name]+quantity)
+func (r *Resource) AddScalar(name v1.ResourceName, quantity int64) {
+	r.SetScalar(name, r.ScalarResources[name]+quantity)
 }
 
-func (r *Resource) SetExtended(name v1.ResourceName, quantity int64) {
-	// Lazily allocate opaque integer resource map.
-	if r.ExtendedResources == nil {
-		r.ExtendedResources = map[v1.ResourceName]int64{}
+func (r *Resource) SetScalar(name v1.ResourceName, quantity int64) {
+	// Lazily allocate scalar resource map.
+	if r.ScalarResources == nil {
+		r.ScalarResources = map[v1.ResourceName]int64{}
 	}
-	r.ExtendedResources[name] = quantity
-}
-
-func (r *Resource) AddHugePages(name v1.ResourceName, quantity int64) {
-	r.SetHugePages(name, r.HugePages[name]+quantity)
-}
-
-func (r *Resource) SetHugePages(name v1.ResourceName, quantity int64) {
-	// Lazily allocate hugepages resource map.
-	if r.HugePages == nil {
-		r.HugePages = map[v1.ResourceName]int64{}
-	}
-	r.HugePages[name] = quantity
+	r.ScalarResources[name] = quantity
 }
 
 // NewNodeInfo returns a ready to use empty NodeInfo object.
@@ -326,17 +306,11 @@ func (n *NodeInfo) AddPod(pod *v1.Pod) {
 	n.requestedResource.Memory += res.Memory
 	n.requestedResource.NvidiaGPU += res.NvidiaGPU
 	n.requestedResource.EphemeralStorage += res.EphemeralStorage
-	if n.requestedResource.ExtendedResources == nil && len(res.ExtendedResources) > 0 {
-		n.requestedResource.ExtendedResources = map[v1.ResourceName]int64{}
+	if n.requestedResource.ScalarResources == nil && len(res.ScalarResources) > 0 {
+		n.requestedResource.ScalarResources = map[v1.ResourceName]int64{}
 	}
-	for rName, rQuant := range res.ExtendedResources {
-		n.requestedResource.ExtendedResources[rName] += rQuant
-	}
-	if n.requestedResource.HugePages == nil && len(res.HugePages) > 0 {
-		n.requestedResource.HugePages = map[v1.ResourceName]int64{}
-	}
-	for rName, rQuant := range res.HugePages {
-		n.requestedResource.HugePages[rName] += rQuant
+	for rName, rQuant := range res.ScalarResources {
+		n.requestedResource.ScalarResources[rName] += rQuant
 	}
 	n.nonzeroRequest.MilliCPU += non0_cpu
 	n.nonzeroRequest.Memory += non0_mem
@@ -387,17 +361,11 @@ func (n *NodeInfo) RemovePod(pod *v1.Pod) error {
 	n.requestedResource.MilliCPU -= res.MilliCPU
 	n.requestedResource.Memory -= res.Memory
 	n.requestedResource.NvidiaGPU -= res.NvidiaGPU
-	if len(res.ExtendedResources) > 0 && n.requestedResource.ExtendedResources == nil {
-		n.requestedResource.ExtendedResources = map[v1.ResourceName]int64{}
+	if len(res.ScalarResources) > 0 && n.requestedResource.ScalarResources == nil {
+		n.requestedResource.ScalarResources = map[v1.ResourceName]int64{}
 	}
-	for rName, rQuant := range res.ExtendedResources {
-		n.requestedResource.ExtendedResources[rName] -= rQuant
-	}
-	if len(res.HugePages) > 0 && n.requestedResource.HugePages == nil {
-		n.requestedResource.HugePages = map[v1.ResourceName]int64{}
-	}
-	for rName, rQuant := range res.HugePages {
-		n.requestedResource.HugePages[rName] -= rQuant
+	for rName, rQuant := range res.ScalarResources {
+		n.requestedResource.ScalarResources[rName] -= rQuant
 	}
 	n.nonzeroRequest.MilliCPU -= non0_cpu
 	n.nonzeroRequest.Memory -= non0_mem
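AddPod and RemovePod now mirror each other over the single ScalarResources map: adding a pod accumulates its scalar requests into the node's running total, and removing it subtracts them again. A stripped-down sketch of that bookkeeping (no locking or generation tracking, plain strings for resource names; the real NodeInfo also tracks CPU, memory, GPU, and ephemeral storage):

package main

import "fmt"

// nodeInfo sketches only the scalar-resource side of the per-node accounting.
type nodeInfo struct {
	requestedScalar map[string]int64
}

func (n *nodeInfo) addPod(podScalar map[string]int64) {
	if n.requestedScalar == nil && len(podScalar) > 0 {
		n.requestedScalar = map[string]int64{}
	}
	for name, quantity := range podScalar {
		n.requestedScalar[name] += quantity
	}
}

func (n *nodeInfo) removePod(podScalar map[string]int64) {
	if len(podScalar) > 0 && n.requestedScalar == nil {
		n.requestedScalar = map[string]int64{}
	}
	for name, quantity := range podScalar {
		n.requestedScalar[name] -= quantity
	}
}

func main() {
	var n nodeInfo
	pod := map[string]int64{"example.com/foo": 2, "hugepages-2Mi": 1}
	n.addPod(pod)
	n.removePod(pod)
	fmt.Println(n.requestedScalar) // both entries back to 0
}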