Merge pull request #48922 from ConnorDoyle/integer-resources-as-default

Automatic merge from submit-queue (batch tested with PRs 46317, 48922, 50651, 50230, 47599)

Resources outside the `*kubernetes.io` namespace are integers and cannot be over-committed.

**What this PR does / why we need it**:

Fixes #50473 

Rationale: since the scheduler already handles all resources except CPU as integers, integer semantics can simply be the default behavior for namespaced resources.

cc @RenaudWasTaken @vishh 

**Release note**:

```release-note
Resources outside the `*kubernetes.io` namespace are integers and cannot be over-committed.
```
Merged by Kubernetes Submit Queue on 2017-08-16 19:50:15 -07:00, committed via GitHub. Commit ce1485c626; 14 changed files with 141 additions and 70 deletions.


@@ -115,6 +115,21 @@ func IsStandardContainerResourceName(str string) bool {
return standardContainerResources.Has(str)
}
+// IsExtendedResourceName returns true if the resource name is not in the
+// default namespace, or it has the opaque integer resource prefix.
+func IsExtendedResourceName(name api.ResourceName) bool {
+// TODO: Remove OIR part following deprecation.
+return !IsDefaultNamespaceResource(name) || IsOpaqueIntResourceName(name)
+}
+// IsDefaultNamespaceResource returns true if the resource name is in the
+// *kubernetes.io/ namespace. Partially-qualified (unprefixed) names are
+// implicitly in the kubernetes.io/ namespace.
+func IsDefaultNamespaceResource(name api.ResourceName) bool {
+return !strings.Contains(string(name), "/") ||
+strings.Contains(string(name), api.ResourceDefaultNamespacePrefix)
+}
// IsOpaqueIntResourceName returns true if the resource name has the opaque
// integer resource prefix.
func IsOpaqueIntResourceName(name api.ResourceName) bool {
@@ -131,6 +146,15 @@ func OpaqueIntResourceName(name string) api.ResourceName {
return api.ResourceName(fmt.Sprintf("%s%s", api.ResourceOpaqueIntPrefix, name))
}
+var overcommitBlacklist = sets.NewString(string(api.ResourceNvidiaGPU))
+// IsOvercommitAllowed returns true if the resource is in the default
+// namespace and not blacklisted.
+func IsOvercommitAllowed(name api.ResourceName) bool {
+return IsDefaultNamespaceResource(name) &&
+!overcommitBlacklist.Has(string(name))
+}
var standardLimitRangeTypes = sets.NewString(
string(api.LimitTypePod),
string(api.LimitTypeContainer),
@@ -204,7 +228,7 @@ var integerResources = sets.NewString(
// IsIntegerResourceName returns true if the resource is measured in integer values
func IsIntegerResourceName(str string) bool {
-return integerResources.Has(str) || IsOpaqueIntResourceName(api.ResourceName(str))
+return integerResources.Has(str) || IsExtendedResourceName(api.ResourceName(str))
}
// this function aims to check if the service's ClusterIP is set or not
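The classification rule above is easy to demo in isolation. The following standalone sketch (my own illustration, not part of the commit; the sample names are arbitrary) mirrors `IsDefaultNamespaceResource`:

```go
package main

import (
	"fmt"
	"strings"
)

// Mirrors IsDefaultNamespaceResource from the diff: unprefixed names are
// implicitly in the kubernetes.io/ namespace, and any name containing
// "kubernetes.io/" (e.g. "alpha.kubernetes.io/...") is also default-namespace.
func isDefaultNamespaceResource(name string) bool {
	return !strings.Contains(name, "/") || strings.Contains(name, "kubernetes.io/")
}

func main() {
	for _, name := range []string{"cpu", "alpha.kubernetes.io/nvidia-gpu", "my.org/resource"} {
		fmt.Printf("%-31s extended=%v\n", name, !isDefaultNamespaceResource(name))
	}
	// cpu                             extended=false
	// alpha.kubernetes.io/nvidia-gpu  extended=false
	// my.org/resource                 extended=true
}
```

Note that the opaque integer prefix `pod.alpha.kubernetes.io/opaque-int-resource-` itself contains `kubernetes.io/`, so OIR names classify as default-namespace; that is why `IsExtendedResourceName` must special-case them explicitly.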


@@ -3178,6 +3178,8 @@ const (
const (
// Namespace prefix for opaque counted resources (alpha).
ResourceOpaqueIntPrefix = "pod.alpha.kubernetes.io/opaque-int-resource-"
+// Default namespace prefix.
+ResourceDefaultNamespacePrefix = "kubernetes.io/"
)
// ResourceList is a set of (resource name, quantity) pairs.


@@ -26,6 +26,7 @@ go_library(
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/selection:go_default_library",
+"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
],
)


@@ -24,9 +24,25 @@ import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
+"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/api/helper"
)
+// IsExtendedResourceName returns true if the resource name is not in the
+// default namespace, or it has the opaque integer resource prefix.
+func IsExtendedResourceName(name v1.ResourceName) bool {
+// TODO: Remove OIR part following deprecation.
+return !IsDefaultNamespaceResource(name) || IsOpaqueIntResourceName(name)
+}
+// IsDefaultNamespaceResource returns true if the resource name is in the
+// *kubernetes.io/ namespace. Partially-qualified (unprefixed) names are
+// implicitly in the kubernetes.io/ namespace.
+func IsDefaultNamespaceResource(name v1.ResourceName) bool {
+return !strings.Contains(string(name), "/") ||
+strings.Contains(string(name), v1.ResourceDefaultNamespacePrefix)
+}
// IsOpaqueIntResourceName returns true if the resource name has the opaque
// integer resource prefix.
func IsOpaqueIntResourceName(name v1.ResourceName) bool {
@@ -43,6 +59,15 @@ func OpaqueIntResourceName(name string) v1.ResourceName {
return v1.ResourceName(fmt.Sprintf("%s%s", v1.ResourceOpaqueIntPrefix, name))
}
+var overcommitBlacklist = sets.NewString(string(v1.ResourceNvidiaGPU))
+// IsOvercommitAllowed returns true if the resource is in the default
+// namespace and not blacklisted.
+func IsOvercommitAllowed(name v1.ResourceName) bool {
+return IsDefaultNamespaceResource(name) &&
+!overcommitBlacklist.Has(string(name))
+}
// this function aims to check if the service's ClusterIP is set or not
// the objective is not to perform validation here
func IsServiceIPSet(service *v1.Service) bool {


@@ -10,6 +10,8 @@ go_library(
srcs = ["validation.go"],
deps = [
"//pkg/api/helper:go_default_library",
+"//pkg/api/v1/helper:go_default_library",
+"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",


@@ -20,12 +20,15 @@ import (
"fmt"
"strings"
+"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kubernetes/pkg/api/helper"
+v1helper "k8s.io/kubernetes/pkg/api/v1/helper"
)
const isNegativeErrorMsg string = `must be greater than or equal to 0`
@@ -46,9 +49,9 @@ func ValidateResourceRequirements(requirements *v1.ResourceRequirements, fldPath
// Check that request <= limit.
requestQuantity, exists := requirements.Requests[resourceName]
if exists {
-// For GPUs, not only requests can't exceed limits, they also can't be lower, i.e. must be equal.
-if resourceName == v1.ResourceNvidiaGPU && quantity.Cmp(requestQuantity) != 0 {
-allErrs = append(allErrs, field.Invalid(reqPath, requestQuantity.String(), fmt.Sprintf("must be equal to %s limit", v1.ResourceNvidiaGPU)))
+// Ensure overcommit is allowed for the resource if request != limit
+if quantity.Cmp(requestQuantity) != 0 && !v1helper.IsOvercommitAllowed(resourceName) {
+allErrs = append(allErrs, field.Invalid(reqPath, requestQuantity.String(), fmt.Sprintf("must be equal to %s limit", resourceName)))
} else if quantity.Cmp(requestQuantity) < 0 {
allErrs = append(allErrs, field.Invalid(limPath, quantity.String(), fmt.Sprintf("must be greater than or equal to %s request", resourceName)))
}
@@ -99,6 +102,12 @@ func ValidateNonnegativeQuantity(value resource.Quantity, fldPath *field.Path) f
// Validate compute resource typename.
// Refer to docs/design/resources.md for more details.
func validateResourceName(value string, fldPath *field.Path) field.ErrorList {
+// Opaque integer resources (OIR) deprecation began in v1.8
+// TODO: Remove warning after OIR deprecation cycle.
+if v1helper.IsOpaqueIntResourceName(v1.ResourceName(value)) {
+glog.Errorf("DEPRECATION WARNING! Opaque integer resources are deprecated starting with v1.8: %s", value)
+}
allErrs := field.ErrorList{}
for _, msg := range validation.IsQualifiedName(value) {
allErrs = append(allErrs, field.Invalid(fldPath, value, msg))
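To see the new rule end to end: any resource for which overcommit is disallowed must now have request equal to limit. Below is a hedged sketch, where `overcommitAllowed` is a simplified stand-in for `v1helper.IsOvercommitAllowed`; `resource.MustParse` and `Quantity.Cmp` are the real apimachinery APIs:

```go
package main

import (
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/api/resource"
)

// Simplified stand-in for v1helper.IsOvercommitAllowed: only default-namespace
// resources, minus the GPU blacklist, may have request != limit.
func overcommitAllowed(name string) bool {
	inDefaultNS := !strings.Contains(name, "/") || strings.Contains(name, "kubernetes.io/")
	return inDefaultNS && name != "alpha.kubernetes.io/nvidia-gpu"
}

func main() {
	name := "my.org/resource"
	limit := resource.MustParse("2")
	request := resource.MustParse("1")
	// Mirrors the new check in ValidateResourceRequirements: request != limit
	// is an error only when overcommit is disallowed for the resource.
	if limit.Cmp(request) != 0 && !overcommitAllowed(name) {
		fmt.Printf("invalid: request must be equal to %s limit\n", name)
	}
}
```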


@@ -3358,6 +3358,12 @@ func ValidateNodeUpdate(node, oldNode *api.Node) field.ErrorList {
// Validate compute resource typename.
// Refer to docs/design/resources.md for more details.
func validateResourceName(value string, fldPath *field.Path) field.ErrorList {
+// Opaque integer resources (OIR) deprecation began in v1.8
+// TODO: Remove warning after OIR deprecation cycle.
+if helper.IsOpaqueIntResourceName(api.ResourceName(value)) {
+glog.Errorf("DEPRECATION WARNING! Opaque integer resources are deprecated starting with v1.8: %s", value)
+}
allErrs := field.ErrorList{}
for _, msg := range validation.IsQualifiedName(value) {
allErrs = append(allErrs, field.Invalid(fldPath, value, msg))
@@ -3715,9 +3721,9 @@ func ValidateResourceRequirements(requirements *api.ResourceRequirements, fldPat
// Check that request <= limit.
requestQuantity, exists := requirements.Requests[resourceName]
if exists {
-// For GPUs, not only requests can't exceed limits, they also can't be lower, i.e. must be equal.
-if resourceName == api.ResourceNvidiaGPU && quantity.Cmp(requestQuantity) != 0 {
-allErrs = append(allErrs, field.Invalid(reqPath, requestQuantity.String(), fmt.Sprintf("must be equal to %s limit", api.ResourceNvidiaGPU)))
+// Ensure overcommit is allowed for the resource if request != limit
+if quantity.Cmp(requestQuantity) != 0 && !helper.IsOvercommitAllowed(resourceName) {
+allErrs = append(allErrs, field.Invalid(reqPath, requestQuantity.String(), fmt.Sprintf("must be equal to %s limit", resourceName)))
} else if quantity.Cmp(requestQuantity) < 0 {
allErrs = append(allErrs, field.Invalid(limPath, quantity.String(), fmt.Sprintf("must be greater than or equal to %s request", resourceName)))
}


@@ -3287,7 +3287,7 @@ func TestValidateContainers(t *testing.T) {
Limits: api.ResourceList{
api.ResourceName(api.ResourceCPU): resource.MustParse("10"),
api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
-api.ResourceName("my.org/resource"): resource.MustParse("10m"),
+api.ResourceName("my.org/resource"): resource.MustParse("10"),
},
},
ImagePullPolicy: "IfNotPresent",
@@ -3349,12 +3349,12 @@ func TestValidateContainers(t *testing.T) {
Requests: api.ResourceList{
api.ResourceName(api.ResourceCPU): resource.MustParse("10"),
api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
-api.ResourceName("my.org/resource"): resource.MustParse("10m"),
+api.ResourceName("my.org/resource"): resource.MustParse("10"),
},
Limits: api.ResourceList{
api.ResourceName(api.ResourceCPU): resource.MustParse("10"),
api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
-api.ResourceName("my.org/resource"): resource.MustParse("10m"),
+api.ResourceName("my.org/resource"): resource.MustParse("10"),
},
},
ImagePullPolicy: "IfNotPresent",
@@ -3370,7 +3370,7 @@ func TestValidateContainers(t *testing.T) {
},
Limits: api.ResourceList{
api.ResourceName(api.ResourceCPU): resource.MustParse("10"),
-api.ResourceName("my.org/resource"): resource.MustParse("10m"),
+api.ResourceName("my.org/resource"): resource.MustParse("10"),
},
},
ImagePullPolicy: "IfNotPresent",
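The test quantities change from `10m` to `10` because extended resources are integers: a milli-quantity like `10m` (0.010) is no longer a valid amount. A small sketch of how a fractional quantity can be detected (the modulo check is my illustration of the integer rule, not the commit's exact validation code):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	for _, s := range []string{"10m", "10"} {
		q := resource.MustParse(s)
		// An integral quantity has no sub-unit (milli) remainder.
		if q.MilliValue()%1000 != 0 {
			fmt.Printf("%s is fractional (%d milli-units): invalid for extended resources\n", s, q.MilliValue())
		} else {
			fmt.Printf("%s is integral (%d): valid\n", s, q.Value())
		}
	}
}
```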


@@ -577,11 +577,11 @@ func (kl *Kubelet) setNodeStatusMachineInfo(node *v1.Node) {
if node.Status.Allocatable == nil {
node.Status.Allocatable = make(v1.ResourceList)
}
-// Remove opaque integer resources from allocatable that are no longer
+// Remove extended resources from allocatable that are no longer
// present in capacity.
for k := range node.Status.Allocatable {
_, found := node.Status.Capacity[k]
-if !found && v1helper.IsOpaqueIntResourceName(k) {
+if !found && v1helper.IsExtendedResourceName(k) {
delete(node.Status.Allocatable, k)
}
}
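The kubelet change is a rename plus a broader predicate: any stale extended resource, not just OIRs, is pruned from allocatable once it disappears from capacity. A plain-map sketch of that loop (hypothetical data; the name check is simplified):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	allocatable := map[string]int64{"memory": 8 << 30, "my.org/resource": 4}
	capacity := map[string]int64{"memory": 8 << 30}

	// Drop extended resources from allocatable that no longer appear in capacity.
	for k := range allocatable {
		if _, found := capacity[k]; !found && strings.Contains(k, "/") && !strings.Contains(k, "kubernetes.io/") {
			delete(allocatable, k)
		}
	}
	fmt.Println(allocatable) // map[memory:8589934592]
}
```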


@@ -533,10 +533,10 @@ func GetResourceRequest(pod *v1.Pod) *schedulercache.Resource {
result.StorageOverlay = overlay
}
default:
-if v1helper.IsOpaqueIntResourceName(rName) {
+if v1helper.IsExtendedResourceName(rName) {
value := rQuantity.Value()
-if value > result.OpaqueIntResources[rName] {
-result.SetOpaque(rName, value)
+if value > result.ExtendedResources[rName] {
+result.SetExtended(rName, value)
}
}
}
@@ -572,7 +572,7 @@ func PodFitsResources(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.No
// We couldn't parse metadata - fallback to computing it.
podRequest = GetResourceRequest(pod)
}
-if podRequest.MilliCPU == 0 && podRequest.Memory == 0 && podRequest.NvidiaGPU == 0 && podRequest.StorageOverlay == 0 && podRequest.StorageScratch == 0 && len(podRequest.OpaqueIntResources) == 0 {
+if podRequest.MilliCPU == 0 && podRequest.Memory == 0 && podRequest.NvidiaGPU == 0 && podRequest.StorageOverlay == 0 && podRequest.StorageScratch == 0 && len(podRequest.ExtendedResources) == 0 {
return len(predicateFails) == 0, predicateFails, nil
}
@@ -603,9 +603,9 @@ func PodFitsResources(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.No
predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceStorageOverlay, podRequest.StorageOverlay, nodeInfo.RequestedResource().StorageOverlay, allocatable.StorageOverlay))
}
-for rName, rQuant := range podRequest.OpaqueIntResources {
-if allocatable.OpaqueIntResources[rName] < rQuant+nodeInfo.RequestedResource().OpaqueIntResources[rName] {
-predicateFails = append(predicateFails, NewInsufficientResourceError(rName, podRequest.OpaqueIntResources[rName], nodeInfo.RequestedResource().OpaqueIntResources[rName], allocatable.OpaqueIntResources[rName]))
+for rName, rQuant := range podRequest.ExtendedResources {
+if allocatable.ExtendedResources[rName] < rQuant+nodeInfo.RequestedResource().ExtendedResources[rName] {
+predicateFails = append(predicateFails, NewInsufficientResourceError(rName, podRequest.ExtendedResources[rName], nodeInfo.RequestedResource().ExtendedResources[rName], allocatable.ExtendedResources[rName]))
}
}
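The fit check is unchanged in shape, only generalized from OIRs to all extended resources: a pod fits when, for each requested extended resource, the request plus what is already requested on the node stays within allocatable. A minimal sketch with hypothetical numbers:

```go
package main

import "fmt"

func main() {
	allocatable := map[string]int64{"my.org/resource": 5}
	used := map[string]int64{"my.org/resource": 2}       // already requested on the node
	podRequest := map[string]int64{"my.org/resource": 4} // this pod's request

	for name, want := range podRequest {
		if allocatable[name] < want+used[name] {
			// Same quantities NewInsufficientResourceError reports: requested, used, capacity.
			fmt.Printf("insufficient %s: requested=%d used=%d capacity=%d\n",
				name, want, used[name], allocatable[name])
		}
	}
}
```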


@@ -254,85 +254,85 @@ func TestPodFitsResources(t *testing.T) {
test: "equal edge case for init container",
},
{
-pod: newResourcePod(schedulercache.Resource{OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceA: 1}}),
+pod: newResourcePod(schedulercache.Resource{ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 1}}),
nodeInfo: schedulercache.NewNodeInfo(newResourcePod(schedulercache.Resource{})),
fits: true,
test: "opaque resource fits",
},
{
-pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}), schedulercache.Resource{OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceA: 1}}),
+pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}), schedulercache.Resource{ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 1}}),
nodeInfo: schedulercache.NewNodeInfo(newResourcePod(schedulercache.Resource{})),
fits: true,
test: "opaque resource fits for init container",
},
{
pod: newResourcePod(
-schedulercache.Resource{MilliCPU: 1, Memory: 1, OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceA: 10}}),
+schedulercache.Resource{MilliCPU: 1, Memory: 1, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 10}}),
nodeInfo: schedulercache.NewNodeInfo(
-newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceA: 0}})),
+newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 0}})),
fits: false,
test: "opaque resource capacity enforced",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(opaqueResourceA, 10, 0, 5)},
},
{
pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}),
-schedulercache.Resource{MilliCPU: 1, Memory: 1, OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceA: 10}}),
+schedulercache.Resource{MilliCPU: 1, Memory: 1, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 10}}),
nodeInfo: schedulercache.NewNodeInfo(
-newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceA: 0}})),
+newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 0}})),
fits: false,
test: "opaque resource capacity enforced for init container",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(opaqueResourceA, 10, 0, 5)},
},
{
pod: newResourcePod(
-schedulercache.Resource{MilliCPU: 1, Memory: 1, OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceA: 1}}),
+schedulercache.Resource{MilliCPU: 1, Memory: 1, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 1}}),
nodeInfo: schedulercache.NewNodeInfo(
-newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceA: 5}})),
+newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 5}})),
fits: false,
test: "opaque resource allocatable enforced",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(opaqueResourceA, 1, 5, 5)},
},
{
pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}),
-schedulercache.Resource{MilliCPU: 1, Memory: 1, OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceA: 1}}),
+schedulercache.Resource{MilliCPU: 1, Memory: 1, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 1}}),
nodeInfo: schedulercache.NewNodeInfo(
-newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceA: 5}})),
+newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 5}})),
fits: false,
test: "opaque resource allocatable enforced for init container",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(opaqueResourceA, 1, 5, 5)},
},
{
pod: newResourcePod(
-schedulercache.Resource{MilliCPU: 1, Memory: 1, OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceA: 3}},
-schedulercache.Resource{MilliCPU: 1, Memory: 1, OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceA: 3}}),
+schedulercache.Resource{MilliCPU: 1, Memory: 1, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 3}},
+schedulercache.Resource{MilliCPU: 1, Memory: 1, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 3}}),
nodeInfo: schedulercache.NewNodeInfo(
-newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceA: 2}})),
+newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 2}})),
fits: false,
test: "opaque resource allocatable enforced for multiple containers",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(opaqueResourceA, 6, 2, 5)},
},
{
pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}),
-schedulercache.Resource{MilliCPU: 1, Memory: 1, OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceA: 3}},
-schedulercache.Resource{MilliCPU: 1, Memory: 1, OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceA: 3}}),
+schedulercache.Resource{MilliCPU: 1, Memory: 1, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 3}},
+schedulercache.Resource{MilliCPU: 1, Memory: 1, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 3}}),
nodeInfo: schedulercache.NewNodeInfo(
-newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceA: 2}})),
+newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 2}})),
fits: true,
test: "opaque resource allocatable admits multiple init containers",
},
{
pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}),
-schedulercache.Resource{MilliCPU: 1, Memory: 1, OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceA: 6}},
-schedulercache.Resource{MilliCPU: 1, Memory: 1, OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceA: 3}}),
+schedulercache.Resource{MilliCPU: 1, Memory: 1, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 6}},
+schedulercache.Resource{MilliCPU: 1, Memory: 1, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 3}}),
nodeInfo: schedulercache.NewNodeInfo(
-newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceA: 2}})),
+newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 2}})),
fits: false,
test: "opaque resource allocatable enforced for multiple init containers",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(opaqueResourceA, 6, 2, 5)},
},
{
pod: newResourcePod(
-schedulercache.Resource{MilliCPU: 1, Memory: 1, OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceB: 1}}),
+schedulercache.Resource{MilliCPU: 1, Memory: 1, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceB: 1}}),
nodeInfo: schedulercache.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0})),
fits: false,
@@ -341,7 +341,7 @@ func TestPodFitsResources(t *testing.T) {
},
{
pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}),
-schedulercache.Resource{MilliCPU: 1, Memory: 1, OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceB: 1}}),
+schedulercache.Resource{MilliCPU: 1, Memory: 1, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceB: 1}}),
nodeInfo: schedulercache.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0})),
fits: false,


@@ -109,9 +109,9 @@ func TestAssumePodScheduled(t *testing.T) {
pods: []*v1.Pod{testPods[4]},
wNodeInfo: &NodeInfo{
requestedResource: &Resource{
-MilliCPU: 100,
-Memory: 500,
-OpaqueIntResources: map[v1.ResourceName]int64{"pod.alpha.kubernetes.io/opaque-int-resource-oir-foo": 3},
+MilliCPU: 100,
+Memory: 500,
+ExtendedResources: map[v1.ResourceName]int64{"pod.alpha.kubernetes.io/opaque-int-resource-oir-foo": 3},
},
nonzeroRequest: &Resource{
MilliCPU: 100,
@@ -125,9 +125,9 @@ func TestAssumePodScheduled(t *testing.T) {
pods: []*v1.Pod{testPods[4], testPods[5]},
wNodeInfo: &NodeInfo{
requestedResource: &Resource{
-MilliCPU: 300,
-Memory: 1524,
-OpaqueIntResources: map[v1.ResourceName]int64{"pod.alpha.kubernetes.io/opaque-int-resource-oir-foo": 8},
+MilliCPU: 300,
+Memory: 1524,
+ExtendedResources: map[v1.ResourceName]int64{"pod.alpha.kubernetes.io/opaque-int-resource-oir-foo": 8},
},
nonzeroRequest: &Resource{
MilliCPU: 300,


@@ -70,8 +70,8 @@ type Resource struct {
StorageOverlay int64
// We store allowedPodNumber (which is Node.Status.Allocatable.Pods().Value())
// explicitly as int, to avoid conversions and improve performance.
-AllowedPodNumber int
-OpaqueIntResources map[v1.ResourceName]int64
+AllowedPodNumber int
+ExtendedResources map[v1.ResourceName]int64
}
// New creates a Resource from ResourceList
@@ -102,8 +102,8 @@ func (r *Resource) Add(rl v1.ResourceList) {
case v1.ResourceStorageOverlay:
r.StorageOverlay += rQuant.Value()
default:
-if v1helper.IsOpaqueIntResourceName(rName) {
-r.AddOpaque(rName, rQuant.Value())
+if v1helper.IsExtendedResourceName(rName) {
+r.AddExtended(rName, rQuant.Value())
}
}
}
@@ -118,7 +118,7 @@ func (r *Resource) ResourceList() v1.ResourceList {
v1.ResourceStorageOverlay: *resource.NewQuantity(r.StorageOverlay, resource.BinarySI),
v1.ResourceStorageScratch: *resource.NewQuantity(r.StorageScratch, resource.BinarySI),
}
-for rName, rQuant := range r.OpaqueIntResources {
+for rName, rQuant := range r.ExtendedResources {
result[rName] = *resource.NewQuantity(rQuant, resource.DecimalSI)
}
return result
@@ -133,25 +133,25 @@ func (r *Resource) Clone() *Resource {
StorageOverlay: r.StorageOverlay,
StorageScratch: r.StorageScratch,
}
-if r.OpaqueIntResources != nil {
-res.OpaqueIntResources = make(map[v1.ResourceName]int64)
-for k, v := range r.OpaqueIntResources {
-res.OpaqueIntResources[k] = v
+if r.ExtendedResources != nil {
+res.ExtendedResources = make(map[v1.ResourceName]int64)
+for k, v := range r.ExtendedResources {
+res.ExtendedResources[k] = v
}
}
return res
}
-func (r *Resource) AddOpaque(name v1.ResourceName, quantity int64) {
-r.SetOpaque(name, r.OpaqueIntResources[name]+quantity)
+func (r *Resource) AddExtended(name v1.ResourceName, quantity int64) {
+r.SetExtended(name, r.ExtendedResources[name]+quantity)
}
-func (r *Resource) SetOpaque(name v1.ResourceName, quantity int64) {
+func (r *Resource) SetExtended(name v1.ResourceName, quantity int64) {
// Lazily allocate opaque integer resource map.
-if r.OpaqueIntResources == nil {
-r.OpaqueIntResources = map[v1.ResourceName]int64{}
+if r.ExtendedResources == nil {
+r.ExtendedResources = map[v1.ResourceName]int64{}
}
-r.OpaqueIntResources[name] = quantity
+r.ExtendedResources[name] = quantity
}
// NewNodeInfo returns a ready to use empty NodeInfo object.
@@ -306,11 +306,11 @@ func (n *NodeInfo) addPod(pod *v1.Pod) {
n.requestedResource.NvidiaGPU += res.NvidiaGPU
n.requestedResource.StorageOverlay += res.StorageOverlay
n.requestedResource.StorageScratch += res.StorageScratch
-if n.requestedResource.OpaqueIntResources == nil && len(res.OpaqueIntResources) > 0 {
-n.requestedResource.OpaqueIntResources = map[v1.ResourceName]int64{}
+if n.requestedResource.ExtendedResources == nil && len(res.ExtendedResources) > 0 {
+n.requestedResource.ExtendedResources = map[v1.ResourceName]int64{}
}
-for rName, rQuant := range res.OpaqueIntResources {
-n.requestedResource.OpaqueIntResources[rName] += rQuant
+for rName, rQuant := range res.ExtendedResources {
+n.requestedResource.ExtendedResources[rName] += rQuant
}
n.nonzeroRequest.MilliCPU += non0_cpu
n.nonzeroRequest.Memory += non0_mem
@@ -361,11 +361,11 @@ func (n *NodeInfo) removePod(pod *v1.Pod) error {
n.requestedResource.MilliCPU -= res.MilliCPU
n.requestedResource.Memory -= res.Memory
n.requestedResource.NvidiaGPU -= res.NvidiaGPU
-if len(res.OpaqueIntResources) > 0 && n.requestedResource.OpaqueIntResources == nil {
-n.requestedResource.OpaqueIntResources = map[v1.ResourceName]int64{}
+if len(res.ExtendedResources) > 0 && n.requestedResource.ExtendedResources == nil {
+n.requestedResource.ExtendedResources = map[v1.ResourceName]int64{}
}
-for rName, rQuant := range res.OpaqueIntResources {
-n.requestedResource.OpaqueIntResources[rName] -= rQuant
+for rName, rQuant := range res.ExtendedResources {
+n.requestedResource.ExtendedResources[rName] -= rQuant
}
n.nonzeroRequest.MilliCPU -= non0_cpu
n.nonzeroRequest.Memory -= non0_mem
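The renamed accessors keep the same lazy-allocation pattern as their `Opaque` predecessors. A self-contained sketch (string keys instead of `v1.ResourceName` to keep it dependency-free):

```go
package main

import "fmt"

// Resource is a trimmed-down stand-in for the scheduler cache's Resource type.
type Resource struct {
	ExtendedResources map[string]int64
}

// SetExtended lazily allocates the map on first use.
func (r *Resource) SetExtended(name string, quantity int64) {
	if r.ExtendedResources == nil {
		r.ExtendedResources = map[string]int64{}
	}
	r.ExtendedResources[name] = quantity
}

// AddExtended accumulates into the existing quantity (zero if absent).
func (r *Resource) AddExtended(name string, quantity int64) {
	r.SetExtended(name, r.ExtendedResources[name]+quantity)
}

func main() {
	var r Resource
	r.AddExtended("my.org/resource", 3) // map allocated here
	r.AddExtended("my.org/resource", 2)
	fmt.Println(r.ExtendedResources["my.org/resource"]) // 5
}
```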


@@ -3611,6 +3611,8 @@ const (
const (
// Namespace prefix for opaque counted resources (alpha).
ResourceOpaqueIntPrefix = "pod.alpha.kubernetes.io/opaque-int-resource-"
+// Default namespace prefix.
+ResourceDefaultNamespacePrefix = "kubernetes.io/"
)
// ResourceList is a set of (resource name, quantity) pairs.