Merge pull request #128407 from ndixita/pod-level-resources

[PodLevelResources] Pod Level Resources Feature Alpha
Authored by Kubernetes Prow Robot on 2024-11-08 07:10:50 +00:00; committed by GitHub
commit c25f5eefe4
126 changed files with 6373 additions and 1308 deletions

View File

@@ -9754,6 +9754,10 @@
"x-kubernetes-patch-merge-key": "name",
"x-kubernetes-patch-strategy": "merge,retainKeys"
},
"resources": {
"$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements",
"description": "Resources is the total amount of CPU and Memory resources required by all containers in the pod. It supports specifying Requests and Limits for \"cpu\" and \"memory\" resource names only. ResourceClaims are not supported.\n\nThis field enables fine-grained control over resource allocation for the entire pod, allowing resource sharing among containers in a pod.\n\nThis is an alpha field and requires enabling the PodLevelResources feature gate."
},
"restartPolicy": {
"description": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy",
"type": "string"

View File

@@ -5681,6 +5681,14 @@
"x-kubernetes-patch-merge-key": "name",
"x-kubernetes-patch-strategy": "merge,retainKeys"
},
"resources": {
"allOf": [
{
"$ref": "#/components/schemas/io.k8s.api.core.v1.ResourceRequirements"
}
],
"description": "Resources is the total amount of CPU and Memory resources required by all containers in the pod. It supports specifying Requests and Limits for \"cpu\" and \"memory\" resource names only. ResourceClaims are not supported.\n\nThis field enables fine-grained control over resource allocation for the entire pod, allowing resource sharing among containers in a pod.\n\nThis is an alpha field and requires enabling the PodLevelResources feature gate."
},
"restartPolicy": {
"description": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy",
"type": "string"

View File

@@ -3928,6 +3928,14 @@
"x-kubernetes-patch-merge-key": "name",
"x-kubernetes-patch-strategy": "merge,retainKeys"
},
"resources": {
"allOf": [
{
"$ref": "#/components/schemas/io.k8s.api.core.v1.ResourceRequirements"
}
],
"description": "Resources is the total amount of CPU and Memory resources required by all containers in the pod. It supports specifying Requests and Limits for \"cpu\" and \"memory\" resource names only. ResourceClaims are not supported.\n\nThis field enables fine-grained control over resource allocation for the entire pod, allowing resource sharing among containers in a pod.\n\nThis is an alpha field and requires enabling the PodLevelResources feature gate."
},
"restartPolicy": {
"description": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy",
"type": "string"

View File

@@ -3130,6 +3130,14 @@
"x-kubernetes-patch-merge-key": "name",
"x-kubernetes-patch-strategy": "merge,retainKeys"
},
"resources": {
"allOf": [
{
"$ref": "#/components/schemas/io.k8s.api.core.v1.ResourceRequirements"
}
],
"description": "Resources is the total amount of CPU and Memory resources required by all containers in the pod. It supports specifying Requests and Limits for \"cpu\" and \"memory\" resource names only. ResourceClaims are not supported.\n\nThis field enables fine-grained control over resource allocation for the entire pod, allowing resource sharing among containers in a pod.\n\nThis is an alpha field and requires enabling the PodLevelResources feature gate."
},
"restartPolicy": {
"description": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy",
"type": "string"

View File

@@ -72,6 +72,12 @@ func SetResourceVersion(rv string) Tweak {
}
}
func SetPodResources(resources *api.ResourceRequirements) Tweak {
return func(pod *api.Pod) {
pod.Spec.Resources = resources
}
}
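A hypothetical usage sketch of the new tweak, assuming this package's Tweak helpers and an imported resource package:

func sketchSetPodResources() {
	pod := &api.Pod{}
	tweaks := []Tweak{
		SetContainers(api.Container{Name: "c1", Image: "image"}),
		SetPodResources(&api.ResourceRequirements{
			Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
		}),
	}
	// Tweaks are applied in order, mutating the pod in place.
	for _, tweak := range tweaks {
		tweak(pod)
	}
	// pod.Spec now carries both the containers and the pod-level resources.
}
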
func SetContainers(containers ...api.Container) Tweak {
return func(pod *api.Pod) {
pod.Spec.Containers = containers

View File

@@ -384,6 +384,7 @@ func GetValidationOptionsFromPodSpecAndMeta(podSpec, oldPodSpec *api.PodSpec, po
AllowNamespacedSysctlsForHostNetAndHostIPC: false,
AllowNonLocalProjectedTokenPath: false,
AllowPodLifecycleSleepActionZeroValue: utilfeature.DefaultFeatureGate.Enabled(features.PodLifecycleSleepActionAllowZero),
PodLevelResourcesEnabled: utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources),
}
// If old spec uses relaxed validation or enabled the RelaxedEnvironmentVariableValidation feature gate,
@@ -621,6 +622,7 @@ func dropDisabledFields(
}
}
dropDisabledPodLevelResources(podSpec, oldPodSpec)
dropDisabledProcMountField(podSpec, oldPodSpec)
dropDisabledNodeInclusionPolicyFields(podSpec, oldPodSpec)
@@ -674,6 +676,14 @@ func dropDisabledFields(
dropSELinuxChangePolicy(podSpec, oldPodSpec)
}
func dropDisabledPodLevelResources(podSpec, oldPodSpec *api.PodSpec) {
// If the feature is disabled and not in use, drop the pod-level Resources
// field from the PodSpec.
if !utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources) && !podLevelResourcesInUse(oldPodSpec) {
podSpec.Resources = nil
}
}
func dropPodLifecycleSleepAction(podSpec, oldPodSpec *api.PodSpec) {
if utilfeature.DefaultFeatureGate.Enabled(features.PodLifecycleSleepAction) || podLifecycleSleepActionInUse(oldPodSpec) {
return
@@ -1050,6 +1060,28 @@ func supplementalGroupsPolicyInUse(podSpec *api.PodSpec) bool {
return false
}
// podLevelResourcesInUse returns true if the pod spec is non-nil and its
// pod-level Resources field has non-empty Requests or Limits.
func podLevelResourcesInUse(podSpec *api.PodSpec) bool {
if podSpec == nil {
return false
}
if podSpec.Resources == nil {
return false
}
if len(podSpec.Resources.Requests) > 0 {
return true
}
if len(podSpec.Resources.Limits) > 0 {
return true
}
return false
}
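A sketch (illustration only, assuming the feature gate is disabled) of the two paths through dropDisabledPodLevelResources:

func sketchDropDisabledPodLevelResources() {
	// Create path: there is no old spec, so the disabled field is cleared.
	newSpec := &api.PodSpec{Resources: &api.ResourceRequirements{
		Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
	}}
	dropDisabledPodLevelResources(newSpec, nil) // newSpec.Resources == nil

	// Update path: the old spec already uses the field, so
	// podLevelResourcesInUse(oldSpec) is true and the field is preserved
	// even though the gate is off.
	oldSpec := &api.PodSpec{Resources: &api.ResourceRequirements{
		Limits: api.ResourceList{api.ResourceMemory: resource.MustParse("1Gi")},
	}}
	updated := &api.PodSpec{Resources: oldSpec.Resources.DeepCopy()}
	dropDisabledPodLevelResources(updated, oldSpec) // updated.Resources survives
}
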
// inPlacePodVerticalScalingInUse returns true if pod spec is non-nil and ResizePolicy is set
func inPlacePodVerticalScalingInUse(podSpec *api.PodSpec) bool {
if podSpec == nil {

View File

@@ -2703,6 +2703,149 @@ func TestDropInPlacePodVerticalScaling(t *testing.T) {
}
}
func TestDropPodLevelResources(t *testing.T) {
containers := []api.Container{
{
Name: "c1",
Image: "image",
Resources: api.ResourceRequirements{
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
},
},
}
podWithPodLevelResources := func() *api.Pod {
return &api.Pod{
Spec: api.PodSpec{
Resources: &api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceCPU: resource.MustParse("100m"),
api.ResourceMemory: resource.MustParse("50Gi"),
},
Limits: api.ResourceList{
api.ResourceCPU: resource.MustParse("100m"),
api.ResourceMemory: resource.MustParse("50Gi"),
},
},
Containers: containers,
},
}
}
podWithoutPodLevelResources := func() *api.Pod {
return &api.Pod{
Spec: api.PodSpec{
Containers: containers,
},
}
}
podInfo := []struct {
description string
hasPodLevelResources bool
pod func() *api.Pod
}{
{
description: "has pod-level resources",
hasPodLevelResources: true,
pod: podWithPodLevelResources,
},
{
description: "does not have pod-level resources",
hasPodLevelResources: false,
pod: podWithoutPodLevelResources,
},
{
description: "is nil",
hasPodLevelResources: false,
pod: func() *api.Pod { return nil },
},
{
description: "is empty struct",
hasPodLevelResources: false,
// TODO: refactor to generalize and use podWithPodLevelResources()
pod: func() *api.Pod {
return &api.Pod{
Spec: api.PodSpec{
Resources: &api.ResourceRequirements{},
Containers: containers,
},
}
},
},
{
description: "is empty Requests list",
hasPodLevelResources: false,
pod: func() *api.Pod {
return &api.Pod{
Spec: api.PodSpec{Resources: &api.ResourceRequirements{
Requests: api.ResourceList{},
}}}
},
},
{
description: "is empty Limits list",
hasPodLevelResources: false,
pod: func() *api.Pod {
return &api.Pod{
Spec: api.PodSpec{Resources: &api.ResourceRequirements{
Limits: api.ResourceList{},
}}}
},
},
}
for _, enabled := range []bool{true, false} {
for _, oldPodInfo := range podInfo {
for _, newPodInfo := range podInfo {
oldPodHasPodLevelResources, oldPod := oldPodInfo.hasPodLevelResources, oldPodInfo.pod()
newPodHasPodLevelResources, newPod := newPodInfo.hasPodLevelResources, newPodInfo.pod()
if newPod == nil {
continue
}
t.Run(fmt.Sprintf("feature enabled=%v, old pod %v, new pod %v", enabled, oldPodInfo.description, newPodInfo.description), func(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodLevelResources, enabled)
var oldPodSpec *api.PodSpec
if oldPod != nil {
oldPodSpec = &oldPod.Spec
}
dropDisabledFields(&newPod.Spec, nil, oldPodSpec, nil)
// old pod should never be changed
if !reflect.DeepEqual(oldPod, oldPodInfo.pod()) {
t.Errorf("old pod changed: %v", cmp.Diff(oldPod, oldPodInfo.pod()))
}
switch {
case enabled || oldPodHasPodLevelResources:
// new pod shouldn't change if feature enabled or if old pod has
// any pod level resources
if !reflect.DeepEqual(newPod, newPodInfo.pod()) {
t.Errorf("new pod changed: %v", cmp.Diff(newPod, newPodInfo.pod()))
}
case newPodHasPodLevelResources:
// new pod should be changed
if reflect.DeepEqual(newPod, newPodInfo.pod()) {
t.Errorf("new pod was not changed")
}
// new pod should not have any pod-level resources
if !reflect.DeepEqual(newPod, podWithoutPodLevelResources()) {
t.Errorf("new pod has pod-level resources: %v", cmp.Diff(newPod, podWithoutPodLevelResources()))
}
default:
if newPod.Spec.Resources != nil {
t.Errorf("expected nil, got: %v", newPod.Spec.Resources)
}
}
})
}
}
}
}
func TestDropSidecarContainers(t *testing.T) {
containerRestartPolicyAlways := api.ContainerRestartPolicyAlways

View File

@@ -339,6 +339,10 @@ func SetObjectDefaults_DaemonSet(in *appsv1.DaemonSet) {
}
}
apiscorev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Overhead)
if in.Spec.Template.Spec.Resources != nil {
apiscorev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Limits)
apiscorev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Requests)
}
}
func SetObjectDefaults_DaemonSetList(in *appsv1.DaemonSetList) {
@@ -644,6 +648,10 @@ func SetObjectDefaults_Deployment(in *appsv1.Deployment) {
}
}
apiscorev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Overhead)
if in.Spec.Template.Spec.Resources != nil {
apiscorev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Limits)
apiscorev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Requests)
}
}
func SetObjectDefaults_DeploymentList(in *appsv1.DeploymentList) {
@@ -949,6 +957,10 @@ func SetObjectDefaults_ReplicaSet(in *appsv1.ReplicaSet) {
}
}
apiscorev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Overhead)
if in.Spec.Template.Spec.Resources != nil {
apiscorev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Limits)
apiscorev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Requests)
}
}
func SetObjectDefaults_ReplicaSetList(in *appsv1.ReplicaSetList) {
@@ -1254,6 +1266,10 @@ func SetObjectDefaults_StatefulSet(in *appsv1.StatefulSet) {
}
}
apiscorev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Overhead)
if in.Spec.Template.Spec.Resources != nil {
apiscorev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Limits)
apiscorev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Requests)
}
for i := range in.Spec.VolumeClaimTemplates {
a := &in.Spec.VolumeClaimTemplates[i]
apiscorev1.SetDefaults_PersistentVolumeClaim(a)

View File

@@ -335,6 +335,10 @@ func SetObjectDefaults_Deployment(in *appsv1beta1.Deployment) {
}
}
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Overhead)
if in.Spec.Template.Spec.Resources != nil {
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Limits)
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Requests)
}
}
func SetObjectDefaults_DeploymentList(in *appsv1beta1.DeploymentList) {
@@ -640,6 +644,10 @@ func SetObjectDefaults_StatefulSet(in *appsv1beta1.StatefulSet) {
}
}
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Overhead)
if in.Spec.Template.Spec.Resources != nil {
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Limits)
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Requests)
}
for i := range in.Spec.VolumeClaimTemplates {
a := &in.Spec.VolumeClaimTemplates[i]
corev1.SetDefaults_PersistentVolumeClaim(a)

View File

@@ -339,6 +339,10 @@ func SetObjectDefaults_DaemonSet(in *appsv1beta2.DaemonSet) {
}
}
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Overhead)
if in.Spec.Template.Spec.Resources != nil {
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Limits)
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Requests)
}
}
func SetObjectDefaults_DaemonSetList(in *appsv1beta2.DaemonSetList) {
@@ -644,6 +648,10 @@ func SetObjectDefaults_Deployment(in *appsv1beta2.Deployment) {
}
}
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Overhead)
if in.Spec.Template.Spec.Resources != nil {
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Limits)
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Requests)
}
}
func SetObjectDefaults_DeploymentList(in *appsv1beta2.DeploymentList) {
@@ -949,6 +957,10 @@ func SetObjectDefaults_ReplicaSet(in *appsv1beta2.ReplicaSet) {
}
}
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Overhead)
if in.Spec.Template.Spec.Resources != nil {
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Limits)
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Requests)
}
}
func SetObjectDefaults_ReplicaSetList(in *appsv1beta2.ReplicaSetList) {
@@ -1254,6 +1266,10 @@ func SetObjectDefaults_StatefulSet(in *appsv1beta2.StatefulSet) {
}
}
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Overhead)
if in.Spec.Template.Spec.Resources != nil {
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Limits)
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Requests)
}
for i := range in.Spec.VolumeClaimTemplates {
a := &in.Spec.VolumeClaimTemplates[i]
corev1.SetDefaults_PersistentVolumeClaim(a)

View File

@@ -335,6 +335,10 @@ func SetObjectDefaults_CronJob(in *batchv1.CronJob) {
}
}
apiscorev1.SetDefaults_ResourceList(&in.Spec.JobTemplate.Spec.Template.Spec.Overhead)
if in.Spec.JobTemplate.Spec.Template.Spec.Resources != nil {
apiscorev1.SetDefaults_ResourceList(&in.Spec.JobTemplate.Spec.Template.Spec.Resources.Limits)
apiscorev1.SetDefaults_ResourceList(&in.Spec.JobTemplate.Spec.Template.Spec.Resources.Requests)
}
}
func SetObjectDefaults_CronJobList(in *batchv1.CronJobList) {
@@ -640,6 +644,10 @@ func SetObjectDefaults_Job(in *batchv1.Job) {
}
}
apiscorev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Overhead)
if in.Spec.Template.Spec.Resources != nil {
apiscorev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Limits)
apiscorev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Requests)
}
}
func SetObjectDefaults_JobList(in *batchv1.JobList) {

View File

@@ -333,6 +333,10 @@ func SetObjectDefaults_CronJob(in *batchv1beta1.CronJob) {
}
}
corev1.SetDefaults_ResourceList(&in.Spec.JobTemplate.Spec.Template.Spec.Overhead)
if in.Spec.JobTemplate.Spec.Template.Spec.Resources != nil {
corev1.SetDefaults_ResourceList(&in.Spec.JobTemplate.Spec.Template.Spec.Resources.Limits)
corev1.SetDefaults_ResourceList(&in.Spec.JobTemplate.Spec.Template.Spec.Resources.Requests)
}
}
func SetObjectDefaults_CronJobList(in *batchv1beta1.CronJobList) {

View File

@@ -21,7 +21,9 @@ package qos
import (
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/features"
)
var supportedQoSComputeResources = sets.NewString(string(core.ResourceCPU), string(core.ResourceMemory))
@@ -39,6 +41,45 @@ func GetPodQOS(pod *core.Pod) core.PodQOSClass {
return ComputePodQOS(pod)
}
// zeroQuantity represents a resource.Quantity with value "0", used as a baseline
// for resource comparisons.
var zeroQuantity = resource.MustParse("0")
// processResourceList adds the non-zero quantities of supported QoS compute
// resources from newList to list.
func processResourceList(list, newList core.ResourceList) {
for name, quantity := range newList {
if !isSupportedQoSComputeResource(name) {
continue
}
if quantity.Cmp(zeroQuantity) == 1 {
delta := quantity.DeepCopy()
if _, exists := list[name]; !exists {
list[name] = delta
} else {
delta.Add(list[name])
list[name] = delta
}
}
}
}
// getQOSResources returns a set of resource names from the provided resource list that:
// 1. Are supported QoS compute resources
// 2. Have quantities greater than zero
func getQOSResources(list core.ResourceList) sets.Set[string] {
qosResources := sets.New[string]()
for name, quantity := range list {
if !isSupportedQoSComputeResource(name) {
continue
}
if quantity.Cmp(zeroQuantity) == 1 {
qosResources.Insert(string(name))
}
}
return qosResources
}
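A worked illustration of these helpers (a sketch, assuming the same package and imports):

func sketchQOSHelpers() {
	list := core.ResourceList{}
	processResourceList(list, core.ResourceList{
		core.ResourceCPU:     resource.MustParse("100m"),
		core.ResourceStorage: resource.MustParse("1Gi"), // skipped: not a QoS compute resource
	})
	processResourceList(list, core.ResourceList{core.ResourceCPU: resource.MustParse("50m")})
	// list now holds cpu: 150m; storage was never added.

	qosResources := getQOSResources(core.ResourceList{
		core.ResourceCPU:    resource.MustParse("1"),
		core.ResourceMemory: resource.MustParse("0"), // zero quantity: excluded
	})
	// qosResources contains only "cpu", so a pod with these limits cannot be
	// Guaranteed: the memory limit is missing for QoS purposes.
	_ = qosResources
}
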
// ComputePodQOS evaluates the list of containers to determine a pod's QoS class. This function is more
// expensive than GetPodQOS, which should be used for pods having a non-empty .Status.QOSClass.
// A pod is besteffort if none of its containers have specified any requests or limits.
@@ -48,8 +89,24 @@ func GetPodQOS(pod *core.Pod) core.PodQOSClass {
func ComputePodQOS(pod *core.Pod) core.PodQOSClass {
requests := core.ResourceList{}
limits := core.ResourceList{}
zeroQuantity := resource.MustParse("0")
isGuaranteed := true
// When pod-level resources are specified, we use them to determine QoS class.
if utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources) &&
pod.Spec.Resources != nil {
if len(pod.Spec.Resources.Requests) > 0 {
// process requests
processResourceList(requests, pod.Spec.Resources.Requests)
}
if len(pod.Spec.Resources.Limits) > 0 {
// process limits
processResourceList(limits, pod.Spec.Resources.Limits)
qosLimitResources := getQOSResources(pod.Spec.Resources.Limits)
if !qosLimitResources.HasAll(string(core.ResourceMemory), string(core.ResourceCPU)) {
isGuaranteed = false
}
}
} else {
// note, ephemeral containers are not considered for QoS as they cannot define resources
allContainers := []core.Container{}
allContainers = append(allContainers, pod.Spec.Containers...)
@@ -92,10 +149,12 @@ func ComputePodQOS(pod *core.Pod) core.PodQOSClass {
isGuaranteed = false
}
}
}
if len(requests) == 0 && len(limits) == 0 {
return core.PodQOSBestEffort
}
// Check is requests match limits for all resources.
// Check if requests match limits for all resources.
if isGuaranteed {
for name, req := range requests {
if lim, exists := limits[name]; !exists || lim.Cmp(req) != 0 {

View File

@@ -3609,6 +3609,20 @@ type PodSpec struct {
// +featureGate=DynamicResourceAllocation
// +optional
ResourceClaims []PodResourceClaim
// Resources is the total amount of CPU and Memory resources required by all
// containers in the pod. It supports specifying Requests and Limits for
// "cpu" and "memory" resource names only. ResourceClaims are not supported.
//
// This field enables fine-grained control over resource allocation for the
// entire pod, allowing resource sharing among containers in a pod.
// TODO: For beta graduation, expand this comment with a detailed explanation.
//
// This is an alpha field and requires enabling the PodLevelResources feature
// gate.
//
// +featureGate=PodLevelResources
// +optional
Resources *ResourceRequirements
}
// PodResourceClaim references exactly one ResourceClaim through a ClaimSource.

View File

@@ -25,6 +25,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
utilfeature "k8s.io/apiserver/pkg/util/feature"
resourcehelper "k8s.io/component-helpers/resource"
"k8s.io/kubernetes/pkg/api/v1/service"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/util/parsers"
@@ -217,6 +218,13 @@ func SetDefaults_Pod(obj *v1.Pod) {
}
}
}
// Pod Requests default values must be applied after container-level default values
// have been populated.
if utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources) {
defaultPodRequests(obj)
}
if obj.Spec.EnableServiceLinks == nil {
enableServiceLinks := v1.DefaultEnableServiceLinks
obj.Spec.EnableServiceLinks = &enableServiceLinks
@@ -438,3 +446,56 @@ func SetDefaults_PodLogOptions(obj *v1.PodLogOptions) {
}
}
}
// defaultPodRequests applies default values for pod-level requests, only when
// pod-level limits are set, in the following scenarios:
// 1. When at least one container (regular, init or sidecar) has requests set:
// The pod-level requests become equal to the effective requests of all containers
// in the pod.
// 2. When no containers have requests set: The pod-level requests become equal to
// pod-level limits.
// This defaulting behavior ensures consistent resource accounting at the pod-level
// while maintaining compatibility with the container-level specifications, as detailed
// in KEP-2837: https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/2837-pod-level-resource-spec/README.md#proposed-validation--defaulting-rules
func defaultPodRequests(obj *v1.Pod) {
// We only populate defaults when the pod-level resources are partly specified already.
if obj.Spec.Resources == nil {
return
}
if len(obj.Spec.Resources.Limits) == 0 {
return
}
podReqs := obj.Spec.Resources.Requests
if podReqs == nil {
podReqs = make(v1.ResourceList)
}
aggrCtrReqs := resourcehelper.AggregateContainerRequests(obj, resourcehelper.PodResourcesOptions{})
// When containers specify requests for a resource supported at the pod level
// and pod-level requests are not set, the pod-level requests for that
// resource default to the aggregate effective requests of all the containers.
for key, aggrCtrReq := range aggrCtrReqs {
if _, exists := podReqs[key]; !exists && resourcehelper.IsSupportedPodLevelResource(key) {
podReqs[key] = aggrCtrReq.DeepCopy()
}
}
// When no containers specify requests for a resource, the pod-level requests
// will default to match the pod-level limits, if pod-level
// limits exist for that resource.
for key, podLim := range obj.Spec.Resources.Limits {
if _, exists := podReqs[key]; !exists && resourcehelper.IsSupportedPodLevelResource(key) {
podReqs[key] = podLim.DeepCopy()
}
}
// Only set pod-level resource requests in the PodSpec if the requirements map
// contains entries after collecting container-level requests and pod-level limits.
if len(podReqs) > 0 {
obj.Spec.Resources.Requests = podReqs
}
}
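A sketch making the two scenarios above concrete (assuming the gate is enabled and the resource package is imported; values are illustrative):

func sketchDefaultPodRequests() {
	pod := &v1.Pod{Spec: v1.PodSpec{
		Resources: &v1.ResourceRequirements{
			Limits: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("4m"),
				v1.ResourceMemory: resource.MustParse("8Mi"),
			},
		},
		Containers: []v1.Container{{
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("3Mi")},
			},
		}},
	}}
	defaultPodRequests(pod)
	// Scenario 1 (memory): one container requested 3Mi, so the pod-level
	// request defaults to the aggregate container requests: memory=3Mi.
	// Scenario 2 (cpu): no container requested cpu, so the pod-level request
	// falls back to the pod-level limit: cpu=4m.
}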

View File

@@ -380,6 +380,880 @@ func testPodDefaults(t *testing.T, featuresEnabled bool) {
}
}
func TestPodResourcesDefaults(t *testing.T) {
cases := []struct {
name string
podLevelResourcesEnabled bool
containers []v1.Container
podResources *v1.ResourceRequirements
expectedPodSpec v1.PodSpec
}{
{
name: "pod resources=unset, container resources=unset",
containers: []v1.Container{{Resources: v1.ResourceRequirements{}}, {Resources: v1.ResourceRequirements{}}},
expectedPodSpec: v1.PodSpec{
Containers: []v1.Container{{Resources: v1.ResourceRequirements{}}, {Resources: v1.ResourceRequirements{}}},
},
}, {
name: "pod resources=unset, container requests=unset limits=set",
containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
"cpu": resource.MustParse("2m"),
"memory": resource.MustParse("1Mi"),
},
},
}, {
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
"cpu": resource.MustParse("1m"),
"memory": resource.MustParse("5Mi"),
},
},
},
},
expectedPodSpec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("2m"),
"memory": resource.MustParse("1Mi"),
},
Limits: v1.ResourceList{
"cpu": resource.MustParse("2m"),
"memory": resource.MustParse("1Mi"),
},
},
}, {
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("1m"),
"memory": resource.MustParse("5Mi"),
},
Limits: v1.ResourceList{
"cpu": resource.MustParse("1m"),
"memory": resource.MustParse("5Mi"),
},
},
},
},
},
}, {
name: "pod resources=unset, container requests=set limits=unset",
containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("2m"),
"memory": resource.MustParse("1Mi"),
},
},
}, {
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("1m"),
"memory": resource.MustParse("5Mi"),
},
},
},
},
expectedPodSpec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("2m"),
"memory": resource.MustParse("1Mi"),
},
},
}, {
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("1m"),
"memory": resource.MustParse("5Mi"),
},
},
},
},
},
}, {
name: "pod resources=unset, container requests=set limits=set",
containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("1m"),
"memory": resource.MustParse("1Mi"),
},
Limits: v1.ResourceList{
"cpu": resource.MustParse("2m"),
"memory": resource.MustParse("1Mi"),
},
},
}, {
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("1m"),
"memory": resource.MustParse("1Mi"),
},
Limits: v1.ResourceList{
"cpu": resource.MustParse("1m"),
"memory": resource.MustParse("5Mi"),
},
},
},
},
expectedPodSpec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("1m"),
"memory": resource.MustParse("1Mi"),
},
Limits: v1.ResourceList{
"cpu": resource.MustParse("2m"),
"memory": resource.MustParse("1Mi"),
},
},
}, {
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("1m"),
"memory": resource.MustParse("1Mi"),
},
Limits: v1.ResourceList{
"cpu": resource.MustParse("1m"),
"memory": resource.MustParse("5Mi"),
},
},
},
},
},
}, {
name: "pod requests=unset limits=set, container resources=unset",
podLevelResourcesEnabled: true,
podResources: &v1.ResourceRequirements{
Limits: v1.ResourceList{
"cpu": resource.MustParse("2m"),
"memory": resource.MustParse("1Mi"),
},
},
expectedPodSpec: v1.PodSpec{
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("2m"),
"memory": resource.MustParse("1Mi"),
},
Limits: v1.ResourceList{
"cpu": resource.MustParse("2m"),
"memory": resource.MustParse("1Mi"),
},
},
},
}, {
name: "pod limits=nil, container requests=unset limits=set",
podLevelResourcesEnabled: true,
podResources: &v1.ResourceRequirements{
Limits: nil,
},
containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
"cpu": resource.MustParse("2m"),
"memory": resource.MustParse("1Mi"),
},
},
}, {
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
"cpu": resource.MustParse("1m"),
"memory": resource.MustParse("5Mi"),
},
},
},
},
expectedPodSpec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("2m"),
"memory": resource.MustParse("1Mi"),
},
Limits: v1.ResourceList{
"cpu": resource.MustParse("2m"),
"memory": resource.MustParse("1Mi"),
},
},
}, {
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("1m"),
"memory": resource.MustParse("5Mi"),
},
Limits: v1.ResourceList{
"cpu": resource.MustParse("1m"),
"memory": resource.MustParse("5Mi"),
},
},
},
},
},
}, {
name: "pod limits=empty map, container requests=unset limits=set",
podLevelResourcesEnabled: true,
podResources: &v1.ResourceRequirements{
Limits: v1.ResourceList{},
},
containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
"cpu": resource.MustParse("2m"),
"memory": resource.MustParse("1Mi"),
},
},
}, {
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
"cpu": resource.MustParse("1m"),
"memory": resource.MustParse("5Mi"),
},
},
},
},
expectedPodSpec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("2m"),
"memory": resource.MustParse("1Mi"),
},
Limits: v1.ResourceList{
"cpu": resource.MustParse("2m"),
"memory": resource.MustParse("1Mi"),
},
},
}, {
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("1m"),
"memory": resource.MustParse("5Mi"),
},
Limits: v1.ResourceList{
"cpu": resource.MustParse("1m"),
"memory": resource.MustParse("5Mi"),
},
},
},
},
},
}, {
name: "pod requests=empty map limits=set, container requests=unset limits=set",
podLevelResourcesEnabled: true,
containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
"cpu": resource.MustParse("2m"),
"memory": resource.MustParse("1Mi"),
},
},
}, {
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
"cpu": resource.MustParse("1m"),
"memory": resource.MustParse("5Mi"),
},
},
},
},
podResources: &v1.ResourceRequirements{
Requests: v1.ResourceList{},
Limits: v1.ResourceList{
"cpu": resource.MustParse("5m"),
"memory": resource.MustParse("7Mi"),
},
},
expectedPodSpec: v1.PodSpec{
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("3m"),
"memory": resource.MustParse("6Mi"),
},
Limits: v1.ResourceList{
"cpu": resource.MustParse("5m"),
"memory": resource.MustParse("7Mi"),
},
},
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("2m"),
"memory": resource.MustParse("1Mi"),
},
Limits: v1.ResourceList{
"cpu": resource.MustParse("2m"),
"memory": resource.MustParse("1Mi"),
},
},
}, {
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("1m"),
"memory": resource.MustParse("5Mi"),
},
Limits: v1.ResourceList{
"cpu": resource.MustParse("1m"),
"memory": resource.MustParse("5Mi"),
},
},
},
},
},
}, {
name: "pod requests=nil limits=set, container requests=unset limits=set",
podLevelResourcesEnabled: true,
containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
"cpu": resource.MustParse("2m"),
"memory": resource.MustParse("1Mi"),
},
},
}, {
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
"cpu": resource.MustParse("1m"),
"memory": resource.MustParse("5Mi"),
},
},
},
},
podResources: &v1.ResourceRequirements{
Requests: nil,
Limits: v1.ResourceList{
"cpu": resource.MustParse("5m"),
"memory": resource.MustParse("7Mi"),
},
},
expectedPodSpec: v1.PodSpec{
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("3m"),
"memory": resource.MustParse("6Mi"),
},
Limits: v1.ResourceList{
"cpu": resource.MustParse("5m"),
"memory": resource.MustParse("7Mi"),
},
},
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("2m"),
"memory": resource.MustParse("1Mi"),
},
Limits: v1.ResourceList{
"cpu": resource.MustParse("2m"),
"memory": resource.MustParse("1Mi"),
},
},
}, {
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("1m"),
"memory": resource.MustParse("5Mi"),
},
Limits: v1.ResourceList{
"cpu": resource.MustParse("1m"),
"memory": resource.MustParse("5Mi"),
},
},
},
},
},
}, {
name: "pod requests=unset limits=set, container requests=unset limits=set",
podLevelResourcesEnabled: true,
containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
"cpu": resource.MustParse("2m"),
"memory": resource.MustParse("1Mi"),
},
},
}, {
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
"cpu": resource.MustParse("1m"),
"memory": resource.MustParse("5Mi"),
},
},
},
},
podResources: &v1.ResourceRequirements{
Limits: v1.ResourceList{
"cpu": resource.MustParse("5m"),
"memory": resource.MustParse("7Mi"),
},
},
expectedPodSpec: v1.PodSpec{
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("3m"),
"memory": resource.MustParse("6Mi"),
},
Limits: v1.ResourceList{
"cpu": resource.MustParse("5m"),
"memory": resource.MustParse("7Mi"),
},
},
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("2m"),
"memory": resource.MustParse("1Mi"),
},
Limits: v1.ResourceList{
"cpu": resource.MustParse("2m"),
"memory": resource.MustParse("1Mi"),
},
},
}, {
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("1m"),
"memory": resource.MustParse("5Mi"),
},
Limits: v1.ResourceList{
"cpu": resource.MustParse("1m"),
"memory": resource.MustParse("5Mi"),
},
},
},
},
},
}, {
name: "pod requests=unset limits=set, container requests=set limits=unset",
podLevelResourcesEnabled: true,
podResources: &v1.ResourceRequirements{
Limits: v1.ResourceList{
"cpu": resource.MustParse("4m"),
"memory": resource.MustParse("8Mi"),
},
},
containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("1m"),
"memory": resource.MustParse("1Mi"),
},
},
}, {
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("1m"),
"memory": resource.MustParse("2Mi"),
},
},
},
},
expectedPodSpec: v1.PodSpec{
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("2m"),
"memory": resource.MustParse("3Mi"),
},
Limits: v1.ResourceList{
"cpu": resource.MustParse("4m"),
"memory": resource.MustParse("8Mi"),
},
},
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("1m"),
"memory": resource.MustParse("1Mi"),
},
},
}, {
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("1m"),
"memory": resource.MustParse("2Mi"),
},
},
},
},
},
}, {
name: "pod requests=unset cpu limits=set, container resources=unset",
podLevelResourcesEnabled: true,
podResources: &v1.ResourceRequirements{
Limits: v1.ResourceList{
"cpu": resource.MustParse("4m"),
},
},
containers: []v1.Container{
{
Resources: v1.ResourceRequirements{},
}, {
Resources: v1.ResourceRequirements{},
},
},
expectedPodSpec: v1.PodSpec{
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("4m"),
},
Limits: v1.ResourceList{
"cpu": resource.MustParse("4m"),
},
},
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{},
}, {
Resources: v1.ResourceRequirements{},
},
},
},
}, {
name: "pod requests=unset limits=set, container requests=set limits=set",
podLevelResourcesEnabled: true,
podResources: &v1.ResourceRequirements{
Limits: v1.ResourceList{
"cpu": resource.MustParse("4m"),
"memory": resource.MustParse("8Mi"),
},
},
containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("1m"),
"memory": resource.MustParse("1Mi"),
},
Limits: v1.ResourceList{
"cpu": resource.MustParse("2m"),
"memory": resource.MustParse("1Mi"),
},
},
}, {
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("1m"),
"memory": resource.MustParse("2Mi"),
},
Limits: v1.ResourceList{
"cpu": resource.MustParse("1m"),
"memory": resource.MustParse("5Mi"),
},
},
},
},
expectedPodSpec: v1.PodSpec{
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("2m"),
"memory": resource.MustParse("3Mi"),
},
Limits: v1.ResourceList{
"cpu": resource.MustParse("4m"),
"memory": resource.MustParse("8Mi"),
},
},
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("1m"),
"memory": resource.MustParse("1Mi"),
},
Limits: v1.ResourceList{
"cpu": resource.MustParse("2m"),
"memory": resource.MustParse("1Mi"),
},
},
}, {
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("1m"),
"memory": resource.MustParse("2Mi"),
},
Limits: v1.ResourceList{
"cpu": resource.MustParse("1m"),
"memory": resource.MustParse("5Mi"),
},
},
},
},
},
}, {
name: "pod requests=unset limits=set, container memory requests=set limits=unset",
podLevelResourcesEnabled: true,
podResources: &v1.ResourceRequirements{
Limits: v1.ResourceList{
"cpu": resource.MustParse("4m"),
"memory": resource.MustParse("8Mi"),
},
},
containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"memory": resource.MustParse("1Mi"),
},
},
}, {
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"memory": resource.MustParse("5Mi"),
},
},
},
},
expectedPodSpec: v1.PodSpec{
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("4m"),
"memory": resource.MustParse("6Mi"),
},
Limits: v1.ResourceList{
"cpu": resource.MustParse("4m"),
"memory": resource.MustParse("8Mi"),
},
},
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("2m"),
"memory": resource.MustParse("1Mi"),
},
},
}, {
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("1m"),
"memory": resource.MustParse("5Mi"),
},
},
},
},
},
}, {
name: "pod limits=set, container unsupported requests=set limits=set",
podLevelResourcesEnabled: true,
podResources: &v1.ResourceRequirements{
Limits: v1.ResourceList{
"cpu": resource.MustParse("2m"),
"memory": resource.MustParse("1Mi"),
},
},
containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
"storage": resource.MustParse("1Mi"),
},
},
}, {
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
"ephemeral-storage": resource.MustParse("5Mi"),
},
},
},
},
expectedPodSpec: v1.PodSpec{
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("2m"),
"memory": resource.MustParse("1Mi"),
},
Limits: v1.ResourceList{
"cpu": resource.MustParse("2m"),
"memory": resource.MustParse("1Mi"),
},
},
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"storage": resource.MustParse("1Mi"),
},
Limits: v1.ResourceList{
"storage": resource.MustParse("1Mi"),
},
},
}, {
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"ephemeral-storage": resource.MustParse("5Mi"),
},
Limits: v1.ResourceList{
"ephemeral-storage": resource.MustParse("5Mi"),
},
},
},
},
},
}, {
name: "pod unsupported resources limits=set, container unsupported requests=set limits=set",
podLevelResourcesEnabled: true,
podResources: &v1.ResourceRequirements{
Limits: v1.ResourceList{
"storage": resource.MustParse("1Mi"),
},
},
containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
"storage": resource.MustParse("1Mi"),
},
},
}, {
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
"ephemeral-storage": resource.MustParse("5Mi"),
},
},
},
},
expectedPodSpec: v1.PodSpec{
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{},
Limits: v1.ResourceList{
"storage": resource.MustParse("1Mi"),
},
},
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"storage": resource.MustParse("1Mi"),
},
Limits: v1.ResourceList{
"storage": resource.MustParse("1Mi"),
},
},
}, {
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"ephemeral-storage": resource.MustParse("5Mi"),
},
Limits: v1.ResourceList{
"ephemeral-storage": resource.MustParse("5Mi"),
},
},
},
},
},
}, {
name: "pod supported and unsupported resources limits=set, container unsupported requests=set limits=set",
podLevelResourcesEnabled: true,
podResources: &v1.ResourceRequirements{
Limits: v1.ResourceList{
"cpu": resource.MustParse("2m"),
"memory": resource.MustParse("1Mi"),
"storage": resource.MustParse("1Mi"),
},
},
containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
"storage": resource.MustParse("1Mi"),
},
},
}, {
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
"ephemeral-storage": resource.MustParse("5Mi"),
},
},
},
},
expectedPodSpec: v1.PodSpec{
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("2m"),
"memory": resource.MustParse("1Mi"),
},
Limits: v1.ResourceList{
"cpu": resource.MustParse("2m"),
"memory": resource.MustParse("1Mi"),
},
},
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"storage": resource.MustParse("1Mi"),
},
Limits: v1.ResourceList{
"storage": resource.MustParse("1Mi"),
},
},
}, {
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"ephemeral-storage": resource.MustParse("5Mi"),
},
Limits: v1.ResourceList{
"ephemeral-storage": resource.MustParse("5Mi"),
},
},
},
},
},
}}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
if tc.podLevelResourcesEnabled {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodLevelResources, true)
}
spec := v1.PodSpec{
Containers: tc.containers,
Resources: tc.podResources,
}
p := v1.Pod{Spec: *spec.DeepCopy()}
corev1.SetDefaults_Pod(&p)
for i, container := range p.Spec.Containers {
for resource, quantity := range container.Resources.Requests {
if quantity.Cmp(tc.expectedPodSpec.Containers[i].Resources.Requests[resource]) != 0 {
t.Errorf("got: %v, expected: %v", quantity, tc.expectedPodSpec.Containers[i].Resources.Requests[resource])
}
}
}
if tc.podResources != nil {
for resource, quantity := range p.Spec.Resources.Requests {
if quantity.Cmp(tc.expectedPodSpec.Resources.Requests[resource]) != 0 {
t.Errorf("got: %v, expected: %v", quantity, tc.expectedPodSpec.Resources.Requests[resource])
}
}
}
})
}
}
func TestPodHostNetworkDefaults(t *testing.T) {
cases := []struct {
name string

View File

@@ -20,7 +20,9 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/features"
)
var supportedQoSComputeResources = sets.NewString(string(core.ResourceCPU), string(core.ResourceMemory))
@@ -41,16 +43,74 @@ func GetPodQOS(pod *v1.Pod) v1.PodQOSClass {
return ComputePodQOS(pod)
}
// zeroQuantity represents a resource.Quantity with value "0", used as a baseline
// for resource comparisons.
var zeroQuantity = resource.MustParse("0")
// processResourceList adds the non-zero quantities of supported QoS compute
// resources from newList to list.
func processResourceList(list, newList v1.ResourceList) {
for name, quantity := range newList {
if !isSupportedQoSComputeResource(name) {
continue
}
if quantity.Cmp(zeroQuantity) == 1 {
delta := quantity.DeepCopy()
if _, exists := list[name]; !exists {
list[name] = delta
} else {
delta.Add(list[name])
list[name] = delta
}
}
}
}
// getQOSResources returns a set of resource names from the provided resource list that:
// 1. Are supported QoS compute resources
// 2. Have quantities greater than zero
func getQOSResources(list v1.ResourceList) sets.Set[string] {
qosResources := sets.New[string]()
for name, quantity := range list {
if !isSupportedQoSComputeResource(name) {
continue
}
if quantity.Cmp(zeroQuantity) == 1 {
qosResources.Insert(string(name))
}
}
return qosResources
}
// ComputePodQOS evaluates the list of containers to determine a pod's QoS class. This function is more
// expensive than GetPodQOS, which should be used for pods having a non-empty .Status.QOSClass.
// A pod is besteffort if none of its containers have specified any requests or limits.
// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.
// A pod is burstable if limits and requests do not match across all containers.
// TODO(ndixita): Refactor ComputePodQOS into smaller functions to make it more
// readable and maintainable.
func ComputePodQOS(pod *v1.Pod) v1.PodQOSClass {
requests := v1.ResourceList{}
limits := v1.ResourceList{}
zeroQuantity := resource.MustParse("0")
isGuaranteed := true
// When pod-level resources are specified, we use them to determine QoS class.
if utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources) &&
pod.Spec.Resources != nil {
if len(pod.Spec.Resources.Requests) > 0 {
// process requests
processResourceList(requests, pod.Spec.Resources.Requests)
}
if len(pod.Spec.Resources.Limits) > 0 {
// process limits
processResourceList(limits, pod.Spec.Resources.Limits)
qosLimitResources := getQOSResources(pod.Spec.Resources.Limits)
if !qosLimitResources.HasAll(string(v1.ResourceMemory), string(v1.ResourceCPU)) {
isGuaranteed = false
}
}
} else {
// note, ephemeral containers are not considered for QoS as they cannot define resources
allContainers := []v1.Container{}
allContainers = append(allContainers, pod.Spec.Containers...)
allContainers = append(allContainers, pod.Spec.InitContainers...)
@@ -92,6 +152,8 @@ func ComputePodQOS(pod *v1.Pod) v1.PodQOSClass {
isGuaranteed = false
}
}
}
if len(requests) == 0 && len(limits) == 0 {
return v1.PodQOSBestEffort
}

View File

@@ -22,15 +22,19 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/core/helper/qos"
corev1 "k8s.io/kubernetes/pkg/apis/core/v1"
"k8s.io/kubernetes/pkg/features"
)
func TestComputePodQOS(t *testing.T) {
testCases := []struct {
pod *v1.Pod
expected v1.PodQOSClass
podLevelResourcesEnabled bool
}{
{
pod: newPod("guaranteed", []v1.Container{
@@ -126,8 +130,76 @@ func TestComputePodQOS(t *testing.T) {
}),
expected: v1.PodQOSBurstable,
},
{
pod: newPodWithResources(
"guaranteed-with-pod-level-resources",
[]v1.Container{
newContainer("best-effort", getResourceList("", ""), getResourceList("", "")),
},
getResourceRequirements(getResourceList("10m", "100Mi"), getResourceList("10m", "100Mi")),
),
expected: v1.PodQOSGuaranteed,
podLevelResourcesEnabled: true,
},
{
pod: newPodWithResources(
"guaranteed-with-pod-and-container-level-resources",
[]v1.Container{
newContainer("burstable", getResourceList("3m", "10Mi"), getResourceList("5m", "20Mi")),
},
getResourceRequirements(getResourceList("10m", "100Mi"), getResourceList("10m", "100Mi")),
),
expected: v1.PodQOSGuaranteed,
podLevelResourcesEnabled: true,
},
{
pod: newPodWithResources(
"burstable-with-pod-level-resources",
[]v1.Container{
newContainer("best-effort", getResourceList("", ""), getResourceList("", "")),
},
getResourceRequirements(getResourceList("10m", "10Mi"), getResourceList("20m", "50Mi")),
),
expected: v1.PodQOSBurstable,
podLevelResourcesEnabled: true,
},
{
pod: newPodWithResources(
"burstable-with-pod-and-container-level-resources",
[]v1.Container{
newContainer("burstable", getResourceList("5m", "10Mi"), getResourceList("5m", "10Mi")),
},
getResourceRequirements(getResourceList("10m", "10Mi"), getResourceList("20m", "50Mi")),
),
expected: v1.PodQOSBurstable,
podLevelResourcesEnabled: true,
},
{
pod: newPodWithResources(
"burstable-with-pod-and-container-level-requests",
[]v1.Container{
newContainer("burstable", getResourceList("5m", "10Mi"), getResourceList("", "")),
},
getResourceRequirements(getResourceList("10m", "10Mi"), getResourceList("", "")),
),
expected: v1.PodQOSBurstable,
podLevelResourcesEnabled: true,
},
{
pod: newPodWithResources(
"burstable-with-pod-and-container-level-resources-2",
[]v1.Container{
newContainer("burstable", getResourceList("5m", "10Mi"), getResourceList("", "")),
newContainer("guaranteed", getResourceList("5m", "10Mi"), getResourceList("5m", "10Mi")),
},
getResourceRequirements(getResourceList("10m", "10Mi"), getResourceList("5m", "")),
),
expected: v1.PodQOSBurstable,
podLevelResourcesEnabled: true,
},
}
for id, testCase := range testCases {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodLevelResources, testCase.podLevelResourcesEnabled)
if actual := ComputePodQOS(testCase.pod); testCase.expected != actual {
t.Errorf("[%d]: invalid qos pod %s, expected: %s, actual: %s", id, testCase.pod.Name, testCase.expected, actual)
}
@@ -158,17 +230,17 @@ func addResource(rName, value string, rl v1.ResourceList) v1.ResourceList {
return rl
}
func getResourceRequirements(requests, limits v1.ResourceList) v1.ResourceRequirements {
func getResourceRequirements(requests, limits v1.ResourceList) *v1.ResourceRequirements {
res := v1.ResourceRequirements{}
res.Requests = requests
res.Limits = limits
return res
return &res
}
func newContainer(name string, requests v1.ResourceList, limits v1.ResourceList) v1.Container {
return v1.Container{
Name: name,
Resources: getResourceRequirements(requests, limits),
Resources: *(getResourceRequirements(requests, limits)),
}
}
@@ -183,6 +255,14 @@ func newPod(name string, containers []v1.Container) *v1.Pod {
}
}
func newPodWithResources(name string, containers []v1.Container, podResources *v1.ResourceRequirements) *v1.Pod {
pod := newPod(name, containers)
if podResources != nil {
pod.Spec.Resources = podResources
}
return pod
}
func newPodWithInitContainers(name string, containers []v1.Container, initContainers []v1.Container) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{

View File

@@ -6750,6 +6750,7 @@ func autoConvert_v1_PodSpec_To_core_PodSpec(in *corev1.PodSpec, out *core.PodSpe
// INFO: in.HostUsers opted out of conversion generation
out.SchedulingGates = *(*[]core.PodSchedulingGate)(unsafe.Pointer(&in.SchedulingGates))
out.ResourceClaims = *(*[]core.PodResourceClaim)(unsafe.Pointer(&in.ResourceClaims))
out.Resources = (*core.ResourceRequirements)(unsafe.Pointer(in.Resources))
return nil
}
@@ -6805,6 +6806,7 @@ func autoConvert_core_PodSpec_To_v1_PodSpec(in *core.PodSpec, out *corev1.PodSpe
out.OS = (*corev1.PodOS)(unsafe.Pointer(in.OS))
out.SchedulingGates = *(*[]corev1.PodSchedulingGate)(unsafe.Pointer(&in.SchedulingGates))
out.ResourceClaims = *(*[]corev1.PodResourceClaim)(unsafe.Pointer(&in.ResourceClaims))
out.Resources = (*corev1.ResourceRequirements)(unsafe.Pointer(in.Resources))
return nil
}

View File

@@ -500,6 +500,10 @@ func SetObjectDefaults_Pod(in *corev1.Pod) {
}
}
SetDefaults_ResourceList(&in.Spec.Overhead)
if in.Spec.Resources != nil {
SetDefaults_ResourceList(&in.Spec.Resources.Limits)
SetDefaults_ResourceList(&in.Spec.Resources.Requests)
}
for i := range in.Status.InitContainerStatuses {
a := &in.Status.InitContainerStatuses[i]
SetDefaults_ResourceList(&a.AllocatedResources)
@@ -859,6 +863,10 @@ func SetObjectDefaults_PodTemplate(in *corev1.PodTemplate) {
}
}
SetDefaults_ResourceList(&in.Template.Spec.Overhead)
if in.Template.Spec.Resources != nil {
SetDefaults_ResourceList(&in.Template.Spec.Resources.Limits)
SetDefaults_ResourceList(&in.Template.Spec.Resources.Requests)
}
}
func SetObjectDefaults_PodTemplateList(in *corev1.PodTemplateList) {
@@ -1165,6 +1173,10 @@ func SetObjectDefaults_ReplicationController(in *corev1.ReplicationController) {
}
}
SetDefaults_ResourceList(&in.Spec.Template.Spec.Overhead)
if in.Spec.Template.Spec.Resources != nil {
SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Limits)
SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Requests)
}
}
}

View File

@@ -47,6 +47,7 @@ import (
"k8s.io/apimachinery/pkg/util/validation/field"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilsysctl "k8s.io/component-helpers/node/util/sysctl"
resourcehelper "k8s.io/component-helpers/resource"
schedulinghelper "k8s.io/component-helpers/scheduling/corev1"
kubeletapis "k8s.io/kubelet/pkg/apis"
@@ -333,7 +334,7 @@
// validateOverhead can be used to check whether the given Overhead is valid.
func validateOverhead(overhead core.ResourceList, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
// reuse the ResourceRequirements validation logic
return ValidateResourceRequirements(&core.ResourceRequirements{Limits: overhead}, nil, fldPath, opts)
return ValidateContainerResourceRequirements(&core.ResourceRequirements{Limits: overhead}, nil, fldPath, opts)
}
// Validates that given value is not negative.
@@ -3589,7 +3590,7 @@ func validateContainerCommon(ctr *core.Container, volumes map[string]core.Volume
allErrs = append(allErrs, ValidateVolumeMounts(ctr.VolumeMounts, volDevices, volumes, ctr, path.Child("volumeMounts"), opts)...)
allErrs = append(allErrs, ValidateVolumeDevices(ctr.VolumeDevices, volMounts, volumes, path.Child("volumeDevices"))...)
allErrs = append(allErrs, validatePullPolicy(ctr.ImagePullPolicy, path.Child("imagePullPolicy"))...)
allErrs = append(allErrs, ValidateResourceRequirements(&ctr.Resources, podClaimNames, path.Child("resources"), opts)...)
allErrs = append(allErrs, ValidateContainerResourceRequirements(&ctr.Resources, podClaimNames, path.Child("resources"), opts)...)
allErrs = append(allErrs, validateResizePolicy(ctr.ResizePolicy, path.Child("resizePolicy"), podRestartPolicy)...)
allErrs = append(allErrs, ValidateSecurityContext(ctr.SecurityContext, path.Child("securityContext"), hostUsers)...)
return allErrs
@@ -4048,6 +4049,8 @@ type PodValidationOptions struct {
AllowPodLifecycleSleepActionZeroValue bool
// Allow only Recursive value of SELinuxChangePolicy.
AllowOnlyRecursiveSELinuxChangePolicy bool
// Indicates whether PodLevelResources feature is enabled or disabled.
PodLevelResourcesEnabled bool
}
// validatePodMetadataAndSpec tests if required fields in the pod.metadata and pod.spec are set,
@@ -4202,6 +4205,11 @@ func ValidatePodSpec(spec *core.PodSpec, podMeta *metav1.ObjectMeta, fldPath *fi
allErrs = append(allErrs, validateContainers(spec.Containers, vols, podClaimNames, gracePeriod, fldPath.Child("containers"), opts, &spec.RestartPolicy, hostUsers)...)
allErrs = append(allErrs, validateInitContainers(spec.InitContainers, spec.Containers, vols, podClaimNames, gracePeriod, fldPath.Child("initContainers"), opts, &spec.RestartPolicy, hostUsers)...)
allErrs = append(allErrs, validateEphemeralContainers(spec.EphemeralContainers, spec.Containers, spec.InitContainers, vols, podClaimNames, fldPath.Child("ephemeralContainers"), opts, &spec.RestartPolicy, hostUsers)...)
if opts.PodLevelResourcesEnabled {
allErrs = append(allErrs, validatePodResources(spec, podClaimNames, fldPath.Child("resources"), opts)...)
}
allErrs = append(allErrs, validatePodHostNetworkDeps(spec, fldPath, opts)...)
allErrs = append(allErrs, validateRestartPolicy(&spec.RestartPolicy, fldPath.Child("restartPolicy"))...)
allErrs = append(allErrs, validateDNSPolicy(&spec.DNSPolicy, fldPath.Child("dnsPolicy"))...)
@@ -4282,6 +4290,77 @@ func ValidatePodSpec(spec *core.PodSpec, podMeta *metav1.ObjectMeta, fldPath *fi
return allErrs
}
func validatePodResources(spec *core.PodSpec, podClaimNames sets.Set[string], fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
if spec.Resources == nil {
return nil
}
allErrs := field.ErrorList{}
if spec.Resources.Claims != nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("claims"), "claims cannot be set for Resources at pod-level"))
}
// validatePodResourceRequirements checks that resource names and quantities
// are valid, and that requests do not exceed limits.
allErrs = append(allErrs, validatePodResourceRequirements(spec.Resources, podClaimNames, fldPath, opts)...)
allErrs = append(allErrs, validatePodResourceConsistency(spec, fldPath)...)
return allErrs
}
// validatePodResourceConsistency checks if aggregate container-level requests are
// less than or equal to pod-level requests, and individual container-level limits
// are less than or equal to pod-level limits.
func validatePodResourceConsistency(spec *core.PodSpec, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
// Convert the *core.PodSpec to *v1.PodSpec to satisfy the call to the
// resourcehelper.AggregateContainerRequests helper in the subsequent lines,
// which requires a *v1.Pod object (containing a *v1.PodSpec).
v1PodSpec := &v1.PodSpec{}
// TODO(ndixita): Convert_core_PodSpec_To_v1_PodSpec is risky. Add a copy of
// AggregateContainerRequests against internal core.Pod type for beta release of
// PodLevelResources feature.
if err := corev1.Convert_core_PodSpec_To_v1_PodSpec(spec, v1PodSpec, nil); err != nil {
allErrs = append(allErrs, field.InternalError(fldPath, fmt.Errorf("invalid %q: %v", fldPath, err.Error())))
}
reqPath := fldPath.Child("requests")
// resourcehelper.AggregateContainerRequests requires a Pod object to
// calculate the total resource requests of a pod. Hence a Pod object wrapping
// v1PodSpec, i.e. &v1.Pod{Spec: *v1PodSpec}, is created on the fly and passed
// to AggregateContainerRequests to facilitate proper resource calculation
// without modifying that method.
aggrContainerReqs := resourcehelper.AggregateContainerRequests(&v1.Pod{Spec: *v1PodSpec}, resourcehelper.PodResourcesOptions{})
// Pod-level requests must be >= aggregate requests of all containers in a pod.
for resourceName, ctrReqs := range aggrContainerReqs {
key := resourceName.String()
podSpecRequests := spec.Resources.Requests[core.ResourceName(key)]
fldPath := reqPath.Key(key)
if ctrReqs.Cmp(podSpecRequests) > 0 {
allErrs = append(allErrs, field.Invalid(fldPath, podSpecRequests.String(), fmt.Sprintf("must be greater than or equal to aggregate container requests of %s", ctrReqs.String())))
}
}
// Individual Container limits must be <= Pod-level limits.
for i, ctr := range spec.Containers {
for resourceName, ctrLimit := range ctr.Resources.Limits {
podSpecLimits, exists := spec.Resources.Limits[core.ResourceName(resourceName.String())]
if !exists {
continue
}
if ctrLimit.Cmp(podSpecLimits) > 0 {
fldPath := fldPath.Child("containers").Index(i).Key(resourceName.String()).Child("limits")
allErrs = append(allErrs, field.Invalid(fldPath, ctrLimit.String(), fmt.Sprintf("must be less than or equal to pod limits of %s", podSpecLimits.String())))
}
}
}
return allErrs
}
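// A worked example of the consistency rules above, with made-up values: given
// pod-level requests of cpu=10, two containers requesting cpu=6 and cpu=8
// aggregate to 14, so spec.resources.requests[cpu]=10 is rejected. Likewise a
// single container limit of cpu=11 is rejected against a pod-level limit of
// cpu=10, while aggregate container limits exceeding the pod limit are fine
// as long as each individual limit fits.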
func validateLinux(spec *core.PodSpec, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
securityContext := spec.SecurityContext
@ -5486,6 +5565,16 @@ func ValidatePodResize(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel
allErrs := ValidateImmutableField(&newPod.ObjectMeta, &oldPod.ObjectMeta, fldPath)
allErrs = append(allErrs, validatePodMetadataAndSpec(newPod, opts)...)
// pods with pod-level resources cannot be resized
isPodLevelResourcesSet := func(pod *core.Pod) bool {
return pod.Spec.Resources != nil &&
(len(pod.Spec.Resources.Requests)+len(pod.Spec.Resources.Limits) > 0)
}
if isPodLevelResourcesSet(oldPod) || isPodLevelResourcesSet(newPod) {
return field.ErrorList{field.Forbidden(field.NewPath(""), "pods with pod-level resources cannot be resized")}
}
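// For example, a pod created with spec.resources set (requests or limits)
// cannot have even a container-level cpu or memory value mutated through the
// resize subresource; the whole update is rejected with the Forbidden error
// above.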
// static pods cannot be resized.
if _, ok := oldPod.Annotations[core.MirrorPodAnnotationKey]; ok {
return field.ErrorList{field.Forbidden(field.NewPath(""), "static pods cannot be resized")}
@ -6424,6 +6513,22 @@ func validateContainerResourceName(value core.ResourceName, fldPath *field.Path)
return allErrs
}
// validatePodResourceName verifies that:
// 1. The resource name is a valid compute resource name for pod-level specification.
// 2. The resource is supported by the PodLevelResources feature.
func validatePodResourceName(resourceName core.ResourceName, fldPath *field.Path) field.ErrorList {
allErrs := validateResourceName(resourceName, fldPath)
if len(allErrs) != 0 {
return allErrs
}
if !resourcehelper.IsSupportedPodLevelResource(v1.ResourceName(resourceName)) {
return append(allErrs, field.NotSupported(fldPath, resourceName, sets.List(resourcehelper.SupportedPodLevelResources())))
}
return allErrs
}
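// For illustration: "cpu" and "memory" pass this check, while otherwise-valid
// compute resources such as "ephemeral-storage" or "hugepages-2Mi" fail with
// a NotSupported error, because only cpu and memory are supported at pod
// level.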
// Validate resource names that can go in a resource quota
// Refer to docs/design/resources.md for more details.
func ValidateResourceQuotaResourceName(value core.ResourceName, fldPath *field.Path) field.ErrorList {
@ -6771,8 +6876,16 @@ func validateBasicResource(quantity resource.Quantity, fldPath *field.Path) fiel
return field.ErrorList{}
}
func validatePodResourceRequirements(requirements *core.ResourceRequirements, podClaimNames sets.Set[string], fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
return validateResourceRequirements(requirements, validatePodResourceName, podClaimNames, fldPath, opts)
}
func ValidateContainerResourceRequirements(requirements *core.ResourceRequirements, podClaimNames sets.Set[string], fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
return validateResourceRequirements(requirements, validateContainerResourceName, podClaimNames, fldPath, opts)
}
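// Both wrappers delegate to validateResourceRequirements and differ only in
// the resource-name validator they plug in: validatePodResourceName restricts
// pod-level specs to cpu and memory, while validateContainerResourceName
// accepts the broader set of container resource names, including extended
// resources.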
// Validates resource requirement spec.
func ValidateResourceRequirements(requirements *core.ResourceRequirements, podClaimNames sets.Set[string], fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
func validateResourceRequirements(requirements *core.ResourceRequirements, resourceNameFn func(core.ResourceName, *field.Path) field.ErrorList, podClaimNames sets.Set[string], fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
limPath := fldPath.Child("limits")
reqPath := fldPath.Child("requests")
@ -6785,7 +6898,7 @@ func ValidateResourceRequirements(requirements *core.ResourceRequirements, podCl
fldPath := limPath.Key(string(resourceName))
// Validate resource name.
allErrs = append(allErrs, validateContainerResourceName(resourceName, fldPath)...)
allErrs = append(allErrs, resourceNameFn(resourceName, fldPath)...)
// Validate resource quantity.
allErrs = append(allErrs, ValidateResourceQuantityValue(resourceName, quantity, fldPath)...)
@ -6804,7 +6917,8 @@ func ValidateResourceRequirements(requirements *core.ResourceRequirements, podCl
for resourceName, quantity := range requirements.Requests {
fldPath := reqPath.Key(string(resourceName))
// Validate resource name.
allErrs = append(allErrs, validateContainerResourceName(resourceName, fldPath)...)
allErrs = append(allErrs, resourceNameFn(resourceName, fldPath)...)
// Validate resource quantity.
allErrs = append(allErrs, ValidateResourceQuantityValue(resourceName, quantity, fldPath)...)

View File

@ -5758,7 +5758,7 @@ func TestAlphaLocalStorageCapacityIsolation(t *testing.T) {
resource.BinarySI),
},
}
if errs := ValidateResourceRequirements(&containerLimitCase, nil, field.NewPath("resources"), PodValidationOptions{}); len(errs) != 0 {
if errs := ValidateContainerResourceRequirements(&containerLimitCase, nil, field.NewPath("resources"), PodValidationOptions{}); len(errs) != 0 {
t.Errorf("expected success: %v", errs)
}
}
@ -12299,7 +12299,6 @@ func TestValidatePodCreateWithSchedulingGates(t *testing.T) {
}
func TestValidatePodUpdate(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)
var (
activeDeadlineSecondsZero = int64(0)
activeDeadlineSecondsNegative = int64(-30)
@ -13670,7 +13669,41 @@ func TestValidatePodUpdate(t *testing.T) {
err: "pod updates may not change fields other than",
test: "the podAntiAffinity cannot be updated on gated pods",
},
{
new: *podtest.MakePod("pod",
podtest.SetContainers(podtest.MakeContainer("container",
podtest.SetContainerResources(core.ResourceRequirements{
Limits: getResources("200m", "0", "1Gi", ""),
}))),
podtest.SetPodResources(&core.ResourceRequirements{Limits: getResources("100m", "200Mi", "", "")}),
),
old: *podtest.MakePod("pod",
podtest.SetContainers(podtest.MakeContainer("container",
podtest.SetContainerResources(core.ResourceRequirements{
Limits: getResources("100m", "0", "1Gi", ""),
}))),
),
err: "pod updates may not change fields other than",
test: "cpu limit change with pod-level resources",
}, {
new: *podtest.MakePod("pod",
podtest.SetContainers(podtest.MakeContainer("container",
podtest.SetContainerResources(core.ResourceRequirements{
Limits: getResources("100m", "100Mi", "", ""),
}))),
),
old: *podtest.MakePod("pod",
podtest.SetContainers(podtest.MakeContainer("container",
podtest.SetContainerResources(core.ResourceRequirements{
Limits: getResources("100m", "200Mi", "", ""),
}))),
podtest.SetPodResources(&core.ResourceRequirements{Limits: getResources("100m", "200Mi", "", "")}),
),
err: "pod updates may not change fields other than",
test: "memory limit change with pod-level resources",
},
}
for _, test := range tests {
test.new.ObjectMeta.ResourceVersion = "1"
test.old.ObjectMeta.ResourceVersion = "1"
@ -18643,6 +18676,260 @@ func TestValidateServiceUpdate(t *testing.T) {
}
}
func TestValidatePodResourceConsistency(t *testing.T) {
path := field.NewPath("resources")
tests := []struct {
name string
podResources core.ResourceRequirements
containers []core.Container
expectedErrors []string
}{{
name: "aggregate container requests less than pod requests",
podResources: core.ResourceRequirements{
Requests: core.ResourceList{
core.ResourceCPU: resource.MustParse("10"),
core.ResourceMemory: resource.MustParse("10Mi"),
},
},
containers: []core.Container{
{
Resources: core.ResourceRequirements{
Requests: core.ResourceList{
core.ResourceCPU: resource.MustParse("5"),
core.ResourceMemory: resource.MustParse("5Mi"),
},
},
}, {
Resources: core.ResourceRequirements{
Requests: core.ResourceList{
core.ResourceCPU: resource.MustParse("4"),
core.ResourceMemory: resource.MustParse("3Mi"),
},
},
},
},
}, {
name: "aggregate container requests equal to pod requests",
podResources: core.ResourceRequirements{
Requests: core.ResourceList{
core.ResourceCPU: resource.MustParse("10"),
core.ResourceMemory: resource.MustParse("10Mi"),
},
},
containers: []core.Container{
{
Resources: core.ResourceRequirements{
Requests: core.ResourceList{
core.ResourceCPU: resource.MustParse("5"),
core.ResourceMemory: resource.MustParse("5Mi"),
},
},
}, {
Resources: core.ResourceRequirements{
Requests: core.ResourceList{
core.ResourceCPU: resource.MustParse("5"),
core.ResourceMemory: resource.MustParse("5Mi"),
},
},
},
},
}, {
name: "aggregate container requests greater than pod requests",
podResources: core.ResourceRequirements{
Requests: core.ResourceList{
core.ResourceCPU: resource.MustParse("10"),
core.ResourceMemory: resource.MustParse("10Mi"),
},
},
containers: []core.Container{
{
Resources: core.ResourceRequirements{
Requests: core.ResourceList{
core.ResourceCPU: resource.MustParse("6"),
core.ResourceMemory: resource.MustParse("5Mi"),
},
},
}, {
Resources: core.ResourceRequirements{
Requests: core.ResourceList{
core.ResourceCPU: resource.MustParse("8"),
core.ResourceMemory: resource.MustParse("3Mi"),
},
},
},
},
expectedErrors: []string{"must be greater than or equal to aggregate container requests"},
}, {
name: "aggregate container limits less than pod limits",
podResources: core.ResourceRequirements{
Limits: core.ResourceList{
core.ResourceCPU: resource.MustParse("10"),
core.ResourceMemory: resource.MustParse("10Mi"),
},
},
containers: []core.Container{
{
Resources: core.ResourceRequirements{
Limits: core.ResourceList{
core.ResourceCPU: resource.MustParse("5"),
core.ResourceMemory: resource.MustParse("5Mi"),
},
},
}, {
Resources: core.ResourceRequirements{
Limits: core.ResourceList{
core.ResourceCPU: resource.MustParse("4"),
core.ResourceMemory: resource.MustParse("3Mi"),
},
},
},
},
}, {
name: "aggregate container limits equal to pod limits",
podResources: core.ResourceRequirements{
Limits: core.ResourceList{
core.ResourceCPU: resource.MustParse("10"),
core.ResourceMemory: resource.MustParse("10Mi"),
},
},
containers: []core.Container{
{
Resources: core.ResourceRequirements{
Limits: core.ResourceList{
core.ResourceCPU: resource.MustParse("5"),
core.ResourceMemory: resource.MustParse("5Mi"),
},
},
}, {
Resources: core.ResourceRequirements{
Limits: core.ResourceList{
core.ResourceCPU: resource.MustParse("5"),
core.ResourceMemory: resource.MustParse("5Mi"),
},
},
},
},
}, {
name: "aggregate container limits greater than pod limits",
podResources: core.ResourceRequirements{
Limits: core.ResourceList{
core.ResourceCPU: resource.MustParse("10"),
core.ResourceMemory: resource.MustParse("10Mi"),
},
},
containers: []core.Container{
{
Resources: core.ResourceRequirements{
Limits: core.ResourceList{
core.ResourceCPU: resource.MustParse("5"),
core.ResourceMemory: resource.MustParse("5Mi"),
},
},
}, {
Resources: core.ResourceRequirements{
Limits: core.ResourceList{
core.ResourceCPU: resource.MustParse("6"),
core.ResourceMemory: resource.MustParse("9Mi"),
},
},
},
},
}, {
name: "indivdual container limits greater than pod limits",
podResources: core.ResourceRequirements{
Limits: core.ResourceList{
core.ResourceCPU: resource.MustParse("10"),
core.ResourceMemory: resource.MustParse("10Mi"),
},
},
containers: []core.Container{
{
Resources: core.ResourceRequirements{
Limits: core.ResourceList{
core.ResourceCPU: resource.MustParse("11"),
core.ResourceMemory: resource.MustParse("12Mi"),
},
},
},
},
expectedErrors: []string{
"must be less than or equal to pod limits",
"must be less than or equal to pod limits",
},
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
spec := core.PodSpec{
Resources: &tc.podResources,
Containers: tc.containers,
}
errs := validatePodResourceConsistency(&spec, path)
if len(errs) != len(tc.expectedErrors) {
t.Errorf("expected %d errors, got %d errors, got errors: %v", len(tc.expectedErrors), len(errs), errs)
}
for _, expectedErr := range tc.expectedErrors {
expectedErrExists := false
for _, gotErr := range errs {
if strings.Contains(gotErr.Error(), expectedErr) {
expectedErrExists = true
break
}
}
if !expectedErrExists {
t.Errorf("expected: %v, got errors: %v", expectedErr, errs)
}
}
})
}
}
func TestValidatePodResourceNames(t *testing.T) {
table := []struct {
input core.ResourceName
expectedFailure bool
}{
{"memory", false},
{"cpu", false},
{"storage", true},
{"requests.cpu", true},
{"requests.memory", true},
{"requests.storage", true},
{"limits.cpu", true},
{"limits.memory", true},
{"limits.storage", true},
{"network", true},
{"disk", true},
{"", true},
{".", true},
{"..", true},
{"my.favorite.app.co/12345", true},
{"my.favorite.app.co/_12345", true},
{"my.favorite.app.co/12345_", true},
{"kubernetes.io/..", true},
{core.ResourceName("kubernetes.io/" + strings.Repeat("a", 64)), true},
{core.ResourceName("kubernetes.io/" + strings.Repeat("a", 64)), true},
{core.ResourceName("kubernetes.io/" + core.ResourceCPU), true},
{core.ResourceName("kubernetes.io/" + core.ResourceMemory), true},
{"kubernetes.io//", true},
{"kubernetes.io", true},
{"kubernetes.io/will/not/work/", true},
}
for _, item := range table {
errs := validatePodResourceName(item.input, field.NewPath("field"))
if len(errs) != 0 && !item.expectedFailure {
t.Errorf("expected no failure for input %q, got: %v", item.input, errs)
}
if len(errs) == 0 && item.expectedFailure {
t.Errorf("expected failure for input %q", item.input)
}
}
}
func TestValidateResourceNames(t *testing.T) {
table := []struct {
input core.ResourceName
@ -20656,6 +20943,7 @@ func TestValidateOSFields(t *testing.T) {
"ResourceClaims[*].Name",
"ResourceClaims[*].ResourceClaimName",
"ResourceClaims[*].ResourceClaimTemplateName",
"Resources",
"RestartPolicy",
"RuntimeClassName",
"SchedulerName",
@ -23082,10 +23370,14 @@ func TestValidatePodTemplateSpecSeccomp(t *testing.T) {
func TestValidateResourceRequirements(t *testing.T) {
path := field.NewPath("resources")
// TODO(ndixita): refactor the tests to check that the returned errors match
// the expected errors.
tests := []struct {
name string
requirements core.ResourceRequirements
opts PodValidationOptions
validateFn func(requirements *core.ResourceRequirements,
podClaimNames sets.Set[string], fldPath *field.Path,
opts PodValidationOptions) field.ErrorList
}{{
name: "limits and requests of hugepage resource are equal",
requirements: core.ResourceRequirements{
@ -23098,7 +23390,7 @@ func TestValidateResourceRequirements(t *testing.T) {
core.ResourceName(core.ResourceHugePagesPrefix + "2Mi"): resource.MustParse("2Mi"),
},
},
opts: PodValidationOptions{},
validateFn: ValidateContainerResourceRequirements,
}, {
name: "limits and requests of memory resource are equal",
requirements: core.ResourceRequirements{
@ -23109,7 +23401,7 @@ func TestValidateResourceRequirements(t *testing.T) {
core.ResourceMemory: resource.MustParse("2Mi"),
},
},
opts: PodValidationOptions{},
validateFn: ValidateContainerResourceRequirements,
}, {
name: "limits and requests of cpu resource are equal",
requirements: core.ResourceRequirements{
@ -23120,13 +23412,36 @@ func TestValidateResourceRequirements(t *testing.T) {
core.ResourceCPU: resource.MustParse("10"),
},
},
opts: PodValidationOptions{},
validateFn: ValidateContainerResourceRequirements,
},
{
name: "limits and requests of memory resource are equal",
requirements: core.ResourceRequirements{
Limits: core.ResourceList{
core.ResourceMemory: resource.MustParse("2Mi"),
},
Requests: core.ResourceList{
core.ResourceMemory: resource.MustParse("2Mi"),
},
},
validateFn: validatePodResourceRequirements,
}, {
name: "limits and requests of cpu resource are equal",
requirements: core.ResourceRequirements{
Limits: core.ResourceList{
core.ResourceCPU: resource.MustParse("10"),
},
Requests: core.ResourceList{
core.ResourceCPU: resource.MustParse("10"),
},
},
validateFn: validatePodResourceRequirements,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
if errs := ValidateResourceRequirements(&tc.requirements, nil, path, tc.opts); len(errs) != 0 {
if errs := tc.validateFn(&tc.requirements, nil, path, PodValidationOptions{}); len(errs) != 0 {
t.Errorf("unexpected errors: %v", errs)
}
})
@ -23135,7 +23450,9 @@ func TestValidateResourceRequirements(t *testing.T) {
errTests := []struct {
name string
requirements core.ResourceRequirements
opts PodValidationOptions
validateFn func(requirements *core.ResourceRequirements,
podClaimNames sets.Set[string], fldPath *field.Path,
opts PodValidationOptions) field.ErrorList
}{{
name: "hugepage resource without cpu or memory",
requirements: core.ResourceRequirements{
@ -23146,13 +23463,69 @@ func TestValidateResourceRequirements(t *testing.T) {
core.ResourceName(core.ResourceHugePagesPrefix + "2Mi"): resource.MustParse("2Mi"),
},
},
opts: PodValidationOptions{},
validateFn: ValidateContainerResourceRequirements,
}, {
name: "pod resource with hugepages",
requirements: core.ResourceRequirements{
Limits: core.ResourceList{
core.ResourceName(core.ResourceHugePagesPrefix + "2Mi"): resource.MustParse("2Mi"),
},
Requests: core.ResourceList{
core.ResourceName(core.ResourceHugePagesPrefix + "2Mi"): resource.MustParse("2Mi"),
},
},
validateFn: validatePodResourceRequirements,
}, {
name: "pod resource with ephemeral-storage",
requirements: core.ResourceRequirements{
Limits: core.ResourceList{
core.ResourceName(core.ResourceEphemeralStorage): resource.MustParse("2Mi"),
},
Requests: core.ResourceList{
core.ResourceName(core.ResourceEphemeralStorage + "2Mi"): resource.MustParse("2Mi"),
},
},
validateFn: validatePodResourceRequirements,
}, {
name: "pod resource with unsupported prefixed resources",
requirements: core.ResourceRequirements{
Limits: core.ResourceList{
core.ResourceName("kubernetesio/" + core.ResourceCPU): resource.MustParse("2"),
},
Requests: core.ResourceList{
core.ResourceName("kubernetesio/" + core.ResourceMemory): resource.MustParse("2"),
},
},
validateFn: validatePodResourceRequirements,
}, {
name: "pod resource with unsupported native resources",
requirements: core.ResourceRequirements{
Limits: core.ResourceList{
core.ResourceName("kubernetes.io/" + strings.Repeat("a", 63)): resource.MustParse("2"),
},
Requests: core.ResourceList{
core.ResourceName("kubernetes.io/" + strings.Repeat("a", 63)): resource.MustParse("2"),
},
},
validateFn: validatePodResourceRequirements,
},
{
name: "pod resource with unsupported empty native resource name",
requirements: core.ResourceRequirements{
Limits: core.ResourceList{
core.ResourceName("kubernetes.io/"): resource.MustParse("2"),
},
Requests: core.ResourceList{
core.ResourceName("kubernetes.io"): resource.MustParse("2"),
},
},
validateFn: validatePodResourceRequirements,
},
}
for _, tc := range errTests {
t.Run(tc.name, func(t *testing.T) {
if errs := ValidateResourceRequirements(&tc.requirements, nil, path, tc.opts); len(errs) == 0 {
if errs := tc.validateFn(&tc.requirements, nil, path, PodValidationOptions{}); len(errs) == 0 {
t.Error("expected errors")
}
})
@ -25112,6 +25485,148 @@ func TestValidatePodResize(t *testing.T) {
new *core.Pod
err string
}{
{
test: "pod-level resources with container cpu limit change",
new: podtest.MakePod("pod",
podtest.SetContainers(podtest.MakeContainer("container",
podtest.SetContainerResources(core.ResourceRequirements{
Limits: getResources("100m", "100Mi", "", ""),
}))),
podtest.SetPodResources(&core.ResourceRequirements{Limits: getResources("100m", "200Mi", "", "")}),
),
old: podtest.MakePod("pod",
podtest.SetContainers(podtest.MakeContainer("container",
podtest.SetContainerResources(core.ResourceRequirements{
Limits: getResources("200m", "100Mi", "", ""),
}))),
podtest.SetPodResources(&core.ResourceRequirements{Limits: getResources("100m", "200Mi", "", "")}),
),
err: "pods with pod-level resources cannot be resized",
}, {
test: "pod-level resources with container memory limit change",
new: podtest.MakePod("pod",
podtest.SetContainers(podtest.MakeContainer("container",
podtest.SetContainerResources(core.ResourceRequirements{
Limits: getResources("100m", "100Mi", "", ""),
}))),
podtest.SetPodResources(&core.ResourceRequirements{Limits: getResources("100m", "200Mi", "", "")}),
),
old: podtest.MakePod("pod",
podtest.SetContainers(podtest.MakeContainer("container",
podtest.SetContainerResources(core.ResourceRequirements{
Limits: getResources("100m", "200Mi", "", ""),
}))),
podtest.SetPodResources(&core.ResourceRequirements{Limits: getResources("100m", "200Mi", "", "")}),
),
err: "pods with pod-level resources cannot be resized",
},
{
test: "pod-level resources with container cpu request change",
new: podtest.MakePod("pod",
podtest.SetContainers(podtest.MakeContainer("container",
podtest.SetContainerResources(core.ResourceRequirements{
Requests: getResources("100m", "200Mi", "", ""),
}))),
podtest.SetPodResources(&core.ResourceRequirements{Limits: getResources("100m", "200Mi", "", "")}),
),
old: podtest.MakePod("pod",
podtest.SetContainers(podtest.MakeContainer("container",
podtest.SetContainerResources(core.ResourceRequirements{
Requests: getResources("200m", "200Mi", "", ""),
}))),
podtest.SetPodResources(&core.ResourceRequirements{Limits: getResources("100m", "200Mi", "", "")}),
),
err: "pods with pod-level resources cannot be resized",
}, {
test: "pod-level resources with container memory request change",
new: podtest.MakePod("pod",
podtest.SetContainers(podtest.MakeContainer("container",
podtest.SetContainerResources(core.ResourceRequirements{
Requests: getResources("100m", "100Mi", "", ""),
}))),
podtest.SetPodResources(&core.ResourceRequirements{Limits: getResources("100m", "200Mi", "", "")}),
),
old: podtest.MakePod("pod",
podtest.SetContainers(podtest.MakeContainer("container",
podtest.SetContainerResources(core.ResourceRequirements{
Requests: getResources("100m", "200Mi", "", ""),
}))),
podtest.SetPodResources(&core.ResourceRequirements{Limits: getResources("100m", "200Mi", "", "")}),
),
err: "pods with pod-level resources cannot be resized",
},
{
test: "pod-level resources with pod-level memory limit change",
new: podtest.MakePod("pod",
podtest.SetContainers(podtest.MakeContainer("container",
podtest.SetContainerResources(core.ResourceRequirements{
Requests: getResources("100m", "100Mi", "", ""),
}))),
podtest.SetPodResources(&core.ResourceRequirements{Limits: getResources("200m", "200Mi", "", "")}),
),
old: podtest.MakePod("pod",
podtest.SetContainers(podtest.MakeContainer("container",
podtest.SetContainerResources(core.ResourceRequirements{
Requests: getResources("100m", "100Mi", "", ""),
}))),
podtest.SetPodResources(&core.ResourceRequirements{Limits: getResources("200m", "100Mi", "", "")}),
),
err: "pods with pod-level resources cannot be resized",
},
{
test: "pod-level resources with pod-level memory request change",
new: podtest.MakePod("pod",
podtest.SetContainers(podtest.MakeContainer("container",
podtest.SetContainerResources(core.ResourceRequirements{
Requests: getResources("100m", "100Mi", "", ""),
}))),
podtest.SetPodResources(&core.ResourceRequirements{Requests: getResources("200m", "200Mi", "", "")}),
),
old: podtest.MakePod("pod",
podtest.SetContainers(podtest.MakeContainer("container",
podtest.SetContainerResources(core.ResourceRequirements{
Requests: getResources("100m", "100Mi", "", ""),
}))),
podtest.SetPodResources(&core.ResourceRequirements{Requests: getResources("200m", "100Mi", "", "")}),
),
err: "pods with pod-level resources cannot be resized",
},
{
test: "pod-level resources with pod-level cpu limit change",
new: podtest.MakePod("pod",
podtest.SetContainers(podtest.MakeContainer("container",
podtest.SetContainerResources(core.ResourceRequirements{
Requests: getResources("100m", "100Mi", "", ""),
}))),
podtest.SetPodResources(&core.ResourceRequirements{Limits: getResources("200m", "200Mi", "", "")}),
),
old: podtest.MakePod("pod",
podtest.SetContainers(podtest.MakeContainer("container",
podtest.SetContainerResources(core.ResourceRequirements{
Requests: getResources("100m", "100Mi", "", ""),
}))),
podtest.SetPodResources(&core.ResourceRequirements{Limits: getResources("100m", "200Mi", "", "")}),
),
err: "pods with pod-level resources cannot be resized",
},
{
test: "pod-level resources with pod-level cpu request change",
new: podtest.MakePod("pod",
podtest.SetContainers(podtest.MakeContainer("container",
podtest.SetContainerResources(core.ResourceRequirements{
Requests: getResources("100m", "100Mi", "", ""),
}))),
podtest.SetPodResources(&core.ResourceRequirements{Requests: getResources("100m", "200Mi", "", "")}),
),
old: podtest.MakePod("pod",
podtest.SetContainers(podtest.MakeContainer("container",
podtest.SetContainerResources(core.ResourceRequirements{
Requests: getResources("100m", "100Mi", "", ""),
}))),
podtest.SetPodResources(&core.ResourceRequirements{Requests: getResources("200m", "200Mi", "", "")}),
),
err: "pods with pod-level resources cannot be resized",
},
{
test: "cpu limit change",
old: mkPod(core.ResourceList{}, getResources("100m", "0", "1Gi", "")),

View File

@ -4373,6 +4373,11 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = new(ResourceRequirements)
(*in).DeepCopyInto(*out)
}
return
}

View File

@ -341,6 +341,10 @@ func SetObjectDefaults_DaemonSet(in *extensionsv1beta1.DaemonSet) {
}
}
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Overhead)
if in.Spec.Template.Spec.Resources != nil {
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Limits)
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Requests)
}
}
func SetObjectDefaults_DaemonSetList(in *extensionsv1beta1.DaemonSetList) {
@ -646,6 +650,10 @@ func SetObjectDefaults_Deployment(in *extensionsv1beta1.Deployment) {
}
}
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Overhead)
if in.Spec.Template.Spec.Resources != nil {
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Limits)
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Requests)
}
}
func SetObjectDefaults_DeploymentList(in *extensionsv1beta1.DeploymentList) {
@ -981,6 +989,10 @@ func SetObjectDefaults_ReplicaSet(in *extensionsv1beta1.ReplicaSet) {
}
}
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Overhead)
if in.Spec.Template.Spec.Resources != nil {
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Limits)
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Requests)
}
}
func SetObjectDefaults_ReplicaSetList(in *extensionsv1beta1.ReplicaSetList) {

View File

@ -54,7 +54,7 @@ func ValidateRuntimeClassUpdate(new, old *node.RuntimeClass) field.ErrorList {
func validateOverhead(overhead *node.Overhead, fldPath *field.Path) field.ErrorList {
// reuse the ResourceRequirements validation logic
return corevalidation.ValidateResourceRequirements(&core.ResourceRequirements{Limits: overhead.PodFixed}, nil, fldPath,
return corevalidation.ValidateContainerResourceRequirements(&core.ResourceRequirements{Limits: overhead.PodFixed}, nil, fldPath,
corevalidation.PodValidationOptions{})
}

View File

@ -852,6 +852,13 @@ const (
// Enables external service account JWT signing and key management.
// If enabled, it allows passing --service-account-signing-endpoint flag to configure external signer.
ExternalServiceAccountTokenSigner featuregate.Feature = "ExternalServiceAccountTokenSigner"
// owner: @ndixita
// kep: https://kep.k8s.io/2837
// alpha: 1.32
//
// Enables specifying resources at pod-level.
PodLevelResources featuregate.Feature = "PodLevelResources"
)
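// In unit tests, the gate can be flipped for the duration of a single test,
// e.g.:
//
//	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodLevelResources, true)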
func init() {

View File

@ -580,6 +580,10 @@ var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.35
},
PodLevelResources: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
},
PodLifecycleSleepAction: {
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},

View File

@ -28583,12 +28583,18 @@ func schema_k8sio_api_core_v1_PodSpec(ref common.ReferenceCallback) common.OpenA
},
},
},
"resources": {
SchemaProps: spec.SchemaProps{
Description: "Resources is the total amount of CPU and Memory resources required by all containers in the pod. It supports specifying Requests and Limits for \"cpu\" and \"memory\" resource names only. ResourceClaims are not supported.\n\nThis field enables fine-grained control over resource allocation for the entire pod, allowing resource sharing among containers in a pod.\n\nThis is an alpha field and requires enabling the PodLevelResources feature gate.",
Ref: ref("k8s.io/api/core/v1.ResourceRequirements"),
},
},
},
Required: []string{"containers"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.EphemeralContainer", "k8s.io/api/core/v1.HostAlias", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodOS", "k8s.io/api/core/v1.PodReadinessGate", "k8s.io/api/core/v1.PodResourceClaim", "k8s.io/api/core/v1.PodSchedulingGate", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.TopologySpreadConstraint", "k8s.io/api/core/v1.Volume", "k8s.io/apimachinery/pkg/api/resource.Quantity"},
"k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.EphemeralContainer", "k8s.io/api/core/v1.HostAlias", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodOS", "k8s.io/api/core/v1.PodReadinessGate", "k8s.io/api/core/v1.PodResourceClaim", "k8s.io/api/core/v1.PodSchedulingGate", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.TopologySpreadConstraint", "k8s.io/api/core/v1.Volume", "k8s.io/apimachinery/pkg/api/resource.Quantity"},
}
}

View File

@ -118,9 +118,11 @@ func HugePageLimits(resourceList v1.ResourceList) map[int64]int64 {
// ResourceConfigForPod takes the input pod and outputs the cgroup resource config.
func ResourceConfigForPod(allocatedPod *v1.Pod, enforceCPULimits bool, cpuPeriod uint64, enforceMemoryQoS bool) *ResourceConfig {
podLevelResourcesEnabled := utilfeature.DefaultFeatureGate.Enabled(kubefeatures.PodLevelResources)
// sum requests and limits.
reqs := resource.PodRequests(allocatedPod, resource.PodResourcesOptions{
// pod is already configured to the allocated resources, and we explicitly don't want to use
// the actual resources if we're instantiating a resize.
// SkipPodLevelResources is set to false when PodLevelResources feature is enabled.
SkipPodLevelResources: !podLevelResourcesEnabled,
UseStatusResources: false,
})
// track if limits were applied for each resource.
@ -128,6 +130,8 @@ func ResourceConfigForPod(allocatedPod *v1.Pod, enforceCPULimits bool, cpuPeriod
cpuLimitsDeclared := true
limits := resource.PodLimits(allocatedPod, resource.PodResourcesOptions{
// SkipPodLevelResources is set to false when PodLevelResources feature is enabled.
SkipPodLevelResources: !podLevelResourcesEnabled,
ContainerFn: func(res v1.ResourceList, containerType resource.ContainerType) {
if res.Cpu().IsZero() {
cpuLimitsDeclared = false
@ -137,6 +141,16 @@ func ResourceConfigForPod(allocatedPod *v1.Pod, enforceCPULimits bool, cpuPeriod
}
},
})
if podLevelResourcesEnabled && resource.IsPodLevelResourcesSet(allocatedPod) {
if !allocatedPod.Spec.Resources.Limits.Cpu().IsZero() {
cpuLimitsDeclared = true
}
if !allocatedPod.Spec.Resources.Limits.Memory().IsZero() {
memoryLimitsDeclared = true
}
}
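// Worked example with made-up values: a pod with pod-level limits of cpu=200m
// and memory=200Mi whose single container declares no resources takes the
// branch above, so the resulting ResourceConfig carries a CPU quota and a
// memory limit instead of falling back to a shares-only burstable config.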
// map hugepage pagesize (bytes) to limits (bytes)
hugePageLimits := HugePageLimits(reqs)

View File

@ -74,6 +74,7 @@ func TestResourceConfigForPod(t *testing.T) {
expected *ResourceConfig
enforceCPULimits bool
quotaPeriod uint64 // in microseconds
podLevelResourcesEnabled bool
}{
"besteffort": {
pod: &v1.Pod{
@ -274,12 +275,126 @@ func TestResourceConfigForPod(t *testing.T) {
quotaPeriod: tunedQuotaPeriod,
expected: &ResourceConfig{CPUShares: &burstablePartialShares},
},
"burstable-with-pod-level-requests": {
pod: &v1.Pod{
Spec: v1.PodSpec{
Resources: &v1.ResourceRequirements{
Requests: getResourceList("100m", "100Mi"),
},
Containers: []v1.Container{
{
Name: "Container with no resources",
},
},
},
},
podLevelResourcesEnabled: true,
enforceCPULimits: true,
quotaPeriod: defaultQuotaPeriod,
expected: &ResourceConfig{CPUShares: &burstableShares},
},
"burstable-with-pod-and-container-level-requests": {
pod: &v1.Pod{
Spec: v1.PodSpec{
Resources: &v1.ResourceRequirements{
Requests: getResourceList("100m", "100Mi"),
},
Containers: []v1.Container{
{
Name: "Container with resources",
Resources: getResourceRequirements(getResourceList("10m", "50Mi"), getResourceList("", "")),
},
},
},
},
podLevelResourcesEnabled: true,
enforceCPULimits: true,
quotaPeriod: defaultQuotaPeriod,
expected: &ResourceConfig{CPUShares: &burstableShares},
},
"burstable-with-pod-level-resources": {
pod: &v1.Pod{
Spec: v1.PodSpec{
Resources: &v1.ResourceRequirements{
Requests: getResourceList("100m", "100Mi"),
Limits: getResourceList("200m", "200Mi"),
},
Containers: []v1.Container{
{
Name: "Container with no resources",
},
},
},
},
podLevelResourcesEnabled: true,
enforceCPULimits: true,
quotaPeriod: defaultQuotaPeriod,
expected: &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &burstableQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &burstableMemory},
},
"burstable-with-pod-and-container-level-resources": {
pod: &v1.Pod{
Spec: v1.PodSpec{
Resources: &v1.ResourceRequirements{
Requests: getResourceList("100m", "100Mi"),
Limits: getResourceList("200m", "200Mi"),
},
Containers: []v1.Container{
{
Name: "Container with resources",
Resources: getResourceRequirements(getResourceList("10m", "50Mi"), getResourceList("50m", "100Mi")),
},
},
},
},
podLevelResourcesEnabled: true,
enforceCPULimits: true,
quotaPeriod: defaultQuotaPeriod,
expected: &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &burstableQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &burstableMemory},
},
"guaranteed-with-pod-level-resources": {
pod: &v1.Pod{
Spec: v1.PodSpec{
Resources: &v1.ResourceRequirements{
Requests: getResourceList("100m", "100Mi"),
Limits: getResourceList("100m", "100Mi"),
},
Containers: []v1.Container{
{
Name: "Container with no resources",
},
},
},
},
podLevelResourcesEnabled: true,
enforceCPULimits: true,
quotaPeriod: defaultQuotaPeriod,
expected: &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &guaranteedQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
},
"guaranteed-with-pod-and-container-level-resources": {
pod: &v1.Pod{
Spec: v1.PodSpec{
Resources: &v1.ResourceRequirements{
Requests: getResourceList("100m", "100Mi"),
Limits: getResourceList("100m", "100Mi"),
},
Containers: []v1.Container{
{
Name: "Container with resources",
Resources: getResourceRequirements(getResourceList("10m", "50Mi"), getResourceList("50m", "100Mi")),
},
},
},
},
podLevelResourcesEnabled: true,
enforceCPULimits: true,
quotaPeriod: defaultQuotaPeriod,
expected: &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &guaranteedQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
},
}
for testName, testCase := range testCases {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.PodLevelResources, testCase.podLevelResourcesEnabled)
actual := ResourceConfigForPod(testCase.pod, testCase.enforceCPULimits, testCase.quotaPeriod, false)
if !reflect.DeepEqual(actual.CPUPeriod, testCase.expected.CPUPeriod) {
t.Errorf("unexpected result, test: %v, cpu period not as expected. Expected: %v, Actual:%v", testName, *testCase.expected.CPUPeriod, *actual.CPUPeriod)
}
@ -287,7 +402,7 @@ func TestResourceConfigForPod(t *testing.T) {
t.Errorf("unexpected result, test: %v, cpu quota not as expected. Expected: %v, Actual:%v", testName, *testCase.expected.CPUQuota, *actual.CPUQuota)
}
if !reflect.DeepEqual(actual.CPUShares, testCase.expected.CPUShares) {
t.Errorf("unexpected result, test: %v, cpu shares not as expected. Expected: %v, Actual:%v", testName, *testCase.expected.CPUShares, &actual.CPUShares)
t.Errorf("unexpected result, test: %v, cpu shares not as expected. Expected: %v, Actual:%v", testName, *testCase.expected.CPUShares, *actual.CPUShares)
}
if !reflect.DeepEqual(actual.Memory, testCase.expected.Memory) {
t.Errorf("unexpected result, test: %v, memory not as expected. Expected: %v, Actual:%v", testName, *testCase.expected.Memory, *actual.Memory)

View File

@ -179,7 +179,11 @@ func (m *qosContainerManagerImpl) setCPUCgroupConfig(configs map[v1.PodQOSClass]
// we only care about the burstable qos tier
continue
}
req := resource.PodRequests(pod, resource.PodResourcesOptions{Reuse: reuseReqs})
req := resource.PodRequests(pod, resource.PodResourcesOptions{
Reuse: reuseReqs,
// SkipPodLevelResources is set to false when PodLevelResources feature is enabled.
SkipPodLevelResources: !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.PodLevelResources),
})
if request, found := req[v1.ResourceCPU]; found {
burstablePodCPURequest += request.MilliValue()
}

View File

@ -35,6 +35,7 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
utilfeature "k8s.io/apiserver/pkg/util/feature"
resourcehelper "k8s.io/component-helpers/resource"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/klog/v2"
@ -94,6 +95,34 @@ func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.C
return lc, nil
}
// getCPULimit returns the CPU limit for the container to be used to calculate
// Linux Container Resources.
func getCPULimit(pod *v1.Pod, container *v1.Container) *resource.Quantity {
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.PodLevelResources) && resourcehelper.IsPodLevelResourcesSet(pod) {
// When the container-level CPU limit is not set, the pod-level limit is used
// in the calculation for components that rely on Linux resource limits being
// set.
if container.Resources.Limits.Cpu().IsZero() {
return pod.Spec.Resources.Limits.Cpu()
}
}
return container.Resources.Limits.Cpu()
}
// getMemoryLimit returns the memory limit for the container to be used to calculate
// Linux Container Resources.
func getMemoryLimit(pod *v1.Pod, container *v1.Container) *resource.Quantity {
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.PodLevelResources) && resourcehelper.IsPodLevelResourcesSet(pod) {
// When the container-level memory limit is not set, the pod-level limit is
// used in the calculation for components that rely on Linux resource limits
// being set.
if container.Resources.Limits.Memory().IsZero() {
return pod.Spec.Resources.Limits.Memory()
}
}
return container.Resources.Limits.Memory()
}
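// For illustration, assuming the feature gate is enabled: a container with
// requests of cpu=1 and memory=128Mi but no limits, in a pod with pod-level
// limits of cpu=3 and memory=256Mi, ends up with CpuQuota=300000 and
// MemoryLimitInBytes=256Mi via the fallbacks in getCPULimit and
// getMemoryLimit.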
// generateLinuxContainerResources generates linux container resources config for runtime
func (m *kubeGenericRuntimeManager) generateLinuxContainerResources(pod *v1.Pod, container *v1.Container, enforceMemoryQoS bool) *runtimeapi.LinuxContainerResources {
// set linux container resources
@ -101,7 +130,10 @@ func (m *kubeGenericRuntimeManager) generateLinuxContainerResources(pod *v1.Pod,
if _, cpuRequestExists := container.Resources.Requests[v1.ResourceCPU]; cpuRequestExists {
cpuRequest = container.Resources.Requests.Cpu()
}
lcr := m.calculateLinuxResources(cpuRequest, container.Resources.Limits.Cpu(), container.Resources.Limits.Memory())
memoryLimit := getMemoryLimit(pod, container)
cpuLimit := getCPULimit(pod, container)
lcr := m.calculateLinuxResources(cpuRequest, cpuLimit, memoryLimit)
lcr.OomScoreAdj = int64(qos.GetContainerOOMScoreAdjust(pod, container,
int64(m.machineInfo.MemoryCapacity)))

View File

@ -168,12 +168,13 @@ func TestGenerateLinuxContainerConfigResources(t *testing.T) {
tests := []struct {
name string
podResources v1.ResourceRequirements
containerResources v1.ResourceRequirements
podResources *v1.ResourceRequirements
expected *runtimeapi.LinuxContainerResources
}{
{
name: "Request 128M/1C, Limit 256M/3C",
podResources: v1.ResourceRequirements{
containerResources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("128Mi"),
v1.ResourceCPU: resource.MustParse("1"),
@ -192,7 +193,7 @@ func TestGenerateLinuxContainerConfigResources(t *testing.T) {
},
{
name: "Request 128M/2C, No Limit",
podResources: v1.ResourceRequirements{
containerResources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("128Mi"),
v1.ResourceCPU: resource.MustParse("2"),
@ -205,6 +206,27 @@ func TestGenerateLinuxContainerConfigResources(t *testing.T) {
MemoryLimitInBytes: 0,
},
},
{
name: "Container Level Request 128M/1C, Pod Level Limit 256M/3C",
containerResources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("128Mi"),
v1.ResourceCPU: resource.MustParse("1"),
},
},
podResources: &v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("256Mi"),
v1.ResourceCPU: resource.MustParse("3"),
},
},
expected: &runtimeapi.LinuxContainerResources{
CpuPeriod: 100000,
CpuQuota: 300000,
CpuShares: 1024,
MemoryLimitInBytes: 256 * 1024 * 1024,
},
},
}
for _, test := range tests {
@ -222,12 +244,17 @@ func TestGenerateLinuxContainerConfigResources(t *testing.T) {
ImagePullPolicy: v1.PullIfNotPresent,
Command: []string{"testCommand"},
WorkingDir: "testWorkingDir",
Resources: test.podResources,
Resources: test.containerResources,
},
},
},
}
if test.podResources != nil {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodLevelResources, true)
pod.Spec.Resources = test.podResources
}
linuxConfig, err := m.generateLinuxContainerConfig(&pod.Spec.Containers[0], pod, new(int64), "", nil, false)
assert.NoError(t, err)
assert.Equal(t, test.expected.CpuPeriod, linuxConfig.GetResources().CpuPeriod, test.name)

View File

@ -22,7 +22,9 @@ package kuberuntime
import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
utilfeature "k8s.io/apiserver/pkg/util/feature"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/kubernetes/pkg/features"
resourcehelper "k8s.io/component-helpers/resource"
)
@ -44,6 +46,8 @@ func (m *kubeGenericRuntimeManager) convertOverheadToLinuxResources(pod *v1.Pod)
func (m *kubeGenericRuntimeManager) calculateSandboxResources(pod *v1.Pod) *runtimeapi.LinuxContainerResources {
opts := resourcehelper.PodResourcesOptions{
ExcludeOverhead: true,
// SkipPodLevelResources is set to false when PodLevelResources feature is enabled.
SkipPodLevelResources: !utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources),
}
req := resourcehelper.PodRequests(pod, opts)
lim := resourcehelper.PodLimits(pod, opts)

View File

@ -27,6 +27,7 @@ package qos // import "k8s.io/kubernetes/pkg/kubelet/qos"
import (
v1 "k8s.io/api/core/v1"
resourcehelper "k8s.io/component-helpers/resource"
)
// minRegularContainerMemory returns the minimum memory resource quantity
@ -41,3 +42,30 @@ func minRegularContainerMemory(pod v1.Pod) int64 {
}
return memoryValue
}
// remainingPodMemReqPerContainer calculates the remaining pod memory request per
// container by:
// 1. Taking the total pod memory requests
// 2. Subtracting total container memory requests from pod memory requests
// 3. Dividing the remainder by the number of containers.
// This gives us the additional memory request that is not allocated to any
// containers in the pod. This value will be divided equally among all containers to
// calculate the OOM score adjustment.
// See https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/2837-pod-level-resource-spec/README.md#oom-score-adjustment
// for more details.
func remainingPodMemReqPerContainer(pod *v1.Pod) int64 {
var remainingMemory int64
if pod.Spec.Resources.Requests.Memory().IsZero() {
return remainingMemory
}
numContainers := len(pod.Spec.Containers) + len(pod.Spec.InitContainers)
// Aggregated requests of all containers.
aggrContainerReqs := resourcehelper.AggregateContainerRequests(pod, resourcehelper.PodResourcesOptions{})
remainingMemory = pod.Spec.Resources.Requests.Memory().Value() - aggrContainerReqs.Memory().Value()
remainingMemoryPerContainer := remainingMemory / int64(numContainers)
return remainingMemoryPerContainer
}
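// Worked example with round numbers: a pod-level memory request of 100Mi with
// two containers requesting 30Mi each leaves 100Mi-60Mi = 40Mi unallocated,
// so each container is attributed an extra 20Mi when its OOM score adjustment
// is computed.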

View File

@ -19,6 +19,7 @@ package qos
import (
v1 "k8s.io/api/core/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
resourcehelper "k8s.io/component-helpers/resource"
v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/types"
@ -63,14 +64,41 @@ func GetContainerOOMScoreAdjust(pod *v1.Pod, container *v1.Container, memoryCapa
// which use more than their request will have an OOM score of 1000 and will be prime
// targets for OOM kills.
// Note that this is a heuristic, it won't work if a container has many small processes.
memoryRequest := container.Resources.Requests.Memory().Value()
oomScoreAdjust := 1000 - (1000*memoryRequest)/memoryCapacity
containerMemReq := container.Resources.Requests.Memory().Value()
var oomScoreAdjust, remainingReqPerContainer int64
// When PodLevelResources feature is enabled, the OOM score adjustment formula is modified
// to account for pod-level memory requests. Any extra pod memory request that's
// not allocated to the containers is divided equally among all containers and
// added to their individual memory requests when calculating the OOM score
// adjustment. Otherwise, only container-level memory requests are used. See
// https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/2837-pod-level-resource-spec/README.md#oom-score-adjustment
// for more details.
if utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources) &&
resourcehelper.IsPodLevelRequestsSet(pod) {
// TODO(ndixita): Refactor to use this formula in all cases, as
// remainingReqPerContainer will be 0 when pod-level resources are not set.
remainingReqPerContainer = remainingPodMemReqPerContainer(pod)
oomScoreAdjust = 1000 - (1000 * (containerMemReq + remainingReqPerContainer) / memoryCapacity)
} else {
oomScoreAdjust = 1000 - (1000*containerMemReq)/memoryCapacity
}
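// Worked example with round numbers: with memoryCapacity=4000000000, a
// pod-level memory request of 2000000000, and two containers requesting
// 1000000000 and 0, the unallocated 1000000000 splits into 500000000 per
// container, yielding adjustments of 1000-1000*1500000000/4000000000 = 625
// and 1000-1000*500000000/4000000000 = 875 respectively.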
// adapt the sidecarContainer memoryRequest for OOM ADJ calculation
// calculate the OOM score adjustment based on: max-memory(currentSideCarContainer, min-memory(regular containers)).
if utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) && isSidecarContainer(pod, container) {
// check min memory quantity in regular containers
minMemoryRequest := minRegularContainerMemory(*pod)
// When calculating minMemoryOomScoreAdjust for sidecar containers with PodLevelResources enabled,
// we add the per-container share of unallocated pod memory requests to the minimum memory request.
// This ensures the OOM score adjustment, i.e. the minMemoryOomScoreAdjust
// calculation, remains consistent with how we handle pod-level memory
// requests for regular containers.
if utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources) &&
resourcehelper.IsPodLevelRequestsSet(pod) {
minMemoryRequest += remainingReqPerContainer
}
minMemoryOomScoreAdjust := 1000 - (1000*minMemoryRequest)/memoryCapacity
// the OOM adjustment for sidecar container will match
// or fall below the OOM score adjustment of regular containers in the Pod.

View File

@ -179,6 +179,8 @@ var (
}
sampleDefaultMemRequest = resource.MustParse(strconv.FormatInt(standardMemoryAmount/8, 10))
sampleDefaultMemLimit = resource.MustParse(strconv.FormatInt(1000+(standardMemoryAmount/8), 10))
sampleDefaultPodMemRequest = resource.MustParse(strconv.FormatInt(standardMemoryAmount/4, 10))
sampleDefaultPodMemLimit = resource.MustParse(strconv.FormatInt(1000+(standardMemoryAmount/4), 10))
sampleContainer = v1.Container{
Name: "main-1",
@ -300,6 +302,347 @@ var (
},
},
}
// Pod definitions with their resource specifications are defined in this section.
// TODO(ndixita): cleanup the tests to create a method that generates pod
// definitions based on input resource parameters, replacing the current
// approach of individual pod variables.
guaranteedPodResourcesNoContainerResources = v1.Pod{
Spec: v1.PodSpec{
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
v1.ResourceMemory: sampleDefaultPodMemRequest,
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
v1.ResourceMemory: sampleDefaultPodMemRequest,
},
},
Containers: []v1.Container{
{
Name: "no-request-limit-1",
Resources: v1.ResourceRequirements{},
},
{
Name: "no-request-limit-2",
Resources: v1.ResourceRequirements{},
},
},
},
}
guaranteedPodResourcesEqualContainerRequests = v1.Pod{
Spec: v1.PodSpec{
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
v1.ResourceMemory: sampleDefaultPodMemRequest,
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
v1.ResourceMemory: sampleDefaultPodMemRequest,
},
},
Containers: []v1.Container{
{
Name: "guaranteed-container-1",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
v1.ResourceMemory: sampleDefaultMemRequest,
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
v1.ResourceMemory: sampleDefaultMemRequest,
},
},
},
{
Name: "guaranteed-container-2",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
v1.ResourceMemory: sampleDefaultMemRequest,
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
v1.ResourceMemory: sampleDefaultMemRequest,
},
},
},
},
},
}
guaranteedPodResourcesUnequalContainerRequests = v1.Pod{
Spec: v1.PodSpec{
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
v1.ResourceMemory: sampleDefaultPodMemRequest,
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
v1.ResourceMemory: sampleDefaultPodMemRequest,
},
},
Containers: []v1.Container{
{
Name: "burstable-container",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("3m"),
v1.ResourceMemory: sampleDefaultMemRequest,
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
v1.ResourceMemory: sampleDefaultMemLimit,
},
},
},
{
Name: "best-effort-container",
Resources: v1.ResourceRequirements{},
},
},
},
}
burstablePodResourcesNoContainerResources = v1.Pod{
Spec: v1.PodSpec{
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
v1.ResourceMemory: sampleDefaultPodMemRequest,
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
v1.ResourceMemory: sampleDefaultPodMemLimit,
},
},
Containers: []v1.Container{
{
Name: "no-request-limit-1",
Resources: v1.ResourceRequirements{},
},
{
Name: "no-request-limit-2",
Resources: v1.ResourceRequirements{},
},
},
},
}
burstablePodResourcesEqualContainerRequests = v1.Pod{
Spec: v1.PodSpec{
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
v1.ResourceMemory: sampleDefaultPodMemRequest,
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
v1.ResourceMemory: sampleDefaultPodMemLimit,
},
},
Containers: []v1.Container{
{
Name: "guaranteed-container-1",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
v1.ResourceMemory: sampleDefaultMemRequest,
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
v1.ResourceMemory: sampleDefaultMemRequest,
},
},
},
{
Name: "guaranteed-container-2",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
v1.ResourceMemory: sampleDefaultMemRequest,
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
v1.ResourceMemory: sampleDefaultMemRequest,
},
},
},
},
},
}
burstablePodResourcesUnequalContainerRequests = v1.Pod{
Spec: v1.PodSpec{
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
v1.ResourceMemory: sampleDefaultPodMemRequest,
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
v1.ResourceMemory: sampleDefaultPodMemLimit,
},
},
Containers: []v1.Container{
{
Name: "burstable-container",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("3m"),
v1.ResourceMemory: sampleDefaultMemRequest,
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
v1.ResourceMemory: sampleDefaultMemRequest,
},
},
},
{
Name: "best-effort-container",
Resources: v1.ResourceRequirements{},
},
},
},
}
burstablePodResourcesNoContainerResourcesWithSidecar = v1.Pod{
Spec: v1.PodSpec{
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
v1.ResourceMemory: sampleDefaultPodMemRequest,
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
v1.ResourceMemory: sampleDefaultPodMemLimit,
},
},
Containers: []v1.Container{
{
Name: "no-request-limit",
Resources: v1.ResourceRequirements{},
},
},
InitContainers: []v1.Container{
{
Name: "no-request-limit-sidecar",
Resources: v1.ResourceRequirements{},
RestartPolicy: &restartPolicyAlways,
},
},
},
}
burstablePodResourcesEqualContainerRequestsWithSidecar = v1.Pod{
Spec: v1.PodSpec{
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
v1.ResourceMemory: sampleDefaultPodMemRequest,
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
v1.ResourceMemory: sampleDefaultPodMemLimit,
},
},
Containers: []v1.Container{
{
Name: "burstable-container",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
v1.ResourceMemory: sampleDefaultMemRequest,
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
v1.ResourceMemory: sampleDefaultMemLimit,
},
},
},
},
InitContainers: []v1.Container{
{
Name: "burstable-sidecar",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
v1.ResourceMemory: sampleDefaultMemRequest,
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
v1.ResourceMemory: sampleDefaultMemLimit,
},
},
RestartPolicy: &restartPolicyAlways,
},
},
},
}
burstablePodResourcesUnequalContainerRequestsWithSidecar = v1.Pod{
Spec: v1.PodSpec{
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
v1.ResourceMemory: resource.MustParse("2000000000"),
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
v1.ResourceMemory: sampleDefaultPodMemLimit,
},
},
Containers: []v1.Container{
{
Name: "burstable-container-1",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
v1.ResourceMemory: resource.MustParse("1000000000"),
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
v1.ResourceMemory: sampleDefaultPodMemLimit,
},
},
},
{
Name: "burstable-container-2",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
v1.ResourceMemory: resource.MustParse("500000000"),
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
v1.ResourceMemory: sampleDefaultPodMemLimit,
},
},
},
},
InitContainers: []v1.Container{
{
Name: "burstable-sidecar",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
v1.ResourceMemory: resource.MustParse("200000000"),
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
v1.ResourceMemory: sampleDefaultPodMemLimit,
},
},
RestartPolicy: &restartPolicyAlways,
},
},
},
}
)
type lowHighOOMScoreAdjTest struct {
@ -311,6 +654,7 @@ type oomTest struct {
memoryCapacity int64
lowHighOOMScoreAdj map[string]lowHighOOMScoreAdjTest // [container-name] : min and max oom_score_adj score the container should be assigned.
sidecarContainersFeatureEnabled bool
podLevelResourcesFeatureEnabled bool
}
func TestGetContainerOOMScoreAdjust(t *testing.T) {
@ -425,10 +769,96 @@ func TestGetContainerOOMScoreAdjust(t *testing.T) {
},
sidecarContainersFeatureEnabled: true,
},
"guaranteed-pod-resources-no-container-resources": {
pod: &guaranteedPodResourcesNoContainerResources,
lowHighOOMScoreAdj: map[string]lowHighOOMScoreAdjTest{
"no-request-limit-1": {lowOOMScoreAdj: -997, highOOMScoreAdj: -997},
"no-request-limit-2": {lowOOMScoreAdj: -997, highOOMScoreAdj: -997},
},
memoryCapacity: 4000000000,
podLevelResourcesFeatureEnabled: true,
},
"guaranteed-pod-resources-equal-container-resources": {
pod: &guaranteedPodResourcesEqualContainerRequests,
lowHighOOMScoreAdj: map[string]lowHighOOMScoreAdjTest{
"guaranteed-container-1": {lowOOMScoreAdj: -997, highOOMScoreAdj: -997},
"guaranteed-container-2": {lowOOMScoreAdj: -997, highOOMScoreAdj: -997},
},
memoryCapacity: 4000000000,
podLevelResourcesFeatureEnabled: true,
},
"guaranteed-pod-resources-unequal-container-requests": {
pod: &guaranteedPodResourcesUnequalContainerRequests,
lowHighOOMScoreAdj: map[string]lowHighOOMScoreAdjTest{
"burstable-container": {lowOOMScoreAdj: -997, highOOMScoreAdj: -997},
"best-effort-container": {lowOOMScoreAdj: -997, highOOMScoreAdj: -997},
},
memoryCapacity: 4000000000,
podLevelResourcesFeatureEnabled: true,
},
"burstable-pod-resources-no-container-resources": {
pod: &burstablePodResourcesNoContainerResources,
lowHighOOMScoreAdj: map[string]lowHighOOMScoreAdjTest{
"no-request-limit-1": {lowOOMScoreAdj: 750, highOOMScoreAdj: 750},
"no-request-limit-2": {lowOOMScoreAdj: 750, highOOMScoreAdj: 750},
},
memoryCapacity: 4000000000,
podLevelResourcesFeatureEnabled: true,
},
"burstable-pod-resources-equal-container-requests": {
pod: &burstablePodResourcesEqualContainerRequests,
lowHighOOMScoreAdj: map[string]lowHighOOMScoreAdjTest{
"guaranteed-container-1": {lowOOMScoreAdj: 750, highOOMScoreAdj: 750},
"guaranteed-container-2": {lowOOMScoreAdj: 750, highOOMScoreAdj: 750},
},
memoryCapacity: 4000000000,
podLevelResourcesFeatureEnabled: true,
},
"burstable-pod-resources-unequal-container-requests": {
pod: &burstablePodResourcesUnequalContainerRequests,
lowHighOOMScoreAdj: map[string]lowHighOOMScoreAdjTest{
"burstable-container": {lowOOMScoreAdj: 625, highOOMScoreAdj: 625},
"best-effort-container": {lowOOMScoreAdj: 875, highOOMScoreAdj: 875},
},
memoryCapacity: 4000000000,
podLevelResourcesFeatureEnabled: true,
},
"burstable-pod-resources-no-container-resources-with-sidecar": {
pod: &burstablePodResourcesNoContainerResourcesWithSidecar,
lowHighOOMScoreAdj: map[string]lowHighOOMScoreAdjTest{
"no-request-limit": {lowOOMScoreAdj: 750, highOOMScoreAdj: 750},
"no-request-limit-sidecar": {lowOOMScoreAdj: 750, highOOMScoreAdj: 750},
},
memoryCapacity: 4000000000,
podLevelResourcesFeatureEnabled: true,
sidecarContainersFeatureEnabled: true,
},
"burstable-pod-resources-equal-container-requests-with-sidecar": {
pod: &burstablePodResourcesEqualContainerRequestsWithSidecar,
lowHighOOMScoreAdj: map[string]lowHighOOMScoreAdjTest{
"burstable-container": {lowOOMScoreAdj: 750, highOOMScoreAdj: 750},
"burstable-sidecar": {lowOOMScoreAdj: 750, highOOMScoreAdj: 750},
},
memoryCapacity: 4000000000,
podLevelResourcesFeatureEnabled: true,
sidecarContainersFeatureEnabled: true,
},
"burstable-pod-resources-unequal-container-requests-with-sidecar": {
pod: &burstablePodResourcesUnequalContainerRequestsWithSidecar,
lowHighOOMScoreAdj: map[string]lowHighOOMScoreAdjTest{
"burstable-container-1": {lowOOMScoreAdj: 725, highOOMScoreAdj: 725},
"burstable-container-2": {lowOOMScoreAdj: 850, highOOMScoreAdj: 850},
"burstable-sidecar": {lowOOMScoreAdj: 850, highOOMScoreAdj: 850},
},
memoryCapacity: 4000000000,
podLevelResourcesFeatureEnabled: true,
sidecarContainersFeatureEnabled: true,
},
}
for name, test := range oomTests {
t.Run(name, func(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, test.sidecarContainersFeatureEnabled)
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodLevelResources, test.podLevelResourcesFeatureEnabled)
listContainers := test.pod.Spec.InitContainers
listContainers = append(listContainers, test.pod.Spec.Containers...)
for _, container := range listContainers {

View File

@ -122,6 +122,18 @@ func (p *podEvaluator) Constraints(required []corev1.ResourceName, item runtime.
return err
}
// As noted in the comment below, older versions required an explicit resource
// request for CPU and memory from every container whenever resource quotas were
// enabled for those resources. This was a design flaw: it coupled resource
// validation with quota enforcement. With the pod-level resources feature,
// container-level resources are no longer mandatory, so the check for missing
// container requests (for CPU/memory resources that have quotas set) is skipped
// when the feature is enabled and resources are set at the pod level.
if feature.DefaultFeatureGate.Enabled(features.PodLevelResources) && resourcehelper.IsPodLevelResourcesSet(pod) {
return nil
}
// BACKWARD COMPATIBILITY REQUIREMENT: if we quota cpu or memory, then each container
// must make an explicit request for the resource. this was a mistake. it coupled
// validation with resource counting, but we did this before QoS was even defined.
@ -367,6 +379,8 @@ func PodUsageFunc(obj runtime.Object, clock clock.Clock) (corev1.ResourceList, e
opts := resourcehelper.PodResourcesOptions{
UseStatusResources: feature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
// SkipPodLevelResources is set to false when PodLevelResources feature is enabled.
SkipPodLevelResources: !feature.DefaultFeatureGate.Enabled(features.PodLevelResources),
}
requests := resourcehelper.PodRequests(pod, opts)
limits := resourcehelper.PodLimits(pod, opts)
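To make the new accounting concrete, here is a minimal, hypothetical sketch (not part of this diff) of what the evaluator now charges for a pod that sets resources only at the pod level; it assumes the resourcehelper.PodRequests/PodLimits signatures used above:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	resourcehelper "k8s.io/component-helpers/resource"
)

func main() {
	// The container sets no cpu/memory requests; before this change such a
	// pod failed Constraints whenever cpu or memory quotas were enforced.
	pod := &v1.Pod{Spec: v1.PodSpec{
		Resources: &v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("500m"),
				v1.ResourceMemory: resource.MustParse("1Gi"),
			},
			Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")},
		},
		Containers: []v1.Container{{Name: "app"}},
	}}
	opts := resourcehelper.PodResourcesOptions{SkipPodLevelResources: false}
	reqs := resourcehelper.PodRequests(pod, opts)
	limits := resourcehelper.PodLimits(pod, opts)
	// Quota usage is charged from the pod-level values.
	fmt.Println(reqs.Cpu(), reqs.Memory(), limits.Cpu()) // 500m 1Gi 1
}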

View File

@ -45,6 +45,7 @@ func TestPodConstraintsFunc(t *testing.T) {
pod *api.Pod
required []corev1.ResourceName
err string
podLevelResourcesEnabled bool
}{
"init container resource missing": {
pod: &api.Pod{
@ -133,9 +134,30 @@ func TestPodConstraintsFunc(t *testing.T) {
required: []corev1.ResourceName{corev1.ResourceMemory, corev1.ResourceCPU},
err: `must specify cpu for: bar,foo; memory for: bar,foo`,
},
"pod-level resource set, container-level required resources missing": {
pod: &api.Pod{
Spec: api.PodSpec{
Resources: &api.ResourceRequirements{
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("1m")},
},
Containers: []api.Container{{
Name: "foo",
Resources: api.ResourceRequirements{},
}, {
Name: "bar",
Resources: api.ResourceRequirements{},
}},
},
},
required: []corev1.ResourceName{corev1.ResourceMemory, corev1.ResourceCPU},
podLevelResourcesEnabled: true,
err: ``,
},
}
evaluator := NewPodEvaluator(nil, clock.RealClock{})
for testName, test := range testCases {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodLevelResources, test.podLevelResourcesEnabled)
err := evaluator.Constraints(test.required, test.pod)
switch {
case err != nil && len(test.err) == 0,
@ -160,6 +182,7 @@ func TestPodEvaluatorUsage(t *testing.T) {
testCases := map[string]struct {
pod *api.Pod
usage corev1.ResourceList
podLevelResourcesEnabled bool
}{
"init container CPU": {
pod: &api.Pod{
@ -529,10 +552,74 @@ func TestPodEvaluatorUsage(t *testing.T) {
generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"),
},
},
"pod-level CPU": {
pod: &api.Pod{
Spec: api.PodSpec{
Resources: &api.ResourceRequirements{
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("1m")},
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("2m")},
},
},
},
podLevelResourcesEnabled: true,
usage: corev1.ResourceList{
corev1.ResourceRequestsCPU: resource.MustParse("1m"),
corev1.ResourceLimitsCPU: resource.MustParse("2m"),
corev1.ResourcePods: resource.MustParse("1"),
corev1.ResourceCPU: resource.MustParse("1m"),
generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"),
},
},
"pod-level Memory": {
pod: &api.Pod{
Spec: api.PodSpec{
Resources: &api.ResourceRequirements{
Requests: api.ResourceList{api.ResourceMemory: resource.MustParse("1Mi")},
Limits: api.ResourceList{api.ResourceMemory: resource.MustParse("2Mi")},
},
},
},
podLevelResourcesEnabled: true,
usage: corev1.ResourceList{
corev1.ResourceRequestsMemory: resource.MustParse("1Mi"),
corev1.ResourceLimitsMemory: resource.MustParse("2Mi"),
corev1.ResourcePods: resource.MustParse("1"),
corev1.ResourceMemory: resource.MustParse("1Mi"),
generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"),
},
},
"pod-level memory with container-level ephemeral storage": {
pod: &api.Pod{
Spec: api.PodSpec{
Resources: &api.ResourceRequirements{
Requests: api.ResourceList{api.ResourceMemory: resource.MustParse("1Mi")},
Limits: api.ResourceList{api.ResourceMemory: resource.MustParse("2Mi")},
},
Containers: []api.Container{{
Resources: api.ResourceRequirements{
Requests: api.ResourceList{api.ResourceEphemeralStorage: resource.MustParse("32Mi")},
Limits: api.ResourceList{api.ResourceEphemeralStorage: resource.MustParse("64Mi")},
},
}},
},
},
podLevelResourcesEnabled: true,
usage: corev1.ResourceList{
corev1.ResourceEphemeralStorage: resource.MustParse("32Mi"),
corev1.ResourceRequestsEphemeralStorage: resource.MustParse("32Mi"),
corev1.ResourceLimitsEphemeralStorage: resource.MustParse("64Mi"),
corev1.ResourcePods: resource.MustParse("1"),
corev1.ResourceRequestsMemory: resource.MustParse("1Mi"),
corev1.ResourceLimitsMemory: resource.MustParse("2Mi"),
corev1.ResourceMemory: resource.MustParse("1Mi"),
generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"),
},
},
}
t.Parallel()
for testName, testCase := range testCases {
t.Run(testName, func(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodLevelResources, testCase.podLevelResourcesEnabled)
actual, err := evaluator.Usage(testCase.pod)
if err != nil {
t.Error(err)

View File

@ -623,7 +623,9 @@ func preCheckForNode(nodeInfo *framework.NodeInfo) queue.PreEnqueueCheck {
// returns all failures.
func AdmissionCheck(pod *v1.Pod, nodeInfo *framework.NodeInfo, includeAllFailures bool) []AdmissionResult {
var admissionResults []AdmissionResult
insufficientResources := noderesources.Fits(pod, nodeInfo)
insufficientResources := noderesources.Fits(pod, nodeInfo, noderesources.ResourceRequestsOptions{
EnablePodLevelResources: utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources),
})
if len(insufficientResources) != 0 {
for i := range insufficientResources {
admissionResults = append(admissionResults, AdmissionResult{InsufficientResource: &insufficientResources[i]})

View File

@ -29,4 +29,5 @@ type Features struct {
EnableSidecarContainers bool
EnableSchedulingQueueHint bool
EnableAsyncPreemption bool
EnablePodLevelResources bool
}

View File

@ -90,6 +90,7 @@ type Fit struct {
enableInPlacePodVerticalScaling bool
enableSidecarContainers bool
enableSchedulingQueueHint bool
enablePodLevelResources bool
handle framework.Handle
resourceAllocationScorer
}
@ -176,10 +177,15 @@ func NewFit(_ context.Context, plArgs runtime.Object, h framework.Handle, fts fe
enableSidecarContainers: fts.EnableSidecarContainers,
enableSchedulingQueueHint: fts.EnableSchedulingQueueHint,
handle: h,
enablePodLevelResources: fts.EnablePodLevelResources,
resourceAllocationScorer: *scorePlugin(args),
}, nil
}
type ResourceRequestsOptions struct {
EnablePodLevelResources bool
}
// computePodResourceRequest returns a framework.Resource that covers the largest
// width in each resource dimension. Because init-containers run sequentially, we collect
// the max in each dimension iteratively. In contrast, we sum the resource vectors for
@ -207,9 +213,14 @@ func NewFit(_ context.Context, plArgs runtime.Object, h framework.Handle, fts fe
// Memory: 1G
//
// Result: CPU: 3, Memory: 3G
func computePodResourceRequest(pod *v1.Pod) *preFilterState {
// TODO(ndixita): modify computePodResourceRequest to accept opts of type
// ResourceRequestsOptions as the second parameter.
func computePodResourceRequest(pod *v1.Pod, opts ResourceRequestsOptions) *preFilterState {
// pod hasn't scheduled yet so we don't need to worry about InPlacePodVerticalScalingEnabled
reqs := resource.PodRequests(pod, resource.PodResourcesOptions{})
reqs := resource.PodRequests(pod, resource.PodResourcesOptions{
// SkipPodLevelResources is set to false when PodLevelResources feature is enabled.
SkipPodLevelResources: !opts.EnablePodLevelResources,
})
result := &preFilterState{}
result.SetMaxResource(reqs)
return result
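For illustration (a hypothetical sketch, not code from this change), the aggregation described above behaves as follows once pod-level requests participate; resource.PodRequests is assumed to be the component-helpers implementation referenced in this file:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	resourcehelper "k8s.io/component-helpers/resource"
)

// podCPURequest returns the scheduler-visible CPU request for the pod.
func podCPURequest(pod *v1.Pod, enablePodLevelResources bool) string {
	reqs := resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{
		SkipPodLevelResources: !enablePodLevelResources,
	})
	return reqs.Cpu().String()
}

func main() {
	pod := &v1.Pod{Spec: v1.PodSpec{
		// Hypothetical quantities: pod level 3, init container 2, app container 1.
		Resources: &v1.ResourceRequirements{
			Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("3")},
		},
		InitContainers: []v1.Container{{Name: "init", Resources: v1.ResourceRequirements{
			Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")},
		}}},
		Containers: []v1.Container{{Name: "app", Resources: v1.ResourceRequirements{
			Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")},
		}}},
	}}
	fmt.Println(podCPURequest(pod, false)) // "2": max(sum(containers)=1, init max=2)
	fmt.Println(podCPURequest(pod, true))  // "3": the pod-level request wins
}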
@ -225,7 +236,7 @@ func (f *Fit) PreFilter(ctx context.Context, cycleState *framework.CycleState, p
// and the older (before v1.28) kubelet, make the Pod unschedulable.
return nil, framework.NewStatus(framework.UnschedulableAndUnresolvable, "Pod has a restartable init container and the SidecarContainers feature is disabled")
}
cycleState.Write(preFilterStateKey, computePodResourceRequest(pod))
cycleState.Write(preFilterStateKey, computePodResourceRequest(pod, ResourceRequestsOptions{EnablePodLevelResources: f.enablePodLevelResources}))
return nil, nil
}
@ -370,7 +381,7 @@ func (f *Fit) isSchedulableAfterNodeChange(logger klog.Logger, pod *v1.Pod, oldO
return framework.Queue, err
}
// Leaving in the queue, since the pod won't fit into the modified node anyway.
if !isFit(pod, modifiedNode) {
if !isFit(pod, modifiedNode, ResourceRequestsOptions{EnablePodLevelResources: f.enablePodLevelResources}) {
logger.V(5).Info("node was created or updated, but it doesn't have enough resource(s) to accommodate this pod", "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode))
return framework.QueueSkip, nil
}
@ -380,7 +391,7 @@ func (f *Fit) isSchedulableAfterNodeChange(logger klog.Logger, pod *v1.Pod, oldO
return framework.Queue, nil
}
// The pod will fit, but since there was no increase in available resources, the change won't make the pod schedulable.
if !haveAnyRequestedResourcesIncreased(pod, originalNode, modifiedNode) {
if !haveAnyRequestedResourcesIncreased(pod, originalNode, modifiedNode, ResourceRequestsOptions{EnablePodLevelResources: f.enablePodLevelResources}) {
logger.V(5).Info("node was updated, but haven't changed the pod's resource requestments fit assessment", "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode))
return framework.QueueSkip, nil
}
@ -390,8 +401,8 @@ func (f *Fit) isSchedulableAfterNodeChange(logger klog.Logger, pod *v1.Pod, oldO
}
// haveAnyRequestedResourcesIncreased returns true if any of the resources requested by the pod have increased or if allowed pod number increased.
func haveAnyRequestedResourcesIncreased(pod *v1.Pod, originalNode, modifiedNode *v1.Node) bool {
podRequest := computePodResourceRequest(pod)
func haveAnyRequestedResourcesIncreased(pod *v1.Pod, originalNode, modifiedNode *v1.Node, opts ResourceRequestsOptions) bool {
podRequest := computePodResourceRequest(pod, opts)
originalNodeInfo := framework.NewNodeInfo()
originalNodeInfo.SetNode(originalNode)
modifiedNodeInfo := framework.NewNodeInfo()
@ -429,13 +440,13 @@ func haveAnyRequestedResourcesIncreased(pod *v1.Pod, originalNode, modifiedNode
// isFit checks if the pod fits the node. If the node is nil, it returns false.
// It constructs a fake NodeInfo object for the node and checks if the pod fits the node.
func isFit(pod *v1.Pod, node *v1.Node) bool {
func isFit(pod *v1.Pod, node *v1.Node, opts ResourceRequestsOptions) bool {
if node == nil {
return false
}
nodeInfo := framework.NewNodeInfo()
nodeInfo.SetNode(node)
return len(Fits(pod, nodeInfo)) == 0
return len(Fits(pod, nodeInfo, opts)) == 0
}
// Filter invoked at the filter extension point.
@ -481,8 +492,8 @@ type InsufficientResource struct {
}
// Fits checks if the node has enough resources to host the pod.
func Fits(pod *v1.Pod, nodeInfo *framework.NodeInfo) []InsufficientResource {
return fitsRequest(computePodResourceRequest(pod), nodeInfo, nil, nil)
func Fits(pod *v1.Pod, nodeInfo *framework.NodeInfo, opts ResourceRequestsOptions) []InsufficientResource {
return fitsRequest(computePodResourceRequest(pod, opts), nodeInfo, nil, nil)
}
func fitsRequest(podRequest *preFilterState, nodeInfo *framework.NodeInfo, ignoredExtendedResources, ignoredResourceGroups sets.Set[string]) []InsufficientResource {

View File

@ -119,12 +119,18 @@ var defaultScoringStrategy = &config.ScoringStrategy{
},
}
func newPodLevelResourcesPod(pod *v1.Pod, podResources v1.ResourceRequirements) *v1.Pod {
pod.Spec.Resources = &podResources
return pod
}
func TestEnoughRequests(t *testing.T) {
enoughPodsTests := []struct {
pod *v1.Pod
nodeInfo *framework.NodeInfo
name string
args config.NodeResourcesFitArgs
podLevelResourcesEnabled bool
wantInsufficientResources []InsufficientResource
wantStatus *framework.Status
}{
@ -478,6 +484,7 @@ func TestEnoughRequests(t *testing.T) {
wantInsufficientResources: []InsufficientResource{},
},
{
podLevelResourcesEnabled: true,
pod: newResourcePod(
framework.Resource{
ScalarResources: map[v1.ResourceName]int64{
@ -488,10 +495,74 @@ func TestEnoughRequests(t *testing.T) {
name: "skip checking resource request with quantity zero",
wantInsufficientResources: []InsufficientResource{},
},
{
podLevelResourcesEnabled: true,
pod: newPodLevelResourcesPod(
newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}),
v1.ResourceRequirements{
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1m"), v1.ResourceMemory: resource.MustParse("2")},
},
),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 5, Memory: 5})),
name: "both pod-level and container-level resources fit",
wantInsufficientResources: []InsufficientResource{},
},
{
podLevelResourcesEnabled: true,
pod: newPodLevelResourcesPod(
newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}),
v1.ResourceRequirements{
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("7m"), v1.ResourceMemory: resource.MustParse("2")},
},
),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 5, Memory: 5})),
name: "pod-level cpu resource not fit",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
wantInsufficientResources: []InsufficientResource{{
ResourceName: v1.ResourceCPU, Reason: getErrReason(v1.ResourceCPU), Requested: 7, Used: 5, Capacity: 10},
},
},
{
podLevelResourcesEnabled: true,
pod: newPodLevelResourcesPod(
newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}),
v1.ResourceRequirements{
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("3m"), v1.ResourceMemory: resource.MustParse("2")},
},
),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 5, Memory: 19})),
name: "pod-level memory resource not fit",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{{
ResourceName: v1.ResourceMemory, Reason: getErrReason(v1.ResourceMemory), Requested: 2, Used: 19, Capacity: 20},
},
},
{
podLevelResourcesEnabled: true,
pod: newResourceInitPod(newPodLevelResourcesPod(
newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}),
v1.ResourceRequirements{
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("3m"), v1.ResourceMemory: resource.MustParse("2")},
},
),
framework.Resource{MilliCPU: 1, Memory: 1},
),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 5, Memory: 19})),
name: "one pod-level cpu resource fits and all init and non-init containers resources fit",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{{
ResourceName: v1.ResourceMemory, Reason: getErrReason(v1.ResourceMemory), Requested: 2, Used: 19, Capacity: 20},
},
},
}
for _, test := range enoughPodsTests {
t.Run(test.name, func(t *testing.T) {
node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 5, 20, 5), Allocatable: makeAllocatableResources(10, 20, 32, 5, 20, 5)}}
test.nodeInfo.SetNode(&node)
@ -502,7 +573,7 @@ func TestEnoughRequests(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
p, err := NewFit(ctx, &test.args, nil, plfeature.Features{})
p, err := NewFit(ctx, &test.args, nil, plfeature.Features{EnablePodLevelResources: test.podLevelResourcesEnabled})
if err != nil {
t.Fatal(err)
}
@ -517,7 +588,7 @@ func TestEnoughRequests(t *testing.T) {
t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
}
gotInsufficientResources := fitsRequest(computePodResourceRequest(test.pod), test.nodeInfo, p.(*Fit).ignoredResources, p.(*Fit).ignoredResourceGroups)
gotInsufficientResources := fitsRequest(computePodResourceRequest(test.pod, ResourceRequestsOptions{EnablePodLevelResources: test.podLevelResourcesEnabled}), test.nodeInfo, p.(*Fit).ignoredResources, p.(*Fit).ignoredResourceGroups)
if !reflect.DeepEqual(gotInsufficientResources, test.wantInsufficientResources) {
t.Errorf("insufficient resources do not match: %+v, want: %v", gotInsufficientResources, test.wantInsufficientResources)
}
@ -1436,6 +1507,7 @@ func TestIsFit(t *testing.T) {
testCases := map[string]struct {
pod *v1.Pod
node *v1.Node
podLevelResourcesEnabled bool
expected bool
}{
"nil node": {
@ -1452,11 +1524,26 @@ func TestIsFit(t *testing.T) {
node: st.MakeNode().Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj(),
expected: true,
},
"insufficient pod-level resource": {
pod: st.MakePod().Resources(
v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")}},
).Obj(),
node: st.MakeNode().Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
podLevelResourcesEnabled: true,
expected: false,
},
"sufficient pod-level resource": {
pod: st.MakePod().Resources(
v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")}},
).Obj(),
node: st.MakeNode().Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj(),
expected: true,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
if got := isFit(tc.pod, tc.node); got != tc.expected {
if got := isFit(tc.pod, tc.node, ResourceRequestsOptions{tc.podLevelResourcesEnabled}); got != tc.expected {
t.Errorf("expected: %v, got: %v", tc.expected, got)
}
})
@ -1589,7 +1676,7 @@ func TestHaveAnyRequestedResourcesIncreased(t *testing.T) {
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
if got := haveAnyRequestedResourcesIncreased(tc.pod, tc.originalNode, tc.modifiedNode); got != tc.expected {
if got := haveAnyRequestedResourcesIncreased(tc.pod, tc.originalNode, tc.modifiedNode, ResourceRequestsOptions{}); got != tc.expected {
t.Errorf("expected: %v, got: %v", tc.expected, got)
}
})

View File

@ -119,7 +119,10 @@ func (r *resourceAllocationScorer) calculatePodResourceRequest(pod *v1.Pod, reso
opts := resourcehelper.PodResourcesOptions{
UseStatusResources: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
// SkipPodLevelResources is set to false when PodLevelResources feature is enabled.
SkipPodLevelResources: !utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources),
}
if !r.useRequested {
opts.NonMissingContainerRequests = v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(schedutil.DefaultMilliCPURequest, resource.DecimalSI),

View File

@ -55,6 +55,7 @@ func NewInTreeRegistry() runtime.Registry {
EnableSidecarContainers: feature.DefaultFeatureGate.Enabled(features.SidecarContainers),
EnableSchedulingQueueHint: feature.DefaultFeatureGate.Enabled(features.SchedulerQueueingHints),
EnableAsyncPreemption: feature.DefaultFeatureGate.Enabled(features.SchedulerAsyncPreemption),
EnablePodLevelResources: feature.DefaultFeatureGate.Enabled(features.PodLevelResources),
}
registry := runtime.Registry{

View File

@ -1052,19 +1052,74 @@ func (n *NodeInfo) update(pod *v1.Pod, sign int64) {
n.Generation = nextGeneration()
}
// getNonMissingContainerRequests returns the default non-zero CPU and memory
// requests for a container that the scheduler uses when container-level and
// pod-level requests are not set for a resource. It returns a ResourceList that
// includes these default non-zero requests, which are essential for the
// scheduler to function correctly.
// The method's behavior depends on whether pod-level resources are set or not:
// 1. When pod-level resources are not set, the method returns a ResourceList
// with the following defaults:
// - CPU: schedutil.DefaultMilliCPURequest
// - Memory: schedutil.DefaultMemoryRequest
//
// These defaults ensure that each container has a minimum resource request,
// allowing the scheduler to aggregate these requests and find a suitable node
// for the pod.
//
// 2. When pod-level resources are set, if a CPU or memory request is
// missing at the container-level *and* at the pod-level, the corresponding
// default value (schedutil.DefaultMilliCPURequest or schedutil.DefaultMemoryRequest)
// is included in the returned ResourceList.
// Note that these default values are not set in the Pod object itself, they are only used
// by the scheduler during node selection.
func getNonMissingContainerRequests(requests v1.ResourceList, podLevelResourcesSet bool) v1.ResourceList {
if !podLevelResourcesSet {
return v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(schedutil.DefaultMilliCPURequest, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(schedutil.DefaultMemoryRequest, resource.DecimalSI),
}
}
nonMissingContainerRequests := make(v1.ResourceList, 2)
// DefaultMilliCPURequest serves as the fallback value when both
// pod-level and container-level CPU requests are not set.
// Note that the apiserver defaulting logic will propagate a non-zero
// container-level CPU request to the pod level if a pod-level request
// is not explicitly set.
if _, exists := requests[v1.ResourceCPU]; !exists {
nonMissingContainerRequests[v1.ResourceCPU] = *resource.NewMilliQuantity(schedutil.DefaultMilliCPURequest, resource.DecimalSI)
}
// DefaultMemoryRequest serves as the fallback value when both
// pod-level and container-level memory requests are unspecified.
// Note that the apiserver defaulting logic will propagate a non-zero
// container-level memory request to the pod level if a pod-level request
// is not explicitly set.
if _, exists := requests[v1.ResourceMemory]; !exists {
nonMissingContainerRequests[v1.ResourceMemory] = *resource.NewQuantity(schedutil.DefaultMemoryRequest, resource.DecimalSI)
}
return nonMissingContainerRequests
}
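Since getNonMissingContainerRequests is unexported, here is a self-contained restatement of the two rules above, with hypothetical values; the defaults mirror the scheduler's DefaultMilliCPURequest (100m) and DefaultMemoryRequest (200MB) at the time of writing:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

var (
	defaultCPU = *resource.NewMilliQuantity(100, resource.DecimalSI)      // 100m
	defaultMem = *resource.NewQuantity(200*1024*1024, resource.DecimalSI) // 200MB
)

// nonMissing mirrors the rules documented above.
func nonMissing(requests v1.ResourceList, podLevelSet bool) v1.ResourceList {
	if !podLevelSet {
		// Rule 1: without pod-level resources, both defaults always apply.
		return v1.ResourceList{v1.ResourceCPU: defaultCPU, v1.ResourceMemory: defaultMem}
	}
	out := v1.ResourceList{}
	// Rule 2: with pod-level resources, default only what is missing at both levels.
	if _, ok := requests[v1.ResourceCPU]; !ok {
		out[v1.ResourceCPU] = defaultCPU
	}
	if _, ok := requests[v1.ResourceMemory]; !ok {
		out[v1.ResourceMemory] = defaultMem
	}
	return out
}

func main() {
	// Pod-level memory is set, so only CPU needs the fallback.
	out := nonMissing(v1.ResourceList{v1.ResourceMemory: resource.MustParse("1Gi")}, true)
	cpu := out[v1.ResourceCPU]
	fmt.Println(cpu.String(), len(out)) // "100m 1"
}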
func calculateResource(pod *v1.Pod) (Resource, int64, int64) {
requests := resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{
UseStatusResources: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
// SkipPodLevelResources is set to false when PodLevelResources feature is enabled.
SkipPodLevelResources: !utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources),
})
non0Requests := resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{
isPodLevelResourcesSet := utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources) && resourcehelper.IsPodLevelRequestsSet(pod)
nonMissingContainerRequests := getNonMissingContainerRequests(requests, isPodLevelResourcesSet)
non0Requests := requests
if len(nonMissingContainerRequests) > 0 {
non0Requests = resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{
UseStatusResources: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
NonMissingContainerRequests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: *resource.NewMilliQuantity(schedutil.DefaultMilliCPURequest, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(schedutil.DefaultMemoryRequest, resource.DecimalSI),
},
// SkipPodLevelResources is set to false when PodLevelResources feature is enabled.
SkipPodLevelResources: !utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources),
NonMissingContainerRequests: nonMissingContainerRequests,
})
}
non0CPU := non0Requests[v1.ResourceCPU]
non0Mem := non0Requests[v1.ResourceMemory]
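Taken together, the recomputation above can be sketched as follows (a hypothetical example using the component-helpers options shown in this hunk; only memory is missing at both levels, so only it receives the non-zero default):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	resourcehelper "k8s.io/component-helpers/resource"
)

func main() {
	pod := &v1.Pod{Spec: v1.PodSpec{
		// Pod-level CPU request only; the single container is requestless.
		Resources: &v1.ResourceRequirements{
			Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("500m")},
		},
		Containers: []v1.Container{{Name: "app"}},
	}}
	non0 := resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{
		SkipPodLevelResources: false,
		// Memory is missing at both levels; 200MB is the scheduler default.
		NonMissingContainerRequests: v1.ResourceList{
			v1.ResourceMemory: *resource.NewQuantity(200*1024*1024, resource.DecimalSI),
		},
	})
	fmt.Println(non0.Cpu(), non0.Memory()) // 500m 209715200
}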

View File

@ -34,6 +34,7 @@ import (
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/features"
st "k8s.io/kubernetes/pkg/scheduler/testing"
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
"k8s.io/kubernetes/test/utils/ktesting"
"k8s.io/kubernetes/test/utils/ktesting/initoption"
)
@ -1513,12 +1514,264 @@ func TestFitError_Error(t *testing.T) {
}
}
var (
cpu500m = resource.MustParse("500m")
mem500M = resource.MustParse("500Mi")
cpu700m = resource.MustParse("700m")
mem800M = resource.MustParse("800Mi")
cpu1200m = resource.MustParse("1200m")
mem1200M = resource.MustParse("1200Mi")
restartAlways = v1.ContainerRestartPolicyAlways
)
func TestCalculateResources(t *testing.T) {
testCases := []struct {
name string
containers []v1.Container
podResources *v1.ResourceRequirements
podLevelResourcesEnabled bool
expectedResource Resource
expectedNon0CPU int64
expectedNon0Mem int64
initContainers []v1.Container
}{
{
name: "requestless container",
containers: []v1.Container{{}},
expectedResource: Resource{},
expectedNon0CPU: schedutil.DefaultMilliCPURequest,
expectedNon0Mem: schedutil.DefaultMemoryRequest,
},
{
name: "1X container with requests",
containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: cpu500m,
v1.ResourceMemory: mem500M,
},
},
},
},
expectedResource: Resource{
MilliCPU: cpu500m.MilliValue(),
Memory: mem500M.Value(),
},
expectedNon0CPU: cpu500m.MilliValue(),
expectedNon0Mem: mem500M.Value(),
},
{
name: "2X container with requests",
containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: cpu500m,
v1.ResourceMemory: mem500M,
},
},
},
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: cpu700m,
v1.ResourceMemory: mem800M,
},
},
},
},
expectedResource: Resource{
MilliCPU: cpu500m.MilliValue() + cpu700m.MilliValue(),
Memory: mem500M.Value() + mem800M.Value(),
},
expectedNon0CPU: cpu500m.MilliValue() + cpu700m.MilliValue(),
expectedNon0Mem: mem500M.Value() + mem800M.Value(),
},
{
name: "1X container and 1X init container with pod-level requests",
podLevelResourcesEnabled: true,
initContainers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: cpu500m,
v1.ResourceMemory: mem500M,
},
},
},
},
containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: cpu500m,
v1.ResourceMemory: mem500M,
},
},
},
},
podResources: &v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: cpu1200m,
v1.ResourceMemory: mem1200M,
},
},
expectedResource: Resource{
MilliCPU: cpu1200m.MilliValue(),
Memory: mem1200M.Value(),
},
expectedNon0CPU: cpu1200m.MilliValue(),
expectedNon0Mem: mem1200M.Value(),
},
{
name: "1X container and 1X sidecar container with pod-level requests",
podLevelResourcesEnabled: true,
initContainers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: cpu500m,
v1.ResourceMemory: mem500M,
},
},
RestartPolicy: &restartAlways,
},
},
containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: cpu500m,
v1.ResourceMemory: mem500M,
},
},
},
},
podResources: &v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: cpu1200m,
v1.ResourceMemory: mem1200M,
},
},
expectedResource: Resource{
MilliCPU: cpu1200m.MilliValue(),
Memory: mem1200M.Value(),
},
expectedNon0CPU: cpu1200m.MilliValue(),
expectedNon0Mem: mem1200M.Value(),
},
{
name: "1X container with pod-level memory requests",
podLevelResourcesEnabled: true,
initContainers: []v1.Container{
{
Resources: v1.ResourceRequirements{},
},
},
containers: []v1.Container{
{
Resources: v1.ResourceRequirements{},
},
},
podResources: &v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceMemory: mem1200M,
},
},
expectedResource: Resource{
Memory: mem1200M.Value(),
},
expectedNon0CPU: schedutil.DefaultMilliCPURequest,
expectedNon0Mem: mem1200M.Value(),
},
{
name: "1X container with pod-level cpu requests",
podLevelResourcesEnabled: true,
initContainers: []v1.Container{
{
Resources: v1.ResourceRequirements{},
},
},
containers: []v1.Container{
{
Resources: v1.ResourceRequirements{},
},
},
podResources: &v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: cpu500m,
},
},
expectedResource: Resource{
MilliCPU: cpu500m.MilliValue(),
},
expectedNon0CPU: cpu500m.MilliValue(),
expectedNon0Mem: schedutil.DefaultMemoryRequest,
},
{
name: "1X container unsupported resources and pod-level supported resources",
podLevelResourcesEnabled: true,
initContainers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceEphemeralStorage: mem500M,
},
},
},
},
containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceEphemeralStorage: mem800M,
},
},
},
},
podResources: &v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: cpu500m,
},
},
expectedResource: Resource{
MilliCPU: cpu500m.MilliValue(),
EphemeralStorage: mem800M.Value(),
},
expectedNon0CPU: cpu500m.MilliValue(),
expectedNon0Mem: schedutil.DefaultMemoryRequest,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodLevelResources, tc.podLevelResourcesEnabled)
pod := &v1.Pod{
Spec: v1.PodSpec{
Resources: tc.podResources,
Containers: tc.containers,
InitContainers: tc.initContainers,
},
}
res, non0CPU, non0Mem := calculateResource(pod)
if !reflect.DeepEqual(res, tc.expectedResource) {
t.Errorf("Test: %s expected resource: %+v, got: %+v", tc.name, tc.expectedResource, res)
}
if non0CPU != tc.expectedNon0CPU {
t.Errorf("Test: %s expected non0CPU: %d, got: %d", tc.name, tc.expectedNon0CPU, non0CPU)
}
if non0Mem != tc.expectedNon0Mem {
t.Errorf("Test: %s expected non0Mem: %d, got: %d", tc.name, tc.expectedNon0Mem, non0Mem)
}
})
}
}
func TestCalculatePodResourcesWithResize(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)
cpu500m := resource.MustParse("500m")
mem500M := resource.MustParse("500Mi")
cpu700m := resource.MustParse("700m")
mem800M := resource.MustParse("800Mi")
testpod := v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: "pod_resize_test",

View File

@ -334,6 +334,12 @@ func (p *PodWrapper) Namespace(s string) *PodWrapper {
return p
}
// Resources sets requests and limits at pod-level.
func (p *PodWrapper) Resources(resources v1.ResourceRequirements) *PodWrapper {
p.Spec.Resources = &resources
return p
}
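A hypothetical usage sketch; inside the kubernetes repo the new wrapper composes like any other PodWrapper method:

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	st "k8s.io/kubernetes/pkg/scheduler/testing"
)

// podWithBudget builds a pod whose only CPU request lives at pod level.
func podWithBudget() *v1.Pod {
	return st.MakePod().Name("p").Namespace("ns").Resources(v1.ResourceRequirements{
		Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")},
	}).Obj()
}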
// OwnerReference updates the owning controller of the pod.
func (p *PodWrapper) OwnerReference(name string, gvk schema.GroupVersionKind) *PodWrapper {
p.OwnerReferences = []metav1.OwnerReference{

View File

@ -32,14 +32,17 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apiserver/pkg/admission"
genericadmissioninitailizer "k8s.io/apiserver/pkg/admission/initializer"
"k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
corev1listers "k8s.io/client-go/listers/core/v1"
"k8s.io/utils/lru"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/features"
)
const (
@ -528,8 +531,11 @@ func PodValidateLimitFunc(limitRange *corev1.LimitRange, pod *api.Pod) error {
// enforce pod limits on init containers
if limitType == corev1.LimitTypePod {
podRequests := podRequests(pod)
podLimits := podLimits(pod)
opts := podResourcesOptions{
PodLevelResourcesEnabled: feature.DefaultFeatureGate.Enabled(features.PodLevelResources),
}
podRequests := podRequests(pod, opts)
podLimits := podLimits(pod, opts)
for k, v := range limit.Min {
if err := minConstraint(string(limitType), string(k), v, podRequests, podLimits); err != nil {
errs = append(errs, err)
@ -550,13 +556,22 @@ func PodValidateLimitFunc(limitRange *corev1.LimitRange, pod *api.Pod) error {
return utilerrors.NewAggregate(errs)
}
type podResourcesOptions struct {
// PodLevelResourcesEnabled indicates that the PodLevelResources feature gate is
// enabled.
PodLevelResourcesEnabled bool
}
// podRequests is a simplified version of pkg/api/v1/resource/PodRequests that operates against the core version of
// pod. Any changes to that calculation should be reflected here.
// NOTE: We do not want to check status resources here, only the spec. This is equivalent to setting
// UseStatusResources=false in the common helper.
// TODO: Maybe we can consider doing a partial conversion of the pod to a v1
// type and then using the pkg/api/v1/resource/PodRequests.
func podRequests(pod *api.Pod) api.ResourceList {
// TODO(ndixita): PodRequests method exists in
// staging/src/k8s.io/component-helpers/resource/helpers.go. Refactor the code to
// avoid duplicating podRequests method.
func podRequests(pod *api.Pod, opts podResourcesOptions) api.ResourceList {
reqs := api.ResourceList{}
for _, container := range pod.Spec.Containers {
@ -589,6 +604,19 @@ func podRequests(pod *api.Pod) api.ResourceList {
}
maxResourceList(reqs, initContainerReqs)
// If PodLevelResources feature is enabled and resources are set at pod-level,
// override aggregated container requests of resources supported by pod-level
// resources with quantities specified at pod-level.
if opts.PodLevelResourcesEnabled && pod.Spec.Resources != nil {
for resourceName, quantity := range pod.Spec.Resources.Requests {
if isSupportedPodLevelResource(resourceName) {
// override with pod-level resource requests
reqs[resourceName] = quantity
}
}
}
return reqs
}
@ -598,7 +626,10 @@ func podRequests(pod *api.Pod) api.ResourceList {
// UseStatusResources=false in the common helper.
// TODO: Maybe we can consider doing a partial conversion of the pod to a v1
// type and then using the pkg/api/v1/resource/PodLimits.
func podLimits(pod *api.Pod) api.ResourceList {
// TODO(ndixita): PodLimits method exists in
// staging/src/k8s.io/component-helpers/resource/helpers.go. Refactor the code to
// avoid duplicating podLimits method.
func podLimits(pod *api.Pod, opts podResourcesOptions) api.ResourceList {
limits := api.ResourceList{}
for _, container := range pod.Spec.Containers {
@ -628,9 +659,35 @@ func podLimits(pod *api.Pod) api.ResourceList {
maxResourceList(limits, initContainerLimits)
// If PodLevelResources feature is enabled and resources are set at pod-level,
// override aggregated container limits of resources supported by pod-level
// resources with quantities specified at pod-level.
if opts.PodLevelResourcesEnabled && pod.Spec.Resources != nil {
for resourceName, quantity := range pod.Spec.Resources.Limits {
if isSupportedPodLevelResource(resourceName) {
// override with pod-level resource limits
limits[resourceName] = quantity
}
}
}
return limits
}
var supportedPodLevelResources = sets.New(api.ResourceCPU, api.ResourceMemory)
// isSupportedPodLevelResource checks if a given resource is supported by pod-level
// resource management through the PodLevelResources feature. Returns true if
// the resource is supported.
// isSupportedPodLevelResource also exists in
// staging/src/k8s.io/component-helpers/resource/helpers.go; it is duplicated
// here to avoid converting api.Pod to v1.Pod.
// TODO(ndixita): Find alternatives to avoid duplicating the code.
func isSupportedPodLevelResource(name api.ResourceName) bool {
return supportedPodLevelResources.Has(name)
}
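The override rule shared by podRequests and podLimits can be restated in a small self-contained sketch (hypothetical values; the real functions operate on the internal api.Pod type):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// overrideWithPodLevel applies pod-level quantities on top of the aggregated
// container values, but only for resources supported at pod level.
func overrideWithPodLevel(aggregate, podLevel v1.ResourceList) v1.ResourceList {
	supported := map[v1.ResourceName]bool{v1.ResourceCPU: true, v1.ResourceMemory: true}
	out := aggregate.DeepCopy()
	for name, qty := range podLevel {
		if supported[name] { // e.g. ephemeral-storage stays container-derived
			out[name] = qty
		}
	}
	return out
}

func main() {
	aggregate := v1.ResourceList{
		v1.ResourceCPU:              resource.MustParse("300m"), // sum over containers
		v1.ResourceEphemeralStorage: resource.MustParse("1Gi"),
	}
	podLevel := v1.ResourceList{v1.ResourceCPU: resource.MustParse("500m")}
	merged := overrideWithPodLevel(aggregate, podLevel)
	cpu, eph := merged[v1.ResourceCPU], merged[v1.ResourceEphemeralStorage]
	fmt.Println(cpu.String(), eph.String()) // "500m 1Gi"
}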
// addResourceList adds the resources in newList to list.
func addResourceList(list, newList api.ResourceList) {
for name, quantity := range newList {

View File

@ -158,6 +158,12 @@ func validLimitRangeNoDefaults() corev1.LimitRange {
return externalLimitRange
}
func validPodWithPodLevelResources(name string, numContainers int, containerResources api.ResourceRequirements, podResources api.ResourceRequirements) api.Pod {
pod := validPod(name, numContainers, containerResources)
pod.Spec.Resources = &podResources
return pod
}
func validPod(name string, numContainers int, resources api.ResourceRequirements) api.Pod {
pod := api.Pod{
ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "test"},
@ -282,6 +288,7 @@ func TestPodLimitFunc(t *testing.T) {
type testCase struct {
pod api.Pod
limitRange corev1.LimitRange
podLevelResourcesEnabled bool
}
successCases := []testCase{
@ -453,17 +460,42 @@ func TestPodLimitFunc(t *testing.T) {
pod: validPod("pod-max-local-ephemeral-storage-ratio", 3, getResourceRequirements(getLocalStorageResourceList("300Mi"), getLocalStorageResourceList("450Mi"))),
limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getLocalStorageResourceList("2Gi"), api.ResourceList{}, api.ResourceList{}, getLocalStorageResourceList("1.5")),
},
{
pod: validPodWithPodLevelResources("pod-level-resources-with-min-max", 3, getResourceRequirements(getComputeResourceList("100m", "60Mi"), getComputeResourceList("200m", "100Mi")),
getResourceRequirements(getComputeResourceList("200m", "180Mi"), getComputeResourceList("400m", "200Mi")),
),
limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getComputeResourceList("400m", "200Mi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
podLevelResourcesEnabled: true,
},
{
pod: validPodWithPodLevelResources("pod-level-requests-with-min", 3, getResourceRequirements(getComputeResourceList("50m", "60Mi"), getComputeResourceList("", "")),
getResourceRequirements(getComputeResourceList("160m", "200Mi"), getComputeResourceList("", "")),
),
limitRange: createLimitRange(api.LimitTypePod, getComputeResourceList("160m", "200Mi"), getComputeResourceList("", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
podLevelResourcesEnabled: true,
},
{
pod: validPodWithPodLevelResources("pod-level-limits-with-max", 3, getResourceRequirements(getComputeResourceList("", ""), getComputeResourceList("50m", "60Mi")),
getResourceRequirements(getComputeResourceList("", ""), getComputeResourceList("160m", "200Mi")),
),
limitRange: createLimitRange(api.LimitTypePod, getComputeResourceList("", ""), getComputeResourceList("160m", "200Mi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
podLevelResourcesEnabled: true,
},
}
for i := range successCases {
test := successCases[i]
t.Run(test.pod.Name, func(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodLevelResources, test.podLevelResourcesEnabled)
err := PodMutateLimitFunc(&test.limitRange, &test.pod)
if err != nil {
t.Errorf("Unexpected error for pod: %s, %v", test.pod.Name, err)
}
err = PodValidateLimitFunc(&test.limitRange, &test.pod)
if err != nil {
t.Errorf("Unexpected error for pod: %s, %v", test.pod.Name, err)
}
})
}
errorCases := []testCase{
@ -641,10 +673,32 @@ func TestPodLimitFunc(t *testing.T) {
pod: withRestartableInitContainer(getComputeResourceList("1500m", ""), api.ResourceList{},
validPod("ctr-max-cpu-limit-restartable-init-container", 1, getResourceRequirements(getComputeResourceList("1000m", ""), getComputeResourceList("1500m", "")))),
limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getComputeResourceList("2", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
}, {
pod: validPodWithPodLevelResources("pod-level-resources-exceeding-max", 3, getResourceRequirements(getComputeResourceList("100m", "60Mi"), getComputeResourceList("200m", "100Mi")),
getResourceRequirements(getComputeResourceList("200m", "180Mi"), getComputeResourceList("500m", "280Mi")),
),
limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getComputeResourceList("400m", "200Mi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
podLevelResourcesEnabled: true,
},
{
pod: validPodWithPodLevelResources("pod-level-requests-less-than-min", 3, getResourceRequirements(getComputeResourceList("50m", "60Mi"), getComputeResourceList("", "")),
getResourceRequirements(getComputeResourceList("100m", "200Mi"), getComputeResourceList("", "")),
),
limitRange: createLimitRange(api.LimitTypePod, getComputeResourceList("160m", "200Mi"), getComputeResourceList("", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
podLevelResourcesEnabled: true,
},
{
pod: validPodWithPodLevelResources("pod-level-limits-exceeding-max", 3, getResourceRequirements(getComputeResourceList("", ""), getComputeResourceList("50m", "60Mi")),
getResourceRequirements(getComputeResourceList("", ""), getComputeResourceList("160m", "300Mi")),
),
limitRange: createLimitRange(api.LimitTypePod, getComputeResourceList("", ""), getComputeResourceList("160m", "200Mi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
podLevelResourcesEnabled: true,
},
}
for i := range errorCases {
test := errorCases[i]
t.Run(test.pod.Name, func(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodLevelResources, test.podLevelResourcesEnabled)
err := PodMutateLimitFunc(&test.limitRange, &test.pod)
if err != nil {
t.Errorf("Unexpected error for pod: %s, %v", test.pod.Name, err)
@ -653,6 +707,7 @@ func TestPodLimitFunc(t *testing.T) {
if err == nil {
t.Errorf("Expected error for pod: %s", test.pod.Name)
}
})
}
}

View File

@ -13,6 +13,7 @@
- k8s.io/utils/ptr
- k8s.io/utils/net
- k8s.io/klog
- k8s.io/component-helpers/resource
# the following are temporary and should go away. Think twice (or more) before adding anything here.
# Main goal: pkg/apis should be as self-contained as possible.

File diff suppressed because it is too large

View File

@ -4466,6 +4466,21 @@ message PodSpec {
// +featureGate=DynamicResourceAllocation
// +optional
repeated PodResourceClaim resourceClaims = 39;
// Resources is the total amount of CPU and Memory resources required by all
// containers in the pod. It supports specifying Requests and Limits for
// "cpu" and "memory" resource names only. ResourceClaims are not supported.
//
// This field enables fine-grained control over resource allocation for the
// entire pod, allowing resource sharing among containers in a pod.
// TODO: For beta graduation, expand this comment with a detailed explanation.
//
// This is an alpha field and requires enabling the PodLevelResources feature
// gate.
//
// +featureGate=PodLevelResources
// +optional
optional ResourceRequirements resources = 40;
}
// PodStatus represents information about the status of a pod. Status may trail the actual

View File

@ -4087,6 +4087,20 @@ type PodSpec struct {
// +featureGate=DynamicResourceAllocation
// +optional
ResourceClaims []PodResourceClaim `json:"resourceClaims,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,39,rep,name=resourceClaims"`
// Resources is the total amount of CPU and Memory resources required by all
// containers in the pod. It supports specifying Requests and Limits for
// "cpu" and "memory" resource names only. ResourceClaims are not supported.
//
// This field enables fine-grained control over resource allocation for the
// entire pod, allowing resource sharing among containers in a pod.
// TODO: For beta graduation, expand this comment with a detailed explanation.
//
// This is an alpha field and requires enabling the PodLevelResources feature
// gate.
//
// +featureGate=PodLevelResources
// +optional
Resources *ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,40,opt,name=resources"`
}
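A minimal client-side sketch of a pod using the new alpha field (hypothetical names; the cluster must have the PodLevelResources feature gate enabled):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	pod := v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "shared-budget"},
		Spec: v1.PodSpec{
			// One cpu/memory budget shared by all containers in the pod.
			Resources: &v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceCPU:    resource.MustParse("1"),
					v1.ResourceMemory: resource.MustParse("1Gi"),
				},
				Limits: v1.ResourceList{
					v1.ResourceCPU:    resource.MustParse("2"),
					v1.ResourceMemory: resource.MustParse("2Gi"),
				},
			},
			// Containers may omit cpu/memory and draw from the pod budget.
			Containers: []v1.Container{{Name: "app", Image: "registry.k8s.io/pause:3.9"}},
		},
	}
	fmt.Println(pod.Spec.Resources.Requests.Cpu()) // 1
}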
// PodResourceClaim references exactly one ResourceClaim, either directly

View File

@ -1837,6 +1837,7 @@ var map_PodSpec = map[string]string{
"hostUsers": "Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.",
"schedulingGates": "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.",
"resourceClaims": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable.",
"resources": "Resources is the total amount of CPU and Memory resources required by all containers in the pod. It supports specifying Requests and Limits for \"cpu\" and \"memory\" resource names only. ResourceClaims are not supported.\n\nThis field enables fine-grained control over resource allocation for the entire pod, allowing resource sharing among containers in a pod.\n\nThis is an alpha field and requires enabling the PodLevelResources feature gate.",
}
func (PodSpec) SwaggerDoc() map[string]string {

View File

@ -4371,6 +4371,11 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = new(ResourceRequirements)
(*in).DeepCopyInto(*out)
}
return
}

View File

@ -1766,8 +1766,22 @@
"resourceClaimName": "resourceClaimNameValue",
"resourceClaimTemplateName": "resourceClaimTemplateNameValue"
}
],
"resources": {
"limits": {
"limitsKey": "0"
},
"requests": {
"requestsKey": "0"
},
"claims": [
{
"name": "nameValue",
"request": "requestValue"
}
]
}
}
},
"updateStrategy": {
"type": "typeValue",

View File

@ -864,6 +864,14 @@ spec:
- name: nameValue
resourceClaimName: resourceClaimNameValue
resourceClaimTemplateName: resourceClaimTemplateNameValue
resources:
claims:
- name: nameValue
request: requestValue
limits:
limitsKey: "0"
requests:
requestsKey: "0"
restartPolicy: restartPolicyValue
runtimeClassName: runtimeClassNameValue
schedulerName: schedulerNameValue

View File

@ -1767,8 +1767,22 @@
"resourceClaimName": "resourceClaimNameValue",
"resourceClaimTemplateName": "resourceClaimTemplateNameValue"
}
],
"resources": {
"limits": {
"limitsKey": "0"
},
"requests": {
"requestsKey": "0"
},
"claims": [
{
"name": "nameValue",
"request": "requestValue"
}
]
}
}
},
"strategy": {
"type": "typeValue",

View File

@ -872,6 +872,14 @@ spec:
- name: nameValue
resourceClaimName: resourceClaimNameValue
resourceClaimTemplateName: resourceClaimTemplateNameValue
resources:
claims:
- name: nameValue
request: requestValue
limits:
limitsKey: "0"
requests:
requestsKey: "0"
restartPolicy: restartPolicyValue
runtimeClassName: runtimeClassNameValue
schedulerName: schedulerNameValue

View File

@ -1768,9 +1768,23 @@
"resourceClaimName": "resourceClaimNameValue",
"resourceClaimTemplateName": "resourceClaimTemplateNameValue"
}
],
"resources": {
"limits": {
"limitsKey": "0"
},
"requests": {
"requestsKey": "0"
},
"claims": [
{
"name": "nameValue",
"request": "requestValue"
}
]
}
}
}
},
"status": {
"replicas": 1,

View File

@ -864,6 +864,14 @@ spec:
- name: nameValue
resourceClaimName: resourceClaimNameValue
resourceClaimTemplateName: resourceClaimTemplateNameValue
resources:
claims:
- name: nameValue
request: requestValue
limits:
limitsKey: "0"
requests:
requestsKey: "0"
restartPolicy: restartPolicyValue
runtimeClassName: runtimeClassNameValue
schedulerName: schedulerNameValue

View File

@ -1767,8 +1767,22 @@
"resourceClaimName": "resourceClaimNameValue",
"resourceClaimTemplateName": "resourceClaimTemplateNameValue"
}
],
"resources": {
"limits": {
"limitsKey": "0"
},
"requests": {
"requestsKey": "0"
},
"claims": [
{
"name": "nameValue",
"request": "requestValue"
}
]
}
}
},
"volumeClaimTemplates": [
{

View File

@ -872,6 +872,14 @@ spec:
- name: nameValue
resourceClaimName: resourceClaimNameValue
resourceClaimTemplateName: resourceClaimTemplateNameValue
resources:
claims:
- name: nameValue
request: requestValue
limits:
limitsKey: "0"
requests:
requestsKey: "0"
restartPolicy: restartPolicyValue
runtimeClassName: runtimeClassNameValue
schedulerName: schedulerNameValue

View File

@ -1767,8 +1767,22 @@
"resourceClaimName": "resourceClaimNameValue",
"resourceClaimTemplateName": "resourceClaimTemplateNameValue"
}
],
"resources": {
"limits": {
"limitsKey": "0"
},
"requests": {
"requestsKey": "0"
},
"claims": [
{
"name": "nameValue",
"request": "requestValue"
}
]
}
}
},
"strategy": {
"type": "typeValue",

View File

@ -874,6 +874,14 @@ spec:
- name: nameValue
resourceClaimName: resourceClaimNameValue
resourceClaimTemplateName: resourceClaimTemplateNameValue
resources:
claims:
- name: nameValue
request: requestValue
limits:
limitsKey: "0"
requests:
requestsKey: "0"
restartPolicy: restartPolicyValue
runtimeClassName: runtimeClassNameValue
schedulerName: schedulerNameValue

View File

@ -1767,8 +1767,22 @@
"resourceClaimName": "resourceClaimNameValue",
"resourceClaimTemplateName": "resourceClaimTemplateNameValue"
}
],
"resources": {
"limits": {
"limitsKey": "0"
},
"requests": {
"requestsKey": "0"
},
"claims": [
{
"name": "nameValue",
"request": "requestValue"
}
]
}
}
},
"volumeClaimTemplates": [
{

View File

@ -872,6 +872,14 @@ spec:
- name: nameValue
resourceClaimName: resourceClaimNameValue
resourceClaimTemplateName: resourceClaimTemplateNameValue
resources:
claims:
- name: nameValue
request: requestValue
limits:
limitsKey: "0"
requests:
requestsKey: "0"
restartPolicy: restartPolicyValue
runtimeClassName: runtimeClassNameValue
schedulerName: schedulerNameValue

View File

@ -1766,8 +1766,22 @@
"resourceClaimName": "resourceClaimNameValue",
"resourceClaimTemplateName": "resourceClaimTemplateNameValue"
}
],
"resources": {
"limits": {
"limitsKey": "0"
},
"requests": {
"requestsKey": "0"
},
"claims": [
{
"name": "nameValue",
"request": "requestValue"
}
]
}
}
},
"updateStrategy": {
"type": "typeValue",

View File

@ -864,6 +864,14 @@ spec:
- name: nameValue
resourceClaimName: resourceClaimNameValue
resourceClaimTemplateName: resourceClaimTemplateNameValue
resources:
claims:
- name: nameValue
request: requestValue
limits:
limitsKey: "0"
requests:
requestsKey: "0"
restartPolicy: restartPolicyValue
runtimeClassName: runtimeClassNameValue
schedulerName: schedulerNameValue

View File

@ -1767,8 +1767,22 @@
"resourceClaimName": "resourceClaimNameValue",
"resourceClaimTemplateName": "resourceClaimTemplateNameValue"
}
],
"resources": {
"limits": {
"limitsKey": "0"
},
"requests": {
"requestsKey": "0"
},
"claims": [
{
"name": "nameValue",
"request": "requestValue"
}
]
}
}
},
"strategy": {
"type": "typeValue",

View File

@ -872,6 +872,14 @@ spec:
- name: nameValue
resourceClaimName: resourceClaimNameValue
resourceClaimTemplateName: resourceClaimTemplateNameValue
resources:
claims:
- name: nameValue
request: requestValue
limits:
limitsKey: "0"
requests:
requestsKey: "0"
restartPolicy: restartPolicyValue
runtimeClassName: runtimeClassNameValue
schedulerName: schedulerNameValue

View File

@ -1768,9 +1768,23 @@
"resourceClaimName": "resourceClaimNameValue",
"resourceClaimTemplateName": "resourceClaimTemplateNameValue"
}
],
"resources": {
"limits": {
"limitsKey": "0"
},
"requests": {
"requestsKey": "0"
},
"claims": [
{
"name": "nameValue",
"request": "requestValue"
}
]
}
}
}
},
"status": {
"replicas": 1,

View File

@ -864,6 +864,14 @@ spec:
- name: nameValue
resourceClaimName: resourceClaimNameValue
resourceClaimTemplateName: resourceClaimTemplateNameValue
resources:
claims:
- name: nameValue
request: requestValue
limits:
limitsKey: "0"
requests:
requestsKey: "0"
restartPolicy: restartPolicyValue
runtimeClassName: runtimeClassNameValue
schedulerName: schedulerNameValue

View File

@ -1767,8 +1767,22 @@
"resourceClaimName": "resourceClaimNameValue",
"resourceClaimTemplateName": "resourceClaimTemplateNameValue"
}
],
"resources": {
"limits": {
"limitsKey": "0"
},
"requests": {
"requestsKey": "0"
},
"claims": [
{
"name": "nameValue",
"request": "requestValue"
}
]
}
}
},
"volumeClaimTemplates": [
{

@ -872,6 +872,14 @@ spec:
- name: nameValue
resourceClaimName: resourceClaimNameValue
resourceClaimTemplateName: resourceClaimTemplateNameValue
resources:
claims:
- name: nameValue
request: requestValue
limits:
limitsKey: "0"
requests:
requestsKey: "0"
restartPolicy: restartPolicyValue
runtimeClassName: runtimeClassNameValue
schedulerName: schedulerNameValue

@ -1850,8 +1850,22 @@
"resourceClaimName": "resourceClaimNameValue",
"resourceClaimTemplateName": "resourceClaimTemplateNameValue"
}
],
"resources": {
"limits": {
"limitsKey": "0"
},
"requests": {
"requestsKey": "0"
},
"claims": [
{
"name": "nameValue",
"request": "requestValue"
}
]
}
}
},
"ttlSecondsAfterFinished": 8,
"completionMode": "completionModeValue",

@ -924,6 +924,14 @@ spec:
- name: nameValue
resourceClaimName: resourceClaimNameValue
resourceClaimTemplateName: resourceClaimTemplateNameValue
resources:
claims:
- name: nameValue
request: requestValue
limits:
limitsKey: "0"
requests:
requestsKey: "0"
restartPolicy: restartPolicyValue
runtimeClassName: runtimeClassNameValue
schedulerName: schedulerNameValue

@ -1801,8 +1801,22 @@
"resourceClaimName": "resourceClaimNameValue",
"resourceClaimTemplateName": "resourceClaimTemplateNameValue"
}
],
"resources": {
"limits": {
"limitsKey": "0"
},
"requests": {
"requestsKey": "0"
},
"claims": [
{
"name": "nameValue",
"request": "requestValue"
}
]
}
}
},
"ttlSecondsAfterFinished": 8,
"completionMode": "completionModeValue",

@ -888,6 +888,14 @@ spec:
- name: nameValue
resourceClaimName: resourceClaimNameValue
resourceClaimTemplateName: resourceClaimTemplateNameValue
resources:
claims:
- name: nameValue
request: requestValue
limits:
limitsKey: "0"
requests:
requestsKey: "0"
restartPolicy: restartPolicyValue
runtimeClassName: runtimeClassNameValue
schedulerName: schedulerNameValue

@ -1850,8 +1850,22 @@
"resourceClaimName": "resourceClaimNameValue",
"resourceClaimTemplateName": "resourceClaimTemplateNameValue"
}
],
"resources": {
"limits": {
"limitsKey": "0"
},
"requests": {
"requestsKey": "0"
},
"claims": [
{
"name": "nameValue",
"request": "requestValue"
}
]
}
}
},
"ttlSecondsAfterFinished": 8,
"completionMode": "completionModeValue",

@ -924,6 +924,14 @@ spec:
- name: nameValue
resourceClaimName: resourceClaimNameValue
resourceClaimTemplateName: resourceClaimTemplateNameValue
resources:
claims:
- name: nameValue
request: requestValue
limits:
limitsKey: "0"
requests:
requestsKey: "0"
restartPolicy: restartPolicyValue
runtimeClassName: runtimeClassNameValue
schedulerName: schedulerNameValue

@ -1708,7 +1708,21 @@
"resourceClaimName": "resourceClaimNameValue",
"resourceClaimTemplateName": "resourceClaimTemplateNameValue"
}
],
"resources": {
"limits": {
"limitsKey": "0"
},
"requests": {
"requestsKey": "0"
},
"claims": [
{
"name": "nameValue",
"request": "requestValue"
}
]
}
},
"status": {
"phase": "phaseValue",

@ -820,6 +820,14 @@ spec:
- name: nameValue
resourceClaimName: resourceClaimNameValue
resourceClaimTemplateName: resourceClaimTemplateNameValue
resources:
claims:
- name: nameValue
request: requestValue
limits:
limitsKey: "0"
requests:
requestsKey: "0"
restartPolicy: restartPolicyValue
runtimeClassName: runtimeClassNameValue
schedulerName: schedulerNameValue

@ -1751,7 +1751,21 @@
"resourceClaimName": "resourceClaimNameValue",
"resourceClaimTemplateName": "resourceClaimTemplateNameValue"
}
],
"resources": {
"limits": {
"limitsKey": "0"
},
"requests": {
"requestsKey": "0"
},
"claims": [
{
"name": "nameValue",
"request": "requestValue"
}
]
}
}
}
}
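
Since the field is alpha, a cluster must opt in before pod-level resources like those in these fixtures will validate. A hedged sketch of enabling the gate on the command line (which components need it can vary by release; the API server and kubelet are the usual minimum):

kube-apiserver --feature-gates=PodLevelResources=true ...
kubelet --feature-gates=PodLevelResources=true ...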

Some files were not shown because too many files have changed in this diff.