Merge pull request #116684 from vinaykul/restart-free-pod-vertical-scaling-fixes

Add missing unit test for resource resize policy defaulting
Authored by Kubernetes Prow Robot on 2023-03-17 15:13:17 -07:00; committed by GitHub
commit c14e0983fb
2 changed files with 221 additions and 9 deletions


@@ -229,7 +229,6 @@ name | architectures
- Updated: Redefine AppProtocol field description and add new standard values ([#115433](https://github.com/kubernetes/kubernetes/pull/115433), [@LiorLieberman](https://github.com/LiorLieberman)) [SIG API Machinery, Apps and Network]
- ValidatingAdmissionPolicy now provides a status field that contains results of type checking the validation expression.
The type checking is fully informational, and the behavior of the policy is unchanged. ([#115668](https://github.com/kubernetes/kubernetes/pull/115668), [@jiahuif](https://github.com/jiahuif)) [SIG API Machinery, Auth, Cloud Provider and Testing]
-- Vpa: ResourceResizePolicy type is renamed to ResourceResizeRestartPolicy and RestartRequired policy value is renamed to RestartContainer. If not specified by the user, RestartNotRequired policy defaults for CPU and memory resources. ([#116119](https://github.com/kubernetes/kubernetes/pull/116119), [@vinaykul](https://github.com/vinaykul)) [SIG API Machinery, Apps, Node and Testing]
- We have removed support for the v1alpha1 kubeletplugin API of DynamicResourceManagement. All plugins must update to v1alpha2 in order to function properly going forward. ([#116558](https://github.com/kubernetes/kubernetes/pull/116558), [@klueska](https://github.com/klueska)) [SIG API Machinery, Apps, CLI, Node, Scheduling and Testing]
### Feature
@@ -502,12 +501,12 @@ name | architectures
Add SendInitialEvents field to the ListOptions. When the new option is set together with watch=true, it begins the watch stream with synthetic init events followed by a synthetic "Bookmark" after which the server continues streaming events. ([#115402](https://github.com/kubernetes/kubernetes/pull/115402), [@p0lyn0mial](https://github.com/p0lyn0mial)) [SIG API Machinery]
- Kubelet: a "maxParallelImagePulls" field can now be specified in the kubelet configuration file to control how many image pulls the kubelet can perform in parallel. ([#115220](https://github.com/kubernetes/kubernetes/pull/115220), [@ruiwen-zhao](https://github.com/ruiwen-zhao)) [SIG API Machinery, Node and Scalability]
- PodSchedulingReadiness is graduated to beta. ([#115815](https://github.com/kubernetes/kubernetes/pull/115815), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG API Machinery, Apps, Scheduling and Testing]
-- PodSpec.Container.Resources becomes mutable for CPU and memory resource types.
-- PodSpec.Container.ResizePolicy (new object) gives users control over how their containers are resized.
-- PodStatus.Resize status describes the state of a requested Pod resize.
-- PodStatus.AllocatedResources describes node resources allocated to Pod.
-- PodStatus.Resources describes node resources applied to running containers by CRI.
-- UpdateContainerResources CRI API now supports both Linux and Windows.
+- In-place resize feature for Kubernetes Pods
+  - Changed the Pod API so that the `resources` defined for containers are mutable for `cpu` and `memory` resource types.
+  - Added `resizePolicy` for containers in a pod to allow users control over how their containers are resized.
+  - Added `allocatedResources` field to container status in pod status that describes the node resources allocated to a pod.
+  - Added `resources` field to container status that reports actual resources applied to running containers.
+  - Added `resize` field to pod status that describes the state of a requested pod resize.
  For details, see KEPs below. ([#102884](https://github.com/kubernetes/kubernetes/pull/102884), [@vinaykul](https://github.com/vinaykul)) [SIG API Machinery, Apps, Instrumentation, Node, Scheduling and Testing]
- The PodDisruptionBudget `spec.unhealthyPodEvictionPolicy` field has graduated to beta and is enabled by default. On servers with the feature enabled, this field may be set to `AlwaysAllow` to always allow unhealthy pods covered by the PodDisruptionBudget to be evicted. ([#115363](https://github.com/kubernetes/kubernetes/pull/115363), [@ravisantoshgudimetla](https://github.com/ravisantoshgudimetla)) [SIG Apps, Auth and Node]
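
For readers unfamiliar with the fields named in the rewritten release note above, the following short Go sketch (an editorial illustration, not part of this commit) builds a container using the published k8s.io/api/core/v1 types; the container name and image are arbitrary placeholders.

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
)

func main() {
    // Illustrative container: CPU may be resized in place, while a memory
    // resize asks the kubelet to restart the container.
    c := v1.Container{
        Name:  "app",
        Image: "registry.k8s.io/pause:3.9",
        Resources: v1.ResourceRequirements{
            Requests: v1.ResourceList{
                v1.ResourceCPU:    resource.MustParse("100m"),
                v1.ResourceMemory: resource.MustParse("200Mi"),
            },
        },
        ResizePolicy: []v1.ContainerResizePolicy{
            {ResourceName: v1.ResourceCPU, RestartPolicy: v1.NotRequired},
            {ResourceName: v1.ResourceMemory, RestartPolicy: v1.RestartContainer},
        },
    }
    fmt.Printf("%+v\n", c.ResizePolicy)
}

Omitting a `resizePolicy` entry for a resource that has requests or limits is equivalent to `NotRequired`, which is exactly the defaulting the new unit test below exercises.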


@@ -33,6 +33,7 @@ import (
    featuregatetesting "k8s.io/component-base/featuregate/testing"
    "k8s.io/kubernetes/pkg/api/legacyscheme"
    corev1 "k8s.io/kubernetes/pkg/apis/core/v1"
+   "k8s.io/kubernetes/pkg/features"
    utilpointer "k8s.io/utils/pointer"

    // ensure types are installed
@@ -1906,3 +1907,215 @@ func TestSetDefaultServiceInternalTrafficPolicy(t *testing.T) {
        })
    }
}

func TestSetDefaultResizePolicy(t *testing.T) {
    // verify we default to NotRequired restart policy for resize when resources are specified
    defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)()
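    // Each case round-trips a Pod containing testContainer through defaulting and
    // compares the container's resulting ResizePolicy against expectedResizePolicy.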
    for desc, tc := range map[string]struct {
        testContainer        v1.Container
        expectedResizePolicy []v1.ContainerResizePolicy
    }{
        "CPU and memory limits are specified": {
            testContainer: v1.Container{
                Resources: v1.ResourceRequirements{
                    Limits: v1.ResourceList{
                        v1.ResourceCPU:    resource.MustParse("100m"),
                        v1.ResourceMemory: resource.MustParse("200Mi"),
                    },
                },
            },
            expectedResizePolicy: []v1.ContainerResizePolicy{
                {
                    ResourceName:  v1.ResourceCPU,
                    RestartPolicy: v1.NotRequired,
                },
                {
                    ResourceName:  v1.ResourceMemory,
                    RestartPolicy: v1.NotRequired,
                },
            },
        },
        "CPU requests are specified": {
            testContainer: v1.Container{
                Resources: v1.ResourceRequirements{
                    Requests: v1.ResourceList{
                        v1.ResourceCPU: resource.MustParse("100m"),
                    },
                },
            },
            expectedResizePolicy: []v1.ContainerResizePolicy{
                {
                    ResourceName:  v1.ResourceCPU,
                    RestartPolicy: v1.NotRequired,
                },
            },
        },
        "Memory limits are specified": {
            testContainer: v1.Container{
                Resources: v1.ResourceRequirements{
                    Limits: v1.ResourceList{
                        v1.ResourceMemory: resource.MustParse("200Mi"),
                    },
                },
            },
            expectedResizePolicy: []v1.ContainerResizePolicy{
                {
                    ResourceName:  v1.ResourceMemory,
                    RestartPolicy: v1.NotRequired,
                },
            },
        },
        "No resources are specified": {
            testContainer:        v1.Container{Name: "besteffort"},
            expectedResizePolicy: nil,
        },
        "CPU and memory limits are specified with restartContainer resize policy for memory": {
            testContainer: v1.Container{
                Resources: v1.ResourceRequirements{
                    Limits: v1.ResourceList{
                        v1.ResourceCPU:    resource.MustParse("100m"),
                        v1.ResourceMemory: resource.MustParse("200Mi"),
                    },
                },
                ResizePolicy: []v1.ContainerResizePolicy{
                    {
                        ResourceName:  v1.ResourceMemory,
                        RestartPolicy: v1.RestartContainer,
                    },
                },
            },
            expectedResizePolicy: []v1.ContainerResizePolicy{
                {
                    ResourceName:  v1.ResourceMemory,
                    RestartPolicy: v1.RestartContainer,
                },
                {
                    ResourceName:  v1.ResourceCPU,
                    RestartPolicy: v1.NotRequired,
                },
            },
        },
        "CPU requests and memory limits are specified with restartContainer resize policy for CPU": {
            testContainer: v1.Container{
                Resources: v1.ResourceRequirements{
                    Limits: v1.ResourceList{
                        v1.ResourceMemory: resource.MustParse("200Mi"),
                    },
                    Requests: v1.ResourceList{
                        v1.ResourceCPU: resource.MustParse("100m"),
                    },
                },
                ResizePolicy: []v1.ContainerResizePolicy{
                    {
                        ResourceName:  v1.ResourceCPU,
                        RestartPolicy: v1.RestartContainer,
                    },
                },
            },
            expectedResizePolicy: []v1.ContainerResizePolicy{
                {
                    ResourceName:  v1.ResourceCPU,
                    RestartPolicy: v1.RestartContainer,
                },
                {
                    ResourceName:  v1.ResourceMemory,
                    RestartPolicy: v1.NotRequired,
                },
            },
        },
        "CPU and memory requests are specified with restartContainer resize policy for both": {
            testContainer: v1.Container{
                Resources: v1.ResourceRequirements{
                    Requests: v1.ResourceList{
                        v1.ResourceCPU:    resource.MustParse("100m"),
                        v1.ResourceMemory: resource.MustParse("200Mi"),
                    },
                },
                ResizePolicy: []v1.ContainerResizePolicy{
                    {
                        ResourceName:  v1.ResourceCPU,
                        RestartPolicy: v1.RestartContainer,
                    },
                    {
                        ResourceName:  v1.ResourceMemory,
                        RestartPolicy: v1.RestartContainer,
                    },
                },
            },
            expectedResizePolicy: []v1.ContainerResizePolicy{
                {
                    ResourceName:  v1.ResourceCPU,
                    RestartPolicy: v1.RestartContainer,
                },
                {
                    ResourceName:  v1.ResourceMemory,
                    RestartPolicy: v1.RestartContainer,
                },
            },
        },
        "Ephemeral storage limits are specified": {
            testContainer: v1.Container{
                Resources: v1.ResourceRequirements{
                    Limits: v1.ResourceList{
                        v1.ResourceEphemeralStorage: resource.MustParse("500Mi"),
                    },
                },
            },
            expectedResizePolicy: nil,
        },
        "Ephemeral storage requests and CPU limits are specified": {
            testContainer: v1.Container{
                Resources: v1.ResourceRequirements{
                    Limits: v1.ResourceList{
                        v1.ResourceCPU: resource.MustParse("100m"),
                    },
                    Requests: v1.ResourceList{
                        v1.ResourceEphemeralStorage: resource.MustParse("500Mi"),
                    },
                },
            },
            expectedResizePolicy: []v1.ContainerResizePolicy{
                {
                    ResourceName:  v1.ResourceCPU,
                    RestartPolicy: v1.NotRequired,
                },
            },
        },
        "Ephemeral storage requests and limits, memory requests with restartContainer policy are specified": {
            testContainer: v1.Container{
                Resources: v1.ResourceRequirements{
                    Limits: v1.ResourceList{
                        v1.ResourceEphemeralStorage: resource.MustParse("500Mi"),
                    },
                    Requests: v1.ResourceList{
                        v1.ResourceEphemeralStorage: resource.MustParse("500Mi"),
                        v1.ResourceMemory:           resource.MustParse("200Mi"),
                    },
                },
                ResizePolicy: []v1.ContainerResizePolicy{
                    {
                        ResourceName:  v1.ResourceMemory,
                        RestartPolicy: v1.RestartContainer,
                    },
                },
            },
            expectedResizePolicy: []v1.ContainerResizePolicy{
                {
                    ResourceName:  v1.ResourceMemory,
                    RestartPolicy: v1.RestartContainer,
                },
            },
        },
    } {
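        // roundTrip encodes the pod and decodes it back through the scheme, which applies defaulting.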
        t.Run(desc, func(t *testing.T) {
            testPod := v1.Pod{}
            testPod.Spec.Containers = append(testPod.Spec.Containers, tc.testContainer)
            output := roundTrip(t, runtime.Object(&testPod))
            pod2 := output.(*v1.Pod)
            if diff.ObjectDiff(pod2.Spec.Containers[0].ResizePolicy, tc.expectedResizePolicy) != "" {
                t.Errorf("expected resize policy %+v, but got %+v", tc.expectedResizePolicy, pod2.Spec.Containers[0].ResizePolicy)
            }
        })
    }
}
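
For context on what the new test asserts, the defaulting it round-trips through can be approximated by the sketch below. This is a hypothetical reimplementation for illustration (the helper name defaultResizePolicy is ours), not the actual code in pkg/apis/core/v1/defaults.go.

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
)

// defaultResizePolicy approximates the defaulting the test exercises: every
// cpu/memory resource named in requests or limits that lacks an explicit
// policy gets a NotRequired entry appended after any user-specified entries;
// other resources are left alone.
func defaultResizePolicy(c *v1.Container) {
    specified := map[v1.ResourceName]bool{}
    for _, p := range c.ResizePolicy {
        specified[p.ResourceName] = true
    }
    for _, name := range []v1.ResourceName{v1.ResourceCPU, v1.ResourceMemory} {
        _, inRequests := c.Resources.Requests[name]
        _, inLimits := c.Resources.Limits[name]
        if (inRequests || inLimits) && !specified[name] {
            c.ResizePolicy = append(c.ResizePolicy, v1.ContainerResizePolicy{
                ResourceName:  name,
                RestartPolicy: v1.NotRequired,
            })
        }
    }
}

func main() {
    c := v1.Container{
        Resources: v1.ResourceRequirements{
            Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")},
        },
    }
    defaultResizePolicy(&c)
    fmt.Printf("%+v\n", c.ResizePolicy) // one cpu entry with RestartPolicy NotRequired
}

This mirrors the table above: user-specified entries are preserved ahead of any appended defaults, and resources other than cpu and memory (for example ephemeral-storage) never receive a default policy.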