Updated some unit tests and resolved some review comments

This commit is contained in:
vivzbansal 2024-11-09 23:40:26 +00:00
parent 5ed5732fa2
commit 242dec3e34
9 changed files with 125 additions and 566 deletions

View File

@@ -166,7 +166,6 @@ func SetDefaults_Service(obj *v1.Service) {
}
}
func SetDefaults_Pod(obj *v1.Pod) {
// If limits are specified, but requests are not, default requests to limits
// This is done here rather than a more specific defaulting pass on v1.ResourceRequirements
@@ -183,10 +182,6 @@ func SetDefaults_Pod(obj *v1.Pod) {
}
}
}
if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
// For normal containers, set resize restart policy to default value (NotRequired), if not specified.
setDefaultResizePolicy(&obj.Spec.Containers[i])
}
}
for i := range obj.Spec.InitContainers {
if obj.Spec.InitContainers[i].Resources.Limits != nil {
@@ -199,12 +194,6 @@ func SetDefaults_Pod(obj *v1.Pod) {
}
}
}
if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) && utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) {
if obj.Spec.InitContainers[i].RestartPolicy != nil && *obj.Spec.InitContainers[i].RestartPolicy == v1.ContainerRestartPolicyAlways {
// For restartable init containers, set resize restart policy to default value (NotRequired), if not specified.
setDefaultResizePolicy(&obj.Spec.InitContainers[i])
}
}
}
// Pod Requests default values must be applied after container-level default values
@@ -223,42 +212,6 @@ func SetDefaults_Pod(obj *v1.Pod) {
defaultHostNetworkPorts(&obj.Spec.InitContainers)
}
}
// setDefaultResizePolicy sets resize restart policy to default value (NotRequired), if not specified.
func setDefaultResizePolicy(obj *v1.Container) {
if obj.Resources.Requests == nil && obj.Resources.Limits == nil {
return
}
resizePolicySpecified := make(map[v1.ResourceName]bool)
for _, p := range obj.ResizePolicy {
resizePolicySpecified[p.ResourceName] = true
}
defaultResizePolicy := func(resourceName v1.ResourceName) {
if _, found := resizePolicySpecified[resourceName]; !found {
obj.ResizePolicy = append(obj.ResizePolicy,
v1.ContainerResizePolicy{
ResourceName: resourceName,
RestartPolicy: v1.NotRequired,
})
}
}
if !resizePolicySpecified[v1.ResourceCPU] {
if _, exists := obj.Resources.Requests[v1.ResourceCPU]; exists {
defaultResizePolicy(v1.ResourceCPU)
} else if _, exists := obj.Resources.Limits[v1.ResourceCPU]; exists {
defaultResizePolicy(v1.ResourceCPU)
}
}
if !resizePolicySpecified[v1.ResourceMemory] {
if _, exists := obj.Resources.Requests[v1.ResourceMemory]; exists {
defaultResizePolicy(v1.ResourceMemory)
} else if _, exists := obj.Resources.Limits[v1.ResourceMemory]; exists {
defaultResizePolicy(v1.ResourceMemory)
}
}
}
func SetDefaults_PodSpec(obj *v1.PodSpec) {
// New fields added here will break upgrade tests:
// https://github.com/kubernetes/kubernetes/issues/69445
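For reference, the per-container defaulting removed in this hunk can be exercised on its own. The snippet below is a condensed restatement of the deleted setDefaultResizePolicy helper plus an illustrative main; it assumes the k8s.io/api and k8s.io/apimachinery modules are on the module path, and the example container is made up for demonstration, not taken from this change.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// setDefaultResizePolicy mirrors the deleted helper: for CPU and memory, if the
// container declares a request or limit for that resource and no resize policy
// was specified for it, append a NotRequired policy.
func setDefaultResizePolicy(ctr *v1.Container) {
	if ctr.Resources.Requests == nil && ctr.Resources.Limits == nil {
		return
	}
	specified := make(map[v1.ResourceName]bool)
	for _, p := range ctr.ResizePolicy {
		specified[p.ResourceName] = true
	}
	for _, name := range []v1.ResourceName{v1.ResourceCPU, v1.ResourceMemory} {
		if specified[name] {
			continue
		}
		_, hasReq := ctr.Resources.Requests[name]
		_, hasLim := ctr.Resources.Limits[name]
		if hasReq || hasLim {
			ctr.ResizePolicy = append(ctr.ResizePolicy, v1.ContainerResizePolicy{
				ResourceName:  name,
				RestartPolicy: v1.NotRequired,
			})
		}
	}
}

func main() {
	// Hypothetical container with a CPU request and a memory limit only.
	ctr := v1.Container{
		Name: "app",
		Resources: v1.ResourceRequirements{
			Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")},
			Limits:   v1.ResourceList{v1.ResourceMemory: resource.MustParse("200Mi")},
		},
	}
	setDefaultResizePolicy(&ctr)
	fmt.Println(ctr.ResizePolicy) // [{cpu NotRequired} {memory NotRequired}]
}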

View File

@@ -2982,230 +2982,6 @@ func TestSetDefaultServiceInternalTrafficPolicy(t *testing.T) {
}
}
func TestSetDefaultResizePolicy(t *testing.T) {
// verify we default to NotRequired restart policy for resize when resources are specified
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, true)
restartAlways := v1.ContainerRestartPolicyAlways
for desc, tc := range map[string]struct {
testContainer v1.Container
expectedResizePolicy []v1.ContainerResizePolicy
}{
"CPU and memory limits are specified": {
testContainer: v1.Container{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("200Mi"),
},
},
},
expectedResizePolicy: []v1.ContainerResizePolicy{
{
ResourceName: v1.ResourceCPU,
RestartPolicy: v1.NotRequired,
},
{
ResourceName: v1.ResourceMemory,
RestartPolicy: v1.NotRequired,
},
},
},
"CPU requests are specified": {
testContainer: v1.Container{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
},
},
},
expectedResizePolicy: []v1.ContainerResizePolicy{
{
ResourceName: v1.ResourceCPU,
RestartPolicy: v1.NotRequired,
},
},
},
"Memory limits are specified": {
testContainer: v1.Container{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("200Mi"),
},
},
},
expectedResizePolicy: []v1.ContainerResizePolicy{
{
ResourceName: v1.ResourceMemory,
RestartPolicy: v1.NotRequired,
},
},
},
"No resources are specified": {
testContainer: v1.Container{Name: "besteffort"},
expectedResizePolicy: nil,
},
"CPU and memory limits are specified with restartContainer resize policy for memory": {
testContainer: v1.Container{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("200Mi"),
},
},
ResizePolicy: []v1.ContainerResizePolicy{
{
ResourceName: v1.ResourceMemory,
RestartPolicy: v1.RestartContainer,
},
},
},
expectedResizePolicy: []v1.ContainerResizePolicy{
{
ResourceName: v1.ResourceMemory,
RestartPolicy: v1.RestartContainer,
},
{
ResourceName: v1.ResourceCPU,
RestartPolicy: v1.NotRequired,
},
},
},
"CPU requests and memory limits are specified with restartContainer resize policy for CPU": {
testContainer: v1.Container{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("200Mi"),
},
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
},
},
ResizePolicy: []v1.ContainerResizePolicy{
{
ResourceName: v1.ResourceCPU,
RestartPolicy: v1.RestartContainer,
},
},
},
expectedResizePolicy: []v1.ContainerResizePolicy{
{
ResourceName: v1.ResourceCPU,
RestartPolicy: v1.RestartContainer,
},
{
ResourceName: v1.ResourceMemory,
RestartPolicy: v1.NotRequired,
},
},
},
"CPU and memory requests are specified with restartContainer resize policy for both": {
testContainer: v1.Container{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("200Mi"),
},
},
ResizePolicy: []v1.ContainerResizePolicy{
{
ResourceName: v1.ResourceCPU,
RestartPolicy: v1.RestartContainer,
},
{
ResourceName: v1.ResourceMemory,
RestartPolicy: v1.RestartContainer,
},
},
},
expectedResizePolicy: []v1.ContainerResizePolicy{
{
ResourceName: v1.ResourceCPU,
RestartPolicy: v1.RestartContainer,
},
{
ResourceName: v1.ResourceMemory,
RestartPolicy: v1.RestartContainer,
},
},
},
"Ephemeral storage limits are specified": {
testContainer: v1.Container{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceEphemeralStorage: resource.MustParse("500Mi"),
},
},
},
expectedResizePolicy: nil,
},
"Ephemeral storage requests and CPU limits are specified": {
testContainer: v1.Container{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
},
Requests: v1.ResourceList{
v1.ResourceEphemeralStorage: resource.MustParse("500Mi"),
},
},
},
expectedResizePolicy: []v1.ContainerResizePolicy{
{
ResourceName: v1.ResourceCPU,
RestartPolicy: v1.NotRequired,
},
},
},
"Ephemeral storage requests and limits, memory requests with restartContainer policy are specified": {
testContainer: v1.Container{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceEphemeralStorage: resource.MustParse("500Mi"),
},
Requests: v1.ResourceList{
v1.ResourceEphemeralStorage: resource.MustParse("500Mi"),
v1.ResourceMemory: resource.MustParse("200Mi"),
},
},
ResizePolicy: []v1.ContainerResizePolicy{
{
ResourceName: v1.ResourceMemory,
RestartPolicy: v1.RestartContainer,
},
},
},
expectedResizePolicy: []v1.ContainerResizePolicy{
{
ResourceName: v1.ResourceMemory,
RestartPolicy: v1.RestartContainer,
},
},
},
} {
for _, isSidecarContainer := range []bool{true, false} {
t.Run(desc, func(t *testing.T) {
testPod := v1.Pod{}
if isSidecarContainer {
tc.testContainer.RestartPolicy = &restartAlways
testPod.Spec.InitContainers = append(testPod.Spec.InitContainers, tc.testContainer)
} else {
testPod.Spec.Containers = append(testPod.Spec.Containers, tc.testContainer)
}
output := roundTrip(t, runtime.Object(&testPod))
pod2 := output.(*v1.Pod)
if isSidecarContainer {
if !cmp.Equal(pod2.Spec.InitContainers[0].ResizePolicy, tc.expectedResizePolicy) {
t.Errorf("expected resize policy %+v, but got %+v for restartable init containers", tc.expectedResizePolicy, pod2.Spec.InitContainers[0].ResizePolicy)
}
} else if !cmp.Equal(pod2.Spec.Containers[0].ResizePolicy, tc.expectedResizePolicy) {
t.Errorf("expected resize policy %+v, but got %+v for normal containers", tc.expectedResizePolicy, pod2.Spec.Containers[0].ResizePolicy)
}
})
}
}
}
func TestSetDefaults_Volume(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ImageVolume, true)
for desc, tc := range map[string]struct {

View File

@@ -1805,37 +1805,36 @@ func allocatedContainerResourcesMatchStatus(allocatedPod *v1.Pod, c *v1.Containe
}
}
// Only compare resizeable resources, and only compare resources that are explicitly configured.
if hasCPUReq {
if cs.Resources.CPURequest == nil {
if !cpuReq.IsZero() {
return false
}
} else if !cpuReq.Equal(*cs.Resources.CPURequest) &&
(cpuReq.MilliValue() > cm.MinShares || cs.Resources.CPURequest.MilliValue() > cm.MinShares) {
// If both allocated & status CPU requests are at or below MinShares then they are considered equal.
// Only compare resizeable resources, and only compare resources that are explicitly configured.
if hasCPUReq {
if cs.Resources.CPURequest == nil {
if !cpuReq.IsZero() {
return false
}
} else if !cpuReq.Equal(*cs.Resources.CPURequest) &&
(cpuReq.MilliValue() > cm.MinShares || cs.Resources.CPURequest.MilliValue() > cm.MinShares) {
// If both allocated & status CPU requests are at or below MinShares then they are considered equal.
return false
}
if hasCPULim {
if cs.Resources.CPULimit == nil {
if !cpuLim.IsZero() {
return false
}
} else if !cpuLim.Equal(*cs.Resources.CPULimit) &&
(cpuLim.MilliValue() > cm.MinMilliCPULimit || cs.Resources.CPULimit.MilliValue() > cm.MinMilliCPULimit) {
// If both allocated & status CPU limits are at or below the minimum limit, then they are considered equal.
}
if hasCPULim {
if cs.Resources.CPULimit == nil {
if !cpuLim.IsZero() {
return false
}
} else if !cpuLim.Equal(*cs.Resources.CPULimit) &&
(cpuLim.MilliValue() > cm.MinMilliCPULimit || cs.Resources.CPULimit.MilliValue() > cm.MinMilliCPULimit) {
// If both allocated & status CPU limits are at or below the minimum limit, then they are considered equal.
return false
}
if hasMemLim {
if cs.Resources.MemoryLimit == nil {
if !memLim.IsZero() {
return false
}
} else if !memLim.Equal(*cs.Resources.MemoryLimit) {
}
if hasMemLim {
if cs.Resources.MemoryLimit == nil {
if !memLim.IsZero() {
return false
}
} else if !memLim.Equal(*cs.Resources.MemoryLimit) {
return false
}
}
}

View File

@@ -3825,7 +3825,6 @@ func Test_generateAPIPodStatus(t *testing.T) {
func Test_generateAPIPodStatusForInPlaceVPAEnabled(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, true)
testContainerName := "ctr0"
testContainerID := kubecontainer.ContainerID{Type: "test", ID: testContainerName}
@@ -3834,7 +3833,6 @@ func Test_generateAPIPodStatusForInPlaceVPAEnabled(t *testing.T) {
CPU1AndMem1GAndStorage2G[v1.ResourceEphemeralStorage] = resource.MustParse("2Gi")
CPU1AndMem1GAndStorage2GAndCustomResource := CPU1AndMem1GAndStorage2G.DeepCopy()
CPU1AndMem1GAndStorage2GAndCustomResource["unknown-resource"] = resource.MustParse("1")
containerRestartPolicyAlways := v1.ContainerRestartPolicyAlways
testKubecontainerPodStatus := kubecontainer.PodStatus{
ContainerStatuses: []*kubecontainer.Status{
@@ -3918,70 +3916,6 @@ func Test_generateAPIPodStatusForInPlaceVPAEnabled(t *testing.T) {
},
},
},
{
name: "custom resource in ResourcesAllocated in case of restartable init containers, resize should be null",
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "1234560",
Name: "foo0",
Namespace: "bar0",
},
Spec: v1.PodSpec{
NodeName: "machine",
InitContainers: []v1.Container{
{
Name: testContainerName,
Image: "img",
Resources: v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2GAndCustomResource, Requests: CPU1AndMem1GAndStorage2GAndCustomResource},
RestartPolicy: &containerRestartPolicyAlways,
},
},
RestartPolicy: v1.RestartPolicyAlways,
},
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
{
Name: testContainerName,
Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
AllocatedResources: CPU1AndMem1GAndStorage2GAndCustomResource,
},
},
Resize: "InProgress",
},
},
},
{
name: "cpu/memory resource in ResourcesAllocated in case of restartable init containers, resize should be null",
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "1234560",
Name: "foo0",
Namespace: "bar0",
},
Spec: v1.PodSpec{
NodeName: "machine",
InitContainers: []v1.Container{
{
Name: testContainerName,
Image: "img",
Resources: v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
RestartPolicy: &containerRestartPolicyAlways,
},
},
RestartPolicy: v1.RestartPolicyAlways,
},
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
{
Name: testContainerName,
Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
AllocatedResources: CPU1AndMem1GAndStorage2G,
},
},
Resize: "InProgress",
},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
@@ -6740,6 +6674,8 @@ func TestResolveRecursiveReadOnly(t *testing.T) {
}
func TestAllocatedResourcesMatchStatus(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, true)
containerRestartPolicyAlways := v1.ContainerRestartPolicyAlways
tests := []struct {
name string
allocatedResources v1.ResourceRequirements
@@ -6954,6 +6890,11 @@ func TestAllocatedResourcesMatchStatus(t *testing.T) {
Name: "c",
Resources: test.allocatedResources,
}},
InitContainers: []v1.Container{{
Name: "c1-init",
Resources: test.allocatedResources,
RestartPolicy: &containerRestartPolicyAlways,
}},
},
}
state := kubecontainer.ContainerStateRunning
@@ -6968,9 +6909,13 @@ func TestAllocatedResourcesMatchStatus(t *testing.T) {
State: state,
Resources: test.statusResources,
},
{
Name: "c1-init",
State: state,
Resources: test.statusResources,
},
},
}
match := allocatedResourcesMatchStatus(&allocatedPod, podStatus)
assert.Equal(t, test.expectMatch, match)
})

View File

@@ -2876,15 +2876,16 @@ func TestHandlePodResourcesResize(t *testing.T) {
kubelet.statusManager = status.NewFakeManager()
var originalPod *v1.Pod
var originalCtr *v1.Container
if isSidecarContainer {
originalPod = testPod2.DeepCopy()
originalPod.Spec.InitContainers[0].Resources.Requests = tt.originalRequests
originalPod.Spec.InitContainers[0].Resources.Limits = tt.originalLimits
originalCtr = &originalPod.Spec.InitContainers[0]
} else {
originalPod = testPod1.DeepCopy()
originalPod.Spec.Containers[0].Resources.Requests = tt.originalRequests
originalPod.Spec.Containers[0].Resources.Limits = tt.originalLimits
originalCtr = &originalPod.Spec.Containers[0]
}
originalCtr.Resources.Requests = tt.originalRequests
originalCtr.Resources.Limits = tt.originalLimits
kubelet.podManager.UpdatePod(originalPod)
@@ -2922,44 +2923,32 @@ func TestHandlePodResourcesResize(t *testing.T) {
}
}
if isSidecarContainer {
podStatus.ContainerStatuses = make([]*kubecontainer.Status, len(originalPod.Spec.InitContainers))
for i, c := range originalPod.Spec.InitContainers {
setContainerStatus(podStatus, &c, i)
}
} else {
podStatus.ContainerStatuses = make([]*kubecontainer.Status, len(originalPod.Spec.Containers))
for i, c := range originalPod.Spec.Containers {
setContainerStatus(podStatus, &c, i)
}
podStatus.ContainerStatuses = make([]*kubecontainer.Status, len(originalPod.Spec.Containers)+len(originalPod.Spec.InitContainers))
for i, c := range originalPod.Spec.InitContainers {
setContainerStatus(podStatus, &c, i)
}
for i, c := range originalPod.Spec.Containers {
setContainerStatus(podStatus, &c, i+len(originalPod.Spec.InitContainers))
}
now := kubelet.clock.Now()
// Put the container in backoff so we can confirm backoff is reset.
var backoffKey string
if isSidecarContainer {
backoffKey = kuberuntime.GetStableKey(originalPod, &originalPod.Spec.InitContainers[0])
} else {
backoffKey = kuberuntime.GetStableKey(originalPod, &originalPod.Spec.Containers[0])
}
backoffKey := kuberuntime.GetStableKey(originalPod, originalCtr)
kubelet.backOff.Next(backoffKey, now)
updatedPod, err := kubelet.handlePodResourcesResize(newPod, podStatus)
require.NoError(t, err)
var updatedPodCtr v1.Container
var newPodCtr v1.Container
if isSidecarContainer {
updatedPodCtr = updatedPod.Spec.InitContainers[0]
newPodCtr = newPod.Spec.InitContainers[0]
} else {
updatedPodCtr = updatedPod.Spec.Containers[0]
newPodCtr = newPod.Spec.Containers[0]
}
assert.Equal(t, tt.expectedAllocatedReqs, updatedPodCtr.Resources.Requests, "updated pod spec requests")
assert.Equal(t, tt.expectedAllocatedLims, updatedPodCtr.Resources.Limits, "updated pod spec limits")
alloc, found := kubelet.statusManager.GetContainerResourceAllocation(string(newPod.UID), newPodCtr.Name)
alloc, found := kubelet.statusManager.GetContainerResourceAllocation(string(newPod.UID), updatedPodCtr.Name)
require.True(t, found, "container allocation")
assert.Equal(t, tt.expectedAllocatedReqs, alloc.Requests, "stored container request allocation")
assert.Equal(t, tt.expectedAllocatedLims, alloc.Limits, "stored container limit allocation")

View File

@@ -563,7 +563,6 @@ func IsInPlacePodVerticalScalingAllowed(pod *v1.Pod) bool {
// TODO(vibansal): Make this function agnostic to whether it is dealing with a restartable init container or not (i.e. remove the argument `isRestartableInitContainer`).
func (m *kubeGenericRuntimeManager) computePodResizeAction(pod *v1.Pod, containerIdx int, isRestartableInitContainer bool, kubeContainerStatus *kubecontainer.Status, changes *podActions) (keepContainer bool) {
var container v1.Container
if isRestartableInitContainer {
container = pod.Spec.InitContainers[containerIdx]
} else {
@@ -1394,7 +1393,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(ctx context.Context, pod *v1.Pod, po
}
}
// Step 7: For containers in podContainerChanges.ContainersToUpdate[CPU,Memory] or podContainerChanges.InitContainersToUpdate[CPU,Memory] lists, invoke UpdateContainerResources
// Step 7: For containers in podContainerChanges.ContainersToUpdate[CPU,Memory] list, invoke UpdateContainerResources
if IsInPlacePodVerticalScalingAllowed(pod) {
if len(podContainerChanges.ContainersToUpdate) > 0 || podContainerChanges.UpdatePodResources {
m.doPodResizeAction(pod, podContainerChanges, &result)

View File

@@ -2590,130 +2590,11 @@ func TestComputePodActionsForPodResize(t *testing.T) {
}
func TestUpdatePodContainerResources(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)
fakeRuntime, _, m, err := createTestRuntimeManager()
m.machineInfo.MemoryCapacity = 17179860387 // 16GB
assert.NoError(t, err)
cpu100m := resource.MustParse("100m")
cpu150m := resource.MustParse("150m")
cpu200m := resource.MustParse("200m")
cpu250m := resource.MustParse("250m")
cpu300m := resource.MustParse("300m")
cpu350m := resource.MustParse("350m")
mem100M := resource.MustParse("100Mi")
mem150M := resource.MustParse("150Mi")
mem200M := resource.MustParse("200Mi")
mem250M := resource.MustParse("250Mi")
mem300M := resource.MustParse("300Mi")
mem350M := resource.MustParse("350Mi")
res100m100Mi := v1.ResourceList{v1.ResourceCPU: cpu100m, v1.ResourceMemory: mem100M}
res150m100Mi := v1.ResourceList{v1.ResourceCPU: cpu150m, v1.ResourceMemory: mem100M}
res100m150Mi := v1.ResourceList{v1.ResourceCPU: cpu100m, v1.ResourceMemory: mem150M}
res150m150Mi := v1.ResourceList{v1.ResourceCPU: cpu150m, v1.ResourceMemory: mem150M}
res200m200Mi := v1.ResourceList{v1.ResourceCPU: cpu200m, v1.ResourceMemory: mem200M}
res250m200Mi := v1.ResourceList{v1.ResourceCPU: cpu250m, v1.ResourceMemory: mem200M}
res200m250Mi := v1.ResourceList{v1.ResourceCPU: cpu200m, v1.ResourceMemory: mem250M}
res250m250Mi := v1.ResourceList{v1.ResourceCPU: cpu250m, v1.ResourceMemory: mem250M}
res300m300Mi := v1.ResourceList{v1.ResourceCPU: cpu300m, v1.ResourceMemory: mem300M}
res350m300Mi := v1.ResourceList{v1.ResourceCPU: cpu350m, v1.ResourceMemory: mem300M}
res300m350Mi := v1.ResourceList{v1.ResourceCPU: cpu300m, v1.ResourceMemory: mem350M}
res350m350Mi := v1.ResourceList{v1.ResourceCPU: cpu350m, v1.ResourceMemory: mem350M}
pod, _ := makeBasePodAndStatus()
makeAndSetFakePod(t, m, fakeRuntime, pod)
for dsc, tc := range map[string]struct {
resourceName v1.ResourceName
apiSpecResources []v1.ResourceRequirements
apiStatusResources []v1.ResourceRequirements
requiresRestart []bool
invokeUpdateResources bool
expectedCurrentLimits []v1.ResourceList
expectedCurrentRequests []v1.ResourceList
}{
"Guaranteed QoS Pod - CPU & memory resize requested, update CPU": {
resourceName: v1.ResourceCPU,
apiSpecResources: []v1.ResourceRequirements{
{Limits: res150m150Mi, Requests: res150m150Mi},
{Limits: res250m250Mi, Requests: res250m250Mi},
{Limits: res350m350Mi, Requests: res350m350Mi},
},
apiStatusResources: []v1.ResourceRequirements{
{Limits: res100m100Mi, Requests: res100m100Mi},
{Limits: res200m200Mi, Requests: res200m200Mi},
{Limits: res300m300Mi, Requests: res300m300Mi},
},
requiresRestart: []bool{false, false, false},
invokeUpdateResources: true,
expectedCurrentLimits: []v1.ResourceList{res150m100Mi, res250m200Mi, res350m300Mi},
expectedCurrentRequests: []v1.ResourceList{res150m100Mi, res250m200Mi, res350m300Mi},
},
"Guaranteed QoS Pod - CPU & memory resize requested, update memory": {
resourceName: v1.ResourceMemory,
apiSpecResources: []v1.ResourceRequirements{
{Limits: res150m150Mi, Requests: res150m150Mi},
{Limits: res250m250Mi, Requests: res250m250Mi},
{Limits: res350m350Mi, Requests: res350m350Mi},
},
apiStatusResources: []v1.ResourceRequirements{
{Limits: res100m100Mi, Requests: res100m100Mi},
{Limits: res200m200Mi, Requests: res200m200Mi},
{Limits: res300m300Mi, Requests: res300m300Mi},
},
requiresRestart: []bool{false, false, false},
invokeUpdateResources: true,
expectedCurrentLimits: []v1.ResourceList{res100m150Mi, res200m250Mi, res300m350Mi},
expectedCurrentRequests: []v1.ResourceList{res100m150Mi, res200m250Mi, res300m350Mi},
},
} {
var containersToUpdate []containerToUpdateInfo
for idx := range pod.Spec.Containers {
// default resize policy when pod resize feature is enabled
pod.Spec.Containers[idx].Resources = tc.apiSpecResources[idx]
pod.Status.ContainerStatuses[idx].Resources = &tc.apiStatusResources[idx]
cInfo := containerToUpdateInfo{
container: &pod.Spec.Containers[idx],
kubeContainerID: kubecontainer.ContainerID{},
desiredContainerResources: containerResources{
memoryLimit: tc.apiSpecResources[idx].Limits.Memory().Value(),
memoryRequest: tc.apiSpecResources[idx].Requests.Memory().Value(),
cpuLimit: tc.apiSpecResources[idx].Limits.Cpu().MilliValue(),
cpuRequest: tc.apiSpecResources[idx].Requests.Cpu().MilliValue(),
},
currentContainerResources: &containerResources{
memoryLimit: tc.apiStatusResources[idx].Limits.Memory().Value(),
memoryRequest: tc.apiStatusResources[idx].Requests.Memory().Value(),
cpuLimit: tc.apiStatusResources[idx].Limits.Cpu().MilliValue(),
cpuRequest: tc.apiStatusResources[idx].Requests.Cpu().MilliValue(),
},
}
containersToUpdate = append(containersToUpdate, cInfo)
}
fakeRuntime.Called = []string{}
err := m.updatePodContainerResources(pod, tc.resourceName, containersToUpdate)
require.NoError(t, err, dsc)
if tc.invokeUpdateResources {
assert.Contains(t, fakeRuntime.Called, "UpdateContainerResources", dsc)
}
for idx := range pod.Spec.Containers {
assert.Equal(t, tc.expectedCurrentLimits[idx].Memory().Value(), containersToUpdate[idx].currentContainerResources.memoryLimit, dsc)
assert.Equal(t, tc.expectedCurrentRequests[idx].Memory().Value(), containersToUpdate[idx].currentContainerResources.memoryRequest, dsc)
assert.Equal(t, tc.expectedCurrentLimits[idx].Cpu().MilliValue(), containersToUpdate[idx].currentContainerResources.cpuLimit, dsc)
assert.Equal(t, tc.expectedCurrentRequests[idx].Cpu().MilliValue(), containersToUpdate[idx].currentContainerResources.cpuRequest, dsc)
}
}
}
// TODO(vibansal): Refactor the code so that (TestUpdatePodContainerResources) works for both regular and restartable init containers.
// This function works in the same way as (TestUpdatePodContainerResources) except that it checks only restartable init containers.
func TestUpdatePodRestartableInitContainerResources(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, true)
fakeRuntime, _, m, err := createTestRuntimeManager()
m.machineInfo.MemoryCapacity = 17179860387 // 16GB
require.NoError(t, err)
assert.NoError(t, err)
cpu100m := resource.MustParse("100m")
cpu150m := resource.MustParse("150m")
@@ -2787,41 +2668,58 @@ func TestUpdatePodRestartableInitContainerResources(t *testing.T) {
expectedCurrentRequests: []v1.ResourceList{res100m150Mi, res200m250Mi, res300m350Mi},
},
} {
var initContainersToUpdate []containerToUpdateInfo
for idx := range pod.Spec.InitContainers {
// default resize policy when pod resize feature is enabled
pod.Spec.InitContainers[idx].Resources = tc.apiSpecResources[idx]
pod.Status.ContainerStatuses[idx].Resources = &tc.apiStatusResources[idx]
cInfo := containerToUpdateInfo{
container: &pod.Spec.InitContainers[idx],
kubeContainerID: kubecontainer.ContainerID{},
desiredContainerResources: containerResources{
memoryLimit: tc.apiSpecResources[idx].Limits.Memory().Value(),
memoryRequest: tc.apiSpecResources[idx].Requests.Memory().Value(),
cpuLimit: tc.apiSpecResources[idx].Limits.Cpu().MilliValue(),
cpuRequest: tc.apiSpecResources[idx].Requests.Cpu().MilliValue(),
},
currentContainerResources: &containerResources{
memoryLimit: tc.apiStatusResources[idx].Limits.Memory().Value(),
memoryRequest: tc.apiStatusResources[idx].Requests.Memory().Value(),
cpuLimit: tc.apiStatusResources[idx].Limits.Cpu().MilliValue(),
cpuRequest: tc.apiStatusResources[idx].Requests.Cpu().MilliValue(),
},
for _, allSideCarCtrs := range []bool{false, true} {
var containersToUpdate []containerToUpdateInfo
containerToUpdateInfo := func(container *v1.Container, idx int) containerToUpdateInfo {
return containerToUpdateInfo{
container: container,
kubeContainerID: kubecontainer.ContainerID{},
desiredContainerResources: containerResources{
memoryLimit: tc.apiSpecResources[idx].Limits.Memory().Value(),
memoryRequest: tc.apiSpecResources[idx].Requests.Memory().Value(),
cpuLimit: tc.apiSpecResources[idx].Limits.Cpu().MilliValue(),
cpuRequest: tc.apiSpecResources[idx].Requests.Cpu().MilliValue(),
},
currentContainerResources: &containerResources{
memoryLimit: tc.apiStatusResources[idx].Limits.Memory().Value(),
memoryRequest: tc.apiStatusResources[idx].Requests.Memory().Value(),
cpuLimit: tc.apiStatusResources[idx].Limits.Cpu().MilliValue(),
cpuRequest: tc.apiStatusResources[idx].Requests.Cpu().MilliValue(),
},
}
}
initContainersToUpdate = append(initContainersToUpdate, cInfo)
}
fakeRuntime.Called = []string{}
err := m.updatePodContainerResources(pod, tc.resourceName, initContainersToUpdate)
require.NoError(t, err, dsc)
if tc.invokeUpdateResources {
assert.Contains(t, fakeRuntime.Called, "UpdateContainerResources", dsc)
}
for idx := range pod.Spec.InitContainers {
assert.Equal(t, tc.expectedCurrentLimits[idx].Memory().Value(), initContainersToUpdate[idx].currentContainerResources.memoryLimit, dsc)
assert.Equal(t, tc.expectedCurrentRequests[idx].Memory().Value(), initContainersToUpdate[idx].currentContainerResources.memoryRequest, dsc)
assert.Equal(t, tc.expectedCurrentLimits[idx].Cpu().MilliValue(), initContainersToUpdate[idx].currentContainerResources.cpuLimit, dsc)
assert.Equal(t, tc.expectedCurrentRequests[idx].Cpu().MilliValue(), initContainersToUpdate[idx].currentContainerResources.cpuRequest, dsc)
if allSideCarCtrs {
for idx := range pod.Spec.InitContainers {
// default resize policy when pod resize feature is enabled
pod.Spec.InitContainers[idx].Resources = tc.apiSpecResources[idx]
pod.Status.ContainerStatuses[idx].Resources = &tc.apiStatusResources[idx]
cinfo := containerToUpdateInfo(&pod.Spec.InitContainers[idx], idx)
containersToUpdate = append(containersToUpdate, cinfo)
}
} else {
for idx := range pod.Spec.Containers {
// default resize policy when pod resize feature is enabled
pod.Spec.Containers[idx].Resources = tc.apiSpecResources[idx]
pod.Status.ContainerStatuses[idx].Resources = &tc.apiStatusResources[idx]
cinfo := containerToUpdateInfo(&pod.Spec.Containers[idx], idx)
containersToUpdate = append(containersToUpdate, cinfo)
}
}
fakeRuntime.Called = []string{}
err := m.updatePodContainerResources(pod, tc.resourceName, containersToUpdate)
require.NoError(t, err, dsc)
if tc.invokeUpdateResources {
assert.Contains(t, fakeRuntime.Called, "UpdateContainerResources", dsc)
}
for idx := range len(containersToUpdate) {
assert.Equal(t, tc.expectedCurrentLimits[idx].Memory().Value(), containersToUpdate[idx].currentContainerResources.memoryLimit, dsc)
assert.Equal(t, tc.expectedCurrentRequests[idx].Memory().Value(), containersToUpdate[idx].currentContainerResources.memoryRequest, dsc)
assert.Equal(t, tc.expectedCurrentLimits[idx].Cpu().MilliValue(), containersToUpdate[idx].currentContainerResources.cpuLimit, dsc)
assert.Equal(t, tc.expectedCurrentRequests[idx].Cpu().MilliValue(), containersToUpdate[idx].currentContainerResources.cpuRequest, dsc)
}
}
}
}
@@ -3120,7 +3018,7 @@ func TestDoPodResizeAction(t *testing.T) {
}
updateInfo := containerToUpdateInfo{
apiContainerIdx: 0,
container: &pod.Spec.Containers[0],
kubeContainerID: kps.ContainerStatuses[0].ID,
desiredContainerResources: tc.desiredResources,
currentContainerResources: &tc.currentResources,

View File

@@ -289,7 +289,7 @@ var ResizeStrategy = podResizeStrategy{
),
}
// dropNonResizeUpdates discards all changes except for pod.Spec.Containers[*].Resources, pod.Spec.InitContainers[*].Resources, ResizePolicy, and certain metadata
// dropNonResizeUpdates discards all changes except for pod.Spec.Containers[*].Resources, pod.Spec.InitContainers[*].Resources, ResizePolicy and certain metadata
func dropNonResizeUpdates(newPod, oldPod *api.Pod) *api.Pod {
pod := dropPodUpdates(newPod, oldPod)
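The body of dropNonResizeUpdates is truncated by this hunk. Going only by its doc comment, the effect is roughly the standalone sketch below; the simplified Pod/Container types and the copyResources closure are illustrative stand-ins, not the upstream implementation, and the carry-over of ResizePolicy and the allowed metadata fields is omitted here.

package main

import "fmt"

// Minimal stand-ins for the api.Pod fields relevant to a resize update.
type Resources struct{ CPU, Memory string }

type Container struct {
	Name      string
	Image     string
	Resources Resources
}

type Pod struct {
	Containers     []Container
	InitContainers []Container
}

// dropNonResizeUpdatesSketch keeps oldPod as the base object and copies over only
// the per-container Resources from newPod; every other change (image bumps, label
// edits, ...) is discarded. ResizePolicy and the permitted metadata would be
// carried over the same way.
func dropNonResizeUpdatesSketch(newPod, oldPod Pod) Pod {
	out := oldPod
	out.Containers = append([]Container(nil), oldPod.Containers...)
	out.InitContainers = append([]Container(nil), oldPod.InitContainers...)
	copyResources := func(dst, src []Container) {
		for i := range dst {
			if i < len(src) {
				dst[i].Resources = src[i].Resources
			}
		}
	}
	copyResources(out.Containers, newPod.Containers)
	copyResources(out.InitContainers, newPod.InitContainers)
	return out
}

func main() {
	oldPod := Pod{Containers: []Container{{Name: "app", Image: "img:v1", Resources: Resources{CPU: "100m", Memory: "200Mi"}}}}
	newPod := Pod{Containers: []Container{{Name: "app", Image: "img:v2", Resources: Resources{CPU: "200m", Memory: "200Mi"}}}}
	// Only the resources change survives; the image bump is dropped.
	fmt.Printf("%+v\n", dropNonResizeUpdatesSketch(newPod, oldPod))
}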

View File

@@ -222,38 +222,38 @@ func separateContainerStatuses(tcInfo []ResizableContainerInfo) ([]v1.ContainerS
func VerifyPodResizePolicy(gotPod *v1.Pod, wantInfo []ResizableContainerInfo) {
ginkgo.GinkgoHelper()
wantInitCtrs, wantCtrs := separateContainers(wantInfo)
verifyPodContainersResizePolicy(gotPod.Spec.InitContainers, wantInitCtrs)
verifyPodContainersResizePolicy(gotPod.Spec.Containers, wantCtrs)
}
func verifyPodContainersResizePolicy(gotCtrs []v1.Container, wantCtrs []v1.Container) {
ginkgo.GinkgoHelper()
gotCtrs := append(append([]v1.Container{}, gotPod.Spec.Containers...), gotPod.Spec.InitContainers...)
var wantCtrs []v1.Container
for _, ci := range wantInfo {
wantCtrs = append(wantCtrs, makeResizableContainer(ci))
}
gomega.Expect(gotCtrs).To(gomega.HaveLen(len(wantCtrs)), "number of containers in pod spec should match")
for i, wantCtr := range wantCtrs {
gotCtr := gotCtrs[i]
gomega.Expect(gotCtr.Name).To(gomega.Equal(wantCtr.Name))
gomega.Expect(gotCtr.ResizePolicy).To(gomega.Equal(wantCtr.ResizePolicy))
for _, wantCtr := range wantCtrs {
for _, gotCtr := range gotCtrs {
if wantCtr.Name != gotCtr.Name {
continue
}
gomega.Expect(v1.Container{Name: gotCtr.Name, ResizePolicy: gotCtr.ResizePolicy}).To(gomega.Equal(v1.Container{Name: wantCtr.Name, ResizePolicy: wantCtr.ResizePolicy}))
}
}
}
func VerifyPodResources(gotPod *v1.Pod, wantInfo []ResizableContainerInfo) {
ginkgo.GinkgoHelper()
wantInitCtrs, wantCtrs := separateContainers(wantInfo)
verifyPodContainersResources(gotPod.Spec.InitContainers, wantInitCtrs)
verifyPodContainersResources(gotPod.Spec.Containers, wantCtrs)
}
func verifyPodContainersResources(gotCtrs []v1.Container, wantCtrs []v1.Container) {
ginkgo.GinkgoHelper()
gotCtrs := append(append([]v1.Container{}, gotPod.Spec.Containers...), gotPod.Spec.InitContainers...)
var wantCtrs []v1.Container
for _, ci := range wantInfo {
wantCtrs = append(wantCtrs, makeResizableContainer(ci))
}
gomega.Expect(gotCtrs).To(gomega.HaveLen(len(wantCtrs)), "number of containers in pod spec should match")
for i, wantCtr := range wantCtrs {
gotCtr := gotCtrs[i]
gomega.Expect(gotCtr.Name).To(gomega.Equal(wantCtr.Name))
gomega.Expect(gotCtr.Resources).To(gomega.Equal(wantCtr.Resources))
for _, wantCtr := range wantCtrs {
for _, gotCtr := range gotCtrs {
if wantCtr.Name != gotCtr.Name {
continue
}
gomega.Expect(v1.Container{Name: gotCtr.Name, Resources: gotCtr.Resources}).To(gomega.Equal(v1.Container{Name: wantCtr.Name, Resources: wantCtr.Resources}))
}
}
}