resolve merge conflicts

vivzbansal 2024-11-06 21:25:05 +00:00
parent e07aeb7c8b
commit d1fac494f4
24 changed files with 1652 additions and 216 deletions

View File

@ -1289,19 +1289,35 @@ func MarkPodProposedForResize(oldPod, newPod *api.Pod) {
return
}
if utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) && len(newPod.Spec.InitContainers) != len(oldPod.Spec.InitContainers) {
return
}
for i, c := range newPod.Spec.Containers {
if c.Name != oldPod.Spec.Containers[i].Name {
return // Update is invalid (container mismatch): let validation handle it.
}
if c.Resources.Requests == nil || cmp.Equal(oldPod.Spec.Containers[i].Resources, c.Resources) {
continue
}
newPod.Status.Resize = api.PodResizeStatusProposed
return
}
if utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) {
for i, c := range newPod.Spec.InitContainers {
if IsRestartableInitContainer(&c) {
if c.Name != oldPod.Spec.InitContainers[i].Name {
return // Update is invalid (container mismatch): let validation handle it.
}
if c.Resources.Requests == nil || cmp.Equal(oldPod.Spec.InitContainers[i].Resources, c.Resources) {
continue
}
newPod.Status.Resize = api.PodResizeStatusProposed
break
}
}
}
}
// KEP: https://kep.k8s.io/4639
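
For reference, both loops above gate on the same sidecar test. A minimal sketch of that helper, assuming it mirrors the inline RestartPolicy check that the defaults.go hunk below performs (the real implementation lives in the shared pod util package):

func IsRestartableInitContainer(c *api.Container) bool {
// Only init containers declaring restartPolicy: Always are sidecars;
// a nil policy means an ordinary run-to-completion init container.
if c == nil || c.RestartPolicy == nil {
return false
}
return *c.RestartPolicy == api.ContainerRestartPolicyAlways
}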

View File

@ -2950,11 +2950,13 @@ func TestDropSidecarContainers(t *testing.T) {
}
func TestMarkPodProposedForResize(t *testing.T) {
containerRestartPolicyAlways := api.ContainerRestartPolicyAlways
testCases := []struct {
desc string
newPodSpec api.PodSpec
oldPodSpec api.PodSpec
expectProposedResize bool
hasSidecarContainer bool
}{
{
desc: "nil requests",
@ -3208,9 +3210,321 @@ func TestMarkPodProposedForResize(t *testing.T) {
},
expectProposedResize: false,
},
{
desc: "resources unchanged with sidecar containers",
hasSidecarContainer: true,
newPodSpec: api.PodSpec{
Containers: []api.Container{
{
Name: "c1",
Image: "image",
Resources: api.ResourceRequirements{
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
},
},
},
InitContainers: []api.Container{
{
Name: "i1",
Image: "image",
Resources: api.ResourceRequirements{
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
},
RestartPolicy: &containerRestartPolicyAlways,
},
},
},
oldPodSpec: api.PodSpec{
Containers: []api.Container{
{
Name: "c1",
Image: "image",
Resources: api.ResourceRequirements{
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
},
},
},
InitContainers: []api.Container{
{
Name: "i1",
Image: "image",
Resources: api.ResourceRequirements{
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
},
RestartPolicy: &containerRestartPolicyAlways,
},
},
},
expectProposedResize: false,
},
{
desc: "requests resized with sidecar containers",
hasSidecarContainer: true,
newPodSpec: api.PodSpec{
Containers: []api.Container{
{
Name: "c1",
Image: "image",
Resources: api.ResourceRequirements{
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
},
},
{
Name: "c2",
Image: "image",
Resources: api.ResourceRequirements{
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("400m")},
},
},
},
InitContainers: []api.Container{
{
Name: "i1",
Image: "image",
Resources: api.ResourceRequirements{
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("400m")},
},
RestartPolicy: &containerRestartPolicyAlways,
},
},
},
oldPodSpec: api.PodSpec{
Containers: []api.Container{
{
Name: "c1",
Image: "image",
Resources: api.ResourceRequirements{
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
},
},
{
Name: "c2",
Image: "image",
Resources: api.ResourceRequirements{
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("400m")},
},
},
},
InitContainers: []api.Container{
{
Name: "i1",
Image: "image",
Resources: api.ResourceRequirements{
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
},
RestartPolicy: &containerRestartPolicyAlways,
},
},
},
expectProposedResize: true,
},
{
desc: "limits resized with sidecar containers",
hasSidecarContainer: true,
newPodSpec: api.PodSpec{
Containers: []api.Container{
{
Name: "c1",
Image: "image",
Resources: api.ResourceRequirements{
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
},
},
{
Name: "c2",
Image: "image",
Resources: api.ResourceRequirements{
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("400m")},
},
},
},
InitContainers: []api.Container{
{
Name: "i1",
Image: "image",
Resources: api.ResourceRequirements{
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("400m")},
},
RestartPolicy: &containerRestartPolicyAlways,
},
},
},
oldPodSpec: api.PodSpec{
Containers: []api.Container{
{
Name: "c1",
Image: "image",
Resources: api.ResourceRequirements{
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
},
},
{
Name: "c2",
Image: "image",
Resources: api.ResourceRequirements{
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("500m")},
},
},
},
InitContainers: []api.Container{
{
Name: "i1",
Image: "image",
Resources: api.ResourceRequirements{
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("500m")},
},
RestartPolicy: &containerRestartPolicyAlways,
},
},
},
expectProposedResize: true,
},
{
desc: "the number of sidecar containers in the pod has increased; no action should be taken.",
newPodSpec: api.PodSpec{
InitContainers: []api.Container{
{
Name: "i1",
Image: "image",
Resources: api.ResourceRequirements{
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
},
RestartPolicy: &containerRestartPolicyAlways,
},
{
Name: "i2",
Image: "image",
Resources: api.ResourceRequirements{
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("400m")},
},
RestartPolicy: &containerRestartPolicyAlways,
},
},
},
oldPodSpec: api.PodSpec{
InitContainers: []api.Container{
{
Name: "i1",
Image: "image",
Resources: api.ResourceRequirements{
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
},
RestartPolicy: &containerRestartPolicyAlways,
},
},
},
expectProposedResize: false,
},
{
desc: "the number of sidecar containers in the pod has decreased; no action should be taken.",
newPodSpec: api.PodSpec{
InitContainers: []api.Container{
{
Name: "i1",
Image: "image",
Resources: api.ResourceRequirements{
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
},
RestartPolicy: &containerRestartPolicyAlways,
},
},
},
oldPodSpec: api.PodSpec{
InitContainers: []api.Container{
{
Name: "i1",
Image: "image",
Resources: api.ResourceRequirements{
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
},
RestartPolicy: &containerRestartPolicyAlways,
},
{
Name: "i2",
Image: "image",
Resources: api.ResourceRequirements{
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
},
RestartPolicy: &containerRestartPolicyAlways,
},
},
},
expectProposedResize: false,
},
{
desc: "sidecar containers reordered",
newPodSpec: api.PodSpec{
InitContainers: []api.Container{
{
Name: "i1",
Image: "image",
Resources: api.ResourceRequirements{
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
},
RestartPolicy: &containerRestartPolicyAlways,
},
{
Name: "i2",
Image: "image",
Resources: api.ResourceRequirements{
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("400m")},
},
RestartPolicy: &containerRestartPolicyAlways,
},
},
},
oldPodSpec: api.PodSpec{
InitContainers: []api.Container{
{
Name: "i2",
Image: "image",
Resources: api.ResourceRequirements{
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
},
RestartPolicy: &containerRestartPolicyAlways,
},
{
Name: "i1",
Image: "image",
Resources: api.ResourceRequirements{
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("400m")},
},
RestartPolicy: &containerRestartPolicyAlways,
},
},
},
expectProposedResize: false,
},
}
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, tc.hasSidecarContainer)
newPod := &api.Pod{Spec: tc.newPodSpec}
newPodUnchanged := newPod.DeepCopy()
oldPod := &api.Pod{Spec: tc.oldPodSpec}

View File

@ -166,6 +166,7 @@ func SetDefaults_Service(obj *v1.Service) {
}
}
func SetDefaults_Pod(obj *v1.Pod) {
// If limits are specified, but requests are not, default requests to limits
// This is done here rather than a more specific defaulting pass on v1.ResourceRequirements
@ -182,6 +183,11 @@ func SetDefaults_Pod(obj *v1.Pod) {
}
}
}
if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) &&
(obj.Spec.Containers[i].Resources.Requests != nil || obj.Spec.Containers[i].Resources.Limits != nil) {
// For normal containers, set resize restart policy to the default value (NotRequired), if not specified.
setDefaultResizePolicy(&obj.Spec.Containers[i])
}
}
for i := range obj.Spec.InitContainers {
if obj.Spec.InitContainers[i].Resources.Limits != nil {
@ -194,6 +200,17 @@ func SetDefaults_Pod(obj *v1.Pod) {
}
}
}
if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) && utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) {
isRestartableInitContainer := false
c := obj.Spec.InitContainers[i]
if c.RestartPolicy != nil && *c.RestartPolicy == v1.ContainerRestartPolicyAlways {
isRestartableInitContainer = true
}
if isRestartableInitContainer && (c.Resources.Requests != nil || c.Resources.Limits != nil) {
// For restartable init containers, set resize restart policy to default value (NotRequired), if not specified.
setDefaultResizePolicy(&obj.Spec.InitContainers[i])
}
}
}
// Pod Requests default values must be applied after container-level default values
@ -212,6 +229,30 @@ func SetDefaults_Pod(obj *v1.Pod) {
defaultHostNetworkPorts(&obj.Spec.InitContainers)
}
}
// setDefaultResizePolicy sets the resize restart policy to the default value (NotRequired), if not specified.
func setDefaultResizePolicy(obj *v1.Container) {
resizePolicySpecified := make(map[v1.ResourceName]bool)
for _, p := range obj.ResizePolicy {
resizePolicySpecified[p.ResourceName] = true
}
defaultResizePolicy := func(resourceName v1.ResourceName) {
if _, found := resizePolicySpecified[resourceName]; !found {
obj.ResizePolicy = append(obj.ResizePolicy,
v1.ContainerResizePolicy{
ResourceName: resourceName,
RestartPolicy: v1.NotRequired,
})
}
}
if _, exists := obj.Resources.Requests[v1.ResourceCPU]; exists {
defaultResizePolicy(v1.ResourceCPU)
}
if _, exists := obj.Resources.Requests[v1.ResourceMemory]; exists {
defaultResizePolicy(v1.ResourceMemory)
}
}
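
A hedged usage sketch of the defaulting above (the container literal is illustrative, not from the patch): a container with only a CPU request gets a single NotRequired CPU entry, and entries the user already set are preserved:

c := v1.Container{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")},
},
}
setDefaultResizePolicy(&c)
// c.ResizePolicy == [{ResourceName: cpu, RestartPolicy: NotRequired}]
// No memory entry is added (no memory request), and ephemeral-storage never
// gets an entry because only CPU and memory are defaulted.

Note that the function inspects only Resources.Requests; the limits-only cases in the tests below are still covered because SetDefaults_Pod copies limits into unset requests before this helper runs.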
func SetDefaults_PodSpec(obj *v1.PodSpec) {
// New fields added here will break upgrade tests:
// https://github.com/kubernetes/kubernetes/issues/69445

View File

@ -2982,6 +2982,232 @@ func TestSetDefaultServiceInternalTrafficPolicy(t *testing.T) {
}
}
func TestSetDefaultResizePolicy(t *testing.T) {
// verify we default to NotRequired restart policy for resize when resources are specified
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)
restartAlways := v1.ContainerRestartPolicyAlways
for desc, tc := range map[string]struct {
testContainer v1.Container
expectedResizePolicy []v1.ContainerResizePolicy
}{
"CPU and memory limits are specified": {
testContainer: v1.Container{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("200Mi"),
},
},
},
expectedResizePolicy: []v1.ContainerResizePolicy{
{
ResourceName: v1.ResourceCPU,
RestartPolicy: v1.NotRequired,
},
{
ResourceName: v1.ResourceMemory,
RestartPolicy: v1.NotRequired,
},
},
},
"CPU requests are specified": {
testContainer: v1.Container{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
},
},
},
expectedResizePolicy: []v1.ContainerResizePolicy{
{
ResourceName: v1.ResourceCPU,
RestartPolicy: v1.NotRequired,
},
},
},
"Memory limits are specified": {
testContainer: v1.Container{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("200Mi"),
},
},
},
expectedResizePolicy: []v1.ContainerResizePolicy{
{
ResourceName: v1.ResourceMemory,
RestartPolicy: v1.NotRequired,
},
},
},
"No resources are specified": {
testContainer: v1.Container{Name: "besteffort"},
expectedResizePolicy: nil,
},
"CPU and memory limits are specified with restartContainer resize policy for memory": {
testContainer: v1.Container{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("200Mi"),
},
},
ResizePolicy: []v1.ContainerResizePolicy{
{
ResourceName: v1.ResourceMemory,
RestartPolicy: v1.RestartContainer,
},
},
},
expectedResizePolicy: []v1.ContainerResizePolicy{
{
ResourceName: v1.ResourceMemory,
RestartPolicy: v1.RestartContainer,
},
{
ResourceName: v1.ResourceCPU,
RestartPolicy: v1.NotRequired,
},
},
},
"CPU requests and memory limits are specified with restartContainer resize policy for CPU": {
testContainer: v1.Container{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("200Mi"),
},
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
},
},
ResizePolicy: []v1.ContainerResizePolicy{
{
ResourceName: v1.ResourceCPU,
RestartPolicy: v1.RestartContainer,
},
},
},
expectedResizePolicy: []v1.ContainerResizePolicy{
{
ResourceName: v1.ResourceCPU,
RestartPolicy: v1.RestartContainer,
},
{
ResourceName: v1.ResourceMemory,
RestartPolicy: v1.NotRequired,
},
},
},
"CPU and memory requests are specified with restartContainer resize policy for both": {
testContainer: v1.Container{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("200Mi"),
},
},
ResizePolicy: []v1.ContainerResizePolicy{
{
ResourceName: v1.ResourceCPU,
RestartPolicy: v1.RestartContainer,
},
{
ResourceName: v1.ResourceMemory,
RestartPolicy: v1.RestartContainer,
},
},
},
expectedResizePolicy: []v1.ContainerResizePolicy{
{
ResourceName: v1.ResourceCPU,
RestartPolicy: v1.RestartContainer,
},
{
ResourceName: v1.ResourceMemory,
RestartPolicy: v1.RestartContainer,
},
},
},
"Ephemeral storage limits are specified": {
testContainer: v1.Container{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceEphemeralStorage: resource.MustParse("500Mi"),
},
},
},
expectedResizePolicy: nil,
},
"Ephemeral storage requests and CPU limits are specified": {
testContainer: v1.Container{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
},
Requests: v1.ResourceList{
v1.ResourceEphemeralStorage: resource.MustParse("500Mi"),
},
},
},
expectedResizePolicy: []v1.ContainerResizePolicy{
{
ResourceName: v1.ResourceCPU,
RestartPolicy: v1.NotRequired,
},
},
},
"Ephemeral storage requests and limits, memory requests with restartContainer policy are specified": {
testContainer: v1.Container{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceEphemeralStorage: resource.MustParse("500Mi"),
},
Requests: v1.ResourceList{
v1.ResourceEphemeralStorage: resource.MustParse("500Mi"),
v1.ResourceMemory: resource.MustParse("200Mi"),
},
},
ResizePolicy: []v1.ContainerResizePolicy{
{
ResourceName: v1.ResourceMemory,
RestartPolicy: v1.RestartContainer,
},
},
},
expectedResizePolicy: []v1.ContainerResizePolicy{
{
ResourceName: v1.ResourceMemory,
RestartPolicy: v1.RestartContainer,
},
},
},
} {
for _, isSidecarContainer := range []bool{true, false} {
t.Run(desc, func(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, isSidecarContainer)
if isSidecarContainer {
tc.testContainer.RestartPolicy = &restartAlways
}
testPod := v1.Pod{}
testPod.Spec.Containers = append(testPod.Spec.Containers, tc.testContainer)
if isSidecarContainer {
testPod.Spec.InitContainers = append(testPod.Spec.InitContainers, tc.testContainer)
}
output := roundTrip(t, runtime.Object(&testPod))
pod2 := output.(*v1.Pod)
if !cmp.Equal(pod2.Spec.Containers[0].ResizePolicy, tc.expectedResizePolicy) {
t.Errorf("expected resize policy %+v, but got %+v for normal containers", tc.expectedResizePolicy, pod2.Spec.Containers[0].ResizePolicy)
}
if isSidecarContainer {
if !cmp.Equal(pod2.Spec.InitContainers[0].ResizePolicy, tc.expectedResizePolicy) {
t.Errorf("expected resize policy %+v, but got %+v for restartable init containers", tc.expectedResizePolicy, pod2.Spec.InitContainers[0].ResizePolicy)
}
}
})
}
}
}
func TestSetDefaults_Volume(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ImageVolume, true)
for desc, tc := range map[string]struct {

View File

@ -5289,6 +5289,7 @@ func ValidatePodUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel
for ix, container := range mungedPodSpec.Containers {
container.Image = oldPod.Spec.Containers[ix].Image // +k8s:verify-mutation:reason=clone
newContainers = append(newContainers, container)
}
mungedPodSpec.Containers = newContainers
// munge spec.initContainers[*].image

View File

@ -10279,11 +10279,6 @@ func TestValidatePodSpec(t *testing.T) {
FSGroupChangePolicy: &badfsGroupChangePolicy1,
}),
),
"disallowed resources resize policy for init containers": *podtest.MakePod("",
podtest.SetInitContainers(podtest.MakeContainer("initctr", podtest.SetContainerResizePolicy(
core.ContainerResizePolicy{ResourceName: "cpu", RestartPolicy: "NotRequired"},
))),
),
}
for k, v := range failureCases {
opts := PodValidationOptions{

View File

@ -27,6 +27,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/metrics" "k8s.io/kubernetes/pkg/kubelet/metrics"
podresourcesv1 "k8s.io/kubelet/pkg/apis/podresources/v1" podresourcesv1 "k8s.io/kubelet/pkg/apis/podresources/v1"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
) )
// v1PodResourcesServer implements PodResourcesListerServer // v1PodResourcesServer implements PodResourcesListerServer

View File

@ -464,7 +464,13 @@ func (p *staticPolicy) guaranteedCPUs(pod *v1.Pod, container *v1.Container) int
// We should return this value because this is what kubelet agreed to allocate for the container
// and the value configured with runtime.
if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
containerStatuses := pod.Status.ContainerStatuses
if utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) && podutil.IsRestartableInitContainer(container) {
if len(pod.Status.InitContainerStatuses) != 0 {
containerStatuses = append(containerStatuses, pod.Status.InitContainerStatuses...)
}
}
if cs, ok := podutil.GetContainerStatus(containerStatuses, container.Name); ok {
cpuQuantity = cs.AllocatedResources[v1.ResourceCPU]
}
}
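
The widened status slice only matters because the lookup that follows is a flat scan by name. A sketch of the semantics assumed here (the real GetContainerStatus helper lives in the pod util package):

// Linear scan by container name over whichever status slice it is handed;
// merging InitContainerStatuses in beforehand lets a sidecar's allocated
// resources be found by the same call. Illustrative only.
func getContainerStatus(statuses []v1.ContainerStatus, name string) (v1.ContainerStatus, bool) {
for i := range statuses {
if statuses[i].Name == name {
return statuses[i], true
}
}
return v1.ContainerStatus{}, false
}

The memory-manager hunk below applies the identical merge before reading AllocatedResources as a whole.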

View File

@ -447,7 +447,13 @@ func getRequestedResources(pod *v1.Pod, container *v1.Container) (map[v1.Resourc
// We should return this value because this is what kubelet agreed to allocate for the container
// and the value configured with runtime.
if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
containerStatuses := pod.Status.ContainerStatuses
if utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) && podutil.IsRestartableInitContainer(container) {
if len(pod.Status.InitContainerStatuses) != 0 {
containerStatuses = append(containerStatuses, pod.Status.InitContainerStatuses...)
}
}
if cs, ok := podutil.GetContainerStatus(containerStatuses, container.Name); ok {
resources = cs.AllocatedResources
}
}

View File

@ -2813,23 +2813,40 @@ func (kl *Kubelet) HandlePodSyncs(pods []*v1.Pod) {
}
func isPodResizeInProgress(pod *v1.Pod, podStatus *kubecontainer.PodStatus) bool {
for i := range pod.Spec.Containers {
if containerResourcesChanged(&pod.Spec.Containers[i], podStatus) {
return true
}
}
if utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) {
for i, c := range pod.Spec.InitContainers {
if podutil.IsRestartableInitContainer(&c) {
if containerResourcesChanged(&pod.Spec.InitContainers[i], podStatus) {
return true
}
}
}
}
return false
}
func containerResourcesChanged(c *v1.Container, podStatus *kubecontainer.PodStatus) bool {
if cs := podStatus.FindContainerStatusByName(c.Name); cs != nil {
if cs.State != kubecontainer.ContainerStateRunning || cs.Resources == nil {
return false
}
if c.Resources.Requests != nil {
if cs.Resources.CPURequest != nil && !cs.Resources.CPURequest.Equal(*c.Resources.Requests.Cpu()) {
return true
}
}
if c.Resources.Limits != nil {
if cs.Resources.CPULimit != nil && !cs.Resources.CPULimit.Equal(*c.Resources.Limits.Cpu()) {
return true
}
if cs.Resources.MemoryLimit != nil && !cs.Resources.MemoryLimit.Equal(*c.Resources.Limits.Memory()) {
return true
}
}
}

View File

@ -1766,28 +1766,43 @@ func (kl *Kubelet) determinePodResizeStatus(allocatedPod *v1.Pod, podStatus *kub
// resources reported in the status.
func allocatedResourcesMatchStatus(allocatedPod *v1.Pod, podStatus *kubecontainer.PodStatus) bool {
for _, c := range allocatedPod.Spec.Containers {
if !allocatedContainerResourcesMatchStatus(allocatedPod, &c, podStatus) {
return false
}
}
if utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) {
for _, c := range allocatedPod.Spec.InitContainers {
if podutil.IsRestartableInitContainer(&c) && !allocatedContainerResourcesMatchStatus(allocatedPod, &c, podStatus) {
return false
}
}
}
return true
}
func allocatedContainerResourcesMatchStatus(allocatedPod *v1.Pod, c *v1.Container, podStatus *kubecontainer.PodStatus) bool {
if cs := podStatus.FindContainerStatusByName(c.Name); cs != nil {
if cs.State != kubecontainer.ContainerStateRunning {
// If the container isn't running, it isn't resizing.
return true
}
cpuReq, hasCPUReq := c.Resources.Requests[v1.ResourceCPU]
cpuLim, hasCPULim := c.Resources.Limits[v1.ResourceCPU]
memLim, hasMemLim := c.Resources.Limits[v1.ResourceMemory]
if cs.Resources == nil {
if hasCPUReq || hasCPULim || hasMemLim {
// Container status is missing Resources information, but the container does
// have resizable resources configured.
klog.ErrorS(nil, "Missing runtime resources information for resizing container",
"pod", format.Pod(allocatedPod), "container", c.Name)
return false // We don't want to clear resize status with insufficient information.
} else {
// No resizable resources configured; this might be ok.
return true
}
}
// Only compare resizeable resources, and only compare resources that are explicitly configured.
if hasCPUReq {

View File

@ -3833,6 +3833,7 @@ func Test_generateAPIPodStatusForInPlaceVPAEnabled(t *testing.T) {
CPU1AndMem1GAndStorage2G[v1.ResourceEphemeralStorage] = resource.MustParse("2Gi")
CPU1AndMem1GAndStorage2GAndCustomResource := CPU1AndMem1GAndStorage2G.DeepCopy()
CPU1AndMem1GAndStorage2GAndCustomResource["unknown-resource"] = resource.MustParse("1")
containerRestartPolicyAlways := v1.ContainerRestartPolicyAlways
testKubecontainerPodStatus := kubecontainer.PodStatus{
ContainerStatuses: []*kubecontainer.Status{
@ -3850,9 +3851,10 @@ func Test_generateAPIPodStatusForInPlaceVPAEnabled(t *testing.T) {
}
tests := []struct {
name string
pod *v1.Pod
hasSidecarContainer bool
oldStatus *v1.PodStatus
}{
{
name: "custom resource in ResourcesAllocated, resize should be null",
@ -3916,9 +3918,76 @@ func Test_generateAPIPodStatusForInPlaceVPAEnabled(t *testing.T) {
},
},
},
{
name: "custom resource in ResourcesAllocated in case of restartable init containers, resize should be null",
hasSidecarContainer: true,
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "1234560",
Name: "foo0",
Namespace: "bar0",
},
Spec: v1.PodSpec{
NodeName: "machine",
InitContainers: []v1.Container{
{
Name: testContainerName,
Image: "img",
Resources: v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2GAndCustomResource, Requests: CPU1AndMem1GAndStorage2GAndCustomResource},
RestartPolicy: &containerRestartPolicyAlways,
},
},
RestartPolicy: v1.RestartPolicyAlways,
},
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
{
Name: testContainerName,
Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
AllocatedResources: CPU1AndMem1GAndStorage2GAndCustomResource,
},
},
Resize: "InProgress",
},
},
},
{
name: "cpu/memory resource in ResourcesAllocated in case of restartable init containers, resize should be null",
hasSidecarContainer: true,
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "1234560",
Name: "foo0",
Namespace: "bar0",
},
Spec: v1.PodSpec{
NodeName: "machine",
InitContainers: []v1.Container{
{
Name: testContainerName,
Image: "img",
Resources: v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
RestartPolicy: &containerRestartPolicyAlways,
},
},
RestartPolicy: v1.RestartPolicyAlways,
},
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
{
Name: testContainerName,
Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
AllocatedResources: CPU1AndMem1GAndStorage2G,
},
},
Resize: "InProgress",
},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, test.hasSidecarContainer)
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kl := testKubelet.kubelet

View File

@ -3470,31 +3470,63 @@ func TestIsPodResizeInProgress(t *testing.T) {
Namespace: "default", Namespace: "default",
}, },
Spec: v1.PodSpec{ Spec: v1.PodSpec{
Containers: []v1.Container{{ Containers: []v1.Container{
Name: "c1", {
Resources: v1.ResourceRequirements{ Name: "c1",
Requests: v1.ResourceList{ Resources: v1.ResourceRequirements{
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI), Requests: v1.ResourceList{
v1.ResourceMemory: *resource.NewQuantity(200, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
}, v1.ResourceMemory: *resource.NewQuantity(200, resource.DecimalSI),
Limits: v1.ResourceList{ },
v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI), Limits: v1.ResourceList{
v1.ResourceMemory: *resource.NewQuantity(400, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(400, resource.DecimalSI),
},
}, },
}, },
}, { {
Name: "c2", Name: "c2",
Resources: v1.ResourceRequirements{ Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{ Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(600, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(600, resource.DecimalSI),
}, },
Limits: v1.ResourceList{ Limits: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(700, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(700, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(800, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(800, resource.DecimalSI),
},
}, },
}, },
}}, },
InitContainers: []v1.Container{
{
Name: "c3-restartable-init",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(300, resource.DecimalSI),
},
Limits: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(400, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
},
},
RestartPolicy: &containerRestartPolicyAlways,
},
{
Name: "c4-init",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(300, resource.DecimalSI),
},
Limits: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(400, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
},
},
},
},
},
}
steadyStateC1Status := &kubecontainer.Status{
@ -3542,6 +3574,43 @@ func TestIsPodResizeInProgress(t *testing.T) {
MemoryLimit: resource.NewQuantity(800, resource.DecimalSI),
},
}
steadyStateC3Status := &kubecontainer.Status{
Name: "c3-restartable-init",
State: kubecontainer.ContainerStateRunning,
Resources: &kubecontainer.ContainerResources{
CPURequest: resource.NewMilliQuantity(200, resource.DecimalSI),
CPULimit: resource.NewMilliQuantity(400, resource.DecimalSI),
MemoryLimit: resource.NewQuantity(500, resource.DecimalSI),
},
}
resizeCPUReqC3Status := &kubecontainer.Status{
Name: "c3-restartable-init",
State: kubecontainer.ContainerStateRunning,
Resources: &kubecontainer.ContainerResources{
CPURequest: resource.NewMilliQuantity(300, resource.DecimalSI),
CPULimit: resource.NewMilliQuantity(400, resource.DecimalSI),
MemoryLimit: resource.NewQuantity(500, resource.DecimalSI),
},
}
resizeMemLimitC3Status := &kubecontainer.Status{
Name: "c3-restartable-init",
State: kubecontainer.ContainerStateRunning,
Resources: &kubecontainer.ContainerResources{
CPURequest: resource.NewMilliQuantity(200, resource.DecimalSI),
CPULimit: resource.NewMilliQuantity(400, resource.DecimalSI),
MemoryLimit: resource.NewQuantity(800, resource.DecimalSI),
},
}
resizeCPUReqC4Status := &kubecontainer.Status{
Name: "c4-init",
State: kubecontainer.ContainerStateRunning,
Resources: &kubecontainer.ContainerResources{
CPURequest: resource.NewMilliQuantity(300, resource.DecimalSI),
CPULimit: resource.NewMilliQuantity(400, resource.DecimalSI),
MemoryLimit: resource.NewQuantity(500, resource.DecimalSI),
},
}
mkPodStatus := func(containerStatuses ...*kubecontainer.Status) *kubecontainer.PodStatus {
return &kubecontainer.PodStatus{
ID: pod.UID,
@ -3556,7 +3625,7 @@ func TestIsPodResizeInProgress(t *testing.T) {
expectResize bool
}{{
name: "steady state",
status: mkPodStatus(steadyStateC1Status, steadyStateC2Status, steadyStateC3Status),
expectResize: false,
}, {
name: "terminated container",
@ -3582,10 +3651,27 @@ func TestIsPodResizeInProgress(t *testing.T) {
name: "resizing cpu limit", name: "resizing cpu limit",
status: mkPodStatus(resizeCPULimitC1Status, steadyStateC2Status), status: mkPodStatus(resizeCPULimitC1Status, steadyStateC2Status),
expectResize: true, expectResize: true,
}} },
{
name: "resizing cpu request for restartable init container",
status: mkPodStatus(steadyStateC1Status, steadyStateC2Status, resizeCPUReqC3Status),
expectResize: true,
},
{
name: "resizing memory limit for restartable init container",
status: mkPodStatus(steadyStateC1Status, steadyStateC2Status, resizeMemLimitC3Status),
expectResize: true,
},
{
name: "non-restartable init container should be ignored",
status: mkPodStatus(steadyStateC1Status, steadyStateC2Status, steadyStateC3Status, resizeCPUReqC4Status),
expectResize: false,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, true)
assert.Equal(t, test.expectResize, isPodResizeInProgress(pod, test.status))
})
}

View File

@ -51,6 +51,7 @@ import (
remote "k8s.io/cri-client/pkg" remote "k8s.io/cri-client/pkg"
kubelettypes "k8s.io/kubelet/pkg/types" kubelettypes "k8s.io/kubelet/pkg/types"
podutil "k8s.io/kubernetes/pkg/api/v1/pod" podutil "k8s.io/kubernetes/pkg/api/v1/pod"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/features"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/events" "k8s.io/kubernetes/pkg/kubelet/events"
@ -1071,12 +1072,14 @@ func (m *kubeGenericRuntimeManager) computeInitContainerActions(pod *v1.Pod, pod
// If the container is previously initialized but its status is not
// found, it means its last status is removed for some reason.
// Restart it if it is a restartable init container.
if isPreviouslyInitialized && podutil.IsRestartableInitContainer(container) {
changes.InitContainersToStart = append(changes.InitContainersToStart, i)
}
continue
}
if isPreviouslyInitialized && !podutil.IsRestartableInitContainer(container) {
// after initialization, only restartable init containers need to be kept
// running
@ -1093,81 +1096,84 @@ func (m *kubeGenericRuntimeManager) computeInitContainerActions(pod *v1.Pod, pod
changes.InitContainersToStart = append(changes.InitContainersToStart, i)
case kubecontainer.ContainerStateRunning:
if !podutil.IsRestartableInitContainer(container) {
break
}
if container.StartupProbe != nil {
startup, found := m.startupManager.Get(status.ID)
if !found {
// If the startup probe has not been run, wait for it.
break
}
if startup != proberesults.Success {
if startup == proberesults.Failure {
// If the restartable init container failed the startup probe,
// restart it.
changes.ContainersToKill[status.ID] = containerToKillInfo{
name: container.Name,
container: container,
message: fmt.Sprintf("Init container %s failed startup probe", container.Name),
reason: reasonStartupProbe,
}
changes.InitContainersToStart = append(changes.InitContainersToStart, i)
}
break
}
}
klog.V(4).InfoS("Init container has been initialized", "pod", klog.KObj(pod), "container", container.Name)
if i == (len(pod.Spec.InitContainers) - 1) {
podHasInitialized = true
} else if !isPreviouslyInitialized {
// this init container is initialized for the first time, start the next one
changes.InitContainersToStart = append(changes.InitContainersToStart, i+1)
}
// Restart running sidecar containers which have had their definition changed.
if _, _, changed := containerChanged(container, status); changed {
changes.ContainersToKill[status.ID] = containerToKillInfo{
name: container.Name,
container: container,
message: fmt.Sprintf("Init container %s definition changed", container.Name),
reason: "",
}
changes.InitContainersToStart = append(changes.InitContainersToStart, i)
break
}
// A restartable init container does not have to take into account its
// liveness probe when it determines to start the next init container.
if container.LivenessProbe != nil {
liveness, found := m.livenessManager.Get(status.ID)
if !found {
// If the liveness probe has not been run, wait for it.
break
}
if liveness == proberesults.Failure {
// If the restartable init container failed the liveness probe,
// restart it.
changes.ContainersToKill[status.ID] = containerToKillInfo{
name: container.Name,
container: container,
message: fmt.Sprintf("Init container %s failed liveness probe", container.Name),
reason: reasonLivenessProbe,
}
changes.InitContainersToStart = append(changes.InitContainersToStart, i)
break
}
}
if IsInPlacePodVerticalScalingAllowed(pod) && !m.computePodResizeAction(pod, i, true, status, changes) {
// computePodResizeAction updates 'changes' if resize policy requires restarting this container
break
}
// If the init container failed and the restart policy is Never, the pod is terminal.
// Otherwise, restart the init container.
case kubecontainer.ContainerStateExited:
if podutil.IsRestartableInitContainer(container) {
changes.InitContainersToStart = append(changes.InitContainersToStart, i)
} else { // init container
@ -1191,6 +1197,7 @@ func (m *kubeGenericRuntimeManager) computeInitContainerActions(pod *v1.Pod, pod
}
default: // kubecontainer.ContainerStatusUnknown or other unknown states
if podutil.IsRestartableInitContainer(container) {
// If the restartable init container is in unknown state, restart it.
changes.ContainersToKill[status.ID] = containerToKillInfo{

View File

@ -473,7 +473,7 @@ type containerResources struct {
// containerToUpdateInfo contains necessary information to update a container's resources.
type containerToUpdateInfo struct {
// Index of the container in pod.Spec.Containers or pod.Spec.InitContainers that needs resource update
apiContainerIdx int
// ID of the runtime container that needs resource update
kubeContainerID kubecontainer.ContainerID
@ -514,6 +514,9 @@ type podActions struct {
// EphemeralContainersToStart is a list of indexes for the ephemeral containers to start,
// where the index is the index of the specific container in pod.Spec.EphemeralContainers.
EphemeralContainersToStart []int
// InitContainersToUpdate keeps a list of restartable init containers (sidecar containers) needing resource update.
// Init Containers resource update is applicable only for CPU and memory.
InitContainersToUpdate map[v1.ResourceName][]containerToUpdateInfo
// ContainersToUpdate keeps a list of containers needing resource update.
// Container resource update is applicable only for CPU and memory.
ContainersToUpdate map[v1.ResourceName][]containerToUpdateInfo
@ -522,8 +525,8 @@ type podActions struct {
}
func (p podActions) String() string {
return fmt.Sprintf("KillPod: %t, CreateSandbox: %t, UpdatePodResources: %t, Attempt: %d, InitContainersToStart: %v, ContainersToStart: %v, EphemeralContainersToStart: %v, InitContainersToUpdate: %v, ContainersToUpdate: %v, ContainersToKill: %v",
p.KillPod, p.CreateSandbox, p.UpdatePodResources, p.Attempt, p.InitContainersToStart, p.ContainersToStart, p.EphemeralContainersToStart, p.InitContainersToUpdate, p.ContainersToUpdate, p.ContainersToKill)
}
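
The new InitContainersToUpdate map mirrors ContainersToUpdate: entries are bucketed per resource name so the CPU and memory passes stay separate, and for sidecars apiContainerIdx indexes into pod.Spec.InitContainers. A hedged sketch of queueing one entry (field values illustrative):

changes.InitContainersToUpdate[v1.ResourceCPU] = append(
changes.InitContainersToUpdate[v1.ResourceCPU],
containerToUpdateInfo{
apiContainerIdx: 0, // index into pod.Spec.InitContainers for sidecars
kubeContainerID: kubeContainerStatus.ID,
},
)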
// containerChanged will determine whether the container has changed based on the fields that will affect the running of the container.
@ -560,8 +563,15 @@ func IsInPlacePodVerticalScalingAllowed(pod *v1.Pod) bool {
// computePodResizeAction determines the actions required (if any) to resize the given container.
// Returns whether to keep (true) or restart (false) the container.
// TODO(vibansal): Make this function agnostic to whether it is dealing with a restartable init container or not (i.e. remove the argument `isRestartableInitContainer`).
func (m *kubeGenericRuntimeManager) computePodResizeAction(pod *v1.Pod, containerIdx int, isRestartableInitContainer bool, kubeContainerStatus *kubecontainer.Status, changes *podActions) (keepContainer bool) {
var container v1.Container
if isRestartableInitContainer {
container = pod.Spec.InitContainers[containerIdx]
} else {
container = pod.Spec.Containers[containerIdx]
}
// Determine if the *running* container needs resource update by comparing v1.Spec.Resources (desired)
// with v1.Status.Resources / runtime.Status.Resources (last known actual).
@ -637,13 +647,17 @@ func (m *kubeGenericRuntimeManager) computePodResizeAction(pod *v1.Pod, containe
currentContainerResources: &currentResources,
}
// Order the container updates such that resource decreases are applied before increases
containersToUpdate := changes.ContainersToUpdate
if isRestartableInitContainer {
containersToUpdate = changes.InitContainersToUpdate
}
switch {
case specValue > statusValue: // append
containersToUpdate[rName] = append(containersToUpdate[rName], cUpdateInfo)
case specValue < statusValue: // prepend
containersToUpdate[rName] = append(containersToUpdate[rName], containerToUpdateInfo{})
copy(containersToUpdate[rName][1:], containersToUpdate[rName])
containersToUpdate[rName][0] = cUpdateInfo
}
}
resizeMemLim, restartMemLim := determineContainerResize(v1.ResourceMemory, desiredResources.memoryLimit, currentResources.memoryLimit)
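
The append/prepend split above is what makes resource decreases apply before increases within one bucket: decreases go to the front of the slice, increases to the back, and the slice is later walked front to back. A worked sketch (values illustrative):

// c0: 500m -> 700m (increase, appended)  => [c0]
// c1: 400m -> 200m (decrease, prepended) => [c1, c0]
// c2: 300m -> 100m (decrease, prepended) => [c2, c1, c0]
// Applying [c2, c1, c0] in order frees capacity before consuming it.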
@ -653,10 +667,14 @@ func (m *kubeGenericRuntimeManager) computePodResizeAction(pod *v1.Pod, containe
// resize policy requires this container to restart
changes.ContainersToKill[kubeContainerStatus.ID] = containerToKillInfo{
name: kubeContainerStatus.Name,
container: &container,
message: fmt.Sprintf("Container %s resize requires restart", container.Name),
}
if isRestartableInitContainer {
changes.InitContainersToStart = append(changes.InitContainersToStart, containerIdx)
} else {
changes.ContainersToStart = append(changes.ContainersToStart, containerIdx)
}
changes.UpdatePodResources = true
return false
} else {
@ -731,6 +749,13 @@ func (m *kubeGenericRuntimeManager) doPodResizeAction(pod *v1.Pod, podContainerC
return err
}
}
if len(podContainerChanges.InitContainersToUpdate[rName]) > 0 {
if err = m.updatePodContainersResources(pod, rName, podContainerChanges.InitContainersToUpdate[rName], true); err != nil {
klog.ErrorS(err, "updatePodContainersResources failed for init containers", "pod", format.Pod(pod), "resource", rName)
return err
}
}
// At downsizing, requests should shrink prior to limits in order to keep "requests <= limits".
if newPodCgReqValue < currPodCgReqValue {
if err = setPodCgroupConfig(rName, false); err != nil {
@ -801,11 +826,17 @@ func (m *kubeGenericRuntimeManager) doPodResizeAction(pod *v1.Pod, podContainerC
}
}
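
The downsizing comment above is easiest to see with numbers; a hedged sketch (quantities illustrative, and the boolean is assumed to select limits vs. requests as in the call above):

// current: requests=800m, limits=1000m; desired: requests=300m, limits=600m.
// If limits shrank first, requests (800m) would momentarily exceed the new
// limit (600m). Shrinking requests first keeps requests <= limits throughout:
_ = setPodCgroupConfig(v1.ResourceCPU, false) // requests: 800m -> 300m
_ = setPodCgroupConfig(v1.ResourceCPU, true)  // limits: 1000m -> 600m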
func (m *kubeGenericRuntimeManager) updatePodContainerResources(ctx context.Context, pod *v1.Pod, resourceName v1.ResourceName, containersToUpdate []containerToUpdateInfo, isRestartableInitContainer bool) (updateResults []*kubecontainer.SyncResult, err error) {
klog.V(5).InfoS("Updating container resources", "pod", klog.KObj(pod))
var container *v1.Container
for _, cInfo := range containersToUpdate {
var updateContainerResult *kubecontainer.SyncResult
if isRestartableInitContainer {
container = pod.Spec.InitContainers[cInfo.apiContainerIdx].DeepCopy()
} else {
container = pod.Spec.Containers[cInfo.apiContainerIdx].DeepCopy()
}
// If updating memory limit, use most recently configured CPU request and limit values.
// If updating CPU request and limit, use most recently configured memory request and limit values.
switch resourceName {
@ -1001,6 +1032,9 @@ func (m *kubeGenericRuntimeManager) computePodActions(ctx context.Context, pod *
return changes return changes
} }
} else { } else {
if IsInPlacePodVerticalScalingAllowed(pod) {
changes.InitContainersToUpdate = make(map[v1.ResourceName][]containerToUpdateInfo)
}
hasInitialized := m.computeInitContainerActions(pod, podStatus, &changes) hasInitialized := m.computeInitContainerActions(pod, podStatus, &changes)
if changes.KillPod || !hasInitialized { if changes.KillPod || !hasInitialized {
// Initialization failed or still in progress. Skip inspecting non-init // Initialization failed or still in progress. Skip inspecting non-init
@ -1067,7 +1101,7 @@ func (m *kubeGenericRuntimeManager) computePodActions(ctx context.Context, pod *
// If the container failed the startup probe, we should kill it. // If the container failed the startup probe, we should kill it.
message = fmt.Sprintf("Container %s failed startup probe", container.Name) message = fmt.Sprintf("Container %s failed startup probe", container.Name)
reason = reasonStartupProbe reason = reasonStartupProbe
} else if IsInPlacePodVerticalScalingAllowed(pod) && !m.computePodResizeAction(pod, idx, containerStatus, &changes) { } else if IsInPlacePodVerticalScalingAllowed(pod) && !m.computePodResizeAction(pod, idx, false, containerStatus, &changes) {
// computePodResizeAction updates 'changes' if resize policy requires restarting this container // computePodResizeAction updates 'changes' if resize policy requires restarting this container
continue continue
} else { } else {
@ -1383,7 +1417,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(ctx context.Context, pod *v1.Pod, po
} }
} }
// Step 7: For containers in podContainerChanges.ContainersToUpdate[CPU,Memory] list, invoke UpdateContainerResources // Step 7: For containers in podContainerChanges.ContainersToUpdate[CPU,Memory] or podContainerChanges.InitContainersToUpdate[CPU,Memory] lists, invoke UpdateContainerResources
if IsInPlacePodVerticalScalingAllowed(pod) { if IsInPlacePodVerticalScalingAllowed(pod) {
if len(podContainerChanges.ContainersToUpdate) > 0 || podContainerChanges.UpdatePodResources { if len(podContainerChanges.ContainersToUpdate) > 0 || podContainerChanges.UpdatePodResources {
m.doPodResizeAction(pod, podContainerChanges, &result) m.doPodResizeAction(pod, podContainerChanges, &result)
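The Step 7 comment above now covers two update maps. A minimal sketch of how doPodResizeAction can fan out per resource, assuming the updatePodContainersResources helper shown earlier in this diff handles both lists (illustrative only, not the verbatim implementation):

	// For each resized resource, update regular containers and then
	// restartable init containers, flagging the latter so the helper
	// indexes into pod.Spec.InitContainers instead of pod.Spec.Containers.
	for _, rName := range []v1.ResourceName{v1.ResourceCPU, v1.ResourceMemory} {
		if len(podContainerChanges.ContainersToUpdate[rName]) > 0 {
			if err := m.updatePodContainersResources(pod, rName, podContainerChanges.ContainersToUpdate[rName], false); err != nil {
				return err
			}
		}
		if len(podContainerChanges.InitContainersToUpdate[rName]) > 0 {
			if err := m.updatePodContainersResources(pod, rName, podContainerChanges.InitContainersToUpdate[rName], true); err != nil {
				return err
			}
		}
	}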


@ -2665,13 +2665,78 @@ func TestUpdatePodContainerResources(t *testing.T) {
invokeUpdateResources: true, invokeUpdateResources: true,
expectedCurrentLimits: []v1.ResourceList{res100m150Mi, res200m250Mi, res300m350Mi}, expectedCurrentLimits: []v1.ResourceList{res100m150Mi, res200m250Mi, res300m350Mi},
expectedCurrentRequests: []v1.ResourceList{res100m150Mi, res200m250Mi, res300m350Mi}, expectedCurrentRequests: []v1.ResourceList{res100m150Mi, res200m250Mi, res300m350Mi},
expectedResults: []*kubecontainer.SyncResult{
{
Action: kubecontainer.UpdateContainerMemory,
Target: pod.Spec.Containers[0].Name,
},
{
Action: kubecontainer.UpdateContainerMemory,
Target: pod.Spec.Containers[1].Name,
},
{
Action: kubecontainer.UpdateContainerMemory,
Target: pod.Spec.Containers[2].Name,
},
},
},
"Guaranteed QoS Pod - CPU & memory resize requested, update CPU, error occurs": {
resourceName: v1.ResourceCPU,
apiSpecResources: []v1.ResourceRequirements{
{Limits: res150m150Mi, Requests: res150m150Mi},
{Limits: res250m250Mi, Requests: res250m250Mi},
{Limits: res350m350Mi, Requests: res350m350Mi},
},
apiStatusResources: []v1.ResourceRequirements{
{Limits: res100m100Mi, Requests: res100m100Mi},
{Limits: res200m200Mi, Requests: res200m200Mi},
{Limits: res300m300Mi, Requests: res300m300Mi},
},
requiresRestart: []bool{false, false, false},
invokeUpdateResources: true,
injectedError: fakeError,
expectedCurrentLimits: []v1.ResourceList{res100m100Mi, res200m200Mi, res300m300Mi},
expectedCurrentRequests: []v1.ResourceList{res100m100Mi, res200m200Mi, res300m300Mi},
expectedResults: []*kubecontainer.SyncResult{
{
Action: kubecontainer.UpdateContainerCPU,
Target: pod.Spec.Containers[0].Name,
Error: kubecontainer.ErrUpdateContainerCPU,
Message: fakeError.Error(),
},
},
},
"Guaranteed QoS Pod - CPU & memory resize requested, update memory, error occurs": {
resourceName: v1.ResourceMemory,
apiSpecResources: []v1.ResourceRequirements{
{Limits: res150m150Mi, Requests: res150m150Mi},
{Limits: res250m250Mi, Requests: res250m250Mi},
{Limits: res350m350Mi, Requests: res350m350Mi},
},
apiStatusResources: []v1.ResourceRequirements{
{Limits: res100m100Mi, Requests: res100m100Mi},
{Limits: res200m200Mi, Requests: res200m200Mi},
{Limits: res300m300Mi, Requests: res300m300Mi},
},
requiresRestart: []bool{false, false, false},
invokeUpdateResources: true,
injectedError: fakeError,
expectedCurrentLimits: []v1.ResourceList{res100m100Mi, res200m200Mi, res300m300Mi},
expectedCurrentRequests: []v1.ResourceList{res100m100Mi, res200m200Mi, res300m300Mi},
expectedResults: []*kubecontainer.SyncResult{
{
Action: kubecontainer.UpdateContainerMemory,
Target: pod.Spec.Containers[0].Name,
Error: kubecontainer.ErrUpdateContainerMemory,
Message: fakeError.Error(),
},
},
}, },
} { } {
var containersToUpdate []containerToUpdateInfo var containersToUpdate []containerToUpdateInfo
for idx := range pod.Spec.Containers { for idx := range pod.Spec.Containers {
// default resize policy when pod resize feature is enabled // default resize policy when pod resize feature is enabled
pod.Spec.Containers[idx].Resources = tc.apiSpecResources[idx] pod.Spec.Containers[idx].Resources = tc.apiSpecResources[idx]
pod.Status.ContainerStatuses[idx].Resources = &tc.apiStatusResources[idx]
cInfo := containerToUpdateInfo{ cInfo := containerToUpdateInfo{
apiContainerIdx: idx, apiContainerIdx: idx,
kubeContainerID: kubecontainer.ContainerID{}, kubeContainerID: kubecontainer.ContainerID{},
@ -2688,20 +2753,147 @@ func TestUpdatePodContainerResources(t *testing.T) {
cpuRequest: tc.apiStatusResources[idx].Requests.Cpu().MilliValue(), cpuRequest: tc.apiStatusResources[idx].Requests.Cpu().MilliValue(),
}, },
} }
containersToUpdate = append(containersToUpdate, cInfo) containersToUpdate = append(containersToUpdate, cInfo)
} }
fakeRuntime.Called = []string{} fakeRuntime.Called = []string{}
err := m.updatePodContainerResources(pod, tc.resourceName, containersToUpdate) if tc.injectedError != nil {
fakeRuntime.InjectError("UpdateContainerResources", tc.injectedError)
}
updateContainerResults, err := m.updatePodContainerResources(context.TODO(), pod, tc.resourceName, containersToUpdate, false)
assert.ElementsMatch(t, tc.expectedResults, updateContainerResults)
if tc.injectedError == nil {
require.NoError(t, err, dsc)
} else {
require.EqualError(t, err, tc.injectedError.Error(), dsc)
}
if tc.invokeUpdateResources {
assert.Contains(t, fakeRuntime.Called, "UpdateContainerResources", dsc)
}
for idx := range pod.Spec.Containers {
assert.Equal(t, tc.expectedCurrentLimits[idx].Memory().Value(), containersToUpdate[idx].currentContainerResources.memoryLimit, dsc)
assert.Equal(t, tc.expectedCurrentRequests[idx].Memory().Value(), containersToUpdate[idx].currentContainerResources.memoryRequest, dsc)
assert.Equal(t, tc.expectedCurrentLimits[idx].Cpu().MilliValue(), containersToUpdate[idx].currentContainerResources.cpuLimit, dsc)
assert.Equal(t, tc.expectedCurrentRequests[idx].Cpu().MilliValue(), containersToUpdate[idx].currentContainerResources.cpuRequest, dsc)
}
}
}
// TODO(vibansal): Refactor the code so that TestUpdatePodContainerResources covers both regular and restartable init containers.
// This test works the same way as TestUpdatePodContainerResources, except that it checks only restartable init containers.
func TestUpdatePodRestartableInitContainerResources(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, true)
fakeRuntime, _, m, err := createTestRuntimeManager()
require.NoError(t, err)
m.machineInfo.MemoryCapacity = 17179860387 // 16GB
cpu100m := resource.MustParse("100m")
cpu150m := resource.MustParse("150m")
cpu200m := resource.MustParse("200m")
cpu250m := resource.MustParse("250m")
cpu300m := resource.MustParse("300m")
cpu350m := resource.MustParse("350m")
mem100M := resource.MustParse("100Mi")
mem150M := resource.MustParse("150Mi")
mem200M := resource.MustParse("200Mi")
mem250M := resource.MustParse("250Mi")
mem300M := resource.MustParse("300Mi")
mem350M := resource.MustParse("350Mi")
res100m100Mi := v1.ResourceList{v1.ResourceCPU: cpu100m, v1.ResourceMemory: mem100M}
res150m100Mi := v1.ResourceList{v1.ResourceCPU: cpu150m, v1.ResourceMemory: mem100M}
res100m150Mi := v1.ResourceList{v1.ResourceCPU: cpu100m, v1.ResourceMemory: mem150M}
res150m150Mi := v1.ResourceList{v1.ResourceCPU: cpu150m, v1.ResourceMemory: mem150M}
res200m200Mi := v1.ResourceList{v1.ResourceCPU: cpu200m, v1.ResourceMemory: mem200M}
res250m200Mi := v1.ResourceList{v1.ResourceCPU: cpu250m, v1.ResourceMemory: mem200M}
res200m250Mi := v1.ResourceList{v1.ResourceCPU: cpu200m, v1.ResourceMemory: mem250M}
res250m250Mi := v1.ResourceList{v1.ResourceCPU: cpu250m, v1.ResourceMemory: mem250M}
res300m300Mi := v1.ResourceList{v1.ResourceCPU: cpu300m, v1.ResourceMemory: mem300M}
res350m300Mi := v1.ResourceList{v1.ResourceCPU: cpu350m, v1.ResourceMemory: mem300M}
res300m350Mi := v1.ResourceList{v1.ResourceCPU: cpu300m, v1.ResourceMemory: mem350M}
res350m350Mi := v1.ResourceList{v1.ResourceCPU: cpu350m, v1.ResourceMemory: mem350M}
pod, _ := makeBasePodAndStatusWithRestartableInitContainers()
makeAndSetFakePod(t, m, fakeRuntime, pod)
for dsc, tc := range map[string]struct {
resourceName v1.ResourceName
apiSpecResources []v1.ResourceRequirements
apiStatusResources []v1.ResourceRequirements
requiresRestart []bool
invokeUpdateResources bool
expectedCurrentLimits []v1.ResourceList
expectedCurrentRequests []v1.ResourceList
}{
"Guaranteed QoS Pod - CPU & memory resize requested, update CPU": {
resourceName: v1.ResourceCPU,
apiSpecResources: []v1.ResourceRequirements{
{Limits: res150m150Mi, Requests: res150m150Mi},
{Limits: res250m250Mi, Requests: res250m250Mi},
{Limits: res350m350Mi, Requests: res350m350Mi},
},
apiStatusResources: []v1.ResourceRequirements{
{Limits: res100m100Mi, Requests: res100m100Mi},
{Limits: res200m200Mi, Requests: res200m200Mi},
{Limits: res300m300Mi, Requests: res300m300Mi},
},
requiresRestart: []bool{false, false, false},
invokeUpdateResources: true,
expectedCurrentLimits: []v1.ResourceList{res150m100Mi, res250m200Mi, res350m300Mi},
expectedCurrentRequests: []v1.ResourceList{res150m100Mi, res250m200Mi, res350m300Mi},
},
"Guaranteed QoS Pod - CPU & memory resize requested, update memory": {
resourceName: v1.ResourceMemory,
apiSpecResources: []v1.ResourceRequirements{
{Limits: res150m150Mi, Requests: res150m150Mi},
{Limits: res250m250Mi, Requests: res250m250Mi},
{Limits: res350m350Mi, Requests: res350m350Mi},
},
apiStatusResources: []v1.ResourceRequirements{
{Limits: res100m100Mi, Requests: res100m100Mi},
{Limits: res200m200Mi, Requests: res200m200Mi},
{Limits: res300m300Mi, Requests: res300m300Mi},
},
requiresRestart: []bool{false, false, false},
invokeUpdateResources: true,
expectedCurrentLimits: []v1.ResourceList{res100m150Mi, res200m250Mi, res300m350Mi},
expectedCurrentRequests: []v1.ResourceList{res100m150Mi, res200m250Mi, res300m350Mi},
},
} {
var initContainersToUpdate []containerToUpdateInfo
for idx := range pod.Spec.InitContainers {
// default resize policy when pod resize feature is enabled
pod.Spec.InitContainers[idx].Resources = tc.apiSpecResources[idx]
cInfo := containerToUpdateInfo{
apiContainerIdx: idx,
kubeContainerID: kubecontainer.ContainerID{},
desiredContainerResources: containerResources{
memoryLimit: tc.apiSpecResources[idx].Limits.Memory().Value(),
memoryRequest: tc.apiSpecResources[idx].Requests.Memory().Value(),
cpuLimit: tc.apiSpecResources[idx].Limits.Cpu().MilliValue(),
cpuRequest: tc.apiSpecResources[idx].Requests.Cpu().MilliValue(),
},
currentContainerResources: &containerResources{
memoryLimit: tc.apiStatusResources[idx].Limits.Memory().Value(),
memoryRequest: tc.apiStatusResources[idx].Requests.Memory().Value(),
cpuLimit: tc.apiStatusResources[idx].Limits.Cpu().MilliValue(),
cpuRequest: tc.apiStatusResources[idx].Requests.Cpu().MilliValue(),
},
}
initContainersToUpdate = append(initContainersToUpdate, cInfo)
}
fakeRuntime.Called = []string{}
err := m.updatePodContainersResources(pod, tc.resourceName, initContainersToUpdate, true)
require.NoError(t, err, dsc) require.NoError(t, err, dsc)
if tc.invokeUpdateResources { if tc.invokeUpdateResources {
assert.Contains(t, fakeRuntime.Called, "UpdateContainerResources", dsc) assert.Contains(t, fakeRuntime.Called, "UpdateContainerResources", dsc)
} }
for idx := range pod.Spec.Containers { for idx := range pod.Spec.InitContainers {
assert.Equal(t, tc.expectedCurrentLimits[idx].Memory().Value(), containersToUpdate[idx].currentContainerResources.memoryLimit, dsc) assert.Equal(t, tc.expectedCurrentLimits[idx].Memory().Value(), initContainersToUpdate[idx].currentContainerResources.memoryLimit, dsc)
assert.Equal(t, tc.expectedCurrentRequests[idx].Memory().Value(), containersToUpdate[idx].currentContainerResources.memoryRequest, dsc) assert.Equal(t, tc.expectedCurrentRequests[idx].Memory().Value(), initContainersToUpdate[idx].currentContainerResources.memoryRequest, dsc)
assert.Equal(t, tc.expectedCurrentLimits[idx].Cpu().MilliValue(), containersToUpdate[idx].currentContainerResources.cpuLimit, dsc) assert.Equal(t, tc.expectedCurrentLimits[idx].Cpu().MilliValue(), initContainersToUpdate[idx].currentContainerResources.cpuLimit, dsc)
assert.Equal(t, tc.expectedCurrentRequests[idx].Cpu().MilliValue(), containersToUpdate[idx].currentContainerResources.cpuRequest, dsc) assert.Equal(t, tc.expectedCurrentRequests[idx].Cpu().MilliValue(), initContainersToUpdate[idx].currentContainerResources.cpuRequest, dsc)
} }
} }
} }
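The fixture builder called above is not part of this diff; a hypothetical sketch of what makeBasePodAndStatusWithRestartableInitContainers could look like (the name comes from the call site, the body is an assumption):

	// Hypothetical sketch: extend the base test pod with three restartable
	// init containers (RestartPolicy: Always), matching the three-entry
	// resource fixtures used by the test above.
	func makeBasePodAndStatusWithRestartableInitContainers() (*v1.Pod, *kubecontainer.PodStatus) {
		pod, status := makeBasePodAndStatus() // assumed existing helper in this test file
		always := v1.ContainerRestartPolicyAlways
		for i := 1; i <= 3; i++ {
			pod.Spec.InitContainers = append(pod.Spec.InitContainers, v1.Container{
				Name:          fmt.Sprintf("restartable-init-%d", i),
				Image:         "busybox",
				RestartPolicy: &always,
			})
		}
		return pod, status
	}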


@ -19,7 +19,10 @@ package status
import ( import (
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog/v2" "k8s.io/klog/v2"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/features"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/status/state" "k8s.io/kubernetes/pkg/kubelet/status/state"
) )
@ -81,7 +84,19 @@ func (m *fakeManager) SetPodAllocation(pod *v1.Pod) error {
klog.InfoS("SetPodAllocation()") klog.InfoS("SetPodAllocation()")
for _, container := range pod.Spec.Containers { for _, container := range pod.Spec.Containers {
alloc := *container.Resources.DeepCopy() alloc := *container.Resources.DeepCopy()
m.state.SetContainerResourceAllocation(string(pod.UID), container.Name, alloc) if err := m.state.SetContainerResourceAllocation(string(pod.UID), container.Name, alloc); err != nil {
return err
}
}
if utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) {
for _, container := range pod.Spec.InitContainers {
if podutil.IsRestartableInitContainer(&container) {
alloc := *container.Resources.DeepCopy()
if err := m.state.SetContainerResourceAllocation(string(pod.UID), container.Name, alloc); err != nil {
return err
}
}
}
} }
return nil return nil
} }
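Both the fake and the real status manager gate their extra loop on podutil.IsRestartableInitContainer. That predicate reduces to a nil-safe restart-policy check; a sketch consistent with how it is used here:

	// An init container is "restartable" (a sidecar) exactly when its
	// restart policy is explicitly set to Always.
	func IsRestartableInitContainer(initContainer *v1.Container) bool {
		if initContainer.RestartPolicy == nil {
			return false
		}
		return *initContainer.RestartPolicy == v1.ContainerRestartPolicyAlways
	}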


@ -282,6 +282,24 @@ func updatePodFromAllocation(pod *v1.Pod, allocs state.PodResourceAllocation) (*
} }
} }
} }
if utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) {
for i, c := range pod.Spec.InitContainers {
if podutil.IsRestartableInitContainer(&c) {
if cAlloc, ok := allocated[c.Name]; ok {
if !apiequality.Semantic.DeepEqual(c.Resources, cAlloc) {
// Allocation differs from pod spec, update
if !updated {
// If this is the first update, copy the pod
pod = pod.DeepCopy()
updated = true
}
pod.Spec.InitContainers[i].Resources = cAlloc
}
}
}
}
}
return pod, updated return pod, updated
} }
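updatePodFromAllocation relies on a copy-on-first-write pattern: the pod is deep-copied at most once, and only when some container's resources actually diverge from the recorded allocation. A standalone illustration of the pattern, assuming a simple name-keyed allocation map:

	// Copy-on-first-write: callers holding the original pod never observe a
	// partially updated object, and no copy is made when nothing changed.
	func applyAllocs(pod *v1.Pod, allocs map[string]v1.ResourceRequirements) (*v1.Pod, bool) {
		updated := false
		for i, c := range pod.Spec.Containers {
			if alloc, ok := allocs[c.Name]; ok && !apiequality.Semantic.DeepEqual(c.Resources, alloc) {
				if !updated {
					pod = pod.DeepCopy() // first divergence: fork the object
					updated = true
				}
				pod.Spec.Containers[i].Resources = alloc
			}
		}
		return pod, updated
	}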
@ -296,12 +314,25 @@ func (m *manager) GetPodResizeStatus(podUID types.UID) v1.PodResizeStatus {
func (m *manager) SetPodAllocation(pod *v1.Pod) error { func (m *manager) SetPodAllocation(pod *v1.Pod) error {
m.podStatusesLock.RLock() m.podStatusesLock.RLock()
defer m.podStatusesLock.RUnlock() defer m.podStatusesLock.RUnlock()
for _, container := range pod.Spec.Containers { for _, container := range pod.Spec.Containers {
alloc := *container.Resources.DeepCopy() alloc := *container.Resources.DeepCopy()
if err := m.state.SetContainerResourceAllocation(string(pod.UID), container.Name, alloc); err != nil { if err := m.state.SetContainerResourceAllocation(string(pod.UID), container.Name, alloc); err != nil {
return err return err
} }
} }
if utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) {
for _, container := range pod.Spec.InitContainers {
if podutil.IsRestartableInitContainer(&container) {
alloc := *container.Resources.DeepCopy()
if err := m.state.SetContainerResourceAllocation(string(pod.UID), container.Name, alloc); err != nil {
return err
}
}
}
}
return nil return nil
} }


@ -36,11 +36,14 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/schema"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing" core "k8s.io/client-go/testing"
featuregatetesting "k8s.io/component-base/featuregate/testing"
podutil "k8s.io/kubernetes/pkg/api/v1/pod" podutil "k8s.io/kubernetes/pkg/api/v1/pod"
api "k8s.io/kubernetes/pkg/apis/core" api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/features"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
kubepod "k8s.io/kubernetes/pkg/kubelet/pod" kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
"k8s.io/kubernetes/pkg/kubelet/status/state" "k8s.io/kubernetes/pkg/kubelet/status/state"
@ -2037,6 +2040,7 @@ func TestMergePodStatus(t *testing.T) {
} }
func TestUpdatePodFromAllocation(t *testing.T) { func TestUpdatePodFromAllocation(t *testing.T) {
containerRestartPolicyAlways := v1.ContainerRestartPolicyAlways
pod := &v1.Pod{ pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
UID: "12345", UID: "12345",
@ -2044,36 +2048,69 @@ func TestUpdatePodFromAllocation(t *testing.T) {
Namespace: "default", Namespace: "default",
}, },
Spec: v1.PodSpec{ Spec: v1.PodSpec{
Containers: []v1.Container{{ Containers: []v1.Container{
Name: "c1", {
Resources: v1.ResourceRequirements{ Name: "c1",
Requests: v1.ResourceList{ Resources: v1.ResourceRequirements{
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI), Requests: v1.ResourceList{
v1.ResourceMemory: *resource.NewQuantity(200, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
}, v1.ResourceMemory: *resource.NewQuantity(200, resource.DecimalSI),
Limits: v1.ResourceList{ },
v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI), Limits: v1.ResourceList{
v1.ResourceMemory: *resource.NewQuantity(400, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(400, resource.DecimalSI),
},
}, },
}, },
}, { {
Name: "c2", Name: "c2",
Resources: v1.ResourceRequirements{ Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{ Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(600, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(600, resource.DecimalSI),
}, },
Limits: v1.ResourceList{ Limits: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(700, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(700, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(800, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(800, resource.DecimalSI),
},
}, },
}, },
}}, },
InitContainers: []v1.Container{
{
Name: "c1-restartable-init",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(300, resource.DecimalSI),
},
Limits: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(400, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
},
},
RestartPolicy: &containerRestartPolicyAlways,
},
{
Name: "c1-init",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(300, resource.DecimalSI),
},
Limits: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(400, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
},
},
},
},
}, },
} }
resizedPod := pod.DeepCopy() resizedPod := pod.DeepCopy()
resizedPod.Spec.Containers[0].Resources.Requests[v1.ResourceCPU] = *resource.NewMilliQuantity(200, resource.DecimalSI) resizedPod.Spec.Containers[0].Resources.Requests[v1.ResourceCPU] = *resource.NewMilliQuantity(200, resource.DecimalSI)
resizedPod.Spec.InitContainers[0].Resources.Requests[v1.ResourceCPU] = *resource.NewMilliQuantity(300, resource.DecimalSI)
tests := []struct { tests := []struct {
name string name string
@ -2086,8 +2123,10 @@ func TestUpdatePodFromAllocation(t *testing.T) {
pod: pod, pod: pod,
allocs: state.PodResourceAllocation{ allocs: state.PodResourceAllocation{
string(pod.UID): map[string]v1.ResourceRequirements{ string(pod.UID): map[string]v1.ResourceRequirements{
"c1": *pod.Spec.Containers[0].Resources.DeepCopy(), "c1": *pod.Spec.Containers[0].Resources.DeepCopy(),
"c2": *pod.Spec.Containers[1].Resources.DeepCopy(), "c2": *pod.Spec.Containers[1].Resources.DeepCopy(),
"c1-restartable-init": *pod.Spec.InitContainers[0].Resources.DeepCopy(),
"c1-init": *pod.Spec.InitContainers[1].Resources.DeepCopy(),
}, },
}, },
expectUpdate: false, expectUpdate: false,
@ -2110,8 +2149,10 @@ func TestUpdatePodFromAllocation(t *testing.T) {
pod: pod, pod: pod,
allocs: state.PodResourceAllocation{ allocs: state.PodResourceAllocation{
string(pod.UID): map[string]v1.ResourceRequirements{ string(pod.UID): map[string]v1.ResourceRequirements{
"c1": *resizedPod.Spec.Containers[0].Resources.DeepCopy(), "c1": *resizedPod.Spec.Containers[0].Resources.DeepCopy(),
"c2": *resizedPod.Spec.Containers[1].Resources.DeepCopy(), "c2": *resizedPod.Spec.Containers[1].Resources.DeepCopy(),
"c1-restartable-init": *resizedPod.Spec.InitContainers[0].Resources.DeepCopy(),
"c1-init": *resizedPod.Spec.InitContainers[1].Resources.DeepCopy(),
}, },
}, },
expectUpdate: true, expectUpdate: true,
@ -2120,6 +2161,7 @@ func TestUpdatePodFromAllocation(t *testing.T) {
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, true)
pod := test.pod.DeepCopy() pod := test.pod.DeepCopy()
allocatedPod, updated := updatePodFromAllocation(pod, test.allocs) allocatedPod, updated := updatePodFromAllocation(pod, test.allocs)


@ -148,6 +148,9 @@ func AggregateContainerRequests(pod *v1.Pod, opts PodResourcesOptions) v1.Resour
for i := range pod.Status.ContainerStatuses { for i := range pod.Status.ContainerStatuses {
containerStatuses[pod.Status.ContainerStatuses[i].Name] = &pod.Status.ContainerStatuses[i] containerStatuses[pod.Status.ContainerStatuses[i].Name] = &pod.Status.ContainerStatuses[i]
} }
for i := range pod.Status.InitContainerStatuses {
containerStatuses[pod.Status.InitContainerStatuses[i].Name] = &pod.Status.InitContainerStatuses[i]
}
} }
for _, container := range pod.Spec.Containers { for _, container := range pod.Spec.Containers {
@ -155,11 +158,7 @@ func AggregateContainerRequests(pod *v1.Pod, opts PodResourcesOptions) v1.Resour
if opts.UseStatusResources { if opts.UseStatusResources {
cs, found := containerStatuses[container.Name] cs, found := containerStatuses[container.Name]
if found && cs.Resources != nil { if found && cs.Resources != nil {
if pod.Status.Resize == v1.PodResizeStatusInfeasible { containerReqs = setContainerReqs(pod, &container, cs)
containerReqs = cs.Resources.Requests.DeepCopy()
} else {
containerReqs = max(container.Resources.Requests, cs.Resources.Requests)
}
} }
} }
@ -177,7 +176,6 @@ func AggregateContainerRequests(pod *v1.Pod, opts PodResourcesOptions) v1.Resour
restartableInitContainerReqs := v1.ResourceList{} restartableInitContainerReqs := v1.ResourceList{}
initContainerReqs := v1.ResourceList{} initContainerReqs := v1.ResourceList{}
// init containers define the minimum of any resource // init containers define the minimum of any resource
// Note: In-place resize is not allowed for InitContainers, so no need to check for ResizeStatus value
// //
// Let's say `InitContainerUse(i)` is the resource requirements when the i-th // Let's say `InitContainerUse(i)` is the resource requirements when the i-th
// init container is initializing, then // init container is initializing, then
@ -186,6 +184,15 @@ func AggregateContainerRequests(pod *v1.Pod, opts PodResourcesOptions) v1.Resour
// See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/753-sidecar-containers#exposing-pod-resource-requirements for the detail. // See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/753-sidecar-containers#exposing-pod-resource-requirements for the detail.
for _, container := range pod.Spec.InitContainers { for _, container := range pod.Spec.InitContainers {
containerReqs := container.Resources.Requests containerReqs := container.Resources.Requests
if opts.UseStatusResources {
if container.RestartPolicy != nil && *container.RestartPolicy == v1.ContainerRestartPolicyAlways {
cs, found := containerStatuses[container.Name]
if found && cs.Resources != nil {
containerReqs = setContainerReqs(pod, &container, cs)
}
}
}
if len(opts.NonMissingContainerRequests) > 0 { if len(opts.NonMissingContainerRequests) > 0 {
containerReqs = applyNonMissing(containerReqs, opts.NonMissingContainerRequests) containerReqs = applyNonMissing(containerReqs, opts.NonMissingContainerRequests)
} }
@ -214,6 +221,14 @@ func AggregateContainerRequests(pod *v1.Pod, opts PodResourcesOptions) v1.Resour
return reqs return reqs
} }
// setContainerReqs returns the container requests to use: the status requests if the proposed resize is infeasible, otherwise the element-wise max of the spec and status requests.
func setContainerReqs(pod *v1.Pod, container *v1.Container, cs *v1.ContainerStatus) v1.ResourceList {
if pod.Status.Resize == v1.PodResizeStatusInfeasible {
return cs.Resources.Requests.DeepCopy()
}
return max(container.Resources.Requests, cs.Resources.Requests)
}
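setContainerReqs relies on a max helper over two ResourceLists, which is not shown in this diff; assuming the element-wise semantics its use implies, it behaves like:

	// Assumed element-wise max: for every resource named in either list,
	// keep the larger of the two quantities.
	func max(a, b v1.ResourceList) v1.ResourceList {
		result := v1.ResourceList{}
		for name, q := range a {
			result[name] = q.DeepCopy()
		}
		for name, q := range b {
			if cur, ok := result[name]; !ok || q.Cmp(cur) > 0 {
				result[name] = q.DeepCopy()
			}
		}
		return result
	}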
// applyNonMissing will return a copy of the given resource list with any missing values replaced by the nonMissing values // applyNonMissing will return a copy of the given resource list with any missing values replaced by the nonMissing values
func applyNonMissing(reqs v1.ResourceList, nonMissing v1.ResourceList) v1.ResourceList { func applyNonMissing(reqs v1.ResourceList, nonMissing v1.ResourceList) v1.ResourceList {
cp := v1.ResourceList{} cp := v1.ResourceList{}

View File

@ -286,14 +286,16 @@ func TestPodRequestsAndLimitsWithoutOverhead(t *testing.T) {
func TestPodResourceRequests(t *testing.T) { func TestPodResourceRequests(t *testing.T) {
restartAlways := v1.ContainerRestartPolicyAlways restartAlways := v1.ContainerRestartPolicyAlways
testCases := []struct { testCases := []struct {
description string description string
options PodResourcesOptions options PodResourcesOptions
overhead v1.ResourceList overhead v1.ResourceList
podResizeStatus v1.PodResizeStatus podResizeStatus v1.PodResizeStatus
initContainers []v1.Container initContainers []v1.Container
containers []v1.Container initContainerStatus []v1.ContainerStatus
containerStatus []v1.ContainerStatus hasSidecarContainer bool
expectedRequests v1.ResourceList containers []v1.Container
containerStatus []v1.ContainerStatus
expectedRequests v1.ResourceList
}{ }{
{ {
description: "nil options, larger init container", description: "nil options, larger init container",
@ -427,7 +429,7 @@ func TestPodResourceRequests(t *testing.T) {
}, },
}, },
{ {
description: "resized, infeasible", description: "resized without sidecar containers, infeasible",
expectedRequests: v1.ResourceList{ expectedRequests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"), v1.ResourceCPU: resource.MustParse("2"),
}, },
@ -455,7 +457,53 @@ func TestPodResourceRequests(t *testing.T) {
}, },
}, },
{ {
description: "resized, no resize status", description: "resized with sidecar containers, infeasible",
hasSidecarContainer: true,
expectedRequests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("7"),
},
podResizeStatus: v1.PodResizeStatusInfeasible,
options: PodResourcesOptions{UseStatusResources: true},
containers: []v1.Container{
{
Name: "container-1",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4"),
},
},
},
},
initContainers: []v1.Container{
{
Name: "restartable-init-1",
RestartPolicy: &restartAlways,
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("10"),
},
},
},
},
containerStatus: []v1.ContainerStatus{
{
Name: "container-1",
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
},
},
},
},
initContainerStatus: []v1.ContainerStatus{
{
Name: "restartable-init-1",
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5"),
},
},
},
},
},
{
description: "resized with no sidecar containers, no resize status",
expectedRequests: v1.ResourceList{ expectedRequests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4"), v1.ResourceCPU: resource.MustParse("4"),
}, },
@ -480,6 +528,62 @@ func TestPodResourceRequests(t *testing.T) {
}, },
}, },
}, },
initContainerStatus: []v1.ContainerStatus{
{
Name: "restartable-init-1",
AllocatedResources: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
},
},
},
},
{
description: "resized with sidecar containers, no resize status",
hasSidecarContainer: true,
expectedRequests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("7"),
},
options: PodResourcesOptions{UseStatusResources: true},
containers: []v1.Container{
{
Name: "container-1",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4"),
},
},
},
},
initContainers: []v1.Container{
{
Name: "restartable-init-1",
RestartPolicy: &restartAlways,
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("3"),
},
},
},
},
containerStatus: []v1.ContainerStatus{
{
Name: "container-1",
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
},
},
},
},
initContainerStatus: []v1.ContainerStatus{
{
Name: "restartable-init-1",
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
},
},
},
},
}, },
{ {
description: "resized, infeasible, but don't use status", description: "resized, infeasible, but don't use status",
@ -700,8 +804,9 @@ func TestPodResourceRequests(t *testing.T) {
Overhead: tc.overhead, Overhead: tc.overhead,
}, },
Status: v1.PodStatus{ Status: v1.PodStatus{
ContainerStatuses: tc.containerStatus, ContainerStatuses: tc.containerStatus,
Resize: tc.podResizeStatus, InitContainerStatuses: tc.initContainerStatus,
Resize: tc.podResizeStatus,
}, },
} }
request := PodRequests(p, tc.options) request := PodRequests(p, tc.options)


@ -968,6 +968,131 @@ func doPodResizeTests() {
}, },
}, },
}, },
{
name: "Guaranteed QoS pod, one restartable init container - increase CPU & memory",
containers: []e2epod.ResizableContainerInfo{
{
Name: "c1",
Resources: &e2epod.ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "200Mi", MemLim: "200Mi"},
CPUPolicy: &noRestart,
MemPolicy: &noRestart,
},
{
Name: "c1-init",
Resources: &e2epod.ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "200Mi", MemLim: "200Mi"},
CPUPolicy: &noRestart,
MemPolicy: &noRestart,
IsRestartableInitCtr: true,
},
},
patchString: `{"spec":{"initcontainers":[
{"name":"c1-init", "resources":{"requests":{"cpu":"200m","memory":"400Mi"},"limits":{"cpu":"200m","memory":"400Mi"}}}
]}}`,
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",
Resources: &e2epod.ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "200Mi", MemLim: "200Mi"},
CPUPolicy: &noRestart,
MemPolicy: &noRestart,
},
{
Name: "c1-init",
Resources: &e2epod.ContainerResources{CPUReq: "200m", CPULim: "200m", MemReq: "400Mi", MemLim: "400Mi"},
CPUPolicy: &noRestart,
MemPolicy: &noRestart,
IsRestartableInitCtr: true,
},
},
},
{
name: "Guaranteed QoS pod, one restartable init container - decrease CPU & memory",
containers: []e2epod.ResizableContainerInfo{
{
Name: "c1",
Resources: &e2epod.ContainerResources{CPUReq: "300m", CPULim: "300m", MemReq: "500Mi", MemLim: "500Mi"},
CPUPolicy: &noRestart,
MemPolicy: &noRestart,
},
{
Name: "c1-init",
Resources: &e2epod.ContainerResources{CPUReq: "300m", CPULim: "300m", MemReq: "500Mi", MemLim: "500Mi"},
CPUPolicy: &noRestart,
MemPolicy: &noRestart,
IsRestartableInitCtr: true,
},
},
patchString: `{"spec":{"initcontainers":[
{"name":"c1-init", "resources":{"requests":{"cpu":"100m","memory":"250Mi"},"limits":{"cpu":"100m","memory":"250Mi"}}}
]}}`,
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",
Resources: &e2epod.ContainerResources{CPUReq: "300m", CPULim: "300m", MemReq: "500Mi", MemLim: "500Mi"},
CPUPolicy: &noRestart,
MemPolicy: &noRestart,
},
{
Name: "c1-init",
Resources: &e2epod.ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "250Mi", MemLim: "250Mi"},
CPUPolicy: &noRestart,
MemPolicy: &noRestart,
IsRestartableInitCtr: true,
},
},
},
{
name: "Guaranteed QoS pod, one restartable init container - increase CPU & decrease memory",
containers: []e2epod.ResizableContainerInfo{
{
Name: "c1",
Resources: &e2epod.ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "200Mi", MemLim: "200Mi"},
},
{
Name: "c1-init",
Resources: &e2epod.ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "200Mi", MemLim: "200Mi"},
IsRestartableInitCtr: true,
},
},
patchString: `{"spec":{"initcontainers":[
{"name":"c1-init", "resources":{"requests":{"cpu":"200m","memory":"100Mi"},"limits":{"cpu":"200m","memory":"100Mi"}}}
]}}`,
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",
Resources: &e2epod.ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "200Mi", MemLim: "200Mi"},
},
{
Name: "c1-init",
Resources: &e2epod.ContainerResources{CPUReq: "200m", CPULim: "200m", MemReq: "100Mi", MemLim: "100Mi"},
},
},
},
{
name: "Guaranteed QoS pod, one restartable init container - decrease CPU & increase memory",
containers: []e2epod.ResizableContainerInfo{
{
Name: "c1",
Resources: &e2epod.ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "200Mi", MemLim: "200Mi"},
},
{
Name: "c1-init",
Resources: &e2epod.ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "200Mi", MemLim: "200Mi"},
IsRestartableInitCtr: true,
},
},
patchString: `{"spec":{"initcontainers":[
{"name":"c1-init", "resources":{"requests":{"cpu":"50m","memory":"300Mi"},"limits":{"cpu":"50m","memory":"300Mi"}}}
]}}`,
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",
Resources: &e2epod.ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "200Mi", MemLim: "200Mi"},
},
{
Name: "c1-init",
Resources: &e2epod.ContainerResources{CPUReq: "50m", CPULim: "50m", MemReq: "300Mi", MemLim: "300Mi"},
},
},
},
} }
for idx := range tests { for idx := range tests {
@ -1003,6 +1128,7 @@ func doPodResizeTests() {
ginkgo.By("verifying initial pod resources are as expected") ginkgo.By("verifying initial pod resources are as expected")
e2epod.VerifyPodResources(newPod, tc.containers) e2epod.VerifyPodResources(newPod, tc.containers)
ginkgo.By("verifying initial pod resize policy is as expected") ginkgo.By("verifying initial pod resize policy is as expected")
e2epod.VerifyPodResizePolicy(newPod, tc.containers) e2epod.VerifyPodResizePolicy(newPod, tc.containers)


@ -271,6 +271,21 @@ func (c *PodClient) mungeSpec(pod *v1.Pod) {
// been prepulled. // been prepulled.
c.ImagePullPolicy = v1.PullNever c.ImagePullPolicy = v1.PullNever
} }
for i := range pod.Spec.InitContainers {
c := &pod.Spec.InitContainers[i]
if c.ImagePullPolicy == v1.PullAlways {
// If the image pull policy is PullAlways, the image doesn't need to be in
// the allow list or pre-pulled, because the image is expected to be pulled
// in the test anyway.
continue
}
// If the image policy is not PullAlways, the image must be in the pre-pull list and
// pre-pulled.
gomega.Expect(ImagePrePullList.Has(c.Image)).To(gomega.BeTrueBecause("Image %q is not in the pre-pull list, consider adding it to PrePulledImages in test/e2e/common/util.go or NodePrePullImageList in test/e2e_node/image_list.go", c.Image))
// Do not pull images during the tests because the images in pre-pull list should have
// been prepulled.
c.ImagePullPolicy = v1.PullNever
}
} }
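The new init-container loop duplicates the regular-container branch line for line; a possible follow-up refactor (a sketch only, not part of this change) hoists the shared body into one helper called over both slices:

	// Sketch: mungeSpec could run this over pod.Spec.Containers and
	// pod.Spec.InitContainers instead of carrying two identical loops.
	func mungeContainerPullPolicy(c *v1.Container) {
		if c.ImagePullPolicy == v1.PullAlways {
			// The image is expected to be pulled by the test itself.
			return
		}
		gomega.Expect(ImagePrePullList.Has(c.Image)).To(gomega.BeTrueBecause(
			"Image %q is not in the pre-pull list, consider adding it to PrePulledImages in test/e2e/common/util.go or NodePrePullImageList in test/e2e_node/image_list.go", c.Image))
		c.ImagePullPolicy = v1.PullNever
	}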
// WaitForSuccess waits for pod to succeed. // WaitForSuccess waits for pod to succeed.


@ -98,11 +98,12 @@ func (cr *ContainerResources) ResourceRequirements() *v1.ResourceRequirements {
} }
type ResizableContainerInfo struct { type ResizableContainerInfo struct {
Name string Name string
Resources *ContainerResources Resources *ContainerResources
CPUPolicy *v1.ResourceResizeRestartPolicy CPUPolicy *v1.ResourceResizeRestartPolicy
MemPolicy *v1.ResourceResizeRestartPolicy MemPolicy *v1.ResourceResizeRestartPolicy
RestartCount int32 RestartCount int32
IsRestartableInitCtr bool
} }
type containerPatch struct { type containerPatch struct {
@ -123,7 +124,8 @@ type containerPatch struct {
type patchSpec struct { type patchSpec struct {
Spec struct { Spec struct {
Containers []containerPatch `json:"containers"` Containers []containerPatch `json:"containers,omitempty"`
InitContainers []containerPatch `json:"initContainers,omitempty"`
} `json:"spec"` } `json:"spec"`
} }
@ -160,10 +162,15 @@ func makeResizableContainer(tcInfo ResizableContainerInfo) v1.Container {
func MakePodWithResizableContainers(ns, name, timeStamp string, tcInfo []ResizableContainerInfo) *v1.Pod { func MakePodWithResizableContainers(ns, name, timeStamp string, tcInfo []ResizableContainerInfo) *v1.Pod {
var testContainers []v1.Container var testContainers []v1.Container
var testInitContainers []v1.Container
for _, ci := range tcInfo { for _, ci := range tcInfo {
tc := makeResizableContainer(ci) tc, _ := makeResizableContainer(ci)
testContainers = append(testContainers, tc) if ci.IsRestartableInitCtr {
testInitContainers = append(testInitContainers, tc)
} else {
testContainers = append(testContainers, tc)
}
} }
pod := &v1.Pod{ pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
@ -179,15 +186,36 @@ func MakePodWithResizableContainers(ns, name, timeStamp string, tcInfo []Resizab
RestartPolicy: v1.RestartPolicyOnFailure, RestartPolicy: v1.RestartPolicyOnFailure,
}, },
} }
if len(testInitContainers) > 0 {
pod.Spec.RestartPolicy = v1.RestartPolicyAlways
pod.Spec.InitContainers = testInitContainers
}
return pod return pod
} }
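A usage sketch for the helper above: one regular container plus one restartable init container (namespace, names, and sizes are placeholders):

	pod := MakePodWithResizableContainers("test-ns", "resize-pod", "ts1", []ResizableContainerInfo{
		{
			Name:      "c1",
			Resources: &ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "200Mi", MemLim: "200Mi"},
		},
		{
			Name:                 "c1-init",
			Resources:            &ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "200Mi", MemLim: "200Mi"},
			IsRestartableInitCtr: true,
		},
	})
	// c1-init lands in pod.Spec.InitContainers, and the pod restart policy
	// is flipped to Always so the sidecar keeps running during the resize.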
func VerifyPodResizePolicy(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo) { func VerifyPodResizePolicy(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo) {
ginkgo.GinkgoHelper() ginkgo.GinkgoHelper()
gomega.Expect(gotPod.Spec.Containers).To(gomega.HaveLen(len(wantCtrs)), "number of containers in pod spec should match") containers := gotPod.Spec.Containers
for i, wantCtr := range wantCtrs { for _, c := range gotPod.Spec.InitContainers {
gotCtr := &gotPod.Spec.Containers[i] containers = append(containers, c)
ctr := makeResizableContainer(wantCtr) }
gomega.Expect(containers).To(gomega.HaveLen(len(wantCtrs)), "number of containers in pod spec should match")
idx, initIdx := 0, 0
var gotCtr *v1.Container
for _, wantCtr := range wantCtrs {
if wantCtr.IsRestartableInitCtr {
gotCtr = &gotPod.Spec.InitContainers[initIdx]
initIdx += 1
} else {
gotCtr = &gotPod.Spec.Containers[idx]
idx += 1
}
ctr, _ := makeResizableContainer(wantCtr)
gomega.Expect(gotCtr.Name).To(gomega.Equal(ctr.Name)) gomega.Expect(gotCtr.Name).To(gomega.Equal(ctr.Name))
gomega.Expect(gotCtr.ResizePolicy).To(gomega.Equal(ctr.ResizePolicy)) gomega.Expect(gotCtr.ResizePolicy).To(gomega.Equal(ctr.ResizePolicy))
} }
@ -195,10 +223,24 @@ func VerifyPodResizePolicy(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo) {
func VerifyPodResources(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo) { func VerifyPodResources(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo) {
ginkgo.GinkgoHelper() ginkgo.GinkgoHelper()
gomega.Expect(gotPod.Spec.Containers).To(gomega.HaveLen(len(wantCtrs)), "number of containers in pod spec should match") containers := gotPod.Spec.Containers
for i, wantCtr := range wantCtrs { for _, c := range gotPod.Spec.InitContainers {
gotCtr := &gotPod.Spec.Containers[i] containers = append(containers, c)
ctr := makeResizableContainer(wantCtr) }
gomega.Expect(containers).To(gomega.HaveLen(len(wantCtrs)), "number of containers in pod spec should match")
idx, initIdx := 0, 0
var gotCtr *v1.Container
for _, wantCtr := range wantCtrs {
if wantCtr.IsRestartableInitCtr {
gotCtr = &gotPod.Spec.InitContainers[initIdx]
initIdx += 1
} else {
gotCtr = &gotPod.Spec.Containers[idx]
idx += 1
}
ctr, _ := makeResizableContainer(wantCtr)
gomega.Expect(gotCtr.Name).To(gomega.Equal(ctr.Name)) gomega.Expect(gotCtr.Name).To(gomega.Equal(ctr.Name))
gomega.Expect(gotCtr.Resources).To(gomega.Equal(ctr.Resources)) gomega.Expect(gotCtr.Resources).To(gomega.Equal(ctr.Resources))
} }
@ -208,13 +250,25 @@ func VerifyPodStatusResources(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo)
ginkgo.GinkgoHelper() ginkgo.GinkgoHelper()
var errs []error var errs []error
containerStatuses := gotPod.Status.ContainerStatuses
if len(gotPod.Status.ContainerStatuses) != len(wantCtrs) { for _, cs := range gotPod.Status.InitContainerStatuses {
return fmt.Errorf("expectation length mismatch: got %d statuses, want %d", containerStatuses = append(containerStatuses, cs)
len(gotPod.Status.ContainerStatuses), len(wantCtrs))
} }
if len(containerStatuses) != len(wantCtrs) {
return fmt.Errorf("expectation length mismatch: got %d statuses, want %d",
len(containerStatuses), len(wantCtrs))
}
idx, initIdx := 0, 0
var gotCtrStatus *v1.ContainerStatus
for i, wantCtr := range wantCtrs { for i, wantCtr := range wantCtrs {
gotCtrStatus := &gotPod.Status.ContainerStatuses[i] if wantCtr.IsRestartableInitCtr {
gotCtrStatus = &gotPod.Status.InitContainerStatuses[initIdx]
initIdx += 1
} else {
gotCtrStatus = &gotPod.Status.ContainerStatuses[idx]
idx += 1
}
ctr := makeResizableContainer(wantCtr) ctr, _ := makeResizableContainer(wantCtr)
if gotCtrStatus.Name != ctr.Name { if gotCtrStatus.Name != ctr.Name {
errs = append(errs, fmt.Errorf("container status %d name %q != expected name %q", i, gotCtrStatus.Name, ctr.Name)) errs = append(errs, fmt.Errorf("container status %d name %q != expected name %q", i, gotCtrStatus.Name, ctr.Name))
@ -297,7 +351,11 @@ func verifyContainerRestarts(pod *v1.Pod, expectedContainers []ResizableContaine
} }
errs := []error{} errs := []error{}
for _, cs := range pod.Status.ContainerStatuses { containerStatuses := pod.Status.ContainerStatuses
for _, cs := range pod.Status.InitContainerStatuses {
containerStatuses = append(containerStatuses, cs)
}
for _, cs := range containerStatuses {
expectedRestarts := expectContainerRestarts[cs.Name] expectedRestarts := expectContainerRestarts[cs.Name]
if cs.RestartCount != expectedRestarts { if cs.RestartCount != expectedRestarts {
errs = append(errs, fmt.Errorf("unexpected number of restarts for container %s: got %d, want %d", cs.Name, cs.RestartCount, expectedRestarts)) errs = append(errs, fmt.Errorf("unexpected number of restarts for container %s: got %d, want %d", cs.Name, cs.RestartCount, expectedRestarts))
@ -369,8 +427,11 @@ func ResizeContainerPatch(containers []ResizableContainerInfo) (string, error) {
cPatch.Resources.Requests.Memory = container.Resources.MemReq cPatch.Resources.Requests.Memory = container.Resources.MemReq
cPatch.Resources.Limits.CPU = container.Resources.CPULim cPatch.Resources.Limits.CPU = container.Resources.CPULim
cPatch.Resources.Limits.Memory = container.Resources.MemLim cPatch.Resources.Limits.Memory = container.Resources.MemLim
if container.IsRestartableInitCtr {
patch.Spec.Containers = append(patch.Spec.Containers, cPatch) patch.Spec.InitContainers = append(patch.Spec.InitContainers, cPatch)
} else {
patch.Spec.Containers = append(patch.Spec.Containers, cPatch)
}
} }
patchBytes, err := json.Marshal(patch) patchBytes, err := json.Marshal(patch)
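With the tag fix above, a patch built for a restartable init container serializes under the initContainers key; an illustrative call (values are placeholders):

	// Illustrative only: resize a single restartable init container.
	patch, err := ResizeContainerPatch([]ResizableContainerInfo{{
		Name:                 "c1-init",
		Resources:            &ContainerResources{CPUReq: "200m", CPULim: "200m", MemReq: "400Mi", MemLim: "400Mi"},
		IsRestartableInitCtr: true,
	}})
	// patch ~= {"spec":{"initContainers":[{"name":"c1-init","resources":{
	//   "requests":{"cpu":"200m","memory":"400Mi"},
	//   "limits":{"cpu":"200m","memory":"400Mi"}}}]}}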