Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-03 01:06:27 +00:00)

Commit 8fa8277908 ("Added some unit tests"), parent 2ba61325f6.
@@ -5413,16 +5413,16 @@ func ValidateInitContainerStateTransition(newStatuses, oldStatuses []core.Contai
 			}
 		}

 		// Skip any restartable init container that is allowed to restart
-		isRestartableInitContainer := false
+		isRestartableInitCtr := false
 		for _, c := range podSpec.InitContainers {
 			if oldStatus.Name == c.Name {
-				if c.RestartPolicy != nil && *c.RestartPolicy == core.ContainerRestartPolicyAlways {
-					isRestartableInitContainer = true
+				if isRestartableInitContainer(&c) {
+					isRestartableInitCtr = true
 				}
 				break
 			}
 		}
-		if isRestartableInitContainer {
+		if isRestartableInitCtr {
 			continue
 		}
@@ -5619,7 +5619,7 @@ func ValidatePodResize(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel
 	// Do not allow removing resource requests/limits on resize.
 	if utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) {
 		for ix, ctr := range oldPod.Spec.InitContainers {
-			if ctr.RestartPolicy != nil && *ctr.RestartPolicy != core.ContainerRestartPolicyAlways {
+			if !isRestartableInitContainer(&ctr) {
 				continue
 			}
 			if resourcesRemoved(newPod.Spec.InitContainers[ix].Resources.Requests, ctr.Resources.Requests) {
@@ -5652,7 +5652,7 @@ func ValidatePodResize(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel
 	var newInitContainers []core.Container
 	if utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) {
 		for ix, container := range originalCPUMemPodSpec.InitContainers {
-			if container.RestartPolicy != nil && *container.RestartPolicy == core.ContainerRestartPolicyAlways { // restartable init container
+			if isRestartableInitContainer(&container) { // restartable init container
				dropCPUMemoryResourcesFromContainer(&container, &oldPod.Spec.InitContainers[ix])
 			}
 			newInitContainers = append(newInitContainers, container)
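All three validation hunks above replace the same inline restart-policy comparison with a shared isRestartableInitContainer helper whose definition sits outside this diff. Judging from the condition it replaces, the helper presumably amounts to this minimal sketch:

func isRestartableInitContainer(initContainer *core.Container) bool {
	// Inferred from the inline check the refactor removes: a restartable
	// (sidecar) init container is one whose RestartPolicy is Always.
	if initContainer == nil || initContainer.RestartPolicy == nil {
		return false
	}
	return *initContainer.RestartPolicy == core.ContainerRestartPolicyAlways
}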
@@ -2584,9 +2584,11 @@ func TestPodResourceAllocationReset(t *testing.T) {

 func TestHandlePodResourcesResize(t *testing.T) {
 	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)
+	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, true)
 	testKubelet := newTestKubelet(t, false)
 	defer testKubelet.Cleanup()
 	kubelet := testKubelet.kubelet
+	containerRestartPolicyAlways := v1.ContainerRestartPolicyAlways

 	cpu1m := resource.MustParse("1m")
 	cpu2m := resource.MustParse("2m")
@@ -2651,6 +2653,28 @@ func TestHandlePodResourcesResize(t *testing.T) {
 	testPod2.UID = "2222"
 	testPod2.Name = "pod2"
 	testPod2.Namespace = "ns2"
+	testPod2.Spec = v1.PodSpec{
+		InitContainers: []v1.Container{
+			{
+				Name:  "c1-init",
+				Image: "i1",
+				Resources: v1.ResourceRequirements{
+					Requests: v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
+				},
+				RestartPolicy: &containerRestartPolicyAlways,
+			},
+		},
+	}
+	testPod2.Status = v1.PodStatus{
+		Phase: v1.PodRunning,
+		InitContainerStatuses: []v1.ContainerStatus{
+			{
+				Name:               "c1-init",
+				AllocatedResources: v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
+				Resources:          &v1.ResourceRequirements{},
+			},
+		},
+	}
 	testPod3 := testPod1.DeepCopy()
 	testPod3.UID = "3333"
 	testPod3.Name = "pod3"
@@ -2842,72 +2866,115 @@ func TestHandlePodResourcesResize(t *testing.T) {
 	}

 	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			oldGOOS := goos
-			defer func() { goos = oldGOOS }()
-			if tt.goos != "" {
-				goos = tt.goos
-			}
-			kubelet.statusManager = status.NewFakeManager()
-
-			originalPod := testPod1.DeepCopy()
-			originalPod.Spec.Containers[0].Resources.Requests = tt.originalRequests
-			originalPod.Spec.Containers[0].Resources.Limits = tt.originalLimits
-			kubelet.podManager.UpdatePod(originalPod)
-
-			newPod := originalPod.DeepCopy()
-			newPod.Spec.Containers[0].Resources.Requests = tt.newRequests
-			newPod.Spec.Containers[0].Resources.Limits = tt.newLimits
-
-			if !tt.newResourcesAllocated {
-				require.NoError(t, kubelet.statusManager.SetPodAllocation(originalPod))
-			} else {
-				require.NoError(t, kubelet.statusManager.SetPodAllocation(newPod))
-			}
-
-			podStatus := &kubecontainer.PodStatus{
-				ID:                originalPod.UID,
-				Name:              originalPod.Name,
-				Namespace:         originalPod.Namespace,
-				ContainerStatuses: make([]*kubecontainer.Status, len(originalPod.Spec.Containers)),
-			}
-			for i, c := range originalPod.Spec.Containers {
-				podStatus.ContainerStatuses[i] = &kubecontainer.Status{
-					Name:  c.Name,
-					State: kubecontainer.ContainerStateRunning,
-					Resources: &kubecontainer.ContainerResources{
-						CPURequest:  c.Resources.Requests.Cpu(),
-						CPULimit:    c.Resources.Limits.Cpu(),
-						MemoryLimit: c.Resources.Limits.Memory(),
-					},
-				}
-			}
-
-			now := kubelet.clock.Now()
-			// Put the container in backoff so we can confirm backoff is reset.
-			backoffKey := kuberuntime.GetStableKey(originalPod, &originalPod.Spec.Containers[0])
-			kubelet.backOff.Next(backoffKey, now)
-
-			updatedPod, err := kubelet.handlePodResourcesResize(newPod, podStatus)
-			require.NoError(t, err)
-			assert.Equal(t, tt.expectedAllocatedReqs, updatedPod.Spec.Containers[0].Resources.Requests, "updated pod spec requests")
-			assert.Equal(t, tt.expectedAllocatedLims, updatedPod.Spec.Containers[0].Resources.Limits, "updated pod spec limits")
-
-			alloc, found := kubelet.statusManager.GetContainerResourceAllocation(string(newPod.UID), newPod.Spec.Containers[0].Name)
-			require.True(t, found, "container allocation")
-			assert.Equal(t, tt.expectedAllocatedReqs, alloc.Requests, "stored container request allocation")
-			assert.Equal(t, tt.expectedAllocatedLims, alloc.Limits, "stored container limit allocation")
-
-			resizeStatus := kubelet.statusManager.GetPodResizeStatus(newPod.UID)
-			assert.Equal(t, tt.expectedResize, resizeStatus)
-
-			isInBackoff := kubelet.backOff.IsInBackOffSince(backoffKey, now)
-			if tt.expectBackoffReset {
-				assert.False(t, isInBackoff, "container backoff should be reset")
-			} else {
-				assert.True(t, isInBackoff, "container backoff should not be reset")
-			}
-		})
+		for _, isSidecarContainer := range []bool{false, true} {
+			t.Run(tt.name, func(t *testing.T) {
+				oldGOOS := goos
+				defer func() { goos = oldGOOS }()
+				if tt.goos != "" {
+					goos = tt.goos
+				}
+				kubelet.statusManager = status.NewFakeManager()
+
+				var originalPod *v1.Pod
+				if isSidecarContainer {
+					originalPod = testPod2.DeepCopy()
+					originalPod.Spec.InitContainers[0].Resources.Requests = tt.originalRequests
+					originalPod.Spec.InitContainers[0].Resources.Limits = tt.originalLimits
+				} else {
+					originalPod = testPod1.DeepCopy()
+					originalPod.Spec.Containers[0].Resources.Requests = tt.originalRequests
+					originalPod.Spec.Containers[0].Resources.Limits = tt.originalLimits
+				}
+				kubelet.podManager.UpdatePod(originalPod)
+
+				newPod := originalPod.DeepCopy()
+				if isSidecarContainer {
+					newPod.Spec.InitContainers[0].Resources.Requests = tt.newRequests
+					newPod.Spec.InitContainers[0].Resources.Limits = tt.newLimits
+				} else {
+					newPod.Spec.Containers[0].Resources.Requests = tt.newRequests
+					newPod.Spec.Containers[0].Resources.Limits = tt.newLimits
+				}
+
+				if !tt.newResourcesAllocated {
+					require.NoError(t, kubelet.statusManager.SetPodAllocation(originalPod))
+				} else {
+					require.NoError(t, kubelet.statusManager.SetPodAllocation(newPod))
+				}
+
+				podStatus := &kubecontainer.PodStatus{
+					ID:        originalPod.UID,
+					Name:      originalPod.Name,
+					Namespace: originalPod.Namespace,
+				}
+
+				setContainerStatus := func(podStatus *kubecontainer.PodStatus, c *v1.Container, idx int) {
+					podStatus.ContainerStatuses[idx] = &kubecontainer.Status{
+						Name:  c.Name,
+						State: kubecontainer.ContainerStateRunning,
+						Resources: &kubecontainer.ContainerResources{
+							CPURequest:  c.Resources.Requests.Cpu(),
+							CPULimit:    c.Resources.Limits.Cpu(),
+							MemoryLimit: c.Resources.Limits.Memory(),
+						},
+					}
+				}
+
+				if isSidecarContainer {
+					podStatus.ContainerStatuses = make([]*kubecontainer.Status, len(originalPod.Spec.InitContainers))
+					for i, c := range originalPod.Spec.InitContainers {
+						setContainerStatus(podStatus, &c, i)
+					}
+				} else {
+					podStatus.ContainerStatuses = make([]*kubecontainer.Status, len(originalPod.Spec.Containers))
+					for i, c := range originalPod.Spec.Containers {
+						setContainerStatus(podStatus, &c, i)
+					}
+				}
+
+				now := kubelet.clock.Now()
+				// Put the container in backoff so we can confirm backoff is reset.
+				var backoffKey string
+				if isSidecarContainer {
+					backoffKey = kuberuntime.GetStableKey(originalPod, &originalPod.Spec.InitContainers[0])
+				} else {
+					backoffKey = kuberuntime.GetStableKey(originalPod, &originalPod.Spec.Containers[0])
+				}
+				kubelet.backOff.Next(backoffKey, now)
+
+				updatedPod, err := kubelet.handlePodResourcesResize(newPod, podStatus)
+				require.NoError(t, err)
+
+				var updatedPodCtr v1.Container
+				var newPodCtr v1.Container
+				if isSidecarContainer {
+					updatedPodCtr = updatedPod.Spec.InitContainers[0]
+					newPodCtr = newPod.Spec.InitContainers[0]
+				} else {
+					updatedPodCtr = updatedPod.Spec.Containers[0]
+					newPodCtr = newPod.Spec.Containers[0]
+				}
+				assert.Equal(t, tt.expectedAllocatedReqs, updatedPodCtr.Resources.Requests, "updated pod spec requests")
+				assert.Equal(t, tt.expectedAllocatedLims, updatedPodCtr.Resources.Limits, "updated pod spec limits")
+
+				alloc, found := kubelet.statusManager.GetContainerResourceAllocation(string(newPod.UID), newPodCtr.Name)
+				require.True(t, found, "container allocation")
+				assert.Equal(t, tt.expectedAllocatedReqs, alloc.Requests, "stored container request allocation")
+				assert.Equal(t, tt.expectedAllocatedLims, alloc.Limits, "stored container limit allocation")
+
+				resizeStatus := kubelet.statusManager.GetPodResizeStatus(newPod.UID)
+				assert.Equal(t, tt.expectedResize, resizeStatus)
+
+				isInBackoff := kubelet.backOff.IsInBackOffSince(backoffKey, now)
+				if tt.expectBackoffReset {
+					assert.False(t, isInBackoff, "container backoff should be reset")
+				} else {
+					assert.True(t, isInBackoff, "container backoff should not be reset")
+				}
+			})
+		}
 	}
 }
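The rewrite above keeps every table case intact and adds an inner loop, so each resize scenario now runs once against a regular container (testPod1) and once against a restartable init container (testPod2), sharing all assertions. Condensed, the new structure is:

// Condensed structure of the rewritten loop; details elided, see the
// full diff above.
for _, tt := range tests {
	for _, isSidecarContainer := range []bool{false, true} {
		t.Run(tt.name, func(t *testing.T) {
			// 1. Copy testPod2 (sidecar) or testPod1 (regular) and apply
			//    tt.originalRequests/Limits to its first container.
			// 2. Build newPod with tt.newRequests/Limits, then seed the fake
			//    status manager and a backoff entry for that container.
			// 3. Call kubelet.handlePodResourcesResize and assert on the
			//    updated spec, the stored allocation, the resize status,
			//    and whether backoff was reset.
		})
	}
}

Note that both iterations reuse tt.name, so go test distinguishes the two subtests only by the automatic #01 suffix it appends to duplicate names.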
@@ -2451,6 +2451,7 @@ func (w *warningRecorder) AddWarning(_, text string) {
 }

 func TestPodResizePrepareForUpdate(t *testing.T) {
+	containerRestartPolicyAlways := api.ContainerRestartPolicyAlways
 	tests := []struct {
 		name   string
 		oldPod *api.Pod
@@ -2898,12 +2899,156 @@ func TestPodResizePrepareForUpdate(t *testing.T) {
 			)),
 		),
 	},
+	{
+		name: "Update resources for sidecar container",
+		oldPod: podtest.MakePod("test-pod",
+			podtest.SetResourceVersion("1"),
+			podtest.SetInitContainers(
+				podtest.MakeContainer("init-container1",
+					podtest.SetContainerRestartPolicy(containerRestartPolicyAlways),
+					podtest.SetContainerResources(api.ResourceRequirements{
+						Requests: api.ResourceList{
+							api.ResourceCPU:    resource.MustParse("100m"),
+							api.ResourceMemory: resource.MustParse("1Gi"),
+						},
+					}),
+				),
+			),
+			podtest.SetStatus(podtest.MakePodStatus(
+				podtest.SetContainerStatuses(
+					podtest.MakeContainerStatus("init-container1",
+						api.ResourceList{
+							api.ResourceCPU:    resource.MustParse("100m"),
+							api.ResourceMemory: resource.MustParse("1Gi"),
+						}),
+				),
+			)),
+		),
+		newPod: podtest.MakePod("test-pod",
+			podtest.SetResourceVersion("2"),
+			podtest.SetInitContainers(
+				podtest.MakeContainer("init-container1",
+					podtest.SetContainerRestartPolicy(containerRestartPolicyAlways),
+					podtest.SetContainerResources(api.ResourceRequirements{
+						Requests: api.ResourceList{
+							api.ResourceCPU:    resource.MustParse("200m"), // Updated resource
+							api.ResourceMemory: resource.MustParse("4Gi"),  // Updated resource
+						},
+					}),
+				),
+			),
+			podtest.SetStatus(podtest.MakePodStatus(
+				podtest.SetContainerStatuses(
+					podtest.MakeContainerStatus("init-container1",
+						api.ResourceList{
+							api.ResourceCPU:    resource.MustParse("100m"),
+							api.ResourceMemory: resource.MustParse("1Gi"),
+						}),
+				),
+			)),
+		),
+		expected: podtest.MakePod("test-pod",
+			podtest.SetResourceVersion("2"),
+			podtest.SetInitContainers(
+				podtest.MakeContainer("init-container1",
+					podtest.SetContainerRestartPolicy(containerRestartPolicyAlways),
+					podtest.SetContainerResources(api.ResourceRequirements{
+						Requests: api.ResourceList{
+							api.ResourceCPU:    resource.MustParse("200m"), // Updated resource
+							api.ResourceMemory: resource.MustParse("4Gi"),  // Updated resource
+						},
+					}),
+				),
+			),
+			podtest.SetStatus(podtest.MakePodStatus(
+				podtest.SetResizeStatus(api.PodResizeStatusProposed), // Resize status set
+				podtest.SetContainerStatuses(
+					podtest.MakeContainerStatus("init-container1",
+						api.ResourceList{
+							api.ResourceCPU:    resource.MustParse("100m"),
+							api.ResourceMemory: resource.MustParse("1Gi"),
+						}),
+				),
+			)),
+		),
+	},
+	{
+		name: "Update resources should fail for non-restartable init container",
+		oldPod: podtest.MakePod("test-pod",
+			podtest.SetResourceVersion("1"),
+			podtest.SetInitContainers(
+				podtest.MakeContainer("init-container1",
+					podtest.SetContainerResources(api.ResourceRequirements{
+						Requests: api.ResourceList{
+							api.ResourceCPU:    resource.MustParse("100m"),
+							api.ResourceMemory: resource.MustParse("1Gi"),
+						},
+					}),
+				),
+			),
+			podtest.SetStatus(podtest.MakePodStatus(
+				podtest.SetContainerStatuses(
+					podtest.MakeContainerStatus("init-container1",
+						api.ResourceList{
+							api.ResourceCPU:    resource.MustParse("100m"),
+							api.ResourceMemory: resource.MustParse("1Gi"),
+						}),
+				),
+			)),
+		),
+		newPod: podtest.MakePod("test-pod",
+			podtest.SetResourceVersion("2"),
+			podtest.SetInitContainers(
+				podtest.MakeContainer("init-container1",
+					podtest.SetContainerResources(api.ResourceRequirements{
+						Requests: api.ResourceList{
+							api.ResourceCPU:    resource.MustParse("200m"), // Updated resource
+							api.ResourceMemory: resource.MustParse("4Gi"),  // Updated resource
+						},
+					}),
+				),
+			),
+			podtest.SetStatus(podtest.MakePodStatus(
+				podtest.SetContainerStatuses(
+					podtest.MakeContainerStatus("init-container1",
+						api.ResourceList{
+							api.ResourceCPU:    resource.MustParse("100m"),
+							api.ResourceMemory: resource.MustParse("1Gi"),
+						}),
+				),
+			)),
+		),
+		expected: podtest.MakePod("test-pod",
+			podtest.SetResourceVersion("2"),
+			podtest.SetInitContainers(
+				podtest.MakeContainer("init-container1",
+					podtest.SetContainerResources(api.ResourceRequirements{
+						Requests: api.ResourceList{
+							api.ResourceCPU:    resource.MustParse("200m"), // Updated resource
+							api.ResourceMemory: resource.MustParse("4Gi"),  // Updated resource
+						},
+					}),
+				),
+			),
+			podtest.SetStatus(podtest.MakePodStatus(
+				podtest.SetResizeStatus(""), // Resize status not set
+				podtest.SetContainerStatuses(
+					podtest.MakeContainerStatus("init-container1",
+						api.ResourceList{
+							api.ResourceCPU:    resource.MustParse("100m"),
+							api.ResourceMemory: resource.MustParse("1Gi"),
+						}),
+				),
+			)),
+		),
+	},
 	}

 	for _, tc := range tests {
 		t.Run(tc.name, func(t *testing.T) {
 			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)
 			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScalingAllocatedStatus, true)
+			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, true)
 			ctx := context.Background()
 			ResizeStrategy.PrepareForUpdate(ctx, tc.newPod, tc.oldPod)
 			if !cmp.Equal(tc.expected, tc.newPod) {
@@ -286,15 +286,15 @@ func TestPodRequestsAndLimitsWithoutOverhead(t *testing.T) {
 func TestPodResourceRequests(t *testing.T) {
 	restartAlways := v1.ContainerRestartPolicyAlways
 	testCases := []struct {
 		description           string
 		options               PodResourcesOptions
 		overhead              v1.ResourceList
 		podResizeStatus       v1.PodResizeStatus
 		initContainers        []v1.Container
-		initContainerStatus   []v1.ContainerStatus
+		initContainerStatuses []v1.ContainerStatus
 		containers            []v1.Container
 		containerStatus       []v1.ContainerStatus
 		expectedRequests      v1.ResourceList
 	}{
 		{
 			description: "nil options, larger init container",
@@ -481,14 +481,6 @@ func TestPodResourceRequests(t *testing.T) {
 					},
 				},
 			},
-			initContainerStatus: []v1.ContainerStatus{
-				{
-					Name: "restartable-init-1",
-					AllocatedResources: v1.ResourceList{
-						v1.ResourceCPU: resource.MustParse("2"),
-					},
-				},
-			},
 		},
 		{
 			description: "resized, infeasible, but don't use status",
@@ -518,6 +510,92 @@ func TestPodResourceRequests(t *testing.T) {
 				},
 			},
 		},
+		{
+			description: "resized, restartable init container, infeasible",
+			expectedRequests: v1.ResourceList{
+				v1.ResourceCPU: resource.MustParse("2"),
+			},
+			podResizeStatus: v1.PodResizeStatusInfeasible,
+			options:         PodResourcesOptions{UseStatusResources: true},
+			initContainers: []v1.Container{
+				{
+					Name:          "restartable-init-1",
+					RestartPolicy: &restartAlways,
+					Resources: v1.ResourceRequirements{
+						Requests: v1.ResourceList{
+							v1.ResourceCPU: resource.MustParse("4"),
+						},
+					},
+				},
+			},
+			initContainerStatuses: []v1.ContainerStatus{
+				{
+					Name: "restartable-init-1",
+					Resources: &v1.ResourceRequirements{
+						Requests: v1.ResourceList{
+							v1.ResourceCPU: resource.MustParse("2"),
+						},
+					},
+				},
+			},
+		},
+		{
+			description: "resized, restartable init container, no resize status",
+			expectedRequests: v1.ResourceList{
+				v1.ResourceCPU: resource.MustParse("4"),
+			},
+			options: PodResourcesOptions{UseStatusResources: true},
+			initContainers: []v1.Container{
+				{
+					Name:          "restartable-init-1",
+					RestartPolicy: &restartAlways,
+					Resources: v1.ResourceRequirements{
+						Requests: v1.ResourceList{
+							v1.ResourceCPU: resource.MustParse("4"),
+						},
+					},
+				},
+			},
+			initContainerStatuses: []v1.ContainerStatus{
+				{
+					Name: "restartable-init-1",
+					Resources: &v1.ResourceRequirements{
+						Requests: v1.ResourceList{
+							v1.ResourceCPU: resource.MustParse("2"),
+						},
+					},
+				},
+			},
+		},
+		{
+			description: "resized, restartable init container, infeasible, but don't use status",
+			expectedRequests: v1.ResourceList{
+				v1.ResourceCPU: resource.MustParse("4"),
+			},
+			podResizeStatus: v1.PodResizeStatusInfeasible,
+			options:         PodResourcesOptions{UseStatusResources: false},
+			initContainers: []v1.Container{
+				{
+					Name:          "restartable-init-1",
+					RestartPolicy: &restartAlways,
+					Resources: v1.ResourceRequirements{
+						Requests: v1.ResourceList{
+							v1.ResourceCPU: resource.MustParse("4"),
+						},
+					},
+				},
+			},
+			initContainerStatuses: []v1.ContainerStatus{
+				{
+					Name: "restartable-init-1",
+					Resources: &v1.ResourceRequirements{
+						Requests: v1.ResourceList{
+							v1.ResourceCPU: resource.MustParse("2"),
+						},
+					},
+				},
+			},
+		},
 		{
 			description: "restartable init container",
 			expectedRequests: v1.ResourceList{
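The three new request cases pin down how a resized restartable init container is treated when UseStatusResources is set: an Infeasible resize falls back to the allocated (status) value, otherwise the larger of spec and status wins, and without UseStatusResources the spec value is used as-is. A minimal sketch of that decision rule, consistent with the expectations above (hypothetical helper names, not the real resource-helpers implementation):

// Hypothetical helpers illustrating the rule the cases above encode.
func requestsForResizedContainer(spec, status v1.ResourceList, resize v1.PodResizeStatus, useStatus bool) v1.ResourceList {
	if !useStatus || status == nil {
		return spec // "don't use status": spec wins (CPU=4)
	}
	if resize == v1.PodResizeStatusInfeasible {
		return status // infeasible resize: fall back to allocated values (CPU=2)
	}
	return maxResourceList(spec, status) // otherwise take the per-resource max (CPU=4)
}

// maxResourceList returns the per-resource maximum of two lists.
func maxResourceList(a, b v1.ResourceList) v1.ResourceList {
	out := a.DeepCopy()
	for name, bQty := range b {
		if aQty, ok := out[name]; !ok || bQty.Cmp(aQty) > 0 {
			out[name] = bQty
		}
	}
	return out
}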
@@ -710,7 +788,7 @@ func TestPodResourceRequests(t *testing.T) {
 			},
 			Status: v1.PodStatus{
 				ContainerStatuses:     tc.containerStatus,
-				InitContainerStatuses: tc.initContainerStatus,
+				InitContainerStatuses: tc.initContainerStatuses,
 				Resize:                tc.podResizeStatus,
 			},
 		}
@@ -758,13 +836,14 @@ func TestPodResourceRequestsReuse(t *testing.T) {
 func TestPodResourceLimits(t *testing.T) {
 	restartAlways := v1.ContainerRestartPolicyAlways
 	testCases := []struct {
 		description           string
 		options               PodResourcesOptions
 		overhead              v1.ResourceList
 		initContainers        []v1.Container
+		initContainerStatuses []v1.ContainerStatus
 		containers            []v1.Container
 		containerStatuses     []v1.ContainerStatus
 		expectedLimits        v1.ResourceList
 	}{
 		{
 			description: "nil options, larger init container",
@@ -1217,6 +1296,90 @@ func TestPodResourceLimits(t *testing.T) {
 				},
 			},
 		},
+		{
+			description: "pod scaled up with restartable init containers",
+			expectedLimits: v1.ResourceList{
+				v1.ResourceMemory: resource.MustParse("2Gi"),
+			},
+			options: PodResourcesOptions{UseStatusResources: true},
+			initContainers: []v1.Container{
+				{
+					Name:          "restartable-init-1",
+					RestartPolicy: &restartAlways,
+					Resources: v1.ResourceRequirements{
+						Limits: v1.ResourceList{
+							v1.ResourceMemory: resource.MustParse("2Gi"),
+						},
+					},
+				},
+			},
+			initContainerStatuses: []v1.ContainerStatus{
+				{
+					Name: "restartable-init-1",
+					Resources: &v1.ResourceRequirements{
+						Limits: v1.ResourceList{
+							v1.ResourceMemory: resource.MustParse("1Gi"),
+						},
+					},
+				},
+			},
+		},
+		{
+			description: "pod scaled down with restartable init containers",
+			expectedLimits: v1.ResourceList{
+				v1.ResourceMemory: resource.MustParse("2Gi"),
+			},
+			options: PodResourcesOptions{UseStatusResources: true},
+			initContainers: []v1.Container{
+				{
+					Name:          "restartable-init-1",
+					RestartPolicy: &restartAlways,
+					Resources: v1.ResourceRequirements{
+						Limits: v1.ResourceList{
+							v1.ResourceMemory: resource.MustParse("1Gi"),
+						},
+					},
+				},
+			},
+			initContainerStatuses: []v1.ContainerStatus{
+				{
+					Name: "restartable-init-1",
+					Resources: &v1.ResourceRequirements{
+						Limits: v1.ResourceList{
+							v1.ResourceMemory: resource.MustParse("2Gi"),
+						},
+					},
+				},
+			},
+		},
+		{
+			description: "pod scaled down with restartable init containers, don't use status",
+			expectedLimits: v1.ResourceList{
+				v1.ResourceMemory: resource.MustParse("1Gi"),
+			},
+			options: PodResourcesOptions{UseStatusResources: false},
+			initContainers: []v1.Container{
+				{
+					Name:          "restartable-init-1",
+					RestartPolicy: &restartAlways,
+					Resources: v1.ResourceRequirements{
+						Limits: v1.ResourceList{
+							v1.ResourceMemory: resource.MustParse("1Gi"),
+						},
+					},
+				},
+			},
+			initContainerStatuses: []v1.ContainerStatus{
+				{
+					Name: "restartable-init-1",
+					Resources: &v1.ResourceRequirements{
+						Limits: v1.ResourceList{
+							v1.ResourceMemory: resource.MustParse("2Gi"),
+						},
+					},
+				},
+			},
+		},
 	}
 	for _, tc := range testCases {
 		t.Run(tc.description, func(t *testing.T) {
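The limits cases follow the same pattern without a visible Infeasible branch: with UseStatusResources the expected limit is the per-resource max of spec and status (2Gi whether the pod was scaled up or down), and without it the spec limit (1Gi) stands. Reusing the hypothetical maxResourceList sketch above:

// Hypothetical analogue for limits, consistent with the three cases above.
func limitsForResizedContainer(spec, status v1.ResourceList, useStatus bool) v1.ResourceList {
	if !useStatus || status == nil {
		return spec // don't use status: 1Gi
	}
	return maxResourceList(spec, status) // scaled up or down: 2Gi
}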
@@ -1227,7 +1390,8 @@ func TestPodResourceLimits(t *testing.T) {
 				Overhead: tc.overhead,
 			},
 			Status: v1.PodStatus{
 				ContainerStatuses:     tc.containerStatuses,
+				InitContainerStatuses: tc.initContainerStatuses,
 			},
 		}
 		limits := PodLimits(p, tc.options)