Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-31 15:25:57 +00:00)
Add unit tests for resizing restartable (sidecar) init containers
parent 2ba61325f6
commit 8fa8277908
@@ -5413,16 +5413,16 @@ func ValidateInitContainerStateTransition(newStatuses, oldStatuses []core.Contai
     }
 
     // Skip any restartable init container that is allowed to restart
-    isRestartableInitContainer := false
+    isRestartableInitCtr := false
     for _, c := range podSpec.InitContainers {
       if oldStatus.Name == c.Name {
-        if c.RestartPolicy != nil && *c.RestartPolicy == core.ContainerRestartPolicyAlways {
-          isRestartableInitContainer = true
+        if isRestartableInitContainer(&c) {
+          isRestartableInitCtr = true
         }
         break
       }
     }
-    if isRestartableInitContainer {
+    if isRestartableInitCtr {
       continue
     }
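The refactor above renames the local flag and replaces the inline RestartPolicy check with an isRestartableInitContainer helper. The helper's definition is not part of this diff; a minimal sketch consistent with the inline check it replaces (an assumption, not the verbatim implementation) would be:

    // isRestartableInitContainer reports whether an init container is a
    // restartable (sidecar) init container, i.e. its RestartPolicy is set
    // and equals Always.
    func isRestartableInitContainer(initCtr *core.Container) bool {
        if initCtr == nil || initCtr.RestartPolicy == nil {
            return false
        }
        return *initCtr.RestartPolicy == core.ContainerRestartPolicyAlways
    }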
@@ -5619,7 +5619,7 @@ func ValidatePodResize(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel
   // Do not allow removing resource requests/limits on resize.
   if utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) {
     for ix, ctr := range oldPod.Spec.InitContainers {
-      if ctr.RestartPolicy != nil && *ctr.RestartPolicy != core.ContainerRestartPolicyAlways {
+      if !isRestartableInitContainer(&ctr) {
         continue
       }
       if resourcesRemoved(newPod.Spec.InitContainers[ix].Resources.Requests, ctr.Resources.Requests) {
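The resourcesRemoved helper used above is likewise not shown in this diff. A plausible shape, inferred only from its name and from how it is called here (the real implementation may differ), is:

    // resourcesRemoved reports whether a resource entry that existed before
    // the resize is missing from the updated resource list.
    func resourcesRemoved(updated, old core.ResourceList) bool {
        for name := range old {
            if _, ok := updated[name]; !ok {
                return true
            }
        }
        return false
    }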
@@ -5652,7 +5652,7 @@ func ValidatePodResize(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel
   var newInitContainers []core.Container
   if utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) {
     for ix, container := range originalCPUMemPodSpec.InitContainers {
-      if container.RestartPolicy != nil && *container.RestartPolicy == core.ContainerRestartPolicyAlways { // restartable init container
+      if isRestartableInitContainer(&container) { // restartable init container
         dropCPUMemoryResourcesFromContainer(&container, &oldPod.Spec.InitContainers[ix])
       }
       newInitContainers = append(newInitContainers, container)
@@ -2584,9 +2584,11 @@ func TestPodResourceAllocationReset(t *testing.T) {
 
 func TestHandlePodResourcesResize(t *testing.T) {
   featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)
+  featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, true)
   testKubelet := newTestKubelet(t, false)
   defer testKubelet.Cleanup()
   kubelet := testKubelet.kubelet
+  containerRestartPolicyAlways := v1.ContainerRestartPolicyAlways
 
   cpu1m := resource.MustParse("1m")
   cpu2m := resource.MustParse("2m")
@@ -2651,6 +2653,28 @@ func TestHandlePodResourcesResize(t *testing.T) {
   testPod2.UID = "2222"
   testPod2.Name = "pod2"
   testPod2.Namespace = "ns2"
+  testPod2.Spec = v1.PodSpec{
+    InitContainers: []v1.Container{
+      {
+        Name:  "c1-init",
+        Image: "i1",
+        Resources: v1.ResourceRequirements{
+          Requests: v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
+        },
+        RestartPolicy: &containerRestartPolicyAlways,
+      },
+    },
+  }
+  testPod2.Status = v1.PodStatus{
+    Phase: v1.PodRunning,
+    InitContainerStatuses: []v1.ContainerStatus{
+      {
+        Name:               "c1-init",
+        AllocatedResources: v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
+        Resources:          &v1.ResourceRequirements{},
+      },
+    },
+  }
   testPod3 := testPod1.DeepCopy()
   testPod3.UID = "3333"
   testPod3.Name = "pod3"
@@ -2842,72 +2866,115 @@ func TestHandlePodResourcesResize(t *testing.T) {
   }
 
   for _, tt := range tests {
-    t.Run(tt.name, func(t *testing.T) {
-      oldGOOS := goos
-      defer func() { goos = oldGOOS }()
-      if tt.goos != "" {
-        goos = tt.goos
-      }
-      kubelet.statusManager = status.NewFakeManager()
-
-      originalPod := testPod1.DeepCopy()
-      originalPod.Spec.Containers[0].Resources.Requests = tt.originalRequests
-      originalPod.Spec.Containers[0].Resources.Limits = tt.originalLimits
-      kubelet.podManager.UpdatePod(originalPod)
-
-      newPod := originalPod.DeepCopy()
-      newPod.Spec.Containers[0].Resources.Requests = tt.newRequests
-      newPod.Spec.Containers[0].Resources.Limits = tt.newLimits
-
-      if !tt.newResourcesAllocated {
-        require.NoError(t, kubelet.statusManager.SetPodAllocation(originalPod))
-      } else {
-        require.NoError(t, kubelet.statusManager.SetPodAllocation(newPod))
-      }
-
-      podStatus := &kubecontainer.PodStatus{
-        ID:                originalPod.UID,
-        Name:              originalPod.Name,
-        Namespace:         originalPod.Namespace,
-        ContainerStatuses: make([]*kubecontainer.Status, len(originalPod.Spec.Containers)),
-      }
-      for i, c := range originalPod.Spec.Containers {
-        podStatus.ContainerStatuses[i] = &kubecontainer.Status{
-          Name:  c.Name,
-          State: kubecontainer.ContainerStateRunning,
-          Resources: &kubecontainer.ContainerResources{
-            CPURequest:  c.Resources.Requests.Cpu(),
-            CPULimit:    c.Resources.Limits.Cpu(),
-            MemoryLimit: c.Resources.Limits.Memory(),
-          },
-        }
-      }
-
-      now := kubelet.clock.Now()
-      // Put the container in backoff so we can confirm backoff is reset.
-      backoffKey := kuberuntime.GetStableKey(originalPod, &originalPod.Spec.Containers[0])
-      kubelet.backOff.Next(backoffKey, now)
-
-      updatedPod, err := kubelet.handlePodResourcesResize(newPod, podStatus)
-      require.NoError(t, err)
-      assert.Equal(t, tt.expectedAllocatedReqs, updatedPod.Spec.Containers[0].Resources.Requests, "updated pod spec requests")
-      assert.Equal(t, tt.expectedAllocatedLims, updatedPod.Spec.Containers[0].Resources.Limits, "updated pod spec limits")
-
-      alloc, found := kubelet.statusManager.GetContainerResourceAllocation(string(newPod.UID), newPod.Spec.Containers[0].Name)
-      require.True(t, found, "container allocation")
-      assert.Equal(t, tt.expectedAllocatedReqs, alloc.Requests, "stored container request allocation")
-      assert.Equal(t, tt.expectedAllocatedLims, alloc.Limits, "stored container limit allocation")
-
-      resizeStatus := kubelet.statusManager.GetPodResizeStatus(newPod.UID)
-      assert.Equal(t, tt.expectedResize, resizeStatus)
-
-      isInBackoff := kubelet.backOff.IsInBackOffSince(backoffKey, now)
-      if tt.expectBackoffReset {
-        assert.False(t, isInBackoff, "container backoff should be reset")
-      } else {
-        assert.True(t, isInBackoff, "container backoff should not be reset")
-      }
-    })
+    for _, isSidecarContainer := range []bool{false, true} {
+      t.Run(tt.name, func(t *testing.T) {
+        oldGOOS := goos
+        defer func() { goos = oldGOOS }()
+        if tt.goos != "" {
+          goos = tt.goos
+        }
+        kubelet.statusManager = status.NewFakeManager()
+
+        var originalPod *v1.Pod
+        if isSidecarContainer {
+          originalPod = testPod2.DeepCopy()
+          originalPod.Spec.InitContainers[0].Resources.Requests = tt.originalRequests
+          originalPod.Spec.InitContainers[0].Resources.Limits = tt.originalLimits
+        } else {
+          originalPod = testPod1.DeepCopy()
+          originalPod.Spec.Containers[0].Resources.Requests = tt.originalRequests
+          originalPod.Spec.Containers[0].Resources.Limits = tt.originalLimits
+        }
+        kubelet.podManager.UpdatePod(originalPod)
+
+        newPod := originalPod.DeepCopy()
+        if isSidecarContainer {
+          newPod.Spec.InitContainers[0].Resources.Requests = tt.newRequests
+          newPod.Spec.InitContainers[0].Resources.Limits = tt.newLimits
+        } else {
+          newPod.Spec.Containers[0].Resources.Requests = tt.newRequests
+          newPod.Spec.Containers[0].Resources.Limits = tt.newLimits
+        }
+
+        if !tt.newResourcesAllocated {
+          require.NoError(t, kubelet.statusManager.SetPodAllocation(originalPod))
+        } else {
+          require.NoError(t, kubelet.statusManager.SetPodAllocation(newPod))
+        }
+
+        podStatus := &kubecontainer.PodStatus{
+          ID:        originalPod.UID,
+          Name:      originalPod.Name,
+          Namespace: originalPod.Namespace,
+        }
+
+        setContainerStatus := func(podStatus *kubecontainer.PodStatus, c *v1.Container, idx int) {
+          podStatus.ContainerStatuses[idx] = &kubecontainer.Status{
+            Name:  c.Name,
+            State: kubecontainer.ContainerStateRunning,
+            Resources: &kubecontainer.ContainerResources{
+              CPURequest:  c.Resources.Requests.Cpu(),
+              CPULimit:    c.Resources.Limits.Cpu(),
+              MemoryLimit: c.Resources.Limits.Memory(),
+            },
+          }
+        }
+
+        if isSidecarContainer {
+          podStatus.ContainerStatuses = make([]*kubecontainer.Status, len(originalPod.Spec.InitContainers))
+          for i, c := range originalPod.Spec.InitContainers {
+            setContainerStatus(podStatus, &c, i)
+          }
+        } else {
+          podStatus.ContainerStatuses = make([]*kubecontainer.Status, len(originalPod.Spec.Containers))
+          for i, c := range originalPod.Spec.Containers {
+            setContainerStatus(podStatus, &c, i)
+          }
+        }
+
+        now := kubelet.clock.Now()
+        // Put the container in backoff so we can confirm backoff is reset.
+        var backoffKey string
+        if isSidecarContainer {
+          backoffKey = kuberuntime.GetStableKey(originalPod, &originalPod.Spec.InitContainers[0])
+        } else {
+          backoffKey = kuberuntime.GetStableKey(originalPod, &originalPod.Spec.Containers[0])
+        }
+        kubelet.backOff.Next(backoffKey, now)
+
+        updatedPod, err := kubelet.handlePodResourcesResize(newPod, podStatus)
+        require.NoError(t, err)
+
+        var updatedPodCtr v1.Container
+        var newPodCtr v1.Container
+        if isSidecarContainer {
+          updatedPodCtr = updatedPod.Spec.InitContainers[0]
+          newPodCtr = newPod.Spec.InitContainers[0]
+        } else {
+          updatedPodCtr = updatedPod.Spec.Containers[0]
+          newPodCtr = newPod.Spec.Containers[0]
+        }
+        assert.Equal(t, tt.expectedAllocatedReqs, updatedPodCtr.Resources.Requests, "updated pod spec requests")
+        assert.Equal(t, tt.expectedAllocatedLims, updatedPodCtr.Resources.Limits, "updated pod spec limits")
+
+        alloc, found := kubelet.statusManager.GetContainerResourceAllocation(string(newPod.UID), newPodCtr.Name)
+        require.True(t, found, "container allocation")
+        assert.Equal(t, tt.expectedAllocatedReqs, alloc.Requests, "stored container request allocation")
+        assert.Equal(t, tt.expectedAllocatedLims, alloc.Limits, "stored container limit allocation")
+
+        resizeStatus := kubelet.statusManager.GetPodResizeStatus(newPod.UID)
+        assert.Equal(t, tt.expectedResize, resizeStatus)
+
+        isInBackoff := kubelet.backOff.IsInBackOffSince(backoffKey, now)
+        if tt.expectBackoffReset {
+          assert.False(t, isInBackoff, "container backoff should be reset")
+        } else {
+          assert.True(t, isInBackoff, "container backoff should not be reset")
+        }
+      })
+    }
   }
 }
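Reading the rewritten loop as a whole: every existing resize case now runs twice, once against a regular container (testPod1) and once against a restartable init container (testPod2), with the container under test selected by isSidecarContainer. Stripped of the setup and assertions, the new structure is roughly:

    // Condensed sketch of the new test loop; details are elided, see the hunk above.
    for _, tt := range tests {
        for _, isSidecarContainer := range []bool{false, true} {
            t.Run(tt.name, func(t *testing.T) {
                var originalPod *v1.Pod
                if isSidecarContainer {
                    originalPod = testPod2.DeepCopy() // pod with a restartable init container
                } else {
                    originalPod = testPod1.DeepCopy() // pod with a regular container
                }
                // ...apply tt.originalRequests/Limits and tt.newRequests/Limits to the
                // selected container, call handlePodResourcesResize, and assert on the
                // stored allocation, resize status, and backoff reset exactly as before.
            })
        }
    }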
@@ -2451,6 +2451,7 @@ func (w *warningRecorder) AddWarning(_, text string) {
 }
 
 func TestPodResizePrepareForUpdate(t *testing.T) {
+  containerRestartPolicyAlways := api.ContainerRestartPolicyAlways
   tests := []struct {
     name   string
     oldPod *api.Pod
@@ -2898,12 +2899,156 @@ func TestPodResizePrepareForUpdate(t *testing.T) {
         )),
       ),
     },
+    {
+      name: "Update resources for sidecar container",
+      oldPod: podtest.MakePod("test-pod",
+        podtest.SetResourceVersion("1"),
+        podtest.SetInitContainers(
+          podtest.MakeContainer("init-container1",
+            podtest.SetContainerRestartPolicy(containerRestartPolicyAlways),
+            podtest.SetContainerResources(api.ResourceRequirements{
+              Requests: api.ResourceList{
+                api.ResourceCPU:    resource.MustParse("100m"),
+                api.ResourceMemory: resource.MustParse("1Gi"),
+              },
+            }),
+          ),
+        ),
+        podtest.SetStatus(podtest.MakePodStatus(
+          podtest.SetContainerStatuses(
+            podtest.MakeContainerStatus("init-container1",
+              api.ResourceList{
+                api.ResourceCPU:    resource.MustParse("100m"),
+                api.ResourceMemory: resource.MustParse("1Gi"),
+              }),
+          ),
+        )),
+      ),
+      newPod: podtest.MakePod("test-pod",
+        podtest.SetResourceVersion("2"),
+        podtest.SetInitContainers(
+          podtest.MakeContainer("init-container1",
+            podtest.SetContainerRestartPolicy(containerRestartPolicyAlways),
+            podtest.SetContainerResources(api.ResourceRequirements{
+              Requests: api.ResourceList{
+                api.ResourceCPU:    resource.MustParse("200m"), // Updated resource
+                api.ResourceMemory: resource.MustParse("4Gi"),  // Updated resource
+              },
+            }),
+          ),
+        ),
+        podtest.SetStatus(podtest.MakePodStatus(
+          podtest.SetContainerStatuses(
+            podtest.MakeContainerStatus("init-container1",
+              api.ResourceList{
+                api.ResourceCPU:    resource.MustParse("100m"),
+                api.ResourceMemory: resource.MustParse("1Gi"),
+              }),
+          ),
+        )),
+      ),
+      expected: podtest.MakePod("test-pod",
+        podtest.SetResourceVersion("2"),
+        podtest.SetInitContainers(
+          podtest.MakeContainer("init-container1",
+            podtest.SetContainerRestartPolicy(containerRestartPolicyAlways),
+            podtest.SetContainerResources(api.ResourceRequirements{
+              Requests: api.ResourceList{
+                api.ResourceCPU:    resource.MustParse("200m"), // Updated resource
+                api.ResourceMemory: resource.MustParse("4Gi"),  // Updated resource
+              },
+            }),
+          ),
+        ),
+        podtest.SetStatus(podtest.MakePodStatus(
+          podtest.SetResizeStatus(api.PodResizeStatusProposed), // Resize status set
+          podtest.SetContainerStatuses(
+            podtest.MakeContainerStatus("init-container1",
+              api.ResourceList{
+                api.ResourceCPU:    resource.MustParse("100m"),
+                api.ResourceMemory: resource.MustParse("1Gi"),
+              }),
+          ),
+        )),
+      ),
+    },
+    {
+      name: "Update resources should fail for non-restartable init container",
+      oldPod: podtest.MakePod("test-pod",
+        podtest.SetResourceVersion("1"),
+        podtest.SetInitContainers(
+          podtest.MakeContainer("init-container1",
+            podtest.SetContainerResources(api.ResourceRequirements{
+              Requests: api.ResourceList{
+                api.ResourceCPU:    resource.MustParse("100m"),
+                api.ResourceMemory: resource.MustParse("1Gi"),
+              },
+            }),
+          ),
+        ),
+        podtest.SetStatus(podtest.MakePodStatus(
+          podtest.SetContainerStatuses(
+            podtest.MakeContainerStatus("init-container1",
+              api.ResourceList{
+                api.ResourceCPU:    resource.MustParse("100m"),
+                api.ResourceMemory: resource.MustParse("1Gi"),
+              }),
+          ),
+        )),
+      ),
+      newPod: podtest.MakePod("test-pod",
+        podtest.SetResourceVersion("2"),
+        podtest.SetInitContainers(
+          podtest.MakeContainer("init-container1",
+            podtest.SetContainerResources(api.ResourceRequirements{
+              Requests: api.ResourceList{
+                api.ResourceCPU:    resource.MustParse("200m"), // Updated resource
+                api.ResourceMemory: resource.MustParse("4Gi"),  // Updated resource
+              },
+            }),
+          ),
+        ),
+        podtest.SetStatus(podtest.MakePodStatus(
+          podtest.SetContainerStatuses(
+            podtest.MakeContainerStatus("init-container1",
+              api.ResourceList{
+                api.ResourceCPU:    resource.MustParse("100m"),
+                api.ResourceMemory: resource.MustParse("1Gi"),
+              }),
+          ),
+        )),
+      ),
+      expected: podtest.MakePod("test-pod",
+        podtest.SetResourceVersion("2"),
+        podtest.SetInitContainers(
+          podtest.MakeContainer("init-container1",
+            podtest.SetContainerResources(api.ResourceRequirements{
+              Requests: api.ResourceList{
+                api.ResourceCPU:    resource.MustParse("200m"), // Updated resource
+                api.ResourceMemory: resource.MustParse("4Gi"),  // Updated resource
+              },
+            }),
+          ),
+        ),
+        podtest.SetStatus(podtest.MakePodStatus(
+          podtest.SetResizeStatus(""), // Resize status not set
+          podtest.SetContainerStatuses(
+            podtest.MakeContainerStatus("init-container1",
+              api.ResourceList{
+                api.ResourceCPU:    resource.MustParse("100m"),
+                api.ResourceMemory: resource.MustParse("1Gi"),
+              }),
+          ),
+        )),
+      ),
+    },
   }
 
   for _, tc := range tests {
     t.Run(tc.name, func(t *testing.T) {
       featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)
       featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScalingAllocatedStatus, true)
+      featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, true)
       ctx := context.Background()
       ResizeStrategy.PrepareForUpdate(ctx, tc.newPod, tc.oldPod)
       if !cmp.Equal(tc.expected, tc.newPod) {
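The two new cases pin down what ResizeStrategy.PrepareForUpdate is expected to do for init containers: a resize of a restartable (sidecar) init container is accepted and marks the pod status with api.PodResizeStatusProposed, while a resize of a plain init container leaves the resize status empty. Boiled down (the pod variable names below are placeholders; the actual test compares whole pod objects with cmp.Equal):

    // Illustrative only, not part of the diff.
    ResizeStrategy.PrepareForUpdate(ctx, sidecarPod, oldSidecarPod)
    // sidecarPod.Status.Resize == api.PodResizeStatusProposed

    ResizeStrategy.PrepareForUpdate(ctx, plainInitPod, oldPlainInitPod)
    // plainInitPod.Status.Resize == "" (resize not proposed)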
@@ -286,15 +286,15 @@ func TestPodRequestsAndLimitsWithoutOverhead(t *testing.T) {
 func TestPodResourceRequests(t *testing.T) {
   restartAlways := v1.ContainerRestartPolicyAlways
   testCases := []struct {
-    description         string
-    options             PodResourcesOptions
-    overhead            v1.ResourceList
-    podResizeStatus     v1.PodResizeStatus
-    initContainers      []v1.Container
-    initContainerStatus []v1.ContainerStatus
-    containers          []v1.Container
-    containerStatus     []v1.ContainerStatus
-    expectedRequests    v1.ResourceList
+    description           string
+    options               PodResourcesOptions
+    overhead              v1.ResourceList
+    podResizeStatus       v1.PodResizeStatus
+    initContainers        []v1.Container
+    initContainerStatuses []v1.ContainerStatus
+    containers            []v1.Container
+    containerStatus       []v1.ContainerStatus
+    expectedRequests      v1.ResourceList
   }{
     {
       description: "nil options, larger init container",
@@ -481,14 +481,6 @@ func TestPodResourceRequests(t *testing.T) {
           },
         },
       },
-      initContainerStatus: []v1.ContainerStatus{
-        {
-          Name: "restartable-init-1",
-          AllocatedResources: v1.ResourceList{
-            v1.ResourceCPU: resource.MustParse("2"),
-          },
-        },
-      },
     },
     {
       description: "resized, infeasible, but don't use status",
@@ -518,6 +510,92 @@ func TestPodResourceRequests(t *testing.T) {
         },
       },
     },
+    {
+      description: "resized, restartable init container, infeasible",
+      expectedRequests: v1.ResourceList{
+        v1.ResourceCPU: resource.MustParse("2"),
+      },
+      podResizeStatus: v1.PodResizeStatusInfeasible,
+      options:         PodResourcesOptions{UseStatusResources: true},
+      initContainers: []v1.Container{
+        {
+          Name:          "restartable-init-1",
+          RestartPolicy: &restartAlways,
+          Resources: v1.ResourceRequirements{
+            Requests: v1.ResourceList{
+              v1.ResourceCPU: resource.MustParse("4"),
+            },
+          },
+        },
+      },
+      initContainerStatuses: []v1.ContainerStatus{
+        {
+          Name: "restartable-init-1",
+          Resources: &v1.ResourceRequirements{
+            Requests: v1.ResourceList{
+              v1.ResourceCPU: resource.MustParse("2"),
+            },
+          },
+        },
+      },
+    },
+    {
+      description: "resized, restartable init container, no resize status",
+      expectedRequests: v1.ResourceList{
+        v1.ResourceCPU: resource.MustParse("4"),
+      },
+      options: PodResourcesOptions{UseStatusResources: true},
+      initContainers: []v1.Container{
+        {
+          Name:          "restartable-init-1",
+          RestartPolicy: &restartAlways,
+          Resources: v1.ResourceRequirements{
+            Requests: v1.ResourceList{
+              v1.ResourceCPU: resource.MustParse("4"),
+            },
+          },
+        },
+      },
+      initContainerStatuses: []v1.ContainerStatus{
+        {
+          Name: "restartable-init-1",
+          Resources: &v1.ResourceRequirements{
+            Requests: v1.ResourceList{
+              v1.ResourceCPU: resource.MustParse("2"),
+            },
+          },
+        },
+      },
+    },
+    {
+      description: "resized, restartable init container, infeasible, but don't use status",
+      expectedRequests: v1.ResourceList{
+        v1.ResourceCPU: resource.MustParse("4"),
+      },
+      podResizeStatus: v1.PodResizeStatusInfeasible,
+      options:         PodResourcesOptions{UseStatusResources: false},
+      initContainers: []v1.Container{
+        {
+          Name:          "restartable-init-1",
+          RestartPolicy: &restartAlways,
+          Resources: v1.ResourceRequirements{
+            Requests: v1.ResourceList{
+              v1.ResourceCPU: resource.MustParse("4"),
+            },
+          },
+        },
+      },
+      initContainerStatuses: []v1.ContainerStatus{
+        {
+          Name: "restartable-init-1",
+          Resources: &v1.ResourceRequirements{
+            Requests: v1.ResourceList{
+              v1.ResourceCPU: resource.MustParse("2"),
+            },
+          },
+        },
+      },
+    },
     {
       description: "restartable init container",
       expectedRequests: v1.ResourceList{
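The three new request cases exercise how PodRequests treats a restartable init container during an in-place resize: with UseStatusResources enabled and the resize marked Infeasible, the allocated value from the container status (2 CPU) wins; with no resize status, or with UseStatusResources disabled, the spec value (4 CPU) wins. A hypothetical predicate capturing that selection, inferred from the expected values above rather than taken from the helper's source, would be:

    // chooseRequests illustrates the selection rule the cases above imply.
    func chooseRequests(spec v1.ResourceList, status *v1.ResourceRequirements,
        opts PodResourcesOptions, resize v1.PodResizeStatus) v1.ResourceList {
        if opts.UseStatusResources && resize == v1.PodResizeStatusInfeasible &&
            status != nil && status.Requests != nil {
            return status.Requests // fall back to the allocated (status) requests
        }
        return spec // otherwise the desired (spec) requests are used
    }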
@@ -710,7 +788,7 @@ func TestPodResourceRequests(t *testing.T) {
       },
       Status: v1.PodStatus{
         ContainerStatuses:     tc.containerStatus,
-        InitContainerStatuses: tc.initContainerStatus,
+        InitContainerStatuses: tc.initContainerStatuses,
         Resize:                tc.podResizeStatus,
       },
     }
@@ -758,13 +836,14 @@ func TestPodResourceRequestsReuse(t *testing.T) {
 func TestPodResourceLimits(t *testing.T) {
   restartAlways := v1.ContainerRestartPolicyAlways
   testCases := []struct {
-    description       string
-    options           PodResourcesOptions
-    overhead          v1.ResourceList
-    initContainers    []v1.Container
-    containers        []v1.Container
-    containerStatuses []v1.ContainerStatus
-    expectedLimits    v1.ResourceList
+    description           string
+    options               PodResourcesOptions
+    overhead              v1.ResourceList
+    initContainers        []v1.Container
+    initContainerStatuses []v1.ContainerStatus
+    containers            []v1.Container
+    containerStatuses     []v1.ContainerStatus
+    expectedLimits        v1.ResourceList
   }{
     {
       description: "nil options, larger init container",
@@ -1217,6 +1296,90 @@ func TestPodResourceLimits(t *testing.T) {
       },
       },
     },
+    {
+      description: "pod scaled up with restartable init containers",
+      expectedLimits: v1.ResourceList{
+        v1.ResourceMemory: resource.MustParse("2Gi"),
+      },
+      options: PodResourcesOptions{UseStatusResources: true},
+      initContainers: []v1.Container{
+        {
+          Name:          "restartable-init-1",
+          RestartPolicy: &restartAlways,
+          Resources: v1.ResourceRequirements{
+            Limits: v1.ResourceList{
+              v1.ResourceMemory: resource.MustParse("2Gi"),
+            },
+          },
+        },
+      },
+      initContainerStatuses: []v1.ContainerStatus{
+        {
+          Name: "restartable-init-1",
+          Resources: &v1.ResourceRequirements{
+            Limits: v1.ResourceList{
+              v1.ResourceMemory: resource.MustParse("1Gi"),
+            },
+          },
+        },
+      },
+    },
+    {
+      description: "pod scaled down with restartable init containers",
+      expectedLimits: v1.ResourceList{
+        v1.ResourceMemory: resource.MustParse("2Gi"),
+      },
+      options: PodResourcesOptions{UseStatusResources: true},
+      initContainers: []v1.Container{
+        {
+          Name:          "restartable-init-1",
+          RestartPolicy: &restartAlways,
+          Resources: v1.ResourceRequirements{
+            Limits: v1.ResourceList{
+              v1.ResourceMemory: resource.MustParse("1Gi"),
+            },
+          },
+        },
+      },
+      initContainerStatuses: []v1.ContainerStatus{
+        {
+          Name: "restartable-init-1",
+          Resources: &v1.ResourceRequirements{
+            Limits: v1.ResourceList{
+              v1.ResourceMemory: resource.MustParse("2Gi"),
+            },
+          },
+        },
+      },
+    },
+    {
+      description: "pod scaled down with restartable init containers, don't use status",
+      expectedLimits: v1.ResourceList{
+        v1.ResourceMemory: resource.MustParse("1Gi"),
+      },
+      options: PodResourcesOptions{UseStatusResources: false},
+      initContainers: []v1.Container{
+        {
+          Name:          "restartable-init-1",
+          RestartPolicy: &restartAlways,
+          Resources: v1.ResourceRequirements{
+            Limits: v1.ResourceList{
+              v1.ResourceMemory: resource.MustParse("1Gi"),
+            },
+          },
+        },
+      },
+      initContainerStatuses: []v1.ContainerStatus{
+        {
+          Name: "restartable-init-1",
+          Resources: &v1.ResourceRequirements{
+            Limits: v1.ResourceList{
+              v1.ResourceMemory: resource.MustParse("2Gi"),
+            },
+          },
+        },
+      },
+    },
   }
   for _, tc := range testCases {
     t.Run(tc.description, func(t *testing.T) {
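The new limit cases imply the complementary rule for PodLimits with UseStatusResources: whether the pod was scaled up or down, the effective limit is the larger of the spec limit and the status limit (2Gi in both directions), and with UseStatusResources disabled only the spec limit (1Gi) counts. Again as an inference from the expected values, not the helper's actual code:

    // maxQuantity is a hypothetical helper illustrating the "larger of spec and
    // status" behavior the scaled-up and scaled-down cases above suggest.
    func maxQuantity(spec, status resource.Quantity) resource.Quantity {
        if status.Cmp(spec) > 0 {
            return status
        }
        return spec
    }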
@@ -1227,7 +1390,8 @@ func TestPodResourceLimits(t *testing.T) {
         Overhead: tc.overhead,
       },
       Status: v1.PodStatus{
-        ContainerStatuses: tc.containerStatuses,
+        ContainerStatuses:     tc.containerStatuses,
+        InitContainerStatuses: tc.initContainerStatuses,
       },
     }
     limits := PodLimits(p, tc.options)