Merge pull request #112599 from fabi200123/Adding-Windows-Support-for-InPlaceVerticalScaling
Adding Windows support for InPlace Pod Vertical Scaling
Commit: dea3f1a119
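The kubelet previously stubbed out Windows container resources (`//TODO: Add windows support`, returning nil). This change fills in the Windows paths: `generateContainerResources` now builds a `WindowsContainerResources` via a new `generateWindowsContainerResources`/`calculateWindowsResources` pair, `toKubeContainerResources` converts a Windows CRI status back into kubelet resource quantities, and the pod-resize e2e tests gain Windows-aware security contexts, a pod OS field, and cgroup checks that are skipped on Windows nodes.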
@@ -260,14 +260,25 @@ func TestToKubeContainerStatusWithResources(t *testing.T) {
 			State:     runtimeapi.ContainerState_CONTAINER_RUNNING,
 			CreatedAt: createdAt,
 			StartedAt: startedAt,
-			Resources: &runtimeapi.ContainerResources{
-				Linux: &runtimeapi.LinuxContainerResources{
-					CpuQuota:           25000,
-					CpuPeriod:          100000,
-					MemoryLimitInBytes: 524288000,
-					OomScoreAdj:        -998,
-				},
-			},
+			Resources: func() *runtimeapi.ContainerResources {
+				if goruntime.GOOS == "windows" {
+					return &runtimeapi.ContainerResources{
+						Windows: &runtimeapi.WindowsContainerResources{
+							CpuMaximum:         2500,
+							CpuCount:           1,
+							MemoryLimitInBytes: 524288000,
+						},
+					}
+				}
+				return &runtimeapi.ContainerResources{
+					Linux: &runtimeapi.LinuxContainerResources{
+						CpuQuota:           25000,
+						CpuPeriod:          100000,
+						MemoryLimitInBytes: 524288000,
+						OomScoreAdj:        -998,
+					},
+				}
+			}(),
 		},
 		expected: &kubecontainer.Status{
 			ID:        *cid,
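The expected `Resources` fixtures are now built by an immediately-invoked function, so the same test exercises the Windows fields (`CpuMaximum`, `CpuCount`) when `goruntime.GOOS == "windows"` and the Linux fields (`CpuQuota`, `CpuPeriod`, `OomScoreAdj`) everywhere else; the next two hunks follow the same pattern.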
@@ -289,12 +300,22 @@ func TestToKubeContainerStatusWithResources(t *testing.T) {
 			State:     runtimeapi.ContainerState_CONTAINER_RUNNING,
 			CreatedAt: createdAt,
 			StartedAt: startedAt,
-			Resources: &runtimeapi.ContainerResources{
-				Linux: &runtimeapi.LinuxContainerResources{
-					CpuQuota:  50000,
-					CpuPeriod: 100000,
-				},
-			},
+			Resources: func() *runtimeapi.ContainerResources {
+				if goruntime.GOOS == "windows" {
+					return &runtimeapi.ContainerResources{
+						Windows: &runtimeapi.WindowsContainerResources{
+							CpuMaximum: 2500,
+							CpuCount:   2,
+						},
+					}
+				}
+				return &runtimeapi.ContainerResources{
+					Linux: &runtimeapi.LinuxContainerResources{
+						CpuQuota:  50000,
+						CpuPeriod: 100000,
+					},
+				}
+			}(),
 		},
 		expected: &kubecontainer.Status{
 			ID:        *cid,
@@ -320,6 +341,9 @@ func TestToKubeContainerStatusWithResources(t *testing.T) {
 					MemoryLimitInBytes: 524288000,
 					OomScoreAdj:        -998,
 				},
+				Windows: &runtimeapi.WindowsContainerResources{
+					MemoryLimitInBytes: 524288000,
+				},
 			},
 		},
 		expected: &kubecontainer.Status{
@@ -42,19 +42,24 @@ func (m *kubeGenericRuntimeManager) applyPlatformSpecificContainerConfig(config
 
 // generateContainerResources generates platform specific (windows) container resources config for runtime
 func (m *kubeGenericRuntimeManager) generateContainerResources(pod *v1.Pod, container *v1.Container) *runtimeapi.ContainerResources {
-	//TODO: Add windows support
-	return nil
+	return &runtimeapi.ContainerResources{
+		Windows: m.generateWindowsContainerResources(pod, container),
+	}
 }
 
-// generateWindowsContainerConfig generates windows container config for kubelet runtime v1.
-// Refer https://git.k8s.io/design-proposals-archive/node/cri-windows.md.
-func (m *kubeGenericRuntimeManager) generateWindowsContainerConfig(container *v1.Container, pod *v1.Pod, uid *int64, username string) (*runtimeapi.WindowsContainerConfig, error) {
-	wc := &runtimeapi.WindowsContainerConfig{
-		Resources:       &runtimeapi.WindowsContainerResources{},
-		SecurityContext: &runtimeapi.WindowsContainerSecurityContext{},
-	}
+// generateWindowsContainerResources generates windows container resources config for runtime
+func (m *kubeGenericRuntimeManager) generateWindowsContainerResources(pod *v1.Pod, container *v1.Container) *runtimeapi.WindowsContainerResources {
+	wcr := m.calculateWindowsResources(container.Resources.Limits.Cpu(), container.Resources.Limits.Memory())
+
+	return wcr
+}
+
+// calculateWindowsResources will create the windowsContainerResources type based on the provided CPU and memory resource requests, limits
+func (m *kubeGenericRuntimeManager) calculateWindowsResources(cpuLimit, memoryLimit *resource.Quantity) *runtimeapi.WindowsContainerResources {
+	resources := runtimeapi.WindowsContainerResources{}
+
+	memLimit := memoryLimit.Value()
 
-	cpuLimit := container.Resources.Limits.Cpu()
 	if !cpuLimit.IsZero() {
 		// Since Kubernetes doesn't have any notion of weight in the Pod/Container API, only limits/reserves, then applying CpuMaximum only
 		// will better follow the intent of the user. At one point CpuWeights were set, but this prevented limits from having any effect.
@@ -79,22 +84,32 @@ func (m *kubeGenericRuntimeManager) generateWindowsContainerConfig(container *v1
 		// https://github.com/kubernetes/kubernetes/blob/56d1c3b96d0a544130a82caad33dd57629b8a7f8/staging/src/k8s.io/cri-api/pkg/apis/runtime/v1/api.proto#L681-L682
 		// https://github.com/opencontainers/runtime-spec/blob/ad53dcdc39f1f7f7472b10aa0a45648fe4865496/config-windows.md#cpu
 		// If both CpuWeight and CpuMaximum are set - ContainerD catches this invalid case and returns an error instead.
-		wc.Resources.CpuMaximum = calculateCPUMaximum(cpuLimit, int64(winstats.ProcessorCount()))
+		resources.CpuMaximum = calculateCPUMaximum(cpuLimit, int64(winstats.ProcessorCount()))
 	}
 
 	// The processor resource controls are mutually exclusive on
 	// Windows Server Containers, the order of precedence is
 	// CPUCount first, then CPUMaximum.
-	if wc.Resources.CpuCount > 0 {
-		if wc.Resources.CpuMaximum > 0 {
-			wc.Resources.CpuMaximum = 0
+	if resources.CpuCount > 0 {
+		if resources.CpuMaximum > 0 {
+			resources.CpuMaximum = 0
 			klog.InfoS("Mutually exclusive options: CPUCount priority > CPUMaximum priority on Windows Server Containers. CPUMaximum should be ignored")
 		}
 	}
 
-	memoryLimit := container.Resources.Limits.Memory().Value()
-	if memoryLimit != 0 {
-		wc.Resources.MemoryLimitInBytes = memoryLimit
+	if memLimit != 0 {
+		resources.MemoryLimitInBytes = memLimit
 	}
 
+	return &resources
+}
+
+// generateWindowsContainerConfig generates windows container config for kubelet runtime v1.
+// Refer https://github.com/kubernetes/community/blob/master/contributors/design-proposals/node/cri-windows.md.
+func (m *kubeGenericRuntimeManager) generateWindowsContainerConfig(container *v1.Container, pod *v1.Pod, uid *int64, username string) (*runtimeapi.WindowsContainerConfig, error) {
+	wc := &runtimeapi.WindowsContainerConfig{
+		Resources:       m.generateWindowsContainerResources(pod, container),
+		SecurityContext: &runtimeapi.WindowsContainerSecurityContext{},
+	}
+
 	// setup security context
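`calculateCPUMaximum` itself is untouched by this PR and its body is not shown in the diff. For reference, here is a minimal sketch of the math it is assumed to perform, inferred from the unit-test expectations further down (a 2-CPU limit on an 8-processor host yields 2500, clamped to the Windows 1..10000 scale); it is not the verbatim upstream implementation:

```go
package main

import "fmt"

// sketchCPUMaximum is a hypothetical stand-in for the existing
// calculateCPUMaximum(cpuLimit, cpuCount). It maps a CPU limit in
// millicores onto the Windows CpuMaximum scale, where 10000 means
// "all processors fully used".
func sketchCPUMaximum(cpuLimitMilli, cpuCount int64) int64 {
	cpuMaximum := 10 * cpuLimitMilli / cpuCount // e.g. 2000m on 8 CPUs -> 2500
	// Clamp to the valid 1..10000 range (a zero limit still yields 1).
	if cpuMaximum < 1 {
		cpuMaximum = 1
	}
	if cpuMaximum > 10000 {
		cpuMaximum = 10000
	}
	return cpuMaximum
}

func main() {
	fmt.Println(sketchCPUMaximum(2000, 8)) // 2500
	fmt.Println(sketchCPUMaximum(8000, 8)) // 10000
	fmt.Println(sketchCPUMaximum(0, 8))    // 1
}
```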
@@ -134,6 +149,27 @@ func calculateCPUMaximum(cpuLimit *resource.Quantity, cpuCount int64) int64 {
 }
 
 func toKubeContainerResources(statusResources *runtimeapi.ContainerResources) *kubecontainer.ContainerResources {
-	//TODO: Add windows support
-	return nil
+	var cStatusResources *kubecontainer.ContainerResources
+	runtimeStatusResources := statusResources.GetWindows()
+	if runtimeStatusResources != nil {
+		var memLimit, cpuLimit *resource.Quantity
+
+		// Used the reversed formula from the calculateCPUMaximum function
+		if runtimeStatusResources.CpuMaximum > 0 {
+			cpuLimitValue := runtimeStatusResources.CpuMaximum * int64(winstats.ProcessorCount()) / 10
+			cpuLimit = resource.NewMilliQuantity(cpuLimitValue, resource.DecimalSI)
+		}
+
+		if runtimeStatusResources.MemoryLimitInBytes > 0 {
+			memLimit = resource.NewQuantity(runtimeStatusResources.MemoryLimitInBytes, resource.BinarySI)
+		}
+
+		if cpuLimit != nil || memLimit != nil {
+			cStatusResources = &kubecontainer.ContainerResources{
+				CPULimit:    cpuLimit,
+				MemoryLimit: memLimit,
+			}
+		}
+	}
+	return cStatusResources
 }
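The reversed formula above solves the same relationship for the limit: millicores = CpuMaximum * ProcessorCount / 10, so a CpuMaximum of 2500 on an 8-processor node converts back to 2000m (2 CPUs), and the memory limit round-trips unchanged as bytes (BinarySI).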
@@ -149,3 +149,47 @@ func TestCalculateCPUMaximum(t *testing.T) {
 		})
 	}
 }
+
+func TestCalculateWindowsResources(t *testing.T) {
+	_, _, fakeRuntimeSvc, err := createTestRuntimeManager()
+	require.NoError(t, err)
+
+	tests := []struct {
+		name     string
+		cpuLim   resource.Quantity
+		memLim   resource.Quantity
+		expected *runtimeapi.WindowsContainerResources
+	}{
+		{
+			name:   "Request128MBLimit256MB",
+			cpuLim: resource.MustParse("2"),
+			memLim: resource.MustParse("128Mi"),
+			expected: &runtimeapi.WindowsContainerResources{
+				CpuMaximum:         2500,
+				MemoryLimitInBytes: 134217728,
+			},
+		},
+		{
+			name:   "RequestNoMemory",
+			cpuLim: resource.MustParse("8"),
+			memLim: resource.MustParse("0"),
+			expected: &runtimeapi.WindowsContainerResources{
+				CpuMaximum:         10000,
+				MemoryLimitInBytes: 0,
+			},
+		},
+		{
+			name:   "RequestZeroCPU",
+			cpuLim: resource.MustParse("0"),
+			memLim: resource.MustParse("128Mi"),
+			expected: &runtimeapi.WindowsContainerResources{
+				CpuMaximum:         1,
+				MemoryLimitInBytes: 134217728,
+			},
+		},
+	}
+	for _, test := range tests {
+		windowsContainerResources := fakeRuntimeSvc.calculateWindowsResources(&test.cpuLim, &test.memLim)
+		assert.Equal(t, test.expected, windowsContainerResources)
+	}
+}
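Note: the expected CpuMaximum values bake in a processor count of 8 (2 CPUs yields 2 * 10000 / 8 = 2500); since calculateWindowsResources divides by winstats.ProcessorCount(), these expectations appear to depend on the machine the unit test runs on.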
@@ -20,6 +20,7 @@ import (
 	"context"
 	"fmt"
 	"regexp"
+	"runtime"
 	"strconv"
 	"strings"
 	"time"
@@ -177,25 +178,40 @@ func makeTestContainer(tcInfo TestContainerInfo) (v1.Container, v1.ContainerStat
 	bTrue := true
 	bFalse := false
 	userID := int64(1001)
-	tc := v1.Container{
-		Name:         tcInfo.Name,
-		Image:        imageutils.GetE2EImage(imageutils.BusyBox),
-		Command:      []string{"/bin/sh"},
-		Args:         []string{"-c", cmd},
-		Resources:    res,
-		ResizePolicy: resizePol,
-		SecurityContext: &v1.SecurityContext{
+	userName := "ContainerUser"
+
+	var securityContext *v1.SecurityContext
+
+	if framework.NodeOSDistroIs("windows") {
+		securityContext = &v1.SecurityContext{
+			RunAsNonRoot: &bTrue,
+			WindowsOptions: &v1.WindowsSecurityContextOptions{
+				RunAsUserName: &userName,
+			},
+		}
+	} else {
+		securityContext = &v1.SecurityContext{
 			Privileged:               &bFalse,
 			AllowPrivilegeEscalation: &bFalse,
-			RunAsNonRoot:             &bTrue,
 			RunAsUser:                &userID,
+			RunAsNonRoot:             &bTrue,
 			Capabilities: &v1.Capabilities{
 				Drop: []v1.Capability{"ALL"},
 			},
 			SeccompProfile: &v1.SeccompProfile{
 				Type: v1.SeccompProfileTypeRuntimeDefault,
 			},
-		},
+		}
+	}
+
+	tc := v1.Container{
+		Name:            tcInfo.Name,
+		Image:           imageutils.GetE2EImage(imageutils.BusyBox),
+		Command:         []string{"/bin/sh"},
+		Args:            []string{"-c", cmd},
+		Resources:       res,
+		ResizePolicy:    resizePol,
+		SecurityContext: securityContext,
 	}
 
 	tcStatus := v1.ContainerStatus{
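The security context is split because the Linux-only fields (Privileged, AllowPrivilegeEscalation, Capabilities, SeccompProfile, and the numeric RunAsUser) do not apply to Windows containers; identity on Windows is instead expressed through WindowsSecurityContextOptions.RunAsUserName, here the built-in ContainerUser account.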
@@ -207,10 +223,19 @@ func makeTestContainer(tcInfo TestContainerInfo) (v1.Container, v1.ContainerStat
 
 func makeTestPod(ns, name, timeStamp string, tcInfo []TestContainerInfo) *v1.Pod {
 	var testContainers []v1.Container
+	var podOS *v1.PodOS
+
 	for _, ci := range tcInfo {
 		tc, _ := makeTestContainer(ci)
 		testContainers = append(testContainers, tc)
 	}
 
+	if framework.NodeOSDistroIs("windows") {
+		podOS = &v1.PodOS{Name: v1.OSName("windows")}
+	} else {
+		podOS = &v1.PodOS{Name: v1.OSName(runtime.GOOS)}
+	}
+
 	pod := &v1.Pod{
 		TypeMeta: metav1.TypeMeta{
 			Kind: "Pod",
@@ -225,6 +250,7 @@ func makeTestPod(ns, name, timeStamp string, tcInfo []TestContainerInfo) *v1.Pod
 			},
 		},
 		Spec: v1.PodSpec{
+			OS:            podOS,
 			Containers:    testContainers,
 			RestartPolicy: v1.RestartPolicyOnFailure,
 		},
@@ -470,18 +496,20 @@ func waitForPodResizeActuation(c clientset.Interface, podClient *e2epod.PodClien
 	// Wait for pod resource allocations to equal expected values after resize
 	resizedPod, aErr := waitPodAllocationsEqualsExpected()
 	framework.ExpectNoError(aErr, "failed to verify pod resource allocation values equals expected values")
-	// Wait for container cgroup values to equal expected cgroup values after resize
-	cErr := waitContainerCgroupValuesEqualsExpected()
-	framework.ExpectNoError(cErr, "failed to verify container cgroup values equals expected values")
 	//TODO(vinaykul,InPlacePodVerticalScaling): Remove this check once base-OS updates to containerd>=1.6.9
 	// containerd needs to add CRI support before Beta (See Node KEP #2273)
-	if isInPlaceResizeSupportedByRuntime(c, pod.Spec.NodeName) {
+	if !isInPlaceResizeSupportedByRuntime(c, pod.Spec.NodeName) {
 		// Wait for PodSpec container resources to equal PodStatus container resources indicating resize is complete
 		rPod, rErr := waitPodStatusResourcesEqualSpecResources()
 		framework.ExpectNoError(rErr, "failed to verify pod spec resources equals pod status resources")
 
 		ginkgo.By("verifying pod status after resize")
 		verifyPodStatusResources(rPod, expectedContainers)
+	} else if !framework.NodeOSDistroIs("windows") {
+		// Wait for container cgroup values to equal expected cgroup values after resize
+		// only for containerd versions before 1.6.9
+		cErr := waitContainerCgroupValuesEqualsExpected()
+		framework.ExpectNoError(cErr, "failed to verify container cgroup values equals expected values")
 	}
 	return resizedPod
 }
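This gating pattern repeats through the e2e hunks below: cgroup-level verification only makes sense where cgroups exist, so it is skipped on Windows nodes and additionally guarded by the isInPlaceResizeSupportedByRuntime check (the TODO above ties that to containerd >= 1.6.9).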
@@ -1228,7 +1256,12 @@ func doPodResizeTests() {
 
 			ginkgo.By("verifying initial pod status resources and cgroup config are as expected")
 			verifyPodStatusResources(newPod, tc.containers)
-			verifyPodContainersCgroupValues(newPod, tc.containers, true)
+			// Check cgroup values only for containerd versions before 1.6.9
+			if !isInPlaceResizeSupportedByRuntime(f.ClientSet, newPod.Spec.NodeName) {
+				if !framework.NodeOSDistroIs("windows") {
+					verifyPodContainersCgroupValues(newPod, tc.containers, true)
+				}
+			}
 
 			ginkgo.By("patching pod for resize")
 			patchedPod, pErr = f.ClientSet.CoreV1().Pods(newPod.Namespace).Patch(context.TODO(), newPod.Name,
@@ -1242,8 +1275,13 @@ func doPodResizeTests() {
 			ginkgo.By("waiting for resize to be actuated")
 			resizedPod := waitForPodResizeActuation(f.ClientSet, podClient, newPod, patchedPod, tc.expected)
 
-			ginkgo.By("verifying pod container's cgroup values after resize")
-			verifyPodContainersCgroupValues(resizedPod, tc.expected, true)
+			// Check cgroup values only for containerd versions before 1.6.9
+			if !isInPlaceResizeSupportedByRuntime(f.ClientSet, newPod.Spec.NodeName) {
+				ginkgo.By("verifying pod container's cgroup values after resize")
+				if !framework.NodeOSDistroIs("windows") {
+					verifyPodContainersCgroupValues(resizedPod, tc.expected, true)
+				}
+			}
 
 			ginkgo.By("verifying pod resources after resize")
 			verifyPodResources(resizedPod, tc.expected)
@@ -1335,9 +1373,12 @@ func doPodResizeResourceQuotaTests() {
 
 		ginkgo.By("waiting for resize to be actuated")
 		resizedPod := waitForPodResizeActuation(f.ClientSet, podClient, newPod1, patchedPod, expected)
+		if !isInPlaceResizeSupportedByRuntime(f.ClientSet, newPod1.Spec.NodeName) {
 			ginkgo.By("verifying pod container's cgroup values after resize")
-		verifyPodContainersCgroupValues(resizedPod, expected, true)
+			if !framework.NodeOSDistroIs("windows") {
+				verifyPodContainersCgroupValues(resizedPod, expected, true)
+			}
+		}
 
 		ginkgo.By("verifying pod resources after resize")
 		verifyPodResources(resizedPod, expected)
@@ -1439,7 +1480,11 @@ func doPodResizeErrorTests() {
 
 			ginkgo.By("verifying initial pod status resources and cgroup config are as expected")
 			verifyPodStatusResources(newPod, tc.containers)
-			verifyPodContainersCgroupValues(newPod, tc.containers, true)
+			if !isInPlaceResizeSupportedByRuntime(f.ClientSet, newPod.Spec.NodeName) {
+				if !framework.NodeOSDistroIs("windows") {
+					verifyPodContainersCgroupValues(newPod, tc.containers, true)
+				}
+			}
 
 			ginkgo.By("patching pod for resize")
 			patchedPod, pErr = f.ClientSet.CoreV1().Pods(newPod.Namespace).Patch(context.TODO(), newPod.Name,
@@ -1451,8 +1496,12 @@ func doPodResizeErrorTests() {
 				patchedPod = newPod
 			}
 
-			ginkgo.By("verifying pod container's cgroup values after patch")
-			verifyPodContainersCgroupValues(patchedPod, tc.expected, true)
+			if !isInPlaceResizeSupportedByRuntime(f.ClientSet, patchedPod.Spec.NodeName) {
+				ginkgo.By("verifying pod container's cgroup values after patch")
+				if !framework.NodeOSDistroIs("windows") {
+					verifyPodContainersCgroupValues(patchedPod, tc.expected, true)
+				}
+			}
 
 			ginkgo.By("verifying pod resources after patch")
 			verifyPodResources(patchedPod, tc.expected)