Adding Windows CPU limit tests

Patrick Lang 2019-12-11 15:28:26 -08:00
parent 19acf7d051
commit 63ff616aa8
5 changed files with 140 additions and 58 deletions

pkg/kubelet/kuberuntime/BUILD

@@ -14,7 +14,6 @@ go_library(
"helpers.go",
"helpers_linux.go",
"helpers_unsupported.go",
"helpers_windows.go",
"instrumented_services.go",
"kuberuntime_container.go",
"kuberuntime_container_linux.go",

pkg/kubelet/kuberuntime/helpers_unsupported.go

@@ -1,4 +1,4 @@
-// +build !linux,!windows
+// +build !linux
/*
Copyright 2018 The Kubernetes Authors.
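
With helpers_windows.go removed below, this fallback file must now also build on Windows, so the ",!windows" term is dropped from its build tag. For illustration only (not part of this commit; hypothetical package and constant names), a minimal sketch of the constraint semantics, where comma-separated terms on a single +build line are ANDed:

// Old tag: "!linux,!windows" = (not linux) AND (not windows), so the file was
// skipped on both Linux and Windows. The new tag below keeps only "!linux",
// which means the file builds on every GOOS except Linux, Windows included.

// +build !linux

package fallbackdemo

// usedOnWindows is a hypothetical marker: after this commit, Windows builds
// compile fallback files carrying this wider constraint.
const usedOnWindows = true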

pkg/kubelet/kuberuntime/helpers_windows.go (deleted)

@@ -1,56 +0,0 @@
// +build windows
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime

import (
	"github.com/docker/docker/pkg/sysinfo"
)

const (
	// Taken from https://docs.microsoft.com/en-us/virtualization/windowscontainers/manage-containers/resource-controls
	minSharesProcess = 5000
	minSharesHyperV  = 10
	maxShares        = 10000
	milliCPUToCPU    = 1000
)

// TODO: remove - may be dead code
// milliCPUToShares converts milliCPU to CPU shares
func milliCPUToShares(milliCPU int64, hyperv bool) int64 {
	var minShares int64 = minSharesProcess
	if hyperv {
		minShares = minSharesHyperV
	}

	if milliCPU == 0 {
		// Return here to really match kernel default for zero milliCPU.
		return minShares
	}

	// Conceptually (milliCPU / milliCPUToCPU) * sharesPerCPU, but factored to improve rounding.
	totalCPU := sysinfo.NumCPU()
	shares := (milliCPU * (maxShares - minShares)) / int64(totalCPU) / milliCPUToCPU

	if shares < minShares {
		return minShares
	}
	if shares > maxShares {
		return maxShares
	}
	return shares
}
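
For a feel of the mapping being deleted here, a minimal standalone sketch (not part of this commit): it inlines the process-isolation constants above and assumes a hypothetical 4-CPU node instead of calling sysinfo.NumCPU, then prints the resulting share values.

package main

import "fmt"

// Constants mirrored from the deleted helpers_windows.go above (process isolation).
const (
	minSharesProcess = 5000
	maxShares        = 10000
	milliCPUToCPU    = 1000
)

// sharesOn4CPUs reproduces the milliCPUToShares arithmetic for hyperv == false
// on an assumed 4-CPU node (the real helper queries sysinfo.NumCPU at runtime).
func sharesOn4CPUs(milliCPU int64) int64 {
	const totalCPU = 4
	if milliCPU == 0 {
		return minSharesProcess
	}
	shares := (milliCPU * (maxShares - minSharesProcess)) / totalCPU / milliCPUToCPU
	if shares < minSharesProcess {
		return minSharesProcess
	}
	if shares > maxShares {
		return maxShares
	}
	return shares
}

func main() {
	for _, m := range []int64{0, 500, 2000, 4000, 8000} {
		// Prints 5000 shares for 0, 500, 2000 and 4000 millicores, and 10000 for 8000.
		fmt.Printf("%dm -> %d shares\n", m, sharesOn4CPUs(m))
	}
}

On this assumed node, any limit at or below the node's CPU count yields the 5000-share floor.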

test/e2e/windows/BUILD

@@ -5,6 +5,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"cpu_limits.go",
"density.go",
"dns.go",
"framework.go",

test/e2e/windows/cpu_limits.go (new file)

@@ -0,0 +1,138 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package windows

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/kubernetes/test/e2e/framework"
	e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
	imageutils "k8s.io/kubernetes/test/utils/image"

	"time"

	"github.com/onsi/ginkgo"
)

var _ = SIGDescribe("[Feature:Windows] Cpu Resources", func() {
	f := framework.NewDefaultFramework("cpu-resources-test-windows")

	// The Windows 'BusyBox' image is PowerShell plus a collection of scripts and utilities to mimic common busybox commands
	powershellImage := imageutils.GetConfig(imageutils.BusyBox)

	ginkgo.Context("Container limits", func() {
		ginkgo.It("should not be exceeded after waiting 2 minutes", func() {
			ginkgo.By("Creating one pod with limit set to '0.5'")
			podsDecimal := newCPUBurnPods(1, powershellImage, "0.5", "1Gi")
			f.PodClient().CreateBatch(podsDecimal)

			ginkgo.By("Creating one pod with limit set to '500m'")
			podsMilli := newCPUBurnPods(1, powershellImage, "500m", "1Gi")
			f.PodClient().CreateBatch(podsMilli)

			ginkgo.By("Waiting 2 minutes")
			time.Sleep(2 * time.Minute)

			ginkgo.By("Ensuring pods are still running")
			var allPods [](*v1.Pod)
			for _, p := range podsDecimal {
				pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name,
					metav1.GetOptions{})
				framework.ExpectNoError(err, "Error retrieving pod")
				framework.ExpectEqual(pod.Status.Phase, v1.PodRunning)
				allPods = append(allPods, pod)
			}
			for _, p := range podsMilli {
				pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name,
					metav1.GetOptions{})
				framework.ExpectNoError(err, "Error retrieving pod")
				framework.ExpectEqual(pod.Status.Phase, v1.PodRunning)
				allPods = append(allPods, pod)
			}

			ginkgo.By("Ensuring cpu doesn't exceed limit by >5%")
			for _, p := range allPods {
				ginkgo.By("Gathering node summary stats")
				nodeStats, err := e2ekubelet.GetStatsSummary(f.ClientSet, p.Spec.NodeName)
				framework.ExpectNoError(err, "Error grabbing node summary stats")
				found := false
				cpuUsage := float64(0)
				for _, pod := range nodeStats.Pods {
					if pod.PodRef.Name != p.Name || pod.PodRef.Namespace != p.Namespace {
						continue
					}
					cpuUsage = float64(*pod.CPU.UsageNanoCores) * 1e-9
					found = true
					break
				}
				framework.ExpectEqual(found, true, "Found pod in stats summary")
				framework.Logf("Pod %s usage: %v", p.Name, cpuUsage)
				framework.ExpectEqual(cpuUsage > 0, true, "Pods reported usage should be > 0")
				framework.ExpectEqual((.5*1.05) > cpuUsage, true, "Pods reported usage should not exceed limit by >5%")
			}
		})
	})
})
// newCPUBurnPods creates a list of pods (specification) with a workload that will consume all available CPU resources up to container limit
func newCPUBurnPods(numPods int, image imageutils.Config, cpuLimit string, memoryLimit string) []*v1.Pod {
	var pods []*v1.Pod

	memLimitQuantity, err := resource.ParseQuantity(memoryLimit)
	framework.ExpectNoError(err)

	cpuLimitQuantity, err := resource.ParseQuantity(cpuLimit)
	framework.ExpectNoError(err)

	for i := 0; i < numPods; i++ {
		podName := "cpulimittest-" + string(uuid.NewUUID())
		pod := v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name: podName,
				Labels: map[string]string{
					"name":    podName,
					"testapp": "cpuburn",
				},
			},
			Spec: v1.PodSpec{
				// Restart policy is always (default).
				Containers: []v1.Container{
					{
						Image: image.GetE2EImage(),
						Name:  podName,
						Resources: v1.ResourceRequirements{
							Limits: v1.ResourceList{
								v1.ResourceMemory: memLimitQuantity,
								v1.ResourceCPU:    cpuLimitQuantity,
							},
						},
						Command: []string{
							"powershell.exe",
							"-Command",
							"foreach ($loopnumber in 1..8) { Start-Job -ScriptBlock { $result = 1; foreach($mm in 1..2147483647){$res1=1;foreach($num in 1..2147483647){$res1=$mm*$num*1340371};$res1} } } ; get-job | wait-job",
						},
					},
				},
				NodeSelector: map[string]string{
					"beta.kubernetes.io/os": "windows",
				},
			},
		}

		pods = append(pods, &pod)
	}

	return pods
}
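
As a footnote on the two limit spellings exercised above, a minimal standalone sketch (not part of this commit) showing that "0.5" and "500m" parse to the same quantity, and the usage ceiling the 5% headroom check tolerates:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// "0.5" CPU and "500m" CPU are two spellings of the same limit.
	decimal := resource.MustParse("0.5")
	milli := resource.MustParse("500m")
	fmt.Println(decimal.MilliValue(), milli.MilliValue()) // 500 500

	// The test accepts usage strictly below limit * 1.05; with a 0.5-core limit
	// that is 0.525 cores, i.e. 525000000 nanocores in the kubelet summary API.
	limitCores := float64(decimal.MilliValue()) / 1000
	fmt.Println(limitCores * 1.05) // 0.525
}

Since both spellings produce identical container limits, the test expects both pods to stay under the same ceiling.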