Mirror of https://github.com/k3s-io/kubernetes.git
cgroup names created by kubelet should be lowercased

parent 61e7d1ebf1
commit 48d822eafe
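
Before the hunks, a minimal, self-contained sketch of the lowercasing this commit introduces. PodQOSClass and its constants are inlined stand-ins for the v1 API types (the values "Burstable" and "BestEffort" match the v1 API), and rootContainer is a hypothetical value; in the tree these come from k8s.io/kubernetes/pkg/api/v1 and kubelet configuration.

package main

import (
	"fmt"
	"path"
	"strings"
)

// PodQOSClass stands in for v1.PodQOSClass for illustration.
type PodQOSClass string

const (
	PodQOSBurstable  PodQOSClass = "Burstable"
	PodQOSBestEffort PodQOSClass = "BestEffort"
)

func main() {
	// rootContainer is a hypothetical root cgroup; kubelet computes the real one.
	rootContainer := "/kubepods"
	for _, qosClass := range []PodQOSClass{PodQOSBurstable, PodQOSBestEffort} {
		// Lowercase the class name so the cgroup directory is "burstable" or
		// "besteffort" rather than the API's mixed-case spelling.
		fmt.Println(path.Join(rootContainer, strings.ToLower(string(qosClass))))
	}
	// Prints:
	// /kubepods/burstable
	// /kubepods/besteffort
}
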
@@ -19,12 +19,14 @@ package cm
 import (
 	"fmt"
+	"path"
+	"strings"
 	"sync"
 	"time"
 
 	"github.com/golang/glog"
 
 	"k8s.io/apimachinery/pkg/util/wait"
 
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/kubelet/qos"
 )
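
The new "path" and "strings" imports support the path.Join and strings.ToLower calls introduced in the next hunk.
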
@@ -81,12 +83,15 @@ func (m *qosContainerManagerImpl) Start(getNodeAllocatable func() v1.ResourceLis
 	// Top level for Qos containers are created only for Burstable
 	// and Best Effort classes
-	qosClasses := [2]v1.PodQOSClass{v1.PodQOSBurstable, v1.PodQOSBestEffort}
+	qosClasses := map[v1.PodQOSClass]string{
+		v1.PodQOSBurstable:  path.Join(rootContainer, strings.ToLower(string(v1.PodQOSBurstable))),
+		v1.PodQOSBestEffort: path.Join(rootContainer, strings.ToLower(string(v1.PodQOSBestEffort))),
+	}
 
 	// Create containers for both qos classes
-	for _, qosClass := range qosClasses {
+	for qosClass, containerName := range qosClasses {
 		// get the container's absolute name
-		absoluteContainerName := CgroupName(path.Join(rootContainer, string(qosClass)))
+		absoluteContainerName := CgroupName(containerName)
 
 		resourceParameters := &ResourceConfig{}
 		// the BestEffort QoS class has a statically configured minShares value
@@ -114,8 +119,8 @@ func (m *qosContainerManagerImpl) Start(getNodeAllocatable func() v1.ResourceLis
 	// Store the top level qos container names
 	m.qosContainersInfo = QOSContainersInfo{
 		Guaranteed: rootContainer,
-		Burstable:  path.Join(rootContainer, string(v1.PodQOSBurstable)),
-		BestEffort: path.Join(rootContainer, string(v1.PodQOSBestEffort)),
+		Burstable:  qosClasses[v1.PodQOSBurstable],
+		BestEffort: qosClasses[v1.PodQOSBestEffort],
 	}
 	m.getNodeAllocatable = getNodeAllocatable
 	m.activePods = activePods
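
Worth noting about the design choice: building qosClasses as a map computes each lowercased path once and reuses it both when creating the containers and when populating QOSContainersInfo, so the two cannot drift apart. Go's map iteration order is unspecified, but the burstable and besteffort containers are created independently, so loop order does not matter here.
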
@@ -52,8 +52,14 @@ func getResourceRequirements(requests, limits v1.ResourceList) v1.ResourceRequir
 	return res
 }
 
-// Kubelet internal cgroup name for node allocatable cgroup.
-const defaultNodeAllocatableCgroup = "kubepods"
+const (
+	// Kubelet internal cgroup name for node allocatable cgroup.
+	defaultNodeAllocatableCgroup = "kubepods"
+	// Kubelet internal cgroup name for burstable tier
+	burstableCgroup = "burstable"
+	// Kubelet internal cgroup name for best-effort tier
+	bestEffortCgroup = "besteffort"
+)
 
 // makePodToVerifyCgroups returns a pod that verifies the existence of the specified cgroups.
 func makePodToVerifyCgroups(cgroupNames []cm.CgroupName) *v1.Pod {
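
With these shared consts, the e2e test below names the QoS tiers directly ("burstable", "besteffort") instead of casting the mixed-case v1 QoS class constants, keeping the test's expectations aligned with the lowercased cgroups kubelet now creates.
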
@@ -154,7 +160,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
 			if !framework.TestContext.KubeletConfig.CgroupsPerQOS {
 				return
 			}
-			cgroupsToVerify := []cm.CgroupName{cm.CgroupName(v1.PodQOSBurstable), cm.CgroupName(v1.PodQOSBestEffort)}
+			cgroupsToVerify := []cm.CgroupName{cm.CgroupName(burstableCgroup), cm.CgroupName(bestEffortCgroup)}
 			pod := makePodToVerifyCgroups(cgroupsToVerify)
 			f.PodClient().Create(pod)
 			err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
@@ -236,7 +242,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
 				podUID = string(bestEffortPod.UID)
 			})
 			By("Checking if the pod cgroup was created", func() {
-				cgroupsToVerify := []cm.CgroupName{cm.CgroupName("BestEffort/pod" + podUID)}
+				cgroupsToVerify := []cm.CgroupName{cm.CgroupName("besteffort/pod" + podUID)}
 				pod := makePodToVerifyCgroups(cgroupsToVerify)
 				f.PodClient().Create(pod)
 				err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
@@ -245,7 +251,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
 			By("Checking if the pod cgroup was deleted", func() {
 				gp := int64(1)
 				Expect(f.PodClient().Delete(bestEffortPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred())
-				pod := makePodToVerifyCgroupRemoved(cm.CgroupName("BestEffort/pod" + podUID))
+				pod := makePodToVerifyCgroupRemoved(cm.CgroupName("besteffort/pod" + podUID))
 				f.PodClient().Create(pod)
 				err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
 				Expect(err).NotTo(HaveOccurred())
@@ -280,7 +286,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
 				podUID = string(burstablePod.UID)
 			})
 			By("Checking if the pod cgroup was created", func() {
-				cgroupsToVerify := []cm.CgroupName{cm.CgroupName("Burstable/pod" + podUID)}
+				cgroupsToVerify := []cm.CgroupName{cm.CgroupName("burstable/pod" + podUID)}
 				pod := makePodToVerifyCgroups(cgroupsToVerify)
 				f.PodClient().Create(pod)
 				err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
@@ -289,7 +295,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
 			By("Checking if the pod cgroup was deleted", func() {
 				gp := int64(1)
 				Expect(f.PodClient().Delete(burstablePod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred())
-				pod := makePodToVerifyCgroupRemoved(cm.CgroupName("Burstable/pod" + podUID))
+				pod := makePodToVerifyCgroupRemoved(cm.CgroupName("burstable/pod" + podUID))
 				f.PodClient().Create(pod)
 				err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
 				Expect(err).NotTo(HaveOccurred())
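
The e2e checks above compose per-pod cgroup names by joining the lowercased QoS tier with "pod" plus the pod UID. A small sketch of that convention; podCgroupName is a hypothetical helper for illustration, not a function in the test code, and the UID is made up.

package main

import (
	"fmt"
	"strings"
)

// podCgroupName mirrors how the test spells per-pod cgroups,
// e.g. "besteffort/pod<uid>". Hypothetical helper, not in the tree.
func podCgroupName(tier, podUID string) string {
	return tier + "/pod" + podUID
}

func main() {
	for _, tier := range []string{"burstable", "besteffort"} {
		name := podCgroupName(tier, "0f7ad30e-example-uid")
		// Confirm the tier portion stays lowercase end to end.
		fmt.Println(name, "lowercase:", name == strings.ToLower(name))
	}
}
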