Merge pull request #38989 from sjenning/set-qos-field
Automatic merge from submit-queue (batch tested with PRs 39684, 39577, 38989, 39534, 39702).

Set PodStatus QOSClass field.

This PR continues the work from https://github.com/kubernetes/kubernetes/pull/37968. It converts all local usage of the `qos` package class types to the new API-level types (first commit), and sets the pod status QOSClass field at pod creation time on the API server in `PrepareForCreate` and in the kubelet in the pod status update path (second commit). This way the pod QoS class is set even if the pod isn't scheduled yet.

Fixes #33255

@ConnorDoyle @derekwaynecarr @vishh
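For orientation, the sketch below is not code from this PR and does not use the kubernetes packages; it only illustrates the QoS classification rules the diff relies on: a pod is BestEffort when no container sets requests or limits, Guaranteed when every container's limits are set and its requests (if set) equal those limits, and Burstable otherwise. It uses plain string maps in place of v1.ResourceList and omits details such as the cpu/memory-only resource filtering.

package main

import "fmt"

// QOSClass mirrors the three classes used throughout the diff.
type QOSClass string

const (
	Guaranteed QOSClass = "Guaranteed"
	Burstable  QOSClass = "Burstable"
	BestEffort QOSClass = "BestEffort"
)

// container holds per-resource requests and limits as opaque strings;
// an absent key means the value was not specified.
type container struct {
	requests map[string]string
	limits   map[string]string
}

// getPodQOS applies the simplified rules described above.
func getPodQOS(containers []container) QOSClass {
	anySet := false
	guaranteed := true
	for _, c := range containers {
		if len(c.requests) == 0 && len(c.limits) == 0 {
			guaranteed = false // a container with nothing set can never be Guaranteed
			continue
		}
		anySet = true
		if len(c.limits) == 0 {
			guaranteed = false
		}
		for res, req := range c.requests {
			if lim, ok := c.limits[res]; !ok || lim != req {
				guaranteed = false
			}
		}
	}
	switch {
	case !anySet:
		return BestEffort
	case guaranteed:
		return Guaranteed
	default:
		return Burstable
	}
}

func main() {
	fmt.Println(getPodQOS([]container{{
		requests: map[string]string{"cpu": "100m", "memory": "100Mi"},
		limits:   map[string]string{"cpu": "100m", "memory": "100Mi"},
	}})) // Guaranteed
	fmt.Println(getPodQOS([]container{{}})) // BestEffort
	fmt.Println(getPodQOS([]container{{
		requests: map[string]string{"cpu": "100m"},
	}})) // Burstable
}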
This commit is contained in commit 3f9f7471af.
@@ -51,7 +51,6 @@ import (
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/pkg/fieldpath"
"k8s.io/kubernetes/pkg/fields"
-"k8s.io/kubernetes/pkg/kubelet/qos"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/types"
@@ -538,7 +537,7 @@ func describePod(pod *api.Pod, events *api.EventList) (string, error) {
}
}
describeVolumes(pod.Spec.Volumes, w, "")
-w.Write(LEVEL_0, "QoS Class:\t%s\n", qos.InternalGetPodQOS(pod))
+w.Write(LEVEL_0, "QoS Class:\t%s\n", pod.Status.QOSClass)
printLabelsMultiline(w, "Node-Selectors", pod.Spec.NodeSelector)
printTolerationsInAnnotationMultiline(w, "Tolerations", pod.Annotations)
if events != nil {
@@ -72,6 +72,7 @@ go_library(
"//pkg/kubelet/pod:go_default_library",
"//pkg/kubelet/prober:go_default_library",
"//pkg/kubelet/prober/results:go_default_library",
+"//pkg/kubelet/qos:go_default_library",
"//pkg/kubelet/remote:go_default_library",
"//pkg/kubelet/rkt:go_default_library",
"//pkg/kubelet/server:go_default_library",
@@ -276,7 +276,7 @@ func InitQOS(cgroupDriver, rootContainer string, subsystems *CgroupSubsystems) (
cm := NewCgroupManager(subsystems, cgroupDriver)
// Top level for Qos containers are created only for Burstable
// and Best Effort classes
-qosClasses := [2]qos.QOSClass{qos.Burstable, qos.BestEffort}
+qosClasses := [2]v1.PodQOSClass{v1.PodQOSBurstable, v1.PodQOSBestEffort}

// Create containers for both qos classes
for _, qosClass := range qosClasses {
@@ -297,8 +297,8 @@ func InitQOS(cgroupDriver, rootContainer string, subsystems *CgroupSubsystems) (
// Store the top level qos container names
qosContainersInfo := QOSContainersInfo{
Guaranteed: rootContainer,
-Burstable: path.Join(rootContainer, string(qos.Burstable)),
-BestEffort: path.Join(rootContainer, string(qos.BestEffort)),
+Burstable: path.Join(rootContainer, string(v1.PodQOSBurstable)),
+BestEffort: path.Join(rootContainer, string(v1.PodQOSBestEffort)),
}
return qosContainersInfo, nil
}
@@ -111,12 +111,12 @@ func ResourceConfigForPod(pod *v1.Pod) *ResourceConfig {

// build the result
result := &ResourceConfig{}
-if qosClass == qos.Guaranteed {
+if qosClass == v1.PodQOSGuaranteed {
result.CpuShares = &cpuShares
result.CpuQuota = &cpuQuota
result.CpuPeriod = &cpuPeriod
result.Memory = &memoryLimits
-} else if qosClass == qos.Burstable {
+} else if qosClass == v1.PodQOSBurstable {
result.CpuShares = &cpuShares
if cpuLimitsDeclared {
result.CpuQuota = &cpuQuota
@@ -99,11 +99,11 @@ func (m *podContainerManagerImpl) GetPodContainerName(pod *v1.Pod) (CgroupName,
// Get the parent QOS container name
var parentContainer string
switch podQOS {
-case qos.Guaranteed:
+case v1.PodQOSGuaranteed:
parentContainer = m.qosContainersInfo.Guaranteed
-case qos.Burstable:
+case v1.PodQOSBurstable:
parentContainer = m.qosContainersInfo.Burstable
-case qos.BestEffort:
+case v1.PodQOSBestEffort:
parentContainer = m.qosContainersInfo.BestEffort
}
podContainer := podCgroupNamePrefix + string(pod.UID)
@@ -109,7 +109,7 @@ func (m *managerImpl) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAd

// the node has memory pressure, admit if not best-effort
if hasNodeCondition(m.nodeConditions, v1.NodeMemoryPressure) {
-notBestEffort := qos.BestEffort != qos.GetPodQOS(attrs.Pod)
+notBestEffort := v1.PodQOSBestEffort != qos.GetPodQOS(attrs.Pod)
if notBestEffort || kubetypes.IsCriticalPod(attrs.Pod) {
return lifecycle.PodAdmitResult{Admit: true}
}
@@ -493,12 +493,12 @@ func qosComparator(p1, p2 *v1.Pod) int {
return 0
}
// if p1 is best effort, we know p2 is burstable or guaranteed
-if qosP1 == qos.BestEffort {
+if qosP1 == v1.PodQOSBestEffort {
return -1
}
// we know p1 and p2 are not besteffort, so if p1 is burstable, p2 must be guaranteed
-if qosP1 == qos.Burstable {
-if qosP2 == qos.Guaranteed {
+if qosP1 == v1.PodQOSBurstable {
+if qosP2 == v1.PodQOSGuaranteed {
return -1
}
return 1
@@ -43,6 +43,7 @@ import (
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/envvars"
"k8s.io/kubernetes/pkg/kubelet/images"
+"k8s.io/kubernetes/pkg/kubelet/qos"
"k8s.io/kubernetes/pkg/kubelet/server/remotecommand"
"k8s.io/kubernetes/pkg/kubelet/status"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
@@ -1120,6 +1121,8 @@ func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.Po
func (kl *Kubelet) convertStatusToAPIStatus(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *v1.PodStatus {
var apiPodStatus v1.PodStatus
apiPodStatus.PodIP = podStatus.IP
+// set status for Pods created on versions of kube older than 1.6
+apiPodStatus.QOSClass = qos.GetPodQOS(pod)

apiPodStatus.ContainerStatuses = kl.convertToAPIContainerStatuses(
pod, podStatus,
@@ -14,7 +14,6 @@ go_library(
"doc.go",
"policy.go",
"qos.go",
-"types.go",
],
tags = ["automanaged"],
deps = [
@@ -49,10 +49,10 @@ func GetContainerOOMScoreAdjust(pod *v1.Pod, container *v1.Container, memoryCapa
}

switch GetPodQOS(pod) {
-case Guaranteed:
+case v1.PodQOSGuaranteed:
// Guaranteed containers should be the last to get killed.
return guaranteedOOMScoreAdj
-case BestEffort:
+case v1.PodQOSBestEffort:
return besteffortOOMScoreAdj
}
@@ -47,7 +47,7 @@ func isResourceBestEffort(container *v1.Container, resource v1.ResourceName) boo
// A pod is besteffort if none of its containers have specified any requests or limits.
// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.
// A pod is burstable if limits and requests do not match across all containers.
-func GetPodQOS(pod *v1.Pod) QOSClass {
+func GetPodQOS(pod *v1.Pod) v1.PodQOSClass {
requests := v1.ResourceList{}
limits := v1.ResourceList{}
zeroQuantity := resource.MustParse("0")
@@ -91,7 +91,7 @@ func GetPodQOS(pod *v1.Pod) QOSClass {
}
}
if len(requests) == 0 && len(limits) == 0 {
-return BestEffort
+return v1.PodQOSBestEffort
}
// Check is requests match limits for all resources.
if isGuaranteed {
@@ -104,21 +104,20 @@ func GetPodQOS(pod *v1.Pod) QOSClass {
}
if isGuaranteed &&
len(requests) == len(limits) {
-return Guaranteed
+return v1.PodQOSGuaranteed
}
-return Burstable
+return v1.PodQOSBurstable
}

// InternalGetPodQOS returns the QoS class of a pod.
// A pod is besteffort if none of its containers have specified any requests or limits.
// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.
// A pod is burstable if limits and requests do not match across all containers.
-func InternalGetPodQOS(pod *api.Pod) QOSClass {
+func InternalGetPodQOS(pod *api.Pod) api.PodQOSClass {
requests := api.ResourceList{}
limits := api.ResourceList{}
zeroQuantity := resource.MustParse("0")
isGuaranteed := true
var supportedQoSComputeResources = sets.NewString(string(api.ResourceCPU), string(api.ResourceMemory))
for _, container := range pod.Spec.Containers {
// process requests
for name, quantity := range container.Resources.Requests {
@@ -158,7 +157,7 @@ func InternalGetPodQOS(pod *api.Pod) QOSClass {
}
}
if len(requests) == 0 && len(limits) == 0 {
-return BestEffort
+return api.PodQOSBestEffort
}
// Check is requests match limits for all resources.
if isGuaranteed {
@@ -171,13 +170,13 @@ func InternalGetPodQOS(pod *api.Pod) QOSClass {
}
if isGuaranteed &&
len(requests) == len(limits) {
-return Guaranteed
+return api.PodQOSGuaranteed
}
-return Burstable
+return api.PodQOSBurstable
}

// QOSList is a set of (resource name, QoS class) pairs.
-type QOSList map[v1.ResourceName]QOSClass
+type QOSList map[v1.ResourceName]v1.PodQOSClass

// GetQOS returns a mapping of resource name to QoS class of a container
func GetQOS(container *v1.Container) QOSList {
@@ -185,11 +184,11 @@ func GetQOS(container *v1.Container) QOSList {
for resource := range allResources(container) {
switch {
case isResourceGuaranteed(container, resource):
-resourceToQOS[resource] = Guaranteed
+resourceToQOS[resource] = v1.PodQOSGuaranteed
case isResourceBestEffort(container, resource):
-resourceToQOS[resource] = BestEffort
+resourceToQOS[resource] = v1.PodQOSBestEffort
default:
-resourceToQOS[resource] = Burstable
+resourceToQOS[resource] = v1.PodQOSBurstable
}
}
return resourceToQOS
@@ -67,105 +67,105 @@ func newPod(name string, containers []v1.Container) *v1.Pod {
func TestGetPodQOS(t *testing.T) {
testCases := []struct {
pod *v1.Pod
-expected QOSClass
+expected v1.PodQOSClass
}{
{
pod: newPod("guaranteed", []v1.Container{
newContainer("guaranteed", getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
}),
-expected: Guaranteed,
+expected: v1.PodQOSGuaranteed,
},
{
pod: newPod("guaranteed-with-gpu", []v1.Container{
newContainer("guaranteed", getResourceList("100m", "100Mi"), addResource("nvidia-gpu", "2", getResourceList("100m", "100Mi"))),
}),
-expected: Guaranteed,
+expected: v1.PodQOSGuaranteed,
},
{
pod: newPod("guaranteed-guaranteed", []v1.Container{
newContainer("guaranteed", getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
newContainer("guaranteed", getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
}),
-expected: Guaranteed,
+expected: v1.PodQOSGuaranteed,
},
{
pod: newPod("guaranteed-guaranteed-with-gpu", []v1.Container{
newContainer("guaranteed", getResourceList("100m", "100Mi"), addResource("nvidia-gpu", "2", getResourceList("100m", "100Mi"))),
newContainer("guaranteed", getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
}),
-expected: Guaranteed,
+expected: v1.PodQOSGuaranteed,
},
{
pod: newPod("best-effort-best-effort", []v1.Container{
newContainer("best-effort", getResourceList("", ""), getResourceList("", "")),
newContainer("best-effort", getResourceList("", ""), getResourceList("", "")),
}),
-expected: BestEffort,
+expected: v1.PodQOSBestEffort,
},
{
pod: newPod("best-effort-best-effort-with-gpu", []v1.Container{
newContainer("best-effort", getResourceList("", ""), addResource("nvidia-gpu", "2", getResourceList("", ""))),
newContainer("best-effort", getResourceList("", ""), getResourceList("", "")),
}),
-expected: BestEffort,
+expected: v1.PodQOSBestEffort,
},
{
pod: newPod("best-effort-with-gpu", []v1.Container{
newContainer("best-effort", getResourceList("", ""), addResource("nvidia-gpu", "2", getResourceList("", ""))),
}),
-expected: BestEffort,
+expected: v1.PodQOSBestEffort,
},
{
pod: newPod("best-effort-burstable", []v1.Container{
newContainer("best-effort", getResourceList("", ""), addResource("nvidia-gpu", "2", getResourceList("", ""))),
newContainer("burstable", getResourceList("1", ""), getResourceList("2", "")),
}),
-expected: Burstable,
+expected: v1.PodQOSBurstable,
},
{
pod: newPod("best-effort-guaranteed", []v1.Container{
newContainer("best-effort", getResourceList("", ""), addResource("nvidia-gpu", "2", getResourceList("", ""))),
newContainer("guaranteed", getResourceList("10m", "100Mi"), getResourceList("10m", "100Mi")),
}),
-expected: Burstable,
+expected: v1.PodQOSBurstable,
},
{
pod: newPod("burstable-cpu-guaranteed-memory", []v1.Container{
newContainer("burstable", getResourceList("", "100Mi"), getResourceList("", "100Mi")),
}),
-expected: Burstable,
+expected: v1.PodQOSBurstable,
},
{
pod: newPod("burstable-no-limits", []v1.Container{
newContainer("burstable", getResourceList("100m", "100Mi"), getResourceList("", "")),
}),
-expected: Burstable,
+expected: v1.PodQOSBurstable,
},
{
pod: newPod("burstable-guaranteed", []v1.Container{
newContainer("burstable", getResourceList("1", "100Mi"), getResourceList("2", "100Mi")),
newContainer("guaranteed", getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
}),
-expected: Burstable,
+expected: v1.PodQOSBurstable,
},
{
pod: newPod("burstable-unbounded-but-requests-match-limits", []v1.Container{
newContainer("burstable", getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
newContainer("burstable-unbounded", getResourceList("100m", "100Mi"), getResourceList("", "")),
}),
-expected: Burstable,
+expected: v1.PodQOSBurstable,
},
{
pod: newPod("burstable-1", []v1.Container{
newContainer("burstable", getResourceList("10m", "100Mi"), getResourceList("100m", "200Mi")),
}),
-expected: Burstable,
+expected: v1.PodQOSBurstable,
},
{
pod: newPod("burstable-2", []v1.Container{
newContainer("burstable", getResourceList("0", "0"), addResource("nvidia-gpu", "2", getResourceList("100m", "200Mi"))),
}),
-expected: Burstable,
+expected: v1.PodQOSBurstable,
},
}
for id, testCase := range testCases {
@@ -1,29 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package qos
-
-// QOSClass defines the supported qos classes of Pods/Containers.
-type QOSClass string
-
-const (
-// Guaranteed is the Guaranteed qos class.
-Guaranteed QOSClass = "Guaranteed"
-// Burstable is the Burstable qos class.
-Burstable QOSClass = "Burstable"
-// BestEffort is the BestEffort qos class.
-BestEffort QOSClass = "BestEffort"
-)
@@ -256,7 +256,7 @@ func PodUsageFunc(obj runtime.Object) (api.ResourceList, error) {
}

func isBestEffort(pod *api.Pod) bool {
-return qos.InternalGetPodQOS(pod) == qos.BestEffort
+return qos.InternalGetPodQOS(pod) == api.PodQOSBestEffort
}

func isTerminating(pod *api.Pod) bool {
@@ -23,6 +23,7 @@ go_library(
"//pkg/fields:go_default_library",
"//pkg/genericapiserver/api/request:go_default_library",
"//pkg/kubelet/client:go_default_library",
+"//pkg/kubelet/qos:go_default_library",
"//pkg/labels:go_default_library",
"//pkg/registry/generic:go_default_library",
"//pkg/runtime:go_default_library",
@@ -41,6 +42,7 @@ go_test(
deps = [
"//pkg/api:go_default_library",
"//pkg/api/errors:go_default_library",
+"//pkg/api/resource:go_default_library",
"//pkg/api/testing:go_default_library",
"//pkg/apimachinery/registered:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
@@ -32,6 +32,7 @@ import (
"k8s.io/kubernetes/pkg/fields"
genericapirequest "k8s.io/kubernetes/pkg/genericapiserver/api/request"
"k8s.io/kubernetes/pkg/kubelet/client"
+"k8s.io/kubernetes/pkg/kubelet/qos"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/registry/generic"
"k8s.io/kubernetes/pkg/runtime"
@@ -60,7 +61,8 @@ func (podStrategy) NamespaceScoped() bool {
func (podStrategy) PrepareForCreate(ctx genericapirequest.Context, obj runtime.Object) {
pod := obj.(*api.Pod)
pod.Status = api.PodStatus{
-Phase: api.PodPending,
+Phase:    api.PodPending,
+QOSClass: qos.InternalGetPodQOS(pod),
}
}
@@ -22,6 +22,7 @@ import (

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
+"k8s.io/kubernetes/pkg/api/resource"
apitesting "k8s.io/kubernetes/pkg/api/testing"
"k8s.io/kubernetes/pkg/apimachinery/registered"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
@@ -92,6 +93,80 @@ func TestMatchPod(t *testing.T) {
}
}

+func getResourceList(cpu, memory string) api.ResourceList {
+res := api.ResourceList{}
+if cpu != "" {
+res[api.ResourceCPU] = resource.MustParse(cpu)
+}
+if memory != "" {
+res[api.ResourceMemory] = resource.MustParse(memory)
+}
+return res
+}
+
+func addResource(rName, value string, rl api.ResourceList) api.ResourceList {
+rl[api.ResourceName(rName)] = resource.MustParse(value)
+return rl
+}
+
+func getResourceRequirements(requests, limits api.ResourceList) api.ResourceRequirements {
+res := api.ResourceRequirements{}
+res.Requests = requests
+res.Limits = limits
+return res
+}
+
+func newContainer(name string, requests api.ResourceList, limits api.ResourceList) api.Container {
+return api.Container{
+Name: name,
+Resources: getResourceRequirements(requests, limits),
+}
+}
+
+func newPod(name string, containers []api.Container) *api.Pod {
+return &api.Pod{
+ObjectMeta: api.ObjectMeta{
+Name: name,
+},
+Spec: api.PodSpec{
+Containers: containers,
+},
+}
+}
+
+func TestGetPodQOS(t *testing.T) {
+testCases := []struct {
+pod *api.Pod
+expected api.PodQOSClass
+}{
+{
+pod: newPod("guaranteed", []api.Container{
+newContainer("guaranteed", getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
+}),
+expected: api.PodQOSGuaranteed,
+},
+{
+pod: newPod("best-effort", []api.Container{
+newContainer("best-effort", getResourceList("", ""), getResourceList("", "")),
+}),
+expected: api.PodQOSBestEffort,
+},
+{
+pod: newPod("burstable", []api.Container{
+newContainer("burstable", getResourceList("100m", "100Mi"), getResourceList("", "")),
+}),
+expected: api.PodQOSBurstable,
+},
+}
+for id, testCase := range testCases {
+Strategy.PrepareForCreate(genericapirequest.NewContext(), testCase.pod)
+actual := testCase.pod.Status.QOSClass
+if actual != testCase.expected {
+t.Errorf("[%d]: invalid qos pod %s, expected: %s, actual: %s", id, testCase.pod.Name, testCase.expected, actual)
+}
+}
+}
+
func TestCheckGracefulDelete(t *testing.T) {
defaultGracePeriod := int64(30)
tcs := []struct {
@@ -1202,7 +1202,7 @@ func tolerationsToleratesTaints(tolerations []v1.Toleration, taints []v1.Taint)

// Determine if a pod is scheduled with best-effort QoS
func isPodBestEffort(pod *v1.Pod) bool {
-return qos.GetPodQOS(pod) == qos.BestEffort
+return qos.GetPodQOS(pod) == v1.PodQOSBestEffort
}

// CheckNodeMemoryPressurePredicate checks if a pod can be scheduled on a node
321 test/e2e/pods.go
@@ -24,6 +24,7 @@ import (
"strconv"
"time"

+"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/labels"
@@ -36,155 +37,203 @@ import (
. "github.com/onsi/gomega"
)

var _ = framework.KubeDescribe("Pods Delete Grace Period", func() {
var _ = framework.KubeDescribe("Pods Extended", func() {
f := framework.NewDefaultFramework("pods")
var podClient *framework.PodClient
BeforeEach(func() {
podClient = f.PodClient()
})
It("should be submitted and removed [Conformance]", func() {
By("creating the pod")
name := "pod-submit-remove-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",
"time": value,
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nginx",
Image: "gcr.io/google_containers/nginx-slim:0.7",

framework.KubeDescribe("Delete Grace Period", func() {
var podClient *framework.PodClient
BeforeEach(func() {
podClient = f.PodClient()
})
It("should be submitted and removed [Conformance]", func() {
By("creating the pod")
name := "pod-submit-remove-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",
"time": value,
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nginx",
Image: "gcr.io/google_containers/nginx-slim:0.7",
},
},
},
},
}

By("setting up watch")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := v1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pod")
Expect(len(pods.Items)).To(Equal(0))
options = v1.ListOptions{
LabelSelector: selector.String(),
ResourceVersion: pods.ListMeta.ResourceVersion,
}
w, err := podClient.Watch(options)
Expect(err).NotTo(HaveOccurred(), "failed to set up watch")

By("submitting the pod to kubernetes")
podClient.Create(pod)

By("verifying the pod is in kubernetes")
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = v1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pod")
Expect(len(pods.Items)).To(Equal(1))

By("verifying pod creation was observed")
select {
case event, _ := <-w.ResultChan():
if event.Type != watch.Added {
framework.Failf("Failed to observe pod creation: %v", event)
}
case <-time.After(framework.PodStartTimeout):
Fail("Timeout while waiting for pod creation")
}

// We need to wait for the pod to be running, otherwise the deletion
// may be carried out immediately rather than gracefully.
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
// save the running pod
pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "failed to GET scheduled pod")

// start local proxy, so we can send graceful deletion over query string, rather than body parameter
cmd := framework.KubectlCmd("proxy", "-p", "0")
stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd)
Expect(err).NotTo(HaveOccurred(), "failed to start up proxy")
defer stdout.Close()
defer stderr.Close()
defer framework.TryKill(cmd)
buf := make([]byte, 128)
var n int
n, err = stdout.Read(buf)
Expect(err).NotTo(HaveOccurred(), "failed to read from kubectl proxy stdout")
output := string(buf[:n])
proxyRegexp := regexp.MustCompile("Starting to serve on 127.0.0.1:([0-9]+)")
match := proxyRegexp.FindStringSubmatch(output)
Expect(len(match)).To(Equal(2))
port, err := strconv.Atoi(match[1])
Expect(err).NotTo(HaveOccurred(), "failed to convert port into string")

endpoint := fmt.Sprintf("http://localhost:%d/api/v1/namespaces/%s/pods/%s?gracePeriodSeconds=30", port, pod.Namespace, pod.Name)
tr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
client := &http.Client{Transport: tr}
req, err := http.NewRequest("DELETE", endpoint, nil)
Expect(err).NotTo(HaveOccurred(), "failed to create http request")

By("deleting the pod gracefully")
rsp, err := client.Do(req)
Expect(err).NotTo(HaveOccurred(), "failed to use http client to send delete")

defer rsp.Body.Close()

By("verifying the kubelet observed the termination notice")
Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
podList, err := framework.GetKubeletPods(f.ClientSet, pod.Spec.NodeName)
if err != nil {
framework.Logf("Unable to retrieve kubelet pods for node %v: %v", pod.Spec.NodeName, err)
return false, nil
By("setting up watch")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := v1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pod")
Expect(len(pods.Items)).To(Equal(0))
options = v1.ListOptions{
LabelSelector: selector.String(),
ResourceVersion: pods.ListMeta.ResourceVersion,
}
for _, kubeletPod := range podList.Items {
if pod.Name != kubeletPod.Name {
continue
}
if kubeletPod.ObjectMeta.DeletionTimestamp == nil {
framework.Logf("deletion has not yet been observed")
return false, nil
}
return true, nil
}
framework.Logf("no pod exists with the name we were looking for, assuming the termination request was observed and completed")
return true, nil
})).NotTo(HaveOccurred(), "kubelet never observed the termination notice")
w, err := podClient.Watch(options)
Expect(err).NotTo(HaveOccurred(), "failed to set up watch")

By("verifying pod deletion was observed")
deleted := false
timeout := false
var lastPod *v1.Pod
timer := time.After(30 * time.Second)
for !deleted && !timeout {
By("submitting the pod to kubernetes")
podClient.Create(pod)

By("verifying the pod is in kubernetes")
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = v1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pod")
Expect(len(pods.Items)).To(Equal(1))

By("verifying pod creation was observed")
select {
case event, _ := <-w.ResultChan():
if event.Type == watch.Deleted {
lastPod = event.Object.(*v1.Pod)
deleted = true
if event.Type != watch.Added {
framework.Failf("Failed to observe pod creation: %v", event)
}
case <-timer:
timeout = true
case <-time.After(framework.PodStartTimeout):
Fail("Timeout while waiting for pod creation")
}
}
if !deleted {
Fail("Failed to observe pod deletion")
}

Expect(lastPod.DeletionTimestamp).ToNot(BeNil())
Expect(lastPod.Spec.TerminationGracePeriodSeconds).ToNot(BeZero())
// We need to wait for the pod to be running, otherwise the deletion
// may be carried out immediately rather than gracefully.
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
// save the running pod
pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "failed to GET scheduled pod")

selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = v1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
Expect(len(pods.Items)).To(Equal(0))
// start local proxy, so we can send graceful deletion over query string, rather than body parameter
cmd := framework.KubectlCmd("proxy", "-p", "0")
stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd)
Expect(err).NotTo(HaveOccurred(), "failed to start up proxy")
defer stdout.Close()
defer stderr.Close()
defer framework.TryKill(cmd)
buf := make([]byte, 128)
var n int
n, err = stdout.Read(buf)
Expect(err).NotTo(HaveOccurred(), "failed to read from kubectl proxy stdout")
output := string(buf[:n])
proxyRegexp := regexp.MustCompile("Starting to serve on 127.0.0.1:([0-9]+)")
match := proxyRegexp.FindStringSubmatch(output)
Expect(len(match)).To(Equal(2))
port, err := strconv.Atoi(match[1])
Expect(err).NotTo(HaveOccurred(), "failed to convert port into string")

endpoint := fmt.Sprintf("http://localhost:%d/api/v1/namespaces/%s/pods/%s?gracePeriodSeconds=30", port, pod.Namespace, pod.Name)
tr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
client := &http.Client{Transport: tr}
req, err := http.NewRequest("DELETE", endpoint, nil)
Expect(err).NotTo(HaveOccurred(), "failed to create http request")

By("deleting the pod gracefully")
rsp, err := client.Do(req)
Expect(err).NotTo(HaveOccurred(), "failed to use http client to send delete")

defer rsp.Body.Close()

By("verifying the kubelet observed the termination notice")
Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
podList, err := framework.GetKubeletPods(f.ClientSet, pod.Spec.NodeName)
if err != nil {
framework.Logf("Unable to retrieve kubelet pods for node %v: %v", pod.Spec.NodeName, err)
return false, nil
}
for _, kubeletPod := range podList.Items {
if pod.Name != kubeletPod.Name {
continue
}
if kubeletPod.ObjectMeta.DeletionTimestamp == nil {
framework.Logf("deletion has not yet been observed")
return false, nil
}
return true, nil
}
framework.Logf("no pod exists with the name we were looking for, assuming the termination request was observed and completed")
return true, nil
})).NotTo(HaveOccurred(), "kubelet never observed the termination notice")

By("verifying pod deletion was observed")
deleted := false
timeout := false
var lastPod *v1.Pod
timer := time.After(30 * time.Second)
for !deleted && !timeout {
select {
case event, _ := <-w.ResultChan():
if event.Type == watch.Deleted {
lastPod = event.Object.(*v1.Pod)
deleted = true
}
case <-timer:
timeout = true
}
}
if !deleted {
Fail("Failed to observe pod deletion")
}

Expect(lastPod.DeletionTimestamp).ToNot(BeNil())
Expect(lastPod.Spec.TerminationGracePeriodSeconds).ToNot(BeZero())

selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = v1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
Expect(len(pods.Items)).To(Equal(0))

})
})

framework.KubeDescribe("Pods Set QOS Class", func() {
var podClient *framework.PodClient
BeforeEach(func() {
podClient = f.PodClient()
})
It("should be submitted and removed [Conformance]", func() {
By("creating the pod")
name := "pod-qos-class-" + string(uuid.NewUUID())
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": name,
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nginx",
Image: "gcr.io/google_containers/nginx-slim:0.7",
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
"cpu": resource.MustParse("100m"),
"memory": resource.MustParse("100Mi"),
},
Requests: v1.ResourceList{
"cpu": resource.MustParse("100m"),
"memory": resource.MustParse("100Mi"),
},
},
},
},
},
}

By("submitting the pod to kubernetes")
podClient.Create(pod)

By("verifying QOS class is set on the pod")
pod, err := podClient.Get(name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "failed to query for pod")
Expect(pod.Status.QOSClass == v1.PodQOSGuaranteed)
})
})
})
@@ -86,7 +86,6 @@ go_test(
"//pkg/kubelet/dockertools:go_default_library",
"//pkg/kubelet/images:go_default_library",
"//pkg/kubelet/metrics:go_default_library",
-"//pkg/kubelet/qos:go_default_library",
"//pkg/labels:go_default_library",
"//pkg/metrics:go_default_library",
"//pkg/runtime:go_default_library",
@@ -20,7 +20,6 @@ import (
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/kubelet/cm"
-"k8s.io/kubernetes/pkg/kubelet/qos"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"

@@ -146,7 +145,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
if !framework.TestContext.KubeletConfig.ExperimentalCgroupsPerQOS {
return
}
-cgroupsToVerify := []cm.CgroupName{cm.CgroupName(qos.Burstable), cm.CgroupName(qos.BestEffort)}
+cgroupsToVerify := []cm.CgroupName{cm.CgroupName(v1.PodQOSBurstable), cm.CgroupName(v1.PodQOSBestEffort)}
pod := makePodToVerifyCgroups(cgroupsToVerify)
f.PodClient().Create(pod)
err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)