diff --git a/pkg/apis/core/helper/qos/qos.go b/pkg/apis/core/helper/qos/qos.go
index d93553c6d20..6323122f98c 100644
--- a/pkg/apis/core/helper/qos/qos.go
+++ b/pkg/apis/core/helper/qos/qos.go
@@ -30,21 +30,21 @@ func isSupportedQoSComputeResource(name core.ResourceName) bool {
 	return supportedQoSComputeResources.Has(string(name))
 }
 
-// PodQOSClass returns the QoS class of a pod persisted in the PodStatus.
-// If QOSClass is empty, it returns value of GetPodQOS() which computes pod's QoS class.
-func PodQOSClass(pod *core.Pod) core.PodQOSClass {
+// GetPodQOS returns the QoS class of a pod persisted in the PodStatus.QOSClass field.
+// If PodStatus.QOSClass is empty, it returns value of ComputePodQOS() which evaluates pod's QoS class.
+func GetPodQOS(pod *core.Pod) core.PodQOSClass {
 	if pod.Status.QOSClass != "" {
 		return pod.Status.QOSClass
 	}
-	return GetPodQOS(pod)
+	return ComputePodQOS(pod)
 }
 
-// GetPodQOS returns the QoS class of a pod.
+// ComputePodQOS evaluates the list of containers to determine a pod's QoS class. This function is expensive.
 // A pod is besteffort if none of its containers have specified any requests or limits.
 // A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.
 // A pod is burstable if limits and requests do not match across all containers.
 // When this function is updated please also update staging/src/k8s.io/kubectl/pkg/util/qos/qos.go
-func GetPodQOS(pod *core.Pod) core.PodQOSClass {
+func ComputePodQOS(pod *core.Pod) core.PodQOSClass {
 	requests := core.ResourceList{}
 	limits := core.ResourceList{}
 	zeroQuantity := resource.MustParse("0")
diff --git a/pkg/apis/core/v1/helper/qos/qos.go b/pkg/apis/core/v1/helper/qos/qos.go
index 7dde74e8644..e0db483dcca 100644
--- a/pkg/apis/core/v1/helper/qos/qos.go
+++ b/pkg/apis/core/v1/helper/qos/qos.go
@@ -32,20 +32,20 @@ func isSupportedQoSComputeResource(name v1.ResourceName) bool {
 	return supportedQoSComputeResources.Has(string(name))
 }
 
-// PodQOSClass returns the QoS class of a pod persisted in the PodStatus.
-// If QOSClass is empty, it returns value of GetPodQOS() which computes pod's QoS class.
-func PodQOSClass(pod *v1.Pod) v1.PodQOSClass {
+// GetPodQOS returns the QoS class of a pod persisted in the PodStatus.QOSClass field.
+// If PodStatus.QOSClass is empty, it returns value of ComputePodQOS() which evaluates pod's QoS class.
+func GetPodQOS(pod *v1.Pod) v1.PodQOSClass {
 	if pod.Status.QOSClass != "" {
 		return pod.Status.QOSClass
 	}
-	return GetPodQOS(pod)
+	return ComputePodQOS(pod)
 }
 
-// GetPodQOS returns the QoS class of a pod.
+// ComputePodQOS evaluates the list of containers to determine a pod's QoS class. This function is expensive.
 // A pod is besteffort if none of its containers have specified any requests or limits.
 // A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.
 // A pod is burstable if limits and requests do not match across all containers.
-func GetPodQOS(pod *v1.Pod) v1.PodQOSClass {
+func ComputePodQOS(pod *v1.Pod) v1.PodQOSClass {
 	requests := v1.ResourceList{}
 	limits := v1.ResourceList{}
 	zeroQuantity := resource.MustParse("0")
diff --git a/pkg/apis/core/v1/helper/qos/qos_test.go b/pkg/apis/core/v1/helper/qos/qos_test.go
index 6dd45f61438..d16c17a14e7 100644
--- a/pkg/apis/core/v1/helper/qos/qos_test.go
+++ b/pkg/apis/core/v1/helper/qos/qos_test.go
@@ -27,7 +27,7 @@ import (
 	corev1 "k8s.io/kubernetes/pkg/apis/core/v1"
 )
 
-func TestGetPodQOS(t *testing.T) {
+func TestComputePodQOS(t *testing.T) {
 	testCases := []struct {
 		pod      *v1.Pod
 		expected v1.PodQOSClass
@@ -128,15 +128,15 @@ func TestGetPodQOS(t *testing.T) {
 		},
 	}
 	for id, testCase := range testCases {
-		if actual := GetPodQOS(testCase.pod); testCase.expected != actual {
+		if actual := ComputePodQOS(testCase.pod); testCase.expected != actual {
 			t.Errorf("[%d]: invalid qos pod %s, expected: %s, actual: %s", id, testCase.pod.Name, testCase.expected, actual)
 		}
 
-		// Convert v1.Pod to core.Pod, and then check against `core.helper.GetPodQOS`.
+		// Convert v1.Pod to core.Pod, and then check against `core.helper.ComputePodQOS`.
 		pod := core.Pod{}
 		corev1.Convert_v1_Pod_To_core_Pod(testCase.pod, &pod, nil)
 
-		if actual := qos.GetPodQOS(&pod); core.PodQOSClass(testCase.expected) != actual {
+		if actual := qos.ComputePodQOS(&pod); core.PodQOSClass(testCase.expected) != actual {
 			t.Errorf("[%d]: conversion invalid qos pod %s, expected: %s, actual: %s", id, testCase.pod.Name, testCase.expected, actual)
 		}
 	}
diff --git a/pkg/apis/core/validation/validation.go b/pkg/apis/core/validation/validation.go
index fdf28401352..b39e59bc9d9 100644
--- a/pkg/apis/core/validation/validation.go
+++ b/pkg/apis/core/validation/validation.go
@@ -4785,7 +4785,7 @@ func ValidatePodUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel
 		return allErrs
 	}
 
-	if qos.PodQOSClass(oldPod) != qos.GetPodQOS(newPod) {
+	if qos.GetPodQOS(oldPod) != qos.ComputePodQOS(newPod) {
 		allErrs = append(allErrs, field.Invalid(fldPath, newPod.Status.QOSClass, "Pod QoS is immutable"))
 	}
 
diff --git a/pkg/kubelet/cm/cpumanager/policy_static.go b/pkg/kubelet/cm/cpumanager/policy_static.go
index b5ba3c96b48..0f72e64dbc8 100644
--- a/pkg/kubelet/cm/cpumanager/policy_static.go
+++ b/pkg/kubelet/cm/cpumanager/policy_static.go
@@ -419,7 +419,7 @@ func (p *staticPolicy) allocateCPUs(s state.State, numCPUs int, numaAffinity bit
 }
 
 func (p *staticPolicy) guaranteedCPUs(pod *v1.Pod, container *v1.Container) int {
-	if v1qos.PodQOSClass(pod) != v1.PodQOSGuaranteed {
+	if v1qos.GetPodQOS(pod) != v1.PodQOSGuaranteed {
 		return 0
 	}
 	cpuQuantity := container.Resources.Requests[v1.ResourceCPU]
diff --git a/pkg/kubelet/cm/helpers_linux.go b/pkg/kubelet/cm/helpers_linux.go
index 99fb2f055a2..8a144e7a73c 100644
--- a/pkg/kubelet/cm/helpers_linux.go
+++ b/pkg/kubelet/cm/helpers_linux.go
@@ -165,7 +165,7 @@ func ResourceConfigForPod(pod *v1.Pod, enforceCPULimits bool, cpuPeriod uint64,
 	}
 
 	// determine the qos class
-	qosClass := v1qos.PodQOSClass(pod)
+	qosClass := v1qos.GetPodQOS(pod)
 
 	// build the result
 	result := &ResourceConfig{}
diff --git a/pkg/kubelet/cm/memorymanager/policy_static.go b/pkg/kubelet/cm/memorymanager/policy_static.go
index daa2597ea2b..54391235e60 100644
--- a/pkg/kubelet/cm/memorymanager/policy_static.go
+++ b/pkg/kubelet/cm/memorymanager/policy_static.go
@@ -93,7 +93,7 @@ func (p *staticPolicy) Start(s state.State) error {
 
 // Allocate call is idempotent
 func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Container) error {
 	// allocate the memory only for guaranteed pods
-	if v1qos.PodQOSClass(pod) != v1.PodQOSGuaranteed {
+	if v1qos.GetPodQOS(pod) != v1.PodQOSGuaranteed {
 		return nil
 	}
 
@@ -362,7 +362,7 @@ func getPodRequestedResources(pod *v1.Pod) (map[v1.ResourceName]uint64, error) {
 }
 
 func (p *staticPolicy) GetPodTopologyHints(s state.State, pod *v1.Pod) map[string][]topologymanager.TopologyHint {
-	if v1qos.PodQOSClass(pod) != v1.PodQOSGuaranteed {
+	if v1qos.GetPodQOS(pod) != v1.PodQOSGuaranteed {
 		return nil
 	}
 
@@ -390,7 +390,7 @@ func (p *staticPolicy) GetPodTopologyHints(s state.State, pod *v1.Pod) map[strin
 // and is consulted to achieve NUMA aware resource alignment among this
 // and other resource controllers.
 func (p *staticPolicy) GetTopologyHints(s state.State, pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
-	if v1qos.PodQOSClass(pod) != v1.PodQOSGuaranteed {
+	if v1qos.GetPodQOS(pod) != v1.PodQOSGuaranteed {
 		return nil
 	}
 
diff --git a/pkg/kubelet/cm/pod_container_manager_linux.go b/pkg/kubelet/cm/pod_container_manager_linux.go
index 268ba92bb4b..dae7f8bd3d4 100644
--- a/pkg/kubelet/cm/pod_container_manager_linux.go
+++ b/pkg/kubelet/cm/pod_container_manager_linux.go
@@ -99,7 +99,7 @@ func (m *podContainerManagerImpl) EnsureExists(pod *v1.Pod) error {
 
 // GetPodContainerName returns the CgroupName identifier, and its literal cgroupfs form on the host.
 func (m *podContainerManagerImpl) GetPodContainerName(pod *v1.Pod) (CgroupName, string) {
-	podQOS := v1qos.PodQOSClass(pod)
+	podQOS := v1qos.GetPodQOS(pod)
 	// Get the parent QOS container name
 	var parentContainer CgroupName
 	switch podQOS {
diff --git a/pkg/kubelet/cm/qos_container_manager_linux.go b/pkg/kubelet/cm/qos_container_manager_linux.go
index 91deddded30..abf4487ee5d 100644
--- a/pkg/kubelet/cm/qos_container_manager_linux.go
+++ b/pkg/kubelet/cm/qos_container_manager_linux.go
@@ -173,7 +173,7 @@ func (m *qosContainerManagerImpl) setCPUCgroupConfig(configs map[v1.PodQOSClass]
 	reuseReqs := make(v1.ResourceList, 4)
 	for i := range pods {
 		pod := pods[i]
-		qosClass := v1qos.PodQOSClass(pod)
+		qosClass := v1qos.GetPodQOS(pod)
 		if qosClass != v1.PodQOSBurstable {
 			// we only care about the burstable qos tier
 			continue
@@ -207,7 +207,7 @@ func (m *qosContainerManagerImpl) getQoSMemoryRequests() map[v1.PodQOSClass]int6
 	reuseReqs := make(v1.ResourceList, 4)
 	for _, pod := range pods {
 		podMemoryRequest := int64(0)
-		qosClass := v1qos.PodQOSClass(pod)
+		qosClass := v1qos.GetPodQOS(pod)
 		if qosClass == v1.PodQOSBestEffort {
 			// limits are not set for Best Effort pods
 			continue
diff --git a/pkg/kubelet/eviction/eviction_manager.go b/pkg/kubelet/eviction/eviction_manager.go
index c29b47ffcd3..e47b37a0d05 100644
--- a/pkg/kubelet/eviction/eviction_manager.go
+++ b/pkg/kubelet/eviction/eviction_manager.go
@@ -151,7 +151,7 @@ func (m *managerImpl) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAd
 	// Conditions other than memory pressure reject all pods
 	nodeOnlyHasMemoryPressureCondition := hasNodeCondition(m.nodeConditions, v1.NodeMemoryPressure) && len(m.nodeConditions) == 1
 	if nodeOnlyHasMemoryPressureCondition {
-		notBestEffort := v1.PodQOSBestEffort != v1qos.PodQOSClass(attrs.Pod)
+		notBestEffort := v1.PodQOSBestEffort != v1qos.GetPodQOS(attrs.Pod)
 		if notBestEffort {
 			return lifecycle.PodAdmitResult{Admit: true}
 		}
diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go
index ea5f9ee28b0..03d8dc987b2 100644
--- a/pkg/kubelet/kubelet_pods.go
+++ b/pkg/kubelet/kubelet_pods.go
@@ -1839,7 +1839,7 @@ func (kl *Kubelet) convertStatusToAPIStatus(pod *v1.Pod, podStatus *kubecontaine
 	}
 
 	// set status for Pods created on versions of kube older than 1.6
-	apiPodStatus.QOSClass = v1qos.PodQOSClass(pod)
+	apiPodStatus.QOSClass = v1qos.GetPodQOS(pod)
 
 	apiPodStatus.ContainerStatuses = kl.convertToAPIContainerStatuses(
 		pod, podStatus,
diff --git a/pkg/kubelet/preemption/preemption.go b/pkg/kubelet/preemption/preemption.go
index 92f379537fb..e4d0cbd931b 100644
--- a/pkg/kubelet/preemption/preemption.go
+++ b/pkg/kubelet/preemption/preemption.go
@@ -247,7 +247,7 @@ func (a admissionRequirementList) toString() string {
 func sortPodsByQOS(preemptor *v1.Pod, pods []*v1.Pod) (bestEffort, burstable, guaranteed []*v1.Pod) {
 	for _, pod := range pods {
 		if kubetypes.Preemptable(preemptor, pod) {
-			switch v1qos.PodQOSClass(pod) {
+			switch v1qos.GetPodQOS(pod) {
 			case v1.PodQOSBestEffort:
 				bestEffort = append(bestEffort, pod)
 			case v1.PodQOSBurstable:
diff --git a/pkg/kubelet/qos/policy.go b/pkg/kubelet/qos/policy.go
index fea10465e9c..7117be21255 100644
--- a/pkg/kubelet/qos/policy.go
+++ b/pkg/kubelet/qos/policy.go
@@ -46,7 +46,7 @@ func GetContainerOOMScoreAdjust(pod *v1.Pod, container *v1.Container, memoryCapa
 		return guaranteedOOMScoreAdj
 	}
 
-	switch v1qos.PodQOSClass(pod) {
+	switch v1qos.GetPodQOS(pod) {
 	case v1.PodQOSGuaranteed:
 		// Guaranteed containers should be the last to get killed.
 		return guaranteedOOMScoreAdj
diff --git a/pkg/quota/v1/evaluator/core/pods.go b/pkg/quota/v1/evaluator/core/pods.go
index 90bbd96880e..a201c58383e 100644
--- a/pkg/quota/v1/evaluator/core/pods.go
+++ b/pkg/quota/v1/evaluator/core/pods.go
@@ -375,7 +375,7 @@ func PodUsageFunc(obj runtime.Object, clock clock.Clock) (corev1.ResourceList, e
 }
 
 func isBestEffort(pod *corev1.Pod) bool {
-	return qos.PodQOSClass(pod) == corev1.PodQOSBestEffort
+	return qos.GetPodQOS(pod) == corev1.PodQOSBestEffort
 }
 
 func isTerminating(pod *corev1.Pod) bool {
diff --git a/plugin/pkg/admission/podtolerationrestriction/admission.go b/plugin/pkg/admission/podtolerationrestriction/admission.go
index 7dcf7c8690b..4e20ee96558 100644
--- a/plugin/pkg/admission/podtolerationrestriction/admission.go
+++ b/plugin/pkg/admission/podtolerationrestriction/admission.go
@@ -99,7 +99,7 @@ func (p *Plugin) Admit(ctx context.Context, a admission.Attributes, o admission.
 		extraTolerations = ts
 	}
 
-	if qoshelper.PodQOSClass(pod) != api.PodQOSBestEffort {
+	if qoshelper.GetPodQOS(pod) != api.PodQOSBestEffort {
 		extraTolerations = append(extraTolerations, api.Toleration{
 			Key:      corev1.TaintNodeMemoryPressure,
 			Operator: api.TolerationOpExists,
diff --git a/staging/src/k8s.io/kubectl/pkg/describe/describe.go b/staging/src/k8s.io/kubectl/pkg/describe/describe.go
index faf3696344c..7b88f0f9184 100644
--- a/staging/src/k8s.io/kubectl/pkg/describe/describe.go
+++ b/staging/src/k8s.io/kubectl/pkg/describe/describe.go
@@ -871,7 +871,7 @@ func describePod(pod *corev1.Pod, events *corev1.EventList) (string, error) {
 		}
 	}
 	describeVolumes(pod.Spec.Volumes, w, "")
-	w.Write(LEVEL_0, "QoS Class:\t%s\n", qos.PodQOSClass(pod))
+	w.Write(LEVEL_0, "QoS Class:\t%s\n", qos.GetPodQOS(pod))
 	printLabelsMultiline(w, "Node-Selectors", pod.Spec.NodeSelector)
 	printPodTolerationsMultiline(w, "Tolerations", pod.Spec.Tolerations)
 	describeTopologySpreadConstraints(pod.Spec.TopologySpreadConstraints, w, "")
diff --git a/staging/src/k8s.io/kubectl/pkg/util/qos/qos.go b/staging/src/k8s.io/kubectl/pkg/util/qos/qos.go
index 87183a095a6..16a8846a799 100644
--- a/staging/src/k8s.io/kubectl/pkg/util/qos/qos.go
+++ b/staging/src/k8s.io/kubectl/pkg/util/qos/qos.go
@@ -28,20 +28,20 @@ func isSupportedQoSComputeResource(name core.ResourceName) bool {
 	return supportedQoSComputeResources.Has(string(name))
 }
 
-// PodQOSClass returns the QoS class of a pod persisted in the PodStatus.
-// If QOSClass is empty, it returns value of GetPodQOS() which computes pod's QoS class.
-func PodQOSClass(pod *core.Pod) core.PodQOSClass {
+// GetPodQOS returns the QoS class of a pod persisted in the PodStatus.QOSClass field.
+// If PodStatus.QOSClass is empty, it returns value of ComputePodQOS() which evaluates pod's QoS class.
+func GetPodQOS(pod *core.Pod) core.PodQOSClass {
 	if pod.Status.QOSClass != "" {
 		return pod.Status.QOSClass
 	}
-	return GetPodQOS(pod)
+	return ComputePodQOS(pod)
 }
 
-// GetPodQOS returns the QoS class of a pod.
+// ComputePodQOS evaluates the list of containers to determine a pod's QoS class. This function is expensive.
 // A pod is besteffort if none of its containers have specified any requests or limits.
 // A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.
 // A pod is burstable if limits and requests do not match across all containers.
-func GetPodQOS(pod *core.Pod) core.PodQOSClass {
+func ComputePodQOS(pod *core.Pod) core.PodQOSClass {
 	requests := core.ResourceList{}
 	limits := core.ResourceList{}
 	zeroQuantity := resource.MustParse("0")
diff --git a/test/e2e/scheduling/priorities.go b/test/e2e/scheduling/priorities.go
index ff0cfd78592..63197bc67fa 100644
--- a/test/e2e/scheduling/priorities.go
+++ b/test/e2e/scheduling/priorities.go
@@ -501,7 +501,7 @@ func computeCPUMemFraction(node v1.Node, resource *v1.ResourceRequirements, pods
 	for _, pod := range pods {
 		framework.Logf("Pod for on the node: %v, Cpu: %v, Mem: %v", pod.Name, getNonZeroRequests(pod).MilliCPU, getNonZeroRequests(pod).Memory)
 		// Ignore best effort pods while computing fractions as they won't be taken in account by scheduler.
-		if v1qos.PodQOSClass(pod) == v1.PodQOSBestEffort {
+		if v1qos.GetPodQOS(pod) == v1.PodQOSBestEffort {
 			continue
 		}
 		totalRequestedCPUResource += getNonZeroRequests(pod).MilliCPU
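
Illustrative usage (not part of the diff): a minimal sketch of the renamed helpers, calling the staging copy at k8s.io/kubectl/pkg/util/qos shown above. After this change, GetPodQOS returns the class persisted in PodStatus.QOSClass when set, and only falls back to the expensive ComputePodQOS walk over the containers. The pod shape below is a made-up example, not taken from the PR.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/kubectl/pkg/util/qos"
)

func main() {
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name: "app",
				Resources: v1.ResourceRequirements{
					// Requests equal to limits for every container => Guaranteed.
					Requests: v1.ResourceList{
						v1.ResourceCPU:    resource.MustParse("500m"),
						v1.ResourceMemory: resource.MustParse("128Mi"),
					},
					Limits: v1.ResourceList{
						v1.ResourceCPU:    resource.MustParse("500m"),
						v1.ResourceMemory: resource.MustParse("128Mi"),
					},
				},
			}},
		},
	}

	// Status.QOSClass is empty here, so GetPodQOS falls through to ComputePodQOS.
	fmt.Println(qos.GetPodQOS(pod)) // Guaranteed

	// Once a class has been persisted in the status, GetPodQOS returns it
	// without re-evaluating the containers; ComputePodQOS always recomputes.
	pod.Status.QOSClass = v1.PodQOSBurstable
	fmt.Println(qos.GetPodQOS(pod))     // Burstable (persisted value)
	fmt.Println(qos.ComputePodQOS(pod)) // Guaranteed (recomputed from spec)
}

This is also why ValidatePodUpdate above compares qos.GetPodQOS(oldPod) against qos.ComputePodQOS(newPod): the old pod's class can be trusted from its status, while the new spec must be re-evaluated to prove the QoS class has not changed.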