Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-10 12:32:03 +00:00
Code review fix: Move GetPodQOS code to ComputePodQOS. If set, return PodStatus.QOSClass from GetPodQOS.
Commit: 4063ca4050
Parent: 5d4410b960
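This commit splits the old single helper into two functions: GetPodQOS becomes a cheap accessor that returns the QoS class already persisted in PodStatus.QOSClass, while ComputePodQOS keeps the expensive per-container computation as the fallback. The following minimal Go sketch illustrates the pattern with simplified stand-in types (an illustration only; the real code uses core.Pod and core.PodQOSClass from the Kubernetes API packages):

    package main

    import "fmt"

    // Simplified stand-ins for the Kubernetes API types.
    type PodQOSClass string

    const (
        PodQOSGuaranteed PodQOSClass = "Guaranteed"
        PodQOSBurstable  PodQOSClass = "Burstable"
        PodQOSBestEffort PodQOSClass = "BestEffort"
    )

    type PodStatus struct{ QOSClass PodQOSClass }
    type Pod struct{ Status PodStatus }

    // GetPodQOS prefers the class already persisted in the status and only
    // falls back to the expensive computation when the field is unset.
    func GetPodQOS(pod *Pod) PodQOSClass {
        if pod.Status.QOSClass != "" {
            return pod.Status.QOSClass
        }
        return ComputePodQOS(pod)
    }

    // ComputePodQOS stands in for the real per-container computation,
    // which walks every container's requests and limits.
    func ComputePodQOS(pod *Pod) PodQOSClass { return PodQOSBestEffort }

    func main() {
        cached := &Pod{Status: PodStatus{QOSClass: PodQOSGuaranteed}}
        fmt.Println(GetPodQOS(cached))  // "Guaranteed": served from the status field
        fmt.Println(GetPodQOS(&Pod{})) // "BestEffort": falls back to ComputePodQOS
    }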
@@ -30,21 +30,21 @@ func isSupportedQoSComputeResource(name core.ResourceName) bool {
     return supportedQoSComputeResources.Has(string(name))
 }
 
-// PodQOSClass returns the QoS class of a pod persisted in the PodStatus.
-// If QOSClass is empty, it returns value of GetPodQOS() which computes pod's QoS class.
-func PodQOSClass(pod *core.Pod) core.PodQOSClass {
+// GetPodQOS returns the QoS class of a pod persisted in the PodStatus.QOSClass field.
+// If PodStatus.QOSClass is empty, it returns value of ComputePodQOS() which evaluates pod's QoS class.
+func GetPodQOS(pod *core.Pod) core.PodQOSClass {
     if pod.Status.QOSClass != "" {
         return pod.Status.QOSClass
     }
-    return GetPodQOS(pod)
+    return ComputePodQOS(pod)
 }
 
-// GetPodQOS returns the QoS class of a pod.
+// ComputePodQOS evaluates the list of containers to determine a pod's QoS class. This function is expensive.
 // A pod is besteffort if none of its containers have specified any requests or limits.
 // A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.
 // A pod is burstable if limits and requests do not match across all containers.
 // When this function is updated please also update staging/src/k8s.io/kubectl/pkg/util/qos/qos.go
-func GetPodQOS(pod *core.Pod) core.PodQOSClass {
+func ComputePodQOS(pod *core.Pod) core.PodQOSClass {
     requests := core.ResourceList{}
     limits := core.ResourceList{}
     zeroQuantity := resource.MustParse("0")
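The doc comment above encodes three classification rules. As a worked illustration, here is a self-contained sketch that applies the same rules to simplified containers (an assumption-laden model: requests and limits are plain maps over "cpu" and "memory" rather than core.ResourceList, and quantities are compared as integers):

    package main

    import "fmt"

    // A container's resource requests and limits, in arbitrary units.
    type Container struct {
        Requests map[string]int64
        Limits   map[string]int64
    }

    // classify applies the documented rules to a pod's containers.
    func classify(containers []Container) string {
        allGuaranteed := true
        anySet := false
        for _, c := range containers {
            if len(c.Requests) > 0 || len(c.Limits) > 0 {
                anySet = true
            }
            for _, res := range []string{"cpu", "memory"} {
                req, hasReq := c.Requests[res]
                lim, hasLim := c.Limits[res]
                if !hasReq || !hasLim || req != lim {
                    allGuaranteed = false
                }
            }
        }
        switch {
        case !anySet:
            return "BestEffort" // no container asked for anything
        case allGuaranteed:
            return "Guaranteed" // every container: requests == limits for cpu and memory
        default:
            return "Burstable"
        }
    }

    func main() {
        fmt.Println(classify([]Container{{}})) // BestEffort
        fmt.Println(classify([]Container{{
            Requests: map[string]int64{"cpu": 500, "memory": 256},
            Limits:   map[string]int64{"cpu": 500, "memory": 256},
        }})) // Guaranteed
        fmt.Println(classify([]Container{{
            Requests: map[string]int64{"cpu": 250},
        }})) // Burstable: requests set without matching limits
    }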
@@ -32,20 +32,20 @@ func isSupportedQoSComputeResource(name v1.ResourceName) bool {
     return supportedQoSComputeResources.Has(string(name))
 }
 
-// PodQOSClass returns the QoS class of a pod persisted in the PodStatus.
-// If QOSClass is empty, it returns value of GetPodQOS() which computes pod's QoS class.
-func PodQOSClass(pod *v1.Pod) v1.PodQOSClass {
+// GetPodQOS returns the QoS class of a pod persisted in the PodStatus.QOSClass field.
+// If PodStatus.QOSClass is empty, it returns value of ComputePodQOS() which evaluates pod's QoS class.
+func GetPodQOS(pod *v1.Pod) v1.PodQOSClass {
     if pod.Status.QOSClass != "" {
         return pod.Status.QOSClass
     }
-    return GetPodQOS(pod)
+    return ComputePodQOS(pod)
 }
 
-// GetPodQOS returns the QoS class of a pod.
+// ComputePodQOS evaluates the list of containers to determine a pod's QoS class. This function is expensive.
 // A pod is besteffort if none of its containers have specified any requests or limits.
 // A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.
 // A pod is burstable if limits and requests do not match across all containers.
-func GetPodQOS(pod *v1.Pod) v1.PodQOSClass {
+func ComputePodQOS(pod *v1.Pod) v1.PodQOSClass {
     requests := v1.ResourceList{}
     limits := v1.ResourceList{}
     zeroQuantity := resource.MustParse("0")
@@ -27,7 +27,7 @@ import (
     corev1 "k8s.io/kubernetes/pkg/apis/core/v1"
 )
 
-func TestGetPodQOS(t *testing.T) {
+func TestComputePodQOS(t *testing.T) {
     testCases := []struct {
         pod      *v1.Pod
         expected v1.PodQOSClass
@@ -128,15 +128,15 @@ func TestGetPodQOS(t *testing.T) {
         },
     }
     for id, testCase := range testCases {
-        if actual := GetPodQOS(testCase.pod); testCase.expected != actual {
+        if actual := ComputePodQOS(testCase.pod); testCase.expected != actual {
             t.Errorf("[%d]: invalid qos pod %s, expected: %s, actual: %s", id, testCase.pod.Name, testCase.expected, actual)
         }
 
-        // Convert v1.Pod to core.Pod, and then check against `core.helper.GetPodQOS`.
+        // Convert v1.Pod to core.Pod, and then check against `core.helper.ComputePodQOS`.
         pod := core.Pod{}
         corev1.Convert_v1_Pod_To_core_Pod(testCase.pod, &pod, nil)
 
-        if actual := qos.GetPodQOS(&pod); core.PodQOSClass(testCase.expected) != actual {
+        if actual := qos.ComputePodQOS(&pod); core.PodQOSClass(testCase.expected) != actual {
             t.Errorf("[%d]: conversion invalid qos pod %s, expected: %s, actual: %s", id, testCase.pod.Name, testCase.expected, actual)
         }
     }
@@ -4785,7 +4785,7 @@ func ValidatePodUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel
         return allErrs
     }
 
-    if qos.PodQOSClass(oldPod) != qos.GetPodQOS(newPod) {
+    if qos.GetPodQOS(oldPod) != qos.ComputePodQOS(newPod) {
         allErrs = append(allErrs, field.Invalid(fldPath, newPod.Status.QOSClass, "Pod QoS is immutable"))
     }
 
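Note the asymmetry the updated check introduces: the old pod's class may be read from its persisted status (GetPodQOS), while the new pod's class is recomputed from its spec (ComputePodQOS), presumably so a stale status value cannot hide a spec change that would alter the class. A hedged sketch of the enforced invariant, reusing the simplified types from the first example (validateQOSImmutable is a hypothetical name; the real check lives inline in ValidatePodUpdate):

    // Hypothetical helper illustrating the immutability rule enforced above.
    func validateQOSImmutable(oldPod, newPod *Pod) error {
        // Old pod: trust the persisted status; new pod: recompute from the
        // updated spec so a stale status value cannot mask a QoS change.
        if GetPodQOS(oldPod) != ComputePodQOS(newPod) {
            return fmt.Errorf("Pod QoS is immutable")
        }
        return nil
    }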
@@ -419,7 +419,7 @@ func (p *staticPolicy) allocateCPUs(s state.State, numCPUs int, numaAffinity bit
 }
 
 func (p *staticPolicy) guaranteedCPUs(pod *v1.Pod, container *v1.Container) int {
-    if v1qos.PodQOSClass(pod) != v1.PodQOSGuaranteed {
+    if v1qos.GetPodQOS(pod) != v1.PodQOSGuaranteed {
         return 0
     }
     cpuQuantity := container.Resources.Requests[v1.ResourceCPU]
@@ -165,7 +165,7 @@ func ResourceConfigForPod(pod *v1.Pod, enforceCPULimits bool, cpuPeriod uint64,
     }
 
     // determine the qos class
-    qosClass := v1qos.PodQOSClass(pod)
+    qosClass := v1qos.GetPodQOS(pod)
 
     // build the result
     result := &ResourceConfig{}
@@ -93,7 +93,7 @@ func (p *staticPolicy) Start(s state.State) error {
 // Allocate call is idempotent
 func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Container) error {
     // allocate the memory only for guaranteed pods
-    if v1qos.PodQOSClass(pod) != v1.PodQOSGuaranteed {
+    if v1qos.GetPodQOS(pod) != v1.PodQOSGuaranteed {
         return nil
     }
 
@@ -362,7 +362,7 @@ func getPodRequestedResources(pod *v1.Pod) (map[v1.ResourceName]uint64, error) {
 }
 
 func (p *staticPolicy) GetPodTopologyHints(s state.State, pod *v1.Pod) map[string][]topologymanager.TopologyHint {
-    if v1qos.PodQOSClass(pod) != v1.PodQOSGuaranteed {
+    if v1qos.GetPodQOS(pod) != v1.PodQOSGuaranteed {
         return nil
     }
 
@@ -390,7 +390,7 @@ func (p *staticPolicy) GetPodTopologyHints(s state.State, pod *v1.Pod) map[strin
 // and is consulted to achieve NUMA aware resource alignment among this
 // and other resource controllers.
 func (p *staticPolicy) GetTopologyHints(s state.State, pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
-    if v1qos.PodQOSClass(pod) != v1.PodQOSGuaranteed {
+    if v1qos.GetPodQOS(pod) != v1.PodQOSGuaranteed {
         return nil
     }
 
@@ -99,7 +99,7 @@ func (m *podContainerManagerImpl) EnsureExists(pod *v1.Pod) error {
 
 // GetPodContainerName returns the CgroupName identifier, and its literal cgroupfs form on the host.
 func (m *podContainerManagerImpl) GetPodContainerName(pod *v1.Pod) (CgroupName, string) {
-    podQOS := v1qos.PodQOSClass(pod)
+    podQOS := v1qos.GetPodQOS(pod)
     // Get the parent QOS container name
     var parentContainer CgroupName
     switch podQOS {
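For orientation, the switch on podQOS above selects the parent cgroup under which the pod's cgroup is created. A simplified sketch of the mapping, reusing the PodQOSClass constants from the first example (the paths are illustrative cgroupfs names; the real code builds CgroupName values rather than raw strings):

    // Illustrative mapping from QoS class to the parent cgroup path.
    func parentCgroupFor(class PodQOSClass) string {
        switch class {
        case PodQOSGuaranteed:
            return "/kubepods" // Guaranteed pods sit directly under the kubepods root
        case PodQOSBurstable:
            return "/kubepods/burstable"
        default:
            return "/kubepods/besteffort" // BestEffort pods share the lowest tier
        }
    }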
@@ -173,7 +173,7 @@ func (m *qosContainerManagerImpl) setCPUCgroupConfig(configs map[v1.PodQOSClass]
     reuseReqs := make(v1.ResourceList, 4)
     for i := range pods {
         pod := pods[i]
-        qosClass := v1qos.PodQOSClass(pod)
+        qosClass := v1qos.GetPodQOS(pod)
         if qosClass != v1.PodQOSBurstable {
             // we only care about the burstable qos tier
             continue
@@ -207,7 +207,7 @@ func (m *qosContainerManagerImpl) getQoSMemoryRequests() map[v1.PodQOSClass]int6
     reuseReqs := make(v1.ResourceList, 4)
     for _, pod := range pods {
         podMemoryRequest := int64(0)
-        qosClass := v1qos.PodQOSClass(pod)
+        qosClass := v1qos.GetPodQOS(pod)
         if qosClass == v1.PodQOSBestEffort {
             // limits are not set for Best Effort pods
             continue
@@ -151,7 +151,7 @@ func (m *managerImpl) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAd
     // Conditions other than memory pressure reject all pods
     nodeOnlyHasMemoryPressureCondition := hasNodeCondition(m.nodeConditions, v1.NodeMemoryPressure) && len(m.nodeConditions) == 1
     if nodeOnlyHasMemoryPressureCondition {
-        notBestEffort := v1.PodQOSBestEffort != v1qos.PodQOSClass(attrs.Pod)
+        notBestEffort := v1.PodQOSBestEffort != v1qos.GetPodQOS(attrs.Pod)
         if notBestEffort {
             return lifecycle.PodAdmitResult{Admit: true}
         }
@@ -1839,7 +1839,7 @@ func (kl *Kubelet) convertStatusToAPIStatus(pod *v1.Pod, podStatus *kubecontaine
     }
 
     // set status for Pods created on versions of kube older than 1.6
-    apiPodStatus.QOSClass = v1qos.PodQOSClass(pod)
+    apiPodStatus.QOSClass = v1qos.GetPodQOS(pod)
 
     apiPodStatus.ContainerStatuses = kl.convertToAPIContainerStatuses(
         pod, podStatus,
@@ -247,7 +247,7 @@ func (a admissionRequirementList) toString() string {
 func sortPodsByQOS(preemptor *v1.Pod, pods []*v1.Pod) (bestEffort, burstable, guaranteed []*v1.Pod) {
     for _, pod := range pods {
         if kubetypes.Preemptable(preemptor, pod) {
-            switch v1qos.PodQOSClass(pod) {
+            switch v1qos.GetPodQOS(pod) {
             case v1.PodQOSBestEffort:
                 bestEffort = append(bestEffort, pod)
             case v1.PodQOSBurstable:
@@ -46,7 +46,7 @@ func GetContainerOOMScoreAdjust(pod *v1.Pod, container *v1.Container, memoryCapa
         return guaranteedOOMScoreAdj
     }
 
-    switch v1qos.PodQOSClass(pod) {
+    switch v1qos.GetPodQOS(pod) {
     case v1.PodQOSGuaranteed:
         // Guaranteed containers should be the last to get killed.
         return guaranteedOOMScoreAdj
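Background for this hunk: the OOM score adjustment is a function of the pod's QoS class, so the rename changes only where the class comes from, not how scores are assigned. A hedged sketch of the scheme (the constants match upstream Kubernetes at the time of this commit; the burstable formula is simplified and omits the clamping the real code applies):

    const (
        guaranteedOOMScoreAdj = -997 // Guaranteed containers are killed last
        besteffortOOMScoreAdj = 1000 // BestEffort containers are killed first
    )

    // Simplified burstable calculation: the larger the memory request
    // relative to node capacity, the less likely the OOM killer picks it.
    func burstableOOMScoreAdj(memoryRequest, memoryCapacity int64) int {
        return int(1000 - (1000*memoryRequest)/memoryCapacity)
    }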
@@ -375,7 +375,7 @@ func PodUsageFunc(obj runtime.Object, clock clock.Clock) (corev1.ResourceList, e
 }
 
 func isBestEffort(pod *corev1.Pod) bool {
-    return qos.PodQOSClass(pod) == corev1.PodQOSBestEffort
+    return qos.GetPodQOS(pod) == corev1.PodQOSBestEffort
 }
 
 func isTerminating(pod *corev1.Pod) bool {
@@ -99,7 +99,7 @@ func (p *Plugin) Admit(ctx context.Context, a admission.Attributes, o admission.
         extraTolerations = ts
     }
 
-    if qoshelper.PodQOSClass(pod) != api.PodQOSBestEffort {
+    if qoshelper.GetPodQOS(pod) != api.PodQOSBestEffort {
         extraTolerations = append(extraTolerations, api.Toleration{
             Key:      corev1.TaintNodeMemoryPressure,
             Operator: api.TolerationOpExists,
@@ -871,7 +871,7 @@ func describePod(pod *corev1.Pod, events *corev1.EventList) (string, error) {
         }
     }
     describeVolumes(pod.Spec.Volumes, w, "")
-    w.Write(LEVEL_0, "QoS Class:\t%s\n", qos.PodQOSClass(pod))
+    w.Write(LEVEL_0, "QoS Class:\t%s\n", qos.GetPodQOS(pod))
     printLabelsMultiline(w, "Node-Selectors", pod.Spec.NodeSelector)
     printPodTolerationsMultiline(w, "Tolerations", pod.Spec.Tolerations)
     describeTopologySpreadConstraints(pod.Spec.TopologySpreadConstraints, w, "")
@@ -28,20 +28,20 @@ func isSupportedQoSComputeResource(name core.ResourceName) bool {
     return supportedQoSComputeResources.Has(string(name))
 }
 
-// PodQOSClass returns the QoS class of a pod persisted in the PodStatus.
-// If QOSClass is empty, it returns value of GetPodQOS() which computes pod's QoS class.
-func PodQOSClass(pod *core.Pod) core.PodQOSClass {
+// GetPodQOS returns the QoS class of a pod persisted in the PodStatus.QOSClass field.
+// If PodStatus.QOSClass is empty, it returns value of ComputePodQOS() which evaluates pod's QoS class.
+func GetPodQOS(pod *core.Pod) core.PodQOSClass {
     if pod.Status.QOSClass != "" {
         return pod.Status.QOSClass
     }
-    return GetPodQOS(pod)
+    return ComputePodQOS(pod)
 }
 
-// GetPodQOS returns the QoS class of a pod.
+// ComputePodQOS evaluates the list of containers to determine a pod's QoS class. This function is expensive.
 // A pod is besteffort if none of its containers have specified any requests or limits.
 // A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.
 // A pod is burstable if limits and requests do not match across all containers.
-func GetPodQOS(pod *core.Pod) core.PodQOSClass {
+func ComputePodQOS(pod *core.Pod) core.PodQOSClass {
     requests := core.ResourceList{}
     limits := core.ResourceList{}
     zeroQuantity := resource.MustParse("0")
@@ -501,7 +501,7 @@ func computeCPUMemFraction(node v1.Node, resource *v1.ResourceRequirements, pods
     for _, pod := range pods {
         framework.Logf("Pod for on the node: %v, Cpu: %v, Mem: %v", pod.Name, getNonZeroRequests(pod).MilliCPU, getNonZeroRequests(pod).Memory)
         // Ignore best effort pods while computing fractions as they won't be taken in account by scheduler.
-        if v1qos.PodQOSClass(pod) == v1.PodQOSBestEffort {
+        if v1qos.GetPodQOS(pod) == v1.PodQOSBestEffort {
            continue
         }
         totalRequestedCPUResource += getNonZeroRequests(pod).MilliCPU