Perf optimization: Move away from GetPodQOS, using PodStatus.QOSClass instead

vinay kulkarni 2023-07-29 13:30:09 +00:00
parent 2c6c4566ef
commit 5d4410b960
17 changed files with 45 additions and 33 deletions

View File

@@ -30,6 +30,15 @@ func isSupportedQoSComputeResource(name core.ResourceName) bool {
return supportedQoSComputeResources.Has(string(name))
}
+// PodQOSClass returns the QoS class of a pod persisted in its PodStatus.
+// If QOSClass is empty, it returns the value of GetPodQOS(), which computes the pod's QoS class.
+func PodQOSClass(pod *core.Pod) core.PodQOSClass {
+if pod.Status.QOSClass != "" {
+return pod.Status.QOSClass
+}
+return GetPodQOS(pod)
+}
// GetPodQOS returns the QoS class of a pod.
// A pod is besteffort if none of its containers have specified any requests or limits.
// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.
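The new helper is the heart of the change: QOSClass is written into PodStatus once at pod creation and never changes afterward, so callers can read it instead of re-walking every container's requests and limits. The sketch below is illustrative only; it uses stand-in types and a deliberately simplified classification rule rather than the real k8s.io/api types and GetPodQOS, but it shows the intended fallback behavior (in the real code the fallback delegates to GetPodQOS, so behavior is unchanged for pods whose status has not been populated yet).

package main

import "fmt"

// QOSClass mirrors core.PodQOSClass for this sketch.
type QOSClass string

const (
	Guaranteed QOSClass = "Guaranteed"
	Burstable  QOSClass = "Burstable"
	BestEffort QOSClass = "BestEffort"
)

// Pod is a stand-in carrying only the fields this sketch needs.
type Pod struct {
	StatusQOSClass QOSClass          // corresponds to pod.Status.QOSClass
	Requests       map[string]string // per-resource requests across all containers
	Limits         map[string]string // per-resource limits across all containers
}

// computeQOS is a deliberately simplified stand-in for GetPodQOS: BestEffort
// when nothing is requested or limited, Guaranteed when every request has an
// equal limit, Burstable otherwise.
func computeQOS(p Pod) QOSClass {
	if len(p.Requests) == 0 && len(p.Limits) == 0 {
		return BestEffort
	}
	if len(p.Requests) != len(p.Limits) {
		return Burstable
	}
	for name, req := range p.Requests {
		if p.Limits[name] != req {
			return Burstable
		}
	}
	return Guaranteed
}

// podQOSClass mirrors the helper added in this commit: prefer the class
// persisted in status, recompute only when it has not been set yet.
func podQOSClass(p Pod) QOSClass {
	if p.StatusQOSClass != "" {
		return p.StatusQOSClass
	}
	return computeQOS(p)
}

func main() {
	// Status already populated (the normal case after pod creation):
	// the persisted value is returned without walking the containers.
	persisted := Pod{
		StatusQOSClass: Guaranteed,
		Requests:       map[string]string{"cpu": "2", "memory": "1Gi"},
		Limits:         map[string]string{"cpu": "2", "memory": "1Gi"},
	}
	fmt.Println(podQOSClass(persisted)) // Guaranteed

	// Status not yet set: fall back to computing the class from the spec.
	fresh := Pod{
		Requests: map[string]string{"cpu": "1"},
		Limits:   map[string]string{"cpu": "2"},
	}
	fmt.Println(podQOSClass(fresh)) // Burstable
}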

View File

@@ -32,6 +32,15 @@ func isSupportedQoSComputeResource(name v1.ResourceName) bool {
return supportedQoSComputeResources.Has(string(name))
}
+// PodQOSClass returns the QoS class of a pod persisted in its PodStatus.
+// If QOSClass is empty, it returns the value of GetPodQOS(), which computes the pod's QoS class.
+func PodQOSClass(pod *v1.Pod) v1.PodQOSClass {
+if pod.Status.QOSClass != "" {
+return pod.Status.QOSClass
+}
+return GetPodQOS(pod)
+}
// GetPodQOS returns the QoS class of a pod.
// A pod is besteffort if none of its containers have specified any requests or limits.
// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.

View File

@@ -4785,19 +4785,8 @@ func ValidatePodUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel
return allErrs
}
-//TODO(vinaykul,InPlacePodVerticalScaling): With KEP 2527, we can rely on persistence of PodStatus.QOSClass
-// We can use PodStatus.QOSClass instead of GetPodQOS here, in kubelet, and elsewhere, as PodStatus.QOSClass
-// does not change once it is bootstrapped in podCreate. This needs to be addressed before beta as a
-// separate PR covering all uses of GetPodQOS. With that change, we can drop the below block.
-// Ref: https://github.com/kubernetes/kubernetes/pull/102884#discussion_r1093790446
-// Ref: https://github.com/kubernetes/kubernetes/pull/102884/#discussion_r663280487
if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
// reject attempts to change pod qos
-oldQoS := qos.GetPodQOS(oldPod)
-newQoS := qos.GetPodQOS(newPod)
-if newQoS != oldQoS {
-allErrs = append(allErrs, field.Invalid(fldPath, newQoS, "Pod QoS is immutable"))
-}
+if qos.PodQOSClass(oldPod) != qos.GetPodQOS(newPod) {
+allErrs = append(allErrs, field.Invalid(fldPath, newPod.Status.QOSClass, "Pod QoS is immutable"))
+}
// handle updateable fields by munging those fields prior to deep equal comparison.
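Note the deliberate asymmetry in the rewritten check: the old pod's class is read from its persisted status via PodQOSClass (a constant-time lookup, since QOSClass is bootstrapped at pod creation), while the new pod's class is recomputed from its updated spec with GetPodQOS, so a resize that would move the pod into a different QoS tier is rejected. A sketch of that logic with named intermediates, reusing only identifiers visible in the hunk above, not the actual validation code:

if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
	// reject attempts to change pod qos
	oldClass := qos.PodQOSClass(oldPod) // persisted at creation; O(1) field read
	newClass := qos.GetPodQOS(newPod)   // recomputed from the spec the update proposes
	if oldClass != newClass {
		allErrs = append(allErrs, field.Invalid(fldPath, newPod.Status.QOSClass, "Pod QoS is immutable"))
	}
}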

View File

@@ -419,7 +419,7 @@ func (p *staticPolicy) allocateCPUs(s state.State, numCPUs int, numaAffinity bit
}
func (p *staticPolicy) guaranteedCPUs(pod *v1.Pod, container *v1.Container) int {
-if v1qos.GetPodQOS(pod) != v1.PodQOSGuaranteed {
+if v1qos.PodQOSClass(pod) != v1.PodQOSGuaranteed {
return 0
}
cpuQuantity := container.Resources.Requests[v1.ResourceCPU]

View File

@@ -165,7 +165,7 @@ func ResourceConfigForPod(pod *v1.Pod, enforceCPULimits bool, cpuPeriod uint64,
}
// determine the qos class
-qosClass := v1qos.GetPodQOS(pod)
+qosClass := v1qos.PodQOSClass(pod)
// build the result
result := &ResourceConfig{}

View File

@@ -93,7 +93,7 @@ func (p *staticPolicy) Start(s state.State) error {
// Allocate call is idempotent
func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Container) error {
// allocate the memory only for guaranteed pods
-if v1qos.GetPodQOS(pod) != v1.PodQOSGuaranteed {
+if v1qos.PodQOSClass(pod) != v1.PodQOSGuaranteed {
return nil
}
@@ -362,7 +362,7 @@ func getPodRequestedResources(pod *v1.Pod) (map[v1.ResourceName]uint64, error) {
}
func (p *staticPolicy) GetPodTopologyHints(s state.State, pod *v1.Pod) map[string][]topologymanager.TopologyHint {
-if v1qos.GetPodQOS(pod) != v1.PodQOSGuaranteed {
+if v1qos.PodQOSClass(pod) != v1.PodQOSGuaranteed {
return nil
}
@@ -390,7 +390,7 @@ func (p *staticPolicy) GetPodTopologyHints(s state.State, pod *v1.Pod) map[strin
// and is consulted to achieve NUMA aware resource alignment among this
// and other resource controllers.
func (p *staticPolicy) GetTopologyHints(s state.State, pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
-if v1qos.GetPodQOS(pod) != v1.PodQOSGuaranteed {
+if v1qos.PodQOSClass(pod) != v1.PodQOSGuaranteed {
return nil
}

View File

@@ -99,7 +99,7 @@ func (m *podContainerManagerImpl) EnsureExists(pod *v1.Pod) error {
// GetPodContainerName returns the CgroupName identifier, and its literal cgroupfs form on the host.
func (m *podContainerManagerImpl) GetPodContainerName(pod *v1.Pod) (CgroupName, string) {
-podQOS := v1qos.GetPodQOS(pod)
+podQOS := v1qos.PodQOSClass(pod)
// Get the parent QOS container name
var parentContainer CgroupName
switch podQOS {

View File

@@ -173,7 +173,7 @@ func (m *qosContainerManagerImpl) setCPUCgroupConfig(configs map[v1.PodQOSClass]
reuseReqs := make(v1.ResourceList, 4)
for i := range pods {
pod := pods[i]
-qosClass := v1qos.GetPodQOS(pod)
+qosClass := v1qos.PodQOSClass(pod)
if qosClass != v1.PodQOSBurstable {
// we only care about the burstable qos tier
continue
@@ -207,7 +207,7 @@ func (m *qosContainerManagerImpl) getQoSMemoryRequests() map[v1.PodQOSClass]int6
reuseReqs := make(v1.ResourceList, 4)
for _, pod := range pods {
podMemoryRequest := int64(0)
-qosClass := v1qos.GetPodQOS(pod)
+qosClass := v1qos.PodQOSClass(pod)
if qosClass == v1.PodQOSBestEffort {
// limits are not set for Best Effort pods
continue

View File

@@ -151,7 +151,7 @@ func (m *managerImpl) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAd
// Conditions other than memory pressure reject all pods
nodeOnlyHasMemoryPressureCondition := hasNodeCondition(m.nodeConditions, v1.NodeMemoryPressure) && len(m.nodeConditions) == 1
if nodeOnlyHasMemoryPressureCondition {
-notBestEffort := v1.PodQOSBestEffort != v1qos.GetPodQOS(attrs.Pod)
+notBestEffort := v1.PodQOSBestEffort != v1qos.PodQOSClass(attrs.Pod)
if notBestEffort {
return lifecycle.PodAdmitResult{Admit: true}
}

View File

@@ -1839,7 +1839,7 @@ func (kl *Kubelet) convertStatusToAPIStatus(pod *v1.Pod, podStatus *kubecontaine
}
// set status for Pods created on versions of kube older than 1.6
-apiPodStatus.QOSClass = v1qos.GetPodQOS(pod)
+apiPodStatus.QOSClass = v1qos.PodQOSClass(pod)
apiPodStatus.ContainerStatuses = kl.convertToAPIContainerStatuses(
pod, podStatus,

View File

@@ -247,7 +247,7 @@ func (a admissionRequirementList) toString() string {
func sortPodsByQOS(preemptor *v1.Pod, pods []*v1.Pod) (bestEffort, burstable, guaranteed []*v1.Pod) {
for _, pod := range pods {
if kubetypes.Preemptable(preemptor, pod) {
-switch v1qos.GetPodQOS(pod) {
+switch v1qos.PodQOSClass(pod) {
case v1.PodQOSBestEffort:
bestEffort = append(bestEffort, pod)
case v1.PodQOSBurstable:

View File

@@ -46,7 +46,7 @@ func GetContainerOOMScoreAdjust(pod *v1.Pod, container *v1.Container, memoryCapa
return guaranteedOOMScoreAdj
}
-switch v1qos.GetPodQOS(pod) {
+switch v1qos.PodQOSClass(pod) {
case v1.PodQOSGuaranteed:
// Guaranteed containers should be the last to get killed.
return guaranteedOOMScoreAdj

View File

@@ -375,7 +375,7 @@ func PodUsageFunc(obj runtime.Object, clock clock.Clock) (corev1.ResourceList, e
}
func isBestEffort(pod *corev1.Pod) bool {
-return qos.GetPodQOS(pod) == corev1.PodQOSBestEffort
+return qos.PodQOSClass(pod) == corev1.PodQOSBestEffort
}
func isTerminating(pod *corev1.Pod) bool {

View File

@@ -99,7 +99,7 @@ func (p *Plugin) Admit(ctx context.Context, a admission.Attributes, o admission.
extraTolerations = ts
}
-if qoshelper.GetPodQOS(pod) != api.PodQOSBestEffort {
+if qoshelper.PodQOSClass(pod) != api.PodQOSBestEffort {
extraTolerations = append(extraTolerations, api.Toleration{
Key: corev1.TaintNodeMemoryPressure,
Operator: api.TolerationOpExists,

View File

@@ -871,11 +871,7 @@ func describePod(pod *corev1.Pod, events *corev1.EventList) (string, error) {
}
}
describeVolumes(pod.Spec.Volumes, w, "")
-if pod.Status.QOSClass != "" {
-w.Write(LEVEL_0, "QoS Class:\t%s\n", pod.Status.QOSClass)
-} else {
-w.Write(LEVEL_0, "QoS Class:\t%s\n", qos.GetPodQOS(pod))
-}
+w.Write(LEVEL_0, "QoS Class:\t%s\n", qos.PodQOSClass(pod))
printLabelsMultiline(w, "Node-Selectors", pod.Spec.NodeSelector)
printPodTolerationsMultiline(w, "Tolerations", pod.Spec.Tolerations)
describeTopologySpreadConstraints(pod.Spec.TopologySpreadConstraints, w, "")

View File

@@ -28,6 +28,15 @@ func isSupportedQoSComputeResource(name core.ResourceName) bool {
return supportedQoSComputeResources.Has(string(name))
}
+// PodQOSClass returns the QoS class of a pod persisted in its PodStatus.
+// If QOSClass is empty, it returns the value of GetPodQOS(), which computes the pod's QoS class.
+func PodQOSClass(pod *core.Pod) core.PodQOSClass {
+if pod.Status.QOSClass != "" {
+return pod.Status.QOSClass
+}
+return GetPodQOS(pod)
+}
// GetPodQOS returns the QoS class of a pod.
// A pod is besteffort if none of its containers have specified any requests or limits.
// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.

View File

@@ -501,7 +501,7 @@ func computeCPUMemFraction(node v1.Node, resource *v1.ResourceRequirements, pods
for _, pod := range pods {
framework.Logf("Pod for on the node: %v, Cpu: %v, Mem: %v", pod.Name, getNonZeroRequests(pod).MilliCPU, getNonZeroRequests(pod).Memory)
// Ignore best effort pods while computing fractions as they won't be taken in account by scheduler.
-if v1qos.GetPodQOS(pod) == v1.PodQOSBestEffort {
+if v1qos.PodQOSClass(pod) == v1.PodQOSBestEffort {
continue
}
totalRequestedCPUResource += getNonZeroRequests(pod).MilliCPU