Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-02 08:17:26 +00:00)

commit d11ee913a1
parent b8f4aa3516

    prune flipping int/ext conversion for quota controller
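
For context, the "int/ext conversion" being pruned is the round trip where the controller converted the external v1.ResourceQuota it watches into the internal core type so it could use the old pkg/quota helpers, then converted the result back to v1 before writing status. The snippet below is a self-contained illustration of that round trip, not code from this commit: the object name, namespace, and quantities are invented, and only the two conversion functions that the hunks below delete are used.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	api "k8s.io/kubernetes/pkg/apis/core"
	k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1"
)

func main() {
	// External object, as delivered to the controller by its informer.
	rq := &v1.ResourceQuota{
		ObjectMeta: metav1.ObjectMeta{Name: "example-quota", Namespace: "demo"},
		Spec: v1.ResourceQuotaSpec{
			Hard: v1.ResourceList{v1.ResourcePods: resource.MustParse("10")},
		},
	}

	// The flip out: external (v1) -> internal (core), so the old pkg/quota
	// helpers, which spoke the internal types, could be called.
	internal := api.ResourceQuota{}
	if err := k8s_api_v1.Convert_v1_ResourceQuota_To_core_ResourceQuota(rq, &internal, nil); err != nil {
		panic(err)
	}

	// The flip back: internal (core) -> external (v1), before UpdateStatus.
	out := &v1.ResourceQuota{}
	if err := k8s_api_v1.Convert_core_ResourceQuota_To_v1_ResourceQuota(&internal, out, nil); err != nil {
		panic(err)
	}
	fmt.Println(out.Spec.Hard)
}

With the controller ported to the pkg/quota/v1 helpers, which accept the v1 types directly, both conversion calls and the pkg/apis/core imports behind them become dead weight; the hunks below remove them.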
@@ -15,12 +15,10 @@ go_library(
     ],
     importpath = "k8s.io/kubernetes/pkg/controller/resourcequota",
     deps = [
-        "//pkg/apis/core:go_default_library",
-        "//pkg/apis/core/v1:go_default_library",
         "//pkg/controller:go_default_library",
-        "//pkg/quota:go_default_library",
-        "//pkg/quota/evaluator/core:go_default_library",
-        "//pkg/quota/generic:go_default_library",
+        "//pkg/quota/v1:go_default_library",
+        "//pkg/quota/v1/evaluator/core:go_default_library",
+        "//pkg/quota/v1/generic:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
@@ -49,9 +47,9 @@ go_test(
     embed = [":go_default_library"],
     deps = [
         "//pkg/controller:go_default_library",
-        "//pkg/quota:go_default_library",
-        "//pkg/quota/generic:go_default_library",
-        "//pkg/quota/install:go_default_library",
+        "//pkg/quota/v1:go_default_library",
+        "//pkg/quota/v1/generic:go_default_library",
+        "//pkg/quota/v1/install:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@@ -39,10 +39,8 @@ import (
 	corelisters "k8s.io/client-go/listers/core/v1"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/util/workqueue"
-	api "k8s.io/kubernetes/pkg/apis/core"
-	k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1"
 	"k8s.io/kubernetes/pkg/controller"
-	"k8s.io/kubernetes/pkg/quota"
+	quota "k8s.io/kubernetes/pkg/quota/v1"
 )
 
 // NamespacedResourcesFunc knows how to discover namespaced resources.
@@ -226,7 +224,7 @@ func (rq *ResourceQuotaController) addQuota(obj interface{}) {
 	// if we declared a constraint that has no usage (which this controller can calculate, prioritize it)
 	for constraint := range resourceQuota.Status.Hard {
 		if _, usageFound := resourceQuota.Status.Used[constraint]; !usageFound {
-			matchedResources := []api.ResourceName{api.ResourceName(constraint)}
+			matchedResources := []v1.ResourceName{v1.ResourceName(constraint)}
 			for _, evaluator := range rq.registry.List() {
 				if intersection := evaluator.MatchingResources(matchedResources); len(intersection) > 0 {
 					rq.missingUsageQueue.Add(key)
@@ -320,25 +318,20 @@ func (rq *ResourceQuotaController) syncResourceQuotaFromKey(key string) (err err
 }
 
 // syncResourceQuota runs a complete sync of resource quota status across all known kinds
-func (rq *ResourceQuotaController) syncResourceQuota(v1ResourceQuota *v1.ResourceQuota) (err error) {
+func (rq *ResourceQuotaController) syncResourceQuota(resourceQuota *v1.ResourceQuota) (err error) {
 	// quota is dirty if any part of spec hard limits differs from the status hard limits
-	dirty := !apiequality.Semantic.DeepEqual(v1ResourceQuota.Spec.Hard, v1ResourceQuota.Status.Hard)
-
-	resourceQuota := api.ResourceQuota{}
-	if err := k8s_api_v1.Convert_v1_ResourceQuota_To_core_ResourceQuota(v1ResourceQuota, &resourceQuota, nil); err != nil {
-		return err
-	}
+	dirty := !apiequality.Semantic.DeepEqual(resourceQuota.Spec.Hard, resourceQuota.Status.Hard)
 
 	// dirty tracks if the usage status differs from the previous sync,
 	// if so, we send a new usage with latest status
 	// if this is our first sync, it will be dirty by default, since we need track usage
-	dirty = dirty || (resourceQuota.Status.Hard == nil || resourceQuota.Status.Used == nil)
+	dirty = dirty || resourceQuota.Status.Hard == nil || resourceQuota.Status.Used == nil
 
-	used := api.ResourceList{}
+	used := v1.ResourceList{}
 	if resourceQuota.Status.Used != nil {
-		used = quota.Add(api.ResourceList{}, resourceQuota.Status.Used)
+		used = quota.Add(v1.ResourceList{}, resourceQuota.Status.Used)
 	}
-	hardLimits := quota.Add(api.ResourceList{}, resourceQuota.Spec.Hard)
+	hardLimits := quota.Add(v1.ResourceList{}, resourceQuota.Spec.Hard)
 
 	newUsage, err := quota.CalculateUsage(resourceQuota.Namespace, resourceQuota.Spec.Scopes, hardLimits, rq.registry, resourceQuota.Spec.ScopeSelector)
 	if err != nil {
@@ -354,14 +347,14 @@ func (rq *ResourceQuotaController) syncResourceQuota(v1ResourceQuota *v1.Resourc
 
 	// Create a usage object that is based on the quota resource version that will handle updates
 	// by default, we preserve the past usage observation, and set hard to the current spec
-	usage := api.ResourceQuota{
+	usage := v1.ResourceQuota{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:            resourceQuota.Name,
 			Namespace:       resourceQuota.Namespace,
 			ResourceVersion: resourceQuota.ResourceVersion,
 			Labels:          resourceQuota.Labels,
 			Annotations:     resourceQuota.Annotations},
-		Status: api.ResourceQuotaStatus{
+		Status: v1.ResourceQuotaStatus{
 			Hard: hardLimits,
 			Used: used,
 		},
@@ -371,11 +364,7 @@ func (rq *ResourceQuotaController) syncResourceQuota(v1ResourceQuota *v1.Resourc
 
 	// there was a change observed by this controller that requires we update quota
 	if dirty {
-		v1Usage := &v1.ResourceQuota{}
-		if err := k8s_api_v1.Convert_core_ResourceQuota_To_v1_ResourceQuota(&usage, v1Usage, nil); err != nil {
-			return err
-		}
-		_, err = rq.rqClient.ResourceQuotas(usage.Namespace).UpdateStatus(v1Usage)
+		_, err = rq.rqClient.ResourceQuotas(usage.Namespace).UpdateStatus(&usage)
 		return err
 	}
 	return nil
@@ -406,12 +395,7 @@ func (rq *ResourceQuotaController) replenishQuota(groupResource schema.GroupReso
 	// only queue those quotas that are tracking a resource associated with this kind.
 	for i := range resourceQuotas {
 		resourceQuota := resourceQuotas[i]
-		internalResourceQuota := &api.ResourceQuota{}
-		if err := k8s_api_v1.Convert_v1_ResourceQuota_To_core_ResourceQuota(resourceQuota, internalResourceQuota, nil); err != nil {
-			glog.Error(err)
-			continue
-		}
-		resourceQuotaResources := quota.ResourceNames(internalResourceQuota.Status.Hard)
+		resourceQuotaResources := quota.ResourceNames(resourceQuota.Status.Hard)
 		if intersection := evaluator.MatchingResources(resourceQuotaResources); len(intersection) > 0 {
 			// TODO: make this support targeted replenishment to a specific kind, right now it does a full recalc on that quota.
 			rq.enqueueResourceQuota(resourceQuota)
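
The pkg/quota/v1 helpers the controller now calls (imported above as quota) operate directly on v1.ResourceList, which is what makes the removed conversions unnecessary. Below is a small standalone sketch, assuming only the two helpers already used in the hunks above (quota.Add and quota.ResourceNames); the resource names and quantities are invented example values.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	quota "k8s.io/kubernetes/pkg/quota/v1"
)

func main() {
	hard := v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse("4"),
		v1.ResourceMemory: resource.MustParse("8Gi"),
	}
	used := v1.ResourceList{
		v1.ResourceCPU: resource.MustParse("1"),
	}

	// Add sums the two lists key by key; ResourceNames lists the keys present.
	sum := quota.Add(hard, used)
	fmt.Println(quota.ResourceNames(sum)) // e.g. [cpu memory]; map order is not guaranteed
}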
@@ -33,9 +33,9 @@ import (
 	core "k8s.io/client-go/testing"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/kubernetes/pkg/controller"
-	"k8s.io/kubernetes/pkg/quota"
-	"k8s.io/kubernetes/pkg/quota/generic"
-	"k8s.io/kubernetes/pkg/quota/install"
+	quota "k8s.io/kubernetes/pkg/quota/v1"
+	"k8s.io/kubernetes/pkg/quota/v1/generic"
+	"k8s.io/kubernetes/pkg/quota/v1/install"
 )
 
 func getResourceList(cpu, memory string) v1.ResourceList {
@@ -33,9 +33,9 @@ import (
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/kubernetes/pkg/controller"
-	"k8s.io/kubernetes/pkg/quota"
-	"k8s.io/kubernetes/pkg/quota/evaluator/core"
-	"k8s.io/kubernetes/pkg/quota/generic"
+	quota "k8s.io/kubernetes/pkg/quota/v1"
+	"k8s.io/kubernetes/pkg/quota/v1/evaluator/core"
+	"k8s.io/kubernetes/pkg/quota/v1/generic"
 )
 
 type eventType int