Update quota status with limits even when calculation errors occur
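Previously, syncResourceQuota returned on the first error from quota.CalculateUsage, so a quota whose evaluators failed never had its status updated at all. With this change the controller collects calculation errors, still writes the spec's hard limits plus whatever usage it managed to compute to the quota status, and only then returns the collected errors as an aggregate. Judging by the hunk contexts, the diff below touches the resource quota controller package's BUILD deps, the controller source, and its unit test.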
@@ -54,6 +54,7 @@ go_test(
         "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
@@ -30,6 +30,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -321,12 +322,12 @@ func (rq *ResourceQuotaController) syncResourceQuotaFromKey(key string) (err err
 // syncResourceQuota runs a complete sync of resource quota status across all known kinds
 func (rq *ResourceQuotaController) syncResourceQuota(resourceQuota *v1.ResourceQuota) (err error) {
 	// quota is dirty if any part of spec hard limits differs from the status hard limits
-	dirty := !apiequality.Semantic.DeepEqual(resourceQuota.Spec.Hard, resourceQuota.Status.Hard)
+	statusLimitsDirty := !apiequality.Semantic.DeepEqual(resourceQuota.Spec.Hard, resourceQuota.Status.Hard)

 	// dirty tracks if the usage status differs from the previous sync,
 	// if so, we send a new usage with latest status
 	// if this is our first sync, it will be dirty by default, since we need track usage
-	dirty = dirty || resourceQuota.Status.Hard == nil || resourceQuota.Status.Used == nil
+	dirty := statusLimitsDirty || resourceQuota.Status.Hard == nil || resourceQuota.Status.Used == nil

 	used := v1.ResourceList{}
 	if resourceQuota.Status.Used != nil {
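A side note on the comparison above: apiequality.Semantic.DeepEqual compares resource quantities by value (Cmp == 0) rather than by representation, so a hard limit rewritten in an equivalent unit does not count as dirty. A minimal standalone sketch, not part of this commit:

    package main

    import (
        "fmt"

        apiequality "k8s.io/apimachinery/pkg/api/equality"
        "k8s.io/apimachinery/pkg/api/resource"
    )

    func main() {
        // "1Gi" and "1024Mi" denote the same number of bytes, so they are
        // semantically equal even though reflect.DeepEqual would treat the
        // two Quantity values as different.
        a, b := resource.MustParse("1Gi"), resource.MustParse("1024Mi")
        fmt.Println(apiequality.Semantic.DeepEqual(a, b)) // true
    }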
@@ -334,9 +335,12 @@ func (rq *ResourceQuotaController) syncResourceQuota(resourceQuota *v1.ResourceQ
 	}
 	hardLimits := quota.Add(v1.ResourceList{}, resourceQuota.Spec.Hard)

+	errors := []error{}
+
 	newUsage, err := quota.CalculateUsage(resourceQuota.Namespace, resourceQuota.Spec.Scopes, hardLimits, rq.registry, resourceQuota.Spec.ScopeSelector)
 	if err != nil {
-		return err
+		// if err is non-nil, remember it to return, but continue updating status with any resources in newUsage
+		errors = append(errors, err)
 	}
 	for key, value := range newUsage {
 		used[key] = value
@@ -359,9 +363,11 @@ func (rq *ResourceQuotaController) syncResourceQuota(resourceQuota *v1.ResourceQ
 	// there was a change observed by this controller that requires we update quota
 	if dirty {
 		_, err = rq.rqClient.ResourceQuotas(usage.Namespace).UpdateStatus(usage)
-		return err
+		if err != nil {
+			errors = append(errors, err)
+		}
 	}
-	return nil
+	return utilerrors.NewAggregate(errors)
 }

 // replenishQuota is a replenishment function invoked by a controller to notify that a quota should be recalculated
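The changed return value is behavior-preserving on the success path because utilerrors.NewAggregate returns nil for an empty slice, and with a single collected error the aggregate's message is that error's message, which is what the strings.Contains assertions in the test below match against. A standalone sketch assuming only the apimachinery errors package:

    package main

    import (
        "fmt"

        utilerrors "k8s.io/apimachinery/pkg/util/errors"
    )

    func main() {
        errs := []error{}
        // No collected errors: NewAggregate returns nil, matching the old
        // `return nil` exactly.
        fmt.Println(utilerrors.NewAggregate(errs) == nil) // true

        // One collected error: Error() yields the underlying message, so
        // strings.Contains(err.Error(), "error listing") succeeds.
        errs = append(errs, fmt.Errorf("error listing"))
        fmt.Println(utilerrors.NewAggregate(errs)) // error listing
    }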
@@ -28,6 +28,7 @@ import (
 	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/sets"
@@ -83,6 +84,23 @@ func newGenericLister(groupResource schema.GroupResource, items []runtime.Object
 	return cache.NewGenericLister(store, groupResource)
 }

+func newErrorLister() cache.GenericLister {
+	return errorLister{}
+}
+
+type errorLister struct {
+}
+
+func (errorLister) List(selector labels.Selector) (ret []runtime.Object, err error) {
+	return nil, fmt.Errorf("error listing")
+}
+func (errorLister) Get(name string) (runtime.Object, error) {
+	return nil, fmt.Errorf("error getting")
+}
+func (errorLister) ByNamespace(namespace string) cache.GenericNamespaceLister {
+	return errorLister{}
+}
+
 type quotaController struct {
 	*ResourceQuotaController
 	stop chan struct{}
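errorLister is a stub whose methods always fail, forcing any evaluator that lists through it down its error path. The commit does not include one, but a compile-time interface check is a common companion for a test double like this (it would sit next to the type in the same file, where cache is already imported):

    // Fails the build if errorLister stops satisfying cache.GenericLister.
    var _ cache.GenericLister = errorLister{}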
@@ -205,9 +223,11 @@ func newTestPodsWithPriorityClasses() []runtime.Object {
 func TestSyncResourceQuota(t *testing.T) {
 	testCases := map[string]struct {
 		gvr               schema.GroupVersionResource
+		errorGVR          schema.GroupVersionResource
 		items             []runtime.Object
 		quota             v1.ResourceQuota
 		status            v1.ResourceQuotaStatus
+		expectedError     string
 		expectedActionSet sets.String
 	}{
 		"non-matching-best-effort-scoped-quota": {
@@ -699,18 +719,75 @@ func TestSyncResourceQuota(t *testing.T) {
 			expectedActionSet: sets.NewString(),
 			items:             []runtime.Object{},
 		},
+		"quota-missing-status-with-calculation-error": {
+			errorGVR: v1.SchemeGroupVersion.WithResource("pods"),
+			quota: v1.ResourceQuota{
+				ObjectMeta: metav1.ObjectMeta{
+					Namespace: "default",
+					Name:      "rq",
+				},
+				Spec: v1.ResourceQuotaSpec{
+					Hard: v1.ResourceList{
+						v1.ResourcePods: resource.MustParse("1"),
+					},
+				},
+				Status: v1.ResourceQuotaStatus{},
+			},
+			status: v1.ResourceQuotaStatus{
+				Hard: v1.ResourceList{
+					v1.ResourcePods: resource.MustParse("1"),
+				},
+			},
+			expectedError:     "error listing",
+			expectedActionSet: sets.NewString("update-resourcequotas-status"),
+			items:             []runtime.Object{},
+		},
+		"quota-missing-status-with-partial-calculation-error": {
+			gvr:      v1.SchemeGroupVersion.WithResource("configmaps"),
+			errorGVR: v1.SchemeGroupVersion.WithResource("pods"),
+			quota: v1.ResourceQuota{
+				ObjectMeta: metav1.ObjectMeta{
+					Namespace: "default",
+					Name:      "rq",
+				},
+				Spec: v1.ResourceQuotaSpec{
+					Hard: v1.ResourceList{
+						v1.ResourcePods:       resource.MustParse("1"),
+						v1.ResourceConfigMaps: resource.MustParse("1"),
+					},
+				},
+				Status: v1.ResourceQuotaStatus{},
+			},
+			status: v1.ResourceQuotaStatus{
+				Hard: v1.ResourceList{
+					v1.ResourcePods:       resource.MustParse("1"),
+					v1.ResourceConfigMaps: resource.MustParse("1"),
+				},
+				Used: v1.ResourceList{
+					v1.ResourceConfigMaps: resource.MustParse("0"),
+				},
+			},
+			expectedError:     "error listing",
+			expectedActionSet: sets.NewString("update-resourcequotas-status"),
+			items:             []runtime.Object{},
+		},
 	}

 	for testName, testCase := range testCases {
 		kubeClient := fake.NewSimpleClientset(&testCase.quota)
 		listersForResourceConfig := map[schema.GroupVersionResource]cache.GenericLister{
-			testCase.gvr: newGenericLister(testCase.gvr.GroupResource(), testCase.items),
+			testCase.gvr:      newGenericLister(testCase.gvr.GroupResource(), testCase.items),
+			testCase.errorGVR: newErrorLister(),
 		}
 		qc := setupQuotaController(t, kubeClient, mockListerForResourceFunc(listersForResourceConfig), mockDiscoveryFunc)
 		defer close(qc.stop)

 		if err := qc.syncResourceQuota(&testCase.quota); err != nil {
-			t.Fatalf("test: %s, unexpected error: %v", testName, err)
+			if len(testCase.expectedError) == 0 || !strings.Contains(err.Error(), testCase.expectedError) {
+				t.Fatalf("test: %s, unexpected error: %v", testName, err)
+			}
+		} else if len(testCase.expectedError) > 0 {
+			t.Fatalf("test: %s, expected error %q, got none", testName, testCase.expectedError)
+		}

 		actionSet := sets.NewString()
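Taken together, the two new cases cover both failure modes: with only errorGVR set, every listing fails, yet update-resourcequotas-status is still issued carrying the hard limits; with both gvr and errorGVR set, configmaps usage is computed (Used records "0") while the pods listing fails, so partial usage plus limits land in status before the "error listing" aggregate is returned.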