Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #6935 from derekwaynecarr/fix_quota
Fix quota status not updating with change in spec
Commit c3caf397af
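
The essence of the fix is the dirty check in syncResourceQuota: a quota's status now counts as dirty not only when usage has never been recorded, but also when the spec's hard limits no longer match the hard limits recorded in the status. Below is a minimal, self-contained sketch of that logic; plain maps and reflect.DeepEqual stand in for api.ResourceList and api.Semantic.DeepEqual, and the names ResourceList and needsStatusUpdate are illustrative, not the controller's actual types or helpers.

package main

import (
	"fmt"
	"reflect"
)

// ResourceList is a simplified, hypothetical stand-in for api.ResourceList.
type ResourceList map[string]string

// needsStatusUpdate mirrors the updated dirty check in syncResourceQuota:
// the status must be rewritten if the spec's hard limits differ from the
// hard limits recorded in the status, or if usage was never recorded.
func needsStatusUpdate(specHard, statusHard, statusUsed ResourceList) bool {
	dirty := !reflect.DeepEqual(specHard, statusHard)
	dirty = dirty || (statusHard == nil || statusUsed == nil)
	return dirty
}

func main() {
	// Spec was raised to 4 CPU but status still records 3: dirty, so the
	// controller pushes a fresh status (the case this PR fixes).
	fmt.Println(needsStatusUpdate(
		ResourceList{"cpu": "4"},
		ResourceList{"cpu": "3"},
		ResourceList{"cpu": "0"},
	)) // true

	// Spec and status agree and usage exists: nothing to update.
	fmt.Println(needsStatusUpdate(
		ResourceList{"cpu": "4"},
		ResourceList{"cpu": "4"},
		ResourceList{"cpu": "0"},
	)) // false
}

With only the nil check (the previous behavior), the first case would report false and the controller would never copy the raised limit into the status, which is the bug described in the commit message.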
@@ -108,10 +108,13 @@ func FilterQuotaPods(pods []api.Pod) []api.Pod {
 // syncResourceQuota runs a complete sync of current status
 func (rm *ResourceQuotaManager) syncResourceQuota(quota api.ResourceQuota) (err error) {
 
+	// quota is dirty if any part of spec hard limits differs from the status hard limits
+	dirty := !api.Semantic.DeepEqual(quota.Spec.Hard, quota.Status.Hard)
+
 	// dirty tracks if the usage status differs from the previous sync,
 	// if so, we send a new usage with latest status
 	// if this is our first sync, it will be dirty by default, since we need track usage
-	dirty := quota.Status.Hard == nil || quota.Status.Used == nil
+	dirty = dirty || (quota.Status.Hard == nil || quota.Status.Used == nil)
 
 	// Create a usage object that is based on the quota resource version
 	usage := api.ResourceQuota{
@@ -179,3 +179,91 @@ func TestSyncResourceQuota(t *testing.T) {
 	}
 }
 
+func TestSyncResourceQuotaSpecChange(t *testing.T) {
+	quota := api.ResourceQuota{
+		Spec: api.ResourceQuotaSpec{
+			Hard: api.ResourceList{
+				api.ResourceCPU: resource.MustParse("4"),
+			},
+		},
+		Status: api.ResourceQuotaStatus{
+			Hard: api.ResourceList{
+				api.ResourceCPU: resource.MustParse("3"),
+			},
+			Used: api.ResourceList{
+				api.ResourceCPU: resource.MustParse("0"),
+			},
+		},
+	}
+
+	expectedUsage := api.ResourceQuota{
+		Status: api.ResourceQuotaStatus{
+			Hard: api.ResourceList{
+				api.ResourceCPU: resource.MustParse("4"),
+			},
+			Used: api.ResourceList{
+				api.ResourceCPU: resource.MustParse("0"),
+			},
+		},
+	}
+
+	kubeClient := testclient.NewSimpleFake(&quota)
+
+	resourceQuotaManager := NewResourceQuotaManager(kubeClient)
+	err := resourceQuotaManager.syncResourceQuota(quota)
+	if err != nil {
+		t.Fatalf("Unexpected error %v", err)
+	}
+
+	usage := kubeClient.Actions[1].Value.(*api.ResourceQuota)
+
+	// ensure hard and used limits are what we expected
+	for k, v := range expectedUsage.Status.Hard {
+		actual := usage.Status.Hard[k]
+		actualValue := actual.String()
+		expectedValue := v.String()
+		if expectedValue != actualValue {
+			t.Errorf("Usage Hard: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue)
+		}
+	}
+	for k, v := range expectedUsage.Status.Used {
+		actual := usage.Status.Used[k]
+		actualValue := actual.String()
+		expectedValue := v.String()
+		if expectedValue != actualValue {
+			t.Errorf("Usage Used: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue)
+		}
+	}
+
+}
+
+func TestSyncResourceQuotaNoChange(t *testing.T) {
+	quota := api.ResourceQuota{
+		Spec: api.ResourceQuotaSpec{
+			Hard: api.ResourceList{
+				api.ResourceCPU: resource.MustParse("4"),
+			},
+		},
+		Status: api.ResourceQuotaStatus{
+			Hard: api.ResourceList{
+				api.ResourceCPU: resource.MustParse("4"),
+			},
+			Used: api.ResourceList{
+				api.ResourceCPU: resource.MustParse("0"),
+			},
+		},
+	}
+
+	kubeClient := testclient.NewSimpleFake(&api.PodList{}, &quota)
+
+	resourceQuotaManager := NewResourceQuotaManager(kubeClient)
+	err := resourceQuotaManager.syncResourceQuota(quota)
+	if err != nil {
+		t.Fatalf("Unexpected error %v", err)
+	}
+
+	if len(kubeClient.Actions) != 1 && kubeClient.Actions[0].Action != "list-pods" {
+		t.Errorf("SyncResourceQuota made an unexpected client action when state was not dirty: %v", kubeClient.Actions)
+	}
+}