diff --git a/pkg/resourcequota/resource_quota_controller.go b/pkg/resourcequota/resource_quota_controller.go
index 93006679b88..3d651c12fc8 100644
--- a/pkg/resourcequota/resource_quota_controller.go
+++ b/pkg/resourcequota/resource_quota_controller.go
@@ -108,10 +108,13 @@ func FilterQuotaPods(pods []api.Pod) []api.Pod {
 
 // syncResourceQuota runs a complete sync of current status
 func (rm *ResourceQuotaManager) syncResourceQuota(quota api.ResourceQuota) (err error) {
+	// quota is dirty if any part of spec hard limits differs from the status hard limits
+	dirty := !api.Semantic.DeepEqual(quota.Spec.Hard, quota.Status.Hard)
+
 	// dirty tracks if the usage status differs from the previous sync,
 	// if so, we send a new usage with latest status
 	// if this is our first sync, it will be dirty by default, since we need track usage
-	dirty := quota.Status.Hard == nil || quota.Status.Used == nil
+	dirty = dirty || (quota.Status.Hard == nil || quota.Status.Used == nil)
 
 	// Create a usage object that is based on the quota resource version
 	usage := api.ResourceQuota{
diff --git a/pkg/resourcequota/resource_quota_controller_test.go b/pkg/resourcequota/resource_quota_controller_test.go
index 45813dcaa32..8d03ab44c34 100644
--- a/pkg/resourcequota/resource_quota_controller_test.go
+++ b/pkg/resourcequota/resource_quota_controller_test.go
@@ -179,3 +179,91 @@ func TestSyncResourceQuota(t *testing.T) {
 	}
 
 }
+
+func TestSyncResourceQuotaSpecChange(t *testing.T) {
+	quota := api.ResourceQuota{
+		Spec: api.ResourceQuotaSpec{
+			Hard: api.ResourceList{
+				api.ResourceCPU: resource.MustParse("4"),
+			},
+		},
+		Status: api.ResourceQuotaStatus{
+			Hard: api.ResourceList{
+				api.ResourceCPU: resource.MustParse("3"),
+			},
+			Used: api.ResourceList{
+				api.ResourceCPU: resource.MustParse("0"),
+			},
+		},
+	}
+
+	expectedUsage := api.ResourceQuota{
+		Status: api.ResourceQuotaStatus{
+			Hard: api.ResourceList{
+				api.ResourceCPU: resource.MustParse("4"),
+			},
+			Used: api.ResourceList{
+				api.ResourceCPU: resource.MustParse("0"),
+			},
+		},
+	}
+
+	kubeClient := testclient.NewSimpleFake(&quota)
+
+	resourceQuotaManager := NewResourceQuotaManager(kubeClient)
+	err := resourceQuotaManager.syncResourceQuota(quota)
+	if err != nil {
+		t.Fatalf("Unexpected error %v", err)
+	}
+
+	usage := kubeClient.Actions[1].Value.(*api.ResourceQuota)
+
+	// ensure hard and used limits are what we expected
+	for k, v := range expectedUsage.Status.Hard {
+		actual := usage.Status.Hard[k]
+		actualValue := actual.String()
+		expectedValue := v.String()
+		if expectedValue != actualValue {
+			t.Errorf("Usage Hard: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue)
+		}
+	}
+	for k, v := range expectedUsage.Status.Used {
+		actual := usage.Status.Used[k]
+		actualValue := actual.String()
+		expectedValue := v.String()
+		if expectedValue != actualValue {
+			t.Errorf("Usage Used: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue)
+		}
+	}
+
+}
+
+func TestSyncResourceQuotaNoChange(t *testing.T) {
+	quota := api.ResourceQuota{
+		Spec: api.ResourceQuotaSpec{
+			Hard: api.ResourceList{
+				api.ResourceCPU: resource.MustParse("4"),
+			},
+		},
+		Status: api.ResourceQuotaStatus{
+			Hard: api.ResourceList{
+				api.ResourceCPU: resource.MustParse("4"),
+			},
+			Used: api.ResourceList{
+				api.ResourceCPU: resource.MustParse("0"),
+			},
+		},
+	}
+
+	kubeClient := testclient.NewSimpleFake(&api.PodList{}, &quota)
+
+	resourceQuotaManager := NewResourceQuotaManager(kubeClient)
+	err := resourceQuotaManager.syncResourceQuota(quota)
+	if err != nil {
+		t.Fatalf("Unexpected error %v", err)
+	}
+
+	if len(kubeClient.Actions) != 1 || kubeClient.Actions[0].Action != "list-pods" {
+		t.Errorf("SyncResourceQuota made an unexpected client action when state was not dirty: %v", kubeClient.Actions)
+	}
+}