the observed used values should match those that have hard constraints

Ke Zhang 2016-08-06 06:10:09 +08:00
parent c393f11261
commit d9c9cafbf3
2 changed files with 104 additions and 0 deletions


@@ -289,6 +289,10 @@ func (rq *ResourceQuotaController) syncResourceQuota(resourceQuota api.ResourceQ
		used[key] = value
	}
	// ensure set of used values match those that have hard constraints
	hardResources := quota.ResourceNames(hardLimits)
	used = quota.Mask(used, hardResources)
	// Create a usage object that is based on the quota resource version that will handle updates
	// by default, we preserve the past usage observation, and set hard to the current spec
	usage := api.ResourceQuota{
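For context: the new masking step keeps only the observed used entries whose resource names still appear in the hard limits, so a resource removed from spec.hard no longer lingers in status.used. A minimal sketch of that behavior (maskUsed is a hypothetical stand-in, not the real quota.Mask, and it takes the hard list directly rather than a slice of resource names) could look like:

// maskUsed keeps only the usage entries whose resource names are still
// hard-constrained; anything else is dropped from the observed usage.
func maskUsed(used, hard api.ResourceList) api.ResourceList {
	masked := api.ResourceList{}
	for name, value := range used {
		if _, constrained := hard[name]; constrained {
			masked[name] = value
		}
	}
	return masked
}

The actual change relies on quota.ResourceNames and quota.Mask from the quota package, as shown in the hunk above.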


@@ -242,6 +242,106 @@ func TestSyncResourceQuotaSpecChange(t *testing.T) {
	}
}
func TestSyncResourceQuotaSpecHardChange(t *testing.T) {
	resourceQuota := api.ResourceQuota{
		ObjectMeta: api.ObjectMeta{
			Namespace: "default",
			Name: "rq",
		},
		Spec: api.ResourceQuotaSpec{
			Hard: api.ResourceList{
				api.ResourceCPU: resource.MustParse("4"),
			},
		},
		Status: api.ResourceQuotaStatus{
			Hard: api.ResourceList{
				api.ResourceCPU: resource.MustParse("3"),
				api.ResourceMemory: resource.MustParse("1Gi"),
			},
			Used: api.ResourceList{
				api.ResourceCPU: resource.MustParse("0"),
				api.ResourceMemory: resource.MustParse("0"),
			},
		},
	}
	expectedUsage := api.ResourceQuota{
		Status: api.ResourceQuotaStatus{
			Hard: api.ResourceList{
				api.ResourceCPU: resource.MustParse("4"),
			},
			Used: api.ResourceList{
				api.ResourceCPU: resource.MustParse("0"),
			},
		},
	}
	kubeClient := fake.NewSimpleClientset(&resourceQuota)
	resourceQuotaControllerOptions := &ResourceQuotaControllerOptions{
		KubeClient: kubeClient,
		ResyncPeriod: controller.NoResyncPeriodFunc,
		Registry: install.NewRegistry(kubeClient),
		GroupKindsToReplenish: []unversioned.GroupKind{
			api.Kind("Pod"),
			api.Kind("Service"),
			api.Kind("ReplicationController"),
			api.Kind("PersistentVolumeClaim"),
		},
		ControllerFactory: NewReplenishmentControllerFactoryFromClient(kubeClient),
		ReplenishmentResyncPeriod: controller.NoResyncPeriodFunc,
	}
	quotaController := NewResourceQuotaController(resourceQuotaControllerOptions)
	err := quotaController.syncResourceQuota(resourceQuota)
	if err != nil {
		t.Fatalf("Unexpected error %v", err)
	}
	expectedActionSet := sets.NewString(
		strings.Join([]string{"list", "pods", ""}, "-"),
		strings.Join([]string{"update", "resourcequotas", "status"}, "-"),
	)
	actionSet := sets.NewString()
	for _, action := range kubeClient.Actions() {
		actionSet.Insert(strings.Join([]string{action.GetVerb(), action.GetResource().Resource, action.GetSubresource()}, "-"))
	}
	if !actionSet.HasAll(expectedActionSet.List()...) {
		t.Errorf("Expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet, actionSet, expectedActionSet.Difference(actionSet))
	}
	lastActionIndex := len(kubeClient.Actions()) - 1
	usage := kubeClient.Actions()[lastActionIndex].(core.UpdateAction).GetObject().(*api.ResourceQuota)
	// ensure hard and used limits are what we expected
	for k, v := range expectedUsage.Status.Hard {
		actual := usage.Status.Hard[k]
		actualValue := actual.String()
		expectedValue := v.String()
		if expectedValue != actualValue {
			t.Errorf("Usage Hard: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue)
		}
	}
	for k, v := range expectedUsage.Status.Used {
		actual := usage.Status.Used[k]
		actualValue := actual.String()
		expectedValue := v.String()
		if expectedValue != actualValue {
			t.Errorf("Usage Used: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue)
		}
	}
	// ensure usage hard and used are synced with spec hard and contain no stale resources
	for k, v := range usage.Status.Hard {
		if k == api.ResourceMemory {
			t.Errorf("Unexpected Usage Hard: Key: %v, Value: %v", k, v.String())
		}
	}
	for k, v := range usage.Status.Used {
		if k == api.ResourceMemory {
			t.Errorf("Unexpected Usage Used: Key: %v, Value: %v", k, v.String())
		}
	}
}
func TestSyncResourceQuotaNoChange(t *testing.T) {
	resourceQuota := api.ResourceQuota{