From b86e8f7631dd07dd7fbb83d143726c33e4ee8475 Mon Sep 17 00:00:00 2001 From: yue9944882 <291271447@qq.com> Date: Mon, 27 Aug 2018 21:46:11 +0800 Subject: [PATCH 1/9] externalize quota admission controller --- plugin/pkg/admission/resourcequota/BUILD | 22 +- .../pkg/admission/resourcequota/admission.go | 22 +- .../admission/resourcequota/admission_test.go | 1055 +++++++++-------- .../pkg/admission/resourcequota/controller.go | 56 +- .../resourcequota/resource_access.go | 28 +- 5 files changed, 594 insertions(+), 589 deletions(-) diff --git a/plugin/pkg/admission/resourcequota/BUILD b/plugin/pkg/admission/resourcequota/BUILD index 714cbdd8b02..e638b0691a0 100644 --- a/plugin/pkg/admission/resourcequota/BUILD +++ b/plugin/pkg/admission/resourcequota/BUILD @@ -17,19 +17,16 @@ go_library( ], importpath = "k8s.io/kubernetes/plugin/pkg/admission/resourcequota", deps = [ - "//pkg/apis/core:go_default_library", - "//pkg/client/clientset_generated/internalclientset:go_default_library", - "//pkg/client/informers/informers_generated/internalversion:go_default_library", - "//pkg/client/listers/core/internalversion:go_default_library", "//pkg/kubeapiserver/admission:go_default_library", - "//pkg/quota:go_default_library", - "//pkg/quota/generic:go_default_library", + "//pkg/quota/v1:go_default_library", + "//pkg/quota/v1/generic:go_default_library", "//pkg/util/reflector/prometheus:go_default_library", "//pkg/util/workqueue/prometheus:go_default_library", "//plugin/pkg/admission/resourcequota/apis/resourcequota:go_default_library", "//plugin/pkg/admission/resourcequota/apis/resourcequota/install:go_default_library", "//plugin/pkg/admission/resourcequota/apis/resourcequota/v1beta1:go_default_library", "//plugin/pkg/admission/resourcequota/apis/resourcequota/validation:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", 
"//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -41,7 +38,11 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/admission/initializer:go_default_library", "//staging/src/k8s.io/apiserver/pkg/storage/etcd:go_default_library", + "//staging/src/k8s.io/client-go/informers:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/hashicorp/golang-lru:go_default_library", @@ -54,17 +55,18 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/apis/core:go_default_library", - "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", - "//pkg/client/informers/informers_generated/internalversion:go_default_library", "//pkg/controller:go_default_library", - "//pkg/quota/generic:go_default_library", - "//pkg/quota/install:go_default_library", + "//pkg/quota/v1/generic:go_default_library", + "//pkg/quota/v1/install:go_default_library", "//plugin/pkg/admission/resourcequota/apis/resourcequota:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/client-go/informers:go_default_library", + 
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/testing:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//vendor/github.com/hashicorp/golang-lru:go_default_library", diff --git a/plugin/pkg/admission/resourcequota/admission.go b/plugin/pkg/admission/resourcequota/admission.go index ec8bc590a23..43097ef5569 100644 --- a/plugin/pkg/admission/resourcequota/admission.go +++ b/plugin/pkg/admission/resourcequota/admission.go @@ -21,13 +21,14 @@ import ( "io" "time" + corev1 "k8s.io/api/core/v1" "k8s.io/apiserver/pkg/admission" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" + genericadmissioninitializer "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" kubeapiserveradmission "k8s.io/kubernetes/pkg/kubeapiserver/admission" - "k8s.io/kubernetes/pkg/quota" - "k8s.io/kubernetes/pkg/quota/generic" + quota "k8s.io/kubernetes/pkg/quota/v1" + "k8s.io/kubernetes/pkg/quota/v1/generic" resourcequotaapi "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota" "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/validation" ) @@ -65,12 +66,13 @@ type QuotaAdmission struct { } var _ admission.ValidationInterface = &QuotaAdmission{} -var _ = kubeapiserveradmission.WantsInternalKubeClientSet(&QuotaAdmission{}) +var _ = genericadmissioninitializer.WantsExternalKubeInformerFactory(&QuotaAdmission{}) +var _ = genericadmissioninitializer.WantsExternalKubeClientSet(&QuotaAdmission{}) var _ = kubeapiserveradmission.WantsQuotaConfiguration(&QuotaAdmission{}) type liveLookupEntry struct { expiry time.Time - items []*api.ResourceQuota + items []*corev1.ResourceQuota } // NewResourceQuota configures an admission controller that can enforce quota constraints @@ 
-91,12 +93,12 @@ func NewResourceQuota(config *resourcequotaapi.Configuration, numEvaluators int, }, nil } -func (a *QuotaAdmission) SetInternalKubeClientSet(client internalclientset.Interface) { +func (a *QuotaAdmission) SetExternalKubeClientSet(client kubernetes.Interface) { a.quotaAccessor.client = client } -func (a *QuotaAdmission) SetInternalKubeInformerFactory(f informers.SharedInformerFactory) { - a.quotaAccessor.lister = f.Core().InternalVersion().ResourceQuotas().Lister() +func (a *QuotaAdmission) SetExternalKubeInformerFactory(f informers.SharedInformerFactory) { + a.quotaAccessor.lister = f.Core().V1().ResourceQuotas().Lister() } func (a *QuotaAdmission) SetQuotaConfiguration(c quota.Configuration) { diff --git a/plugin/pkg/admission/resourcequota/admission_test.go b/plugin/pkg/admission/resourcequota/admission_test.go index 610b2dec51b..09e29b0b8fc 100644 --- a/plugin/pkg/admission/resourcequota/admission_test.go +++ b/plugin/pkg/admission/resourcequota/admission_test.go @@ -25,19 +25,20 @@ import ( lru "github.com/hashicorp/golang-lru" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/admission" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes/fake" testcore "k8s.io/client-go/testing" "k8s.io/client-go/tools/cache" api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" - informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" "k8s.io/kubernetes/pkg/controller" - "k8s.io/kubernetes/pkg/quota/generic" - "k8s.io/kubernetes/pkg/quota/install" + "k8s.io/kubernetes/pkg/quota/v1/generic" + "k8s.io/kubernetes/pkg/quota/v1/install" resourcequotaapi "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota" ) @@ -92,33 +93,33 @@ func validPersistentVolumeClaim(name string, 
resources api.ResourceRequirements) } func TestPrettyPrint(t *testing.T) { - toResourceList := func(resources map[api.ResourceName]string) api.ResourceList { - resourceList := api.ResourceList{} + toResourceList := func(resources map[corev1.ResourceName]string) corev1.ResourceList { + resourceList := corev1.ResourceList{} for key, value := range resources { resourceList[key] = resource.MustParse(value) } return resourceList } testCases := []struct { - input api.ResourceList + input corev1.ResourceList expected string }{ { - input: toResourceList(map[api.ResourceName]string{ - api.ResourceCPU: "100m", + input: toResourceList(map[corev1.ResourceName]string{ + corev1.ResourceCPU: "100m", }), expected: "cpu=100m", }, { - input: toResourceList(map[api.ResourceName]string{ - api.ResourcePods: "10", - api.ResourceServices: "10", - api.ResourceReplicationControllers: "10", - api.ResourceServicesNodePorts: "10", - api.ResourceRequestsCPU: "100m", - api.ResourceRequestsMemory: "100Mi", - api.ResourceLimitsCPU: "100m", - api.ResourceLimitsMemory: "100Mi", + input: toResourceList(map[corev1.ResourceName]string{ + corev1.ResourcePods: "10", + corev1.ResourceServices: "10", + corev1.ResourceReplicationControllers: "10", + corev1.ResourceServicesNodePorts: "10", + corev1.ResourceRequestsCPU: "100m", + corev1.ResourceRequestsMemory: "100Mi", + corev1.ResourceLimitsCPU: "100m", + corev1.ResourceLimitsMemory: "100Mi", }), expected: "limits.cpu=100m,limits.memory=100Mi,pods=10,replicationcontrollers=10,requests.cpu=100m,requests.memory=100Mi,services=10,services.nodeports=10", }, @@ -140,7 +141,7 @@ func TestAdmissionIgnoresDelete(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() 
config := &resourcequotaapi.Configuration{} quotaConfiguration := install.NewQuotaConfigurationForAdmission() evaluator := NewQuotaEvaluator(quotaAccessor, quotaConfiguration.IgnoredResources(), generic.NewRegistry(quotaConfiguration.Evaluators()), nil, config, 5, stopCh) @@ -150,7 +151,7 @@ func TestAdmissionIgnoresDelete(t *testing.T) { evaluator: evaluator, } namespace := "default" - err := handler.Validate(admission.NewAttributesRecord(nil, nil, api.Kind("Pod").WithVersion("version"), namespace, "name", api.Resource("pods").WithVersion("version"), "", admission.Delete, false, nil)) + err := handler.Validate(admission.NewAttributesRecord(nil, nil, api.Kind("Pod").WithVersion("version"), namespace, "name", corev1.Resource("pods").WithVersion("version"), "", admission.Delete, false, nil)) if err != nil { t.Errorf("ResourceQuota should admit all deletes: %v", err) } @@ -160,15 +161,15 @@ func TestAdmissionIgnoresDelete(t *testing.T) { // It verifies that creation of a pod that would have exceeded quota is properly failed // It verifies that create operations to a subresource that would have exceeded quota would succeed func TestAdmissionIgnoresSubresources(t *testing.T) { - resourceQuota := &api.ResourceQuota{} + resourceQuota := &corev1.ResourceQuota{} resourceQuota.Name = "quota" resourceQuota.Namespace = "test" - resourceQuota.Status = api.ResourceQuotaStatus{ - Hard: api.ResourceList{}, - Used: api.ResourceList{}, + resourceQuota.Status = corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{}, + Used: corev1.ResourceList{}, } - resourceQuota.Status.Hard[api.ResourceMemory] = resource.MustParse("2Gi") - resourceQuota.Status.Used[api.ResourceMemory] = resource.MustParse("1Gi") + resourceQuota.Status.Hard[corev1.ResourceMemory] = resource.MustParse("2Gi") + resourceQuota.Status.Used[corev1.ResourceMemory] = resource.MustParse("1Gi") stopCh := make(chan struct{}) defer close(stopCh) @@ -176,7 +177,7 @@ func TestAdmissionIgnoresSubresources(t *testing.T) { 
informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() config := &resourcequotaapi.Configuration{} quotaConfiguration := install.NewQuotaConfigurationForAdmission() evaluator := NewQuotaEvaluator(quotaAccessor, quotaConfiguration.IgnoredResources(), generic.NewRegistry(quotaConfiguration.Evaluators()), nil, config, 5, stopCh) @@ -185,13 +186,13 @@ func TestAdmissionIgnoresSubresources(t *testing.T) { Handler: admission.NewHandler(admission.Create, admission.Update), evaluator: evaluator, } - informerFactory.Core().InternalVersion().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) + informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) newPod := validPod("123", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("", ""))) - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) if err == nil { t.Errorf("Expected an error because the pod exceeded allowed quota") } - err = handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "subresource", admission.Create, false, nil)) + err = handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, 
corev1.Resource("pods").WithVersion("version"), "subresource", admission.Create, false, nil)) if err != nil { t.Errorf("Did not expect an error because the action went to a subresource: %v", err) } @@ -199,18 +200,18 @@ func TestAdmissionIgnoresSubresources(t *testing.T) { // TestAdmitBelowQuotaLimit verifies that a pod when created has its usage reflected on the quota func TestAdmitBelowQuotaLimit(t *testing.T) { - resourceQuota := &api.ResourceQuota{ + resourceQuota := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceCPU: resource.MustParse("3"), - api.ResourceMemory: resource.MustParse("100Gi"), - api.ResourcePods: resource.MustParse("5"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("3"), + corev1.ResourceMemory: resource.MustParse("100Gi"), + corev1.ResourcePods: resource.MustParse("5"), }, - Used: api.ResourceList{ - api.ResourceCPU: resource.MustParse("1"), - api.ResourceMemory: resource.MustParse("50Gi"), - api.ResourcePods: resource.MustParse("3"), + Used: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("50Gi"), + corev1.ResourcePods: resource.MustParse("3"), }, }, } @@ -221,7 +222,7 @@ func TestAdmitBelowQuotaLimit(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() config := &resourcequotaapi.Configuration{} quotaConfiguration := install.NewQuotaConfigurationForAdmission() evaluator := NewQuotaEvaluator(quotaAccessor, quotaConfiguration.IgnoredResources(), 
generic.NewRegistry(quotaConfiguration.Evaluators()), nil, config, 5, stopCh) @@ -230,9 +231,9 @@ func TestAdmitBelowQuotaLimit(t *testing.T) { Handler: admission.NewHandler(admission.Create, admission.Update), evaluator: evaluator, } - informerFactory.Core().InternalVersion().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) + informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("", ""))) - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) if err != nil { t.Errorf("Unexpected error: %v", err) } @@ -253,18 +254,18 @@ func TestAdmitBelowQuotaLimit(t *testing.T) { decimatedActions := removeListWatch(kubeClient.Actions()) lastActionIndex := len(decimatedActions) - 1 - usage := decimatedActions[lastActionIndex].(testcore.UpdateAction).GetObject().(*api.ResourceQuota) - expectedUsage := api.ResourceQuota{ - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceCPU: resource.MustParse("3"), - api.ResourceMemory: resource.MustParse("100Gi"), - api.ResourcePods: resource.MustParse("5"), + usage := decimatedActions[lastActionIndex].(testcore.UpdateAction).GetObject().(*corev1.ResourceQuota) + expectedUsage := corev1.ResourceQuota{ + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("3"), + corev1.ResourceMemory: resource.MustParse("100Gi"), + corev1.ResourcePods: resource.MustParse("5"), }, - Used: api.ResourceList{ - api.ResourceCPU: resource.MustParse("1100m"), - 
api.ResourceMemory: resource.MustParse("52Gi"), - api.ResourcePods: resource.MustParse("4"), + Used: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1100m"), + corev1.ResourceMemory: resource.MustParse("52Gi"), + corev1.ResourcePods: resource.MustParse("4"), }, }, } @@ -281,18 +282,18 @@ func TestAdmitBelowQuotaLimit(t *testing.T) { // TestAdmitDryRun verifies that a pod when created with dry-run doesn not have its usage reflected on the quota // and that dry-run requests can still be rejected if they would exceed the quota func TestAdmitDryRun(t *testing.T) { - resourceQuota := &api.ResourceQuota{ + resourceQuota := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceCPU: resource.MustParse("3"), - api.ResourceMemory: resource.MustParse("100Gi"), - api.ResourcePods: resource.MustParse("5"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("3"), + corev1.ResourceMemory: resource.MustParse("100Gi"), + corev1.ResourcePods: resource.MustParse("5"), }, - Used: api.ResourceList{ - api.ResourceCPU: resource.MustParse("1"), - api.ResourceMemory: resource.MustParse("50Gi"), - api.ResourcePods: resource.MustParse("3"), + Used: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("50Gi"), + corev1.ResourcePods: resource.MustParse("3"), }, }, } @@ -303,7 +304,7 @@ func TestAdmitDryRun(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() config := &resourcequotaapi.Configuration{} quotaConfiguration := 
install.NewQuotaConfigurationForAdmission() evaluator := NewQuotaEvaluator(quotaAccessor, quotaConfiguration.IgnoredResources(), generic.NewRegistry(quotaConfiguration.Evaluators()), nil, config, 5, stopCh) @@ -312,16 +313,16 @@ func TestAdmitDryRun(t *testing.T) { Handler: admission.NewHandler(admission.Create, admission.Update), evaluator: evaluator, } - informerFactory.Core().InternalVersion().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) + informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("", ""))) - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, true, nil)) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, true, nil)) if err != nil { t.Errorf("Unexpected error: %v", err) } newPod = validPod("too-large-pod", 1, getResourceRequirements(getResourceList("100m", "60Gi"), getResourceList("", ""))) - err = handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, true, nil)) + err = handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, true, nil)) if err == nil { t.Errorf("Expected error but got none") } @@ -334,18 +335,18 @@ func TestAdmitDryRun(t *testing.T) { // TestAdmitHandlesOldObjects verifies that admit handles updates correctly with old objects func TestAdmitHandlesOldObjects(t *testing.T) { // in this 
scenario, the old quota was based on a service type=loadbalancer - resourceQuota := &api.ResourceQuota{ + resourceQuota := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceServices: resource.MustParse("10"), - api.ResourceServicesLoadBalancers: resource.MustParse("10"), - api.ResourceServicesNodePorts: resource.MustParse("10"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceServices: resource.MustParse("10"), + corev1.ResourceServicesLoadBalancers: resource.MustParse("10"), + corev1.ResourceServicesNodePorts: resource.MustParse("10"), }, - Used: api.ResourceList{ - api.ResourceServices: resource.MustParse("1"), - api.ResourceServicesLoadBalancers: resource.MustParse("1"), - api.ResourceServicesNodePorts: resource.MustParse("0"), + Used: corev1.ResourceList{ + corev1.ResourceServices: resource.MustParse("1"), + corev1.ResourceServicesLoadBalancers: resource.MustParse("1"), + corev1.ResourceServicesNodePorts: resource.MustParse("0"), }, }, } @@ -358,7 +359,7 @@ func TestAdmitHandlesOldObjects(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() config := &resourcequotaapi.Configuration{} quotaConfiguration := install.NewQuotaConfigurationForAdmission() evaluator := NewQuotaEvaluator(quotaAccessor, quotaConfiguration.IgnoredResources(), generic.NewRegistry(quotaConfiguration.Evaluators()), nil, config, 5, stopCh) @@ -367,7 +368,7 @@ func TestAdmitHandlesOldObjects(t *testing.T) { Handler: admission.NewHandler(admission.Create, admission.Update), evaluator: evaluator, } - 
informerFactory.Core().InternalVersion().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) + informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) // old service was a load balancer, but updated version is a node port. existingService := &api.Service{ @@ -381,7 +382,7 @@ func TestAdmitHandlesOldObjects(t *testing.T) { Ports: []api.ServicePort{{Port: 1234}}, }, } - err := handler.Validate(admission.NewAttributesRecord(newService, existingService, api.Kind("Service").WithVersion("version"), newService.Namespace, newService.Name, api.Resource("services").WithVersion("version"), "", admission.Update, false, nil)) + err := handler.Validate(admission.NewAttributesRecord(newService, existingService, api.Kind("Service").WithVersion("version"), newService.Namespace, newService.Name, corev1.Resource("services").WithVersion("version"), "", admission.Update, false, nil)) if err != nil { t.Errorf("Unexpected error: %v", err) } @@ -404,21 +405,21 @@ func TestAdmitHandlesOldObjects(t *testing.T) { // verify usage decremented the loadbalancer, and incremented the nodeport, but kept the service the same. decimatedActions := removeListWatch(kubeClient.Actions()) lastActionIndex := len(decimatedActions) - 1 - usage := decimatedActions[lastActionIndex].(testcore.UpdateAction).GetObject().(*api.ResourceQuota) + usage := decimatedActions[lastActionIndex].(testcore.UpdateAction).GetObject().(*corev1.ResourceQuota) - // Verify service usage. Since we don't add negative values, the api.ResourceServicesLoadBalancers + // Verify service usage. 
Since we don't add negative values, the corev1.ResourceServicesLoadBalancers // will remain on last reported value - expectedUsage := api.ResourceQuota{ - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceServices: resource.MustParse("10"), - api.ResourceServicesLoadBalancers: resource.MustParse("10"), - api.ResourceServicesNodePorts: resource.MustParse("10"), + expectedUsage := corev1.ResourceQuota{ + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceServices: resource.MustParse("10"), + corev1.ResourceServicesLoadBalancers: resource.MustParse("10"), + corev1.ResourceServicesNodePorts: resource.MustParse("10"), }, - Used: api.ResourceList{ - api.ResourceServices: resource.MustParse("1"), - api.ResourceServicesLoadBalancers: resource.MustParse("1"), - api.ResourceServicesNodePorts: resource.MustParse("1"), + Used: corev1.ResourceList{ + corev1.ResourceServices: resource.MustParse("1"), + corev1.ResourceServicesLoadBalancers: resource.MustParse("1"), + corev1.ResourceServicesNodePorts: resource.MustParse("1"), }, }, } @@ -433,16 +434,16 @@ func TestAdmitHandlesOldObjects(t *testing.T) { } func TestAdmitHandlesNegativePVCUpdates(t *testing.T) { - resourceQuota := &api.ResourceQuota{ + resourceQuota := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourcePersistentVolumeClaims: resource.MustParse("3"), - api.ResourceRequestsStorage: resource.MustParse("100Gi"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourcePersistentVolumeClaims: resource.MustParse("3"), + corev1.ResourceRequestsStorage: resource.MustParse("100Gi"), }, - Used: api.ResourceList{ - api.ResourcePersistentVolumeClaims: resource.MustParse("1"), - api.ResourceRequestsStorage: resource.MustParse("10Gi"), + Used: corev1.ResourceList{ + corev1.ResourcePersistentVolumeClaims: 
resource.MustParse("1"), + corev1.ResourceRequestsStorage: resource.MustParse("10Gi"), }, }, } @@ -465,7 +466,7 @@ func TestAdmitHandlesNegativePVCUpdates(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() config := &resourcequotaapi.Configuration{} quotaConfiguration := install.NewQuotaConfigurationForAdmission() evaluator := NewQuotaEvaluator(quotaAccessor, quotaConfiguration.IgnoredResources(), generic.NewRegistry(quotaConfiguration.Evaluators()), nil, config, 5, stopCh) @@ -474,7 +475,7 @@ func TestAdmitHandlesNegativePVCUpdates(t *testing.T) { Handler: admission.NewHandler(admission.Create, admission.Update), evaluator: evaluator, } - informerFactory.Core().InternalVersion().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) + informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) oldPVC := &api.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{Name: "pvc-to-update", Namespace: "test", ResourceVersion: "1"}, @@ -490,7 +491,7 @@ func TestAdmitHandlesNegativePVCUpdates(t *testing.T) { }, } - err = handler.Validate(admission.NewAttributesRecord(newPVC, oldPVC, api.Kind("PersistentVolumeClaim").WithVersion("version"), newPVC.Namespace, newPVC.Name, api.Resource("persistentvolumeclaims").WithVersion("version"), "", admission.Update, false, nil)) + err = handler.Validate(admission.NewAttributesRecord(newPVC, oldPVC, api.Kind("PersistentVolumeClaim").WithVersion("version"), newPVC.Namespace, newPVC.Name, corev1.Resource("persistentvolumeclaims").WithVersion("version"), "", admission.Update, false, nil)) if err != nil { t.Errorf("Unexpected error: %v", err) } @@ -500,16 +501,16 @@ func TestAdmitHandlesNegativePVCUpdates(t 
*testing.T) { } func TestAdmitHandlesPVCUpdates(t *testing.T) { - resourceQuota := &api.ResourceQuota{ + resourceQuota := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourcePersistentVolumeClaims: resource.MustParse("3"), - api.ResourceRequestsStorage: resource.MustParse("100Gi"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourcePersistentVolumeClaims: resource.MustParse("3"), + corev1.ResourceRequestsStorage: resource.MustParse("100Gi"), }, - Used: api.ResourceList{ - api.ResourcePersistentVolumeClaims: resource.MustParse("1"), - api.ResourceRequestsStorage: resource.MustParse("10Gi"), + Used: corev1.ResourceList{ + corev1.ResourcePersistentVolumeClaims: resource.MustParse("1"), + corev1.ResourceRequestsStorage: resource.MustParse("10Gi"), }, }, } @@ -532,7 +533,7 @@ func TestAdmitHandlesPVCUpdates(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() config := &resourcequotaapi.Configuration{} quotaConfiguration := install.NewQuotaConfigurationForAdmission() evaluator := NewQuotaEvaluator(quotaAccessor, quotaConfiguration.IgnoredResources(), generic.NewRegistry(quotaConfiguration.Evaluators()), nil, config, 5, stopCh) @@ -541,7 +542,7 @@ func TestAdmitHandlesPVCUpdates(t *testing.T) { Handler: admission.NewHandler(admission.Create, admission.Update), evaluator: evaluator, } - informerFactory.Core().InternalVersion().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) + informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) oldPVC := 
&api.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{Name: "pvc-to-update", Namespace: "test", ResourceVersion: "1"}, @@ -557,7 +558,7 @@ func TestAdmitHandlesPVCUpdates(t *testing.T) { }, } - err = handler.Validate(admission.NewAttributesRecord(newPVC, oldPVC, api.Kind("PersistentVolumeClaim").WithVersion("version"), newPVC.Namespace, newPVC.Name, api.Resource("persistentvolumeclaims").WithVersion("version"), "", admission.Update, false, nil)) + err = handler.Validate(admission.NewAttributesRecord(newPVC, oldPVC, api.Kind("PersistentVolumeClaim").WithVersion("version"), newPVC.Namespace, newPVC.Name, corev1.Resource("persistentvolumeclaims").WithVersion("version"), "", admission.Update, false, nil)) if err != nil { t.Errorf("Unexpected error: %v", err) } @@ -580,16 +581,16 @@ func TestAdmitHandlesPVCUpdates(t *testing.T) { decimatedActions := removeListWatch(kubeClient.Actions()) lastActionIndex := len(decimatedActions) - 1 - usage := decimatedActions[lastActionIndex].(testcore.UpdateAction).GetObject().(*api.ResourceQuota) - expectedUsage := api.ResourceQuota{ - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourcePersistentVolumeClaims: resource.MustParse("3"), - api.ResourceRequestsStorage: resource.MustParse("100Gi"), + usage := decimatedActions[lastActionIndex].(testcore.UpdateAction).GetObject().(*corev1.ResourceQuota) + expectedUsage := corev1.ResourceQuota{ + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourcePersistentVolumeClaims: resource.MustParse("3"), + corev1.ResourceRequestsStorage: resource.MustParse("100Gi"), }, - Used: api.ResourceList{ - api.ResourcePersistentVolumeClaims: resource.MustParse("1"), - api.ResourceRequestsStorage: resource.MustParse("15Gi"), + Used: corev1.ResourceList{ + corev1.ResourcePersistentVolumeClaims: resource.MustParse("1"), + corev1.ResourceRequestsStorage: resource.MustParse("15Gi"), }, }, } @@ -607,18 +608,18 @@ func TestAdmitHandlesPVCUpdates(t *testing.T) { // 
TestAdmitHandlesCreatingUpdates verifies that admit handles updates which behave as creates func TestAdmitHandlesCreatingUpdates(t *testing.T) { // in this scenario, there is an existing service - resourceQuota := &api.ResourceQuota{ + resourceQuota := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceServices: resource.MustParse("10"), - api.ResourceServicesLoadBalancers: resource.MustParse("10"), - api.ResourceServicesNodePorts: resource.MustParse("10"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceServices: resource.MustParse("10"), + corev1.ResourceServicesLoadBalancers: resource.MustParse("10"), + corev1.ResourceServicesNodePorts: resource.MustParse("10"), }, - Used: api.ResourceList{ - api.ResourceServices: resource.MustParse("1"), - api.ResourceServicesLoadBalancers: resource.MustParse("1"), - api.ResourceServicesNodePorts: resource.MustParse("0"), + Used: corev1.ResourceList{ + corev1.ResourceServices: resource.MustParse("1"), + corev1.ResourceServicesLoadBalancers: resource.MustParse("1"), + corev1.ResourceServicesNodePorts: resource.MustParse("0"), }, }, } @@ -631,7 +632,7 @@ func TestAdmitHandlesCreatingUpdates(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() config := &resourcequotaapi.Configuration{} quotaConfiguration := install.NewQuotaConfigurationForAdmission() evaluator := NewQuotaEvaluator(quotaAccessor, quotaConfiguration.IgnoredResources(), generic.NewRegistry(quotaConfiguration.Evaluators()), nil, config, 5, stopCh) @@ -640,7 +641,7 @@ func 
TestAdmitHandlesCreatingUpdates(t *testing.T) { Handler: admission.NewHandler(admission.Create, admission.Update), evaluator: evaluator, } - informerFactory.Core().InternalVersion().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) + informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) // old service didn't exist, so this update is actually a create oldService := &api.Service{ @@ -654,7 +655,7 @@ func TestAdmitHandlesCreatingUpdates(t *testing.T) { Ports: []api.ServicePort{{Port: 1234}}, }, } - err := handler.Validate(admission.NewAttributesRecord(newService, oldService, api.Kind("Service").WithVersion("version"), newService.Namespace, newService.Name, api.Resource("services").WithVersion("version"), "", admission.Update, false, nil)) + err := handler.Validate(admission.NewAttributesRecord(newService, oldService, api.Kind("Service").WithVersion("version"), newService.Namespace, newService.Name, corev1.Resource("services").WithVersion("version"), "", admission.Update, false, nil)) if err != nil { t.Errorf("Unexpected error: %v", err) } @@ -677,18 +678,18 @@ func TestAdmitHandlesCreatingUpdates(t *testing.T) { // verify that the "old" object was ignored for calculating the new usage decimatedActions := removeListWatch(kubeClient.Actions()) lastActionIndex := len(decimatedActions) - 1 - usage := decimatedActions[lastActionIndex].(testcore.UpdateAction).GetObject().(*api.ResourceQuota) - expectedUsage := api.ResourceQuota{ - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceServices: resource.MustParse("10"), - api.ResourceServicesLoadBalancers: resource.MustParse("10"), - api.ResourceServicesNodePorts: resource.MustParse("10"), + usage := decimatedActions[lastActionIndex].(testcore.UpdateAction).GetObject().(*corev1.ResourceQuota) + expectedUsage := corev1.ResourceQuota{ + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceServices: resource.MustParse("10"), + 
corev1.ResourceServicesLoadBalancers: resource.MustParse("10"), + corev1.ResourceServicesNodePorts: resource.MustParse("10"), }, - Used: api.ResourceList{ - api.ResourceServices: resource.MustParse("2"), - api.ResourceServicesLoadBalancers: resource.MustParse("1"), - api.ResourceServicesNodePorts: resource.MustParse("1"), + Used: corev1.ResourceList{ + corev1.ResourceServices: resource.MustParse("2"), + corev1.ResourceServicesLoadBalancers: resource.MustParse("1"), + corev1.ResourceServicesNodePorts: resource.MustParse("1"), }, }, } @@ -704,18 +705,18 @@ func TestAdmitHandlesCreatingUpdates(t *testing.T) { // TestAdmitExceedQuotaLimit verifies that if a pod exceeded allowed usage that its rejected during admission. func TestAdmitExceedQuotaLimit(t *testing.T) { - resourceQuota := &api.ResourceQuota{ + resourceQuota := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceCPU: resource.MustParse("3"), - api.ResourceMemory: resource.MustParse("100Gi"), - api.ResourcePods: resource.MustParse("5"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("3"), + corev1.ResourceMemory: resource.MustParse("100Gi"), + corev1.ResourcePods: resource.MustParse("5"), }, - Used: api.ResourceList{ - api.ResourceCPU: resource.MustParse("1"), - api.ResourceMemory: resource.MustParse("50Gi"), - api.ResourcePods: resource.MustParse("3"), + Used: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("50Gi"), + corev1.ResourcePods: resource.MustParse("3"), }, }, } @@ -726,7 +727,7 @@ func TestAdmitExceedQuotaLimit(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = 
informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() config := &resourcequotaapi.Configuration{} quotaConfiguration := install.NewQuotaConfigurationForAdmission() evaluator := NewQuotaEvaluator(quotaAccessor, quotaConfiguration.IgnoredResources(), generic.NewRegistry(quotaConfiguration.Evaluators()), nil, config, 5, stopCh) @@ -735,9 +736,9 @@ func TestAdmitExceedQuotaLimit(t *testing.T) { Handler: admission.NewHandler(admission.Create, admission.Update), evaluator: evaluator, } - informerFactory.Core().InternalVersion().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) + informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) newPod := validPod("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", ""))) - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) if err == nil { t.Errorf("Expected an error exceeding quota") } @@ -747,20 +748,20 @@ func TestAdmitExceedQuotaLimit(t *testing.T) { // specified on the pod. In this case, we create a quota that tracks cpu request, memory request, and memory limit. // We ensure that a pod that does not specify a memory limit that it fails in admission. 
func TestAdmitEnforceQuotaConstraints(t *testing.T) { - resourceQuota := &api.ResourceQuota{ + resourceQuota := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceCPU: resource.MustParse("3"), - api.ResourceMemory: resource.MustParse("100Gi"), - api.ResourceLimitsMemory: resource.MustParse("200Gi"), - api.ResourcePods: resource.MustParse("5"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("3"), + corev1.ResourceMemory: resource.MustParse("100Gi"), + corev1.ResourceLimitsMemory: resource.MustParse("200Gi"), + corev1.ResourcePods: resource.MustParse("5"), }, - Used: api.ResourceList{ - api.ResourceCPU: resource.MustParse("1"), - api.ResourceMemory: resource.MustParse("50Gi"), - api.ResourceLimitsMemory: resource.MustParse("100Gi"), - api.ResourcePods: resource.MustParse("3"), + Used: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("50Gi"), + corev1.ResourceLimitsMemory: resource.MustParse("100Gi"), + corev1.ResourcePods: resource.MustParse("3"), }, }, } @@ -771,7 +772,7 @@ func TestAdmitEnforceQuotaConstraints(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() config := &resourcequotaapi.Configuration{} quotaConfiguration := install.NewQuotaConfigurationForAdmission() evaluator := NewQuotaEvaluator(quotaAccessor, quotaConfiguration.IgnoredResources(), generic.NewRegistry(quotaConfiguration.Evaluators()), nil, config, 5, stopCh) @@ -780,10 +781,10 @@ func TestAdmitEnforceQuotaConstraints(t *testing.T) { 
Handler: admission.NewHandler(admission.Create, admission.Update), evaluator: evaluator, } - informerFactory.Core().InternalVersion().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) + informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) // verify all values are specified as required on the quota newPod := validPod("not-allowed-pod", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("200m", ""))) - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) if err == nil { t.Errorf("Expected an error because the pod does not specify a memory limit") } @@ -791,20 +792,20 @@ func TestAdmitEnforceQuotaConstraints(t *testing.T) { // TestAdmitPodInNamespaceWithoutQuota ensures that if a namespace has no quota, that a pod can get in func TestAdmitPodInNamespaceWithoutQuota(t *testing.T) { - resourceQuota := &api.ResourceQuota{ + resourceQuota := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "other", ResourceVersion: "124"}, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceCPU: resource.MustParse("3"), - api.ResourceMemory: resource.MustParse("100Gi"), - api.ResourceLimitsMemory: resource.MustParse("200Gi"), - api.ResourcePods: resource.MustParse("5"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("3"), + corev1.ResourceMemory: resource.MustParse("100Gi"), + corev1.ResourceLimitsMemory: resource.MustParse("200Gi"), + corev1.ResourcePods: resource.MustParse("5"), }, - Used: api.ResourceList{ - api.ResourceCPU: 
resource.MustParse("1"), - api.ResourceMemory: resource.MustParse("50Gi"), - api.ResourceLimitsMemory: resource.MustParse("100Gi"), - api.ResourcePods: resource.MustParse("3"), + Used: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("50Gi"), + corev1.ResourceLimitsMemory: resource.MustParse("100Gi"), + corev1.ResourcePods: resource.MustParse("3"), }, }, } @@ -819,7 +820,7 @@ func TestAdmitPodInNamespaceWithoutQuota(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() quotaAccessor.liveLookupCache = liveLookupCache config := &resourcequotaapi.Configuration{} quotaConfiguration := install.NewQuotaConfigurationForAdmission() @@ -830,11 +831,11 @@ func TestAdmitPodInNamespaceWithoutQuota(t *testing.T) { evaluator: evaluator, } // Add to the index - informerFactory.Core().InternalVersion().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) + informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) newPod := validPod("not-allowed-pod", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("200m", ""))) // Add to the lru cache so we do not do a live client lookup - liveLookupCache.Add(newPod.Namespace, liveLookupEntry{expiry: time.Now().Add(time.Duration(30 * time.Second)), items: []*api.ResourceQuota{}}) - err = handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) + liveLookupCache.Add(newPod.Namespace, liveLookupEntry{expiry: time.Now().Add(time.Duration(30 * time.Second)), items: 
[]*corev1.ResourceQuota{}}) + err = handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) if err != nil { t.Errorf("Did not expect an error because the pod is in a different namespace than the quota") } @@ -844,39 +845,39 @@ func TestAdmitPodInNamespaceWithoutQuota(t *testing.T) { // It creates a terminating and non-terminating quota, and creates a terminating pod. // It ensures that the terminating quota is incremented, and the non-terminating quota is not. func TestAdmitBelowTerminatingQuotaLimit(t *testing.T) { - resourceQuotaNonTerminating := &api.ResourceQuota{ + resourceQuotaNonTerminating := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota-non-terminating", Namespace: "test", ResourceVersion: "124"}, - Spec: api.ResourceQuotaSpec{ - Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeNotTerminating}, + Spec: corev1.ResourceQuotaSpec{ + Scopes: []corev1.ResourceQuotaScope{corev1.ResourceQuotaScopeNotTerminating}, }, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceCPU: resource.MustParse("3"), - api.ResourceMemory: resource.MustParse("100Gi"), - api.ResourcePods: resource.MustParse("5"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("3"), + corev1.ResourceMemory: resource.MustParse("100Gi"), + corev1.ResourcePods: resource.MustParse("5"), }, - Used: api.ResourceList{ - api.ResourceCPU: resource.MustParse("1"), - api.ResourceMemory: resource.MustParse("50Gi"), - api.ResourcePods: resource.MustParse("3"), + Used: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("50Gi"), + corev1.ResourcePods: resource.MustParse("3"), }, }, } - resourceQuotaTerminating := &api.ResourceQuota{ + resourceQuotaTerminating := &corev1.ResourceQuota{ 
ObjectMeta: metav1.ObjectMeta{Name: "quota-terminating", Namespace: "test", ResourceVersion: "124"}, - Spec: api.ResourceQuotaSpec{ - Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeTerminating}, + Spec: corev1.ResourceQuotaSpec{ + Scopes: []corev1.ResourceQuotaScope{corev1.ResourceQuotaScopeTerminating}, }, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceCPU: resource.MustParse("3"), - api.ResourceMemory: resource.MustParse("100Gi"), - api.ResourcePods: resource.MustParse("5"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("3"), + corev1.ResourceMemory: resource.MustParse("100Gi"), + corev1.ResourcePods: resource.MustParse("5"), }, - Used: api.ResourceList{ - api.ResourceCPU: resource.MustParse("1"), - api.ResourceMemory: resource.MustParse("50Gi"), - api.ResourcePods: resource.MustParse("3"), + Used: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("50Gi"), + corev1.ResourcePods: resource.MustParse("3"), }, }, } @@ -887,7 +888,7 @@ func TestAdmitBelowTerminatingQuotaLimit(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() config := &resourcequotaapi.Configuration{} quotaConfiguration := install.NewQuotaConfigurationForAdmission() evaluator := NewQuotaEvaluator(quotaAccessor, quotaConfiguration.IgnoredResources(), generic.NewRegistry(quotaConfiguration.Evaluators()), nil, config, 5, stopCh) @@ -896,14 +897,14 @@ func TestAdmitBelowTerminatingQuotaLimit(t *testing.T) { Handler: admission.NewHandler(admission.Create, admission.Update), evaluator: evaluator, } - 
informerFactory.Core().InternalVersion().ResourceQuotas().Informer().GetIndexer().Add(resourceQuotaNonTerminating) - informerFactory.Core().InternalVersion().ResourceQuotas().Informer().GetIndexer().Add(resourceQuotaTerminating) + informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuotaNonTerminating) + informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuotaTerminating) // create a pod that has an active deadline newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("", ""))) activeDeadlineSeconds := int64(30) newPod.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) if err != nil { t.Errorf("Unexpected error: %v", err) } @@ -924,24 +925,24 @@ func TestAdmitBelowTerminatingQuotaLimit(t *testing.T) { decimatedActions := removeListWatch(kubeClient.Actions()) lastActionIndex := len(decimatedActions) - 1 - usage := decimatedActions[lastActionIndex].(testcore.UpdateAction).GetObject().(*api.ResourceQuota) + usage := decimatedActions[lastActionIndex].(testcore.UpdateAction).GetObject().(*corev1.ResourceQuota) // ensure only the quota-terminating was updated if usage.Name != resourceQuotaTerminating.Name { t.Errorf("Incremented the wrong quota, expected %v, actual %v", resourceQuotaTerminating.Name, usage.Name) } - expectedUsage := api.ResourceQuota{ - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceCPU: resource.MustParse("3"), - api.ResourceMemory: resource.MustParse("100Gi"), - api.ResourcePods: 
resource.MustParse("5"), + expectedUsage := corev1.ResourceQuota{ + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("3"), + corev1.ResourceMemory: resource.MustParse("100Gi"), + corev1.ResourcePods: resource.MustParse("5"), }, - Used: api.ResourceList{ - api.ResourceCPU: resource.MustParse("1100m"), - api.ResourceMemory: resource.MustParse("52Gi"), - api.ResourcePods: resource.MustParse("4"), + Used: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1100m"), + corev1.ResourceMemory: resource.MustParse("52Gi"), + corev1.ResourcePods: resource.MustParse("4"), }, }, } @@ -958,31 +959,31 @@ func TestAdmitBelowTerminatingQuotaLimit(t *testing.T) { // TestAdmitBelowBestEffortQuotaLimit creates a best effort and non-best effort quota. // It verifies that best effort pods are properly scoped to the best effort quota document. func TestAdmitBelowBestEffortQuotaLimit(t *testing.T) { - resourceQuotaBestEffort := &api.ResourceQuota{ + resourceQuotaBestEffort := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota-besteffort", Namespace: "test", ResourceVersion: "124"}, - Spec: api.ResourceQuotaSpec{ - Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeBestEffort}, + Spec: corev1.ResourceQuotaSpec{ + Scopes: []corev1.ResourceQuotaScope{corev1.ResourceQuotaScopeBestEffort}, }, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourcePods: resource.MustParse("5"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourcePods: resource.MustParse("5"), }, - Used: api.ResourceList{ - api.ResourcePods: resource.MustParse("3"), + Used: corev1.ResourceList{ + corev1.ResourcePods: resource.MustParse("3"), }, }, } - resourceQuotaNotBestEffort := &api.ResourceQuota{ + resourceQuotaNotBestEffort := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota-not-besteffort", Namespace: "test", ResourceVersion: "124"}, - Spec: api.ResourceQuotaSpec{ - 
Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeNotBestEffort}, + Spec: corev1.ResourceQuotaSpec{ + Scopes: []corev1.ResourceQuotaScope{corev1.ResourceQuotaScopeNotBestEffort}, }, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourcePods: resource.MustParse("5"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourcePods: resource.MustParse("5"), }, - Used: api.ResourceList{ - api.ResourcePods: resource.MustParse("3"), + Used: corev1.ResourceList{ + corev1.ResourcePods: resource.MustParse("3"), }, }, } @@ -993,7 +994,7 @@ func TestAdmitBelowBestEffortQuotaLimit(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() config := &resourcequotaapi.Configuration{} quotaConfiguration := install.NewQuotaConfigurationForAdmission() evaluator := NewQuotaEvaluator(quotaAccessor, quotaConfiguration.IgnoredResources(), generic.NewRegistry(quotaConfiguration.Evaluators()), nil, config, 5, stopCh) @@ -1002,12 +1003,12 @@ func TestAdmitBelowBestEffortQuotaLimit(t *testing.T) { Handler: admission.NewHandler(admission.Create, admission.Update), evaluator: evaluator, } - informerFactory.Core().InternalVersion().ResourceQuotas().Informer().GetIndexer().Add(resourceQuotaBestEffort) - informerFactory.Core().InternalVersion().ResourceQuotas().Informer().GetIndexer().Add(resourceQuotaNotBestEffort) + informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuotaBestEffort) + informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuotaNotBestEffort) // create a pod that is best effort because it does not make a request for anything newPod := validPod("allowed-pod", 1, 
getResourceRequirements(getResourceList("", ""), getResourceList("", ""))) - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) if err != nil { t.Errorf("Unexpected error: %v", err) } @@ -1023,19 +1024,19 @@ func TestAdmitBelowBestEffortQuotaLimit(t *testing.T) { } decimatedActions := removeListWatch(kubeClient.Actions()) lastActionIndex := len(decimatedActions) - 1 - usage := decimatedActions[lastActionIndex].(testcore.UpdateAction).GetObject().(*api.ResourceQuota) + usage := decimatedActions[lastActionIndex].(testcore.UpdateAction).GetObject().(*corev1.ResourceQuota) if usage.Name != resourceQuotaBestEffort.Name { t.Errorf("Incremented the wrong quota, expected %v, actual %v", resourceQuotaBestEffort.Name, usage.Name) } - expectedUsage := api.ResourceQuota{ - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourcePods: resource.MustParse("5"), + expectedUsage := corev1.ResourceQuota{ + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourcePods: resource.MustParse("5"), }, - Used: api.ResourceList{ - api.ResourcePods: resource.MustParse("4"), + Used: corev1.ResourceList{ + corev1.ResourcePods: resource.MustParse("4"), }, }, } @@ -1065,17 +1066,17 @@ func removeListWatch(in []testcore.Action) []testcore.Action { // TestAdmitBestEffortQuotaLimitIgnoresBurstable validates that a besteffort quota does not match a resource // guaranteed pod. 
func TestAdmitBestEffortQuotaLimitIgnoresBurstable(t *testing.T) { - resourceQuota := &api.ResourceQuota{ + resourceQuota := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota-besteffort", Namespace: "test", ResourceVersion: "124"}, - Spec: api.ResourceQuotaSpec{ - Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeBestEffort}, + Spec: corev1.ResourceQuotaSpec{ + Scopes: []corev1.ResourceQuotaScope{corev1.ResourceQuotaScopeBestEffort}, }, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourcePods: resource.MustParse("5"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourcePods: resource.MustParse("5"), }, - Used: api.ResourceList{ - api.ResourcePods: resource.MustParse("3"), + Used: corev1.ResourceList{ + corev1.ResourcePods: resource.MustParse("3"), }, }, } @@ -1086,7 +1087,7 @@ func TestAdmitBestEffortQuotaLimitIgnoresBurstable(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() config := &resourcequotaapi.Configuration{} quotaConfiguration := install.NewQuotaConfigurationForAdmission() evaluator := NewQuotaEvaluator(quotaAccessor, quotaConfiguration.IgnoredResources(), generic.NewRegistry(quotaConfiguration.Evaluators()), nil, config, 5, stopCh) @@ -1095,9 +1096,9 @@ func TestAdmitBestEffortQuotaLimitIgnoresBurstable(t *testing.T) { Handler: admission.NewHandler(admission.Create, admission.Update), evaluator: evaluator, } - informerFactory.Core().InternalVersion().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) + informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) newPod := validPod("allowed-pod", 1, 
getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", ""))) - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) if err != nil { t.Errorf("Unexpected error: %v", err) } @@ -1110,32 +1111,32 @@ func TestAdmitBestEffortQuotaLimitIgnoresBurstable(t *testing.T) { func TestHasUsageStats(t *testing.T) { testCases := map[string]struct { - a api.ResourceQuota + a corev1.ResourceQuota expected bool }{ "empty": { - a: api.ResourceQuota{Status: api.ResourceQuotaStatus{Hard: api.ResourceList{}}}, + a: corev1.ResourceQuota{Status: corev1.ResourceQuotaStatus{Hard: corev1.ResourceList{}}}, expected: true, }, "hard-only": { - a: api.ResourceQuota{ - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceMemory: resource.MustParse("1Gi"), + a: corev1.ResourceQuota{ + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("1Gi"), }, - Used: api.ResourceList{}, + Used: corev1.ResourceList{}, }, }, expected: false, }, "hard-used": { - a: api.ResourceQuota{ - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceMemory: resource.MustParse("1Gi"), + a: corev1.ResourceQuota{ + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("1Gi"), }, - Used: api.ResourceList{ - api.ResourceMemory: resource.MustParse("500Mi"), + Used: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("500Mi"), }, }, }, @@ -1153,14 +1154,14 @@ func TestHasUsageStats(t *testing.T) { // namespace, it will be set. 
func TestAdmissionSetsMissingNamespace(t *testing.T) { namespace := "test" - resourceQuota := &api.ResourceQuota{ + resourceQuota := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: namespace, ResourceVersion: "124"}, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourcePods: resource.MustParse("3"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourcePods: resource.MustParse("3"), }, - Used: api.ResourceList{ - api.ResourcePods: resource.MustParse("1"), + Used: corev1.ResourceList{ + corev1.ResourcePods: resource.MustParse("1"), }, }, } @@ -1172,7 +1173,7 @@ func TestAdmissionSetsMissingNamespace(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() config := &resourcequotaapi.Configuration{} quotaConfiguration := install.NewQuotaConfigurationForAdmission() evaluator := NewQuotaEvaluator(quotaAccessor, quotaConfiguration.IgnoredResources(), generic.NewRegistry(quotaConfiguration.Evaluators()), nil, config, 5, stopCh) @@ -1181,13 +1182,13 @@ func TestAdmissionSetsMissingNamespace(t *testing.T) { Handler: admission.NewHandler(admission.Create, admission.Update), evaluator: evaluator, } - informerFactory.Core().InternalVersion().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) + informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) newPod := validPod("pod-without-namespace", 1, getResourceRequirements(getResourceList("1", "2Gi"), getResourceList("", ""))) // unset the namespace newPod.ObjectMeta.Namespace = "" - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), namespace, newPod.Name, 
api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) if err != nil { t.Errorf("Got unexpected error: %v", err) } @@ -1198,16 +1199,16 @@ func TestAdmissionSetsMissingNamespace(t *testing.T) { // TestAdmitRejectsNegativeUsage verifies that usage for any measured resource cannot be negative. func TestAdmitRejectsNegativeUsage(t *testing.T) { - resourceQuota := &api.ResourceQuota{ + resourceQuota := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourcePersistentVolumeClaims: resource.MustParse("3"), - api.ResourceRequestsStorage: resource.MustParse("100Gi"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourcePersistentVolumeClaims: resource.MustParse("3"), + corev1.ResourceRequestsStorage: resource.MustParse("100Gi"), }, - Used: api.ResourceList{ - api.ResourcePersistentVolumeClaims: resource.MustParse("1"), - api.ResourceRequestsStorage: resource.MustParse("10Gi"), + Used: corev1.ResourceList{ + corev1.ResourcePersistentVolumeClaims: resource.MustParse("1"), + corev1.ResourceRequestsStorage: resource.MustParse("10Gi"), }, }, } @@ -1218,7 +1219,7 @@ func TestAdmitRejectsNegativeUsage(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() config := &resourcequotaapi.Configuration{} quotaConfiguration := install.NewQuotaConfigurationForAdmission() evaluator := 
NewQuotaEvaluator(quotaAccessor, quotaConfiguration.IgnoredResources(), generic.NewRegistry(quotaConfiguration.Evaluators()), nil, config, 5, stopCh) @@ -1227,17 +1228,17 @@ func TestAdmitRejectsNegativeUsage(t *testing.T) { Handler: admission.NewHandler(admission.Create, admission.Update), evaluator: evaluator, } - informerFactory.Core().InternalVersion().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) + informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) // verify quota rejects negative pvc storage requests newPvc := validPersistentVolumeClaim("not-allowed-pvc", getResourceRequirements(api.ResourceList{api.ResourceStorage: resource.MustParse("-1Gi")}, api.ResourceList{})) - err := handler.Validate(admission.NewAttributesRecord(newPvc, nil, api.Kind("PersistentVolumeClaim").WithVersion("version"), newPvc.Namespace, newPvc.Name, api.Resource("persistentvolumeclaims").WithVersion("version"), "", admission.Create, false, nil)) + err := handler.Validate(admission.NewAttributesRecord(newPvc, nil, api.Kind("PersistentVolumeClaim").WithVersion("version"), newPvc.Namespace, newPvc.Name, corev1.Resource("persistentvolumeclaims").WithVersion("version"), "", admission.Create, false, nil)) if err == nil { t.Errorf("Expected an error because the pvc has negative storage usage") } // verify quota accepts non-negative pvc storage requests newPvc = validPersistentVolumeClaim("not-allowed-pvc", getResourceRequirements(api.ResourceList{api.ResourceStorage: resource.MustParse("1Gi")}, api.ResourceList{})) - err = handler.Validate(admission.NewAttributesRecord(newPvc, nil, api.Kind("PersistentVolumeClaim").WithVersion("version"), newPvc.Namespace, newPvc.Name, api.Resource("persistentvolumeclaims").WithVersion("version"), "", admission.Create, false, nil)) + err = handler.Validate(admission.NewAttributesRecord(newPvc, nil, api.Kind("PersistentVolumeClaim").WithVersion("version"), newPvc.Namespace, newPvc.Name, 
corev1.Resource("persistentvolumeclaims").WithVersion("version"), "", admission.Create, false, nil)) if err != nil { t.Errorf("Unexpected error: %v", err) } @@ -1245,16 +1246,16 @@ func TestAdmitRejectsNegativeUsage(t *testing.T) { // TestAdmitWhenUnrelatedResourceExceedsQuota verifies that if resource X exceeds quota, it does not prohibit resource Y from admission. func TestAdmitWhenUnrelatedResourceExceedsQuota(t *testing.T) { - resourceQuota := &api.ResourceQuota{ + resourceQuota := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceServices: resource.MustParse("3"), - api.ResourcePods: resource.MustParse("4"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceServices: resource.MustParse("3"), + corev1.ResourcePods: resource.MustParse("4"), }, - Used: api.ResourceList{ - api.ResourceServices: resource.MustParse("4"), - api.ResourcePods: resource.MustParse("1"), + Used: corev1.ResourceList{ + corev1.ResourceServices: resource.MustParse("4"), + corev1.ResourcePods: resource.MustParse("1"), }, }, } @@ -1265,7 +1266,7 @@ func TestAdmitWhenUnrelatedResourceExceedsQuota(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() config := &resourcequotaapi.Configuration{} quotaConfiguration := install.NewQuotaConfigurationForAdmission() evaluator := NewQuotaEvaluator(quotaAccessor, quotaConfiguration.IgnoredResources(), generic.NewRegistry(quotaConfiguration.Evaluators()), nil, config, 5, stopCh) @@ -1274,11 +1275,11 @@ func TestAdmitWhenUnrelatedResourceExceedsQuota(t *testing.T) { Handler: 
admission.NewHandler(admission.Create, admission.Update), evaluator: evaluator, } - informerFactory.Core().InternalVersion().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) + informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) // create a pod that should pass existing quota newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", ""))) - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) if err != nil { t.Errorf("Unexpected error: %v", err) } @@ -1293,7 +1294,7 @@ func TestAdmitLimitedResourceNoQuota(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() // disable consumption of cpu unless there is a covering quota. 
config := &resourcequotaapi.Configuration{ @@ -1312,7 +1313,7 @@ func TestAdmitLimitedResourceNoQuota(t *testing.T) { evaluator: evaluator, } newPod := validPod("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", ""))) - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) if err == nil { t.Errorf("Expected an error for consuming a limited resource without quota.") } @@ -1327,7 +1328,7 @@ func TestAdmitLimitedResourceNoQuotaIgnoresNonMatchingResources(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() // disable consumption of cpu unless there is a covering quota. 
config := &resourcequotaapi.Configuration{ @@ -1346,7 +1347,7 @@ func TestAdmitLimitedResourceNoQuotaIgnoresNonMatchingResources(t *testing.T) { evaluator: evaluator, } newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", ""))) - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) if err != nil { t.Fatalf("Unexpected error: %v", err) } @@ -1354,14 +1355,14 @@ func TestAdmitLimitedResourceNoQuotaIgnoresNonMatchingResources(t *testing.T) { // TestAdmitLimitedResourceWithQuota verifies if a limited resource is configured with quota, it can be consumed. 
func TestAdmitLimitedResourceWithQuota(t *testing.T) { - resourceQuota := &api.ResourceQuota{ + resourceQuota := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceRequestsCPU: resource.MustParse("10"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceRequestsCPU: resource.MustParse("10"), }, - Used: api.ResourceList{ - api.ResourceRequestsCPU: resource.MustParse("1"), + Used: corev1.ResourceList{ + corev1.ResourceRequestsCPU: resource.MustParse("1"), }, }, } @@ -1373,7 +1374,7 @@ func TestAdmitLimitedResourceWithQuota(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() // disable consumption of cpu unless there is a covering quota. // disable consumption of cpu unless there is a covering quota. 
@@ -1394,7 +1395,7 @@ func TestAdmitLimitedResourceWithQuota(t *testing.T) { } indexer.Add(resourceQuota) newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", ""))) - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -1402,25 +1403,25 @@ func TestAdmitLimitedResourceWithQuota(t *testing.T) { // TestAdmitLimitedResourceWithMultipleQuota verifies if a limited resource is configured with quota, it can be consumed if one matches. func TestAdmitLimitedResourceWithMultipleQuota(t *testing.T) { - resourceQuota1 := &api.ResourceQuota{ + resourceQuota1 := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota1", Namespace: "test", ResourceVersion: "124"}, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceRequestsCPU: resource.MustParse("10"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceRequestsCPU: resource.MustParse("10"), }, - Used: api.ResourceList{ - api.ResourceRequestsCPU: resource.MustParse("1"), + Used: corev1.ResourceList{ + corev1.ResourceRequestsCPU: resource.MustParse("1"), }, }, } - resourceQuota2 := &api.ResourceQuota{ + resourceQuota2 := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota2", Namespace: "test", ResourceVersion: "124"}, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceMemory: resource.MustParse("10Gi"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("10Gi"), }, - Used: 
api.ResourceList{ - api.ResourceMemory: resource.MustParse("1Gi"), + Used: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("1Gi"), }, }, } @@ -1432,7 +1433,7 @@ func TestAdmitLimitedResourceWithMultipleQuota(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() // disable consumption of cpu unless there is a covering quota. // disable consumption of cpu unless there is a covering quota. @@ -1454,7 +1455,7 @@ func TestAdmitLimitedResourceWithMultipleQuota(t *testing.T) { indexer.Add(resourceQuota1) indexer.Add(resourceQuota2) newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", ""))) - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -1462,14 +1463,14 @@ func TestAdmitLimitedResourceWithMultipleQuota(t *testing.T) { // TestAdmitLimitedResourceWithQuotaThatDoesNotCover verifies if a limited resource is configured the quota must cover the resource. 
func TestAdmitLimitedResourceWithQuotaThatDoesNotCover(t *testing.T) { - resourceQuota := &api.ResourceQuota{ + resourceQuota := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceMemory: resource.MustParse("10Gi"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("10Gi"), }, - Used: api.ResourceList{ - api.ResourceMemory: resource.MustParse("1Gi"), + Used: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("1Gi"), }, }, } @@ -1481,7 +1482,7 @@ func TestAdmitLimitedResourceWithQuotaThatDoesNotCover(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() // disable consumption of cpu unless there is a covering quota. // disable consumption of cpu unless there is a covering quota. 
@@ -1502,7 +1503,7 @@ func TestAdmitLimitedResourceWithQuotaThatDoesNotCover(t *testing.T) { } indexer.Add(resourceQuota) newPod := validPod("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", ""))) - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) if err == nil { t.Fatalf("Expected an error since the quota did not cover cpu") } @@ -1513,22 +1514,22 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { testCases := []struct { description string testPod *api.Pod - quota *api.ResourceQuota - anotherQuota *api.ResourceQuota + quota *corev1.ResourceQuota + anotherQuota *corev1.ResourceQuota config *resourcequotaapi.Configuration expErr string }{ { description: "Covering quota exists for configured limited scope PriorityClassNameExists.", testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "fake-priority"), - quota: &api.ResourceQuota{ + quota: &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Spec: api.ResourceQuotaSpec{ - ScopeSelector: &api.ScopeSelector{ - MatchExpressions: []api.ScopedResourceSelectorRequirement{ + Spec: corev1.ResourceQuotaSpec{ + ScopeSelector: &corev1.ScopeSelector{ + MatchExpressions: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpExists}, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpExists}, }, }, }, @@ -1537,10 +1538,10 @@ func 
TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { LimitedResources: []resourcequotaapi.LimitedResource{ { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpExists, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpExists, }, }, }, @@ -1551,14 +1552,14 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { { description: "configured limited scope PriorityClassNameExists and limited cpu resource. No covering quota for cpu and pod admit fails.", testPod: validPodWithPriority("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "fake-priority"), - quota: &api.ResourceQuota{ + quota: &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Spec: api.ResourceQuotaSpec{ - ScopeSelector: &api.ScopeSelector{ - MatchExpressions: []api.ScopedResourceSelectorRequirement{ + Spec: corev1.ResourceQuotaSpec{ + ScopeSelector: &corev1.ScopeSelector{ + MatchExpressions: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpExists}, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpExists}, }, }, }, @@ -1567,10 +1568,10 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { LimitedResources: []resourcequotaapi.LimitedResource{ { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpExists, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpExists, }, }, MatchContains: []string{"requests.cpu"}, // match on "requests.cpu" only @@ -1582,15 +1583,15 @@ func 
TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { { description: "Covering quota does not exist for configured limited scope PriorityClassNameExists.", testPod: validPodWithPriority("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "fake-priority"), - quota: &api.ResourceQuota{}, + quota: &corev1.ResourceQuota{}, config: &resourcequotaapi.Configuration{ LimitedResources: []resourcequotaapi.LimitedResource{ { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpExists, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpExists, }, }, }, @@ -1601,15 +1602,15 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { { description: "Covering quota does not exist for configured limited scope resourceQuotaBestEffort", testPod: validPodWithPriority("not-allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "")), "fake-priority"), - quota: &api.ResourceQuota{}, + quota: &corev1.ResourceQuota{}, config: &resourcequotaapi.Configuration{ LimitedResources: []resourcequotaapi.LimitedResource{ { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopeBestEffort, - Operator: api.ScopeSelectorOpExists, + ScopeName: corev1.ResourceQuotaScopeBestEffort, + Operator: corev1.ScopeSelectorOpExists, }, }, }, @@ -1620,17 +1621,17 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { { description: "Covering quota exist for configured limited scope resourceQuotaBestEffort", testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "")), "fake-priority"), - quota: &api.ResourceQuota{ + quota: &corev1.ResourceQuota{ ObjectMeta: 
metav1.ObjectMeta{Name: "quota-besteffort", Namespace: "test", ResourceVersion: "124"}, - Spec: api.ResourceQuotaSpec{ - Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeBestEffort}, + Spec: corev1.ResourceQuotaSpec{ + Scopes: []corev1.ResourceQuotaScope{corev1.ResourceQuotaScopeBestEffort}, }, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourcePods: resource.MustParse("5"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourcePods: resource.MustParse("5"), }, - Used: api.ResourceList{ - api.ResourcePods: resource.MustParse("3"), + Used: corev1.ResourceList{ + corev1.ResourcePods: resource.MustParse("3"), }, }, }, @@ -1638,10 +1639,10 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { LimitedResources: []resourcequotaapi.LimitedResource{ { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopeBestEffort, - Operator: api.ScopeSelectorOpExists, + ScopeName: corev1.ResourceQuotaScopeBestEffort, + Operator: corev1.ScopeSelectorOpExists, }, }, }, @@ -1652,24 +1653,24 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { { description: "Two scopes,BestEffort and PriorityClassIN, in two LimitedResources. Neither matches pod. 
Pod allowed", testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", "")), "fake-priority"), - quota: &api.ResourceQuota{}, + quota: &corev1.ResourceQuota{}, config: &resourcequotaapi.Configuration{ LimitedResources: []resourcequotaapi.LimitedResource{ { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopeBestEffort, - Operator: api.ScopeSelectorOpExists, + ScopeName: corev1.ResourceQuotaScopeBestEffort, + Operator: corev1.ScopeSelectorOpExists, }, }, }, { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpIn, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpIn, Values: []string{"cluster-services"}, }, }, @@ -1681,24 +1682,24 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { { description: "Two scopes,BestEffort and PriorityClassIN, in two LimitedResources. Only BestEffort scope matches pod. 
Pod admit fails because covering quota is missing for BestEffort scope", testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "")), "fake-priority"), - quota: &api.ResourceQuota{}, + quota: &corev1.ResourceQuota{}, config: &resourcequotaapi.Configuration{ LimitedResources: []resourcequotaapi.LimitedResource{ { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopeBestEffort, - Operator: api.ScopeSelectorOpExists, + ScopeName: corev1.ResourceQuotaScopeBestEffort, + Operator: corev1.ScopeSelectorOpExists, }, }, }, { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpIn, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpIn, Values: []string{"cluster-services"}, }, }, @@ -1710,24 +1711,24 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { { description: "Two scopes,BestEffort and PriorityClassIN, in two LimitedResources. Only PriorityClass scope matches pod. 
Pod admit fails because covering quota is missing for PriorityClass scope", testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", "")), "cluster-services"), - quota: &api.ResourceQuota{}, + quota: &corev1.ResourceQuota{}, config: &resourcequotaapi.Configuration{ LimitedResources: []resourcequotaapi.LimitedResource{ { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopeBestEffort, - Operator: api.ScopeSelectorOpExists, + ScopeName: corev1.ResourceQuotaScopeBestEffort, + Operator: corev1.ScopeSelectorOpExists, }, }, }, { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpIn, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpIn, Values: []string{"cluster-services"}, }, }, @@ -1739,24 +1740,24 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { { description: "Two scopes,BestEffort and PriorityClassIN, in two LimitedResources. Both the scopes matches pod. 
Pod admit fails because covering quota is missing for PriorityClass scope and BestEffort scope", testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "")), "cluster-services"), - quota: &api.ResourceQuota{}, + quota: &corev1.ResourceQuota{}, config: &resourcequotaapi.Configuration{ LimitedResources: []resourcequotaapi.LimitedResource{ { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopeBestEffort, - Operator: api.ScopeSelectorOpExists, + ScopeName: corev1.ResourceQuotaScopeBestEffort, + Operator: corev1.ScopeSelectorOpExists, }, }, }, { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpIn, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpIn, Values: []string{"cluster-services"}, }, }, @@ -1768,17 +1769,17 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { { description: "Two scopes,BestEffort and PriorityClassIN, in two LimitedResources. Both the scopes matches pod. Quota available only for BestEffort scope. 
Pod admit fails because covering quota is missing for PriorityClass scope", testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "")), "cluster-services"), - quota: &api.ResourceQuota{ + quota: &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota-besteffort", Namespace: "test", ResourceVersion: "124"}, - Spec: api.ResourceQuotaSpec{ - Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeBestEffort}, + Spec: corev1.ResourceQuotaSpec{ + Scopes: []corev1.ResourceQuotaScope{corev1.ResourceQuotaScopeBestEffort}, }, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourcePods: resource.MustParse("5"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourcePods: resource.MustParse("5"), }, - Used: api.ResourceList{ - api.ResourcePods: resource.MustParse("3"), + Used: corev1.ResourceList{ + corev1.ResourcePods: resource.MustParse("3"), }, }, }, @@ -1786,19 +1787,19 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { LimitedResources: []resourcequotaapi.LimitedResource{ { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopeBestEffort, - Operator: api.ScopeSelectorOpExists, + ScopeName: corev1.ResourceQuotaScopeBestEffort, + Operator: corev1.ScopeSelectorOpExists, }, }, }, { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpIn, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpIn, Values: []string{"cluster-services"}, }, }, @@ -1810,14 +1811,14 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { { description: "Two scopes,BestEffort and PriorityClassIN, in two LimitedResources. Both the scopes matches pod. 
Quota available only for PriorityClass scope. Pod admit fails because covering quota is missing for BestEffort scope", testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "")), "cluster-services"), - quota: &api.ResourceQuota{ + quota: &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Spec: api.ResourceQuotaSpec{ - ScopeSelector: &api.ScopeSelector{ - MatchExpressions: []api.ScopedResourceSelectorRequirement{ + Spec: corev1.ResourceQuotaSpec{ + ScopeSelector: &corev1.ScopeSelector{ + MatchExpressions: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpIn, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpIn, Values: []string{"cluster-services"}, }, }, @@ -1828,19 +1829,19 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { LimitedResources: []resourcequotaapi.LimitedResource{ { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopeBestEffort, - Operator: api.ScopeSelectorOpExists, + ScopeName: corev1.ResourceQuotaScopeBestEffort, + Operator: corev1.ScopeSelectorOpExists, }, }, }, { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpIn, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpIn, Values: []string{"cluster-services"}, }, }, @@ -1852,28 +1853,28 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { { description: "Two scopes,BestEffort and PriorityClassIN, in two LimitedResources. Both the scopes matches pod. Quota available only for both the scopes. Pod admit success. 
No Error", testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "")), "cluster-services"), - quota: &api.ResourceQuota{ + quota: &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota-besteffort", Namespace: "test", ResourceVersion: "124"}, - Spec: api.ResourceQuotaSpec{ - Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeBestEffort}, + Spec: corev1.ResourceQuotaSpec{ + Scopes: []corev1.ResourceQuotaScope{corev1.ResourceQuotaScopeBestEffort}, }, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourcePods: resource.MustParse("5"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourcePods: resource.MustParse("5"), }, - Used: api.ResourceList{ - api.ResourcePods: resource.MustParse("3"), + Used: corev1.ResourceList{ + corev1.ResourcePods: resource.MustParse("3"), }, }, }, - anotherQuota: &api.ResourceQuota{ + anotherQuota: &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Spec: api.ResourceQuotaSpec{ - ScopeSelector: &api.ScopeSelector{ - MatchExpressions: []api.ScopedResourceSelectorRequirement{ + Spec: corev1.ResourceQuotaSpec{ + ScopeSelector: &corev1.ScopeSelector{ + MatchExpressions: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpIn, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpIn, Values: []string{"cluster-services"}, }, }, @@ -1884,19 +1885,19 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { LimitedResources: []resourcequotaapi.LimitedResource{ { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopeBestEffort, - Operator: api.ScopeSelectorOpExists, + ScopeName: corev1.ResourceQuotaScopeBestEffort, + Operator: 
corev1.ScopeSelectorOpExists, }, }, }, { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpIn, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpIn, Values: []string{"cluster-services"}, }, }, @@ -1908,30 +1909,30 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { { description: "Pod allowed with priorityclass if limited scope PriorityClassNameExists not configured.", testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "fake-priority"), - quota: &api.ResourceQuota{}, + quota: &corev1.ResourceQuota{}, config: &resourcequotaapi.Configuration{}, expErr: "", }, { description: "quota fails, though covering quota for configured limited scope PriorityClassNameExists exists.", testPod: validPodWithPriority("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "20Gi"), getResourceList("", "")), "fake-priority"), - quota: &api.ResourceQuota{ + quota: &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Spec: api.ResourceQuotaSpec{ - ScopeSelector: &api.ScopeSelector{ - MatchExpressions: []api.ScopedResourceSelectorRequirement{ + Spec: corev1.ResourceQuotaSpec{ + ScopeSelector: &corev1.ScopeSelector{ + MatchExpressions: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpExists}, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpExists}, }, }, }, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceMemory: resource.MustParse("10Gi"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("10Gi"), }, - Used: api.ResourceList{ - 
api.ResourceMemory: resource.MustParse("1Gi"), + Used: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("1Gi"), }, }, }, @@ -1939,10 +1940,10 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { LimitedResources: []resourcequotaapi.LimitedResource{ { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpExists, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpExists, }, }, }, @@ -1953,14 +1954,14 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { { description: "Pod has different priorityclass than configured limited. Covering quota exists for configured limited scope PriorityClassIn.", testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "fake-priority"), - quota: &api.ResourceQuota{ + quota: &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Spec: api.ResourceQuotaSpec{ - ScopeSelector: &api.ScopeSelector{ - MatchExpressions: []api.ScopedResourceSelectorRequirement{ + Spec: corev1.ResourceQuotaSpec{ + ScopeSelector: &corev1.ScopeSelector{ + MatchExpressions: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpIn, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpIn, Values: []string{"cluster-services"}, }, }, @@ -1971,10 +1972,10 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { LimitedResources: []resourcequotaapi.LimitedResource{ { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpIn, + ScopeName: 
corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpIn, Values: []string{"cluster-services"}, }, }, @@ -1986,14 +1987,14 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { { description: "Pod has limited priorityclass. Covering quota exists for configured limited scope PriorityClassIn.", testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "cluster-services"), - quota: &api.ResourceQuota{ + quota: &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Spec: api.ResourceQuotaSpec{ - ScopeSelector: &api.ScopeSelector{ - MatchExpressions: []api.ScopedResourceSelectorRequirement{ + Spec: corev1.ResourceQuotaSpec{ + ScopeSelector: &corev1.ScopeSelector{ + MatchExpressions: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpIn, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpIn, Values: []string{"cluster-services"}, }, }, @@ -2004,10 +2005,10 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { LimitedResources: []resourcequotaapi.LimitedResource{ { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpIn, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpIn, Values: []string{"another-priorityclass-name", "cluster-services"}, }, }, @@ -2019,14 +2020,14 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { { description: "Pod has limited priorityclass. 
Covering quota does not exist for configured limited scope PriorityClassIn.", testPod: validPodWithPriority("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "cluster-services"), - quota: &api.ResourceQuota{ + quota: &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Spec: api.ResourceQuotaSpec{ - ScopeSelector: &api.ScopeSelector{ - MatchExpressions: []api.ScopedResourceSelectorRequirement{ + Spec: corev1.ResourceQuotaSpec{ + ScopeSelector: &corev1.ScopeSelector{ + MatchExpressions: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpIn, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpIn, Values: []string{"another-priorityclass-name"}, }, }, @@ -2037,10 +2038,10 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { LimitedResources: []resourcequotaapi.LimitedResource{ { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpIn, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpIn, Values: []string{"another-priorityclass-name", "cluster-services"}, }, }, @@ -2052,14 +2053,14 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { { description: "From the above test case, just changing pod priority from cluster-services to another-priorityclass-name. 
expecting no error", testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "another-priorityclass-name"), - quota: &api.ResourceQuota{ + quota: &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Spec: api.ResourceQuotaSpec{ - ScopeSelector: &api.ScopeSelector{ - MatchExpressions: []api.ScopedResourceSelectorRequirement{ + Spec: corev1.ResourceQuotaSpec{ + ScopeSelector: &corev1.ScopeSelector{ + MatchExpressions: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpIn, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpIn, Values: []string{"another-priorityclass-name"}, }, }, @@ -2070,10 +2071,10 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { LimitedResources: []resourcequotaapi.LimitedResource{ { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpIn, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpIn, Values: []string{"another-priorityclass-name", "cluster-services"}, }, }, @@ -2085,15 +2086,15 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { { description: "Pod has limited priorityclass. 
Covering quota does NOT exists for configured limited scope PriorityClassIn.", testPod: validPodWithPriority("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "cluster-services"), - quota: &api.ResourceQuota{}, + quota: &corev1.ResourceQuota{}, config: &resourcequotaapi.Configuration{ LimitedResources: []resourcequotaapi.LimitedResource{ { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpIn, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpIn, Values: []string{"another-priorityclass-name", "cluster-services"}, }, }, @@ -2105,14 +2106,14 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { { description: "Pod has limited priorityclass. Covering quota exists for configured limited scope PriorityClassIn through PriorityClassNameExists", testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "cluster-services"), - quota: &api.ResourceQuota{ + quota: &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Spec: api.ResourceQuotaSpec{ - ScopeSelector: &api.ScopeSelector{ - MatchExpressions: []api.ScopedResourceSelectorRequirement{ + Spec: corev1.ResourceQuotaSpec{ + ScopeSelector: &corev1.ScopeSelector{ + MatchExpressions: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpExists}, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpExists}, }, }, }, @@ -2121,10 +2122,10 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { LimitedResources: []resourcequotaapi.LimitedResource{ { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: 
[]corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpIn, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpIn, Values: []string{"another-priorityclass-name", "cluster-services"}, }, }, @@ -2150,7 +2151,7 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() quotaConfiguration := install.NewQuotaConfigurationForAdmission() evaluator := NewQuotaEvaluator(quotaAccessor, quotaConfiguration.IgnoredResources(), generic.NewRegistry(quotaConfiguration.Evaluators()), nil, config, 5, stopCh) @@ -2163,7 +2164,7 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { if testCase.anotherQuota != nil { indexer.Add(testCase.anotherQuota) } - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) if testCase.expErr == "" { if err != nil { t.Fatalf("Testcase, %v, failed with unexpected error: %v. 
ExpErr: %v", testCase.description, err, testCase.expErr) diff --git a/plugin/pkg/admission/resourcequota/controller.go b/plugin/pkg/admission/resourcequota/controller.go index 30b9defafa0..34c621903b9 100644 --- a/plugin/pkg/admission/resourcequota/controller.go +++ b/plugin/pkg/admission/resourcequota/controller.go @@ -25,6 +25,7 @@ import ( "github.com/golang/glog" + corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" @@ -34,9 +35,8 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/admission" "k8s.io/client-go/util/workqueue" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/quota" - "k8s.io/kubernetes/pkg/quota/generic" + quota "k8s.io/kubernetes/pkg/quota/v1" + "k8s.io/kubernetes/pkg/quota/v1/generic" _ "k8s.io/kubernetes/pkg/util/reflector/prometheus" // for reflector metric registration _ "k8s.io/kubernetes/pkg/util/workqueue/prometheus" // for workqueue metric registration resourcequotaapi "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota" @@ -52,7 +52,7 @@ type Evaluator interface { type quotaEvaluator struct { quotaAccessor QuotaAccessor // lockAcquisitionFunc acquires any required locks and returns a cleanup method to defer - lockAcquisitionFunc func([]api.ResourceQuota) func() + lockAcquisitionFunc func([]corev1.ResourceQuota) func() ignoredResources map[schema.GroupResource]struct{} @@ -111,7 +111,7 @@ func newAdmissionWaiter(a admission.Attributes) *admissionWaiter { // NewQuotaEvaluator configures an admission controller that can enforce quota constraints // using the provided registry. 
The registry must have the capability to handle group/kinds that // are persisted by the server this admission controller is intercepting -func NewQuotaEvaluator(quotaAccessor QuotaAccessor, ignoredResources map[schema.GroupResource]struct{}, quotaRegistry quota.Registry, lockAcquisitionFunc func([]api.ResourceQuota) func(), config *resourcequotaapi.Configuration, workers int, stopCh <-chan struct{}) Evaluator { +func NewQuotaEvaluator(quotaAccessor QuotaAccessor, ignoredResources map[schema.GroupResource]struct{}, quotaRegistry quota.Registry, lockAcquisitionFunc func([]corev1.ResourceQuota) func(), config *resourcequotaapi.Configuration, workers int, stopCh <-chan struct{}) Evaluator { // if we get a nil config, just create an empty default. if config == nil { config = &resourcequotaapi.Configuration{} @@ -214,7 +214,7 @@ func (e *quotaEvaluator) checkAttributes(ns string, admissionAttributes []*admis // updates failed on conflict errors and we have retries left, re-get the failed quota from our cache for the latest version // and recurse into this method with the subset. It's safe for us to evaluate ONLY the subset, because the other quota // documents for these waiters have already been evaluated. Step 1, will mark all the ones that should already have succeeded. -func (e *quotaEvaluator) checkQuotas(quotas []api.ResourceQuota, admissionAttributes []*admissionWaiter, remainingRetries int) { +func (e *quotaEvaluator) checkQuotas(quotas []corev1.ResourceQuota, admissionAttributes []*admissionWaiter, remainingRetries int) { // yet another copy to compare against originals to see if we actually have deltas originalQuotas, err := copyQuotas(quotas) if err != nil { @@ -264,7 +264,7 @@ func (e *quotaEvaluator) checkQuotas(quotas []api.ResourceQuota, admissionAttrib // 1. check to see if the quota changed. If not, skip. // 2. if the quota changed and the update passes, be happy // 3. 
if the quota changed and the update fails, add the original to a retry list - var updatedFailedQuotas []api.ResourceQuota + var updatedFailedQuotas []corev1.ResourceQuota var lastErr error for i := range quotas { newQuota := quotas[i] @@ -318,7 +318,7 @@ func (e *quotaEvaluator) checkQuotas(quotas []api.ResourceQuota, admissionAttrib // this logic goes through our cache to find the new version of all quotas that failed update. If something has been removed // it is skipped on this retry. After all, you removed it. - quotasToCheck := []api.ResourceQuota{} + quotasToCheck := []corev1.ResourceQuota{} for _, newQuota := range newQuotas { for _, oldQuota := range updatedFailedQuotas { if newQuota.Name == oldQuota.Name { @@ -330,8 +330,8 @@ func (e *quotaEvaluator) checkQuotas(quotas []api.ResourceQuota, admissionAttrib e.checkQuotas(quotasToCheck, admissionAttributes, remainingRetries-1) } -func copyQuotas(in []api.ResourceQuota) ([]api.ResourceQuota, error) { - out := make([]api.ResourceQuota, 0, len(in)) +func copyQuotas(in []corev1.ResourceQuota) ([]corev1.ResourceQuota, error) { + out := make([]corev1.ResourceQuota, 0, len(in)) for _, quota := range in { out = append(out, *quota.DeepCopy()) } @@ -355,8 +355,8 @@ func filterLimitedResourcesByGroupResource(input []resourcequotaapi.LimitedResou // limitedByDefault determines from the specified usage and limitedResources the set of resources names // that must be present in a covering quota. It returns empty set if it was unable to determine if // a resource was not limited by default. 
-func limitedByDefault(usage api.ResourceList, limitedResources []resourcequotaapi.LimitedResource) []api.ResourceName { - result := []api.ResourceName{} +func limitedByDefault(usage corev1.ResourceList, limitedResources []resourcequotaapi.LimitedResource) []corev1.ResourceName { + result := []corev1.ResourceName{} for _, limitedResource := range limitedResources { for k, v := range usage { // if a resource is consumed, we need to check if it matches on the limited resource list. @@ -374,13 +374,13 @@ func limitedByDefault(usage api.ResourceList, limitedResources []resourcequotaap return result } -func getMatchedLimitedScopes(evaluator quota.Evaluator, inputObject runtime.Object, limitedResources []resourcequotaapi.LimitedResource) ([]api.ScopedResourceSelectorRequirement, error) { - scopes := []api.ScopedResourceSelectorRequirement{} +func getMatchedLimitedScopes(evaluator quota.Evaluator, inputObject runtime.Object, limitedResources []resourcequotaapi.LimitedResource) ([]corev1.ScopedResourceSelectorRequirement, error) { + scopes := []corev1.ScopedResourceSelectorRequirement{} for _, limitedResource := range limitedResources { matched, err := evaluator.MatchingScopes(inputObject, limitedResource.MatchScopes) if err != nil { glog.Errorf("Error while matching limited Scopes: %v", err) - return []api.ScopedResourceSelectorRequirement{}, err + return []corev1.ScopedResourceSelectorRequirement{}, err } for _, scope := range matched { scopes = append(scopes, scope) @@ -391,7 +391,7 @@ func getMatchedLimitedScopes(evaluator quota.Evaluator, inputObject runtime.Obje // checkRequest verifies that the request does not exceed any quota constraint. it returns a copy of quotas not yet persisted // that capture what the usage would be if the request succeeded. 
It return an error if there is insufficient quota to satisfy the request -func (e *quotaEvaluator) checkRequest(quotas []api.ResourceQuota, a admission.Attributes) ([]api.ResourceQuota, error) { +func (e *quotaEvaluator) checkRequest(quotas []corev1.ResourceQuota, a admission.Attributes) ([]corev1.ResourceQuota, error) { evaluator := e.registry.Get(a.GetResource().GroupResource()) if evaluator == nil { return quotas, nil @@ -400,8 +400,8 @@ func (e *quotaEvaluator) checkRequest(quotas []api.ResourceQuota, a admission.At } // CheckRequest is a static version of quotaEvaluator.checkRequest, possible to be called from outside. -func CheckRequest(quotas []api.ResourceQuota, a admission.Attributes, evaluator quota.Evaluator, - limited []resourcequotaapi.LimitedResource) ([]api.ResourceQuota, error) { +func CheckRequest(quotas []corev1.ResourceQuota, a admission.Attributes, evaluator quota.Evaluator, + limited []resourcequotaapi.LimitedResource) ([]corev1.ResourceQuota, error) { if !evaluator.Handles(a) { return quotas, nil } @@ -416,7 +416,7 @@ func CheckRequest(quotas []api.ResourceQuota, a admission.Attributes, evaluator } // determine the set of resource names that must exist in a covering quota - limitedResourceNames := []api.ResourceName{} + limitedResourceNames := []corev1.ResourceName{} limitedResources := filterLimitedResourcesByGroupResource(limited, a.GetResource().GroupResource()) if len(limitedResources) > 0 { deltaUsage, err := evaluator.Usage(inputObject) @@ -436,7 +436,7 @@ func CheckRequest(quotas []api.ResourceQuota, a admission.Attributes, evaluator // this is needed to know if we have satisfied any constraints where consumption // was limited by default. 
restrictedResourcesSet := sets.String{} - restrictedScopes := []api.ScopedResourceSelectorRequirement{} + restrictedScopes := []corev1.ScopedResourceSelectorRequirement{} for i := range quotas { resourceQuota := quotas[i] scopeSelectors := getScopeSelectorsFromQuota(resourceQuota) @@ -571,12 +571,12 @@ func CheckRequest(quotas []api.ResourceQuota, a admission.Attributes, evaluator return outQuotas, nil } -func getScopeSelectorsFromQuota(quota api.ResourceQuota) []api.ScopedResourceSelectorRequirement { - selectors := []api.ScopedResourceSelectorRequirement{} +func getScopeSelectorsFromQuota(quota corev1.ResourceQuota) []corev1.ScopedResourceSelectorRequirement { + selectors := []corev1.ScopedResourceSelectorRequirement{} for _, scope := range quota.Spec.Scopes { - selectors = append(selectors, api.ScopedResourceSelectorRequirement{ + selectors = append(selectors, corev1.ScopedResourceSelectorRequirement{ ScopeName: scope, - Operator: api.ScopeSelectorOpExists}) + Operator: corev1.ScopeSelectorOpExists}) } if quota.Spec.ScopeSelector != nil { for _, scopeSelector := range quota.Spec.ScopeSelector.MatchExpressions { @@ -680,7 +680,7 @@ func (e *quotaEvaluator) getWork() (string, []*admissionWaiter, bool) { // prettyPrint formats a resource list for usage in errors // it outputs resources sorted in increasing order -func prettyPrint(item api.ResourceList) string { +func prettyPrint(item corev1.ResourceList) string { parts := []string{} keys := []string{} for key := range item { @@ -688,14 +688,14 @@ func prettyPrint(item api.ResourceList) string { } sort.Strings(keys) for _, key := range keys { - value := item[api.ResourceName(key)] + value := item[corev1.ResourceName(key)] constraint := key + "=" + value.String() parts = append(parts, constraint) } return strings.Join(parts, ",") } -func prettyPrintResourceNames(a []api.ResourceName) string { +func prettyPrintResourceNames(a []corev1.ResourceName) string { values := []string{} for _, value := range a { values = 
append(values, string(value)) @@ -705,7 +705,7 @@ func prettyPrintResourceNames(a []api.ResourceName) string { } // hasUsageStats returns true if for each hard constraint there is a value for its current usage -func hasUsageStats(resourceQuota *api.ResourceQuota) bool { +func hasUsageStats(resourceQuota *corev1.ResourceQuota) bool { for resourceName := range resourceQuota.Status.Hard { if _, found := resourceQuota.Status.Used[resourceName]; !found { return false diff --git a/plugin/pkg/admission/resourcequota/resource_access.go b/plugin/pkg/admission/resourcequota/resource_access.go index c7e12d6c8e0..f703d478b33 100644 --- a/plugin/pkg/admission/resourcequota/resource_access.go +++ b/plugin/pkg/admission/resourcequota/resource_access.go @@ -20,14 +20,14 @@ import ( "fmt" "time" - lru "github.com/hashicorp/golang-lru" + "github.com/hashicorp/golang-lru" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apiserver/pkg/storage/etcd" - api "k8s.io/kubernetes/pkg/apis/core" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - corelisters "k8s.io/kubernetes/pkg/client/listers/core/internalversion" + "k8s.io/client-go/kubernetes" + corev1listers "k8s.io/client-go/listers/core/v1" ) // QuotaAccessor abstracts the get/set logic from the rest of the Evaluator. This could be a test stub, a straight passthrough, @@ -35,17 +35,17 @@ import ( type QuotaAccessor interface { // UpdateQuotaStatus is called to persist final status. This method should write to persistent storage. // An error indicates that write didn't complete successfully. 
- UpdateQuotaStatus(newQuota *api.ResourceQuota) error + UpdateQuotaStatus(newQuota *corev1.ResourceQuota) error // GetQuotas gets all possible quotas for a given namespace - GetQuotas(namespace string) ([]api.ResourceQuota, error) + GetQuotas(namespace string) ([]corev1.ResourceQuota, error) } type quotaAccessor struct { - client clientset.Interface + client kubernetes.Interface // lister can list/get quota objects from a shared informer's cache - lister corelisters.ResourceQuotaLister + lister corev1listers.ResourceQuotaLister // liveLookups holds the last few live lookups we've done to help ammortize cost on repeated lookup failures. // This lets us handle the case of latent caches, by looking up actual results for a namespace on cache miss/no results. @@ -77,8 +77,8 @@ func newQuotaAccessor() (*quotaAccessor, error) { }, nil } -func (e *quotaAccessor) UpdateQuotaStatus(newQuota *api.ResourceQuota) error { - updatedQuota, err := e.client.Core().ResourceQuotas(newQuota.Namespace).UpdateStatus(newQuota) +func (e *quotaAccessor) UpdateQuotaStatus(newQuota *corev1.ResourceQuota) error { + updatedQuota, err := e.client.CoreV1().ResourceQuotas(newQuota.Namespace).UpdateStatus(newQuota) if err != nil { return err } @@ -93,13 +93,13 @@ var etcdVersioner = etcd.APIObjectVersioner{} // checkCache compares the passed quota against the value in the look-aside cache and returns the newer // if the cache is out of date, it deletes the stale entry. 
This only works because of etcd resourceVersions // being monotonically increasing integers -func (e *quotaAccessor) checkCache(quota *api.ResourceQuota) *api.ResourceQuota { +func (e *quotaAccessor) checkCache(quota *corev1.ResourceQuota) *corev1.ResourceQuota { key := quota.Namespace + "/" + quota.Name uncastCachedQuota, ok := e.updatedQuotas.Get(key) if !ok { return quota } - cachedQuota := uncastCachedQuota.(*api.ResourceQuota) + cachedQuota := uncastCachedQuota.(*corev1.ResourceQuota) if etcdVersioner.CompareResourceVersion(quota, cachedQuota) >= 0 { e.updatedQuotas.Remove(key) @@ -108,7 +108,7 @@ func (e *quotaAccessor) checkCache(quota *api.ResourceQuota) *api.ResourceQuota return cachedQuota } -func (e *quotaAccessor) GetQuotas(namespace string) ([]api.ResourceQuota, error) { +func (e *quotaAccessor) GetQuotas(namespace string) ([]corev1.ResourceQuota, error) { // determine if there are any quotas in this namespace // if there are no quotas, we don't need to do anything items, err := e.lister.ResourceQuotas(namespace).List(labels.Everything()) @@ -142,7 +142,7 @@ func (e *quotaAccessor) GetQuotas(namespace string) ([]api.ResourceQuota, error) } } - resourceQuotas := []api.ResourceQuota{} + resourceQuotas := []corev1.ResourceQuota{} for i := range items { quota := items[i] quota = e.checkCache(quota) From 48dd084a795bda2c42608ddfef6508e4638e78ad Mon Sep 17 00:00:00 2001 From: yue9944882 <291271447@qq.com> Date: Mon, 27 Aug 2018 21:47:28 +0800 Subject: [PATCH 2/9] externalize fields for quota private schema --- plugin/pkg/admission/resourcequota/apis/resourcequota/BUILD | 2 +- .../pkg/admission/resourcequota/apis/resourcequota/types.go | 4 ++-- .../admission/resourcequota/apis/resourcequota/v1alpha1/BUILD | 1 - .../apis/resourcequota/v1alpha1/zz_generated.conversion.go | 3 +-- .../admission/resourcequota/apis/resourcequota/v1beta1/BUILD | 1 - .../apis/resourcequota/v1beta1/zz_generated.conversion.go | 3 +-- 
.../resourcequota/apis/resourcequota/zz_generated.deepcopy.go | 4 ++-- 7 files changed, 7 insertions(+), 11 deletions(-) diff --git a/plugin/pkg/admission/resourcequota/apis/resourcequota/BUILD b/plugin/pkg/admission/resourcequota/apis/resourcequota/BUILD index 9440df52e70..b35353ab96d 100644 --- a/plugin/pkg/admission/resourcequota/apis/resourcequota/BUILD +++ b/plugin/pkg/admission/resourcequota/apis/resourcequota/BUILD @@ -15,7 +15,7 @@ go_library( ], importpath = "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota", deps = [ - "//pkg/apis/core:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/plugin/pkg/admission/resourcequota/apis/resourcequota/types.go b/plugin/pkg/admission/resourcequota/apis/resourcequota/types.go index 7686e4dab5c..b8ffc104218 100644 --- a/plugin/pkg/admission/resourcequota/apis/resourcequota/types.go +++ b/plugin/pkg/admission/resourcequota/apis/resourcequota/types.go @@ -17,8 +17,8 @@ limitations under the License. 
package resourcequota import ( + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/apis/core" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -68,5 +68,5 @@ type LimitedResource struct { // "PriorityClassNameIn=cluster-services" // +optional // MatchScopes []string `json:"matchScopes,omitempty"` - MatchScopes []core.ScopedResourceSelectorRequirement `json:"matchScopes,omitempty"` + MatchScopes []corev1.ScopedResourceSelectorRequirement `json:"matchScopes,omitempty"` } diff --git a/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/BUILD b/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/BUILD index 3022ef85a23..65ce81437b0 100644 --- a/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/BUILD +++ b/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/BUILD @@ -18,7 +18,6 @@ go_library( ], importpath = "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1", deps = [ - "//pkg/apis/core:go_default_library", "//plugin/pkg/admission/resourcequota/apis/resourcequota:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/zz_generated.conversion.go b/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/zz_generated.conversion.go index 27f439dd653..3ca9511b3c5 100644 --- a/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/zz_generated.conversion.go +++ b/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/zz_generated.conversion.go @@ -26,7 +26,6 @@ import ( v1 "k8s.io/api/core/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - core "k8s.io/kubernetes/pkg/apis/core" resourcequota "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota" ) @@ -84,7 +83,7 @@ func 
autoConvert_v1alpha1_LimitedResource_To_resourcequota_LimitedResource(in *L out.APIGroup = in.APIGroup out.Resource = in.Resource out.MatchContains = *(*[]string)(unsafe.Pointer(&in.MatchContains)) - out.MatchScopes = *(*[]core.ScopedResourceSelectorRequirement)(unsafe.Pointer(&in.MatchScopes)) + out.MatchScopes = *(*[]v1.ScopedResourceSelectorRequirement)(unsafe.Pointer(&in.MatchScopes)) return nil } diff --git a/plugin/pkg/admission/resourcequota/apis/resourcequota/v1beta1/BUILD b/plugin/pkg/admission/resourcequota/apis/resourcequota/v1beta1/BUILD index 47b24da1ab7..0819802ecc5 100644 --- a/plugin/pkg/admission/resourcequota/apis/resourcequota/v1beta1/BUILD +++ b/plugin/pkg/admission/resourcequota/apis/resourcequota/v1beta1/BUILD @@ -18,7 +18,6 @@ go_library( ], importpath = "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/v1beta1", deps = [ - "//pkg/apis/core:go_default_library", "//plugin/pkg/admission/resourcequota/apis/resourcequota:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/plugin/pkg/admission/resourcequota/apis/resourcequota/v1beta1/zz_generated.conversion.go b/plugin/pkg/admission/resourcequota/apis/resourcequota/v1beta1/zz_generated.conversion.go index fb8cb98723e..bff5582a611 100644 --- a/plugin/pkg/admission/resourcequota/apis/resourcequota/v1beta1/zz_generated.conversion.go +++ b/plugin/pkg/admission/resourcequota/apis/resourcequota/v1beta1/zz_generated.conversion.go @@ -26,7 +26,6 @@ import ( v1 "k8s.io/api/core/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - core "k8s.io/kubernetes/pkg/apis/core" resourcequota "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota" ) @@ -84,7 +83,7 @@ func autoConvert_v1beta1_LimitedResource_To_resourcequota_LimitedResource(in *Li out.APIGroup = in.APIGroup out.Resource = in.Resource out.MatchContains = 
*(*[]string)(unsafe.Pointer(&in.MatchContains)) - out.MatchScopes = *(*[]core.ScopedResourceSelectorRequirement)(unsafe.Pointer(&in.MatchScopes)) + out.MatchScopes = *(*[]v1.ScopedResourceSelectorRequirement)(unsafe.Pointer(&in.MatchScopes)) return nil } diff --git a/plugin/pkg/admission/resourcequota/apis/resourcequota/zz_generated.deepcopy.go b/plugin/pkg/admission/resourcequota/apis/resourcequota/zz_generated.deepcopy.go index c33c217ca7c..78baf66effc 100644 --- a/plugin/pkg/admission/resourcequota/apis/resourcequota/zz_generated.deepcopy.go +++ b/plugin/pkg/admission/resourcequota/apis/resourcequota/zz_generated.deepcopy.go @@ -21,8 +21,8 @@ limitations under the License. package resourcequota import ( + v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" - core "k8s.io/kubernetes/pkg/apis/core" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -67,7 +67,7 @@ func (in *LimitedResource) DeepCopyInto(out *LimitedResource) { } if in.MatchScopes != nil { in, out := &in.MatchScopes, &out.MatchScopes - *out = make([]core.ScopedResourceSelectorRequirement, len(*in)) + *out = make([]v1.ScopedResourceSelectorRequirement, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } From ec3b0886cd57028d49518bb8db332813d2b4f158 Mon Sep 17 00:00:00 2001 From: yue9944882 <291271447@qq.com> Date: Mon, 27 Aug 2018 21:48:27 +0800 Subject: [PATCH 3/9] directly switching quota pkg --- pkg/quota/BUILD | 39 +-- pkg/quota/resources_test.go | 321 ------------------ pkg/quota/v1/BUILD | 53 +++ pkg/quota/{ => v1}/OWNERS | 0 pkg/quota/{ => v1}/evaluator/OWNERS | 0 pkg/quota/{ => v1}/evaluator/core/BUILD | 15 +- pkg/quota/{ => v1}/evaluator/core/doc.go | 2 +- .../core/persistent_volume_claims.go | 76 +++-- .../core/persistent_volume_claims_test.go | 23 +- pkg/quota/{ => v1}/evaluator/core/pods.go | 209 +++++------- .../{ => v1}/evaluator/core/pods_test.go | 131 +++---- pkg/quota/{ => 
v1}/evaluator/core/registry.go | 17 +- pkg/quota/{ => v1}/evaluator/core/services.go | 80 ++--- .../{ => v1}/evaluator/core/services_test.go | 77 ++--- pkg/quota/{ => v1}/generic/BUILD | 6 +- pkg/quota/{ => v1}/generic/OWNERS | 0 pkg/quota/{ => v1}/generic/configuration.go | 2 +- pkg/quota/{ => v1}/generic/evaluator.go | 56 +-- pkg/quota/{ => v1}/generic/registry.go | 2 +- pkg/quota/{ => v1}/install/BUILD | 8 +- pkg/quota/{ => v1}/install/OWNERS | 0 pkg/quota/{ => v1}/install/registry.go | 6 +- pkg/quota/{ => v1}/interfaces.go | 22 +- pkg/quota/{ => v1}/resources.go | 59 ++-- pkg/quota/v1/resources_test.go | 321 ++++++++++++++++++ 25 files changed, 760 insertions(+), 765 deletions(-) delete mode 100644 pkg/quota/resources_test.go create mode 100644 pkg/quota/v1/BUILD rename pkg/quota/{ => v1}/OWNERS (100%) rename pkg/quota/{ => v1}/evaluator/OWNERS (100%) rename pkg/quota/{ => v1}/evaluator/core/BUILD (84%) rename pkg/quota/{ => v1}/evaluator/core/doc.go (89%) rename pkg/quota/{ => v1}/evaluator/core/persistent_volume_claims.go (70%) rename pkg/quota/{ => v1}/evaluator/core/persistent_volume_claims_test.go (81%) rename pkg/quota/{ => v1}/evaluator/core/pods.go (64%) rename pkg/quota/{ => v1}/evaluator/core/pods_test.go (74%) rename pkg/quota/{ => v1}/evaluator/core/registry.go (70%) rename pkg/quota/{ => v1}/evaluator/core/services.go (64%) rename pkg/quota/{ => v1}/evaluator/core/services_test.go (67%) rename pkg/quota/{ => v1}/generic/BUILD (86%) rename pkg/quota/{ => v1}/generic/OWNERS (100%) rename pkg/quota/{ => v1}/generic/configuration.go (96%) rename pkg/quota/{ => v1}/generic/evaluator.go (78%) rename pkg/quota/{ => v1}/generic/registry.go (98%) rename pkg/quota/{ => v1}/install/BUILD (70%) rename pkg/quota/{ => v1}/install/OWNERS (100%) rename pkg/quota/{ => v1}/install/registry.go (91%) rename pkg/quota/{ => v1}/interfaces.go (80%) rename pkg/quota/{ => v1}/resources.go (77%) create mode 100644 pkg/quota/v1/resources_test.go diff --git a/pkg/quota/BUILD 
b/pkg/quota/BUILD index 1fb06434993..8659f8169d5 100644 --- a/pkg/quota/BUILD +++ b/pkg/quota/BUILD @@ -1,40 +1,5 @@ package(default_visibility = ["//visibility:public"]) -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_library( - name = "go_default_library", - srcs = [ - "interfaces.go", - "resources.go", - ], - importpath = "k8s.io/kubernetes/pkg/quota", - deps = [ - "//pkg/apis/core:go_default_library", - "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", - "//staging/src/k8s.io/client-go/tools/cache:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = ["resources_test.go"], - embed = [":go_default_library"], - deps = [ - "//pkg/apis/core:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", - ], -) - filegroup( name = "package-srcs", srcs = glob(["**"]), @@ -46,9 +11,7 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", - "//pkg/quota/evaluator/core:all-srcs", - "//pkg/quota/generic:all-srcs", - "//pkg/quota/install:all-srcs", + "//pkg/quota/v1:all-srcs", ], tags = ["automanaged"], ) diff --git a/pkg/quota/resources_test.go b/pkg/quota/resources_test.go deleted file mode 100644 index 2df5dbd74b0..00000000000 --- a/pkg/quota/resources_test.go +++ /dev/null @@ -1,321 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package quota - -import ( - "testing" - - "k8s.io/apimachinery/pkg/api/resource" - api "k8s.io/kubernetes/pkg/apis/core" -) - -func TestEquals(t *testing.T) { - testCases := map[string]struct { - a api.ResourceList - b api.ResourceList - expected bool - }{ - "isEqual": { - a: api.ResourceList{}, - b: api.ResourceList{}, - expected: true, - }, - "isEqualWithKeys": { - a: api.ResourceList{ - api.ResourceCPU: resource.MustParse("100m"), - api.ResourceMemory: resource.MustParse("1Gi"), - }, - b: api.ResourceList{ - api.ResourceCPU: resource.MustParse("100m"), - api.ResourceMemory: resource.MustParse("1Gi"), - }, - expected: true, - }, - "isNotEqualSameKeys": { - a: api.ResourceList{ - api.ResourceCPU: resource.MustParse("200m"), - api.ResourceMemory: resource.MustParse("1Gi"), - }, - b: api.ResourceList{ - api.ResourceCPU: resource.MustParse("100m"), - api.ResourceMemory: resource.MustParse("1Gi"), - }, - expected: false, - }, - "isNotEqualDiffKeys": { - a: api.ResourceList{ - api.ResourceCPU: resource.MustParse("100m"), - api.ResourceMemory: resource.MustParse("1Gi"), - }, - b: api.ResourceList{ - api.ResourceCPU: resource.MustParse("100m"), - api.ResourceMemory: resource.MustParse("1Gi"), - api.ResourcePods: resource.MustParse("1"), - }, - expected: false, - }, - } - for testName, testCase := range testCases { - if result := Equals(testCase.a, testCase.b); result != testCase.expected { - t.Errorf("%s expected: %v, actual: %v, a=%v, b=%v", testName, testCase.expected, result, testCase.a, testCase.b) - } - } -} - -func TestMax(t *testing.T) { - testCases := 
map[string]struct { - a api.ResourceList - b api.ResourceList - expected api.ResourceList - }{ - "noKeys": { - a: api.ResourceList{}, - b: api.ResourceList{}, - expected: api.ResourceList{}, - }, - "toEmpty": { - a: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, - b: api.ResourceList{}, - expected: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, - }, - "matching": { - a: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, - b: api.ResourceList{api.ResourceCPU: resource.MustParse("150m")}, - expected: api.ResourceList{api.ResourceCPU: resource.MustParse("150m")}, - }, - "matching(reverse)": { - a: api.ResourceList{api.ResourceCPU: resource.MustParse("150m")}, - b: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, - expected: api.ResourceList{api.ResourceCPU: resource.MustParse("150m")}, - }, - "matching-equal": { - a: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, - b: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, - expected: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, - }, - } - for testName, testCase := range testCases { - sum := Max(testCase.a, testCase.b) - if result := Equals(testCase.expected, sum); !result { - t.Errorf("%s expected: %v, actual: %v", testName, testCase.expected, sum) - } - } -} - -func TestAdd(t *testing.T) { - testCases := map[string]struct { - a api.ResourceList - b api.ResourceList - expected api.ResourceList - }{ - "noKeys": { - a: api.ResourceList{}, - b: api.ResourceList{}, - expected: api.ResourceList{}, - }, - "toEmpty": { - a: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, - b: api.ResourceList{}, - expected: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, - }, - "matching": { - a: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, - b: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, - expected: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")}, - }, - } - for 
testName, testCase := range testCases { - sum := Add(testCase.a, testCase.b) - if result := Equals(testCase.expected, sum); !result { - t.Errorf("%s expected: %v, actual: %v", testName, testCase.expected, sum) - } - } -} - -func TestSubtract(t *testing.T) { - testCases := map[string]struct { - a api.ResourceList - b api.ResourceList - expected api.ResourceList - }{ - "noKeys": { - a: api.ResourceList{}, - b: api.ResourceList{}, - expected: api.ResourceList{}, - }, - "value-empty": { - a: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, - b: api.ResourceList{}, - expected: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, - }, - "empty-value": { - a: api.ResourceList{}, - b: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, - expected: api.ResourceList{api.ResourceCPU: resource.MustParse("-100m")}, - }, - "value-value": { - a: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")}, - b: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, - expected: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, - }, - } - for testName, testCase := range testCases { - sub := Subtract(testCase.a, testCase.b) - if result := Equals(testCase.expected, sub); !result { - t.Errorf("%s expected: %v, actual: %v", testName, testCase.expected, sub) - } - } -} - -func TestResourceNames(t *testing.T) { - testCases := map[string]struct { - a api.ResourceList - expected []api.ResourceName - }{ - "empty": { - a: api.ResourceList{}, - expected: []api.ResourceName{}, - }, - "values": { - a: api.ResourceList{ - api.ResourceCPU: resource.MustParse("100m"), - api.ResourceMemory: resource.MustParse("1Gi"), - }, - expected: []api.ResourceName{api.ResourceMemory, api.ResourceCPU}, - }, - } - for testName, testCase := range testCases { - actualSet := ToSet(ResourceNames(testCase.a)) - expectedSet := ToSet(testCase.expected) - if !actualSet.Equal(expectedSet) { - t.Errorf("%s expected: %v, actual: %v", testName, expectedSet, actualSet) 
- } - } -} - -func TestContains(t *testing.T) { - testCases := map[string]struct { - a []api.ResourceName - b api.ResourceName - expected bool - }{ - "does-not-contain": { - a: []api.ResourceName{api.ResourceMemory}, - b: api.ResourceCPU, - expected: false, - }, - "does-contain": { - a: []api.ResourceName{api.ResourceMemory, api.ResourceCPU}, - b: api.ResourceCPU, - expected: true, - }, - } - for testName, testCase := range testCases { - if actual := Contains(testCase.a, testCase.b); actual != testCase.expected { - t.Errorf("%s expected: %v, actual: %v", testName, testCase.expected, actual) - } - } -} - -func TestContainsPrefix(t *testing.T) { - testCases := map[string]struct { - a []string - b api.ResourceName - expected bool - }{ - "does-not-contain": { - a: []string{api.ResourceHugePagesPrefix}, - b: api.ResourceCPU, - expected: false, - }, - "does-contain": { - a: []string{api.ResourceHugePagesPrefix}, - b: api.ResourceName(api.ResourceHugePagesPrefix + "2Mi"), - expected: true, - }, - } - for testName, testCase := range testCases { - if actual := ContainsPrefix(testCase.a, testCase.b); actual != testCase.expected { - t.Errorf("%s expected: %v, actual: %v", testName, testCase.expected, actual) - } - } -} - -func TestIsZero(t *testing.T) { - testCases := map[string]struct { - a api.ResourceList - expected bool - }{ - "empty": { - a: api.ResourceList{}, - expected: true, - }, - "zero": { - a: api.ResourceList{ - api.ResourceCPU: resource.MustParse("0"), - api.ResourceMemory: resource.MustParse("0"), - }, - expected: true, - }, - "non-zero": { - a: api.ResourceList{ - api.ResourceCPU: resource.MustParse("200m"), - api.ResourceMemory: resource.MustParse("1Gi"), - }, - expected: false, - }, - } - for testName, testCase := range testCases { - if result := IsZero(testCase.a); result != testCase.expected { - t.Errorf("%s expected: %v, actual: %v", testName, testCase.expected, result) - } - } -} - -func TestIsNegative(t *testing.T) { - testCases := map[string]struct { - 
a api.ResourceList - expected []api.ResourceName - }{ - "empty": { - a: api.ResourceList{}, - expected: []api.ResourceName{}, - }, - "some-negative": { - a: api.ResourceList{ - api.ResourceCPU: resource.MustParse("-10"), - api.ResourceMemory: resource.MustParse("0"), - }, - expected: []api.ResourceName{api.ResourceCPU}, - }, - "all-negative": { - a: api.ResourceList{ - api.ResourceCPU: resource.MustParse("-200m"), - api.ResourceMemory: resource.MustParse("-1Gi"), - }, - expected: []api.ResourceName{api.ResourceCPU, api.ResourceMemory}, - }, - } - for testName, testCase := range testCases { - actual := IsNegative(testCase.a) - actualSet := ToSet(actual) - expectedSet := ToSet(testCase.expected) - if !actualSet.Equal(expectedSet) { - t.Errorf("%s expected: %v, actual: %v", testName, expectedSet, actualSet) - } - } -} diff --git a/pkg/quota/v1/BUILD b/pkg/quota/v1/BUILD new file mode 100644 index 00000000000..7b3cb195b78 --- /dev/null +++ b/pkg/quota/v1/BUILD @@ -0,0 +1,53 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = [ + "interfaces.go", + "resources.go", + ], + importpath = "k8s.io/kubernetes/pkg/quota/v1", + deps = [ + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", + "//staging/src/k8s.io/client-go/tools/cache:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["resources_test.go"], + embed = [":go_default_library"], + deps = [ + "//staging/src/k8s.io/api/core/v1:go_default_library", + 
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/quota/v1/evaluator/core:all-srcs", + "//pkg/quota/v1/generic:all-srcs", + "//pkg/quota/v1/install:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/pkg/quota/OWNERS b/pkg/quota/v1/OWNERS similarity index 100% rename from pkg/quota/OWNERS rename to pkg/quota/v1/OWNERS diff --git a/pkg/quota/evaluator/OWNERS b/pkg/quota/v1/evaluator/OWNERS similarity index 100% rename from pkg/quota/evaluator/OWNERS rename to pkg/quota/v1/evaluator/OWNERS diff --git a/pkg/quota/evaluator/core/BUILD b/pkg/quota/v1/evaluator/core/BUILD similarity index 84% rename from pkg/quota/evaluator/core/BUILD rename to pkg/quota/v1/evaluator/core/BUILD index 13f06d3cb9c..800a34d7525 100644 --- a/pkg/quota/evaluator/core/BUILD +++ b/pkg/quota/v1/evaluator/core/BUILD @@ -15,16 +15,16 @@ go_library( "registry.go", "services.go", ], - importpath = "k8s.io/kubernetes/pkg/quota/evaluator/core", + importpath = "k8s.io/kubernetes/pkg/quota/v1/evaluator/core", deps = [ "//pkg/apis/core:go_default_library", - "//pkg/apis/core/helper:go_default_library", - "//pkg/apis/core/helper/qos:go_default_library", "//pkg/apis/core/v1:go_default_library", + "//pkg/apis/core/v1/helper:go_default_library", + "//pkg/apis/core/v1/helper/qos:go_default_library", "//pkg/features:go_default_library", "//pkg/kubeapiserver/admission/util:go_default_library", - "//pkg/quota:go_default_library", - "//pkg/quota/generic:go_default_library", + "//pkg/quota/v1:go_default_library", + "//pkg/quota/v1/generic:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", @@ -50,9 +50,10 @@ 
go_test( embed = [":go_default_library"], deps = [ "//pkg/apis/core:go_default_library", - "//pkg/quota:go_default_library", - "//pkg/quota/generic:go_default_library", + "//pkg/quota/v1:go_default_library", + "//pkg/quota/v1/generic:go_default_library", "//pkg/util/node:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/pkg/quota/evaluator/core/doc.go b/pkg/quota/v1/evaluator/core/doc.go similarity index 89% rename from pkg/quota/evaluator/core/doc.go rename to pkg/quota/v1/evaluator/core/doc.go index 3c9d632cbfe..a8649344199 100644 --- a/pkg/quota/evaluator/core/doc.go +++ b/pkg/quota/v1/evaluator/core/doc.go @@ -15,4 +15,4 @@ limitations under the License. */ // core contains modules that interface with the core api group -package core // import "k8s.io/kubernetes/pkg/quota/evaluator/core" +package core // import "k8s.io/kubernetes/pkg/quota/v1/evaluator/core" diff --git a/pkg/quota/evaluator/core/persistent_volume_claims.go b/pkg/quota/v1/evaluator/core/persistent_volume_claims.go similarity index 70% rename from pkg/quota/evaluator/core/persistent_volume_claims.go rename to pkg/quota/v1/evaluator/core/persistent_volume_claims.go index 62051e45147..ea8ecad52df 100644 --- a/pkg/quota/evaluator/core/persistent_volume_claims.go +++ b/pkg/quota/v1/evaluator/core/persistent_volume_claims.go @@ -20,7 +20,7 @@ import ( "fmt" "strings" - "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -30,22 +30,22 @@ import ( "k8s.io/apiserver/pkg/features" utilfeature "k8s.io/apiserver/pkg/util/feature" api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/apis/core/helper" k8s_api_v1 
"k8s.io/kubernetes/pkg/apis/core/v1" + "k8s.io/kubernetes/pkg/apis/core/v1/helper" k8sfeatures "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubeapiserver/admission/util" - "k8s.io/kubernetes/pkg/quota" - "k8s.io/kubernetes/pkg/quota/generic" + quota "k8s.io/kubernetes/pkg/quota/v1" + "k8s.io/kubernetes/pkg/quota/v1/generic" ) // the name used for object count quota -var pvcObjectCountName = generic.ObjectCountQuotaResourceNameFor(v1.SchemeGroupVersion.WithResource("persistentvolumeclaims").GroupResource()) +var pvcObjectCountName = generic.ObjectCountQuotaResourceNameFor(corev1.SchemeGroupVersion.WithResource("persistentvolumeclaims").GroupResource()) // pvcResources are the set of static resources managed by quota associated with pvcs. // for each resource in this list, it may be refined dynamically based on storage class. -var pvcResources = []api.ResourceName{ - api.ResourcePersistentVolumeClaims, - api.ResourceRequestsStorage, +var pvcResources = []corev1.ResourceName{ + corev1.ResourcePersistentVolumeClaims, + corev1.ResourceRequestsStorage, } // storageClassSuffix is the suffix to the qualified portion of storage class resource name. @@ -56,19 +56,21 @@ var pvcResources = []api.ResourceName{ // * bronze.storageclass.storage.k8s.io/requests.storage: 500Gi const storageClassSuffix string = ".storageclass.storage.k8s.io/" +/* TODO: prune? // ResourceByStorageClass returns a quota resource name by storage class. -func ResourceByStorageClass(storageClass string, resourceName api.ResourceName) api.ResourceName { - return api.ResourceName(string(storageClass + storageClassSuffix + string(resourceName))) +func ResourceByStorageClass(storageClass string, resourceName corev1.ResourceName) corev1.ResourceName { + return corev1.ResourceName(string(storageClass + storageClassSuffix + string(resourceName))) } +*/ // V1ResourceByStorageClass returns a quota resource name by storage class. 
-func V1ResourceByStorageClass(storageClass string, resourceName v1.ResourceName) v1.ResourceName { - return v1.ResourceName(string(storageClass + storageClassSuffix + string(resourceName))) +func V1ResourceByStorageClass(storageClass string, resourceName corev1.ResourceName) corev1.ResourceName { + return corev1.ResourceName(string(storageClass + storageClassSuffix + string(resourceName))) } // NewPersistentVolumeClaimEvaluator returns an evaluator that can evaluate persistent volume claims func NewPersistentVolumeClaimEvaluator(f quota.ListerForResourceFunc) quota.Evaluator { - listFuncByNamespace := generic.ListResourceUsingListerFunc(f, v1.SchemeGroupVersion.WithResource("persistentvolumeclaims")) + listFuncByNamespace := generic.ListResourceUsingListerFunc(f, corev1.SchemeGroupVersion.WithResource("persistentvolumeclaims")) pvcEvaluator := &pvcEvaluator{listFuncByNamespace: listFuncByNamespace} return pvcEvaluator } @@ -80,14 +82,14 @@ type pvcEvaluator struct { } // Constraints verifies that all required resources are present on the item. -func (p *pvcEvaluator) Constraints(required []api.ResourceName, item runtime.Object) error { +func (p *pvcEvaluator) Constraints(required []corev1.ResourceName, item runtime.Object) error { // no-op for persistent volume claims return nil } // GroupResource that this evaluator tracks func (p *pvcEvaluator) GroupResource() schema.GroupResource { - return v1.SchemeGroupVersion.WithResource("persistentvolumeclaims").GroupResource() + return corev1.SchemeGroupVersion.WithResource("persistentvolumeclaims").GroupResource() } // Handles returns true if the evaluator should handle the specified operation. 
@@ -119,27 +121,27 @@ func (p *pvcEvaluator) Handles(a admission.Attributes) bool { } // Matches returns true if the evaluator matches the specified quota with the provided input item -func (p *pvcEvaluator) Matches(resourceQuota *api.ResourceQuota, item runtime.Object) (bool, error) { +func (p *pvcEvaluator) Matches(resourceQuota *corev1.ResourceQuota, item runtime.Object) (bool, error) { return generic.Matches(resourceQuota, item, p.MatchingResources, generic.MatchesNoScopeFunc) } // MatchingScopes takes the input specified list of scopes and input object. Returns the set of scopes resource matches. -func (p *pvcEvaluator) MatchingScopes(item runtime.Object, scopes []api.ScopedResourceSelectorRequirement) ([]api.ScopedResourceSelectorRequirement, error) { - return []api.ScopedResourceSelectorRequirement{}, nil +func (p *pvcEvaluator) MatchingScopes(item runtime.Object, scopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) { + return []corev1.ScopedResourceSelectorRequirement{}, nil } // UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes. // It returns the scopes which are in limited scopes but dont have a corresponding covering quota scope -func (p *pvcEvaluator) UncoveredQuotaScopes(limitedScopes []api.ScopedResourceSelectorRequirement, matchedQuotaScopes []api.ScopedResourceSelectorRequirement) ([]api.ScopedResourceSelectorRequirement, error) { - return []api.ScopedResourceSelectorRequirement{}, nil +func (p *pvcEvaluator) UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) { + return []corev1.ScopedResourceSelectorRequirement{}, nil } // MatchingResources takes the input specified list of resources and returns the set of resources it matches. 
-func (p *pvcEvaluator) MatchingResources(items []api.ResourceName) []api.ResourceName { - result := []api.ResourceName{} +func (p *pvcEvaluator) MatchingResources(items []corev1.ResourceName) []corev1.ResourceName { + result := []corev1.ResourceName{} for _, item := range items { // match object count quota fields - if quota.Contains([]api.ResourceName{pvcObjectCountName}, item) { + if quota.Contains([]corev1.ResourceName{pvcObjectCountName}, item) { result = append(result, item) continue } @@ -161,15 +163,15 @@ func (p *pvcEvaluator) MatchingResources(items []api.ResourceName) []api.Resourc } // Usage knows how to measure usage associated with item. -func (p *pvcEvaluator) Usage(item runtime.Object) (api.ResourceList, error) { - result := api.ResourceList{} - pvc, err := toInternalPersistentVolumeClaimOrError(item) +func (p *pvcEvaluator) Usage(item runtime.Object) (corev1.ResourceList, error) { + result := corev1.ResourceList{} + pvc, err := toExternalPersistentVolumeClaimOrError(item) if err != nil { return result, err } // charge for claim - result[api.ResourcePersistentVolumeClaims] = *(resource.NewQuantity(1, resource.DecimalSI)) + result[corev1.ResourcePersistentVolumeClaims] = *(resource.NewQuantity(1, resource.DecimalSI)) result[pvcObjectCountName] = *(resource.NewQuantity(1, resource.DecimalSI)) if utilfeature.DefaultFeatureGate.Enabled(features.Initializers) { if !initialization.IsInitialized(pvc.Initializers) { @@ -179,16 +181,16 @@ func (p *pvcEvaluator) Usage(item runtime.Object) (api.ResourceList, error) { } storageClassRef := helper.GetPersistentVolumeClaimClass(pvc) if len(storageClassRef) > 0 { - storageClassClaim := api.ResourceName(storageClassRef + storageClassSuffix + string(api.ResourcePersistentVolumeClaims)) + storageClassClaim := corev1.ResourceName(storageClassRef + storageClassSuffix + string(corev1.ResourcePersistentVolumeClaims)) result[storageClassClaim] = *(resource.NewQuantity(1, resource.DecimalSI)) } // charge for storage - if 
request, found := pvc.Spec.Resources.Requests[api.ResourceStorage]; found { - result[api.ResourceRequestsStorage] = request + if request, found := pvc.Spec.Resources.Requests[corev1.ResourceStorage]; found { + result[corev1.ResourceRequestsStorage] = request // charge usage to the storage class (if present) if len(storageClassRef) > 0 { - storageClassStorage := api.ResourceName(storageClassRef + storageClassSuffix + string(api.ResourceRequestsStorage)) + storageClassStorage := corev1.ResourceName(storageClassRef + storageClassSuffix + string(corev1.ResourceRequestsStorage)) result[storageClassStorage] = request } } @@ -203,15 +205,15 @@ func (p *pvcEvaluator) UsageStats(options quota.UsageStatsOptions) (quota.UsageS // ensure we implement required interface var _ quota.Evaluator = &pvcEvaluator{} -func toInternalPersistentVolumeClaimOrError(obj runtime.Object) (*api.PersistentVolumeClaim, error) { - pvc := &api.PersistentVolumeClaim{} +func toExternalPersistentVolumeClaimOrError(obj runtime.Object) (*corev1.PersistentVolumeClaim, error) { + pvc := &corev1.PersistentVolumeClaim{} switch t := obj.(type) { - case *v1.PersistentVolumeClaim: - if err := k8s_api_v1.Convert_v1_PersistentVolumeClaim_To_core_PersistentVolumeClaim(t, pvc, nil); err != nil { + case *corev1.PersistentVolumeClaim: + pvc = t + case *api.PersistentVolumeClaim: + if err := k8s_api_v1.Convert_core_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(t, pvc, nil); err != nil { return nil, err } - case *api.PersistentVolumeClaim: - pvc = t default: return nil, fmt.Errorf("expect *api.PersistentVolumeClaim or *v1.PersistentVolumeClaim, got %v", t) } diff --git a/pkg/quota/evaluator/core/persistent_volume_claims_test.go b/pkg/quota/v1/evaluator/core/persistent_volume_claims_test.go similarity index 81% rename from pkg/quota/evaluator/core/persistent_volume_claims_test.go rename to pkg/quota/v1/evaluator/core/persistent_volume_claims_test.go index e2b1c69d98a..12e0dc89a3b 100644 --- 
a/pkg/quota/evaluator/core/persistent_volume_claims_test.go +++ b/pkg/quota/v1/evaluator/core/persistent_volume_claims_test.go @@ -19,12 +19,13 @@ package core import ( "testing" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/quota" - "k8s.io/kubernetes/pkg/quota/generic" + quota "k8s.io/kubernetes/pkg/quota/v1" + "k8s.io/kubernetes/pkg/quota/v1/generic" ) func testVolumeClaim(name string, namespace string, spec api.PersistentVolumeClaimSpec) *api.PersistentVolumeClaim { @@ -79,23 +80,23 @@ func TestPersistentVolumeClaimEvaluatorUsage(t *testing.T) { evaluator := NewPersistentVolumeClaimEvaluator(nil) testCases := map[string]struct { pvc *api.PersistentVolumeClaim - usage api.ResourceList + usage corev1.ResourceList }{ "pvc-usage": { pvc: validClaim, - usage: api.ResourceList{ - api.ResourceRequestsStorage: resource.MustParse("10Gi"), - api.ResourcePersistentVolumeClaims: resource.MustParse("1"), + usage: corev1.ResourceList{ + corev1.ResourceRequestsStorage: resource.MustParse("10Gi"), + corev1.ResourcePersistentVolumeClaims: resource.MustParse("1"), generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "persistentvolumeclaims"}): resource.MustParse("1"), }, }, "pvc-usage-by-class": { pvc: validClaimByStorageClass, - usage: api.ResourceList{ - api.ResourceRequestsStorage: resource.MustParse("10Gi"), - api.ResourcePersistentVolumeClaims: resource.MustParse("1"), - ResourceByStorageClass(classGold, api.ResourceRequestsStorage): resource.MustParse("10Gi"), - ResourceByStorageClass(classGold, api.ResourcePersistentVolumeClaims): resource.MustParse("1"), + usage: corev1.ResourceList{ + corev1.ResourceRequestsStorage: resource.MustParse("10Gi"), + corev1.ResourcePersistentVolumeClaims: resource.MustParse("1"), + V1ResourceByStorageClass(classGold, corev1.ResourceRequestsStorage): 
resource.MustParse("10Gi"), + V1ResourceByStorageClass(classGold, corev1.ResourcePersistentVolumeClaims): resource.MustParse("1"), generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "persistentvolumeclaims"}): resource.MustParse("1"), }, }, diff --git a/pkg/quota/evaluator/core/pods.go b/pkg/quota/v1/evaluator/core/pods.go similarity index 64% rename from pkg/quota/evaluator/core/pods.go rename to pkg/quota/v1/evaluator/core/pods.go index 7861c3d0a13..dbf20e5661c 100644 --- a/pkg/quota/evaluator/core/pods.go +++ b/pkg/quota/v1/evaluator/core/pods.go @@ -21,7 +21,7 @@ import ( "strings" "time" - "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" @@ -32,57 +32,57 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/admission" api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/apis/core/helper" - "k8s.io/kubernetes/pkg/apis/core/helper/qos" k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" + "k8s.io/kubernetes/pkg/apis/core/v1/helper" + "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" "k8s.io/kubernetes/pkg/kubeapiserver/admission/util" - "k8s.io/kubernetes/pkg/quota" - "k8s.io/kubernetes/pkg/quota/generic" + quota "k8s.io/kubernetes/pkg/quota/v1" + "k8s.io/kubernetes/pkg/quota/v1/generic" ) // the name used for object count quota -var podObjectCountName = generic.ObjectCountQuotaResourceNameFor(v1.SchemeGroupVersion.WithResource("pods").GroupResource()) +var podObjectCountName = generic.ObjectCountQuotaResourceNameFor(corev1.SchemeGroupVersion.WithResource("pods").GroupResource()) // podResources are the set of resources managed by quota associated with pods. 
-var podResources = []api.ResourceName{ +var podResources = []corev1.ResourceName{ podObjectCountName, - api.ResourceCPU, - api.ResourceMemory, - api.ResourceEphemeralStorage, - api.ResourceRequestsCPU, - api.ResourceRequestsMemory, - api.ResourceRequestsEphemeralStorage, - api.ResourceLimitsCPU, - api.ResourceLimitsMemory, - api.ResourceLimitsEphemeralStorage, - api.ResourcePods, + corev1.ResourceCPU, + corev1.ResourceMemory, + corev1.ResourceEphemeralStorage, + corev1.ResourceRequestsCPU, + corev1.ResourceRequestsMemory, + corev1.ResourceRequestsEphemeralStorage, + corev1.ResourceLimitsCPU, + corev1.ResourceLimitsMemory, + corev1.ResourceLimitsEphemeralStorage, + corev1.ResourcePods, } // podResourcePrefixes are the set of prefixes for resources (Hugepages, and other // potential extended reources with specific prefix) managed by quota associated with pods. var podResourcePrefixes = []string{ - api.ResourceHugePagesPrefix, - api.ResourceRequestsHugePagesPrefix, + corev1.ResourceHugePagesPrefix, + corev1.ResourceRequestsHugePagesPrefix, } // requestedResourcePrefixes are the set of prefixes for resources // that might be declared in pod's Resources.Requests/Limits var requestedResourcePrefixes = []string{ - api.ResourceHugePagesPrefix, + corev1.ResourceHugePagesPrefix, } // maskResourceWithPrefix mask resource with certain prefix // e.g. hugepages-XXX -> requests.hugepages-XXX -func maskResourceWithPrefix(resource api.ResourceName, prefix string) api.ResourceName { - return api.ResourceName(fmt.Sprintf("%s%s", prefix, string(resource))) +func maskResourceWithPrefix(resource corev1.ResourceName, prefix string) corev1.ResourceName { + return corev1.ResourceName(fmt.Sprintf("%s%s", prefix, string(resource))) } // isExtendedResourceNameForQuota returns true if the extended resource name // has the quota related resource prefix. 
-func isExtendedResourceNameForQuota(name api.ResourceName) bool { +func isExtendedResourceNameForQuota(name corev1.ResourceName) bool { // As overcommit is not supported by extended resources for now, // only quota objects in format of "requests.resourceName" is allowed. - return !helper.IsNativeResource(name) && strings.HasPrefix(string(name), api.DefaultResourceRequestsPrefix) + return !helper.IsNativeResource(name) && strings.HasPrefix(string(name), corev1.DefaultResourceRequestsPrefix) } // NOTE: it was a mistake, but if a quota tracks cpu or memory related resources, @@ -90,17 +90,17 @@ func isExtendedResourceNameForQuota(name api.ResourceName) bool { // this mistake for other future resources (gpus, ephemeral-storage,etc). // do not add more resources to this list! var validationSet = sets.NewString( - string(api.ResourceCPU), - string(api.ResourceMemory), - string(api.ResourceRequestsCPU), - string(api.ResourceRequestsMemory), - string(api.ResourceLimitsCPU), - string(api.ResourceLimitsMemory), + string(corev1.ResourceCPU), + string(corev1.ResourceMemory), + string(corev1.ResourceRequestsCPU), + string(corev1.ResourceRequestsMemory), + string(corev1.ResourceLimitsCPU), + string(corev1.ResourceLimitsMemory), ) // NewPodEvaluator returns an evaluator that can evaluate pods func NewPodEvaluator(f quota.ListerForResourceFunc, clock clock.Clock) quota.Evaluator { - listFuncByNamespace := generic.ListResourceUsingListerFunc(f, v1.SchemeGroupVersion.WithResource("pods")) + listFuncByNamespace := generic.ListResourceUsingListerFunc(f, corev1.SchemeGroupVersion.WithResource("pods")) podEvaluator := &podEvaluator{listFuncByNamespace: listFuncByNamespace, clock: clock} return podEvaluator } @@ -115,10 +115,10 @@ type podEvaluator struct { // Constraints verifies that all required resources are present on the pod // In addition, it validates that the resources are valid (i.e. 
requests < limits) -func (p *podEvaluator) Constraints(required []api.ResourceName, item runtime.Object) error { - pod, ok := item.(*api.Pod) - if !ok { - return fmt.Errorf("unexpected input object %v", item) +func (p *podEvaluator) Constraints(required []corev1.ResourceName, item runtime.Object) error { + pod, err := toExternalPodOrError(item) + if err != nil { + return err } // BACKWARD COMPATIBILITY REQUIREMENT: if we quota cpu or memory, then each container @@ -141,7 +141,7 @@ func (p *podEvaluator) Constraints(required []api.ResourceName, item runtime.Obj // GroupResource that this evaluator tracks func (p *podEvaluator) GroupResource() schema.GroupResource { - return v1.SchemeGroupVersion.WithResource("pods").GroupResource() + return corev1.SchemeGroupVersion.WithResource("pods").GroupResource() } // Handles returns true if the evaluator should handle the specified attributes. @@ -161,12 +161,12 @@ func (p *podEvaluator) Handles(a admission.Attributes) bool { } // Matches returns true if the evaluator matches the specified quota with the provided input item -func (p *podEvaluator) Matches(resourceQuota *api.ResourceQuota, item runtime.Object) (bool, error) { +func (p *podEvaluator) Matches(resourceQuota *corev1.ResourceQuota, item runtime.Object) (bool, error) { return generic.Matches(resourceQuota, item, p.MatchingResources, podMatchesScopeFunc) } // MatchingResources takes the input specified list of resources and returns the set of resources it matches. -func (p *podEvaluator) MatchingResources(input []api.ResourceName) []api.ResourceName { +func (p *podEvaluator) MatchingResources(input []corev1.ResourceName) []corev1.ResourceName { result := quota.Intersection(input, podResources) for _, resource := range input { // for resources with certain prefix, e.g. hugepages @@ -183,12 +183,12 @@ func (p *podEvaluator) MatchingResources(input []api.ResourceName) []api.Resourc } // MatchingScopes takes the input specified list of scopes and pod object. 
Returns the set of scope selectors pod matches. -func (p *podEvaluator) MatchingScopes(item runtime.Object, scopeSelectors []api.ScopedResourceSelectorRequirement) ([]api.ScopedResourceSelectorRequirement, error) { - matchedScopes := []api.ScopedResourceSelectorRequirement{} +func (p *podEvaluator) MatchingScopes(item runtime.Object, scopeSelectors []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) { + matchedScopes := []corev1.ScopedResourceSelectorRequirement{} for _, selector := range scopeSelectors { match, err := podMatchesScopeFunc(selector, item) if err != nil { - return []api.ScopedResourceSelectorRequirement{}, fmt.Errorf("error on matching scope %v: %v", selector, err) + return []corev1.ScopedResourceSelectorRequirement{}, fmt.Errorf("error on matching scope %v: %v", selector, err) } if match { matchedScopes = append(matchedScopes, selector) @@ -199,8 +199,8 @@ func (p *podEvaluator) MatchingScopes(item runtime.Object, scopeSelectors []api. // UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes. 
// It returns the scopes which are in limited scopes but dont have a corresponding covering quota scope -func (p *podEvaluator) UncoveredQuotaScopes(limitedScopes []api.ScopedResourceSelectorRequirement, matchedQuotaScopes []api.ScopedResourceSelectorRequirement) ([]api.ScopedResourceSelectorRequirement, error) { - uncoveredScopes := []api.ScopedResourceSelectorRequirement{} +func (p *podEvaluator) UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) { + uncoveredScopes := []corev1.ScopedResourceSelectorRequirement{} for _, selector := range limitedScopes { isCovered := false for _, matchedScopeSelector := range matchedQuotaScopes { @@ -218,7 +218,7 @@ func (p *podEvaluator) UncoveredQuotaScopes(limitedScopes []api.ScopedResourceSe } // Usage knows how to measure usage associated with pods -func (p *podEvaluator) Usage(item runtime.Object) (api.ResourceList, error) { +func (p *podEvaluator) Usage(item runtime.Object) (corev1.ResourceList, error) { // delegate to normal usage return PodUsageFunc(item, p.clock) } @@ -233,7 +233,7 @@ var _ quota.Evaluator = &podEvaluator{} // enforcePodContainerConstraints checks for required resources that are not set on this container and // adds them to missingSet. 
-func enforcePodContainerConstraints(container *api.Container, requiredSet, missingSet sets.String) { +func enforcePodContainerConstraints(container *corev1.Container, requiredSet, missingSet sets.String) { requests := container.Resources.Requests limits := container.Resources.Limits containerUsage := podComputeUsageHelper(requests, limits) @@ -245,55 +245,55 @@ func enforcePodContainerConstraints(container *api.Container, requiredSet, missi } // podComputeUsageHelper can summarize the pod compute quota usage based on requests and limits -func podComputeUsageHelper(requests api.ResourceList, limits api.ResourceList) api.ResourceList { - result := api.ResourceList{} - result[api.ResourcePods] = resource.MustParse("1") - if request, found := requests[api.ResourceCPU]; found { - result[api.ResourceCPU] = request - result[api.ResourceRequestsCPU] = request +func podComputeUsageHelper(requests corev1.ResourceList, limits corev1.ResourceList) corev1.ResourceList { + result := corev1.ResourceList{} + result[corev1.ResourcePods] = resource.MustParse("1") + if request, found := requests[corev1.ResourceCPU]; found { + result[corev1.ResourceCPU] = request + result[corev1.ResourceRequestsCPU] = request } - if limit, found := limits[api.ResourceCPU]; found { - result[api.ResourceLimitsCPU] = limit + if limit, found := limits[corev1.ResourceCPU]; found { + result[corev1.ResourceLimitsCPU] = limit } - if request, found := requests[api.ResourceMemory]; found { - result[api.ResourceMemory] = request - result[api.ResourceRequestsMemory] = request + if request, found := requests[corev1.ResourceMemory]; found { + result[corev1.ResourceMemory] = request + result[corev1.ResourceRequestsMemory] = request } - if limit, found := limits[api.ResourceMemory]; found { - result[api.ResourceLimitsMemory] = limit + if limit, found := limits[corev1.ResourceMemory]; found { + result[corev1.ResourceLimitsMemory] = limit } - if request, found := requests[api.ResourceEphemeralStorage]; found { - 
result[api.ResourceEphemeralStorage] = request - result[api.ResourceRequestsEphemeralStorage] = request + if request, found := requests[corev1.ResourceEphemeralStorage]; found { + result[corev1.ResourceEphemeralStorage] = request + result[corev1.ResourceRequestsEphemeralStorage] = request } - if limit, found := limits[api.ResourceEphemeralStorage]; found { - result[api.ResourceLimitsEphemeralStorage] = limit + if limit, found := limits[corev1.ResourceEphemeralStorage]; found { + result[corev1.ResourceLimitsEphemeralStorage] = limit } for resource, request := range requests { // for resources with certain prefix, e.g. hugepages if quota.ContainsPrefix(requestedResourcePrefixes, resource) { result[resource] = request - result[maskResourceWithPrefix(resource, api.DefaultResourceRequestsPrefix)] = request + result[maskResourceWithPrefix(resource, corev1.DefaultResourceRequestsPrefix)] = request } // for extended resources if helper.IsExtendedResourceName(resource) { // only quota objects in format of "requests.resourceName" is allowed for extended resource. 
- result[maskResourceWithPrefix(resource, api.DefaultResourceRequestsPrefix)] = request + result[maskResourceWithPrefix(resource, corev1.DefaultResourceRequestsPrefix)] = request } } return result } -func toInternalPodOrError(obj runtime.Object) (*api.Pod, error) { - pod := &api.Pod{} +func toExternalPodOrError(obj runtime.Object) (*corev1.Pod, error) { + pod := &corev1.Pod{} switch t := obj.(type) { - case *v1.Pod: - if err := k8s_api_v1.Convert_v1_Pod_To_core_Pod(t, pod, nil); err != nil { + case *corev1.Pod: + pod = t + case *api.Pod: + if err := k8s_api_v1.Convert_core_Pod_To_v1_Pod(t, pod, nil); err != nil { return nil, err } - case *api.Pod: - pod = t default: return nil, fmt.Errorf("expect *api.Pod or *v1.Pod, got %v", t) } @@ -301,21 +301,21 @@ func toInternalPodOrError(obj runtime.Object) (*api.Pod, error) { } // podMatchesScopeFunc is a function that knows how to evaluate if a pod matches a scope -func podMatchesScopeFunc(selector api.ScopedResourceSelectorRequirement, object runtime.Object) (bool, error) { - pod, err := toInternalPodOrError(object) +func podMatchesScopeFunc(selector corev1.ScopedResourceSelectorRequirement, object runtime.Object) (bool, error) { + pod, err := toExternalPodOrError(object) if err != nil { return false, err } switch selector.ScopeName { - case api.ResourceQuotaScopeTerminating: + case corev1.ResourceQuotaScopeTerminating: return isTerminating(pod), nil - case api.ResourceQuotaScopeNotTerminating: + case corev1.ResourceQuotaScopeNotTerminating: return !isTerminating(pod), nil - case api.ResourceQuotaScopeBestEffort: + case corev1.ResourceQuotaScopeBestEffort: return isBestEffort(pod), nil - case api.ResourceQuotaScopeNotBestEffort: + case corev1.ResourceQuotaScopeNotBestEffort: return !isBestEffort(pod), nil - case api.ResourceQuotaScopePriorityClass: + case corev1.ResourceQuotaScopePriorityClass: return podMatchesSelector(pod, selector) } return false, nil @@ -325,28 +325,28 @@ func podMatchesScopeFunc(selector 
api.ScopedResourceSelectorRequirement, object // A pod is charged for quota if the following are not true. // - pod has a terminal phase (failed or succeeded) // - pod has been marked for deletion and grace period has expired -func PodUsageFunc(obj runtime.Object, clock clock.Clock) (api.ResourceList, error) { - pod, err := toInternalPodOrError(obj) +func PodUsageFunc(obj runtime.Object, clock clock.Clock) (corev1.ResourceList, error) { + pod, err := toExternalPodOrError(obj) if err != nil { - return api.ResourceList{}, err + return corev1.ResourceList{}, err } // always quota the object count (even if the pod is end of life) // object count quotas track all objects that are in storage. // where "pods" tracks all pods that have not reached a terminal state, // count/pods tracks all pods independent of state. - result := api.ResourceList{ + result := corev1.ResourceList{ podObjectCountName: *(resource.NewQuantity(1, resource.DecimalSI)), } // by convention, we do not quota compute resources that have reached end-of life // note: the "pods" resource is considered a compute resource since it is tied to life-cycle. - if !QuotaPod(pod, clock) { + if !QuotaV1Pod(pod, clock) { return result, nil } - requests := api.ResourceList{} - limits := api.ResourceList{} + requests := corev1.ResourceList{} + limits := corev1.ResourceList{} // TODO: ideally, we have pod level requests and limits in the future. 
for i := range pod.Spec.Containers { requests = quota.Add(requests, pod.Spec.Containers[i].Resources.Requests) @@ -364,25 +364,25 @@ func PodUsageFunc(obj runtime.Object, clock clock.Clock) (api.ResourceList, erro return result, nil } -func isBestEffort(pod *api.Pod) bool { - return qos.GetPodQOS(pod) == api.PodQOSBestEffort +func isBestEffort(pod *corev1.Pod) bool { + return qos.GetPodQOS(pod) == corev1.PodQOSBestEffort } -func isTerminating(pod *api.Pod) bool { +func isTerminating(pod *corev1.Pod) bool { if pod.Spec.ActiveDeadlineSeconds != nil && *pod.Spec.ActiveDeadlineSeconds >= int64(0) { return true } return false } -func podMatchesSelector(pod *api.Pod, selector api.ScopedResourceSelectorRequirement) (bool, error) { +func podMatchesSelector(pod *corev1.Pod, selector corev1.ScopedResourceSelectorRequirement) (bool, error) { labelSelector, err := helper.ScopedResourceSelectorRequirementsAsSelector(selector) if err != nil { return false, fmt.Errorf("failed to parse and convert selector: %v", err) } var m map[string]string if len(pod.Spec.PriorityClassName) != 0 { - m = map[string]string{string(api.ResourceQuotaScopePriorityClass): pod.Spec.PriorityClassName} + m = map[string]string{string(corev1.ResourceQuotaScopePriorityClass): pod.Spec.PriorityClassName} } if labelSelector.Matches(labels.Set(m)) { return true, nil @@ -390,36 +390,11 @@ func podMatchesSelector(pod *api.Pod, selector api.ScopedResourceSelectorRequire return false, nil } -// QuotaPod returns true if the pod is eligible to track against a quota -// A pod is eligible for quota, unless any of the following are true: -// - pod has a terminal phase (failed or succeeded) -// - pod has been marked for deletion and grace period has expired. 
-func QuotaPod(pod *api.Pod, clock clock.Clock) bool { - // if pod is terminal, ignore it for quota - if api.PodFailed == pod.Status.Phase || api.PodSucceeded == pod.Status.Phase { - return false - } - // deleted pods that should be gone should not be charged to user quota. - // this can happen if a node is lost, and the kubelet is never able to confirm deletion. - // even though the cluster may have drifting clocks, quota makes a reasonable effort - // to balance cluster needs against user needs. user's do not control clocks, - // but at worst a small drive in clocks will only slightly impact quota. - if pod.DeletionTimestamp != nil && pod.DeletionGracePeriodSeconds != nil { - now := clock.Now() - deletionTime := pod.DeletionTimestamp.Time - gracePeriod := time.Duration(*pod.DeletionGracePeriodSeconds) * time.Second - if now.After(deletionTime.Add(gracePeriod)) { - return false - } - } - return true -} - // QuotaV1Pod returns true if the pod is eligible to track against a quota // if it's not in a terminal state according to its phase. 
-func QuotaV1Pod(pod *v1.Pod, clock clock.Clock) bool { +func QuotaV1Pod(pod *corev1.Pod, clock clock.Clock) bool { // if pod is terminal, ignore it for quota - if v1.PodFailed == pod.Status.Phase || v1.PodSucceeded == pod.Status.Phase { + if corev1.PodFailed == pod.Status.Phase || corev1.PodSucceeded == pod.Status.Phase { return false } // if pods are stuck terminating (for example, a node is lost), we do not want diff --git a/pkg/quota/evaluator/core/pods_test.go b/pkg/quota/v1/evaluator/core/pods_test.go similarity index 74% rename from pkg/quota/evaluator/core/pods_test.go rename to pkg/quota/v1/evaluator/core/pods_test.go index 4d0744373fa..8d0b93c9f8f 100644 --- a/pkg/quota/evaluator/core/pods_test.go +++ b/pkg/quota/v1/evaluator/core/pods_test.go @@ -20,20 +20,21 @@ import ( "testing" "time" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/clock" api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/quota" - "k8s.io/kubernetes/pkg/quota/generic" + quota "k8s.io/kubernetes/pkg/quota/v1" + "k8s.io/kubernetes/pkg/quota/v1/generic" "k8s.io/kubernetes/pkg/util/node" ) func TestPodConstraintsFunc(t *testing.T) { testCases := map[string]struct { pod *api.Pod - required []api.ResourceName + required []corev1.ResourceName err string }{ "init container resource missing": { @@ -47,7 +48,7 @@ func TestPodConstraintsFunc(t *testing.T) { }}, }, }, - required: []api.ResourceName{api.ResourceMemory}, + required: []corev1.ResourceName{corev1.ResourceMemory}, err: `must specify memory`, }, "container resource missing": { @@ -61,7 +62,7 @@ func TestPodConstraintsFunc(t *testing.T) { }}, }, }, - required: []api.ResourceName{api.ResourceMemory}, + required: []corev1.ResourceName{corev1.ResourceMemory}, err: `must specify memory`, }, } @@ -90,7 +91,7 @@ func TestPodEvaluatorUsage(t *testing.T) { testCases := map[string]struct { pod 
*api.Pod - usage api.ResourceList + usage corev1.ResourceList }{ "init container CPU": { pod: &api.Pod{ @@ -103,11 +104,11 @@ func TestPodEvaluatorUsage(t *testing.T) { }}, }, }, - usage: api.ResourceList{ - api.ResourceRequestsCPU: resource.MustParse("1m"), - api.ResourceLimitsCPU: resource.MustParse("2m"), - api.ResourcePods: resource.MustParse("1"), - api.ResourceCPU: resource.MustParse("1m"), + usage: corev1.ResourceList{ + corev1.ResourceRequestsCPU: resource.MustParse("1m"), + corev1.ResourceLimitsCPU: resource.MustParse("2m"), + corev1.ResourcePods: resource.MustParse("1"), + corev1.ResourceCPU: resource.MustParse("1m"), generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"), }, }, @@ -122,11 +123,11 @@ func TestPodEvaluatorUsage(t *testing.T) { }}, }, }, - usage: api.ResourceList{ - api.ResourceRequestsMemory: resource.MustParse("1m"), - api.ResourceLimitsMemory: resource.MustParse("2m"), - api.ResourcePods: resource.MustParse("1"), - api.ResourceMemory: resource.MustParse("1m"), + usage: corev1.ResourceList{ + corev1.ResourceRequestsMemory: resource.MustParse("1m"), + corev1.ResourceLimitsMemory: resource.MustParse("2m"), + corev1.ResourcePods: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("1m"), generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"), }, }, @@ -141,11 +142,11 @@ func TestPodEvaluatorUsage(t *testing.T) { }}, }, }, - usage: api.ResourceList{ - api.ResourceEphemeralStorage: resource.MustParse("32Mi"), - api.ResourceRequestsEphemeralStorage: resource.MustParse("32Mi"), - api.ResourceLimitsEphemeralStorage: resource.MustParse("64Mi"), - api.ResourcePods: resource.MustParse("1"), + usage: corev1.ResourceList{ + corev1.ResourceEphemeralStorage: resource.MustParse("32Mi"), + corev1.ResourceRequestsEphemeralStorage: resource.MustParse("32Mi"), + corev1.ResourceLimitsEphemeralStorage: resource.MustParse("64Mi"), + 
corev1.ResourcePods: resource.MustParse("1"), generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"), }, }, @@ -159,10 +160,10 @@ func TestPodEvaluatorUsage(t *testing.T) { }}, }, }, - usage: api.ResourceList{ - api.ResourceName(api.ResourceHugePagesPrefix + "2Mi"): resource.MustParse("100Mi"), - api.ResourceName(api.ResourceRequestsHugePagesPrefix + "2Mi"): resource.MustParse("100Mi"), - api.ResourcePods: resource.MustParse("1"), + usage: corev1.ResourceList{ + corev1.ResourceName(corev1.ResourceHugePagesPrefix + "2Mi"): resource.MustParse("100Mi"), + corev1.ResourceName(corev1.ResourceRequestsHugePagesPrefix + "2Mi"): resource.MustParse("100Mi"), + corev1.ResourcePods: resource.MustParse("1"), generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"), }, }, @@ -177,9 +178,9 @@ func TestPodEvaluatorUsage(t *testing.T) { }}, }, }, - usage: api.ResourceList{ - api.ResourceName("requests.example.com/dongle"): resource.MustParse("3"), - api.ResourcePods: resource.MustParse("1"), + usage: corev1.ResourceList{ + corev1.ResourceName("requests.example.com/dongle"): resource.MustParse("3"), + corev1.ResourcePods: resource.MustParse("1"), generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"), }, }, @@ -194,11 +195,11 @@ func TestPodEvaluatorUsage(t *testing.T) { }}, }, }, - usage: api.ResourceList{ - api.ResourceRequestsCPU: resource.MustParse("1m"), - api.ResourceLimitsCPU: resource.MustParse("2m"), - api.ResourcePods: resource.MustParse("1"), - api.ResourceCPU: resource.MustParse("1m"), + usage: corev1.ResourceList{ + corev1.ResourceRequestsCPU: resource.MustParse("1m"), + corev1.ResourceLimitsCPU: resource.MustParse("2m"), + corev1.ResourcePods: resource.MustParse("1"), + corev1.ResourceCPU: resource.MustParse("1m"), generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"), }, 
}, @@ -213,11 +214,11 @@ func TestPodEvaluatorUsage(t *testing.T) { }}, }, }, - usage: api.ResourceList{ - api.ResourceRequestsMemory: resource.MustParse("1m"), - api.ResourceLimitsMemory: resource.MustParse("2m"), - api.ResourcePods: resource.MustParse("1"), - api.ResourceMemory: resource.MustParse("1m"), + usage: corev1.ResourceList{ + corev1.ResourceRequestsMemory: resource.MustParse("1m"), + corev1.ResourceLimitsMemory: resource.MustParse("2m"), + corev1.ResourcePods: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("1m"), generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"), }, }, @@ -232,11 +233,11 @@ func TestPodEvaluatorUsage(t *testing.T) { }}, }, }, - usage: api.ResourceList{ - api.ResourceEphemeralStorage: resource.MustParse("32Mi"), - api.ResourceRequestsEphemeralStorage: resource.MustParse("32Mi"), - api.ResourceLimitsEphemeralStorage: resource.MustParse("64Mi"), - api.ResourcePods: resource.MustParse("1"), + usage: corev1.ResourceList{ + corev1.ResourceEphemeralStorage: resource.MustParse("32Mi"), + corev1.ResourceRequestsEphemeralStorage: resource.MustParse("32Mi"), + corev1.ResourceLimitsEphemeralStorage: resource.MustParse("64Mi"), + corev1.ResourcePods: resource.MustParse("1"), generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"), }, }, @@ -250,10 +251,10 @@ func TestPodEvaluatorUsage(t *testing.T) { }}, }, }, - usage: api.ResourceList{ - api.ResourceName(api.ResourceHugePagesPrefix + "2Mi"): resource.MustParse("100Mi"), - api.ResourceName(api.ResourceRequestsHugePagesPrefix + "2Mi"): resource.MustParse("100Mi"), - api.ResourcePods: resource.MustParse("1"), + usage: corev1.ResourceList{ + corev1.ResourceName(corev1.ResourceHugePagesPrefix + "2Mi"): resource.MustParse("100Mi"), + corev1.ResourceName(corev1.ResourceRequestsHugePagesPrefix + "2Mi"): resource.MustParse("100Mi"), + corev1.ResourcePods: resource.MustParse("1"), 
generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"), }, }, @@ -268,9 +269,9 @@ func TestPodEvaluatorUsage(t *testing.T) { }}, }, }, - usage: api.ResourceList{ - api.ResourceName("requests.example.com/dongle"): resource.MustParse("3"), - api.ResourcePods: resource.MustParse("1"), + usage: corev1.ResourceList{ + corev1.ResourceName("requests.example.com/dongle"): resource.MustParse("3"), + corev1.ResourcePods: resource.MustParse("1"), generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"), }, }, @@ -339,15 +340,15 @@ func TestPodEvaluatorUsage(t *testing.T) { }, }, }, - usage: api.ResourceList{ - api.ResourceRequestsCPU: resource.MustParse("4"), - api.ResourceRequestsMemory: resource.MustParse("100M"), - api.ResourceLimitsCPU: resource.MustParse("8"), - api.ResourceLimitsMemory: resource.MustParse("200M"), - api.ResourcePods: resource.MustParse("1"), - api.ResourceCPU: resource.MustParse("4"), - api.ResourceMemory: resource.MustParse("100M"), - api.ResourceName("requests.example.com/dongle"): resource.MustParse("4"), + usage: corev1.ResourceList{ + corev1.ResourceRequestsCPU: resource.MustParse("4"), + corev1.ResourceRequestsMemory: resource.MustParse("100M"), + corev1.ResourceLimitsCPU: resource.MustParse("8"), + corev1.ResourceLimitsMemory: resource.MustParse("200M"), + corev1.ResourcePods: resource.MustParse("1"), + corev1.ResourceCPU: resource.MustParse("4"), + corev1.ResourceMemory: resource.MustParse("100M"), + corev1.ResourceName("requests.example.com/dongle"): resource.MustParse("4"), generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"), }, }, @@ -378,7 +379,7 @@ func TestPodEvaluatorUsage(t *testing.T) { }, }, }, - usage: api.ResourceList{ + usage: corev1.ResourceList{ generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"), }, }, @@ -406,11 +407,11 @@ func 
TestPodEvaluatorUsage(t *testing.T) { }, }, }, - usage: api.ResourceList{ - api.ResourceRequestsCPU: resource.MustParse("1"), - api.ResourceLimitsCPU: resource.MustParse("2"), - api.ResourcePods: resource.MustParse("1"), - api.ResourceCPU: resource.MustParse("1"), + usage: corev1.ResourceList{ + corev1.ResourceRequestsCPU: resource.MustParse("1"), + corev1.ResourceLimitsCPU: resource.MustParse("2"), + corev1.ResourcePods: resource.MustParse("1"), + corev1.ResourceCPU: resource.MustParse("1"), generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"), }, }, diff --git a/pkg/quota/evaluator/core/registry.go b/pkg/quota/v1/evaluator/core/registry.go similarity index 70% rename from pkg/quota/evaluator/core/registry.go rename to pkg/quota/v1/evaluator/core/registry.go index ba54143c32e..43a86d318ce 100644 --- a/pkg/quota/evaluator/core/registry.go +++ b/pkg/quota/v1/evaluator/core/registry.go @@ -17,20 +17,19 @@ limitations under the License. package core import ( - "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/clock" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/quota" - "k8s.io/kubernetes/pkg/quota/generic" + quota "k8s.io/kubernetes/pkg/quota/v1" + "k8s.io/kubernetes/pkg/quota/v1/generic" ) // legacyObjectCountAliases are what we used to do simple object counting quota with mapped to alias -var legacyObjectCountAliases = map[schema.GroupVersionResource]api.ResourceName{ - v1.SchemeGroupVersion.WithResource("configmaps"): api.ResourceConfigMaps, - v1.SchemeGroupVersion.WithResource("resourcequotas"): api.ResourceQuotas, - v1.SchemeGroupVersion.WithResource("replicationcontrollers"): api.ResourceReplicationControllers, - v1.SchemeGroupVersion.WithResource("secrets"): api.ResourceSecrets, +var legacyObjectCountAliases = map[schema.GroupVersionResource]corev1.ResourceName{ + corev1.SchemeGroupVersion.WithResource("configmaps"): 
corev1.ResourceConfigMaps, + corev1.SchemeGroupVersion.WithResource("resourcequotas"): corev1.ResourceQuotas, + corev1.SchemeGroupVersion.WithResource("replicationcontrollers"): corev1.ResourceReplicationControllers, + corev1.SchemeGroupVersion.WithResource("secrets"): corev1.ResourceSecrets, } // NewEvaluators returns the list of static evaluators that manage more than counts diff --git a/pkg/quota/evaluator/core/services.go b/pkg/quota/v1/evaluator/core/services.go similarity index 64% rename from pkg/quota/evaluator/core/services.go rename to pkg/quota/v1/evaluator/core/services.go index 006c0e9b113..eaebcc698a2 100644 --- a/pkg/quota/evaluator/core/services.go +++ b/pkg/quota/v1/evaluator/core/services.go @@ -19,31 +19,31 @@ package core import ( "fmt" - "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/admission" api "k8s.io/kubernetes/pkg/apis/core" k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" - "k8s.io/kubernetes/pkg/quota" - "k8s.io/kubernetes/pkg/quota/generic" + "k8s.io/kubernetes/pkg/quota/v1" + "k8s.io/kubernetes/pkg/quota/v1/generic" ) // the name used for object count quota -var serviceObjectCountName = generic.ObjectCountQuotaResourceNameFor(v1.SchemeGroupVersion.WithResource("services").GroupResource()) +var serviceObjectCountName = generic.ObjectCountQuotaResourceNameFor(corev1.SchemeGroupVersion.WithResource("services").GroupResource()) // serviceResources are the set of resources managed by quota associated with services. -var serviceResources = []api.ResourceName{ +var serviceResources = []corev1.ResourceName{ serviceObjectCountName, - api.ResourceServices, - api.ResourceServicesNodePorts, - api.ResourceServicesLoadBalancers, + corev1.ResourceServices, + corev1.ResourceServicesNodePorts, + corev1.ResourceServicesLoadBalancers, } // NewServiceEvaluator returns an evaluator that can evaluate services. 
func NewServiceEvaluator(f quota.ListerForResourceFunc) quota.Evaluator { - listFuncByNamespace := generic.ListResourceUsingListerFunc(f, v1.SchemeGroupVersion.WithResource("services")) + listFuncByNamespace := generic.ListResourceUsingListerFunc(f, corev1.SchemeGroupVersion.WithResource("services")) serviceEvaluator := &serviceEvaluator{listFuncByNamespace: listFuncByNamespace} return serviceEvaluator } @@ -55,14 +55,14 @@ type serviceEvaluator struct { } // Constraints verifies that all required resources are present on the item -func (p *serviceEvaluator) Constraints(required []api.ResourceName, item runtime.Object) error { +func (p *serviceEvaluator) Constraints(required []corev1.ResourceName, item runtime.Object) error { // this is a no-op for services return nil } // GroupResource that this evaluator tracks func (p *serviceEvaluator) GroupResource() schema.GroupResource { - return v1.SchemeGroupVersion.WithResource("services").GroupResource() + return corev1.SchemeGroupVersion.WithResource("services").GroupResource() } // Handles returns true of the evaluator should handle the specified operation. @@ -73,36 +73,36 @@ func (p *serviceEvaluator) Handles(a admission.Attributes) bool { } // Matches returns true if the evaluator matches the specified quota with the provided input item -func (p *serviceEvaluator) Matches(resourceQuota *api.ResourceQuota, item runtime.Object) (bool, error) { +func (p *serviceEvaluator) Matches(resourceQuota *corev1.ResourceQuota, item runtime.Object) (bool, error) { return generic.Matches(resourceQuota, item, p.MatchingResources, generic.MatchesNoScopeFunc) } // MatchingResources takes the input specified list of resources and returns the set of resources it matches. 
-func (p *serviceEvaluator) MatchingResources(input []api.ResourceName) []api.ResourceName { +func (p *serviceEvaluator) MatchingResources(input []corev1.ResourceName) []corev1.ResourceName { return quota.Intersection(input, serviceResources) } // MatchingScopes takes the input specified list of scopes and input object. Returns the set of scopes resource matches. -func (p *serviceEvaluator) MatchingScopes(item runtime.Object, scopes []api.ScopedResourceSelectorRequirement) ([]api.ScopedResourceSelectorRequirement, error) { - return []api.ScopedResourceSelectorRequirement{}, nil +func (p *serviceEvaluator) MatchingScopes(item runtime.Object, scopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) { + return []corev1.ScopedResourceSelectorRequirement{}, nil } // UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes. // It returns the scopes which are in limited scopes but dont have a corresponding covering quota scope -func (p *serviceEvaluator) UncoveredQuotaScopes(limitedScopes []api.ScopedResourceSelectorRequirement, matchedQuotaScopes []api.ScopedResourceSelectorRequirement) ([]api.ScopedResourceSelectorRequirement, error) { - return []api.ScopedResourceSelectorRequirement{}, nil +func (p *serviceEvaluator) UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) { + return []corev1.ScopedResourceSelectorRequirement{}, nil } // convert the input object to an internal service object or error. 
-func toInternalServiceOrError(obj runtime.Object) (*api.Service, error) { - svc := &api.Service{} +func toExternalServiceOrError(obj runtime.Object) (*corev1.Service, error) { + svc := &corev1.Service{} switch t := obj.(type) { - case *v1.Service: - if err := k8s_api_v1.Convert_v1_Service_To_core_Service(t, svc, nil); err != nil { + case *corev1.Service: + svc = t + case *api.Service: + if err := k8s_api_v1.Convert_core_Service_To_v1_Service(t, svc, nil); err != nil { return nil, err } - case *api.Service: - svc = t default: return nil, fmt.Errorf("expect *api.Service or *v1.Service, got %v", t) } @@ -110,28 +110,28 @@ func toInternalServiceOrError(obj runtime.Object) (*api.Service, error) { } // Usage knows how to measure usage associated with services -func (p *serviceEvaluator) Usage(item runtime.Object) (api.ResourceList, error) { - result := api.ResourceList{} - svc, err := toInternalServiceOrError(item) +func (p *serviceEvaluator) Usage(item runtime.Object) (corev1.ResourceList, error) { + result := corev1.ResourceList{} + svc, err := toExternalServiceOrError(item) if err != nil { return result, err } ports := len(svc.Spec.Ports) // default service usage result[serviceObjectCountName] = *(resource.NewQuantity(1, resource.DecimalSI)) - result[api.ResourceServices] = *(resource.NewQuantity(1, resource.DecimalSI)) - result[api.ResourceServicesLoadBalancers] = resource.Quantity{Format: resource.DecimalSI} - result[api.ResourceServicesNodePorts] = resource.Quantity{Format: resource.DecimalSI} + result[corev1.ResourceServices] = *(resource.NewQuantity(1, resource.DecimalSI)) + result[corev1.ResourceServicesLoadBalancers] = resource.Quantity{Format: resource.DecimalSI} + result[corev1.ResourceServicesNodePorts] = resource.Quantity{Format: resource.DecimalSI} switch svc.Spec.Type { - case api.ServiceTypeNodePort: + case corev1.ServiceTypeNodePort: // node port services need to count node ports value := resource.NewQuantity(int64(ports), resource.DecimalSI) - 
result[api.ResourceServicesNodePorts] = *value - case api.ServiceTypeLoadBalancer: + result[corev1.ResourceServicesNodePorts] = *value + case corev1.ServiceTypeLoadBalancer: // load balancer services need to count node ports and load balancers value := resource.NewQuantity(int64(ports), resource.DecimalSI) - result[api.ResourceServicesNodePorts] = *value - result[api.ResourceServicesLoadBalancers] = *(resource.NewQuantity(1, resource.DecimalSI)) + result[corev1.ResourceServicesNodePorts] = *value + result[corev1.ResourceServicesLoadBalancers] = *(resource.NewQuantity(1, resource.DecimalSI)) } return result, nil } @@ -144,12 +144,12 @@ func (p *serviceEvaluator) UsageStats(options quota.UsageStatsOptions) (quota.Us var _ quota.Evaluator = &serviceEvaluator{} //GetQuotaServiceType returns ServiceType if the service type is eligible to track against a quota, nor return "" -func GetQuotaServiceType(service *v1.Service) v1.ServiceType { +func GetQuotaServiceType(service *corev1.Service) corev1.ServiceType { switch service.Spec.Type { - case v1.ServiceTypeNodePort: - return v1.ServiceTypeNodePort - case v1.ServiceTypeLoadBalancer: - return v1.ServiceTypeLoadBalancer + case corev1.ServiceTypeNodePort: + return corev1.ServiceTypeNodePort + case corev1.ServiceTypeLoadBalancer: + return corev1.ServiceTypeLoadBalancer } - return v1.ServiceType("") + return corev1.ServiceType("") } diff --git a/pkg/quota/evaluator/core/services_test.go b/pkg/quota/v1/evaluator/core/services_test.go similarity index 67% rename from pkg/quota/evaluator/core/services_test.go rename to pkg/quota/v1/evaluator/core/services_test.go index 601397ce193..b9f227a0c82 100644 --- a/pkg/quota/evaluator/core/services_test.go +++ b/pkg/quota/v1/evaluator/core/services_test.go @@ -19,28 +19,29 @@ package core import ( "testing" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/runtime/schema" api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/quota" - 
"k8s.io/kubernetes/pkg/quota/generic" + quota "k8s.io/kubernetes/pkg/quota/v1" + "k8s.io/kubernetes/pkg/quota/v1/generic" ) func TestServiceEvaluatorMatchesResources(t *testing.T) { evaluator := NewServiceEvaluator(nil) // we give a lot of resources - input := []api.ResourceName{ - api.ResourceConfigMaps, - api.ResourceCPU, - api.ResourceServices, - api.ResourceServicesNodePorts, - api.ResourceServicesLoadBalancers, + input := []corev1.ResourceName{ + corev1.ResourceConfigMaps, + corev1.ResourceCPU, + corev1.ResourceServices, + corev1.ResourceServicesNodePorts, + corev1.ResourceServicesLoadBalancers, } // but we only match these... - expected := quota.ToSet([]api.ResourceName{ - api.ResourceServices, - api.ResourceServicesNodePorts, - api.ResourceServicesLoadBalancers, + expected := quota.ToSet([]corev1.ResourceName{ + corev1.ResourceServices, + corev1.ResourceServicesNodePorts, + corev1.ResourceServicesLoadBalancers, }) actual := quota.ToSet(evaluator.MatchingResources(input)) if !expected.Equal(actual) { @@ -52,7 +53,7 @@ func TestServiceEvaluatorUsage(t *testing.T) { evaluator := NewServiceEvaluator(nil) testCases := map[string]struct { service *api.Service - usage api.ResourceList + usage corev1.ResourceList }{ "loadbalancer": { service: &api.Service{ @@ -60,10 +61,10 @@ func TestServiceEvaluatorUsage(t *testing.T) { Type: api.ServiceTypeLoadBalancer, }, }, - usage: api.ResourceList{ - api.ResourceServicesNodePorts: resource.MustParse("0"), - api.ResourceServicesLoadBalancers: resource.MustParse("1"), - api.ResourceServices: resource.MustParse("1"), + usage: corev1.ResourceList{ + corev1.ResourceServicesNodePorts: resource.MustParse("0"), + corev1.ResourceServicesLoadBalancers: resource.MustParse("1"), + corev1.ResourceServices: resource.MustParse("1"), generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "services"}): resource.MustParse("1"), }, }, @@ -78,10 +79,10 @@ func TestServiceEvaluatorUsage(t *testing.T) { }, }, }, - usage: 
api.ResourceList{ - api.ResourceServicesNodePorts: resource.MustParse("1"), - api.ResourceServicesLoadBalancers: resource.MustParse("1"), - api.ResourceServices: resource.MustParse("1"), + usage: corev1.ResourceList{ + corev1.ResourceServicesNodePorts: resource.MustParse("1"), + corev1.ResourceServicesLoadBalancers: resource.MustParse("1"), + corev1.ResourceServices: resource.MustParse("1"), generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "services"}): resource.MustParse("1"), }, }, @@ -91,10 +92,10 @@ func TestServiceEvaluatorUsage(t *testing.T) { Type: api.ServiceTypeClusterIP, }, }, - usage: api.ResourceList{ - api.ResourceServices: resource.MustParse("1"), - api.ResourceServicesNodePorts: resource.MustParse("0"), - api.ResourceServicesLoadBalancers: resource.MustParse("0"), + usage: corev1.ResourceList{ + corev1.ResourceServices: resource.MustParse("1"), + corev1.ResourceServicesNodePorts: resource.MustParse("0"), + corev1.ResourceServicesLoadBalancers: resource.MustParse("0"), generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "services"}): resource.MustParse("1"), }, }, @@ -109,10 +110,10 @@ func TestServiceEvaluatorUsage(t *testing.T) { }, }, }, - usage: api.ResourceList{ - api.ResourceServices: resource.MustParse("1"), - api.ResourceServicesNodePorts: resource.MustParse("1"), - api.ResourceServicesLoadBalancers: resource.MustParse("0"), + usage: corev1.ResourceList{ + corev1.ResourceServices: resource.MustParse("1"), + corev1.ResourceServicesNodePorts: resource.MustParse("1"), + corev1.ResourceServicesLoadBalancers: resource.MustParse("0"), generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "services"}): resource.MustParse("1"), }, }, @@ -130,10 +131,10 @@ func TestServiceEvaluatorUsage(t *testing.T) { }, }, }, - usage: api.ResourceList{ - api.ResourceServices: resource.MustParse("1"), - api.ResourceServicesNodePorts: resource.MustParse("2"), - api.ResourceServicesLoadBalancers: 
resource.MustParse("0"), + usage: corev1.ResourceList{ + corev1.ResourceServices: resource.MustParse("1"), + corev1.ResourceServicesNodePorts: resource.MustParse("2"), + corev1.ResourceServicesLoadBalancers: resource.MustParse("0"), generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "services"}): resource.MustParse("1"), }, }, @@ -152,7 +153,7 @@ func TestServiceEvaluatorUsage(t *testing.T) { func TestServiceConstraintsFunc(t *testing.T) { testCases := map[string]struct { service *api.Service - required []api.ResourceName + required []corev1.ResourceName err string }{ "loadbalancer": { @@ -161,7 +162,7 @@ func TestServiceConstraintsFunc(t *testing.T) { Type: api.ServiceTypeLoadBalancer, }, }, - required: []api.ResourceName{api.ResourceServicesLoadBalancers}, + required: []corev1.ResourceName{corev1.ResourceServicesLoadBalancers}, }, "clusterip": { service: &api.Service{ @@ -169,7 +170,7 @@ func TestServiceConstraintsFunc(t *testing.T) { Type: api.ServiceTypeClusterIP, }, }, - required: []api.ResourceName{api.ResourceServicesLoadBalancers, api.ResourceServices}, + required: []corev1.ResourceName{corev1.ResourceServicesLoadBalancers, corev1.ResourceServices}, }, "nodeports": { service: &api.Service{ @@ -182,7 +183,7 @@ func TestServiceConstraintsFunc(t *testing.T) { }, }, }, - required: []api.ResourceName{api.ResourceServicesNodePorts}, + required: []corev1.ResourceName{corev1.ResourceServicesNodePorts}, }, "multi-nodeports": { service: &api.Service{ @@ -198,7 +199,7 @@ func TestServiceConstraintsFunc(t *testing.T) { }, }, }, - required: []api.ResourceName{api.ResourceServicesNodePorts}, + required: []corev1.ResourceName{corev1.ResourceServicesNodePorts}, }, } diff --git a/pkg/quota/generic/BUILD b/pkg/quota/v1/generic/BUILD similarity index 86% rename from pkg/quota/generic/BUILD rename to pkg/quota/v1/generic/BUILD index d7c63ceb69d..c99ade4e3f5 100644 --- a/pkg/quota/generic/BUILD +++ b/pkg/quota/v1/generic/BUILD @@ -12,10 +12,10 @@ go_library( 
"evaluator.go", "registry.go", ], - importpath = "k8s.io/kubernetes/pkg/quota/generic", + importpath = "k8s.io/kubernetes/pkg/quota/v1/generic", deps = [ - "//pkg/apis/core:go_default_library", - "//pkg/quota:go_default_library", + "//pkg/quota/v1:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/pkg/quota/generic/OWNERS b/pkg/quota/v1/generic/OWNERS similarity index 100% rename from pkg/quota/generic/OWNERS rename to pkg/quota/v1/generic/OWNERS diff --git a/pkg/quota/generic/configuration.go b/pkg/quota/v1/generic/configuration.go similarity index 96% rename from pkg/quota/generic/configuration.go rename to pkg/quota/v1/generic/configuration.go index 59c009e13d3..1a1acc44185 100644 --- a/pkg/quota/generic/configuration.go +++ b/pkg/quota/v1/generic/configuration.go @@ -18,7 +18,7 @@ package generic import ( "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/kubernetes/pkg/quota" + quota "k8s.io/kubernetes/pkg/quota/v1" ) // implements a basic configuration diff --git a/pkg/quota/generic/evaluator.go b/pkg/quota/v1/generic/evaluator.go similarity index 78% rename from pkg/quota/generic/evaluator.go rename to pkg/quota/v1/generic/evaluator.go index 60da7d634bb..4e377175445 100644 --- a/pkg/quota/generic/evaluator.go +++ b/pkg/quota/v1/generic/evaluator.go @@ -19,6 +19,7 @@ package generic import ( "fmt" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" @@ -26,8 +27,7 @@ import ( "k8s.io/apiserver/pkg/admission" "k8s.io/client-go/informers" "k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/quota" + quota "k8s.io/kubernetes/pkg/quota/v1" ) // InformerForResourceFunc knows how to provision an 
informer @@ -56,33 +56,33 @@ func ListResourceUsingListerFunc(l quota.ListerForResourceFunc, resource schema. } // ObjectCountQuotaResourceNameFor returns the object count quota name for specified groupResource -func ObjectCountQuotaResourceNameFor(groupResource schema.GroupResource) api.ResourceName { +func ObjectCountQuotaResourceNameFor(groupResource schema.GroupResource) corev1.ResourceName { if len(groupResource.Group) == 0 { - return api.ResourceName("count/" + groupResource.Resource) + return corev1.ResourceName("count/" + groupResource.Resource) } - return api.ResourceName("count/" + groupResource.Resource + "." + groupResource.Group) + return corev1.ResourceName("count/" + groupResource.Resource + "." + groupResource.Group) } // ListFuncByNamespace knows how to list resources in a namespace type ListFuncByNamespace func(namespace string) ([]runtime.Object, error) // MatchesScopeFunc knows how to evaluate if an object matches a scope -type MatchesScopeFunc func(scope api.ScopedResourceSelectorRequirement, object runtime.Object) (bool, error) +type MatchesScopeFunc func(scope corev1.ScopedResourceSelectorRequirement, object runtime.Object) (bool, error) // UsageFunc knows how to measure usage associated with an object -type UsageFunc func(object runtime.Object) (api.ResourceList, error) +type UsageFunc func(object runtime.Object) (corev1.ResourceList, error) // MatchingResourceNamesFunc is a function that returns the list of resources matched -type MatchingResourceNamesFunc func(input []api.ResourceName) []api.ResourceName +type MatchingResourceNamesFunc func(input []corev1.ResourceName) []corev1.ResourceName // MatchesNoScopeFunc returns false on all match checks -func MatchesNoScopeFunc(scope api.ScopedResourceSelectorRequirement, object runtime.Object) (bool, error) { +func MatchesNoScopeFunc(scope corev1.ScopedResourceSelectorRequirement, object runtime.Object) (bool, error) { return false, nil } // Matches returns true if the quota matches the specified 
item. func Matches( - resourceQuota *api.ResourceQuota, item runtime.Object, + resourceQuota *corev1.ResourceQuota, item runtime.Object, matchFunc MatchingResourceNamesFunc, scopeFunc MatchesScopeFunc) (bool, error) { if resourceQuota == nil { return false, fmt.Errorf("expected non-nil quota") @@ -101,12 +101,12 @@ func Matches( return matchResource && matchScope, nil } -func getScopeSelectorsFromQuota(quota *api.ResourceQuota) []api.ScopedResourceSelectorRequirement { - selectors := []api.ScopedResourceSelectorRequirement{} +func getScopeSelectorsFromQuota(quota *corev1.ResourceQuota) []corev1.ScopedResourceSelectorRequirement { + selectors := []corev1.ScopedResourceSelectorRequirement{} for _, scope := range quota.Spec.Scopes { - selectors = append(selectors, api.ScopedResourceSelectorRequirement{ + selectors = append(selectors, corev1.ScopedResourceSelectorRequirement{ ScopeName: scope, - Operator: api.ScopeSelectorOpExists}) + Operator: corev1.ScopeSelectorOpExists}) } if quota.Spec.ScopeSelector != nil { for _, scopeSelector := range quota.Spec.ScopeSelector.MatchExpressions { @@ -122,7 +122,7 @@ func CalculateUsageStats(options quota.UsageStatsOptions, scopeFunc MatchesScopeFunc, usageFunc UsageFunc) (quota.UsageStats, error) { // default each tracked resource to zero - result := quota.UsageStats{Used: api.ResourceList{}} + result := quota.UsageStats{Used: corev1.ResourceList{}} for _, resourceName := range options.Resources { result.Used[resourceName] = resource.Quantity{Format: resource.DecimalSI} } @@ -134,7 +134,7 @@ func CalculateUsageStats(options quota.UsageStatsOptions, // need to verify that the item matches the set of scopes matchesScopes := true for _, scope := range options.Scopes { - innerMatch, err := scopeFunc(api.ScopedResourceSelectorRequirement{ScopeName: scope}, item) + innerMatch, err := scopeFunc(corev1.ScopedResourceSelectorRequirement{ScopeName: scope}, item) if err != nil { return result, nil } @@ -174,11 +174,11 @@ type 
objectCountEvaluator struct { // TODO move to dynamic client in future listFuncByNamespace ListFuncByNamespace // Names associated with this resource in the quota for generic counting. - resourceNames []api.ResourceName + resourceNames []corev1.ResourceName } // Constraints returns an error if the configured resource name is not in the required set. -func (o *objectCountEvaluator) Constraints(required []api.ResourceName, item runtime.Object) error { +func (o *objectCountEvaluator) Constraints(required []corev1.ResourceName, item runtime.Object) error { // no-op for object counting return nil } @@ -190,30 +190,30 @@ func (o *objectCountEvaluator) Handles(a admission.Attributes) bool { } // Matches returns true if the evaluator matches the specified quota with the provided input item -func (o *objectCountEvaluator) Matches(resourceQuota *api.ResourceQuota, item runtime.Object) (bool, error) { +func (o *objectCountEvaluator) Matches(resourceQuota *corev1.ResourceQuota, item runtime.Object) (bool, error) { return Matches(resourceQuota, item, o.MatchingResources, MatchesNoScopeFunc) } // MatchingResources takes the input specified list of resources and returns the set of resources it matches. -func (o *objectCountEvaluator) MatchingResources(input []api.ResourceName) []api.ResourceName { +func (o *objectCountEvaluator) MatchingResources(input []corev1.ResourceName) []corev1.ResourceName { return quota.Intersection(input, o.resourceNames) } // MatchingScopes takes the input specified list of scopes and input object. Returns the set of scopes resource matches. 
-func (o *objectCountEvaluator) MatchingScopes(item runtime.Object, scopes []api.ScopedResourceSelectorRequirement) ([]api.ScopedResourceSelectorRequirement, error) { - return []api.ScopedResourceSelectorRequirement{}, nil +func (o *objectCountEvaluator) MatchingScopes(item runtime.Object, scopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) { + return []corev1.ScopedResourceSelectorRequirement{}, nil } // UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes. // It returns the scopes which are in limited scopes but dont have a corresponding covering quota scope -func (o *objectCountEvaluator) UncoveredQuotaScopes(limitedScopes []api.ScopedResourceSelectorRequirement, matchedQuotaScopes []api.ScopedResourceSelectorRequirement) ([]api.ScopedResourceSelectorRequirement, error) { - return []api.ScopedResourceSelectorRequirement{}, nil +func (o *objectCountEvaluator) UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) { + return []corev1.ScopedResourceSelectorRequirement{}, nil } // Usage returns the resource usage for the specified object -func (o *objectCountEvaluator) Usage(object runtime.Object) (api.ResourceList, error) { +func (o *objectCountEvaluator) Usage(object runtime.Object) (corev1.ResourceList, error) { quantity := resource.NewQuantity(1, resource.DecimalSI) - resourceList := api.ResourceList{} + resourceList := corev1.ResourceList{} for _, resourceName := range o.resourceNames { resourceList[resourceName] = *quantity } @@ -239,9 +239,9 @@ var _ quota.Evaluator = &objectCountEvaluator{} // backward compatibility, alias should not be used. 
func NewObjectCountEvaluator( groupResource schema.GroupResource, listFuncByNamespace ListFuncByNamespace, - alias api.ResourceName) quota.Evaluator { + alias corev1.ResourceName) quota.Evaluator { - resourceNames := []api.ResourceName{ObjectCountQuotaResourceNameFor(groupResource)} + resourceNames := []corev1.ResourceName{ObjectCountQuotaResourceNameFor(groupResource)} if len(alias) > 0 { resourceNames = append(resourceNames, alias) } diff --git a/pkg/quota/generic/registry.go b/pkg/quota/v1/generic/registry.go similarity index 98% rename from pkg/quota/generic/registry.go rename to pkg/quota/v1/generic/registry.go index fdc38e02b1c..10404a3f288 100644 --- a/pkg/quota/generic/registry.go +++ b/pkg/quota/v1/generic/registry.go @@ -20,7 +20,7 @@ import ( "sync" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/kubernetes/pkg/quota" + quota "k8s.io/kubernetes/pkg/quota/v1" ) // implements a basic registry diff --git a/pkg/quota/install/BUILD b/pkg/quota/v1/install/BUILD similarity index 70% rename from pkg/quota/install/BUILD rename to pkg/quota/v1/install/BUILD index 31678067ea6..96425fa422c 100644 --- a/pkg/quota/install/BUILD +++ b/pkg/quota/v1/install/BUILD @@ -8,11 +8,11 @@ load( go_library( name = "go_default_library", srcs = ["registry.go"], - importpath = "k8s.io/kubernetes/pkg/quota/install", + importpath = "k8s.io/kubernetes/pkg/quota/v1/install", deps = [ - "//pkg/quota:go_default_library", - "//pkg/quota/evaluator/core:go_default_library", - "//pkg/quota/generic:go_default_library", + "//pkg/quota/v1:go_default_library", + "//pkg/quota/v1/evaluator/core:go_default_library", + "//pkg/quota/v1/generic:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], ) diff --git a/pkg/quota/install/OWNERS b/pkg/quota/v1/install/OWNERS similarity index 100% rename from pkg/quota/install/OWNERS rename to pkg/quota/v1/install/OWNERS diff --git a/pkg/quota/install/registry.go b/pkg/quota/v1/install/registry.go similarity index 
91% rename from pkg/quota/install/registry.go rename to pkg/quota/v1/install/registry.go index dd4596d310a..b870368530f 100644 --- a/pkg/quota/install/registry.go +++ b/pkg/quota/v1/install/registry.go @@ -18,9 +18,9 @@ package install import ( "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/kubernetes/pkg/quota" - "k8s.io/kubernetes/pkg/quota/evaluator/core" - "k8s.io/kubernetes/pkg/quota/generic" + quota "k8s.io/kubernetes/pkg/quota/v1" + core "k8s.io/kubernetes/pkg/quota/v1/evaluator/core" + generic "k8s.io/kubernetes/pkg/quota/v1/generic" ) // NewQuotaConfigurationForAdmission returns a quota configuration for admission control. diff --git a/pkg/quota/interfaces.go b/pkg/quota/v1/interfaces.go similarity index 80% rename from pkg/quota/interfaces.go rename to pkg/quota/v1/interfaces.go index e6723b8aef7..d71b6641830 100644 --- a/pkg/quota/interfaces.go +++ b/pkg/quota/v1/interfaces.go @@ -17,11 +17,11 @@ limitations under the License. package quota import ( + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/admission" "k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/apis/core" ) // UsageStatsOptions is an options structs that describes how stats should be calculated @@ -29,37 +29,37 @@ type UsageStatsOptions struct { // Namespace where stats should be calculate Namespace string // Scopes that must match counted objects - Scopes []api.ResourceQuotaScope + Scopes []corev1.ResourceQuotaScope // Resources are the set of resources to include in the measurement - Resources []api.ResourceName - ScopeSelector *api.ScopeSelector + Resources []corev1.ResourceName + ScopeSelector *corev1.ScopeSelector } // UsageStats is result of measuring observed resource use in the system type UsageStats struct { // Used maps resource to quantity used - Used api.ResourceList + Used corev1.ResourceList } // Evaluator knows how to evaluate quota usage for a particular group resource type Evaluator 
interface { // Constraints ensures that each required resource is present on item - Constraints(required []api.ResourceName, item runtime.Object) error + Constraints(required []corev1.ResourceName, item runtime.Object) error // GroupResource returns the groupResource that this object knows how to evaluate GroupResource() schema.GroupResource // Handles determines if quota could be impacted by the specified attribute. // If true, admission control must perform quota processing for the operation, otherwise it is safe to ignore quota. Handles(operation admission.Attributes) bool // Matches returns true if the specified quota matches the input item - Matches(resourceQuota *api.ResourceQuota, item runtime.Object) (bool, error) + Matches(resourceQuota *corev1.ResourceQuota, item runtime.Object) (bool, error) // MatchingScopes takes the input specified list of scopes and input object and returns the set of scopes that matches input object. - MatchingScopes(item runtime.Object, scopes []api.ScopedResourceSelectorRequirement) ([]api.ScopedResourceSelectorRequirement, error) + MatchingScopes(item runtime.Object, scopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) // UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes. It returns the scopes which are in limited scopes but dont have a corresponding covering quota scope - UncoveredQuotaScopes(limitedScopes []api.ScopedResourceSelectorRequirement, matchedQuotaScopes []api.ScopedResourceSelectorRequirement) ([]api.ScopedResourceSelectorRequirement, error) + UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) // MatchingResources takes the input specified list of resources and returns the set of resources evaluator matches. 
- MatchingResources(input []api.ResourceName) []api.ResourceName + MatchingResources(input []corev1.ResourceName) []corev1.ResourceName // Usage returns the resource usage for the specified object - Usage(item runtime.Object) (api.ResourceList, error) + Usage(item runtime.Object) (corev1.ResourceList, error) // UsageStats calculates latest observed usage stats for all objects UsageStats(options UsageStatsOptions) (UsageStats, error) } diff --git a/pkg/quota/resources.go b/pkg/quota/v1/resources.go similarity index 77% rename from pkg/quota/resources.go rename to pkg/quota/v1/resources.go index b261aedef5f..b6aa3210d4b 100644 --- a/pkg/quota/resources.go +++ b/pkg/quota/v1/resources.go @@ -19,14 +19,13 @@ package quota import ( "strings" - "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/util/sets" - api "k8s.io/kubernetes/pkg/apis/core" ) // Equals returns true if the two lists are equivalent -func Equals(a api.ResourceList, b api.ResourceList) bool { +func Equals(a corev1.ResourceList, b corev1.ResourceList) bool { if len(a) != len(b) { return false } @@ -45,7 +44,7 @@ func Equals(a api.ResourceList, b api.ResourceList) bool { } // V1Equals returns true if the two lists are equivalent -func V1Equals(a v1.ResourceList, b v1.ResourceList) bool { +func V1Equals(a corev1.ResourceList, b corev1.ResourceList) bool { if len(a) != len(b) { return false } @@ -65,9 +64,9 @@ func V1Equals(a v1.ResourceList, b v1.ResourceList) bool { // LessThanOrEqual returns true if a < b for each key in b // If false, it returns the keys in a that exceeded b -func LessThanOrEqual(a api.ResourceList, b api.ResourceList) (bool, []api.ResourceName) { +func LessThanOrEqual(a corev1.ResourceList, b corev1.ResourceList) (bool, []corev1.ResourceName) { result := true - resourceNames := []api.ResourceName{} + resourceNames := []corev1.ResourceName{} for key, value := range b { if other, found := a[key]; found { if other.Cmp(value) > 
0 { @@ -80,8 +79,8 @@ func LessThanOrEqual(a api.ResourceList, b api.ResourceList) (bool, []api.Resour } // Max returns the result of Max(a, b) for each named resource -func Max(a api.ResourceList, b api.ResourceList) api.ResourceList { - result := api.ResourceList{} +func Max(a corev1.ResourceList, b corev1.ResourceList) corev1.ResourceList { + result := corev1.ResourceList{} for key, value := range a { if other, found := b[key]; found { if value.Cmp(other) <= 0 { @@ -100,8 +99,8 @@ func Max(a api.ResourceList, b api.ResourceList) api.ResourceList { } // Add returns the result of a + b for each named resource -func Add(a api.ResourceList, b api.ResourceList) api.ResourceList { - result := api.ResourceList{} +func Add(a corev1.ResourceList, b corev1.ResourceList) corev1.ResourceList { + result := corev1.ResourceList{} for key, value := range a { quantity := *value.Copy() if other, found := b[key]; found { @@ -120,10 +119,10 @@ func Add(a api.ResourceList, b api.ResourceList) api.ResourceList { // SubtractWithNonNegativeResult - subtracts and returns result of a - b but // makes sure we don't return negative values to prevent negative resource usage. 
-func SubtractWithNonNegativeResult(a api.ResourceList, b api.ResourceList) api.ResourceList { +func SubtractWithNonNegativeResult(a corev1.ResourceList, b corev1.ResourceList) corev1.ResourceList { zero := resource.MustParse("0") - result := api.ResourceList{} + result := corev1.ResourceList{} for key, value := range a { quantity := *value.Copy() if other, found := b[key]; found { @@ -145,8 +144,8 @@ func SubtractWithNonNegativeResult(a api.ResourceList, b api.ResourceList) api.R } // Subtract returns the result of a - b for each named resource -func Subtract(a api.ResourceList, b api.ResourceList) api.ResourceList { - result := api.ResourceList{} +func Subtract(a corev1.ResourceList, b corev1.ResourceList) corev1.ResourceList { + result := corev1.ResourceList{} for key, value := range a { quantity := *value.Copy() if other, found := b[key]; found { @@ -165,9 +164,9 @@ func Subtract(a api.ResourceList, b api.ResourceList) api.ResourceList { } // Mask returns a new resource list that only has the values with the specified names -func Mask(resources api.ResourceList, names []api.ResourceName) api.ResourceList { +func Mask(resources corev1.ResourceList, names []corev1.ResourceName) corev1.ResourceList { nameSet := ToSet(names) - result := api.ResourceList{} + result := corev1.ResourceList{} for key, value := range resources { if nameSet.Has(string(key)) { result[key] = *value.Copy() @@ -177,8 +176,8 @@ func Mask(resources api.ResourceList, names []api.ResourceName) api.ResourceList } // ResourceNames returns a list of all resource names in the ResourceList -func ResourceNames(resources api.ResourceList) []api.ResourceName { - result := []api.ResourceName{} +func ResourceNames(resources corev1.ResourceList) []corev1.ResourceName { + result := []corev1.ResourceName{} for resourceName := range resources { result = append(result, resourceName) } @@ -186,12 +185,12 @@ func ResourceNames(resources api.ResourceList) []api.ResourceName { } // Contains returns true if the 
specified item is in the list of items -func Contains(items []api.ResourceName, item api.ResourceName) bool { +func Contains(items []corev1.ResourceName, item corev1.ResourceName) bool { return ToSet(items).Has(string(item)) } // ContainsPrefix returns true if the specified item has a prefix that contained in given prefix Set -func ContainsPrefix(prefixSet []string, item api.ResourceName) bool { +func ContainsPrefix(prefixSet []string, item corev1.ResourceName) bool { for _, prefix := range prefixSet { if strings.HasPrefix(string(item), prefix) { return true @@ -201,19 +200,19 @@ func ContainsPrefix(prefixSet []string, item api.ResourceName) bool { } // Intersection returns the intersection of both list of resources -func Intersection(a []api.ResourceName, b []api.ResourceName) []api.ResourceName { +func Intersection(a []corev1.ResourceName, b []corev1.ResourceName) []corev1.ResourceName { setA := ToSet(a) setB := ToSet(b) setC := setA.Intersection(setB) - result := []api.ResourceName{} + result := []corev1.ResourceName{} for _, resourceName := range setC.List() { - result = append(result, api.ResourceName(resourceName)) + result = append(result, corev1.ResourceName(resourceName)) } return result } // IsZero returns true if each key maps to the quantity value 0 -func IsZero(a api.ResourceList) bool { +func IsZero(a corev1.ResourceList) bool { zero := resource.MustParse("0") for _, v := range a { if v.Cmp(zero) != 0 { @@ -224,8 +223,8 @@ func IsZero(a api.ResourceList) bool { } // IsNegative returns the set of resource names that have a negative value. 
-func IsNegative(a api.ResourceList) []api.ResourceName { - results := []api.ResourceName{} +func IsNegative(a corev1.ResourceList) []corev1.ResourceName { + results := []corev1.ResourceName{} zero := resource.MustParse("0") for k, v := range a { if v.Cmp(zero) < 0 { @@ -236,7 +235,7 @@ func IsNegative(a api.ResourceList) []api.ResourceName { } // ToSet takes a list of resource names and converts to a string set -func ToSet(resourceNames []api.ResourceName) sets.String { +func ToSet(resourceNames []corev1.ResourceName) sets.String { result := sets.NewString() for _, resourceName := range resourceNames { result.Insert(string(resourceName)) @@ -245,12 +244,12 @@ func ToSet(resourceNames []api.ResourceName) sets.String { } // CalculateUsage calculates and returns the requested ResourceList usage -func CalculateUsage(namespaceName string, scopes []api.ResourceQuotaScope, hardLimits api.ResourceList, registry Registry, scopeSelector *api.ScopeSelector) (api.ResourceList, error) { +func CalculateUsage(namespaceName string, scopes []corev1.ResourceQuotaScope, hardLimits corev1.ResourceList, registry Registry, scopeSelector *corev1.ScopeSelector) (corev1.ResourceList, error) { // find the intersection between the hard resources on the quota // and the resources this controller can track to know what we can // look to measure updated usage stats for hardResources := ResourceNames(hardLimits) - potentialResources := []api.ResourceName{} + potentialResources := []corev1.ResourceName{} evaluators := registry.List() for _, evaluator := range evaluators { potentialResources = append(potentialResources, evaluator.MatchingResources(hardResources)...) 
@@ -259,7 +258,7 @@ func CalculateUsage(namespaceName string, scopes []api.ResourceQuotaScope, hardL matchedResources := Intersection(hardResources, potentialResources) // sum the observed usage from each evaluator - newUsage := api.ResourceList{} + newUsage := corev1.ResourceList{} for _, evaluator := range evaluators { // only trigger the evaluator if it matches a resource in the quota, otherwise, skip calculating anything intersection := evaluator.MatchingResources(matchedResources) diff --git a/pkg/quota/v1/resources_test.go b/pkg/quota/v1/resources_test.go new file mode 100644 index 00000000000..61175c706eb --- /dev/null +++ b/pkg/quota/v1/resources_test.go @@ -0,0 +1,321 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package quota + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" +) + +func TestEquals(t *testing.T) { + testCases := map[string]struct { + a corev1.ResourceList + b corev1.ResourceList + expected bool + }{ + "isEqual": { + a: corev1.ResourceList{}, + b: corev1.ResourceList{}, + expected: true, + }, + "isEqualWithKeys": { + a: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + b: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + expected: true, + }, + "isNotEqualSameKeys": { + a: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("200m"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + b: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + expected: false, + }, + "isNotEqualDiffKeys": { + a: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + b: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + corev1.ResourcePods: resource.MustParse("1"), + }, + expected: false, + }, + } + for testName, testCase := range testCases { + if result := Equals(testCase.a, testCase.b); result != testCase.expected { + t.Errorf("%s expected: %v, actual: %v, a=%v, b=%v", testName, testCase.expected, result, testCase.a, testCase.b) + } + } +} + +func TestMax(t *testing.T) { + testCases := map[string]struct { + a corev1.ResourceList + b corev1.ResourceList + expected corev1.ResourceList + }{ + "noKeys": { + a: corev1.ResourceList{}, + b: corev1.ResourceList{}, + expected: corev1.ResourceList{}, + }, + "toEmpty": { + a: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")}, + b: corev1.ResourceList{}, + expected: 
corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")}, + }, + "matching": { + a: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")}, + b: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("150m")}, + expected: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("150m")}, + }, + "matching(reverse)": { + a: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("150m")}, + b: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")}, + expected: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("150m")}, + }, + "matching-equal": { + a: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")}, + b: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")}, + expected: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")}, + }, + } + for testName, testCase := range testCases { + sum := Max(testCase.a, testCase.b) + if result := Equals(testCase.expected, sum); !result { + t.Errorf("%s expected: %v, actual: %v", testName, testCase.expected, sum) + } + } +} + +func TestAdd(t *testing.T) { + testCases := map[string]struct { + a corev1.ResourceList + b corev1.ResourceList + expected corev1.ResourceList + }{ + "noKeys": { + a: corev1.ResourceList{}, + b: corev1.ResourceList{}, + expected: corev1.ResourceList{}, + }, + "toEmpty": { + a: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")}, + b: corev1.ResourceList{}, + expected: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")}, + }, + "matching": { + a: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")}, + b: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")}, + expected: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("200m")}, + }, + } + for testName, testCase := range testCases { + sum := Add(testCase.a, testCase.b) + if result := Equals(testCase.expected, sum); !result { + t.Errorf("%s expected: %v, actual: %v", testName, testCase.expected, 
sum) + } + } +} + +func TestSubtract(t *testing.T) { + testCases := map[string]struct { + a corev1.ResourceList + b corev1.ResourceList + expected corev1.ResourceList + }{ + "noKeys": { + a: corev1.ResourceList{}, + b: corev1.ResourceList{}, + expected: corev1.ResourceList{}, + }, + "value-empty": { + a: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")}, + b: corev1.ResourceList{}, + expected: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")}, + }, + "empty-value": { + a: corev1.ResourceList{}, + b: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")}, + expected: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("-100m")}, + }, + "value-value": { + a: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("200m")}, + b: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")}, + expected: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")}, + }, + } + for testName, testCase := range testCases { + sub := Subtract(testCase.a, testCase.b) + if result := Equals(testCase.expected, sub); !result { + t.Errorf("%s expected: %v, actual: %v", testName, testCase.expected, sub) + } + } +} + +func TestResourceNames(t *testing.T) { + testCases := map[string]struct { + a corev1.ResourceList + expected []corev1.ResourceName + }{ + "empty": { + a: corev1.ResourceList{}, + expected: []corev1.ResourceName{}, + }, + "values": { + a: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + expected: []corev1.ResourceName{corev1.ResourceMemory, corev1.ResourceCPU}, + }, + } + for testName, testCase := range testCases { + actualSet := ToSet(ResourceNames(testCase.a)) + expectedSet := ToSet(testCase.expected) + if !actualSet.Equal(expectedSet) { + t.Errorf("%s expected: %v, actual: %v", testName, expectedSet, actualSet) + } + } +} + +func TestContains(t *testing.T) { + testCases := map[string]struct { + a []corev1.ResourceName 
+ b corev1.ResourceName + expected bool + }{ + "does-not-contain": { + a: []corev1.ResourceName{corev1.ResourceMemory}, + b: corev1.ResourceCPU, + expected: false, + }, + "does-contain": { + a: []corev1.ResourceName{corev1.ResourceMemory, corev1.ResourceCPU}, + b: corev1.ResourceCPU, + expected: true, + }, + } + for testName, testCase := range testCases { + if actual := Contains(testCase.a, testCase.b); actual != testCase.expected { + t.Errorf("%s expected: %v, actual: %v", testName, testCase.expected, actual) + } + } +} + +func TestContainsPrefix(t *testing.T) { + testCases := map[string]struct { + a []string + b corev1.ResourceName + expected bool + }{ + "does-not-contain": { + a: []string{corev1.ResourceHugePagesPrefix}, + b: corev1.ResourceCPU, + expected: false, + }, + "does-contain": { + a: []string{corev1.ResourceHugePagesPrefix}, + b: corev1.ResourceName(corev1.ResourceHugePagesPrefix + "2Mi"), + expected: true, + }, + } + for testName, testCase := range testCases { + if actual := ContainsPrefix(testCase.a, testCase.b); actual != testCase.expected { + t.Errorf("%s expected: %v, actual: %v", testName, testCase.expected, actual) + } + } +} + +func TestIsZero(t *testing.T) { + testCases := map[string]struct { + a corev1.ResourceList + expected bool + }{ + "empty": { + a: corev1.ResourceList{}, + expected: true, + }, + "zero": { + a: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("0"), + corev1.ResourceMemory: resource.MustParse("0"), + }, + expected: true, + }, + "non-zero": { + a: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("200m"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + expected: false, + }, + } + for testName, testCase := range testCases { + if result := IsZero(testCase.a); result != testCase.expected { + t.Errorf("%s expected: %v, actual: %v", testName, testCase.expected, result) + } + } +} + +func TestIsNegative(t *testing.T) { + testCases := map[string]struct { + a corev1.ResourceList + expected 
[]corev1.ResourceName + }{ + "empty": { + a: corev1.ResourceList{}, + expected: []corev1.ResourceName{}, + }, + "some-negative": { + a: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("-10"), + corev1.ResourceMemory: resource.MustParse("0"), + }, + expected: []corev1.ResourceName{corev1.ResourceCPU}, + }, + "all-negative": { + a: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("-200m"), + corev1.ResourceMemory: resource.MustParse("-1Gi"), + }, + expected: []corev1.ResourceName{corev1.ResourceCPU, corev1.ResourceMemory}, + }, + } + for testName, testCase := range testCases { + actual := IsNegative(testCase.a) + actualSet := ToSet(actual) + expectedSet := ToSet(testCase.expected) + if !actualSet.Equal(expectedSet) { + t.Errorf("%s expected: %v, actual: %v", testName, expectedSet, actualSet) + } + } +} From b8f4aa351620522407338a42392fed1e207d6fa4 Mon Sep 17 00:00:00 2001 From: yue9944882 <291271447@qq.com> Date: Mon, 27 Aug 2018 21:49:01 +0800 Subject: [PATCH 4/9] move util funcs --- pkg/apis/core/helper/helpers.go | 25 ------------------------- pkg/apis/core/v1/helper/helpers.go | 25 +++++++++++++++++++++++++ 2 files changed, 25 insertions(+), 25 deletions(-) diff --git a/pkg/apis/core/helper/helpers.go b/pkg/apis/core/helper/helpers.go index 486122298c2..10c33f66bd1 100644 --- a/pkg/apis/core/helper/helpers.go +++ b/pkg/apis/core/helper/helpers.go @@ -537,28 +537,3 @@ func PersistentVolumeClaimHasClass(claim *core.PersistentVolumeClaim) bool { return false } - -// ScopedResourceSelectorRequirementsAsSelector converts the ScopedResourceSelectorRequirement api type into a struct that implements -// labels.Selector. 
-func ScopedResourceSelectorRequirementsAsSelector(ssr core.ScopedResourceSelectorRequirement) (labels.Selector, error) { - selector := labels.NewSelector() - var op selection.Operator - switch ssr.Operator { - case core.ScopeSelectorOpIn: - op = selection.In - case core.ScopeSelectorOpNotIn: - op = selection.NotIn - case core.ScopeSelectorOpExists: - op = selection.Exists - case core.ScopeSelectorOpDoesNotExist: - op = selection.DoesNotExist - default: - return nil, fmt.Errorf("%q is not a valid scope selector operator", ssr.Operator) - } - r, err := labels.NewRequirement(string(ssr.ScopeName), op, ssr.Values) - if err != nil { - return nil, err - } - selector = selector.Add(*r) - return selector, nil -} diff --git a/pkg/apis/core/v1/helper/helpers.go b/pkg/apis/core/v1/helper/helpers.go index bf6c001b780..fa11a6b36a9 100644 --- a/pkg/apis/core/v1/helper/helpers.go +++ b/pkg/apis/core/v1/helper/helpers.go @@ -500,3 +500,28 @@ func GetPersistentVolumeClaimClass(claim *v1.PersistentVolumeClaim) string { return "" } + +// ScopedResourceSelectorRequirementsAsSelector converts the ScopedResourceSelectorRequirement api type into a struct that implements +// labels.Selector. 
+func ScopedResourceSelectorRequirementsAsSelector(ssr v1.ScopedResourceSelectorRequirement) (labels.Selector, error) { + selector := labels.NewSelector() + var op selection.Operator + switch ssr.Operator { + case v1.ScopeSelectorOpIn: + op = selection.In + case v1.ScopeSelectorOpNotIn: + op = selection.NotIn + case v1.ScopeSelectorOpExists: + op = selection.Exists + case v1.ScopeSelectorOpDoesNotExist: + op = selection.DoesNotExist + default: + return nil, fmt.Errorf("%q is not a valid scope selector operator", ssr.Operator) + } + r, err := labels.NewRequirement(string(ssr.ScopeName), op, ssr.Values) + if err != nil { + return nil, err + } + selector = selector.Add(*r) + return selector, nil +} From d11ee913a1ce22f659fefd4c7e7bcaa62591897f Mon Sep 17 00:00:00 2001 From: yue9944882 <291271447@qq.com> Date: Mon, 27 Aug 2018 21:49:26 +0800 Subject: [PATCH 5/9] prune flipping int/ext conversion for quota controller --- pkg/controller/resourcequota/BUILD | 14 +++---- .../resource_quota_controller.go | 40 ++++++------------- .../resource_quota_controller_test.go | 6 +-- .../resourcequota/resource_quota_monitor.go | 6 +-- 4 files changed, 24 insertions(+), 42 deletions(-) diff --git a/pkg/controller/resourcequota/BUILD b/pkg/controller/resourcequota/BUILD index 5c697684479..d09d8f4a76b 100644 --- a/pkg/controller/resourcequota/BUILD +++ b/pkg/controller/resourcequota/BUILD @@ -15,12 +15,10 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/controller/resourcequota", deps = [ - "//pkg/apis/core:go_default_library", - "//pkg/apis/core/v1:go_default_library", "//pkg/controller:go_default_library", - "//pkg/quota:go_default_library", - "//pkg/quota/evaluator/core:go_default_library", - "//pkg/quota/generic:go_default_library", + "//pkg/quota/v1:go_default_library", + "//pkg/quota/v1/evaluator/core:go_default_library", + "//pkg/quota/v1/generic:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", 
"//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", @@ -49,9 +47,9 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/controller:go_default_library", - "//pkg/quota:go_default_library", - "//pkg/quota/generic:go_default_library", - "//pkg/quota/install:go_default_library", + "//pkg/quota/v1:go_default_library", + "//pkg/quota/v1/generic:go_default_library", + "//pkg/quota/v1/install:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/controller/resourcequota/resource_quota_controller.go b/pkg/controller/resourcequota/resource_quota_controller.go index 2b46d45ed38..2212bfd4b16 100644 --- a/pkg/controller/resourcequota/resource_quota_controller.go +++ b/pkg/controller/resourcequota/resource_quota_controller.go @@ -39,10 +39,8 @@ import ( corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" - api "k8s.io/kubernetes/pkg/apis/core" - k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" "k8s.io/kubernetes/pkg/controller" - "k8s.io/kubernetes/pkg/quota" + quota "k8s.io/kubernetes/pkg/quota/v1" ) // NamespacedResourcesFunc knows how to discover namespaced resources. 
@@ -226,7 +224,7 @@ func (rq *ResourceQuotaController) addQuota(obj interface{}) { // if we declared a constraint that has no usage (which this controller can calculate, prioritize it) for constraint := range resourceQuota.Status.Hard { if _, usageFound := resourceQuota.Status.Used[constraint]; !usageFound { - matchedResources := []api.ResourceName{api.ResourceName(constraint)} + matchedResources := []v1.ResourceName{v1.ResourceName(constraint)} for _, evaluator := range rq.registry.List() { if intersection := evaluator.MatchingResources(matchedResources); len(intersection) > 0 { rq.missingUsageQueue.Add(key) @@ -320,25 +318,20 @@ func (rq *ResourceQuotaController) syncResourceQuotaFromKey(key string) (err err } // syncResourceQuota runs a complete sync of resource quota status across all known kinds -func (rq *ResourceQuotaController) syncResourceQuota(v1ResourceQuota *v1.ResourceQuota) (err error) { +func (rq *ResourceQuotaController) syncResourceQuota(resourceQuota *v1.ResourceQuota) (err error) { // quota is dirty if any part of spec hard limits differs from the status hard limits - dirty := !apiequality.Semantic.DeepEqual(v1ResourceQuota.Spec.Hard, v1ResourceQuota.Status.Hard) - - resourceQuota := api.ResourceQuota{} - if err := k8s_api_v1.Convert_v1_ResourceQuota_To_core_ResourceQuota(v1ResourceQuota, &resourceQuota, nil); err != nil { - return err - } + dirty := !apiequality.Semantic.DeepEqual(resourceQuota.Spec.Hard, resourceQuota.Status.Hard) // dirty tracks if the usage status differs from the previous sync, // if so, we send a new usage with latest status // if this is our first sync, it will be dirty by default, since we need track usage - dirty = dirty || (resourceQuota.Status.Hard == nil || resourceQuota.Status.Used == nil) + dirty = dirty || resourceQuota.Status.Hard == nil || resourceQuota.Status.Used == nil - used := api.ResourceList{} + used := v1.ResourceList{} if resourceQuota.Status.Used != nil { - used = quota.Add(api.ResourceList{}, 
resourceQuota.Status.Used) + used = quota.Add(v1.ResourceList{}, resourceQuota.Status.Used) } - hardLimits := quota.Add(api.ResourceList{}, resourceQuota.Spec.Hard) + hardLimits := quota.Add(v1.ResourceList{}, resourceQuota.Spec.Hard) newUsage, err := quota.CalculateUsage(resourceQuota.Namespace, resourceQuota.Spec.Scopes, hardLimits, rq.registry, resourceQuota.Spec.ScopeSelector) if err != nil { @@ -354,14 +347,14 @@ func (rq *ResourceQuotaController) syncResourceQuota(v1ResourceQuota *v1.Resourc // Create a usage object that is based on the quota resource version that will handle updates // by default, we preserve the past usage observation, and set hard to the current spec - usage := api.ResourceQuota{ + usage := v1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{ Name: resourceQuota.Name, Namespace: resourceQuota.Namespace, ResourceVersion: resourceQuota.ResourceVersion, Labels: resourceQuota.Labels, Annotations: resourceQuota.Annotations}, - Status: api.ResourceQuotaStatus{ + Status: v1.ResourceQuotaStatus{ Hard: hardLimits, Used: used, }, @@ -371,11 +364,7 @@ func (rq *ResourceQuotaController) syncResourceQuota(v1ResourceQuota *v1.Resourc // there was a change observed by this controller that requires we update quota if dirty { - v1Usage := &v1.ResourceQuota{} - if err := k8s_api_v1.Convert_core_ResourceQuota_To_v1_ResourceQuota(&usage, v1Usage, nil); err != nil { - return err - } - _, err = rq.rqClient.ResourceQuotas(usage.Namespace).UpdateStatus(v1Usage) + _, err = rq.rqClient.ResourceQuotas(usage.Namespace).UpdateStatus(&usage) return err } return nil @@ -406,12 +395,7 @@ func (rq *ResourceQuotaController) replenishQuota(groupResource schema.GroupReso // only queue those quotas that are tracking a resource associated with this kind. 
for i := range resourceQuotas { resourceQuota := resourceQuotas[i] - internalResourceQuota := &api.ResourceQuota{} - if err := k8s_api_v1.Convert_v1_ResourceQuota_To_core_ResourceQuota(resourceQuota, internalResourceQuota, nil); err != nil { - glog.Error(err) - continue - } - resourceQuotaResources := quota.ResourceNames(internalResourceQuota.Status.Hard) + resourceQuotaResources := quota.ResourceNames(resourceQuota.Status.Hard) if intersection := evaluator.MatchingResources(resourceQuotaResources); len(intersection) > 0 { // TODO: make this support targeted replenishment to a specific kind, right now it does a full recalc on that quota. rq.enqueueResourceQuota(resourceQuota) diff --git a/pkg/controller/resourcequota/resource_quota_controller_test.go b/pkg/controller/resourcequota/resource_quota_controller_test.go index a953e1ca91e..769b23022a3 100644 --- a/pkg/controller/resourcequota/resource_quota_controller_test.go +++ b/pkg/controller/resourcequota/resource_quota_controller_test.go @@ -33,9 +33,9 @@ import ( core "k8s.io/client-go/testing" "k8s.io/client-go/tools/cache" "k8s.io/kubernetes/pkg/controller" - "k8s.io/kubernetes/pkg/quota" - "k8s.io/kubernetes/pkg/quota/generic" - "k8s.io/kubernetes/pkg/quota/install" + quota "k8s.io/kubernetes/pkg/quota/v1" + "k8s.io/kubernetes/pkg/quota/v1/generic" + "k8s.io/kubernetes/pkg/quota/v1/install" ) func getResourceList(cpu, memory string) v1.ResourceList { diff --git a/pkg/controller/resourcequota/resource_quota_monitor.go b/pkg/controller/resourcequota/resource_quota_monitor.go index be87777e0f0..aa77fca731f 100644 --- a/pkg/controller/resourcequota/resource_quota_monitor.go +++ b/pkg/controller/resourcequota/resource_quota_monitor.go @@ -33,9 +33,9 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" "k8s.io/kubernetes/pkg/controller" - "k8s.io/kubernetes/pkg/quota" - "k8s.io/kubernetes/pkg/quota/evaluator/core" - "k8s.io/kubernetes/pkg/quota/generic" + quota "k8s.io/kubernetes/pkg/quota/v1" 
+ "k8s.io/kubernetes/pkg/quota/v1/evaluator/core" + "k8s.io/kubernetes/pkg/quota/v1/generic" ) type eventType int From a4f33a6a9f0d5711f1c64ac47791ac22377dfaab Mon Sep 17 00:00:00 2001 From: yue9944882 <291271447@qq.com> Date: Mon, 27 Aug 2018 21:50:15 +0800 Subject: [PATCH 6/9] align imports for cmd --- cmd/kube-apiserver/app/BUILD | 2 +- cmd/kube-apiserver/app/server.go | 2 +- cmd/kube-controller-manager/app/BUILD | 4 ++-- cmd/kube-controller-manager/app/core.go | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/cmd/kube-apiserver/app/BUILD b/cmd/kube-apiserver/app/BUILD index 83d287cba76..299a96e65ab 100644 --- a/cmd/kube-apiserver/app/BUILD +++ b/cmd/kube-apiserver/app/BUILD @@ -33,7 +33,7 @@ go_library( "//pkg/master/controller/crdregistration:go_default_library", "//pkg/master/reconcilers:go_default_library", "//pkg/master/tunneler:go_default_library", - "//pkg/quota/install:go_default_library", + "//pkg/quota/v1/install:go_default_library", "//pkg/registry/cachesize:go_default_library", "//pkg/registry/rbac/rest:go_default_library", "//pkg/serviceaccount:go_default_library", diff --git a/cmd/kube-apiserver/app/server.go b/cmd/kube-apiserver/app/server.go index 72548ecc7b8..247d59cf772 100644 --- a/cmd/kube-apiserver/app/server.go +++ b/cmd/kube-apiserver/app/server.go @@ -81,7 +81,7 @@ import ( "k8s.io/kubernetes/pkg/master" "k8s.io/kubernetes/pkg/master/reconcilers" "k8s.io/kubernetes/pkg/master/tunneler" - quotainstall "k8s.io/kubernetes/pkg/quota/install" + quotainstall "k8s.io/kubernetes/pkg/quota/v1/install" "k8s.io/kubernetes/pkg/registry/cachesize" rbacrest "k8s.io/kubernetes/pkg/registry/rbac/rest" "k8s.io/kubernetes/pkg/serviceaccount" diff --git a/cmd/kube-controller-manager/app/BUILD b/cmd/kube-controller-manager/app/BUILD index 6b3d03b6ec2..7d62955c663 100644 --- a/cmd/kube-controller-manager/app/BUILD +++ b/cmd/kube-controller-manager/app/BUILD @@ -74,8 +74,8 @@ go_library( 
"//pkg/controller/volume/pvcprotection:go_default_library", "//pkg/controller/volume/pvprotection:go_default_library", "//pkg/features:go_default_library", - "//pkg/quota/generic:go_default_library", - "//pkg/quota/install:go_default_library", + "//pkg/quota/v1/generic:go_default_library", + "//pkg/quota/v1/install:go_default_library", "//pkg/serviceaccount:go_default_library", "//pkg/util/configz:go_default_library", "//pkg/util/flag:go_default_library", diff --git a/cmd/kube-controller-manager/app/core.go b/cmd/kube-controller-manager/app/core.go index 0e05f02845b..de198b3b560 100644 --- a/cmd/kube-controller-manager/app/core.go +++ b/cmd/kube-controller-manager/app/core.go @@ -56,8 +56,8 @@ import ( "k8s.io/kubernetes/pkg/controller/volume/pvcprotection" "k8s.io/kubernetes/pkg/controller/volume/pvprotection" "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/quota/generic" - quotainstall "k8s.io/kubernetes/pkg/quota/install" + "k8s.io/kubernetes/pkg/quota/v1/generic" + quotainstall "k8s.io/kubernetes/pkg/quota/v1/install" "k8s.io/kubernetes/pkg/util/metrics" ) From 7f1ef6672c7126274e1195418762203ce1b21d8c Mon Sep 17 00:00:00 2001 From: yue9944882 <291271447@qq.com> Date: Mon, 27 Aug 2018 21:50:30 +0800 Subject: [PATCH 7/9] align imports for test --- test/e2e/scheduling/BUILD | 2 +- test/e2e/scheduling/resource_quota.go | 2 +- test/integration/quota/BUILD | 6 ++---- test/integration/quota/quota_test.go | 22 +++++++++------------- 4 files changed, 13 insertions(+), 19 deletions(-) diff --git a/test/e2e/scheduling/BUILD b/test/e2e/scheduling/BUILD index 7b44dc34289..5d4f284d80f 100644 --- a/test/e2e/scheduling/BUILD +++ b/test/e2e/scheduling/BUILD @@ -24,7 +24,7 @@ go_library( "//pkg/apis/extensions:go_default_library", "//pkg/apis/scheduling:go_default_library", "//pkg/kubelet/apis:go_default_library", - "//pkg/quota/evaluator/core:go_default_library", + "//pkg/quota/v1/evaluator/core:go_default_library", 
"//pkg/scheduler/algorithm/priorities/util:go_default_library", "//pkg/util/version:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", diff --git a/test/e2e/scheduling/resource_quota.go b/test/e2e/scheduling/resource_quota.go index 375fd61562e..40f5b16f3cd 100644 --- a/test/e2e/scheduling/resource_quota.go +++ b/test/e2e/scheduling/resource_quota.go @@ -29,7 +29,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/pkg/quota/evaluator/core" + "k8s.io/kubernetes/pkg/quota/v1/evaluator/core" "k8s.io/kubernetes/test/e2e/framework" imageutils "k8s.io/kubernetes/test/utils/image" diff --git a/test/integration/quota/BUILD b/test/integration/quota/BUILD index 107a4bd9030..3bf9cd52aab 100644 --- a/test/integration/quota/BUILD +++ b/test/integration/quota/BUILD @@ -14,13 +14,11 @@ go_test( ], tags = ["integration"], deps = [ - "//pkg/client/clientset_generated/internalclientset:go_default_library", - "//pkg/client/informers/informers_generated/internalversion:go_default_library", "//pkg/controller:go_default_library", "//pkg/controller/replication:go_default_library", "//pkg/controller/resourcequota:go_default_library", - "//pkg/quota/generic:go_default_library", - "//pkg/quota/install:go_default_library", + "//pkg/quota/v1/generic:go_default_library", + "//pkg/quota/v1/install:go_default_library", "//plugin/pkg/admission/resourcequota:go_default_library", "//plugin/pkg/admission/resourcequota/apis/resourcequota:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", diff --git a/test/integration/quota/quota_test.go b/test/integration/quota/quota_test.go index 9bc6f384bfb..9f6a9a418bf 100644 --- a/test/integration/quota/quota_test.go +++ b/test/integration/quota/quota_test.go @@ -37,13 +37,11 @@ import ( restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/record" watchtools "k8s.io/client-go/tools/watch" - 
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - internalinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" "k8s.io/kubernetes/pkg/controller" replicationcontroller "k8s.io/kubernetes/pkg/controller/replication" resourcequotacontroller "k8s.io/kubernetes/pkg/controller/resourcequota" - "k8s.io/kubernetes/pkg/quota/generic" - quotainstall "k8s.io/kubernetes/pkg/quota/install" + "k8s.io/kubernetes/pkg/quota/v1/generic" + quotainstall "k8s.io/kubernetes/pkg/quota/v1/install" "k8s.io/kubernetes/plugin/pkg/admission/resourcequota" resourcequotaapi "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota" "k8s.io/kubernetes/test/integration/framework" @@ -65,15 +63,14 @@ func TestQuota(t *testing.T) { admissionCh := make(chan struct{}) clientset := clientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) - internalClientset := internalclientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) config := &resourcequotaapi.Configuration{} admission, err := resourcequota.NewResourceQuota(config, 5, admissionCh) if err != nil { t.Fatalf("unexpected error: %v", err) } - admission.SetInternalKubeClientSet(internalClientset) - internalInformers := internalinformers.NewSharedInformerFactory(internalClientset, controller.NoResyncPeriodFunc()) - admission.SetInternalKubeInformerFactory(internalInformers) + admission.SetExternalKubeClientSet(clientset) + internalInformers := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc()) + admission.SetExternalKubeInformerFactory(internalInformers) qca := quotainstall.NewQuotaConfigurationForAdmission() admission.SetQuotaConfiguration(qca) defer close(admissionCh) @@ -257,7 +254,6 @@ func 
TestQuotaLimitedResourceDenial(t *testing.T) { admissionCh := make(chan struct{}) clientset := clientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) - internalClientset := internalclientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) // stop creation of a pod resource unless there is a quota config := &resourcequotaapi.Configuration{ @@ -273,9 +269,9 @@ func TestQuotaLimitedResourceDenial(t *testing.T) { if err != nil { t.Fatalf("unexpected error: %v", err) } - admission.SetInternalKubeClientSet(internalClientset) - internalInformers := internalinformers.NewSharedInformerFactory(internalClientset, controller.NoResyncPeriodFunc()) - admission.SetInternalKubeInformerFactory(internalInformers) + admission.SetExternalKubeClientSet(clientset) + externalInformers := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc()) + admission.SetExternalKubeInformerFactory(externalInformers) admission.SetQuotaConfiguration(qca) defer close(admissionCh) @@ -324,7 +320,7 @@ func TestQuotaLimitedResourceDenial(t *testing.T) { // Periodically the quota controller to detect new resource types go resourceQuotaController.Sync(discoveryFunc, 30*time.Second, controllerCh) - internalInformers.Start(controllerCh) + externalInformers.Start(controllerCh) informers.Start(controllerCh) close(informersStarted) From ede89afa990a3eb939049146db755d70fe5fb7ca Mon Sep 17 00:00:00 2001 From: yue9944882 <291271447@qq.com> Date: Mon, 27 Aug 2018 21:52:08 +0800 Subject: [PATCH 8/9] align imports for kubeapiserver admission initializer --- pkg/kubeapiserver/admission/BUILD | 2 +- pkg/kubeapiserver/admission/initializer.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/kubeapiserver/admission/BUILD 
b/pkg/kubeapiserver/admission/BUILD index 46b8805356a..14879391b61 100644 --- a/pkg/kubeapiserver/admission/BUILD +++ b/pkg/kubeapiserver/admission/BUILD @@ -20,7 +20,7 @@ go_library( deps = [ "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/informers/informers_generated/internalversion:go_default_library", - "//pkg/quota:go_default_library", + "//pkg/quota/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", "//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config:go_default_library", diff --git a/pkg/kubeapiserver/admission/initializer.go b/pkg/kubeapiserver/admission/initializer.go index d47338ac2f4..e7fd5461ad5 100644 --- a/pkg/kubeapiserver/admission/initializer.go +++ b/pkg/kubeapiserver/admission/initializer.go @@ -23,7 +23,7 @@ import ( "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" - "k8s.io/kubernetes/pkg/quota" + quota "k8s.io/kubernetes/pkg/quota/v1" ) // TODO add a `WantsToRun` which takes a stopCh. Might make it generic. 
From ae628c548843de9973d3bdcda1dcce23eb35cc8f Mon Sep 17 00:00:00 2001 From: yue9944882 <291271447@qq.com> Date: Mon, 27 Aug 2018 21:52:26 +0800 Subject: [PATCH 9/9] fixes golint: pkg switching --- hack/.golint_failures | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hack/.golint_failures b/hack/.golint_failures index 9c940c00485..90649410670 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -240,7 +240,7 @@ pkg/proxy/userspace pkg/proxy/util pkg/proxy/winkernel pkg/proxy/winuserspace -pkg/quota/evaluator/core +pkg/quota/v1/evaluator/core pkg/registry/admissionregistration/initializerconfiguration/storage pkg/registry/admissionregistration/mutatingwebhookconfiguration/storage pkg/registry/admissionregistration/rest