diff --git a/api/swagger-spec/v1.json b/api/swagger-spec/v1.json
index 1e98ae2700f..7090c15a9ad 100644
--- a/api/swagger-spec/v1.json
+++ b/api/swagger-spec/v1.json
@@ -17515,9 +17515,20 @@
    "hard": {
     "type": "any",
     "description": "Hard is the set of desired hard limits for each named resource. More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota"
+   },
+   "scopes": {
+    "type": "array",
+    "items": {
+     "$ref": "v1.ResourceQuotaScope"
+    },
+    "description": "A collection of filters that must match each object tracked by a quota. If not specified, the quota matches all objects."
    }
   }
  },
+ "v1.ResourceQuotaScope": {
+  "id": "v1.ResourceQuotaScope",
+  "properties": {}
+ },
  "v1.ResourceQuotaStatus": {
   "id": "v1.ResourceQuotaStatus",
   "description": "ResourceQuotaStatus defines the enforced hard limits and observed use.",
diff --git a/cmd/kube-controller-manager/app/controllermanager.go b/cmd/kube-controller-manager/app/controllermanager.go
index 7c568f58961..595571d53e5 100644
--- a/cmd/kube-controller-manager/app/controllermanager.go
+++ b/cmd/kube-controller-manager/app/controllermanager.go
@@ -60,6 +60,7 @@ import (
 	servicecontroller "k8s.io/kubernetes/pkg/controller/service"
 	serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount"
 	"k8s.io/kubernetes/pkg/healthz"
+	quotainstall "k8s.io/kubernetes/pkg/quota/install"
 	"k8s.io/kubernetes/pkg/serviceaccount"
 	"k8s.io/kubernetes/pkg/util"
 	"k8s.io/kubernetes/pkg/util/configz"
@@ -226,9 +227,23 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 		glog.Infof("allocate-node-cidrs set to %v, node controller not creating routes", s.AllocateNodeCIDRs)
 	}

-	go resourcequotacontroller.NewResourceQuotaController(
-		clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "resourcequota-controller")),
-		controller.StaticResyncPeriodFunc(s.ResourceQuotaSyncPeriod.Duration)).Run(s.ConcurrentResourceQuotaSyncs, wait.NeverStop)
+	resourceQuotaControllerClient := clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "resourcequota-controller"))
+	resourceQuotaRegistry := quotainstall.NewRegistry(resourceQuotaControllerClient)
+	groupKindsToReplenish := []unversioned.GroupKind{
+		api.Kind("Pod"),
+		api.Kind("Service"),
+		api.Kind("ReplicationController"),
+		api.Kind("PersistentVolumeClaim"),
+		api.Kind("Secret"),
+	}
+	resourceQuotaControllerOptions := &resourcequotacontroller.ResourceQuotaControllerOptions{
+		KubeClient:            resourceQuotaControllerClient,
+		ResyncPeriod:          controller.StaticResyncPeriodFunc(s.ResourceQuotaSyncPeriod.Duration),
+		Registry:              resourceQuotaRegistry,
+		GroupKindsToReplenish: groupKindsToReplenish,
+		ControllerFactory:     resourcequotacontroller.NewReplenishmentControllerFactory(resourceQuotaControllerClient),
+	}
+	go resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions).Run(s.ConcurrentResourceQuotaSyncs, wait.NeverStop)

 	// If apiserver is not running we should wait for some time and fail only then. This is particularly
 	// important when we start apiserver and controller manager at the same time.
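Reviewer note: the options struct consumed above is easiest to follow from its call site. The sketch below reconstructs its likely shape purely from that call site; the field names match the literal above, but the types are inferred, and the authoritative definition is the one in pkg/controller/resourcequota.

```go
// Sketch inferred from the StartControllers call site above; not the
// authoritative definition, which lives in pkg/controller/resourcequota.
package resourcequota

import (
	"k8s.io/kubernetes/pkg/api/unversioned"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/quota"
)

// ResourceQuotaControllerOptions bundles the controller's dependencies so new
// collaborators (Registry, ControllerFactory) can be added without growing the
// constructor's parameter list.
type ResourceQuotaControllerOptions struct {
	// KubeClient lists quotas and writes back observed usage.
	KubeClient clientset.Interface
	// ResyncPeriod bounds how stale a quota's usage may become before a full recalculation.
	ResyncPeriod controller.ResyncPeriodFunc
	// Registry knows how to evaluate usage for each resource a quota can track.
	Registry quota.Registry
	// GroupKindsToReplenish names the kinds whose deletion releases quota and
	// should therefore trigger a recalculation.
	GroupKindsToReplenish []unversioned.GroupKind
	// ControllerFactory builds the replenishment controllers watching those kinds.
	ControllerFactory ReplenishmentControllerFactory
}
```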
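The swagger hunk at the top of this diff adds an optional `scopes` list to ResourceQuotaSpec: when set, the quota only counts objects that match every listed scope. A rough illustration follows; the concrete scope value "BestEffort" is an assumption for the example, since the hunk defines the v1.ResourceQuotaScope type but not its legal values.

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	// A quota that caps pod count, but only for objects matching its scopes.
	// "BestEffort" is a hypothetical scope value used purely for illustration.
	rq := v1.ResourceQuota{
		Spec: v1.ResourceQuotaSpec{
			Hard: v1.ResourceList{
				v1.ResourcePods: resource.MustParse("10"),
			},
			Scopes: []v1.ResourceQuotaScope{"BestEffort"},
		},
	}
	fmt.Printf("hard=%v scopes=%v\n", rq.Spec.Hard, rq.Spec.Scopes)
}
```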
diff --git a/contrib/mesos/pkg/controllermanager/controllermanager.go b/contrib/mesos/pkg/controllermanager/controllermanager.go
index dc652195649..7dfdc2beeb2 100644
--- a/contrib/mesos/pkg/controllermanager/controllermanager.go
+++ b/contrib/mesos/pkg/controllermanager/controllermanager.go
@@ -28,6 +28,7 @@ import (
 	kubecontrollermanager "k8s.io/kubernetes/cmd/kube-controller-manager/app"
 	"k8s.io/kubernetes/cmd/kube-controller-manager/app/options"
 	"k8s.io/kubernetes/contrib/mesos/pkg/node"
+	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
@@ -53,6 +54,7 @@ import (
 	servicecontroller "k8s.io/kubernetes/pkg/controller/service"
 	serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount"
 	"k8s.io/kubernetes/pkg/healthz"
+	quotainstall "k8s.io/kubernetes/pkg/quota/install"
 	"k8s.io/kubernetes/pkg/serviceaccount"
 	"k8s.io/kubernetes/pkg/util"
 	"k8s.io/kubernetes/pkg/util/wait"
@@ -173,8 +175,23 @@ func (s *CMServer) Run(_ []string) error {
 		routeController.Run(s.NodeSyncPeriod.Duration)
 	}

-	go resourcequotacontroller.NewResourceQuotaController(
-		clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "resource-quota-controller")), controller.StaticResyncPeriodFunc(s.ResourceQuotaSyncPeriod.Duration)).Run(s.ConcurrentResourceQuotaSyncs, wait.NeverStop)
+	resourceQuotaControllerClient := clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "resource-quota-controller"))
+	resourceQuotaRegistry := quotainstall.NewRegistry(resourceQuotaControllerClient)
+	groupKindsToReplenish := []unversioned.GroupKind{
+		api.Kind("Pod"),
+		api.Kind("Service"),
+		api.Kind("ReplicationController"),
+		api.Kind("PersistentVolumeClaim"),
+		api.Kind("Secret"),
+	}
+	resourceQuotaControllerOptions := &resourcequotacontroller.ResourceQuotaControllerOptions{
+		KubeClient:            resourceQuotaControllerClient,
+		ResyncPeriod:          controller.StaticResyncPeriodFunc(s.ResourceQuotaSyncPeriod.Duration),
+		Registry:              resourceQuotaRegistry,
+		GroupKindsToReplenish: groupKindsToReplenish,
+		ControllerFactory:     resourcequotacontroller.NewReplenishmentControllerFactory(resourceQuotaControllerClient),
+	}
+	go resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions).Run(s.ConcurrentResourceQuotaSyncs, wait.NeverStop)

 	// If apiserver is not running we should wait for some time and fail only then. This is particularly
 	// important when we start apiserver and controller manager at the same time.
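Note that this Mesos hunk duplicates, token for token, the wiring just added to cmd/kube-controller-manager. A hypothetical shared constructor (not part of this diff; imports match those added in the hunks above) would keep the two call sites from drifting:

```go
// Hypothetical helper, not in this diff: both controller managers could build
// identical options through one function parameterized only by the client and
// resync period.
func newQuotaControllerOptions(c clientset.Interface, resync controller.ResyncPeriodFunc) *resourcequotacontroller.ResourceQuotaControllerOptions {
	return &resourcequotacontroller.ResourceQuotaControllerOptions{
		KubeClient:   c,
		ResyncPeriod: resync,
		Registry:     quotainstall.NewRegistry(c),
		GroupKindsToReplenish: []unversioned.GroupKind{
			api.Kind("Pod"),
			api.Kind("Service"),
			api.Kind("ReplicationController"),
			api.Kind("PersistentVolumeClaim"),
			api.Kind("Secret"),
		},
		ControllerFactory: resourcequotacontroller.NewReplenishmentControllerFactory(c),
	}
}
```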
diff --git a/contrib/mesos/pkg/scheduler/resources/resource_test.go b/contrib/mesos/pkg/scheduler/resources/resource_test.go
index c2b79bfee03..5260790f338 100644
--- a/contrib/mesos/pkg/scheduler/resources/resource_test.go
+++ b/contrib/mesos/pkg/scheduler/resources/resource_test.go
@@ -22,7 +22,6 @@ import (
 	"github.com/stretchr/testify/assert"

 	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/controller/resourcequota"
 )

 type resources struct {
@@ -81,7 +80,6 @@ func TestResources(tst *testing.T) {
 		}
 		tst.Logf("Testing resource computation for %v => request=%v limit=%v", t, pod.Spec.Containers[0].Resources.Requests, pod.Spec.Containers[0].Resources.Limits)
-		tst.Logf("hasRequests: cpu => %v, mem => %v", resourcequota.PodHasRequests(pod, api.ResourceCPU), resourcequota.PodHasRequests(pod, api.ResourceMemory))

 		beforeCpuR, beforeCpuL, _, err := LimitedCPUForPod(pod, DefaultDefaultContainerCPULimit)
 		assert.NoError(err, "CPUForPod should not return an error")
diff --git a/docs/api-reference/extensions/v1beta1/definitions.html b/docs/api-reference/extensions/v1beta1/definitions.html
index f47cebcf2bc..c91c13282a8 100755
--- a/docs/api-reference/extensions/v1beta1/definitions.html
+++ b/docs/api-reference/extensions/v1beta1/definitions.html
@@ -5591,7 +5591,7 @@ Populated by the system when a graceful deletion is requested. Read-only. More i