Merge pull request #25414 from derekwaynecarr/quota_int_test_improvements

Automatic merge from submit-queue

Quota integration test improvements

This PR does the following:

* allow a replication manager to be created that does not record events
* improve the shutdown behavior of the replication manager and resource quota controllers so that their worker (doWork) funcs exit properly
* update the quota integration test to use the non-event-generating replication manager and reduce the number of pods it provisions

I am hoping this combination of changes fixes the referenced flake.
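
For reviewers, the shape of the worker change is sketched below. This is not code from the PR — the channel stands in for the real workqueue, and the names are simplified — it just shows the `workFunc` pattern in which the work function reports queue shutdown so the outer loop can return instead of blocking forever:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	// A stand-in for the controllers' workqueue: closing the channel plays the
	// role of queue.ShutDown(), which is what lets Get report quit=true.
	queue := make(chan string, 10)

	// getWork mimics queue.Get(): quit becomes true once the queue has been
	// shut down (closed) and drained.
	getWork := func() (key string, quit bool) {
		key, ok := <-queue
		return key, !ok
	}

	var wg sync.WaitGroup
	worker := func() {
		defer wg.Done()
		workFunc := func() bool {
			key, quit := getWork()
			if quit {
				return true
			}
			fmt.Println("synced", key)
			return false
		}
		for {
			if quit := workFunc(); quit {
				fmt.Println("worker shutting down")
				return
			}
		}
	}

	wg.Add(1)
	go worker()

	queue <- "default/frontend"
	close(queue) // analogous to queue.ShutDown(): unblocks getWork so the worker exits
	wg.Wait()
}
```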

Fixes https://github.com/kubernetes/kubernetes/issues/25037
k8s-merge-robot 2016-05-10 18:58:34 -07:00
commit bfccd929c6
6 changed files with 138 additions and 64 deletions

View File

@@ -102,11 +102,18 @@ type ReplicationManager struct {
 	queue *workqueue.Type
 }

+// NewReplicationManager creates a replication manager
 func NewReplicationManager(podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int) *ReplicationManager {
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(glog.Infof)
 	eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
+	return newReplicationManagerInternal(
+		eventBroadcaster.NewRecorder(api.EventSource{Component: "replication-controller"}),
+		podInformer, kubeClient, resyncPeriod, burstReplicas, lookupCacheSize)
+}
+
+// newReplicationManagerInternal configures a replication manager with the specified event recorder
+func newReplicationManagerInternal(eventRecorder record.EventRecorder, podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int) *ReplicationManager {
 	if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
 		metrics.RegisterMetricAndTrackRateLimiterUsage("replication_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
 	}
@@ -115,7 +122,7 @@ func NewReplicationManager(podInformer framework.SharedIndexInformer, kubeClient
 		kubeClient: kubeClient,
 		podControl: controller.RealPodControl{
 			KubeClient: kubeClient,
-			Recorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "replication-controller"}),
+			Recorder:   eventRecorder,
 		},
 		burstReplicas: burstReplicas,
 		expectations:  controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
@@ -195,7 +202,14 @@ func NewReplicationManager(podInformer framework.SharedIndexInformer, kubeClient
 	rm.podStoreSynced = rm.podController.HasSynced
 	rm.lookupCache = controller.NewMatchingCache(lookupCacheSize)
 	return rm
+}
+
+// NewReplicationManagerFromClientForIntegration creates a new ReplicationManager that runs its own informer. It disables event recording for use in integration tests.
+func NewReplicationManagerFromClientForIntegration(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int) *ReplicationManager {
+	podInformer := informers.CreateSharedPodIndexInformer(kubeClient, resyncPeriod())
+	rm := newReplicationManagerInternal(&record.FakeRecorder{}, podInformer, kubeClient, resyncPeriod, burstReplicas, lookupCacheSize)
+	rm.internalPodInformer = podInformer
+	return rm
 }

 // NewReplicationManagerFromClient creates a new ReplicationManager that runs its own informer.
@@ -413,18 +427,23 @@ func (rm *ReplicationManager) enqueueController(obj interface{}) {
 // worker runs a worker thread that just dequeues items, processes them, and marks them done.
 // It enforces that the syncHandler is never invoked concurrently with the same key.
 func (rm *ReplicationManager) worker() {
-	for {
-		func() {
+	workFunc := func() bool {
 		key, quit := rm.queue.Get()
 		if quit {
-			return
+			return true
 		}
 		defer rm.queue.Done(key)
 		err := rm.syncHandler(key.(string))
 		if err != nil {
 			glog.Errorf("Error syncing replication controller: %v", err)
 		}
-		}()
+		return false
+	}
+	for {
+		if quit := workFunc(); quit {
+			glog.Infof("replication controller worker shutting down")
+			return
+		}
 	}
 }
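
A rough usage sketch for the new integration-test constructor above (it assumes a `clientset` built as in the integration test later in this commit; this is illustration, not additional code from the change):

```go
stopCh := make(chan struct{})
defer close(stopCh) // closing stopCh is what lets the reworked worker() loops exit

rm := replicationcontroller.NewReplicationManagerFromClientForIntegration(
	clientset, controller.NoResyncPeriodFunc, replicationcontroller.BurstReplicas, 4096)
go rm.Run(3, stopCh) // no events are recorded because the manager is built with record.FakeRecorder
```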

View File

@@ -163,11 +163,10 @@ func (rq *ResourceQuotaController) enqueueResourceQuota(obj interface{}) {
 // worker runs a worker thread that just dequeues items, processes them, and marks them done.
 // It enforces that the syncHandler is never invoked concurrently with the same key.
 func (rq *ResourceQuotaController) worker() {
-	for {
-		func() {
+	workFunc := func() bool {
 		key, quit := rq.queue.Get()
 		if quit {
-			return
+			return true
 		}
 		defer rq.queue.Done(key)
 		err := rq.syncHandler(key.(string))
@@ -175,7 +174,13 @@ func (rq *ResourceQuotaController) worker() {
 			utilruntime.HandleError(err)
 			rq.queue.Add(key)
 		}
-		}()
+		return false
+	}
+	for {
+		if quit := workFunc(); quit {
+			glog.Infof("resource quota controller worker shutting down")
+			return
+		}
 	}
 }

View File

@@ -28,13 +28,15 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/quota"
 	"k8s.io/kubernetes/pkg/quota/install"
+	utilruntime "k8s.io/kubernetes/pkg/util/runtime"
 )

 func init() {
 	admission.RegisterPlugin("ResourceQuota",
 		func(client clientset.Interface, config io.Reader) (admission.Interface, error) {
 			registry := install.NewRegistry(client)
-			return NewResourceQuota(client, registry, 5)
+			// TODO: expose a stop channel in admission factory
+			return NewResourceQuota(client, registry, 5, make(chan struct{}))
 		})
 }
@@ -53,12 +55,14 @@ type liveLookupEntry struct {
 // NewResourceQuota configures an admission controller that can enforce quota constraints
 // using the provided registry. The registry must have the capability to handle group/kinds that
 // are persisted by the server this admission controller is intercepting
-func NewResourceQuota(client clientset.Interface, registry quota.Registry, numEvaluators int) (admission.Interface, error) {
+func NewResourceQuota(client clientset.Interface, registry quota.Registry, numEvaluators int, stopCh <-chan struct{}) (admission.Interface, error) {
 	evaluator, err := newQuotaEvaluator(client, registry)
 	if err != nil {
 		return nil, err
 	}
-	evaluator.Run(numEvaluators)
+	defer utilruntime.HandleCrash()
+	go evaluator.Run(numEvaluators, stopCh)
 	return &quotaAdmission{
 		Handler: admission.NewHandler(admission.Create, admission.Update),
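
With the signature change above, callers of `NewResourceQuota` now own the lifetime of the evaluator goroutines. A hedged caller-side sketch (variable names assumed; it mirrors how the integration test later in this commit uses the new parameter):

```go
stopCh := make(chan struct{})

registry := install.NewRegistry(client)
quotaAdmission, err := resourcequota.NewResourceQuota(client, registry, 5, stopCh)
if err != nil {
	// handle the error
}

// ... wire quotaAdmission into the admission chain and serve ...

// On shutdown, closing stopCh stops the evaluator workers and shuts down its queue.
close(stopCh)
```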

View File

@@ -22,7 +22,7 @@ import (
 	"testing"
 	"time"

-	"github.com/hashicorp/golang-lru"
+	lru "github.com/hashicorp/golang-lru"

 	"k8s.io/kubernetes/pkg/admission"
 	"k8s.io/kubernetes/pkg/api"
@@ -36,6 +36,7 @@ import (
 	"k8s.io/kubernetes/pkg/quota/generic"
 	"k8s.io/kubernetes/pkg/quota/install"
 	"k8s.io/kubernetes/pkg/runtime"
+	utilruntime "k8s.io/kubernetes/pkg/util/runtime"
 	"k8s.io/kubernetes/pkg/util/sets"
 )
@@ -115,7 +116,9 @@ func TestPrettyPrint(t *testing.T) {
 // TestAdmissionIgnoresDelete verifies that the admission controller ignores delete operations
 func TestAdmissionIgnoresDelete(t *testing.T) {
 	kubeClient := fake.NewSimpleClientset()
-	handler, err := NewResourceQuota(kubeClient, install.NewRegistry(kubeClient), 5)
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	handler, err := NewResourceQuota(kubeClient, install.NewRegistry(kubeClient), 5, stopCh)
 	if err != nil {
 		t.Errorf("Unexpected error %v", err)
 	}
@@ -143,7 +146,10 @@ func TestAdmissionIgnoresSubresources(t *testing.T) {
 	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
 	evaluator, _ := newQuotaEvaluator(kubeClient, install.NewRegistry(kubeClient))
 	evaluator.indexer = indexer
-	evaluator.Run(5)
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	defer utilruntime.HandleCrash()
+	go evaluator.Run(5, stopCh)
 	handler := &quotaAdmission{
 		Handler:   admission.NewHandler(admission.Create, admission.Update),
 		evaluator: evaluator,
@@ -181,7 +187,10 @@ func TestAdmitBelowQuotaLimit(t *testing.T) {
 	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
 	evaluator, _ := newQuotaEvaluator(kubeClient, install.NewRegistry(kubeClient))
 	evaluator.indexer = indexer
-	evaluator.Run(5)
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	defer utilruntime.HandleCrash()
+	go evaluator.Run(5, stopCh)
 	handler := &quotaAdmission{
 		Handler:   admission.NewHandler(admission.Create, admission.Update),
 		evaluator: evaluator,
@@ -255,7 +264,10 @@ func TestAdmitExceedQuotaLimit(t *testing.T) {
 	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
 	evaluator, _ := newQuotaEvaluator(kubeClient, install.NewRegistry(kubeClient))
 	evaluator.indexer = indexer
-	evaluator.Run(5)
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	defer utilruntime.HandleCrash()
+	go evaluator.Run(5, stopCh)
 	handler := &quotaAdmission{
 		Handler:   admission.NewHandler(admission.Create, admission.Update),
 		evaluator: evaluator,
@@ -293,7 +305,10 @@ func TestAdmitEnforceQuotaConstraints(t *testing.T) {
 	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
 	evaluator, _ := newQuotaEvaluator(kubeClient, install.NewRegistry(kubeClient))
 	evaluator.indexer = indexer
-	evaluator.Run(5)
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	defer utilruntime.HandleCrash()
+	go evaluator.Run(5, stopCh)
 	handler := &quotaAdmission{
 		Handler:   admission.NewHandler(admission.Create, admission.Update),
 		evaluator: evaluator,
@@ -334,7 +349,10 @@ func TestAdmitPodInNamespaceWithoutQuota(t *testing.T) {
 	evaluator, _ := newQuotaEvaluator(kubeClient, install.NewRegistry(kubeClient))
 	evaluator.indexer = indexer
 	evaluator.liveLookupCache = liveLookupCache
-	evaluator.Run(5)
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	defer utilruntime.HandleCrash()
+	go evaluator.Run(5, stopCh)
 	handler := &quotaAdmission{
 		Handler:   admission.NewHandler(admission.Create, admission.Update),
 		evaluator: evaluator,
@@ -394,7 +412,10 @@ func TestAdmitBelowTerminatingQuotaLimit(t *testing.T) {
 	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
 	evaluator, _ := newQuotaEvaluator(kubeClient, install.NewRegistry(kubeClient))
 	evaluator.indexer = indexer
-	evaluator.Run(5)
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	defer utilruntime.HandleCrash()
+	go evaluator.Run(5, stopCh)
 	handler := &quotaAdmission{
 		Handler:   admission.NewHandler(admission.Create, admission.Update),
 		evaluator: evaluator,
@@ -493,7 +514,10 @@ func TestAdmitBelowBestEffortQuotaLimit(t *testing.T) {
 	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
 	evaluator, _ := newQuotaEvaluator(kubeClient, install.NewRegistry(kubeClient))
 	evaluator.indexer = indexer
-	evaluator.Run(5)
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	defer utilruntime.HandleCrash()
+	go evaluator.Run(5, stopCh)
 	handler := &quotaAdmission{
 		Handler:   admission.NewHandler(admission.Create, admission.Update),
 		evaluator: evaluator,
@@ -579,7 +603,10 @@ func TestAdmitBestEffortQuotaLimitIgnoresBurstable(t *testing.T) {
 	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
 	evaluator, _ := newQuotaEvaluator(kubeClient, install.NewRegistry(kubeClient))
 	evaluator.indexer = indexer
-	evaluator.Run(5)
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	defer utilruntime.HandleCrash()
+	go evaluator.Run(5, stopCh)
 	handler := &quotaAdmission{
 		Handler:   admission.NewHandler(admission.Create, admission.Update),
 		evaluator: evaluator,
@@ -692,7 +719,10 @@ func TestAdmissionSetsMissingNamespace(t *testing.T) {
 	evaluator, _ := newQuotaEvaluator(kubeClient, install.NewRegistry(kubeClient))
 	evaluator.indexer = indexer
 	evaluator.registry = registry
-	evaluator.Run(5)
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	defer utilruntime.HandleCrash()
+	go evaluator.Run(5, stopCh)
 	handler := &quotaAdmission{
 		Handler:   admission.NewHandler(admission.Create, admission.Update),
 		evaluator: evaluator,
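
Every test above now repeats the same start/stop boilerplate; a hypothetical in-package helper (not part of this PR) could express the pattern once:

```go
// startEvaluator is a hypothetical test helper, not part of this change:
// it starts the evaluator's workers and returns a function that stops them.
func startEvaluator(e *quotaEvaluator, workers int) (stop func()) {
	stopCh := make(chan struct{})
	go func() {
		defer utilruntime.HandleCrash()
		e.Run(workers, stopCh)
	}()
	return func() { close(stopCh) }
}
```

A test would then write `defer startEvaluator(evaluator, 5)()` in place of the four lines repeated above.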

View File

@@ -21,7 +21,8 @@ import (
 	"sync"
 	"time"

-	"github.com/hashicorp/golang-lru"
+	"github.com/golang/glog"
+	lru "github.com/hashicorp/golang-lru"

 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
@@ -126,25 +127,35 @@ func newQuotaEvaluator(client clientset.Interface, registry quota.Registry) (*qu
 }

 // Run begins watching and syncing.
-func (e *quotaEvaluator) Run(workers int) {
+func (e *quotaEvaluator) Run(workers int, stopCh <-chan struct{}) {
 	defer utilruntime.HandleCrash()

 	for i := 0; i < workers; i++ {
-		go wait.Until(e.doWork, time.Second, make(chan struct{}))
+		go wait.Until(e.doWork, time.Second, stopCh)
 	}
+	<-stopCh
+	glog.Infof("Shutting down quota evaluator")
+	e.queue.ShutDown()
 }

 func (e *quotaEvaluator) doWork() {
-	for {
-		func() {
-			ns, admissionAttributes := e.getWork()
+	workFunc := func() bool {
+		ns, admissionAttributes, quit := e.getWork()
+		if quit {
+			return true
+		}
 		defer e.completeWork(ns)
 		if len(admissionAttributes) == 0 {
+			return false
+		}
+		e.checkAttributes(ns, admissionAttributes)
+		return false
+	}
+	for {
+		if quit := workFunc(); quit {
+			glog.Infof("quota evaluator worker shutdown")
 			return
 		}
-		e.checkAttributes(ns, admissionAttributes)
-		}()
 	}
 }
@@ -434,8 +445,11 @@ func (e *quotaEvaluator) completeWork(ns string) {
 	e.inProgress.Delete(ns)
 }

-func (e *quotaEvaluator) getWork() (string, []*admissionWaiter) {
-	uncastNS, _ := e.queue.Get()
+func (e *quotaEvaluator) getWork() (string, []*admissionWaiter, bool) {
+	uncastNS, shutdown := e.queue.Get()
+	if shutdown {
+		return "", []*admissionWaiter{}, shutdown
+	}
 	ns := uncastNS.(string)

 	e.workLock.Lock()
@@ -450,12 +464,12 @@ func (e *quotaEvaluator) getWork() (string, []*admissionWaiter) {
 	if len(work) != 0 {
 		e.inProgress.Insert(ns)
-		return ns, work
+		return ns, work, false
 	}

 	e.queue.Done(ns)
 	e.inProgress.Delete(ns)
-	return ns, []*admissionWaiter{}
+	return ns, []*admissionWaiter{}, false
 }

 func (e *quotaEvaluator) getQuotas(namespace string) ([]api.ResourceQuota, error) {
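
The net effect of the `Run`/`getWork` changes is that the evaluator's lifetime is driven entirely by the stop channel; roughly (illustrative fragment, not code from this commit):

```go
stopCh := make(chan struct{})
go evaluator.Run(5, stopCh) // workers call doWork via wait.Until until stopCh is closed

// ... admission checks happen while the evaluator runs ...

close(stopCh) // Run observes the close, logs, and calls e.queue.ShutDown();
// the queue shutdown makes getWork return quit=true, so every doWork loop exits
```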

View File

@@ -43,6 +43,10 @@ import (
 	"k8s.io/kubernetes/test/integration/framework"
 )

+func init() {
+	requireEtcd()
+}
+
 // 1.2 code gets:
 // quota_test.go:95: Took 4.218619579s to scale up without quota
 // quota_test.go:199: unexpected error: timed out waiting for the condition, ended with 342 pods (1 minute)
@@ -59,13 +63,15 @@ func TestQuota(t *testing.T) {
 		<-initializationCh
 		m.Handler.ServeHTTP(w, req)
 	}))
-	// TODO: Uncomment when fix #19254
-	// defer s.Close()
+	// TODO: https://github.com/kubernetes/kubernetes/issues/25412
+	//defer s.Close()

+	admissionCh := make(chan struct{})
 	clientset := clientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-	admission, err := resourcequota.NewResourceQuota(clientset, quotainstall.NewRegistry(clientset), 5)
+	admission, err := resourcequota.NewResourceQuota(clientset, quotainstall.NewRegistry(clientset), 5, admissionCh)
 	if err != nil {
 		t.Fatalf("unexpected error: %v", err)
 	}
+	defer close(admissionCh)

 	masterConfig := framework.NewIntegrationTestMasterConfig()
 	masterConfig.AdmissionControl = admission
@@ -78,16 +84,12 @@ func TestQuota(t *testing.T) {
 	controllerCh := make(chan struct{})
 	defer close(controllerCh)

-	go replicationcontroller.NewReplicationManagerFromClient(clientset, controller.NoResyncPeriodFunc, replicationcontroller.BurstReplicas, 4096).
+	go replicationcontroller.NewReplicationManagerFromClientForIntegration(clientset, controller.NoResyncPeriodFunc, replicationcontroller.BurstReplicas, 4096).
 		Run(3, controllerCh)

 	resourceQuotaRegistry := quotainstall.NewRegistry(clientset)
 	groupKindsToReplenish := []unversioned.GroupKind{
 		api.Kind("Pod"),
-		api.Kind("Service"),
-		api.Kind("ReplicationController"),
-		api.Kind("PersistentVolumeClaim"),
-		api.Kind("Secret"),
 	}
 	resourceQuotaControllerOptions := &resourcequotacontroller.ResourceQuotaControllerOptions{
 		KubeClient: clientset,
@@ -118,8 +120,8 @@ func TestQuota(t *testing.T) {
 	scale(t, "quotaed", clientset)
 	endTime = time.Now()
 	t.Logf("Took %v to scale up with quota", endTime.Sub(startTime))
 }

 func waitForQuota(t *testing.T, quota *api.ResourceQuota, clientset *clientset.Clientset) {
 	w, err := clientset.Core().ResourceQuotas(quota.Namespace).Watch(api.SingleObject(api.ObjectMeta{Name: quota.Name}))
 	if err != nil {
@@ -152,7 +154,7 @@ func waitForQuota(t *testing.T, quota *api.ResourceQuota, clientset *clientset.C
 }

 func scale(t *testing.T, namespace string, clientset *clientset.Clientset) {
-	target := 1000
+	target := 100
 	rc := &api.ReplicationController{
 		ObjectMeta: api.ObjectMeta{
 			Name: "foo",