mirror of https://github.com/k3s-io/kubernetes.git
make cache size configurable

commit e44e71ca87
parent d9a35a25d7
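In short: the commit replaces the hard-coded lookup cache size (controller.DefaultCacheEntries = 4096) with a size passed in by each caller, exposed through new LookupCacheSizeForRC / LookupCacheSizeForRS options and the --rc-lookup-cache-size / --rs-lookup-cache-size flags. A minimal sketch of that wiring, using illustrative stand-in types rather than the real CMServer and controller code:

package main

import "fmt"

// options loosely mirrors the two fields added to CMServer / the
// componentconfig type in this commit; the struct itself is illustrative.
type options struct {
	LookupCacheSizeForRC int
	LookupCacheSizeForRS int
}

// matchingCache stands in for the controllers' lookup cache; only the
// size plumbing is shown.
type matchingCache struct{ maxEntries int }

func newMatchingCache(maxEntries int) *matchingCache {
	return &matchingCache{maxEntries: maxEntries}
}

func main() {
	// Defaults match the values added in NewCMServer (4096 for both caches).
	opts := options{LookupCacheSizeForRC: 4096, LookupCacheSizeForRS: 4096}

	// Instead of a hard-coded DefaultCacheEntries, each controller now
	// receives its cache size from the options.
	rcCache := newMatchingCache(opts.LookupCacheSizeForRC)
	rsCache := newMatchingCache(opts.LookupCacheSizeForRS)
	fmt.Println(rcCache.maxEntries, rsCache.maxEntries)
}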
@@ -196,7 +196,7 @@ func startComponents(firstManifestURL, secondManifestURL string) (string, string
 		Run(3, wait.NeverStop)
 
 	// TODO: Write an integration test for the replication controllers watch.
-	go replicationcontroller.NewReplicationManager(clientset, controller.NoResyncPeriodFunc, replicationcontroller.BurstReplicas).
+	go replicationcontroller.NewReplicationManager(clientset, controller.NoResyncPeriodFunc, replicationcontroller.BurstReplicas, 4096).
 		Run(3, wait.NeverStop)
 
 	nodeController := nodecontroller.NewNodeController(nil, clientset, 5*time.Minute, util.NewFakeAlwaysRateLimiter(), util.NewFakeAlwaysRateLimiter(),
@@ -181,6 +181,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 		clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "replication-controller")),
 		ResyncPeriod(s),
 		replicationcontroller.BurstReplicas,
+		s.LookupCacheSizeForRC,
 	).Run(s.ConcurrentRCSyncs, wait.NeverStop)
 
 	if s.TerminatedPodGCThreshold > 0 {
@@ -285,7 +286,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 
 	if containsResource(resources, "replicasets") {
 		glog.Infof("Starting ReplicaSet controller")
-		go replicaset.NewReplicaSetController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "replicaset-controller")), ResyncPeriod(s), replicaset.BurstReplicas).
+		go replicaset.NewReplicaSetController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "replicaset-controller")), ResyncPeriod(s), replicaset.BurstReplicas, s.LookupCacheSizeForRS).
 			Run(s.ConcurrentRSSyncs, wait.NeverStop)
 	}
 }
@@ -53,6 +53,8 @@ func NewCMServer() *CMServer {
 			ConcurrentResourceQuotaSyncs: 5,
 			ConcurrentDeploymentSyncs: 5,
 			ConcurrentNamespaceSyncs: 2,
+			LookupCacheSizeForRC: 4096,
+			LookupCacheSizeForRS: 4096,
 			ServiceSyncPeriod: unversioned.Duration{5 * time.Minute},
 			NodeSyncPeriod: unversioned.Duration{10 * time.Second},
 			ResourceQuotaSyncPeriod: unversioned.Duration{5 * time.Minute},
@@ -98,6 +100,8 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet) {
 	fs.IntVar(&s.ConcurrentResourceQuotaSyncs, "concurrent-resource-quota-syncs", s.ConcurrentResourceQuotaSyncs, "The number of resource quotas that are allowed to sync concurrently. Larger number = more responsive quota management, but more CPU (and network) load")
 	fs.IntVar(&s.ConcurrentDeploymentSyncs, "concurrent-deployment-syncs", s.ConcurrentDeploymentSyncs, "The number of deployment objects that are allowed to sync concurrently. Larger number = more responsive deployments, but more CPU (and network) load")
 	fs.IntVar(&s.ConcurrentNamespaceSyncs, "concurrent-namespace-syncs", s.ConcurrentNamespaceSyncs, "The number of namespace objects that are allowed to sync concurrently. Larger number = more responsive namespace termination, but more CPU (and network) load")
+	fs.IntVar(&s.LookupCacheSizeForRC, "rc-lookup-cache-size", s.LookupCacheSizeForRC, "The size of the lookup cache for replication controllers. Larger number = more responsive replica management, but more MEM load.")
+	fs.IntVar(&s.LookupCacheSizeForRS, "rs-lookup-cache-size", s.LookupCacheSizeForRS, "The size of the lookup cache for replica sets. Larger number = more responsive replica management, but more MEM load.")
 	fs.DurationVar(&s.ServiceSyncPeriod.Duration, "service-sync-period", s.ServiceSyncPeriod.Duration, "The period for syncing services with their external load balancers")
 	fs.DurationVar(&s.NodeSyncPeriod.Duration, "node-sync-period", s.NodeSyncPeriod.Duration, ""+
 		"The period for syncing nodes from cloudprovider. Longer periods will result in "+
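The two added registrations follow the usual spf13/pflag IntVar pattern. A self-contained sketch of that pattern, with an illustrative flag set name rather than the controller-manager's actual wiring:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	// Default of 4096 matches the value set in NewCMServer.
	lookupCacheSizeForRC := 4096

	fs := pflag.NewFlagSet("controller-manager", pflag.ExitOnError)
	// IntVar binds the flag to the variable; the default shown in --help is
	// whatever the variable holds at registration time.
	fs.IntVar(&lookupCacheSizeForRC, "rc-lookup-cache-size", lookupCacheSizeForRC,
		"The size of the lookup cache for replication controllers. Larger number = more responsive replica management, but more MEM load.")

	fs.Parse([]string{"--rc-lookup-cache-size=8192"})
	fmt.Println(lookupCacheSizeForRC) // prints 8192
}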
@@ -131,7 +131,7 @@ func (s *CMServer) Run(_ []string) error {
 	endpoints := s.createEndpointController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "endpoint-controller")))
 	go endpoints.Run(s.ConcurrentEndpointSyncs, wait.NeverStop)
 
-	go replicationcontroller.NewReplicationManager(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "replication-controller")), s.resyncPeriod, replicationcontroller.BurstReplicas).
+	go replicationcontroller.NewReplicationManager(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "replication-controller")), s.resyncPeriod, replicationcontroller.BurstReplicas, s.LookupCacheSizeForRC).
 		Run(s.ConcurrentRCSyncs, wait.NeverStop)
 
 	if s.TerminatedPodGCThreshold > 0 {
@@ -238,7 +238,7 @@ func (s *CMServer) Run(_ []string) error {
 
 	if containsResource(resources, "replicasets") {
 		glog.Infof("Starting ReplicaSet controller")
-		go replicaset.NewReplicaSetController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "replicaset-controller")), s.resyncPeriod, replicaset.BurstReplicas).
+		go replicaset.NewReplicaSetController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "replicaset-controller")), s.resyncPeriod, replicaset.BurstReplicas, s.LookupCacheSizeForRS).
 			Run(s.ConcurrentRSSyncs, wait.NeverStop)
 	}
 }
@@ -98,14 +98,16 @@ kube-controller-manager
       --pv-recycler-pod-template-filepath-nfs="": The file path to a pod definition used as a template for NFS persistent volume recycling
       --pv-recycler-timeout-increment-hostpath=30: the increment of time added per Gi to ActiveDeadlineSeconds for a HostPath scrubber pod. This is for development and testing only and will not work in a multi-node cluster.
       --pvclaimbinder-sync-period=10m0s: The period for syncing persistent volumes and persistent volume claims
+      --rc-lookup-cache-size=4096: The size of the lookup cache for replication controllers. Larger number = more responsive replica management, but more MEM load.
       --resource-quota-sync-period=5m0s: The period for syncing quota usage status in the system
       --root-ca-file="": If set, this root certificate authority will be included in service account's token secret. This must be a valid PEM-encoded CA bundle.
+      --rs-lookup-cache-size=4096: The size of the lookup cache for replica sets. Larger number = more responsive replica management, but more MEM load.
       --service-account-private-key-file="": Filename containing a PEM-encoded private RSA key used to sign service account tokens.
       --service-sync-period=5m0s: The period for syncing services with their external load balancers
       --terminated-pod-gc-threshold=12500: Number of terminated pods that can exist before the terminated pod garbage collector starts deleting terminated pods. If <= 0, the terminated pod garbage collector is disabled.
 ```
 
-###### Auto generated by spf13/cobra on 8-Feb-2016
+###### Auto generated by spf13/cobra on 24-Feb-2016
 
 
 <!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
@@ -289,6 +289,7 @@ pv-recycler-pod-template-filepath-nfs
 pv-recycler-maximum-retry
 pv-recycler-timeout-increment-hostpath
 pvclaimbinder-sync-period
+rc-lookup-cache-size
 read-only-port
 really-crash-for-testing
 reconcile-cidr
@@ -314,6 +315,7 @@ rkt-path
 rkt-stage1-image
 root-ca-file
 root-dir
+rs-lookup-cache-size
 run-proxy
 runtime-config
 runtime-cgroups
(File diff suppressed because it is too large.)
@@ -399,11 +399,11 @@ type KubeControllerManagerConfiguration struct {
 	// but more CPU (and network) load.
 	ConcurrentEndpointSyncs int `json:"concurrentEndpointSyncs"`
 	// concurrentRSSyncs is the number of replica sets that are allowed to sync
-	// concurrently. Larger number = more reponsive replica management, but more
+	// concurrently. Larger number = more responsive replica management, but more
 	// CPU (and network) load.
 	ConcurrentRSSyncs int `json:"concurrentRSSyncs"`
 	// concurrentRCSyncs is the number of replication controllers that are
-	// allowed to sync concurrently. Larger number = more reponsive replica
+	// allowed to sync concurrently. Larger number = more responsive replica
 	// management, but more CPU (and network) load.
 	ConcurrentRCSyncs int `json:"concurrentRCSyncs"`
 	// concurrentResourceQuotaSyncs is the number of resource quotas that are
@@ -411,20 +411,26 @@ type KubeControllerManagerConfiguration struct {
 	// management, but more CPU (and network) load.
 	ConcurrentResourceQuotaSyncs int `json:"concurrentResourceQuotaSyncs"`
 	// concurrentDeploymentSyncs is the number of deployment objects that are
-	// allowed to sync concurrently. Larger number = more reponsive deployments,
+	// allowed to sync concurrently. Larger number = more responsive deployments,
 	// but more CPU (and network) load.
 	ConcurrentDeploymentSyncs int `json:"concurrentDeploymentSyncs"`
 	// concurrentDaemonSetSyncs is the number of daemonset objects that are
-	// allowed to sync concurrently. Larger number = more reponsive DaemonSet,
+	// allowed to sync concurrently. Larger number = more responsive DaemonSet,
 	// but more CPU (and network) load.
 	ConcurrentDaemonSetSyncs int `json:"concurrentDaemonSetSyncs"`
 	// concurrentJobSyncs is the number of job objects that are
-	// allowed to sync concurrently. Larger number = more reponsive jobs,
+	// allowed to sync concurrently. Larger number = more responsive jobs,
 	// but more CPU (and network) load.
 	ConcurrentJobSyncs int `json:"concurrentJobSyncs"`
 	// concurrentNamespaceSyncs is the number of namespace objects that are
 	// allowed to sync concurrently.
 	ConcurrentNamespaceSyncs int `json:"concurrentNamespaceSyncs"`
+	// LookupCacheSizeForRC is the size of the lookup cache for replication controllers.
+	// Larger number = more responsive replica management, but more MEM load.
+	LookupCacheSizeForRC int `json:"lookupCacheSizeForRC"`
+	// LookupCacheSizeForRS is the size of the lookup cache for replica sets.
+	// Larger number = more responsive replica management, but more MEM load.
+	LookupCacheSizeForRS int `json:"lookupCacheSizeForRS"`
 	// serviceSyncPeriod is the period for syncing services with their external
 	// load balancers.
 	ServiceSyncPeriod unversioned.Duration `json:"serviceSyncPeriod"`
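Because the new fields carry json tags, they serialize alongside the existing componentconfig knobs. A small self-contained sketch (the struct below is a cut-down stand-in for KubeControllerManagerConfiguration, not the real type):

package main

import (
	"encoding/json"
	"fmt"
)

// config is a pared-down stand-in that keeps only the two fields added in
// this commit, with the json tags shown in the diff above.
type config struct {
	LookupCacheSizeForRC int `json:"lookupCacheSizeForRC"`
	LookupCacheSizeForRS int `json:"lookupCacheSizeForRS"`
}

func main() {
	out, _ := json.Marshal(config{LookupCacheSizeForRC: 4096, LookupCacheSizeForRS: 4096})
	fmt.Println(string(out)) // {"lookupCacheSizeForRC":4096,"lookupCacheSizeForRS":4096}

	var in config
	_ = json.Unmarshal([]byte(`{"lookupCacheSizeForRC":1024,"lookupCacheSizeForRS":2048}`), &in)
	fmt.Println(in.LookupCacheSizeForRC, in.LookupCacheSizeForRS) // 1024 2048
}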
@@ -25,8 +25,6 @@ import (
 	hashutil "k8s.io/kubernetes/pkg/util/hash"
 )
 
-const DefaultCacheEntries = 4096
-
 type objectWithMeta interface {
 	meta.Object
 }
@@ -95,7 +95,7 @@ type ReplicaSetController struct {
 }
 
 // NewReplicaSetController creates a new ReplicaSetController.
-func NewReplicaSetController(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int) *ReplicaSetController {
+func NewReplicaSetController(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int) *ReplicaSetController {
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(glog.Infof)
 	eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{kubeClient.Core().Events("")})
@@ -191,7 +191,7 @@ func NewReplicaSetController(kubeClient clientset.Interface, resyncPeriod contro
 
 	rsc.syncHandler = rsc.syncReplicaSet
 	rsc.podStoreSynced = rsc.podController.HasSynced
-	rsc.lookupCache = controller.NewMatchingCache(controller.DefaultCacheEntries)
+	rsc.lookupCache = controller.NewMatchingCache(lookupCacheSize)
 	return rsc
 }
 
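The lookupCache sized here is the controllers' pod-to-controller matching cache. As an illustration of what the size parameter bounds, below is a minimal fixed-capacity LRU sketch; it is not the pkg/controller MatchingCache implementation, just the general idea. A size of 0 is treated as unbounded in this sketch, mirroring the 0 passed by the unit tests that follow (an assumption about the intended semantics, not confirmed by this diff).

package main

import (
	"container/list"
	"fmt"
)

// lruCache is a minimal fixed-capacity LRU, showing the role of the
// lookupCacheSize parameter: it bounds how many pod-to-controller matches
// are remembered before the oldest entry is evicted.
type lruCache struct {
	maxEntries int
	ll         *list.List
	items      map[string]*list.Element
}

type entry struct {
	key   string
	value interface{}
}

func newLRUCache(maxEntries int) *lruCache {
	return &lruCache{maxEntries: maxEntries, ll: list.New(), items: map[string]*list.Element{}}
}

// Add inserts or refreshes a key; when the cache is over capacity it drops
// the least recently used entry (maxEntries <= 0 means no bound here).
func (c *lruCache) Add(key string, value interface{}) {
	if el, ok := c.items[key]; ok {
		c.ll.MoveToFront(el)
		el.Value.(*entry).value = value
		return
	}
	c.items[key] = c.ll.PushFront(&entry{key, value})
	if c.maxEntries > 0 && c.ll.Len() > c.maxEntries {
		oldest := c.ll.Back()
		c.ll.Remove(oldest)
		delete(c.items, oldest.Value.(*entry).key)
	}
}

// Get returns the cached value and marks the entry as recently used.
func (c *lruCache) Get(key string) (interface{}, bool) {
	el, ok := c.items[key]
	if !ok {
		return nil, false
	}
	c.ll.MoveToFront(el)
	return el.Value.(*entry).value, true
}

func main() {
	// A cache sized 2: adding a third pod evicts the least recently used one.
	cache := newLRUCache(2)
	cache.Add("pod-a", "rs-frontend")
	cache.Add("pod-b", "rs-frontend")
	cache.Add("pod-c", "rs-backend")
	_, ok := cache.Get("pod-a")
	fmt.Println(ok) // false: evicted
}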
@@ -139,7 +139,7 @@ type serverResponse struct {
 func TestSyncReplicaSetDoesNothing(t *testing.T) {
 	client := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
 	fakePodControl := controller.FakePodControl{}
-	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas)
+	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
 	manager.podStoreSynced = alwaysReady
 
 	// 2 running pods, a controller with 2 replicas, sync is a no-op
@@ -156,7 +156,7 @@ func TestSyncReplicaSetDoesNothing(t *testing.T) {
 func TestSyncReplicaSetDeletes(t *testing.T) {
 	client := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
 	fakePodControl := controller.FakePodControl{}
-	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas)
+	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
 	manager.podStoreSynced = alwaysReady
 	manager.podControl = &fakePodControl
 
@@ -173,7 +173,7 @@ func TestSyncReplicaSetDeletes(t *testing.T) {
 func TestDeleteFinalStateUnknown(t *testing.T) {
 	client := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
 	fakePodControl := controller.FakePodControl{}
-	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas)
+	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
 	manager.podStoreSynced = alwaysReady
 	manager.podControl = &fakePodControl
 
@@ -206,7 +206,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
 
 func TestSyncReplicaSetCreates(t *testing.T) {
 	client := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas)
+	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
 	manager.podStoreSynced = alwaysReady
 
 	// A controller with 2 replicas and no pods in the store, 2 creates expected
@@ -229,7 +229,7 @@ func TestStatusUpdatesWithoutReplicasChange(t *testing.T) {
 	testServer := httptest.NewServer(&fakeHandler)
 	defer testServer.Close()
 	client := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas)
+	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
 	manager.podStoreSynced = alwaysReady
 
 	// Steady state for the ReplicaSet, no Status.Replicas updates expected
@@ -272,7 +272,7 @@ func TestControllerUpdateReplicas(t *testing.T) {
 	defer testServer.Close()
 
 	client := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas)
+	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
 	manager.podStoreSynced = alwaysReady
 
 	// Insufficient number of pods in the system, and Status.Replicas is wrong;
@@ -313,7 +313,7 @@ func TestSyncReplicaSetDormancy(t *testing.T) {
 	client := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
 
 	fakePodControl := controller.FakePodControl{}
-	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas)
+	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
 	manager.podStoreSynced = alwaysReady
 	manager.podControl = &fakePodControl
 
@@ -360,7 +360,7 @@ func TestSyncReplicaSetDormancy(t *testing.T) {
 }
 
 func TestPodControllerLookup(t *testing.T) {
-	manager := NewReplicaSetController(clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}), controller.NoResyncPeriodFunc, BurstReplicas)
+	manager := NewReplicaSetController(clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}), controller.NoResyncPeriodFunc, BurstReplicas, 0)
 	manager.podStoreSynced = alwaysReady
 	testCases := []struct {
 		inRSs []*extensions.ReplicaSet
@@ -428,7 +428,7 @@ func TestWatchControllers(t *testing.T) {
 	fakeWatch := watch.NewFake()
 	client := &fake.Clientset{}
 	client.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
-	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas)
+	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
 	manager.podStoreSynced = alwaysReady
 
 	var testRSSpec extensions.ReplicaSet
@@ -471,7 +471,7 @@ func TestWatchPods(t *testing.T) {
 	fakeWatch := watch.NewFake()
 	client := &fake.Clientset{}
 	client.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
-	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas)
+	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
 	manager.podStoreSynced = alwaysReady
 
 	// Put one ReplicaSet and one pod into the controller's stores
@@ -514,7 +514,7 @@ func TestWatchPods(t *testing.T) {
 }
 
 func TestUpdatePods(t *testing.T) {
-	manager := NewReplicaSetController(fake.NewSimpleClientset(), controller.NoResyncPeriodFunc, BurstReplicas)
+	manager := NewReplicaSetController(fake.NewSimpleClientset(), controller.NoResyncPeriodFunc, BurstReplicas, 0)
 	manager.podStoreSynced = alwaysReady
 
 	received := make(chan string)
@@ -575,7 +575,7 @@ func TestControllerUpdateRequeue(t *testing.T) {
 	defer testServer.Close()
 
 	client := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas)
+	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
 	manager.podStoreSynced = alwaysReady
 
 	labelMap := map[string]string{"foo": "bar"}
@@ -655,7 +655,7 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) {
 func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) {
 	client := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
 	fakePodControl := controller.FakePodControl{}
-	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, burstReplicas)
+	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, burstReplicas, 0)
 	manager.podStoreSynced = alwaysReady
 	manager.podControl = &fakePodControl
 
@@ -777,7 +777,7 @@ func (fe FakeRSExpectations) SatisfiedExpectations(controllerKey string) bool {
 func TestRSSyncExpectations(t *testing.T) {
 	client := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
 	fakePodControl := controller.FakePodControl{}
-	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, 2)
+	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, 2, 0)
 	manager.podStoreSynced = alwaysReady
 	manager.podControl = &fakePodControl
 
@@ -802,7 +802,7 @@ func TestRSSyncExpectations(t *testing.T) {
 
 func TestDeleteControllerAndExpectations(t *testing.T) {
 	client := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, 10)
+	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, 10, 0)
 	manager.podStoreSynced = alwaysReady
 
 	rs := newReplicaSet(1, map[string]string{"foo": "bar"})
@@ -845,7 +845,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
 func TestRSManagerNotReady(t *testing.T) {
 	client := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
 	fakePodControl := controller.FakePodControl{}
-	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, 2)
+	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, 2, 0)
 	manager.podControl = &fakePodControl
 	manager.podStoreSynced = func() bool { return false }
 
@@ -884,7 +884,7 @@ func TestOverlappingRSs(t *testing.T) {
 	labelMap := map[string]string{"foo": "bar"}
 
 	for i := 0; i < 5; i++ {
-		manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, 10)
+		manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, 10, 0)
 		manager.podStoreSynced = alwaysReady
 
 		// Create 10 ReplicaSets, shuffled them randomly and insert them into the ReplicaSet controller's store
@@ -94,7 +94,7 @@ type ReplicationManager struct {
 }
 
 // NewReplicationManager creates a new ReplicationManager.
-func NewReplicationManager(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int) *ReplicationManager {
+func NewReplicationManager(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int) *ReplicationManager {
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(glog.Infof)
 	eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{kubeClient.Core().Events("")})
@@ -190,7 +190,7 @@ func NewReplicationManager(kubeClient clientset.Interface, resyncPeriod controll
 
 	rm.syncHandler = rm.syncReplicationController
 	rm.podStoreSynced = rm.podController.HasSynced
-	rm.lookupCache = controller.NewMatchingCache(controller.DefaultCacheEntries)
+	rm.lookupCache = controller.NewMatchingCache(lookupCacheSize)
 	return rm
 }
 
@@ -137,7 +137,7 @@ type serverResponse struct {
 func TestSyncReplicationControllerDoesNothing(t *testing.T) {
 	c := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
 	fakePodControl := controller.FakePodControl{}
-	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
+	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
 	manager.podStoreSynced = alwaysReady
 
 	// 2 running pods, a controller with 2 replicas, sync is a no-op
@@ -153,7 +153,7 @@ func TestSyncReplicationControllerDoesNothing(t *testing.T) {
 func TestSyncReplicationControllerDeletes(t *testing.T) {
 	c := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
 	fakePodControl := controller.FakePodControl{}
-	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
+	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
 	manager.podStoreSynced = alwaysReady
 	manager.podControl = &fakePodControl
 
@@ -169,7 +169,7 @@ func TestSyncReplicationControllerDeletes(t *testing.T) {
 func TestDeleteFinalStateUnknown(t *testing.T) {
 	c := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
 	fakePodControl := controller.FakePodControl{}
-	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
+	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
 	manager.podStoreSynced = alwaysReady
 	manager.podControl = &fakePodControl
 
@@ -201,7 +201,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
 
 func TestSyncReplicationControllerCreates(t *testing.T) {
 	c := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
+	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
 	manager.podStoreSynced = alwaysReady
 
 	// A controller with 2 replicas and no pods in the store, 2 creates expected
@@ -224,7 +224,7 @@ func TestStatusUpdatesWithoutReplicasChange(t *testing.T) {
 	// TODO: Uncomment when fix #19254
 	// defer testServer.Close()
 	c := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
+	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
 	manager.podStoreSynced = alwaysReady
 
 	// Steady state for the replication controller, no Status.Replicas updates expected
@@ -266,7 +266,7 @@ func TestControllerUpdateReplicas(t *testing.T) {
 	// TODO: Uncomment when fix #19254
 	// defer testServer.Close()
 	c := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
+	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
 	manager.podStoreSynced = alwaysReady
 
 	// Insufficient number of pods in the system, and Status.Replicas is wrong;
@@ -306,7 +306,7 @@ func TestSyncReplicationControllerDormancy(t *testing.T) {
 	// defer testServer.Close()
 	c := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
 	fakePodControl := controller.FakePodControl{}
-	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
+	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
 	manager.podStoreSynced = alwaysReady
 	manager.podControl = &fakePodControl
 
@@ -352,7 +352,7 @@ func TestSyncReplicationControllerDormancy(t *testing.T) {
 }
 
 func TestPodControllerLookup(t *testing.T) {
-	manager := NewReplicationManager(clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}), controller.NoResyncPeriodFunc, BurstReplicas)
+	manager := NewReplicationManager(clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}), controller.NoResyncPeriodFunc, BurstReplicas, 0)
 	manager.podStoreSynced = alwaysReady
 	testCases := []struct {
 		inRCs []*api.ReplicationController
@@ -415,7 +415,7 @@ func TestWatchControllers(t *testing.T) {
 	fakeWatch := watch.NewFake()
 	c := &fake.Clientset{}
 	c.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
-	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
+	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
 	manager.podStoreSynced = alwaysReady
 
 	var testControllerSpec api.ReplicationController
@@ -458,7 +458,7 @@ func TestWatchPods(t *testing.T) {
 	fakeWatch := watch.NewFake()
 	c := &fake.Clientset{}
 	c.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
-	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
+	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
 	manager.podStoreSynced = alwaysReady
 
 	// Put one rc and one pod into the controller's stores
@@ -500,7 +500,7 @@ func TestWatchPods(t *testing.T) {
 }
 
 func TestUpdatePods(t *testing.T) {
-	manager := NewReplicationManager(fake.NewSimpleClientset(), controller.NoResyncPeriodFunc, BurstReplicas)
+	manager := NewReplicationManager(fake.NewSimpleClientset(), controller.NoResyncPeriodFunc, BurstReplicas, 0)
 	manager.podStoreSynced = alwaysReady
 
 	received := make(chan string)
@@ -560,7 +560,7 @@ func TestControllerUpdateRequeue(t *testing.T) {
 	// defer testServer.Close()
 
 	c := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
+	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
 	manager.podStoreSynced = alwaysReady
 
 	rc := newReplicationController(1)
@@ -641,7 +641,7 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) {
 func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) {
 	c := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
 	fakePodControl := controller.FakePodControl{}
-	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, burstReplicas)
+	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, burstReplicas, 0)
 	manager.podStoreSynced = alwaysReady
 	manager.podControl = &fakePodControl
 
@@ -761,7 +761,7 @@ func (fe FakeRCExpectations) SatisfiedExpectations(controllerKey string) bool {
 func TestRCSyncExpectations(t *testing.T) {
 	c := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
 	fakePodControl := controller.FakePodControl{}
-	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, 2)
+	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, 2, 0)
 	manager.podStoreSynced = alwaysReady
 	manager.podControl = &fakePodControl
 
@@ -785,7 +785,7 @@ func TestRCSyncExpectations(t *testing.T) {
 
 func TestDeleteControllerAndExpectations(t *testing.T) {
 	c := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, 10)
+	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, 10, 0)
 	manager.podStoreSynced = alwaysReady
 
 	rc := newReplicationController(1)
@@ -828,7 +828,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
 func TestRCManagerNotReady(t *testing.T) {
 	c := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
 	fakePodControl := controller.FakePodControl{}
-	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, 2)
+	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, 2, 0)
 	manager.podControl = &fakePodControl
 	manager.podStoreSynced = func() bool { return false }
 
@@ -866,7 +866,7 @@ func TestOverlappingRCs(t *testing.T) {
 	c := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
 
 	for i := 0; i < 5; i++ {
-		manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, 10)
+		manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, 10, 0)
 		manager.podStoreSynced = alwaysReady
 
 		// Create 10 rcs, shuffled them randomly and insert them into the rc manager's store
@@ -895,7 +895,7 @@ func TestOverlappingRCs(t *testing.T) {
 
 func BenchmarkGetPodControllerMultiNS(b *testing.B) {
 	client := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-	manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, BurstReplicas)
+	manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
 
 	const nsNum = 1000
 
@@ -941,7 +941,7 @@ func BenchmarkGetPodControllerMultiNS(b *testing.B) {
 
 func BenchmarkGetPodControllerSingleNS(b *testing.B) {
 	client := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-	manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, BurstReplicas)
+	manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
 
 	const rcNum = 1000
 	const replicaNum = 3
@@ -107,7 +107,7 @@ func NewMasterComponents(c *Config) *MasterComponents {
 	restClient := client.NewOrDie(&client.Config{Host: s.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}, QPS: c.QPS, Burst: c.Burst})
 	clientset := clientset.NewForConfigOrDie(&client.Config{Host: s.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}, QPS: c.QPS, Burst: c.Burst})
 	rcStopCh := make(chan struct{})
-	controllerManager := replicationcontroller.NewReplicationManager(clientset, controller.NoResyncPeriodFunc, c.Burst)
+	controllerManager := replicationcontroller.NewReplicationManager(clientset, controller.NoResyncPeriodFunc, c.Burst, 4096)
 
 	// TODO: Support events once we can cleanly shutdown an event recorder.
 	controllerManager.SetEventRecorder(&record.FakeRecorder{})