Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-29 22:46:12 +00:00
Merge pull request #35047 from deads2k/controller-11-rs-flakes
Automatic merge from submit-queue

fix more RS controller flakes

I saw another flake:

```
panic: Fail in goroutine after TestUpdatePods has completed

/usr/local/go/src/runtime/panic.go:500 +0x1ae
/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/pkg/util/runtime/runtime.go:56 +0x17d
/usr/local/go/src/runtime/panic.go:458 +0x271
/usr/local/go/src/testing/testing.go:412 +0x182
/usr/local/go/src/testing/testing.go:484 +0x95
/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/pkg/controller/replicaset/replica_set_test.go:619 +0x1d2
/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/pkg/controller/replicaset/replica_set.go:414 +0x191
/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/pkg/controller/replicaset/replica_set.go:403 +0x39
/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/pkg/controller/replicaset/replica_set.go:169 +0x42
/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/pkg/util/wait/wait.go:87 +0x70
/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/pkg/util/wait/wait.go:88 +0xbe
/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/pkg/util/wait/wait.go:49 +0x5b
/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/pkg/controller/replicaset/replica_set_test.go:625 +0x369
```

This resolves that by separating the listers from the watch, like it used to be in this set of tests. The tests were like this before the refactor. I think they limit utility, but I'm not prepared to re-write them all.

@kargakis
This commit is contained in: commit 29af9853fe
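The panic above is Go's testing package reporting that a controller goroutine was still recording failures after TestUpdatePods had returned. Decoupling the listers from the watch means each test populates a plain indexer-backed store itself, so no watch-fed goroutine touches test state during teardown. Below is a minimal sketch of that pattern, assuming the pre-client-go package layout this branch vendors (pkg/api, pkg/client/cache); it reuses the cache helpers from the diff that follows but is not the actual test code.

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
)

func main() {
	// An indexer-backed pod lister that the test owns outright: no informer
	// or watch goroutine ever writes to it, so nothing can report a failure
	// after the test has completed.
	podLister := &cache.StoreToPodLister{Indexer: cache.NewIndexer(
		cache.DeletionHandlingMetaNamespaceKeyFunc,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	)}

	// The test seeds the store directly instead of waiting on watch delivery.
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "pod-0", Namespace: "default"}}
	if err := podLister.Indexer.Add(pod); err != nil {
		fmt.Println("add failed:", err)
	}

	// The controller under test then reads the same store through the lister.
	fmt.Println("pods in store:", len(podLister.Indexer.List()))
}
```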
pkg/controller/replicaset/replica_set_test.go

```diff
@@ -52,6 +52,8 @@ import (
 func testNewReplicaSetControllerFromClient(client clientset.Interface, stopCh chan struct{}, burstReplicas int, lookupCacheSize int) *ReplicaSetController {
 	informers := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
 	ret := NewReplicaSetController(informers.ReplicaSets(), informers.Pods(), client, burstReplicas, lookupCacheSize, false)
+	ret.podLister = &cache.StoreToPodLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
+	ret.rsLister = &cache.StoreToReplicaSetLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
 	informers.Start(stopCh)
 	return ret
 }
@@ -299,8 +301,6 @@ func TestStatusUpdatesWithoutReplicasChange(t *testing.T) {
 	defer close(stopCh)
 	manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
 	manager.podListerSynced = alwaysReady
-	manager.podLister = &cache.StoreToPodLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
-	manager.rsLister = &cache.StoreToReplicaSetLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}

 	// Steady state for the ReplicaSet, no Status.Replicas updates expected
 	activePods := 5
@@ -347,8 +347,6 @@ func TestControllerUpdateReplicas(t *testing.T) {
 	defer close(stopCh)
 	manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
 	manager.podListerSynced = alwaysReady
-	manager.podLister = &cache.StoreToPodLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
-	manager.rsLister = &cache.StoreToReplicaSetLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}

 	// Insufficient number of pods in the system, and Status.Replicas is wrong;
 	// Status.Replica should update to match number of pods in system, 1 new pod should be created.
@@ -398,8 +396,7 @@ func TestSyncReplicaSetDormancy(t *testing.T) {
 	defer close(stopCh)
 	manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
 	manager.podListerSynced = alwaysReady
-	manager.podLister = &cache.StoreToPodLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
-	manager.rsLister = &cache.StoreToReplicaSetLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
+
 	manager.podControl = &fakePodControl

 	labelMap := map[string]string{"foo": "bar"}
@@ -524,7 +521,9 @@ func TestWatchControllers(t *testing.T) {
 	client.AddWatchReactor("replicasets", core.DefaultWatchReactor(fakeWatch, nil))
 	stopCh := make(chan struct{})
 	defer close(stopCh)
-	manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
+	informers := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
+	manager := NewReplicaSetController(informers.ReplicaSets(), informers.Pods(), client, BurstReplicas, 0, false)
+	informers.Start(stopCh)
 	manager.podListerSynced = alwaysReady

 	var testRSSpec extensions.ReplicaSet
@@ -567,8 +566,6 @@ func TestWatchPods(t *testing.T) {
 	defer close(stopCh)
 	manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
 	manager.podListerSynced = alwaysReady
-	manager.podLister = &cache.StoreToPodLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
-	manager.rsLister = &cache.StoreToReplicaSetLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}

 	// Put one ReplicaSet and one pod into the controller's stores
 	labelMap := map[string]string{"foo": "bar"}
@@ -691,8 +688,6 @@ func TestControllerUpdateRequeue(t *testing.T) {
 	client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
 	manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
 	manager.podListerSynced = alwaysReady
-	manager.podLister = &cache.StoreToPodLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
-	manager.rsLister = &cache.StoreToReplicaSetLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}

 	labelMap := map[string]string{"foo": "bar"}
 	rs := newReplicaSet(1, labelMap)
@@ -1131,8 +1126,7 @@ func setupManagerWithGCEnabled(stopCh chan struct{}, objs ...runtime.Object) (ma
 	manager = testNewReplicaSetControllerFromClient(c, stopCh, BurstReplicas, 0)
 	manager.garbageCollectorEnabled = true
 	manager.podListerSynced = alwaysReady
-	manager.podLister = &cache.StoreToPodLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
-	manager.rsLister = &cache.StoreToReplicaSetLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
+
 	manager.podControl = fakePodControl
 	return manager, fakePodControl
 }
@@ -1357,8 +1351,6 @@ func TestReadyReplicas(t *testing.T) {
 	defer close(stopCh)
 	manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
 	manager.podListerSynced = alwaysReady
-	manager.podLister = &cache.StoreToPodLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
-	manager.rsLister = &cache.StoreToReplicaSetLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}

 	// Status.Replica should update to match number of pods in system, 1 new pod should be created.
 	labelMap := map[string]string{"foo": "bar"}
@@ -1402,8 +1394,6 @@ func TestAvailableReplicas(t *testing.T) {
 	defer close(stopCh)
 	manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
 	manager.podListerSynced = alwaysReady
-	manager.podLister = &cache.StoreToPodLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
-	manager.rsLister = &cache.StoreToReplicaSetLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}

 	// Status.Replica should update to match number of pods in system, 1 new pod should be created.
 	labelMap := map[string]string{"foo": "bar"}
```
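With the helper owning its listers, a typical test in this file seeds the stores and drives a single reconcile pass synchronously. Roughly, as a hedged sketch that reuses identifiers from the hunks above: `getKey` and `syncReplicaSet` are assumed to be the existing helper and sync method in this package, and `client`/`t` come from the surrounding test; this is not verbatim test code.

```go
stopCh := make(chan struct{})
defer close(stopCh)

manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
manager.podListerSynced = alwaysReady

// Seed the standalone stores; nothing arrives via a watch, so no goroutine
// keeps mutating them (or failing the test) after the test returns.
labelMap := map[string]string{"foo": "bar"}
rs := newReplicaSet(1, labelMap)
manager.rsLister.Indexer.Add(rs)

// Run one synchronous reconcile for this ReplicaSet.
manager.syncReplicaSet(getKey(rs, t))
```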