@@ -22,6 +22,7 @@ import (
 	"fmt"
 	"math/rand"
 	"net/http/httptest"
+	"net/url"
 	"strings"
 	"testing"
 	"time"
@@ -38,6 +39,7 @@ import (
 	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/pkg/client/testing/core"
 	"k8s.io/kubernetes/pkg/controller"
+	"k8s.io/kubernetes/pkg/controller/informers"
 	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/securitycontext"
 	"k8s.io/kubernetes/pkg/util/sets"
@@ -47,6 +49,39 @@ import (
 	"k8s.io/kubernetes/pkg/watch"
 )
 
+func testNewReplicaSetControllerFromClient(client clientset.Interface, stopCh chan struct{}, burstReplicas int, lookupCacheSize int) *ReplicaSetController {
+	informers := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
+	ret := NewReplicaSetController(informers.ReplicaSets(), informers.Pods(), client, burstReplicas, lookupCacheSize, false)
+	informers.Start(stopCh)
+	return ret
+}
+
+func filterInformerActions(actions []core.Action) []core.Action {
+	ret := []core.Action{}
+	for _, action := range actions {
+		if len(action.GetNamespace()) == 0 &&
+			(action.Matches("list", "pods") ||
+				action.Matches("list", "replicasets") ||
+				action.Matches("watch", "pods") ||
+				action.Matches("watch", "replicasets")) {
+			continue
+		}
+		ret = append(ret, action)
+	}
+
+	return ret
+}
+
+func skipListerFunc(verb string, url url.URL) bool {
+	if verb != "GET" {
+		return false
+	}
+	if strings.HasSuffix(url.Path, "/pods") || strings.HasSuffix(url.Path, "/replicasets") {
+		return true
+	}
+	return false
+}
+
 var alwaysReady = func() bool { return true }
 
 func getKey(rs *extensions.ReplicaSet, t *testing.T) string {
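
The helper introduced above is the pivot of this patch: construction via a shared informer factory plus an explicit stop channel, instead of the old direct NewReplicaSetControllerFromClient call. A typical call site, as the hunks below show, looks like this sketch (client stands for any clientset.Interface built the way these tests already do):

	stopCh := make(chan struct{})
	defer close(stopCh) // stops the shared informers started by the helper
	manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
	manager.podListerSynced = alwaysReady // pretend the caches are synced

The stop channel must exist before the helper runs because informers.Start(stopCh) captures it; closing it is what terminates the informer goroutines at the end of each test.
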
@@ -161,14 +196,16 @@ type serverResponse struct {
 func TestSyncReplicaSetDoesNothing(t *testing.T) {
 	client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
 	fakePodControl := controller.FakePodControl{}
-	manager := NewReplicaSetControllerFromClient(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
-	manager.podStoreSynced = alwaysReady
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
+	manager.podListerSynced = alwaysReady
 
 	// 2 running pods, a controller with 2 replicas, sync is a no-op
 	labelMap := map[string]string{"foo": "bar"}
 	rsSpec := newReplicaSet(2, labelMap)
-	manager.rsStore.Indexer.Add(rsSpec)
-	newPodList(manager.podStore.Indexer, 2, api.PodRunning, labelMap, rsSpec, "pod")
+	manager.rsLister.Indexer.Add(rsSpec)
+	newPodList(manager.podLister.Indexer, 2, api.PodRunning, labelMap, rsSpec, "pod")
 
 	manager.podControl = &fakePodControl
 	manager.syncReplicaSet(getKey(rsSpec, t))
@@ -178,15 +215,17 @@ func TestSyncReplicaSetDoesNothing(t *testing.T) {
 func TestSyncReplicaSetDeletes(t *testing.T) {
 	client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
 	fakePodControl := controller.FakePodControl{}
-	manager := NewReplicaSetControllerFromClient(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
-	manager.podStoreSynced = alwaysReady
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
+	manager.podListerSynced = alwaysReady
 	manager.podControl = &fakePodControl
 
 	// 2 running pods and a controller with 1 replica, one pod delete expected
 	labelMap := map[string]string{"foo": "bar"}
 	rsSpec := newReplicaSet(1, labelMap)
-	manager.rsStore.Indexer.Add(rsSpec)
-	newPodList(manager.podStore.Indexer, 2, api.PodRunning, labelMap, rsSpec, "pod")
+	manager.rsLister.Indexer.Add(rsSpec)
+	newPodList(manager.podLister.Indexer, 2, api.PodRunning, labelMap, rsSpec, "pod")
 
 	manager.syncReplicaSet(getKey(rsSpec, t))
 	validateSyncReplicaSet(t, &fakePodControl, 0, 1, 0)
@@ -195,8 +234,10 @@ func TestSyncReplicaSetDeletes(t *testing.T) {
 func TestDeleteFinalStateUnknown(t *testing.T) {
 	client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
 	fakePodControl := controller.FakePodControl{}
-	manager := NewReplicaSetControllerFromClient(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
-	manager.podStoreSynced = alwaysReady
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
+	manager.podListerSynced = alwaysReady
 	manager.podControl = &fakePodControl
 
 	received := make(chan string)
@@ -209,7 +250,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
 	// the controller matching the selectors of the deleted pod into the work queue.
 	labelMap := map[string]string{"foo": "bar"}
 	rsSpec := newReplicaSet(1, labelMap)
-	manager.rsStore.Indexer.Add(rsSpec)
+	manager.rsLister.Indexer.Add(rsSpec)
 	pods := newPodList(nil, 1, api.PodRunning, labelMap, rsSpec, "pod")
 	manager.deletePod(cache.DeletedFinalStateUnknown{Key: "foo", Obj: &pods.Items[0]})
 
@@ -228,13 +269,15 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
 
 func TestSyncReplicaSetCreates(t *testing.T) {
 	client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
-	manager := NewReplicaSetControllerFromClient(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
-	manager.podStoreSynced = alwaysReady
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
+	manager.podListerSynced = alwaysReady
 
 	// A controller with 2 replicas and no pods in the store, 2 creates expected
 	labelMap := map[string]string{"foo": "bar"}
 	rs := newReplicaSet(2, labelMap)
-	manager.rsStore.Indexer.Add(rs)
+	manager.rsLister.Indexer.Add(rs)
 
 	fakePodControl := controller.FakePodControl{}
 	manager.podControl = &fakePodControl
@@ -245,22 +288,27 @@ func TestSyncReplicaSetCreates(t *testing.T) {
 func TestStatusUpdatesWithoutReplicasChange(t *testing.T) {
 	// Set up a fake server to listen for requests, and run the ReplicaSet controller in steady state
 	fakeHandler := utiltesting.FakeHandler{
-		StatusCode:   200,
-		ResponseBody: "{}",
+		StatusCode:    200,
+		ResponseBody:  "{}",
+		SkipRequestFn: skipListerFunc,
 	}
 	testServer := httptest.NewServer(&fakeHandler)
 	defer testServer.Close()
 	client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
-	manager := NewReplicaSetControllerFromClient(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
-	manager.podStoreSynced = alwaysReady
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
+	manager.podListerSynced = alwaysReady
+	manager.podLister = &cache.StoreToPodLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
+	manager.rsLister = &cache.StoreToReplicaSetLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
 
 	// Steady state for the ReplicaSet, no Status.Replicas updates expected
 	activePods := 5
 	labelMap := map[string]string{"foo": "bar"}
 	rs := newReplicaSet(activePods, labelMap)
-	manager.rsStore.Indexer.Add(rs)
+	manager.rsLister.Indexer.Add(rs)
 	rs.Status = extensions.ReplicaSetStatus{Replicas: int32(activePods), ReadyReplicas: int32(activePods), AvailableReplicas: int32(activePods)}
-	newPodList(manager.podStore.Indexer, activePods, api.PodRunning, labelMap, rs, "pod")
+	newPodList(manager.podLister.Indexer, activePods, api.PodRunning, labelMap, rs, "pod")
 
 	fakePodControl := controller.FakePodControl{}
 	manager.podControl = &fakePodControl
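
A note on the SkipRequestFn field now threaded into every FakeHandler in this file: with shared informers running, the controller issues real LIST/WATCH requests against the fake server, and those would be recorded alongside the status PUTs the tests assert on. skipListerFunc (added at the top of this file) drops exactly that lister traffic. A sketch of the assertion style this preserves — assuming FakeHandler's ValidateRequestCount helper, which the status tests in this file rely on:

	// With lister GETs skipped, "steady state" can remain a plain
	// request-count assertion on the recorded traffic.
	manager.syncReplicaSet(getKey(rs, t))
	validateSyncReplicaSet(t, &fakePodControl, 0, 0, 0)
	fakeHandler.ValidateRequestCount(t, 0)
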
@@ -274,7 +322,7 @@ func TestStatusUpdatesWithoutReplicasChange(t *testing.T) {
 	// This response body is just so we don't err out decoding the http response, all
 	// we care about is the request body sent below.
 	response := runtime.EncodeOrDie(testapi.Extensions.Codec(), &extensions.ReplicaSet{})
-	fakeHandler.ResponseBody = response
+	fakeHandler.SetResponseBody(response)
 
 	rs.Generation = rs.Generation + 1
 	manager.syncReplicaSet(getKey(rs, t))
@@ -287,15 +335,20 @@ func TestStatusUpdatesWithoutReplicasChange(t *testing.T) {
 func TestControllerUpdateReplicas(t *testing.T) {
 	// This is a happy server just to record the PUT request we expect for status.Replicas
 	fakeHandler := utiltesting.FakeHandler{
-		StatusCode:   200,
-		ResponseBody: "{}",
+		StatusCode:    200,
+		ResponseBody:  "{}",
+		SkipRequestFn: skipListerFunc,
 	}
 	testServer := httptest.NewServer(&fakeHandler)
 	defer testServer.Close()
 
 	client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
-	manager := NewReplicaSetControllerFromClient(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
-	manager.podStoreSynced = alwaysReady
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
+	manager.podListerSynced = alwaysReady
+	manager.podLister = &cache.StoreToPodLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
+	manager.rsLister = &cache.StoreToReplicaSetLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
 
 	// Insufficient number of pods in the system, and Status.Replicas is wrong;
 	// Status.Replicas should update to match the number of pods in the system, 1 new pod should be created.
@@ -303,15 +356,15 @@ func TestControllerUpdateReplicas(t *testing.T) {
 	extraLabelMap := map[string]string{"foo": "bar", "extraKey": "extraValue"}
 	rs := newReplicaSet(5, labelMap)
 	rs.Spec.Template.Labels = extraLabelMap
-	manager.rsStore.Indexer.Add(rs)
+	manager.rsLister.Indexer.Add(rs)
 	rs.Status = extensions.ReplicaSetStatus{Replicas: 2, FullyLabeledReplicas: 6, ReadyReplicas: 2, AvailableReplicas: 2, ObservedGeneration: 0}
 	rs.Generation = 1
-	newPodList(manager.podStore.Indexer, 2, api.PodRunning, labelMap, rs, "pod")
-	newPodList(manager.podStore.Indexer, 2, api.PodRunning, extraLabelMap, rs, "podWithExtraLabel")
+	newPodList(manager.podLister.Indexer, 2, api.PodRunning, labelMap, rs, "pod")
+	newPodList(manager.podLister.Indexer, 2, api.PodRunning, extraLabelMap, rs, "podWithExtraLabel")
 
 	// This response body is just so we don't err out decoding the http response
 	response := runtime.EncodeOrDie(testapi.Extensions.Codec(), &extensions.ReplicaSet{})
-	fakeHandler.ResponseBody = response
+	fakeHandler.SetResponseBody(response)
 
 	fakePodControl := controller.FakePodControl{}
 	manager.podControl = &fakePodControl
@@ -332,22 +385,27 @@ func TestControllerUpdateReplicas(t *testing.T) {
 func TestSyncReplicaSetDormancy(t *testing.T) {
 	// Set up a test server so we can lie about the current state of pods
 	fakeHandler := utiltesting.FakeHandler{
-		StatusCode:   200,
-		ResponseBody: "{}",
+		StatusCode:    200,
+		ResponseBody:  "{}",
+		SkipRequestFn: skipListerFunc,
 	}
 	testServer := httptest.NewServer(&fakeHandler)
 	defer testServer.Close()
 	client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
 
 	fakePodControl := controller.FakePodControl{}
-	manager := NewReplicaSetControllerFromClient(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
-	manager.podStoreSynced = alwaysReady
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
+	manager.podListerSynced = alwaysReady
+	manager.podLister = &cache.StoreToPodLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
+	manager.rsLister = &cache.StoreToReplicaSetLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
 	manager.podControl = &fakePodControl
 
 	labelMap := map[string]string{"foo": "bar"}
 	rsSpec := newReplicaSet(2, labelMap)
-	manager.rsStore.Indexer.Add(rsSpec)
-	newPodList(manager.podStore.Indexer, 1, api.PodRunning, labelMap, rsSpec, "pod")
+	manager.rsLister.Indexer.Add(rsSpec)
+	newPodList(manager.podLister.Indexer, 1, api.PodRunning, labelMap, rsSpec, "pod")
 
 	// Creates a replica and sets expectations
 	rsSpec.Status.Replicas = 1
@@ -394,8 +452,10 @@ func TestSyncReplicaSetDormancy(t *testing.T) {
 }
 
 func TestPodControllerLookup(t *testing.T) {
-	manager := NewReplicaSetControllerFromClient(clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}), controller.NoResyncPeriodFunc, BurstReplicas, 0)
-	manager.podStoreSynced = alwaysReady
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	manager := testNewReplicaSetControllerFromClient(clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}), stopCh, BurstReplicas, 0)
+	manager.podListerSynced = alwaysReady
 	testCases := []struct {
 		inRSs     []*extensions.ReplicaSet
 		pod       *api.Pod
@@ -441,7 +501,7 @@ func TestPodControllerLookup(t *testing.T) {
 	}
 	for _, c := range testCases {
 		for _, r := range c.inRSs {
-			manager.rsStore.Indexer.Add(r)
+			manager.rsLister.Indexer.Add(r)
 		}
 		if rs := manager.getPodReplicaSet(c.pod); rs != nil {
 			if c.outRSName != rs.Name {
@@ -461,9 +521,11 @@ type FakeWatcher struct {
 func TestWatchControllers(t *testing.T) {
 	fakeWatch := watch.NewFake()
 	client := &fake.Clientset{}
-	client.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
-	manager := NewReplicaSetControllerFromClient(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
-	manager.podStoreSynced = alwaysReady
+	client.AddWatchReactor("replicasets", core.DefaultWatchReactor(fakeWatch, nil))
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
+	manager.podListerSynced = alwaysReady
 
 	var testRSSpec extensions.ReplicaSet
 	received := make(chan string)
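
The reactor registration change above is deliberate: with a pod informer now running alongside the ReplicaSet informer, a wildcard ("*") watch reactor would deliver the same fakeWatch stream to both watchers. Scoping the reactor keeps the fake events flowing only to the resource under test, roughly:

	fakeWatch := watch.NewFake()
	client := &fake.Clientset{}
	// Only replicasets watches are served by fakeWatch; the pod informer's
	// watch is simply not driven by this test.
	client.AddWatchReactor("replicasets", core.DefaultWatchReactor(fakeWatch, nil))

The TestWatchPods hunk below makes the mirror-image change, registering the reactor for "pods".
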
@@ -472,8 +534,7 @@ func TestWatchControllers(t *testing.T) {
 	// and eventually into the syncHandler. The handler validates the received controller
 	// and closes the received channel to indicate that the test can finish.
 	manager.syncHandler = func(key string) error {
-
-		obj, exists, err := manager.rsStore.Indexer.GetByKey(key)
+		obj, exists, err := manager.rsLister.Indexer.GetByKey(key)
 		if !exists || err != nil {
 			t.Errorf("Expected to find replica set under key %v", key)
 		}
@@ -486,9 +547,6 @@ func TestWatchControllers(t *testing.T) {
 	}
 	// Start only the ReplicaSet watcher and the workqueue, send a watch event,
 	// and make sure it hits the sync method.
-	stopCh := make(chan struct{})
-	defer close(stopCh)
-	go manager.rsController.Run(stopCh)
 	go wait.Until(manager.worker, 10*time.Millisecond, stopCh)
 
 	testRSSpec.Name = "foo"
@@ -504,20 +562,23 @@ func TestWatchControllers(t *testing.T) {
 func TestWatchPods(t *testing.T) {
 	fakeWatch := watch.NewFake()
 	client := &fake.Clientset{}
-	client.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
-	manager := NewReplicaSetControllerFromClient(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
-	manager.podStoreSynced = alwaysReady
+	client.AddWatchReactor("pods", core.DefaultWatchReactor(fakeWatch, nil))
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
+	manager.podListerSynced = alwaysReady
+	manager.podLister = &cache.StoreToPodLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
+	manager.rsLister = &cache.StoreToReplicaSetLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
 
 	// Put one ReplicaSet and one pod into the controller's stores
 	labelMap := map[string]string{"foo": "bar"}
 	testRSSpec := newReplicaSet(1, labelMap)
-	manager.rsStore.Indexer.Add(testRSSpec)
+	manager.rsLister.Indexer.Add(testRSSpec)
 	received := make(chan string)
 	// The pod update sent through the fakeWatcher should figure out the managing ReplicaSet and
 	// send it into the syncHandler.
 	manager.syncHandler = func(key string) error {
-
-		obj, exists, err := manager.rsStore.Indexer.GetByKey(key)
+		obj, exists, err := manager.rsLister.Indexer.GetByKey(key)
 		if !exists || err != nil {
 			t.Errorf("Expected to find replica set under key %v", key)
 		}
@@ -530,10 +591,6 @@ func TestWatchPods(t *testing.T) {
 	}
 	// Start only the pod watcher and the workqueue, send a watch event,
 	// and make sure it hits the sync method for the right ReplicaSet.
-	stopCh := make(chan struct{})
-	defer close(stopCh)
-	go manager.podController.Run(stopCh)
-	go manager.internalPodInformer.Run(stopCh)
 	go wait.Until(manager.worker, 10*time.Millisecond, stopCh)
 
 	pods := newPodList(nil, 1, api.PodRunning, labelMap, testRSSpec, "pod")
@@ -549,13 +606,15 @@ func TestWatchPods(t *testing.T) {
 }
 
 func TestUpdatePods(t *testing.T) {
-	manager := NewReplicaSetControllerFromClient(fake.NewSimpleClientset(), controller.NoResyncPeriodFunc, BurstReplicas, 0)
-	manager.podStoreSynced = alwaysReady
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	manager := testNewReplicaSetControllerFromClient(fake.NewSimpleClientset(), stopCh, BurstReplicas, 0)
+	manager.podListerSynced = alwaysReady
 
 	received := make(chan string)
 
 	manager.syncHandler = func(key string) error {
-		obj, exists, err := manager.rsStore.Indexer.GetByKey(key)
+		obj, exists, err := manager.rsLister.Indexer.GetByKey(key)
 		if !exists || err != nil {
 			t.Errorf("Expected to find replica set under key %v", key)
 		}
@@ -563,24 +622,22 @@ func TestUpdatePods(t *testing.T) {
 		return nil
 	}
 
-	stopCh := make(chan struct{})
-	defer close(stopCh)
 	go wait.Until(manager.worker, 10*time.Millisecond, stopCh)
 
 	// Put 2 ReplicaSets and one pod into the controller's stores
 	labelMap1 := map[string]string{"foo": "bar"}
 	testRSSpec1 := newReplicaSet(1, labelMap1)
-	manager.rsStore.Indexer.Add(testRSSpec1)
+	manager.rsLister.Indexer.Add(testRSSpec1)
 	testRSSpec2 := *testRSSpec1
 	labelMap2 := map[string]string{"bar": "foo"}
 	testRSSpec2.Spec.Selector = &unversioned.LabelSelector{MatchLabels: labelMap2}
 	testRSSpec2.Name = "barfoo"
-	manager.rsStore.Indexer.Add(&testRSSpec2)
+	manager.rsLister.Indexer.Add(&testRSSpec2)
 
-	// case 1: We put in the podStore a pod with labels matching testRSSpec1,
+	// case 1: We put in the podLister a pod with labels matching testRSSpec1,
 	// then update its labels to match testRSSpec2. We expect to receive a sync
 	// request for both replica sets.
-	pod1 := newPodList(manager.podStore.Indexer, 1, api.PodRunning, labelMap1, testRSSpec1, "pod").Items[0]
+	pod1 := newPodList(manager.podLister.Indexer, 1, api.PodRunning, labelMap1, testRSSpec1, "pod").Items[0]
 	pod1.ResourceVersion = "1"
 	pod2 := pod1
 	pod2.Labels = labelMap2
@@ -599,7 +656,7 @@ func TestUpdatePods(t *testing.T) {
 		}
 	}
 
-	// case 2: pod1 in the podStore has labels matching testRSSpec1. We update
+	// case 2: pod1 in the podLister has labels matching testRSSpec1. We update
 	// its labels to match no replica set. We expect to receive a sync request
 	// for testRSSpec1.
 	pod2.Labels = make(map[string]string)
@@ -622,21 +679,26 @@ func TestUpdatePods(t *testing.T) {
 func TestControllerUpdateRequeue(t *testing.T) {
 	// This server should force a requeue of the controller because it fails to update status.Replicas.
 	fakeHandler := utiltesting.FakeHandler{
-		StatusCode:   500,
-		ResponseBody: "{}",
+		StatusCode:    500,
+		ResponseBody:  "{}",
+		SkipRequestFn: skipListerFunc,
 	}
 	testServer := httptest.NewServer(&fakeHandler)
 	defer testServer.Close()
 
+	stopCh := make(chan struct{})
+	defer close(stopCh)
 	client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
-	manager := NewReplicaSetControllerFromClient(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
-	manager.podStoreSynced = alwaysReady
+	manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
+	manager.podListerSynced = alwaysReady
+	manager.podLister = &cache.StoreToPodLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
+	manager.rsLister = &cache.StoreToReplicaSetLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
 
 	labelMap := map[string]string{"foo": "bar"}
 	rs := newReplicaSet(1, labelMap)
-	manager.rsStore.Indexer.Add(rs)
+	manager.rsLister.Indexer.Add(rs)
 	rs.Status = extensions.ReplicaSetStatus{Replicas: 2}
-	newPodList(manager.podStore.Indexer, 1, api.PodRunning, labelMap, rs, "pod")
+	newPodList(manager.podLister.Indexer, 1, api.PodRunning, labelMap, rs, "pod")
 
 	fakePodControl := controller.FakePodControl{}
 	manager.podControl = &fakePodControl
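
For orientation: the 500 status above makes the controller's status PUT fail, so syncReplicaSet returns without persisting Status.Replicas and the ReplicaSet key is expected to land back on the work queue. A sketch of that expectation (not the verbatim remainder of the test; the queue is the controller's existing workqueue):

	manager.syncReplicaSet(getKey(rs, t))
	// After the failed status update the ReplicaSet key should be requeued.
	queueRS, _ := manager.queue.Get()
	if queueRS != getKey(rs, t) {
		t.Errorf("expected requeue of %v, got %v", getKey(rs, t), queueRS)
	}
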
@@ -698,13 +760,15 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) {
 func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) {
 	client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
 	fakePodControl := controller.FakePodControl{}
-	manager := NewReplicaSetControllerFromClient(client, controller.NoResyncPeriodFunc, burstReplicas, 0)
-	manager.podStoreSynced = alwaysReady
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	manager := testNewReplicaSetControllerFromClient(client, stopCh, burstReplicas, 0)
+	manager.podListerSynced = alwaysReady
 	manager.podControl = &fakePodControl
 
 	labelMap := map[string]string{"foo": "bar"}
 	rsSpec := newReplicaSet(numReplicas, labelMap)
-	manager.rsStore.Indexer.Add(rsSpec)
+	manager.rsLister.Indexer.Add(rsSpec)
 
 	expectedPods := int32(0)
 	pods := newPodList(nil, numReplicas, api.PodPending, labelMap, rsSpec, "pod")
@@ -718,14 +782,14 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
 	for _, replicas := range []int32{int32(numReplicas), 0} {
 
 		rsSpec.Spec.Replicas = replicas
-		manager.rsStore.Indexer.Add(rsSpec)
+		manager.rsLister.Indexer.Add(rsSpec)
 
 		for i := 0; i < numReplicas; i += burstReplicas {
 			manager.syncReplicaSet(getKey(rsSpec, t))
 
 			// The store accrues active pods. It's also used by the ReplicaSet to determine how many
 			// replicas to create.
-			activePods := int32(len(manager.podStore.Indexer.List()))
+			activePods := int32(len(manager.podLister.Indexer.List()))
 			if replicas != 0 {
 				// This is the number of pods currently "in flight". They were created by the
 				// ReplicaSet controller above, which then puts the ReplicaSet to sleep till
@@ -740,7 +804,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
 			// This simulates the watch events for all but 1 of the expected pods.
 			// None of these should wake the controller because it has expectations==BurstReplicas.
 			for i := int32(0); i < expectedPods-1; i++ {
-				manager.podStore.Indexer.Add(&pods.Items[i])
+				manager.podLister.Indexer.Add(&pods.Items[i])
 				manager.addPod(&pods.Items[i])
 			}
 
@@ -776,7 +840,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
 			// has exactly one expectation at the end, to verify that we
 			// don't double delete.
 			for i := range podsToDelete[1:] {
-				manager.podStore.Indexer.Delete(podsToDelete[i])
+				manager.podLister.Indexer.Delete(podsToDelete[i])
 				manager.deletePod(podsToDelete[i])
 			}
 			podExp, exists, err := manager.expectations.GetExpectations(rsKey)
@@ -797,7 +861,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
 			// The last add pod will decrease the expectation of the ReplicaSet to 0,
 			// which will cause it to create/delete the remaining replicas up to burstReplicas.
 			if replicas != 0 {
-				manager.podStore.Indexer.Add(&pods.Items[expectedPods-1])
+				manager.podLister.Indexer.Add(&pods.Items[expectedPods-1])
 				manager.addPod(&pods.Items[expectedPods-1])
 			} else {
 				expectedDel := manager.expectations.GetUIDs(getKey(rsSpec, t))
@@ -812,14 +876,14 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
 					Labels: rsSpec.Spec.Selector.MatchLabels,
 				},
 			}
-			manager.podStore.Indexer.Delete(lastPod)
+			manager.podLister.Indexer.Delete(lastPod)
 			manager.deletePod(lastPod)
 		}
 		pods.Items = pods.Items[expectedPods:]
 	}
 
 	// Confirm that we've created the right number of replicas
-	activePods := int32(len(manager.podStore.Indexer.List()))
+	activePods := int32(len(manager.podLister.Indexer.List()))
 	if activePods != rsSpec.Spec.Replicas {
 		t.Fatalf("Unexpected number of active pods, expected %d, got %d", rsSpec.Spec.Replicas, activePods)
 	}
@@ -850,15 +914,17 @@ func (fe FakeRSExpectations) SatisfiedExpectations(controllerKey string) bool {
 func TestRSSyncExpectations(t *testing.T) {
 	client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
 	fakePodControl := controller.FakePodControl{}
-	manager := NewReplicaSetControllerFromClient(client, controller.NoResyncPeriodFunc, 2, 0)
-	manager.podStoreSynced = alwaysReady
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	manager := testNewReplicaSetControllerFromClient(client, stopCh, 2, 0)
+	manager.podListerSynced = alwaysReady
 	manager.podControl = &fakePodControl
 
 	labelMap := map[string]string{"foo": "bar"}
 	rsSpec := newReplicaSet(2, labelMap)
-	manager.rsStore.Indexer.Add(rsSpec)
+	manager.rsLister.Indexer.Add(rsSpec)
 	pods := newPodList(nil, 2, api.PodPending, labelMap, rsSpec, "pod")
-	manager.podStore.Indexer.Add(&pods.Items[0])
+	manager.podLister.Indexer.Add(&pods.Items[0])
 	postExpectationsPod := pods.Items[1]
 
 	manager.expectations = controller.NewUIDTrackingControllerExpectations(FakeRSExpectations{
@@ -866,7 +932,7 @@ func TestRSSyncExpectations(t *testing.T) {
 		// If we check active pods before checking expectations, the
 		// ReplicaSet will create a new replica because it doesn't see
 		// this pod, but has fulfilled its expectations.
-		manager.podStore.Indexer.Add(&postExpectationsPod)
+		manager.podLister.Indexer.Add(&postExpectationsPod)
 	},
 	})
 	manager.syncReplicaSet(getKey(rsSpec, t))
@@ -875,11 +941,13 @@ func TestRSSyncExpectations(t *testing.T) {
 
 func TestDeleteControllerAndExpectations(t *testing.T) {
 	client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
-	manager := NewReplicaSetControllerFromClient(client, controller.NoResyncPeriodFunc, 10, 0)
-	manager.podStoreSynced = alwaysReady
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	manager := testNewReplicaSetControllerFromClient(client, stopCh, 10, 0)
+	manager.podListerSynced = alwaysReady
 
 	rs := newReplicaSet(1, map[string]string{"foo": "bar"})
-	manager.rsStore.Indexer.Add(rs)
+	manager.rsLister.Indexer.Add(rs)
 
 	fakePodControl := controller.FakePodControl{}
 	manager.podControl = &fakePodControl
@@ -901,7 +969,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
 	if !exists || err != nil {
 		t.Errorf("No expectations found for ReplicaSet")
 	}
-	manager.rsStore.Indexer.Delete(rs)
+	manager.rsLister.Indexer.Delete(rs)
 	manager.syncReplicaSet(getKey(rs, t))
 
 	if _, exists, err = manager.expectations.GetExpectations(rsKey); exists {
@@ -910,37 +978,11 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
 
 	// This should have no effect, since we've deleted the ReplicaSet.
 	podExp.Add(-1, 0)
-	manager.podStore.Indexer.Replace(make([]interface{}, 0), "0")
+	manager.podLister.Indexer.Replace(make([]interface{}, 0), "0")
 	manager.syncReplicaSet(getKey(rs, t))
 	validateSyncReplicaSet(t, &fakePodControl, 0, 0, 0)
 }
 
-func TestRSManagerNotReady(t *testing.T) {
-	client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
-	fakePodControl := controller.FakePodControl{}
-	manager := NewReplicaSetControllerFromClient(client, controller.NoResyncPeriodFunc, 2, 0)
-	manager.podControl = &fakePodControl
-	manager.podStoreSynced = func() bool { return false }
-
-	// Simulates the ReplicaSet reflector running before the pod reflector. We don't
-	// want to end up creating replicas in this case until the pod reflector
-	// has synced, so the ReplicaSet controller should just requeue the ReplicaSet.
-	rsSpec := newReplicaSet(1, map[string]string{"foo": "bar"})
-	manager.rsStore.Indexer.Add(rsSpec)
-
-	rsKey := getKey(rsSpec, t)
-	manager.syncReplicaSet(rsKey)
-	validateSyncReplicaSet(t, &fakePodControl, 0, 0, 0)
-	queueRS, _ := manager.queue.Get()
-	if queueRS != rsKey {
-		t.Fatalf("Expected to find key %v in queue, found %v", rsKey, queueRS)
-	}
-
-	manager.podStoreSynced = alwaysReady
-	manager.syncReplicaSet(rsKey)
-	validateSyncReplicaSet(t, &fakePodControl, 1, 0, 0)
-}
-
 // shuffle returns a new shuffled list of container controllers.
 func shuffle(controllers []*extensions.ReplicaSet) []*extensions.ReplicaSet {
 	numControllers := len(controllers)
@@ -957,41 +999,47 @@ func TestOverlappingRSs(t *testing.T) {
 	labelMap := map[string]string{"foo": "bar"}
 
 	for i := 0; i < 5; i++ {
-		manager := NewReplicaSetControllerFromClient(client, controller.NoResyncPeriodFunc, 10, 0)
-		manager.podStoreSynced = alwaysReady
+		func() {
+			stopCh := make(chan struct{})
+			defer close(stopCh)
+			manager := testNewReplicaSetControllerFromClient(client, stopCh, 10, 0)
+			manager.podListerSynced = alwaysReady
 
-		// Create 10 ReplicaSets, shuffle them randomly and insert them into the ReplicaSet controller's store
-		var controllers []*extensions.ReplicaSet
-		for j := 1; j < 10; j++ {
-			rsSpec := newReplicaSet(1, labelMap)
-			rsSpec.CreationTimestamp = unversioned.Date(2014, time.December, j, 0, 0, 0, 0, time.Local)
-			rsSpec.Name = string(uuid.NewUUID())
-			controllers = append(controllers, rsSpec)
-		}
-		shuffledControllers := shuffle(controllers)
-		for j := range shuffledControllers {
-			manager.rsStore.Indexer.Add(shuffledControllers[j])
-		}
-		// Add a pod and make sure only the oldest ReplicaSet is synced
-		pods := newPodList(nil, 1, api.PodPending, labelMap, controllers[0], "pod")
-		rsKey := getKey(controllers[0], t)
+			// Create 10 ReplicaSets, shuffle them randomly and insert them into the ReplicaSet controller's store
+			var controllers []*extensions.ReplicaSet
+			for j := 1; j < 10; j++ {
+				rsSpec := newReplicaSet(1, labelMap)
+				rsSpec.CreationTimestamp = unversioned.Date(2014, time.December, j, 0, 0, 0, 0, time.Local)
+				rsSpec.Name = string(uuid.NewUUID())
+				controllers = append(controllers, rsSpec)
+			}
+			shuffledControllers := shuffle(controllers)
+			for j := range shuffledControllers {
+				manager.rsLister.Indexer.Add(shuffledControllers[j])
+			}
+			// Add a pod and make sure only the oldest ReplicaSet is synced
+			pods := newPodList(nil, 1, api.PodPending, labelMap, controllers[0], "pod")
+			rsKey := getKey(controllers[0], t)
 
-		manager.addPod(&pods.Items[0])
-		queueRS, _ := manager.queue.Get()
-		if queueRS != rsKey {
-			t.Fatalf("Expected to find key %v in queue, found %v", rsKey, queueRS)
-		}
+			manager.addPod(&pods.Items[0])
+			queueRS, _ := manager.queue.Get()
+			if queueRS != rsKey {
+				t.Fatalf("Expected to find key %v in queue, found %v", rsKey, queueRS)
+			}
+		}()
 	}
 }
 
 func TestDeletionTimestamp(t *testing.T) {
 	c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
 	labelMap := map[string]string{"foo": "bar"}
-	manager := NewReplicaSetControllerFromClient(c, controller.NoResyncPeriodFunc, 10, 0)
-	manager.podStoreSynced = alwaysReady
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	manager := testNewReplicaSetControllerFromClient(c, stopCh, 10, 0)
+	manager.podListerSynced = alwaysReady
 
 	rs := newReplicaSet(1, labelMap)
-	manager.rsStore.Indexer.Add(rs)
+	manager.rsLister.Indexer.Add(rs)
 	rsKey, err := controller.KeyFunc(rs)
 	if err != nil {
 		t.Errorf("Couldn't get key for object %#v: %v", rs, err)
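
The closure wrapper introduced in TestOverlappingRSs above is worth calling out: defer fires at function return, so without the func(){...}() each iteration's close(stopCh) would be postponed to the end of the whole test and five sets of informer goroutines would pile up. The pattern in isolation:

	for i := 0; i < 5; i++ {
		func() {
			stopCh := make(chan struct{})
			defer close(stopCh) // runs when this iteration's closure returns
			manager := testNewReplicaSetControllerFromClient(client, stopCh, 10, 0)
			_ = manager // per-iteration setup and assertions go here
		}()
	}
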
@@ -1077,12 +1125,14 @@ func TestDeletionTimestamp(t *testing.T) {
 
 // setupManagerWithGCEnabled creates a RS manager with a fakePodControl
 // and with garbageCollectorEnabled set to true
-func setupManagerWithGCEnabled(objs ...runtime.Object) (manager *ReplicaSetController, fakePodControl *controller.FakePodControl) {
+func setupManagerWithGCEnabled(stopCh chan struct{}, objs ...runtime.Object) (manager *ReplicaSetController, fakePodControl *controller.FakePodControl) {
 	c := fakeclientset.NewSimpleClientset(objs...)
 	fakePodControl = &controller.FakePodControl{}
-	manager = NewReplicaSetControllerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
+	manager = testNewReplicaSetControllerFromClient(c, stopCh, BurstReplicas, 0)
 	manager.garbageCollectorEnabled = true
-	manager.podStoreSynced = alwaysReady
+	manager.podListerSynced = alwaysReady
+	manager.podLister = &cache.StoreToPodLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
+	manager.rsLister = &cache.StoreToReplicaSetLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
 	manager.podControl = fakePodControl
 	return manager, fakePodControl
 }
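
With the signature change above, callers own the stop channel's lifetime rather than the helper; every GC-enabled test below now follows the same shape:

	stopCh := make(chan struct{})
	defer close(stopCh)
	manager, fakePodControl := setupManagerWithGCEnabled(stopCh, rs)
	manager.rsLister.Indexer.Add(rs)

Passing stopCh in, instead of creating it inside the helper, is what lets each test decide when its informers shut down.
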
@@ -1090,14 +1140,16 @@ func setupManagerWithGCEnabled(objs ...runtime.Object) (manager *ReplicaSetContr
 func TestDoNotPatchPodWithOtherControlRef(t *testing.T) {
 	labelMap := map[string]string{"foo": "bar"}
 	rs := newReplicaSet(2, labelMap)
-	manager, fakePodControl := setupManagerWithGCEnabled(rs)
-	manager.rsStore.Indexer.Add(rs)
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	manager, fakePodControl := setupManagerWithGCEnabled(stopCh, rs)
+	manager.rsLister.Indexer.Add(rs)
 	var trueVar = true
 	otherControllerReference := api.OwnerReference{UID: uuid.NewUUID(), APIVersion: "v1beta1", Kind: "ReplicaSet", Name: "AnotherRS", Controller: &trueVar}
-	// add to podStore a matching Pod controlled by another controller. Expect no patch.
+	// add to podLister a matching Pod controlled by another controller. Expect no patch.
 	pod := newPod("pod", rs, api.PodRunning, nil)
 	pod.OwnerReferences = []api.OwnerReference{otherControllerReference}
-	manager.podStore.Indexer.Add(pod)
+	manager.podLister.Indexer.Add(pod)
 	err := manager.syncReplicaSet(getKey(rs, t))
 	if err != nil {
 		t.Fatal(err)
@@ -1109,15 +1161,17 @@ func TestDoNotPatchPodWithOtherControlRef(t *testing.T) {
 func TestPatchPodWithOtherOwnerRef(t *testing.T) {
 	labelMap := map[string]string{"foo": "bar"}
 	rs := newReplicaSet(2, labelMap)
-	manager, fakePodControl := setupManagerWithGCEnabled(rs)
-	manager.rsStore.Indexer.Add(rs)
-	// add to podStore one more matching pod that doesn't have a controller
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	manager, fakePodControl := setupManagerWithGCEnabled(stopCh, rs)
+	manager.rsLister.Indexer.Add(rs)
+	// add to podLister one more matching pod that doesn't have a controller
 	// ref, but has an owner ref pointing to another object. Expect a patch to
 	// take control of it.
 	unrelatedOwnerReference := api.OwnerReference{UID: uuid.NewUUID(), APIVersion: "batch/v1", Kind: "Job", Name: "Job"}
 	pod := newPod("pod", rs, api.PodRunning, nil)
 	pod.OwnerReferences = []api.OwnerReference{unrelatedOwnerReference}
-	manager.podStore.Indexer.Add(pod)
+	manager.podLister.Indexer.Add(pod)
 
 	err := manager.syncReplicaSet(getKey(rs, t))
 	if err != nil {
@@ -1130,14 +1184,16 @@ func TestPatchPodWithOtherOwnerRef(t *testing.T) {
 func TestPatchPodWithCorrectOwnerRef(t *testing.T) {
 	labelMap := map[string]string{"foo": "bar"}
 	rs := newReplicaSet(2, labelMap)
-	manager, fakePodControl := setupManagerWithGCEnabled(rs)
-	manager.rsStore.Indexer.Add(rs)
-	// add to podStore a matching pod that has an ownerRef pointing to the rs,
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	manager, fakePodControl := setupManagerWithGCEnabled(stopCh, rs)
+	manager.rsLister.Indexer.Add(rs)
+	// add to podLister a matching pod that has an ownerRef pointing to the rs,
 	// but ownerRef.Controller is false. Expect a patch to take control of it.
 	rsOwnerReference := api.OwnerReference{UID: rs.UID, APIVersion: "v1", Kind: "ReplicaSet", Name: rs.Name}
 	pod := newPod("pod", rs, api.PodRunning, nil)
 	pod.OwnerReferences = []api.OwnerReference{rsOwnerReference}
-	manager.podStore.Indexer.Add(pod)
+	manager.podLister.Indexer.Add(pod)
 
 	err := manager.syncReplicaSet(getKey(rs, t))
 	if err != nil {
@@ -1150,12 +1206,14 @@ func TestPatchPodWithCorrectOwnerRef(t *testing.T) {
 func TestPatchPodFails(t *testing.T) {
 	labelMap := map[string]string{"foo": "bar"}
 	rs := newReplicaSet(2, labelMap)
-	manager, fakePodControl := setupManagerWithGCEnabled(rs)
-	manager.rsStore.Indexer.Add(rs)
-	// add to podStore two matching pods. Expect two patches to take control of
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	manager, fakePodControl := setupManagerWithGCEnabled(stopCh, rs)
+	manager.rsLister.Indexer.Add(rs)
+	// add to podLister two matching pods. Expect two patches to take control of
 	// them.
-	manager.podStore.Indexer.Add(newPod("pod1", rs, api.PodRunning, nil))
-	manager.podStore.Indexer.Add(newPod("pod2", rs, api.PodRunning, nil))
+	manager.podLister.Indexer.Add(newPod("pod1", rs, api.PodRunning, nil))
+	manager.podLister.Indexer.Add(newPod("pod2", rs, api.PodRunning, nil))
 	// let both patches fail. The rs controller will assume it fails to take
 	// control of the pods and create new ones.
 	fakePodControl.Err = fmt.Errorf("Fake Error")
@@ -1170,13 +1228,15 @@ func TestPatchPodFails(t *testing.T) {
 func TestPatchExtraPodsThenDelete(t *testing.T) {
 	labelMap := map[string]string{"foo": "bar"}
 	rs := newReplicaSet(2, labelMap)
-	manager, fakePodControl := setupManagerWithGCEnabled(rs)
-	manager.rsStore.Indexer.Add(rs)
-	// add to podStore three matching pods. Expect three patches to take control of
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	manager, fakePodControl := setupManagerWithGCEnabled(stopCh, rs)
+	manager.rsLister.Indexer.Add(rs)
+	// add to podLister three matching pods. Expect three patches to take control of
 	// them, and later delete one of them.
-	manager.podStore.Indexer.Add(newPod("pod1", rs, api.PodRunning, nil))
-	manager.podStore.Indexer.Add(newPod("pod2", rs, api.PodRunning, nil))
-	manager.podStore.Indexer.Add(newPod("pod3", rs, api.PodRunning, nil))
+	manager.podLister.Indexer.Add(newPod("pod1", rs, api.PodRunning, nil))
+	manager.podLister.Indexer.Add(newPod("pod2", rs, api.PodRunning, nil))
+	manager.podLister.Indexer.Add(newPod("pod3", rs, api.PodRunning, nil))
 	err := manager.syncReplicaSet(getKey(rs, t))
 	if err != nil {
 		t.Fatal(err)
@@ -1188,9 +1248,11 @@ func TestPatchExtraPodsThenDelete(t *testing.T) {
 func TestUpdateLabelsRemoveControllerRef(t *testing.T) {
 	labelMap := map[string]string{"foo": "bar"}
 	rs := newReplicaSet(2, labelMap)
-	manager, fakePodControl := setupManagerWithGCEnabled(rs)
-	manager.rsStore.Indexer.Add(rs)
-	// put one pod in the podStore
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	manager, fakePodControl := setupManagerWithGCEnabled(stopCh, rs)
+	manager.rsLister.Indexer.Add(rs)
+	// put one pod in the podLister
 	pod := newPod("pod", rs, api.PodRunning, nil)
 	pod.ResourceVersion = "1"
 	var trueVar = true
@@ -1203,7 +1265,7 @@ func TestUpdateLabelsRemoveControllerRef(t *testing.T) {
 	// add the updatedPod to the store. This is consistent with the behavior of
 	// the Informer: the Informer updates the store before calling the handler
 	// (updatePod() in this case).
-	manager.podStore.Indexer.Add(&updatedPod)
+	manager.podLister.Indexer.Add(&updatedPod)
 	// send an update of the same pod with modified labels
 	manager.updatePod(pod, &updatedPod)
 	// verifies that rs is added to the queue
@@ -1227,16 +1289,18 @@ func TestUpdateLabelsRemoveControllerRef(t *testing.T) {
 func TestUpdateSelectorControllerRef(t *testing.T) {
 	labelMap := map[string]string{"foo": "bar"}
 	rs := newReplicaSet(2, labelMap)
-	manager, fakePodControl := setupManagerWithGCEnabled(rs)
-	// put 2 pods in the podStore
-	newPodList(manager.podStore.Indexer, 2, api.PodRunning, labelMap, rs, "pod")
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	manager, fakePodControl := setupManagerWithGCEnabled(stopCh, rs)
+	// put 2 pods in the podLister
+	newPodList(manager.podLister.Indexer, 2, api.PodRunning, labelMap, rs, "pod")
 	// update the RS so that its selector no longer matches the pods
 	updatedRS := *rs
 	updatedRS.Spec.Selector.MatchLabels = map[string]string{"foo": "baz"}
 	// put the updatedRS into the store. This is consistent with the behavior of
 	// the Informer: the Informer updates the store before calling the handler
 	// (updateRS() in this case).
-	manager.rsStore.Indexer.Add(&updatedRS)
+	manager.rsLister.Indexer.Add(&updatedRS)
 	manager.updateRS(rs, &updatedRS)
 	// verifies that the rs is added to the queue
 	rsKey := getKey(rs, t)
@@ -1261,12 +1325,14 @@ func TestUpdateSelectorControllerRef(t *testing.T) {
 func TestDoNotAdoptOrCreateIfBeingDeleted(t *testing.T) {
 	labelMap := map[string]string{"foo": "bar"}
 	rs := newReplicaSet(2, labelMap)
-	manager, fakePodControl := setupManagerWithGCEnabled(rs)
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	manager, fakePodControl := setupManagerWithGCEnabled(stopCh, rs)
 	now := unversioned.Now()
 	rs.DeletionTimestamp = &now
-	manager.rsStore.Indexer.Add(rs)
+	manager.rsLister.Indexer.Add(rs)
 	pod1 := newPod("pod1", rs, api.PodRunning, nil)
-	manager.podStore.Indexer.Add(pod1)
+	manager.podLister.Indexer.Add(pod1)
 
 	// no patch, no create
 	err := manager.syncReplicaSet(getKey(rs, t))
@@ -1279,29 +1345,34 @@ func TestDoNotAdoptOrCreateIfBeingDeleted(t *testing.T) {
 func TestReadyReplicas(t *testing.T) {
 	// This is a happy server just to record the PUT request we expect for status.Replicas
 	fakeHandler := utiltesting.FakeHandler{
-		StatusCode:   200,
-		ResponseBody: "{}",
+		StatusCode:    200,
+		ResponseBody:  "{}",
+		SkipRequestFn: skipListerFunc,
 	}
 	testServer := httptest.NewServer(&fakeHandler)
 	defer testServer.Close()
 
 	client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
-	manager := NewReplicaSetControllerFromClient(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
-	manager.podStoreSynced = alwaysReady
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
+	manager.podListerSynced = alwaysReady
+	manager.podLister = &cache.StoreToPodLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
+	manager.rsLister = &cache.StoreToReplicaSetLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
 
 	// Status.Replicas should update to match the number of pods in the system, 1 new pod should be created.
 	labelMap := map[string]string{"foo": "bar"}
 	rs := newReplicaSet(2, labelMap)
 	rs.Status = extensions.ReplicaSetStatus{Replicas: 2, ReadyReplicas: 0, AvailableReplicas: 0, ObservedGeneration: 1}
 	rs.Generation = 1
-	manager.rsStore.Indexer.Add(rs)
+	manager.rsLister.Indexer.Add(rs)
 
-	newPodList(manager.podStore.Indexer, 2, api.PodPending, labelMap, rs, "pod")
-	newPodList(manager.podStore.Indexer, 2, api.PodRunning, labelMap, rs, "pod")
+	newPodList(manager.podLister.Indexer, 2, api.PodPending, labelMap, rs, "pod")
+	newPodList(manager.podLister.Indexer, 2, api.PodRunning, labelMap, rs, "pod")
 
 	// This response body is just so we don't err out decoding the http response
 	response := runtime.EncodeOrDie(testapi.Extensions.Codec(), &extensions.ReplicaSet{})
-	fakeHandler.ResponseBody = response
+	fakeHandler.SetResponseBody(response)
 
 	fakePodControl := controller.FakePodControl{}
 	manager.podControl = &fakePodControl
@@ -1319,15 +1390,20 @@ func TestReadyReplicas(t *testing.T) {
 func TestAvailableReplicas(t *testing.T) {
 	// This is a happy server just to record the PUT request we expect for status.Replicas
 	fakeHandler := utiltesting.FakeHandler{
-		StatusCode:   200,
-		ResponseBody: "{}",
+		StatusCode:    200,
+		ResponseBody:  "{}",
+		SkipRequestFn: skipListerFunc,
 	}
 	testServer := httptest.NewServer(&fakeHandler)
 	defer testServer.Close()
 
 	client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
-	manager := NewReplicaSetControllerFromClient(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
-	manager.podStoreSynced = alwaysReady
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
+	manager.podListerSynced = alwaysReady
+	manager.podLister = &cache.StoreToPodLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
+	manager.rsLister = &cache.StoreToReplicaSetLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
 
 	// Status.Replicas should update to match the number of pods in the system, 1 new pod should be created.
 	labelMap := map[string]string{"foo": "bar"}
@@ -1336,21 +1412,21 @@ func TestAvailableReplicas(t *testing.T) {
 	rs.Generation = 1
 	// minReadySeconds set to 15s
 	rs.Spec.MinReadySeconds = 15
-	manager.rsStore.Indexer.Add(rs)
+	manager.rsLister.Indexer.Add(rs)
 
 	// First pod becomes ready 20s ago
 	moment := unversioned.Time{Time: time.Now().Add(-2e10)}
 	pod := newPod("pod", rs, api.PodRunning, &moment)
-	manager.podStore.Indexer.Add(pod)
+	manager.podLister.Indexer.Add(pod)
 
 	// Second pod becomes ready now
 	otherMoment := unversioned.Now()
 	otherPod := newPod("otherPod", rs, api.PodRunning, &otherMoment)
-	manager.podStore.Indexer.Add(otherPod)
+	manager.podLister.Indexer.Add(otherPod)
 
 	// This response body is just so we don't err out decoding the http response
 	response := runtime.EncodeOrDie(testapi.Extensions.Codec(), &extensions.ReplicaSet{})
-	fakeHandler.ResponseBody = response
+	fakeHandler.SetResponseBody(response)
 
 	fakePodControl := controller.FakePodControl{}
 	manager.podControl = &fakePodControl