let RC manager utilize the GC

Chao Xu
2016-06-10 16:28:42 -07:00
parent bea5232790
commit 11a341de67
10 changed files with 1033 additions and 103 deletions


@@ -26,6 +26,9 @@ import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
@@ -36,6 +39,7 @@ import (
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util"
utilerrors "k8s.io/kubernetes/pkg/util/errors"
"k8s.io/kubernetes/pkg/util/metrics"
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/util/wait"
@@ -65,6 +69,10 @@ const (
statusUpdateRetries = 1
)
func getRCKind() unversioned.GroupVersionKind {
return v1.SchemeGroupVersion.WithKind("ReplicationController")
}
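An aside on getRCKind: the core ("legacy") API group has an empty group name, so the GroupVersion string is plain "v1", which is exactly the APIVersion later stamped into pod owner references below. A minimal standalone sketch with simplified stand-in types (not the real unversioned package):

package main

import "fmt"

// GroupVersionKind is a simplified stand-in for unversioned.GroupVersionKind.
type GroupVersionKind struct{ Group, Version, Kind string }

// GroupVersion mirrors the real String() behavior: the core group has no
// "group/" prefix.
func (gvk GroupVersionKind) GroupVersion() string {
	if gvk.Group == "" {
		return gvk.Version
	}
	return gvk.Group + "/" + gvk.Version
}

func main() {
	rcKind := GroupVersionKind{Group: "", Version: "v1", Kind: "ReplicationController"}
	fmt.Println(rcKind.GroupVersion(), rcKind.Kind) // prints: v1 ReplicationController
}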
// ReplicationManager is responsible for synchronizing ReplicationController objects stored
// in the system with actual running pods.
// TODO: this really should be called ReplicationController. The only reason why it's a Manager
@@ -105,20 +113,24 @@ type ReplicationManager struct {
// Controllers that need to be synced
queue *workqueue.Type
// garbageCollectorEnabled denotes if the garbage collector is enabled. RC
// manager behaves differently if GC is enabled.
garbageCollectorEnabled bool
}
// NewReplicationManager creates a replication manager
func NewReplicationManager(podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int) *ReplicationManager {
func NewReplicationManager(podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicationManager {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
return newReplicationManagerInternal(
return newReplicationManager(
eventBroadcaster.NewRecorder(api.EventSource{Component: "replication-controller"}),
podInformer, kubeClient, resyncPeriod, burstReplicas, lookupCacheSize)
podInformer, kubeClient, resyncPeriod, burstReplicas, lookupCacheSize, garbageCollectorEnabled)
}
// newReplicationManagerInternal configures a replication manager with the specified event recorder
func newReplicationManagerInternal(eventRecorder record.EventRecorder, podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int) *ReplicationManager {
// newReplicationManager configures a replication manager with the specified event recorder
func newReplicationManager(eventRecorder record.EventRecorder, podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicationManager {
if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("replication_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
}
@@ -132,6 +144,7 @@ func newReplicationManagerInternal(eventRecorder record.EventRecorder, podInform
burstReplicas: burstReplicas,
expectations: controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
queue: workqueue.New(),
garbageCollectorEnabled: garbageCollectorEnabled,
}
rm.rcStore.Indexer, rm.rcController = framework.NewIndexerInformer(
@@ -147,43 +160,8 @@ func newReplicationManagerInternal(eventRecorder record.EventRecorder, podInform
// TODO: Can we have much longer period here?
FullControllerResyncPeriod,
framework.ResourceEventHandlerFuncs{
AddFunc: rm.enqueueController,
UpdateFunc: func(old, cur interface{}) {
oldRC := old.(*api.ReplicationController)
curRC := cur.(*api.ReplicationController)
// We should invalidate the whole lookup cache if a RC's selector has been updated.
//
// Imagine that you have two RCs:
// * old RC1
// * new RC2
// You also have a pod that is attached to RC2 (because it doesn't match RC1 selector).
// Now imagine that you are changing RC1 selector so that it is now matching that pod,
// in such case, we must invalidate the whole cache so that pod could be adopted by RC1
//
// This makes the lookup cache less helpful, but selector update does not happen often,
// so it's not a big problem
if !reflect.DeepEqual(oldRC.Spec.Selector, curRC.Spec.Selector) {
rm.lookupCache.InvalidateAll()
}
// You might imagine that we only really need to enqueue the
// controller when Spec changes, but it is safer to sync any
// time this function is triggered. That way a full informer
// resync can requeue any controllers that don't yet have pods
// but whose last attempts at creating a pod have failed (since
// we don't block on creation of pods) instead of those
// controllers stalling indefinitely. Enqueueing every time
// does result in some spurious syncs (like when Status.Replica
// is updated and the watch notification from it retriggers
// this function), but in general extra resyncs shouldn't be
// that bad as rcs that haven't met expectations yet won't
// sync, and all the listing is done using local stores.
if oldRC.Status.Replicas != curRC.Status.Replicas {
glog.V(4).Infof("Observed updated replica count for rc: %v, %d->%d", curRC.Name, oldRC.Status.Replicas, curRC.Status.Replicas)
}
rm.enqueueController(cur)
},
AddFunc: rm.enqueueController,
UpdateFunc: rm.updateRC,
// This will enter the sync loop and no-op, because the controller has been deleted from the store.
// Note that deleting a controller immediately after scaling it to 0 will not work. The recommended
// way of achieving this is by performing a `stop` operation on the controller.
@@ -212,7 +190,8 @@ func newReplicationManagerInternal(eventRecorder record.EventRecorder, podInform
// NewReplicationManagerFromClientForIntegration creates a new ReplicationManager that runs its own informer. It disables event recording for use in integration tests.
func NewReplicationManagerFromClientForIntegration(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int) *ReplicationManager {
podInformer := informers.CreateSharedPodIndexInformer(kubeClient, resyncPeriod())
rm := newReplicationManagerInternal(&record.FakeRecorder{}, podInformer, kubeClient, resyncPeriod, burstReplicas, lookupCacheSize)
garbageCollectorEnabled := false
rm := newReplicationManager(&record.FakeRecorder{}, podInformer, kubeClient, resyncPeriod, burstReplicas, lookupCacheSize, garbageCollectorEnabled)
rm.internalPodInformer = podInformer
return rm
}
@@ -220,7 +199,8 @@ func NewReplicationManagerFromClientForIntegration(kubeClient clientset.Interfac
// NewReplicationManagerFromClient creates a new ReplicationManager that runs its own informer.
func NewReplicationManagerFromClient(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int) *ReplicationManager {
podInformer := informers.CreateSharedPodIndexInformer(kubeClient, resyncPeriod())
rm := NewReplicationManager(podInformer, kubeClient, resyncPeriod, burstReplicas, lookupCacheSize)
garbageCollectorEnabled := false
rm := NewReplicationManager(podInformer, kubeClient, resyncPeriod, burstReplicas, lookupCacheSize, garbageCollectorEnabled)
rm.internalPodInformer = podInformer
return rm
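Note that both FromClient constructors hard-code garbageCollectorEnabled to false, so only explicitly wired callers opt in. A caller-side sketch, assuming this package's context and hypothetical kubeClient, resyncPeriod, burstReplicas, lookupCacheSize, workers, and stopCh variables:

// Hypothetical wiring of a GC-aware manager, mirroring the helpers above
// but passing true for the new flag.
podInformer := informers.CreateSharedPodIndexInformer(kubeClient, resyncPeriod())
rm := NewReplicationManager(podInformer, kubeClient, resyncPeriod, burstReplicas, lookupCacheSize, true /* garbageCollectorEnabled */)
go podInformer.Run(stopCh)
go rm.Run(workers, stopCh)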
@@ -255,13 +235,14 @@ func (rm *ReplicationManager) Run(workers int, stopCh <-chan struct{}) {
// getPodController returns the controller managing the given pod.
// TODO: Surface that we are ignoring multiple controllers for a single pod.
// TODO: use ownerReference.Controller to determine if the rc controls the pod.
func (rm *ReplicationManager) getPodController(pod *api.Pod) *api.ReplicationController {
// look up in the cache; if cached and the cache is valid, just return the cached value
if obj, cached := rm.lookupCache.GetMatchingObject(pod); cached {
controller, ok := obj.(*api.ReplicationController)
if !ok {
// This should not happen
glog.Errorf("lookup cache does not retuen a ReplicationController object")
glog.Errorf("lookup cache does not return a ReplicationController object")
return nil
}
if cached && rm.isCacheValid(pod, controller) {
@@ -320,6 +301,44 @@ func isControllerMatch(pod *api.Pod, rc *api.ReplicationController) bool {
return true
}
// updateRC is the callback invoked when an RC is updated.
func (rm *ReplicationManager) updateRC(old, cur interface{}) {
oldRC := old.(*api.ReplicationController)
curRC := cur.(*api.ReplicationController)
// We should invalidate the whole lookup cache if an RC's selector has been updated.
//
// Imagine that you have two RCs:
// * old RC1
// * new RC2
// You also have a pod that is attached to RC2 (because it doesn't match RC1's selector).
// Now imagine that you change RC1's selector so that it now matches that pod; in that
// case we must invalidate the whole cache so that the pod can be adopted by RC1.
//
// This makes the lookup cache less helpful, but selector updates do not happen often,
// so it's not a big problem.
if !reflect.DeepEqual(oldRC.Spec.Selector, curRC.Spec.Selector) {
rm.lookupCache.InvalidateAll()
}
// You might imagine that we only really need to enqueue the
// controller when Spec changes, but it is safer to sync any
// time this function is triggered. That way a full informer
// resync can requeue any controllers that don't yet have pods
// but whose last attempts at creating a pod have failed (since
// we don't block on creation of pods) instead of those
// controllers stalling indefinitely. Enqueueing every time
// does result in some spurious syncs (like when Status.Replicas
// is updated and the watch notification from it retriggers
// this function), but in general extra resyncs shouldn't be
// that bad as rcs that haven't met expectations yet won't
// sync, and all the listing is done using local stores.
if oldRC.Status.Replicas != curRC.Status.Replicas {
glog.V(4).Infof("Observed updated replica count for rc: %v, %d->%d", curRC.Name, oldRC.Status.Replicas, curRC.Status.Replicas)
}
rm.enqueueController(cur)
}
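The invalidation rule in updateRC can be shown in isolation. A standalone sketch with a toy cache standing in for the manager's pod-to-RC lookup cache:

package main

import (
	"fmt"
	"reflect"
)

// toyCache is a stand-in for the manager's lookup cache (podName -> rcName).
type toyCache struct{ entries map[string]string }

func (c *toyCache) InvalidateAll() { c.entries = map[string]string{} }

func main() {
	cache := &toyCache{entries: map[string]string{"pod-a": "rc-1"}}
	oldSelector := map[string]string{"app": "v1"}
	newSelector := map[string]string{"app": "v2"}
	// Mirrors updateRC: any selector change may redirect which RC a pod
	// matches, so the whole cache is dropped rather than patched entry by entry.
	if !reflect.DeepEqual(oldSelector, newSelector) {
		cache.InvalidateAll()
	}
	fmt.Println(len(cache.entries)) // prints: 0
}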
// When a pod is created, enqueue the controller that manages it and update its expectations.
func (rm *ReplicationManager) addPod(obj interface{}) {
pod := obj.(*api.Pod)
@@ -370,10 +389,8 @@ func (rm *ReplicationManager) updatePod(old, cur interface{}) {
return
}
if rc := rm.getPodController(curPod); rc != nil {
rm.enqueueController(rc)
}
// Only need to get the old controller if the labels changed.
// Enqueue the oldRC before the curRC to give curRC a chance to adopt the oldPod.
if labelChanged {
// If the old and new rc are the same, the first one that syncs
// will set expectations preventing any damage from the second.
@@ -381,6 +398,10 @@ func (rm *ReplicationManager) updatePod(old, cur interface{}) {
rm.enqueueController(oldRC)
}
}
if curRC := rm.getPodController(curPod); curRC != nil {
rm.enqueueController(curRC)
}
}
// When a pod is deleted, enqueue the controller that manages the pod and update its expectations.
@@ -481,7 +502,21 @@ func (rm *ReplicationManager) manageReplicas(filteredPods []*api.Pod, rc *api.Re
for i := 0; i < diff; i++ {
go func() {
defer wg.Done()
if err := rm.podControl.CreatePods(rc.Namespace, rc.Spec.Template, rc); err != nil {
var err error
if rm.garbageCollectorEnabled {
var trueVar = true
controllerRef := &api.OwnerReference{
APIVersion: getRCKind().GroupVersion().String(),
Kind: getRCKind().Kind,
Name: rc.Name,
UID: rc.UID,
Controller: &trueVar,
}
err = rm.podControl.CreatePodsWithControllerRef(rc.Namespace, rc.Spec.Template, rc, controllerRef)
} else {
err = rm.podControl.CreatePods(rc.Namespace, rc.Spec.Template, rc)
}
if err != nil {
// Decrement the expected number of creates because the informer won't observe this pod
glog.V(2).Infof("Failed creation, decrementing expectations for controller %q/%q", rc.Namespace, rc.Name)
rm.expectations.CreationObserved(rcKey)
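With GC enabled, every pod this branch creates carries a controllerRef like the one built above. A standalone illustration using a simplified stand-in for api.OwnerReference (the RC name and UID are hypothetical):

package main

import "fmt"

// OwnerReference is a simplified stand-in for api.OwnerReference.
type OwnerReference struct {
	APIVersion string
	Kind       string
	Name       string
	UID        string
	Controller *bool
}

func main() {
	trueVar := true
	// What manageReplicas builds for an RC named "frontend" (hypothetical
	// UID): Controller=true marks this owner as the managing controller.
	controllerRef := OwnerReference{
		APIVersion: "v1",
		Kind:       "ReplicationController",
		Name:       "frontend",
		UID:        "0a1b2c3d",
		Controller: &trueVar,
	}
	fmt.Printf("%s/%s %s controller=%t\n", controllerRef.APIVersion, controllerRef.Kind, controllerRef.Name, *controllerRef.Controller)
}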
@@ -580,7 +615,6 @@ func (rm *ReplicationManager) syncReplicationController(key string) error {
trace.Step("ReplicationController restored")
rcNeedsSync := rm.expectations.SatisfiedExpectations(rcKey)
trace.Step("Expectations restored")
podList, err := rm.podStore.Pods(rc.Namespace).List(labels.Set(rc.Spec.Selector).AsSelector())
if err != nil {
glog.Errorf("Error getting pods for rc %q: %v", key, err)
rm.queue.Add(key)
@@ -588,9 +622,59 @@ func (rm *ReplicationManager) syncReplicationController(key string) error {
}
trace.Step("Pods listed")
// TODO: Do this in a single pass, or use an index.
filteredPods := controller.FilterActivePods(podList.Items)
if rcNeedsSync {
// TODO: Do the List and Filter in a single pass, or use an index.
var filteredPods []*api.Pod
if rm.garbageCollectorEnabled {
// list all pods to include the pods that no longer match the rc's selector
// but still carry a stale controller ref.
podList, err := rm.podStore.Pods(rc.Namespace).List(labels.Everything())
if err != nil {
glog.Errorf("Error getting pods for rc %q: %v", key, err)
rm.queue.Add(key)
return err
}
cm := controller.NewPodControllerRefManager(rm.podControl, rc.ObjectMeta, labels.Set(rc.Spec.Selector).AsSelector(), getRCKind())
matchesAndControlled, matchesNeedsController, controlledDoesNotMatch := cm.Classify(podList.Items)
for _, pod := range matchesNeedsController {
err := cm.AdoptPod(pod)
// continue to next pod if adoption fails.
if err != nil {
// If the pod no longer exists, don't even log the error.
if !errors.IsNotFound(err) {
utilruntime.HandleError(err)
}
} else {
matchesAndControlled = append(matchesAndControlled, pod)
}
}
filteredPods = matchesAndControlled
// remove the controllerRef for the pods that no longer have matching labels
var errlist []error
for _, pod := range controlledDoesNotMatch {
err := cm.ReleasePod(pod)
if err != nil {
errlist = append(errlist, err)
}
}
if len(errlist) != 0 {
aggregate := utilerrors.NewAggregate(errlist)
// push the RC into the work queue again. We need to try to free the
// pods again, otherwise they will be stuck with the stale
// controllerRef.
rm.queue.Add(key)
return aggregate
}
} else {
podList, err := rm.podStore.Pods(rc.Namespace).List(labels.Set(rc.Spec.Selector).AsSelector())
if err != nil {
glog.Errorf("Error getting pods for rc %q: %v", key, err)
rm.queue.Add(key)
return err
}
filteredPods = controller.FilterActivePods(podList.Items)
}
if rcNeedsSync && rc.DeletionTimestamp == nil {
rm.manageReplicas(filteredPods, &rc)
}
trace.Step("manageReplicas done")
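The adopt/release flow above hinges on Classify splitting the listed pods into three buckets. A standalone sketch of that bucketing with simplified types; the real controller.PodControllerRefManager works off each pod's OwnerReferences and the Controller bit rather than a bare UID field:

package main

import "fmt"

// Pod is a simplified stand-in; ControlledBy holds the UID of the
// controlling owner, or "" if the pod has no controllerRef.
type Pod struct {
	Name         string
	Labels       map[string]string
	ControlledBy string
}

func matches(labels, selector map[string]string) bool {
	for k, v := range selector {
		if labels[k] != v {
			return false
		}
	}
	return true
}

// classify mirrors the three buckets used in syncReplicationController.
func classify(pods []Pod, selector map[string]string, rcUID string) (matchesAndControlled, matchesNeedsController, controlledDoesNotMatch []Pod) {
	for _, p := range pods {
		m := matches(p.Labels, selector)
		switch {
		case m && p.ControlledBy == rcUID:
			matchesAndControlled = append(matchesAndControlled, p)
		case m && p.ControlledBy == "":
			matchesNeedsController = append(matchesNeedsController, p) // to be adopted
		case !m && p.ControlledBy == rcUID:
			controlledDoesNotMatch = append(controlledDoesNotMatch, p) // to be released
		}
		// A matching pod controlled by a different owner lands in no bucket,
		// which is why TestDoNotPatchPodWithOtherControlRef expects no patch.
	}
	return
}

func main() {
	sel := map[string]string{"app": "frontend"}
	pods := []Pod{
		{Name: "owned", Labels: sel, ControlledBy: "rc-1"},
		{Name: "orphan", Labels: sel},
		{Name: "stale", Labels: map[string]string{"app": "other"}, ControlledBy: "rc-1"},
	}
	owned, adopt, release := classify(pods, sel, "rc-1")
	fmt.Println(len(owned), len(adopt), len(release)) // prints: 1 1 1
}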


@@ -95,34 +95,45 @@ func newReplicationController(replicas int) *api.ReplicationController {
return rc
}
// create a pod with the given phase for the given rc (same selectors and namespace).
func newPod(name string, rc *api.ReplicationController, status api.PodPhase) *api.Pod {
return &api.Pod{
ObjectMeta: api.ObjectMeta{
Name: name,
Labels: rc.Spec.Selector,
Namespace: rc.Namespace,
},
Status: api.PodStatus{Phase: status},
}
}
// create count pods with the given phase for the given rc (same selectors and namespace), and add them to the store.
func newPodList(store cache.Store, count int, status api.PodPhase, rc *api.ReplicationController, name string) *api.PodList {
pods := []api.Pod{}
var trueVar = true
controllerReference := api.OwnerReference{UID: rc.UID, APIVersion: "v1", Kind: "ReplicationController", Name: rc.Name, Controller: &trueVar}
for i := 0; i < count; i++ {
newPod := api.Pod{
ObjectMeta: api.ObjectMeta{
Name: fmt.Sprintf("%s%d", name, i),
Labels: rc.Spec.Selector,
Namespace: rc.Namespace,
},
Status: api.PodStatus{Phase: status},
}
pod := newPod(fmt.Sprintf("%s%d", name, i), rc, status)
pod.OwnerReferences = []api.OwnerReference{controllerReference}
if store != nil {
store.Add(&newPod)
store.Add(pod)
}
pods = append(pods, newPod)
pods = append(pods, *pod)
}
return &api.PodList{
Items: pods,
}
}
func validateSyncReplication(t *testing.T, fakePodControl *controller.FakePodControl, expectedCreates, expectedDeletes int) {
if len(fakePodControl.Templates) != expectedCreates {
t.Errorf("Unexpected number of creates. Expected %d, saw %d\n", expectedCreates, len(fakePodControl.Templates))
func validateSyncReplication(t *testing.T, fakePodControl *controller.FakePodControl, expectedCreates, expectedDeletes, expectedPatches int) {
if e, a := expectedCreates, len(fakePodControl.Templates); e != a {
t.Errorf("Unexpected number of creates. Expected %d, saw %d\n", e, a)
}
if len(fakePodControl.DeletePodName) != expectedDeletes {
t.Errorf("Unexpected number of deletes. Expected %d, saw %d\n", expectedDeletes, len(fakePodControl.DeletePodName))
if e, a := expectedDeletes, len(fakePodControl.DeletePodName); e != a {
t.Errorf("Unexpected number of deletes. Expected %d, saw %d\n", e, a)
}
if e, a := expectedPatches, len(fakePodControl.Patches); e != a {
t.Errorf("Unexpected number of patches. Expected %d, saw %d\n", e, a)
}
}
@@ -148,7 +159,7 @@ func TestSyncReplicationControllerDoesNothing(t *testing.T) {
manager.podControl = &fakePodControl
manager.syncReplicationController(getKey(controllerSpec, t))
validateSyncReplication(t, &fakePodControl, 0, 0)
validateSyncReplication(t, &fakePodControl, 0, 0, 0)
}
func TestSyncReplicationControllerDeletes(t *testing.T) {
@@ -164,7 +175,7 @@ func TestSyncReplicationControllerDeletes(t *testing.T) {
newPodList(manager.podStore.Indexer, 2, api.PodRunning, controllerSpec, "pod")
manager.syncReplicationController(getKey(controllerSpec, t))
validateSyncReplication(t, &fakePodControl, 0, 1)
validateSyncReplication(t, &fakePodControl, 0, 1, 0)
}
func TestDeleteFinalStateUnknown(t *testing.T) {
@@ -212,7 +223,7 @@ func TestSyncReplicationControllerCreates(t *testing.T) {
fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl
manager.syncReplicationController(getKey(rc, t))
validateSyncReplication(t, &fakePodControl, 2, 0)
validateSyncReplication(t, &fakePodControl, 2, 0, 0)
}
func TestStatusUpdatesWithoutReplicasChange(t *testing.T) {
@@ -238,7 +249,7 @@ func TestStatusUpdatesWithoutReplicasChange(t *testing.T) {
manager.podControl = &fakePodControl
manager.syncReplicationController(getKey(rc, t))
validateSyncReplication(t, &fakePodControl, 0, 0)
validateSyncReplication(t, &fakePodControl, 0, 0, 0)
if fakeHandler.RequestReceived != nil {
t.Errorf("Unexpected update when pods and rcs are in a steady state")
}
@@ -297,7 +308,7 @@ func TestControllerUpdateReplicas(t *testing.T) {
decRc := runtime.EncodeOrDie(testapi.Default.Codec(), rc)
fakeHandler.ValidateRequest(t, testapi.Default.ResourcePath(replicationControllerResourceName(), rc.Namespace, rc.Name)+"/status", "PUT", &decRc)
validateSyncReplication(t, &fakePodControl, 1, 0)
validateSyncReplication(t, &fakePodControl, 1, 0, 0)
}
func TestSyncReplicationControllerDormancy(t *testing.T) {
@@ -321,13 +332,13 @@ func TestSyncReplicationControllerDormancy(t *testing.T) {
// Creates a replica and sets expectations
controllerSpec.Status.Replicas = 1
manager.syncReplicationController(getKey(controllerSpec, t))
validateSyncReplication(t, &fakePodControl, 1, 0)
validateSyncReplication(t, &fakePodControl, 1, 0, 0)
// Expectations prevent replicas but not an update on status
controllerSpec.Status.Replicas = 0
fakePodControl.Clear()
manager.syncReplicationController(getKey(controllerSpec, t))
validateSyncReplication(t, &fakePodControl, 0, 0)
validateSyncReplication(t, &fakePodControl, 0, 0, 0)
// Get the key for the controller
rcKey, err := controller.KeyFunc(controllerSpec)
@@ -336,19 +347,20 @@ func TestSyncReplicationControllerDormancy(t *testing.T) {
}
// Lowering expectations should lead to a sync that creates a replica, however the
// fakePodControl error will prevent this, leaving expectations at 0, 0
// fakePodControl error will prevent this, leaving expectations at 0, 0.
manager.expectations.CreationObserved(rcKey)
controllerSpec.Status.Replicas = 1
fakePodControl.Clear()
fakePodControl.Err = fmt.Errorf("Fake Error")
manager.syncReplicationController(getKey(controllerSpec, t))
validateSyncReplication(t, &fakePodControl, 0, 0)
validateSyncReplication(t, &fakePodControl, 1, 0, 0)
// This replica should not need a Lowering of expectations, since the previous create failed
fakePodControl.Clear()
fakePodControl.Err = nil
manager.syncReplicationController(getKey(controllerSpec, t))
validateSyncReplication(t, &fakePodControl, 1, 0)
validateSyncReplication(t, &fakePodControl, 1, 0, 0)
// 1 PUT for the rc status during dormancy window.
// Note that the pod creates go through pod control so they're not recorded.
@@ -697,7 +709,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
expectedPods = burstReplicas
}
// This validates the rc manager sync actually created pods
validateSyncReplication(t, &fakePodControl, expectedPods, 0)
validateSyncReplication(t, &fakePodControl, expectedPods, 0, 0)
// This simulates the watch events for all but 1 of the expected pods.
// None of these should wake the controller because it has expectations==BurstReplicas.
@@ -718,7 +730,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
if expectedPods > burstReplicas {
expectedPods = burstReplicas
}
validateSyncReplication(t, &fakePodControl, 0, expectedPods)
validateSyncReplication(t, &fakePodControl, 0, expectedPods, 0)
// To accurately simulate a watch we must delete the exact pods
// the rc is waiting for.
@@ -753,7 +765,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
// Check that the rc didn't take any action for all the above pods
fakePodControl.Clear()
manager.syncReplicationController(getKey(controllerSpec, t))
validateSyncReplication(t, &fakePodControl, 0, 0)
validateSyncReplication(t, &fakePodControl, 0, 0, 0)
// Create/Delete the last pod
// The last add pod will decrease the expectation of the rc to 0,
@@ -831,7 +843,7 @@ func TestRCSyncExpectations(t *testing.T) {
},
})
manager.syncReplicationController(getKey(controllerSpec, t))
validateSyncReplication(t, &fakePodControl, 0, 0)
validateSyncReplication(t, &fakePodControl, 0, 0, 0)
}
func TestDeleteControllerAndExpectations(t *testing.T) {
@@ -847,7 +859,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
// This should set expectations for the rc
manager.syncReplicationController(getKey(rc, t))
validateSyncReplication(t, &fakePodControl, 1, 0)
validateSyncReplication(t, &fakePodControl, 1, 0, 0)
fakePodControl.Clear()
// Get the RC key
@@ -873,7 +885,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
podExp.Add(-1, 0)
manager.podStore.Indexer.Replace(make([]interface{}, 0), "0")
manager.syncReplicationController(getKey(rc, t))
validateSyncReplication(t, &fakePodControl, 0, 0)
validateSyncReplication(t, &fakePodControl, 0, 0, 0)
}
func TestRCManagerNotReady(t *testing.T) {
@@ -891,7 +903,7 @@ func TestRCManagerNotReady(t *testing.T) {
rcKey := getKey(controllerSpec, t)
manager.syncReplicationController(rcKey)
validateSyncReplication(t, &fakePodControl, 0, 0)
validateSyncReplication(t, &fakePodControl, 0, 0, 0)
queueRC, _ := manager.queue.Get()
if queueRC != rcKey {
t.Fatalf("Expected to find key %v in queue, found %v", rcKey, queueRC)
@@ -899,7 +911,7 @@ func TestRCManagerNotReady(t *testing.T) {
manager.podStoreSynced = alwaysReady
manager.syncReplicationController(rcKey)
validateSyncReplication(t, &fakePodControl, 1, 0)
validateSyncReplication(t, &fakePodControl, 1, 0, 0)
}
// shuffle returns a new shuffled list of container controllers.
@@ -1116,3 +1128,193 @@ func BenchmarkGetPodControllerSingleNS(b *testing.B) {
}
}
}
// setupManagerWithGCEnabled creates an RC manager with a fakePodControl and with garbageCollectorEnabled set to true.
func setupManagerWithGCEnabled() (manager *ReplicationManager, fakePodControl *controller.FakePodControl) {
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
fakePodControl = &controller.FakePodControl{}
manager = NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
manager.garbageCollectorEnabled = true
manager.podStoreSynced = alwaysReady
manager.podControl = fakePodControl
return manager, fakePodControl
}
func TestDoNotPatchPodWithOtherControlRef(t *testing.T) {
manager, fakePodControl := setupManagerWithGCEnabled()
rc := newReplicationController(2)
manager.rcStore.Indexer.Add(rc)
var trueVar = true
otherControllerReference := api.OwnerReference{UID: util.NewUUID(), APIVersion: "v1", Kind: "ReplicationController", Name: "AnotherRC", Controller: &trueVar}
// add to podStore a matching Pod controlled by another controller. Expect no patch.
pod := newPod("pod", rc, api.PodRunning)
pod.OwnerReferences = []api.OwnerReference{otherControllerReference}
manager.podStore.Indexer.Add(pod)
err := manager.syncReplicationController(getKey(rc, t))
if err != nil {
t.Fatal(err)
}
// Because the matching pod already has a controller, 2 pods should be created.
validateSyncReplication(t, fakePodControl, 2, 0, 0)
}
func TestPatchPodWithOtherOwnerRef(t *testing.T) {
manager, fakePodControl := setupManagerWithGCEnabled()
rc := newReplicationController(2)
manager.rcStore.Indexer.Add(rc)
// add to podStore one more matching pod that doesn't have a controller
// ref, but has an owner ref pointing to another object. Expect a patch to
// take control of it.
unrelatedOwnerReference := api.OwnerReference{UID: util.NewUUID(), APIVersion: "batch/v1", Kind: "Job", Name: "Job"}
pod := newPod("pod", rc, api.PodRunning)
pod.OwnerReferences = []api.OwnerReference{unrelatedOwnerReference}
manager.podStore.Indexer.Add(pod)
err := manager.syncReplicationController(getKey(rc, t))
if err != nil {
t.Fatal(err)
}
// 1 patch to take control of pod, and 1 create of new pod.
validateSyncReplication(t, fakePodControl, 1, 0, 1)
}
func TestPatchPodWithCorrectOwnerRef(t *testing.T) {
manager, fakePodControl := setupManagerWithGCEnabled()
rc := newReplicationController(2)
manager.rcStore.Indexer.Add(rc)
// add to podStore a matching pod that has an ownerRef pointing to the rc,
// but ownerRef.Controller is false. Expect a patch to take control of it.
rcOwnerReference := api.OwnerReference{UID: rc.UID, APIVersion: "v1", Kind: "ReplicationController", Name: rc.Name}
pod := newPod("pod", rc, api.PodRunning)
pod.OwnerReferences = []api.OwnerReference{rcOwnerReference}
manager.podStore.Indexer.Add(pod)
err := manager.syncReplicationController(getKey(rc, t))
if err != nil {
t.Fatal(err)
}
// 1 patch to take control of pod, and 1 create of new pod.
validateSyncReplication(t, fakePodControl, 1, 0, 1)
}
func TestPatchPodFails(t *testing.T) {
manager, fakePodControl := setupManagerWithGCEnabled()
rc := newReplicationController(2)
manager.rcStore.Indexer.Add(rc)
// add to podStore two matching pods. Expect two patches to take control
// of them.
manager.podStore.Indexer.Add(newPod("pod1", rc, api.PodRunning))
manager.podStore.Indexer.Add(newPod("pod2", rc, api.PodRunning))
// let both patches fail. The rc manager will assume it failed to take
// control of the pods and will create new ones.
fakePodControl.Err = fmt.Errorf("Fake Error")
err := manager.syncReplicationController(getKey(rc, t))
if err != nil {
t.Fatal(err)
}
// 2 patches to take control of pod1 and pod2 (both fail), 2 creates.
validateSyncReplication(t, fakePodControl, 2, 0, 2)
}
func TestPatchExtraPodsThenDelete(t *testing.T) {
manager, fakePodControl := setupManagerWithGCEnabled()
rc := newReplicationController(2)
manager.rcStore.Indexer.Add(rc)
// add to podStore three matching pods. Expect three patches to take control
// of them, and later one deletion because there is an extra pod.
manager.podStore.Indexer.Add(newPod("pod1", rc, api.PodRunning))
manager.podStore.Indexer.Add(newPod("pod2", rc, api.PodRunning))
manager.podStore.Indexer.Add(newPod("pod3", rc, api.PodRunning))
err := manager.syncReplicationController(getKey(rc, t))
if err != nil {
t.Fatal(err)
}
// 3 patches to take control of the pods, and 1 deletion because there is an extra pod.
validateSyncReplication(t, fakePodControl, 0, 1, 3)
}
func TestUpdateLabelsRemoveControllerRef(t *testing.T) {
manager, fakePodControl := setupManagerWithGCEnabled()
rc := newReplicationController(2)
manager.rcStore.Indexer.Add(rc)
// put one pod in the podStore
pod := newPod("pod", rc, api.PodRunning)
var trueVar = true
rcOwnerReference := api.OwnerReference{UID: rc.UID, APIVersion: "v1", Kind: "ReplicationController", Name: rc.Name, Controller: &trueVar}
pod.OwnerReferences = []api.OwnerReference{rcOwnerReference}
updatedPod := *pod
// reset the labels
updatedPod.Labels = make(map[string]string)
// add the updatedPod to the store. This is consistent with the behavior of
// the Informer: the Informer updates the store before calling the handler
// (updatePod() in this case).
manager.podStore.Indexer.Add(&updatedPod)
// send an update of the same pod with modified labels
manager.updatePod(pod, &updatedPod)
// verify that the rc is added to the queue
rcKey := getKey(rc, t)
queueRC, _ := manager.queue.Get()
if queueRC != rcKey {
t.Fatalf("Expected to find key %v in queue, found %v", rcKey, queueRC)
}
manager.queue.Done(queueRC)
err := manager.syncReplicationController(rcKey)
if err != nil {
t.Fatal(err)
}
// expect 1 patch to be sent to remove the controllerRef for the pod.
// expect 2 creates because rc.Spec.Replicas=2 and no matching pod
// exists.
validateSyncReplication(t, fakePodControl, 2, 0, 1)
fakePodControl.Clear()
}
func TestUpdateSelectorControllerRef(t *testing.T) {
manager, fakePodControl := setupManagerWithGCEnabled()
rc := newReplicationController(2)
// put 2 pods in the podStore
newPodList(manager.podStore.Indexer, 2, api.PodRunning, rc, "pod")
// update the RC so that its selector no longer matches the pods
updatedRC := *rc
updatedRC.Spec.Selector = map[string]string{"foo": "baz"}
// put the updatedRC into the store. This is consistent with the behavior of
// the Informer: the Informer updates the store before calling the handler
// (updateRC() in this case).
manager.rcStore.Indexer.Add(&updatedRC)
manager.updateRC(rc, &updatedRC)
// verify that the rc is added to the queue
rcKey := getKey(rc, t)
queueRC, _ := manager.queue.Get()
if queueRC != rcKey {
t.Fatalf("Expected to find key %v in queue, found %v", rcKey, queueRC)
}
manager.queue.Done(queueRC)
err := manager.syncReplicationController(rcKey)
if err != nil {
t.Fatal(err)
}
// expect 2 patches to be sent to remove the controllerRef for the pods.
// expect 2 creates because rc.Spec.Replicas=2 and no matching pod
// exists.
validateSyncReplication(t, fakePodControl, 2, 0, 2)
fakePodControl.Clear()
}
// RC manager shouldn't adopt or create more pods if the rc is about to be
// deleted.
func TestDoNotAdoptOrCreateIfBeingDeleted(t *testing.T) {
manager, fakePodControl := setupManagerWithGCEnabled()
rc := newReplicationController(2)
now := unversioned.Now()
rc.DeletionTimestamp = &now
manager.rcStore.Indexer.Add(rc)
pod1 := newPod("pod1", rc, api.PodRunning)
manager.podStore.Indexer.Add(pod1)
// no patch, no create
err := manager.syncReplicationController(getKey(rc, t))
if err != nil {
t.Fatal(err)
}
validateSyncReplication(t, fakePodControl, 0, 0, 0)
}