refactor replicaset sync call tree

Jun Xiang Tee 2017-11-01 16:16:19 -07:00
parent 82184d8e00
commit d25af7bb53
4 changed files with 631 additions and 93 deletions
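In rough outline, the sync call tree after this change (helper names as introduced in the diffs below):

    syncReplicaSet
      -> claimPods (adoption via PodControllerRefManager)
      -> manageReplicas
           -> slowStartBatch (batched pod creation when diff < 0)
           -> getPodsToDelete / getPodKeys (pod deletion when diff > 0)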

View File

@@ -43,7 +43,10 @@ go_library(
go_test(
name = "go_default_test",
srcs = ["replica_set_test.go"],
srcs = [
"replica_set_test.go",
"replica_set_utils_test.go",
],
importpath = "k8s.io/kubernetes/pkg/controller/replicaset",
library = ":go_default_library",
deps = [

View File

@@ -437,10 +437,8 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *exte
utilruntime.HandleError(fmt.Errorf("Couldn't get key for ReplicaSet %#v: %v", rs, err))
return nil
}
var errCh chan error
if diff < 0 {
diff *= -1
errCh = make(chan error, diff)
if diff > rsc.burstReplicas {
diff = rsc.burstReplicas
}
@@ -450,7 +448,6 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *exte
// into a performance bottleneck. We should generate a UID for the pod
// beforehand and store it via ExpectCreations.
rsc.expectations.ExpectCreations(rsKey, diff)
var wg sync.WaitGroup
glog.V(2).Infof("Too few %q/%q replicas, need %d, creating %d", rs.Namespace, rs.Name, *(rs.Spec.Replicas), diff)
// Batch the pod creates. Batch sizes start at SlowStartInitialBatchSize
// and double with each successful iteration in a kind of "slow start".
@@ -460,105 +457,85 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *exte
// prevented from spamming the API service with the pod create requests
// after one of its pods fails. Conveniently, this also prevents the
// event spam that those failures would generate.
for batchSize := integer.IntMin(diff, controller.SlowStartInitialBatchSize); diff > 0; batchSize = integer.IntMin(2*batchSize, diff) {
errorCount := len(errCh)
wg.Add(batchSize)
for i := 0; i < batchSize; i++ {
go func() {
defer wg.Done()
var err error
boolPtr := func(b bool) *bool { return &b }
controllerRef := &metav1.OwnerReference{
APIVersion: controllerKind.GroupVersion().String(),
Kind: controllerKind.Kind,
Name: rs.Name,
UID: rs.UID,
BlockOwnerDeletion: boolPtr(true),
Controller: boolPtr(true),
}
err = rsc.podControl.CreatePodsWithControllerRef(rs.Namespace, &rs.Spec.Template, rs, controllerRef)
if err != nil && errors.IsTimeout(err) {
// Pod is created but its initialization has timed out.
// If the initialization is successful eventually, the
// controller will observe the creation via the informer.
// If the initialization fails, or if the pod keeps
// uninitialized for a long time, the informer will not
// receive any update, and the controller will create a new
// pod when the expectation expires.
return
}
if err != nil {
// Decrement the expected number of creates because the informer won't observe this pod
glog.V(2).Infof("Failed creation, decrementing expectations for replica set %q/%q", rs.Namespace, rs.Name)
rsc.expectations.CreationObserved(rsKey)
errCh <- err
}
}()
successfulCreations, err := slowStartBatch(diff, controller.SlowStartInitialBatchSize, func() error {
boolPtr := func(b bool) *bool { return &b }
controllerRef := &metav1.OwnerReference{
APIVersion: controllerKind.GroupVersion().String(),
Kind: controllerKind.Kind,
Name: rs.Name,
UID: rs.UID,
BlockOwnerDeletion: boolPtr(true),
Controller: boolPtr(true),
}
wg.Wait()
// any skipped pods that we never attempted to start shouldn't be expected.
skippedPods := diff - batchSize
if errorCount < len(errCh) && skippedPods > 0 {
glog.V(2).Infof("Slow-start failure. Skipping creation of %d pods, decrementing expectations for replica set %q/%q", skippedPods, rs.Namespace, rs.Name)
for i := 0; i < skippedPods; i++ {
// Decrement the expected number of creates because the informer won't observe this pod
rsc.expectations.CreationObserved(rsKey)
}
// The skipped pods will be retried later. The next controller resync will
// retry the slow start process.
break
err := rsc.podControl.CreatePodsWithControllerRef(rs.Namespace, &rs.Spec.Template, rs, controllerRef)
if err != nil && errors.IsTimeout(err) {
// Pod is created but its initialization has timed out.
// If the initialization is successful eventually, the
// controller will observe the creation via the informer.
// If the initialization fails, or if the pod keeps
// uninitialized for a long time, the informer will not
// receive any update, and the controller will create a new
// pod when the expectation expires.
return nil
}
return err
})
// Any skipped pods that we never attempted to start shouldn't be expected.
// The skipped pods will be retried later. The next controller resync will
// retry the slow start process.
if skippedPods := diff - successfulCreations; skippedPods > 0 {
glog.V(2).Infof("Slow-start failure. Skipping creation of %d pods, decrementing expectations for replica set %v/%v", skippedPods, rs.Namespace, rs.Name)
for i := 0; i < skippedPods; i++ {
// Decrement the expected number of creates because the informer won't observe this pod
rsc.expectations.CreationObserved(rsKey)
}
diff -= batchSize
}
return err
} else if diff > 0 {
if diff > rsc.burstReplicas {
diff = rsc.burstReplicas
}
errCh = make(chan error, diff)
glog.V(2).Infof("Too many %q/%q replicas, need %d, deleting %d", rs.Namespace, rs.Name, *(rs.Spec.Replicas), diff)
// No need to sort pods if we are about to delete all of them
if *(rs.Spec.Replicas) != 0 {
// Sort the pods in the order such that not-ready < ready, unscheduled
// < scheduled, and pending < running. This ensures that we delete pods
// in the earlier stages whenever possible.
sort.Sort(controller.ActivePods(filteredPods))
}
// Choose which Pods to delete, preferring those in earlier phases of startup.
podsToDelete := getPodsToDelete(filteredPods, diff)
// Snapshot the UIDs (ns/name) of the pods we're expecting to see
// deleted, so we know to record their expectations exactly once either
// when we see it as an update of the deletion timestamp, or as a delete.
// Note that if the labels on a pod/rs change in a way that the pod gets
// orphaned, the rs will only wake up after the expectations have
// expired even if other pods are deleted.
deletedPodKeys := []string{}
for i := 0; i < diff; i++ {
deletedPodKeys = append(deletedPodKeys, controller.PodKey(filteredPods[i]))
}
rsc.expectations.ExpectDeletions(rsKey, deletedPodKeys)
rsc.expectations.ExpectDeletions(rsKey, getPodKeys(podsToDelete))
errCh := make(chan error, diff)
var wg sync.WaitGroup
wg.Add(diff)
for i := 0; i < diff; i++ {
go func(ix int) {
for _, pod := range podsToDelete {
go func(targetPod *v1.Pod) {
defer wg.Done()
if err := rsc.podControl.DeletePod(rs.Namespace, filteredPods[ix].Name, rs); err != nil {
if err := rsc.podControl.DeletePod(rs.Namespace, targetPod.Name, rs); err != nil {
// Decrement the expected number of deletes because the informer won't observe this deletion
podKey := controller.PodKey(filteredPods[ix])
podKey := controller.PodKey(targetPod)
glog.V(2).Infof("Failed to delete %v, decrementing expectations for controller %q/%q", podKey, rs.Namespace, rs.Name)
rsc.expectations.DeletionObserved(rsKey, podKey)
errCh <- err
}
}(i)
}(pod)
}
wg.Wait()
select {
case err := <-errCh:
// all errors have been reported before and they're likely to be the same, so we'll only return the first one we hit.
if err != nil {
return err
}
default:
}
}
select {
case err := <-errCh:
// all errors have been reported before and they're likely to be the same, so we'll only return the first one we hit.
if err != nil {
return err
}
default:
}
return nil
}
@@ -606,22 +583,10 @@ func (rsc *ReplicaSetController) syncReplicaSet(key string) error {
filteredPods = append(filteredPods, pod)
}
}
// If any adoptions are attempted, we should first recheck for deletion with
// an uncached quorum read sometime after listing Pods (see #42639).
canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) {
fresh, err := rsc.kubeClient.ExtensionsV1beta1().ReplicaSets(rs.Namespace).Get(rs.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
if fresh.UID != rs.UID {
return nil, fmt.Errorf("original ReplicaSet %v/%v is gone: got uid %v, wanted %v", rs.Namespace, rs.Name, fresh.UID, rs.UID)
}
return fresh, nil
})
cm := controller.NewPodControllerRefManager(rsc.podControl, rs, selector, controllerKind, canAdoptFunc)
// NOTE: filteredPods are pointing to objects from cache - if you need to
// modify them, you need to copy it first.
filteredPods, err = cm.ClaimPods(filteredPods)
filteredPods, err = rsc.claimPods(rs, selector, filteredPods)
if err != nil {
return err
}
@@ -630,9 +595,7 @@ func (rsc *ReplicaSetController) syncReplicaSet(key string) error {
if rsNeedsSync && rs.DeletionTimestamp == nil {
manageReplicasErr = rsc.manageReplicas(filteredPods, rs)
}
rs = rs.DeepCopy()
newStatus := calculateStatus(rs, filteredPods, manageReplicasErr)
// Always updates status as pods come up or die.
@@ -650,3 +613,77 @@ func (rsc *ReplicaSetController) syncReplicaSet(key string) error {
}
return manageReplicasErr
}
func (rsc *ReplicaSetController) claimPods(rs *extensions.ReplicaSet, selector labels.Selector, filteredPods []*v1.Pod) ([]*v1.Pod, error) {
// If any adoptions are attempted, we should first recheck for deletion with
// an uncached quorum read sometime after listing Pods (see #42639).
canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) {
fresh, err := rsc.kubeClient.ExtensionsV1beta1().ReplicaSets(rs.Namespace).Get(rs.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
if fresh.UID != rs.UID {
return nil, fmt.Errorf("original ReplicaSet %v/%v is gone: got uid %v, wanted %v", rs.Namespace, rs.Name, fresh.UID, rs.UID)
}
return fresh, nil
})
cm := controller.NewPodControllerRefManager(rsc.podControl, rs, selector, controllerKind, canAdoptFunc)
return cm.ClaimPods(filteredPods)
}
// slowStartBatch tries to call the provided function a total of 'count' times,
// starting slow to check for errors, then speeding up if calls succeed.
//
// It groups the calls into batches, starting with a group of initialBatchSize.
// Within each batch, it may call the function multiple times concurrently.
//
// If a whole batch succeeds, the next batch may get exponentially larger.
// If there are any failures in a batch, all remaining batches are skipped
// after waiting for the current batch to complete.
//
// It returns the number of successful calls to the function.
func slowStartBatch(count int, initialBatchSize int, fn func() error) (int, error) {
remaining := count
successes := 0
for batchSize := integer.IntMin(remaining, initialBatchSize); batchSize > 0; batchSize = integer.IntMin(2*batchSize, remaining) {
errCh := make(chan error, batchSize)
var wg sync.WaitGroup
wg.Add(batchSize)
for i := 0; i < batchSize; i++ {
go func() {
defer wg.Done()
if err := fn(); err != nil {
errCh <- err
}
}()
}
wg.Wait()
curSuccesses := batchSize - len(errCh)
successes += curSuccesses
if len(errCh) > 0 {
return successes, <-errCh
}
remaining -= batchSize
}
return successes, nil
}
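For illustration only (not part of the commit), here is a minimal standalone sketch of the batch-size progression described in the comments above, assuming count = 10 and initialBatchSize = 1. It mirrors the loop in slowStartBatch without issuing any calls; in the real helper each batch runs batchSize goroutines invoking fn().

package main

import "fmt"

func intMin(a, b int) int {
    if a < b {
        return a
    }
    return b
}

func main() {
    count, initialBatchSize := 10, 1
    remaining := count
    for batchSize := intMin(remaining, initialBatchSize); batchSize > 0; batchSize = intMin(2*batchSize, remaining) {
        // The real helper would issue batchSize concurrent calls here and
        // stop early if any of them returned an error.
        fmt.Printf("batch of %d, %d remaining\n", batchSize, remaining-batchSize)
        remaining -= batchSize
    }
    // Output:
    // batch of 1, 9 remaining
    // batch of 2, 7 remaining
    // batch of 4, 3 remaining
    // batch of 3, 0 remaining
}

Note how the batch sizes 1 + 2 + 4 + 3 = 10 match the expectedCallCnt arithmetic in TestSlowStartBatch below.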
func getPodsToDelete(filteredPods []*v1.Pod, diff int) []*v1.Pod {
// No need to sort pods if we are about to delete all of them.
// diff will always be <= len(filteredPods), so there is no need to handle the > case.
if diff < len(filteredPods) {
// Sort the pods in the order such that not-ready < ready, unscheduled
// < scheduled, and pending < running. This ensures that we delete pods
// in the earlier stages whenever possible.
sort.Sort(controller.ActivePods(filteredPods))
}
return filteredPods[:diff]
}
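As an aside (not part of the commit), a simplified standalone sketch of the deletion preference the sort comment describes. The real ordering comes from controller.ActivePods, which compares more fields; the idea is simply that "less active" pods sort first, so the leading diff entries are the ones deleted.

package main

import (
    "fmt"
    "sort"
)

// fakePod is a stand-in for *v1.Pod with just enough state to rank.
type fakePod struct {
    name      string
    scheduled bool
    running   bool
    ready     bool
}

// rank approximates "unscheduled < scheduled, pending < running,
// not-ready < ready": a lower rank means the pod is deleted first.
func rank(p fakePod) int {
    r := 0
    if p.scheduled {
        r++
    }
    if p.running {
        r++
    }
    if p.ready {
        r++
    }
    return r
}

func main() {
    pods := []fakePod{
        {name: "scheduled-running-ready", scheduled: true, running: true, ready: true},
        {name: "scheduled-running-not-ready", scheduled: true, running: true},
        {name: "scheduled-pending", scheduled: true},
        {name: "unscheduled-pending"},
    }
    diff := 2
    sort.SliceStable(pods, func(i, j int) bool { return rank(pods[i]) < rank(pods[j]) })
    for _, p := range pods[:diff] {
        fmt.Println("would delete:", p.name)
    }
    // Output:
    // would delete: unscheduled-pending
    // would delete: scheduled-pending
}

TestGetPodsToDelete below exercises the same preference against real pod objects.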
func getPodKeys(pods []*v1.Pod) []string {
podKeys := make([]string, 0, len(pods))
for _, pod := range pods {
podKeys = append(podKeys, controller.PodKey(pod))
}
return podKeys
}

View File

@@ -26,6 +26,7 @@ import (
"net/url"
"reflect"
"strings"
"sync"
"testing"
"time"
@@ -1373,3 +1374,254 @@ func TestRemoveCondition(t *testing.T) {
}
}
}
func TestSlowStartBatch(t *testing.T) {
fakeErr := fmt.Errorf("fake error")
callCnt := 0
callLimit := 0
var lock sync.Mutex
fn := func() error {
lock.Lock()
defer lock.Unlock()
callCnt++
if callCnt > callLimit {
return fakeErr
}
return nil
}
tests := []struct {
name string
count int
callLimit int
fn func() error
expectedSuccesses int
expectedErr error
expectedCallCnt int
}{
{
name: "callLimit = 0 (all fail)",
count: 10,
callLimit: 0,
fn: fn,
expectedSuccesses: 0,
expectedErr: fakeErr,
expectedCallCnt: 1, // 1(first batch): function will be called at least once
},
{
name: "callLimit = count (all succeed)",
count: 10,
callLimit: 10,
fn: fn,
expectedSuccesses: 10,
expectedErr: nil,
expectedCallCnt: 10, // 1(first batch) + 2(2nd batch) + 4(3rd batch) + 3(4th batch) = 10
},
{
name: "callLimit < count (some succeed)",
count: 10,
callLimit: 5,
fn: fn,
expectedSuccesses: 5,
expectedErr: fakeErr,
expectedCallCnt: 7, // 1(first batch) + 2(2nd batch) + 4(3rd batch) = 7
},
}
for _, test := range tests {
callCnt = 0
callLimit = test.callLimit
successes, err := slowStartBatch(test.count, 1, test.fn)
if successes != test.expectedSuccesses {
t.Errorf("%s: unexpected processed batch size, expected %d, got %d", test.name, test.expectedSuccesses, successes)
}
if err != test.expectedErr {
t.Errorf("%s: unexpected processed batch size, expected %v, got %v", test.name, test.expectedErr, err)
}
// verify that slowStartBatch stops trying more calls after a batch fails
if callCnt != test.expectedCallCnt {
t.Errorf("%s: slowStartBatch() still tries calls after a batch fails, expected %d calls, got %d", test.name, test.expectedCallCnt, callCnt)
}
}
}
func TestGetPodsToDelete(t *testing.T) {
labelMap := map[string]string{"name": "foo"}
rs := newReplicaSet(1, labelMap)
// an unscheduled, pending pod
unscheduledPendingPod := newPod("unscheduled-pending-pod", rs, v1.PodPending, nil, true)
// a scheduled, pending pod
scheduledPendingPod := newPod("scheduled-pending-pod", rs, v1.PodPending, nil, true)
scheduledPendingPod.Spec.NodeName = "fake-node"
// a scheduled, running, not-ready pod
scheduledRunningNotReadyPod := newPod("scheduled-running-not-ready-pod", rs, v1.PodRunning, nil, true)
scheduledRunningNotReadyPod.Spec.NodeName = "fake-node"
scheduledRunningNotReadyPod.Status.Conditions = []v1.PodCondition{
{
Type: v1.PodReady,
Status: v1.ConditionFalse,
},
}
// a scheduled, running, ready pod
scheduledRunningReadyPod := newPod("scheduled-running-ready-pod", rs, v1.PodRunning, nil, true)
scheduledRunningReadyPod.Spec.NodeName = "fake-node"
scheduledRunningReadyPod.Status.Conditions = []v1.PodCondition{
{
Type: v1.PodReady,
Status: v1.ConditionTrue,
},
}
tests := []struct {
name string
pods []*v1.Pod
diff int
expectedPodsToDelete []*v1.Pod
}{
// Order used when selecting pods for deletion:
// an unscheduled, pending pod
// a scheduled, pending pod
// a scheduled, running, not-ready pod
// a scheduled, running, ready pod
// Note that a pending pod cannot be ready
{
"len(pods) = 0 (i.e., diff = 0 too)",
[]*v1.Pod{},
0,
[]*v1.Pod{},
},
{
"diff = len(pods)",
[]*v1.Pod{
scheduledRunningNotReadyPod,
scheduledRunningReadyPod,
},
2,
[]*v1.Pod{scheduledRunningNotReadyPod, scheduledRunningReadyPod},
},
{
"diff < len(pods)",
[]*v1.Pod{
scheduledRunningReadyPod,
scheduledRunningNotReadyPod,
},
1,
[]*v1.Pod{scheduledRunningNotReadyPod},
},
{
"various pod phases and conditions, diff = len(pods)",
[]*v1.Pod{
scheduledRunningReadyPod,
scheduledRunningNotReadyPod,
scheduledPendingPod,
unscheduledPendingPod,
},
4,
[]*v1.Pod{
scheduledRunningReadyPod,
scheduledRunningNotReadyPod,
scheduledPendingPod,
unscheduledPendingPod,
},
},
{
"scheduled vs unscheduled, diff < len(pods)",
[]*v1.Pod{
scheduledPendingPod,
unscheduledPendingPod,
},
1,
[]*v1.Pod{
unscheduledPendingPod,
},
},
{
"ready vs not-ready, diff < len(pods)",
[]*v1.Pod{
scheduledRunningReadyPod,
scheduledRunningNotReadyPod,
scheduledRunningNotReadyPod,
},
2,
[]*v1.Pod{
scheduledRunningNotReadyPod,
scheduledRunningNotReadyPod,
},
},
{
"pending vs running, diff < len(pods)",
[]*v1.Pod{
scheduledPendingPod,
scheduledRunningNotReadyPod,
},
1,
[]*v1.Pod{
scheduledPendingPod,
},
},
{
"various pod phases and conditions, diff < len(pods)",
[]*v1.Pod{
scheduledRunningReadyPod,
scheduledRunningNotReadyPod,
scheduledPendingPod,
unscheduledPendingPod,
},
3,
[]*v1.Pod{
unscheduledPendingPod,
scheduledPendingPod,
scheduledRunningNotReadyPod,
},
},
}
for _, test := range tests {
podsToDelete := getPodsToDelete(test.pods, test.diff)
if len(podsToDelete) != len(test.expectedPodsToDelete) {
t.Errorf("%s: unexpected pods to delete, expected %v, got %v", test.name, test.expectedPodsToDelete, podsToDelete)
}
if !reflect.DeepEqual(podsToDelete, test.expectedPodsToDelete) {
t.Errorf("%s: unexpected pods to delete, expected %v, got %v", test.name, test.expectedPodsToDelete, podsToDelete)
}
}
}
func TestGetPodKeys(t *testing.T) {
labelMap := map[string]string{"name": "foo"}
rs := newReplicaSet(1, labelMap)
pod1 := newPod("pod1", rs, v1.PodRunning, nil, true)
pod2 := newPod("pod2", rs, v1.PodRunning, nil, true)
tests := []struct {
name string
pods []*v1.Pod
expectedPodKeys []string
}{
{
"len(pods) = 0 (i.e., pods = nil)",
[]*v1.Pod{},
[]string{},
},
{
"len(pods) > 0",
[]*v1.Pod{
pod1,
pod2,
},
[]string{"default/pod1", "default/pod2"},
},
}
for _, test := range tests {
podKeys := getPodKeys(test.pods)
if len(podKeys) != len(test.expectedPodKeys) {
t.Errorf("%s: unexpected keys for pods to delete, expected %v, got %v", test.name, test.expectedPodKeys, podKeys)
}
for i := 0; i < len(podKeys); i++ {
if podKeys[i] != test.expectedPodKeys[i] {
t.Errorf("%s: unexpected keys for pods to delete, expected %v, got %v", test.name, test.expectedPodKeys, podKeys)
}
}
}
}

View File

@@ -0,0 +1,246 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// If you make changes to this file, you should also make the corresponding change in ReplicationController.
package replicaset
import (
"fmt"
"reflect"
"testing"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
)
func TestCalculateStatus(t *testing.T) {
labelMap := map[string]string{"name": "foo"}
fullLabelMap := map[string]string{"name": "foo", "type": "production"}
notFullyLabelledRS := newReplicaSet(1, labelMap)
// Set replica num to 2 for status condition testing (diff < 0, diff > 0)
fullyLabelledRS := newReplicaSet(2, fullLabelMap)
longMinReadySecondsRS := newReplicaSet(1, fullLabelMap)
longMinReadySecondsRS.Spec.MinReadySeconds = 3600
rsStatusTests := []struct {
name string
replicaset *extensions.ReplicaSet
filteredPods []*v1.Pod
expectedReplicaSetStatus extensions.ReplicaSetStatus
}{
{
"1 fully labelled pod",
fullyLabelledRS,
[]*v1.Pod{
newPod("pod1", fullyLabelledRS, v1.PodRunning, nil, true),
},
extensions.ReplicaSetStatus{
Replicas: 1,
FullyLabeledReplicas: 1,
ReadyReplicas: 1,
AvailableReplicas: 1,
},
},
{
"1 not fully labelled pod",
notFullyLabelledRS,
[]*v1.Pod{
newPod("pod1", notFullyLabelledRS, v1.PodRunning, nil, true),
},
extensions.ReplicaSetStatus{
Replicas: 1,
FullyLabeledReplicas: 0,
ReadyReplicas: 1,
AvailableReplicas: 1,
},
},
{
"2 fully labelled pods",
fullyLabelledRS,
[]*v1.Pod{
newPod("pod1", fullyLabelledRS, v1.PodRunning, nil, true),
newPod("pod2", fullyLabelledRS, v1.PodRunning, nil, true),
},
extensions.ReplicaSetStatus{
Replicas: 2,
FullyLabeledReplicas: 2,
ReadyReplicas: 2,
AvailableReplicas: 2,
},
},
{
"2 not fully labelled pods",
notFullyLabelledRS,
[]*v1.Pod{
newPod("pod1", notFullyLabelledRS, v1.PodRunning, nil, true),
newPod("pod2", notFullyLabelledRS, v1.PodRunning, nil, true),
},
extensions.ReplicaSetStatus{
Replicas: 2,
FullyLabeledReplicas: 0,
ReadyReplicas: 2,
AvailableReplicas: 2,
},
},
{
"1 fully labelled pod, 1 not fully labelled pod",
notFullyLabelledRS,
[]*v1.Pod{
newPod("pod1", notFullyLabelledRS, v1.PodRunning, nil, true),
newPod("pod2", fullyLabelledRS, v1.PodRunning, nil, true),
},
extensions.ReplicaSetStatus{
Replicas: 2,
FullyLabeledReplicas: 1,
ReadyReplicas: 2,
AvailableReplicas: 2,
},
},
{
"1 non-ready pod",
fullyLabelledRS,
[]*v1.Pod{
newPod("pod1", fullyLabelledRS, v1.PodPending, nil, true),
},
extensions.ReplicaSetStatus{
Replicas: 1,
FullyLabeledReplicas: 1,
ReadyReplicas: 0,
AvailableReplicas: 0,
},
},
{
"1 ready but non-available pod",
longMinReadySecondsRS,
[]*v1.Pod{
newPod("pod1", longMinReadySecondsRS, v1.PodRunning, nil, true),
},
extensions.ReplicaSetStatus{
Replicas: 1,
FullyLabeledReplicas: 1,
ReadyReplicas: 1,
AvailableReplicas: 0,
},
},
}
for _, test := range rsStatusTests {
replicaSetStatus := calculateStatus(test.replicaset, test.filteredPods, nil)
if !reflect.DeepEqual(replicaSetStatus, test.expectedReplicaSetStatus) {
t.Errorf("%s: unexpected replicaset status: expected %v, got %v", test.name, test.expectedReplicaSetStatus, replicaSetStatus)
}
}
}
func TestCalculateStatusConditions(t *testing.T) {
labelMap := map[string]string{"name": "foo"}
rs := newReplicaSet(2, labelMap)
replicaFailureRS := newReplicaSet(10, labelMap)
replicaFailureRS.Status.Conditions = []extensions.ReplicaSetCondition{
{
Type: extensions.ReplicaSetReplicaFailure,
Status: v1.ConditionTrue,
},
}
rsStatusConditionTests := []struct {
name string
replicaset *extensions.ReplicaSet
filteredPods []*v1.Pod
manageReplicasErr error
expectedReplicaSetConditions []extensions.ReplicaSetCondition
}{
{
"manageReplicasErr != nil && failureCond == nil, diff < 0",
rs,
[]*v1.Pod{
newPod("pod1", rs, v1.PodRunning, nil, true),
},
fmt.Errorf("fake manageReplicasErr"),
[]extensions.ReplicaSetCondition{
{
Type: extensions.ReplicaSetReplicaFailure,
Status: v1.ConditionTrue,
Reason: "FailedCreate",
Message: "fake manageReplicasErr",
},
},
},
{
"manageReplicasErr != nil && failureCond == nil, diff > 0",
rs,
[]*v1.Pod{
newPod("pod1", rs, v1.PodRunning, nil, true),
newPod("pod2", rs, v1.PodRunning, nil, true),
newPod("pod3", rs, v1.PodRunning, nil, true),
},
fmt.Errorf("fake manageReplicasErr"),
[]extensions.ReplicaSetCondition{
{
Type: extensions.ReplicaSetReplicaFailure,
Status: v1.ConditionTrue,
Reason: "FailedDelete",
Message: "fake manageReplicasErr",
},
},
},
{
"manageReplicasErr == nil && failureCond != nil",
replicaFailureRS,
[]*v1.Pod{
newPod("pod1", replicaFailureRS, v1.PodRunning, nil, true),
},
nil,
nil,
},
{
"manageReplicasErr != nil && failureCond != nil",
replicaFailureRS,
[]*v1.Pod{
newPod("pod1", replicaFailureRS, v1.PodRunning, nil, true),
},
fmt.Errorf("fake manageReplicasErr"),
[]extensions.ReplicaSetCondition{
{
Type: extensions.ReplicaSetReplicaFailure,
Status: v1.ConditionTrue,
},
},
},
{
"manageReplicasErr == nil && failureCond == nil",
rs,
[]*v1.Pod{
newPod("pod1", rs, v1.PodRunning, nil, true),
},
nil,
nil,
},
}
for _, test := range rsStatusConditionTests {
replicaSetStatus := calculateStatus(test.replicaset, test.filteredPods, test.manageReplicasErr)
// all test cases have at most 1 status condition
if len(replicaSetStatus.Conditions) > 0 {
test.expectedReplicaSetConditions[0].LastTransitionTime = replicaSetStatus.Conditions[0].LastTransitionTime
}
if !reflect.DeepEqual(replicaSetStatus.Conditions, test.expectedReplicaSetConditions) {
t.Errorf("%s: unexpected replicaset status: expected %v, got %v", test.name, test.expectedReplicaSetConditions, replicaSetStatus.Conditions)
}
}
}