Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-06 10:43:56 +00:00

Rewrite the tests to be table driven

commit 074d5b5329 (parent fa88c0b779)
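For reference, a table-driven test gathers each case's inputs and expected outputs into a slice of structs and runs every case as a named subtest via t.Run. A minimal, self-contained sketch of the pattern this commit applies (illustrative only; not code from this commit):

package example

import "testing"

func add(a, b int) int { return a + b }

func TestAdd(t *testing.T) {
	tests := []struct {
		name string
		a, b int
		want int
	}{
		{name: "zeros", a: 0, b: 0, want: 0},
		{name: "positives", a: 2, b: 3, want: 5},
	}
	for _, test := range tests {
		// Each case runs as its own subtest, so failures are reported per case
		// and one case can be run alone with `go test -run TestAdd/positives`.
		t.Run(test.name, func(t *testing.T) {
			if got := add(test.a, test.b); got != test.want {
				t.Errorf("add(%d, %d) = %d, want %d", test.a, test.b, got, test.want)
			}
		})
	}
}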
@@ -35,6 +35,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
@@ -131,11 +132,11 @@ func newPodList(store cache.Store, count int, status v1.PodPhase, rc *v1.Replica
 	}
 }
 
-func newReplicaSet(name string, replicas int) *apps.ReplicaSet {
+func newReplicaSet(name string, replicas int, rsUuid types.UID) *apps.ReplicaSet {
 	return &apps.ReplicaSet{
 		TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
 		ObjectMeta: metav1.ObjectMeta{
-			UID:             uuid.NewUUID(),
+			UID:             rsUuid,
			Name:            name,
 			Namespace:       metav1.NamespaceDefault,
 			ResourceVersion: "18",
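A note on the newReplicaSet change above: with the UID supplied by the caller instead of generated by uuid.NewUUID() inside the helper, two calls with the same arguments build identical objects, which is what lets the rewritten TestActiveReplicaSetsFiltering near the end of this diff compare FilterActiveReplicaSets output against freshly constructed expected ReplicaSets using cmp.Diff. A standalone sketch of the idea, with simplified stand-in types (not code from this commit):

package main

import "fmt"

// replicaSet stands in for apps.ReplicaSet, and a plain string for types.UID;
// the names here are illustrative only.
type replicaSet struct {
	name     string
	replicas int
	uid      string
}

func newReplicaSet(name string, replicas int, uid string) *replicaSet {
	return &replicaSet{name: name, replicas: replicas, uid: uid}
}

func main() {
	uid := "fixed-uid" // one UID per test, rather than a fresh random UUID per call
	a := newReplicaSet("foo", 1, uid)
	b := newReplicaSet("foo", 1, uid)
	fmt.Println(*a == *b) // true: field-for-field equal, so cmp.Diff reports no difference
}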
@@ -209,162 +210,188 @@ func TestControllerExpectations(t *testing.T) {
 	}
 	wg.Wait()
 
-	// Expectations have been surpassed
-	podExp, exists, err := e.GetExpectations(rcKey)
-	assert.NoError(t, err, "Could not get expectations for rc, exists %v and err %v", exists, err)
-	assert.True(t, exists, "Could not get expectations for rc, exists %v and err %v", exists, err)
-	add, del := podExp.GetExpectations()
-	assert.Equal(t, int64(-1), add, "Unexpected pod expectations %#v", podExp)
-	assert.Equal(t, int64(-1), del, "Unexpected pod expectations %#v", podExp)
-	assert.True(t, e.SatisfiedExpectations(logger, rcKey), "Expectations are met but the rc will not sync")
-	// Next round of rc sync, old expectations are cleared
-	e.SetExpectations(logger, rcKey, 1, 2)
-	podExp, exists, err = e.GetExpectations(rcKey)
-	assert.NoError(t, err, "Could not get expectations for rc, exists %v and err %v", exists, err)
-	assert.True(t, exists, "Could not get expectations for rc, exists %v and err %v", exists, err)
-	add, del = podExp.GetExpectations()
-	assert.Equal(t, int64(1), add, "Unexpected pod expectations %#v", podExp)
-	assert.Equal(t, int64(2), del, "Unexpected pod expectations %#v", podExp)
-
-	// Expectations have expired because of ttl
-	fakeClock.Step(ttl + 1)
-	assert.True(t, e.SatisfiedExpectations(logger, rcKey),
-		"Expectations should have expired but didn't")
+	tests := []struct {
+		name                      string
+		expectationsToSet         []int
+		expireExpectations        bool
+		wantPodExpectations       []int64
+		wantExpectationsSatisfied bool
+	}{
+		{
+			name:                      "Expectations have been surpassed",
+			expireExpectations:        false,
+			wantPodExpectations:       []int64{int64(-1), int64(-1)},
+			wantExpectationsSatisfied: true,
+		},
+		{
+			name:                      "Old expectations are cleared because of ttl",
+			expectationsToSet:         []int{1, 2},
+			expireExpectations:        true,
+			wantPodExpectations:       []int64{int64(1), int64(2)},
+			wantExpectationsSatisfied: false,
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			if len(test.expectationsToSet) > 0 {
+				e.SetExpectations(logger, rcKey, test.expectationsToSet[0], test.expectationsToSet[1])
+			}
+			podExp, exists, err := e.GetExpectations(rcKey)
+			assert.NoError(t, err, "Could not get expectations for rc, exists %v and err %v", exists, err)
+			assert.True(t, exists, "Could not get expectations for rc, exists %v and err %v", exists, err)
+
+			add, del := podExp.GetExpectations()
+			assert.Equal(t, test.wantPodExpectations[0], add, "Unexpected pod expectations %#v", podExp)
+			assert.Equal(t, test.wantPodExpectations[1], del, "Unexpected pod expectations %#v", podExp)
+			assert.Equal(t, test.wantExpectationsSatisfied, e.SatisfiedExpectations(logger, rcKey), "Expectations are met but the rc will not sync")
+
+			if test.expireExpectations {
+				fakeClock.Step(ttl + 1)
+				assert.True(t, e.SatisfiedExpectations(logger, rcKey), "Expectations should have expired but didn't")
+			}
+		})
+	}
 }
 
 func TestUIDExpectations(t *testing.T) {
 	logger, _ := ktesting.NewTestContext(t)
 	uidExp := NewUIDTrackingControllerExpectations(NewControllerExpectations())
-	rcList := []*v1.ReplicationController{
-		newReplicationController(2),
-		newReplicationController(1),
-		newReplicationController(0),
-		newReplicationController(5),
-	}
-	rcToPods := map[string][]string{}
-	rcKeys := []string{}
-	for i := range rcList {
-		rc := rcList[i]
-		rcName := fmt.Sprintf("rc-%v", i)
+	type test struct {
+		name        string
+		numReplicas int
+	}
+
+	shuffleTests := func(tests []test) {
+		for i := range tests {
+			j := rand.Intn(i + 1)
+			tests[i], tests[j] = tests[j], tests[i]
+		}
+	}
+
+	getRcDataFrom := func(test test) (string, []string) {
+		rc := newReplicationController(test.numReplicas)
+
+		rcName := fmt.Sprintf("rc-%v", test.numReplicas)
 		rc.Name = rcName
 		rc.Spec.Selector[rcName] = rcName
+
 		podList := newPodList(nil, 5, v1.PodRunning, rc)
 		rcKey, err := KeyFunc(rc)
 		if err != nil {
 			t.Fatalf("Couldn't get key for object %#v: %v", rc, err)
 		}
-		rcKeys = append(rcKeys, rcKey)
+
 		rcPodNames := []string{}
 		for i := range podList.Items {
 			p := &podList.Items[i]
 			p.Name = fmt.Sprintf("%v-%v", p.Name, rc.Name)
 			rcPodNames = append(rcPodNames, PodKey(p))
 		}
-		rcToPods[rcKey] = rcPodNames
 		uidExp.ExpectDeletions(logger, rcKey, rcPodNames)
-	}
-	for i := range rcKeys {
-		j := rand.Intn(i + 1)
-		rcKeys[i], rcKeys[j] = rcKeys[j], rcKeys[i]
-	}
-	for _, rcKey := range rcKeys {
-		assert.False(t, uidExp.SatisfiedExpectations(logger, rcKey),
-			"Controller %v satisfied expectations before deletion", rcKey)
-
-		for _, p := range rcToPods[rcKey] {
-			uidExp.DeletionObserved(logger, rcKey, p)
-		}
-
-		assert.True(t, uidExp.SatisfiedExpectations(logger, rcKey),
-			"Controller %v didn't satisfy expectations after deletion", rcKey)
-
-		uidExp.DeleteExpectations(logger, rcKey)
-
-		assert.Nil(t, uidExp.GetUIDs(rcKey),
-			"Failed to delete uid expectations for %v", rcKey)
-	}
+
+		return rcKey, rcPodNames
+	}
+
+	tests := []test{
+		{name: "Replication controller with 2 replicas", numReplicas: 2},
+		{name: "Replication controller with 1 replica", numReplicas: 1},
+		{name: "Replication controller with no replicas", numReplicas: 0},
+		{name: "Replication controller with 5 replicas", numReplicas: 5},
+	}
+
+	shuffleTests(tests)
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+
+			rcKey, rcPodNames := getRcDataFrom(test)
+			assert.False(t, uidExp.SatisfiedExpectations(logger, rcKey),
+				"Controller %v satisfied expectations before deletion", rcKey)
+
+			for _, p := range rcPodNames {
+				uidExp.DeletionObserved(logger, rcKey, p)
+			}
+
+			assert.True(t, uidExp.SatisfiedExpectations(logger, rcKey),
+				"Controller %v didn't satisfy expectations after deletion", rcKey)
+
+			uidExp.DeleteExpectations(logger, rcKey)
+
+			assert.Nil(t, uidExp.GetUIDs(rcKey),
+				"Failed to delete uid expectations for %v", rcKey)
+		})
+	}
 }
 
 func TestCreatePods(t *testing.T) {
 	ns := metav1.NamespaceDefault
-	body := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "empty_pod"}})
-	fakeHandler := utiltesting.FakeHandler{
-		StatusCode:   200,
-		ResponseBody: string(body),
-	}
-	testServer := httptest.NewServer(&fakeHandler)
-	defer testServer.Close()
-	clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
-
-	podControl := RealPodControl{
-		KubeClient: clientset,
-		Recorder:   &record.FakeRecorder{},
-	}
-
-	controllerSpec := newReplicationController(1)
-	controllerRef := metav1.NewControllerRef(controllerSpec, v1.SchemeGroupVersion.WithKind("ReplicationController"))
-
-	// Make sure createReplica sends a POST to the apiserver with a pod from the controllers pod template
-	err := podControl.CreatePods(context.TODO(), ns, controllerSpec.Spec.Template, controllerSpec, controllerRef)
-	assert.NoError(t, err, "unexpected error: %v", err)
-
-	expectedPod := v1.Pod{
-		ObjectMeta: metav1.ObjectMeta{
-			Labels:       controllerSpec.Spec.Template.Labels,
-			GenerateName: fmt.Sprintf("%s-", controllerSpec.Name),
-		},
-		Spec: controllerSpec.Spec.Template.Spec,
-	}
-
-	fakeHandler.ValidateRequest(t, "/api/v1/namespaces/default/pods", "POST", nil)
-	var actualPod = &v1.Pod{}
-	err = json.Unmarshal([]byte(fakeHandler.RequestBody), actualPod)
-	assert.NoError(t, err, "unexpected error: %v", err)
-	assert.True(t, apiequality.Semantic.DeepDerivative(&expectedPod, actualPod),
-		"Body: %s", fakeHandler.RequestBody)
-}
-
-func TestCreatePodsWithGenerateName(t *testing.T) {
-	ns := metav1.NamespaceDefault
-	body := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "empty_pod"}})
-	fakeHandler := utiltesting.FakeHandler{
-		StatusCode:   200,
-		ResponseBody: string(body),
-	}
-	testServer := httptest.NewServer(&fakeHandler)
-	defer testServer.Close()
-	clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
-
-	podControl := RealPodControl{
-		KubeClient: clientset,
-		Recorder:   &record.FakeRecorder{},
-	}
-
-	controllerSpec := newReplicationController(1)
-	controllerRef := metav1.NewControllerRef(controllerSpec, v1.SchemeGroupVersion.WithKind("ReplicationController"))
-
-	// Make sure createReplica sends a POST to the apiserver with a pod from the controllers pod template
-	generateName := "hello-"
-	err := podControl.CreatePodsWithGenerateName(context.TODO(), ns, controllerSpec.Spec.Template, controllerSpec, controllerRef, generateName)
-	assert.NoError(t, err, "unexpected error: %v", err)
-
-	expectedPod := v1.Pod{
-		ObjectMeta: metav1.ObjectMeta{
-			Labels:          controllerSpec.Spec.Template.Labels,
-			GenerateName:    generateName,
-			OwnerReferences: []metav1.OwnerReference{*controllerRef},
-		},
-		Spec: controllerSpec.Spec.Template.Spec,
-	}
-
-	fakeHandler.ValidateRequest(t, "/api/v1/namespaces/default/pods", "POST", nil)
-	var actualPod = &v1.Pod{}
-	err = json.Unmarshal([]byte(fakeHandler.RequestBody), actualPod)
-	assert.NoError(t, err, "unexpected error: %v", err)
-	assert.True(t, apiequality.Semantic.DeepDerivative(&expectedPod, actualPod),
-		"Body: %s", fakeHandler.RequestBody)
+	generateName := "hello-"
+	controllerSpec := newReplicationController(1)
+	controllerRef := metav1.NewControllerRef(controllerSpec, v1.SchemeGroupVersion.WithKind("ReplicationController"))
+
+	type test struct {
+		name            string
+		podCreationFunc func(podControl RealPodControl) error
+		wantPod         *v1.Pod
+	}
+	var tests = []test{
+		{
+			name: "Create pod",
+			podCreationFunc: func(podControl RealPodControl) error {
+				return podControl.CreatePods(context.TODO(), ns, controllerSpec.Spec.Template, controllerSpec, controllerRef)
+			},
+			wantPod: &v1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels:       controllerSpec.Spec.Template.Labels,
+					GenerateName: fmt.Sprintf("%s-", controllerSpec.Name),
+				},
+				Spec: controllerSpec.Spec.Template.Spec,
+			},
+		},
+		{
+			name: "Create pod with generate name",
+			podCreationFunc: func(podControl RealPodControl) error {
+				// Make sure createReplica sends a POST to the apiserver with a pod from the controllers pod template
+				return podControl.CreatePodsWithGenerateName(context.TODO(), ns, controllerSpec.Spec.Template, controllerSpec, controllerRef, generateName)
+			},
+			wantPod: &v1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels:          controllerSpec.Spec.Template.Labels,
+					GenerateName:    generateName,
+					OwnerReferences: []metav1.OwnerReference{*controllerRef},
+				},
+				Spec: controllerSpec.Spec.Template.Spec,
+			},
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			body := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "empty_pod"}})
+			fakeHandler := utiltesting.FakeHandler{
+				StatusCode:   200,
+				ResponseBody: string(body),
+			}
+			testServer := httptest.NewServer(&fakeHandler)
+			defer testServer.Close()
+			clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
+
+			podControl := RealPodControl{
+				KubeClient: clientset,
+				Recorder:   &record.FakeRecorder{},
+			}
+
+			err := test.podCreationFunc(podControl)
+			assert.NoError(t, err, "unexpected error: %v", err)
+
+			fakeHandler.ValidateRequest(t, "/api/v1/namespaces/default/pods", "POST", nil)
+			var actualPod = &v1.Pod{}
+			err = json.Unmarshal([]byte(fakeHandler.RequestBody), actualPod)
+			assert.NoError(t, err, "unexpected error: %v", err)
+			assert.True(t, apiequality.Semantic.DeepDerivative(test.wantPod, actualPod),
+				"Body: %s", fakeHandler.RequestBody)
+		})
+	}
 }
 
 func TestDeletePodsAllowsMissing(t *testing.T) {
@@ -382,103 +409,172 @@ func TestDeletePodsAllowsMissing(t *testing.T) {
 
 func TestActivePodFiltering(t *testing.T) {
 	logger, _ := ktesting.NewTestContext(t)
-	// This rc is not needed by the test, only the newPodList to give the pods labels/a namespace.
-	rc := newReplicationController(0)
-	podList := newPodList(nil, 5, v1.PodRunning, rc)
-	podList.Items[0].Status.Phase = v1.PodSucceeded
-	podList.Items[1].Status.Phase = v1.PodFailed
-	expectedNames := sets.NewString()
-	for _, pod := range podList.Items[2:] {
-		expectedNames.Insert(pod.Name)
-	}
-
-	var podPointers []*v1.Pod
-	for i := range podList.Items {
-		podPointers = append(podPointers, &podList.Items[i])
-	}
-	got := FilterActivePods(logger, podPointers)
-	gotNames := sets.NewString()
-	for _, pod := range got {
-		gotNames.Insert(pod.Name)
-	}
-
-	if diff := cmp.Diff(expectedNames.List(), gotNames.List()); diff != "" {
-		t.Errorf("Active pod names (-want,+got):\n%s", diff)
-	}
+	type podData struct {
+		podName  string
+		podPhase v1.PodPhase
+	}
+
+	type test struct {
+		name         string
+		pods         []podData
+		wantPodNames []string
+	}
+
+	tests := []test{
+		{
+			name: "Filters active pods",
+			pods: []podData{
+				{podName: "pod-1", podPhase: v1.PodSucceeded},
+				{podName: "pod-2", podPhase: v1.PodFailed},
+				{podName: "pod-3"},
+				{podName: "pod-4"},
+				{podName: "pod-5"},
+			},
+			wantPodNames: []string{"pod-3", "pod-4", "pod-5"},
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			// This rc is not needed by the test, only the newPodList to give the pods labels/a namespace.
+			rc := newReplicationController(0)
+			podList := newPodList(nil, 5, v1.PodRunning, rc)
+			for idx, testPod := range test.pods {
+				podList.Items[idx].Name = testPod.podName
+				podList.Items[idx].Status.Phase = testPod.podPhase
+			}
+
+			var podPointers []*v1.Pod
+			for i := range podList.Items {
+				podPointers = append(podPointers, &podList.Items[i])
+			}
+			got := FilterActivePods(logger, podPointers)
+			gotNames := sets.NewString()
+			for _, pod := range got {
+				gotNames.Insert(pod.Name)
+			}
+
+			if diff := cmp.Diff(test.wantPodNames, gotNames.List()); diff != "" {
+				t.Errorf("Active pod names (-want,+got):\n%s", diff)
+			}
+		})
+	}
 }
 
 func TestSortingActivePods(t *testing.T) {
-	numPods := 9
-	// This rc is not needed by the test, only the newPodList to give the pods labels/a namespace.
-	rc := newReplicationController(0)
-	podList := newPodList(nil, numPods, v1.PodRunning, rc)
-
-	pods := make([]*v1.Pod, len(podList.Items))
-	for i := range podList.Items {
-		pods[i] = &podList.Items[i]
-	}
-	// pods[0] is not scheduled yet.
-	pods[0].Spec.NodeName = ""
-	pods[0].Status.Phase = v1.PodPending
-	// pods[1] is scheduled but pending.
-	pods[1].Spec.NodeName = "bar"
-	pods[1].Status.Phase = v1.PodPending
-	// pods[2] is unknown.
-	pods[2].Spec.NodeName = "foo"
-	pods[2].Status.Phase = v1.PodUnknown
-	// pods[3] is running but not ready.
-	pods[3].Spec.NodeName = "foo"
-	pods[3].Status.Phase = v1.PodRunning
-	// pods[4] is running and ready but without LastTransitionTime.
 	now := metav1.Now()
-	pods[4].Spec.NodeName = "foo"
-	pods[4].Status.Phase = v1.PodRunning
-	pods[4].Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}}
-	pods[4].Status.ContainerStatuses = []v1.ContainerStatus{{RestartCount: 3}, {RestartCount: 0}}
-	// pods[5] is running and ready and with LastTransitionTime.
-	pods[5].Spec.NodeName = "foo"
-	pods[5].Status.Phase = v1.PodRunning
-	pods[5].Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue, LastTransitionTime: now}}
-	pods[5].Status.ContainerStatuses = []v1.ContainerStatus{{RestartCount: 3}, {RestartCount: 0}}
-	// pods[6] is running ready for a longer time than pods[5].
 	then := metav1.Time{Time: now.AddDate(0, -1, 0)}
-	pods[6].Spec.NodeName = "foo"
-	pods[6].Status.Phase = v1.PodRunning
-	pods[6].Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue, LastTransitionTime: then}}
-	pods[6].Status.ContainerStatuses = []v1.ContainerStatus{{RestartCount: 3}, {RestartCount: 0}}
-	// pods[7] has lower container restart count than pods[6].
-	pods[7].Spec.NodeName = "foo"
-	pods[7].Status.Phase = v1.PodRunning
-	pods[7].Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue, LastTransitionTime: then}}
-	pods[7].Status.ContainerStatuses = []v1.ContainerStatus{{RestartCount: 2}, {RestartCount: 1}}
-	pods[7].CreationTimestamp = now
-	// pods[8] is older than pods[7].
-	pods[8].Spec.NodeName = "foo"
-	pods[8].Status.Phase = v1.PodRunning
-	pods[8].Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue, LastTransitionTime: then}}
-	pods[8].Status.ContainerStatuses = []v1.ContainerStatus{{RestartCount: 2}, {RestartCount: 1}}
-	pods[8].CreationTimestamp = then
-
-	getOrder := func(pods []*v1.Pod) []string {
-		names := make([]string, len(pods))
-		for i := range pods {
-			names[i] = pods[i].Name
-		}
-		return names
+	tests := []struct {
+		name      string
+		pods      []v1.Pod
+		wantOrder []string
+	}{
+		{
+			name: "Sorts by active pod",
+			pods: []v1.Pod{
+				{
+					ObjectMeta: metav1.ObjectMeta{Name: "unscheduled"},
+					Spec:       v1.PodSpec{NodeName: ""},
+					Status:     v1.PodStatus{Phase: v1.PodPending},
+				},
+				{
+					ObjectMeta: metav1.ObjectMeta{Name: "scheduledButPending"},
+					Spec:       v1.PodSpec{NodeName: "bar"},
+					Status:     v1.PodStatus{Phase: v1.PodPending},
+				},
+				{
+					ObjectMeta: metav1.ObjectMeta{Name: "unknownPhase"},
+					Spec:       v1.PodSpec{NodeName: "foo"},
+					Status:     v1.PodStatus{Phase: v1.PodUnknown},
+				},
+				{
+					ObjectMeta: metav1.ObjectMeta{Name: "runningButNotReady"},
+					Spec:       v1.PodSpec{NodeName: "foo"},
+					Status:     v1.PodStatus{Phase: v1.PodRunning},
+				},
+				{
+					ObjectMeta: metav1.ObjectMeta{Name: "runningNoLastTransitionTime"},
+					Spec:       v1.PodSpec{NodeName: "foo"},
+					Status: v1.PodStatus{
+						Phase:             v1.PodRunning,
+						Conditions:        []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}},
+						ContainerStatuses: []v1.ContainerStatus{{RestartCount: 3}, {RestartCount: 0}},
+					},
+				},
+				{
+					ObjectMeta: metav1.ObjectMeta{Name: "runningWithLastTransitionTime"},
+					Spec:       v1.PodSpec{NodeName: "foo"},
+					Status: v1.PodStatus{
+						Phase:             v1.PodRunning,
+						Conditions:        []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue, LastTransitionTime: now}},
+						ContainerStatuses: []v1.ContainerStatus{{RestartCount: 3}, {RestartCount: 0}},
+					},
+				},
+				{
+					ObjectMeta: metav1.ObjectMeta{Name: "runningLongerTime"},
+					Spec:       v1.PodSpec{NodeName: "foo"},
+					Status: v1.PodStatus{
+						Phase:             v1.PodRunning,
+						Conditions:        []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue, LastTransitionTime: then}},
+						ContainerStatuses: []v1.ContainerStatus{{RestartCount: 3}, {RestartCount: 0}},
+					},
+				},
+				{
+					ObjectMeta: metav1.ObjectMeta{Name: "lowerContainerRestartCount", CreationTimestamp: now},
+					Spec:       v1.PodSpec{NodeName: "foo"},
+					Status: v1.PodStatus{
+						Phase:             v1.PodRunning,
+						Conditions:        []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue, LastTransitionTime: then}},
+						ContainerStatuses: []v1.ContainerStatus{{RestartCount: 2}, {RestartCount: 1}},
+					},
+				},
+				{
+					ObjectMeta: metav1.ObjectMeta{Name: "oldest", CreationTimestamp: then},
+					Spec:       v1.PodSpec{NodeName: "foo"},
+					Status: v1.PodStatus{
+						Phase:             v1.PodRunning,
+						Conditions:        []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue, LastTransitionTime: then}},
+						ContainerStatuses: []v1.ContainerStatus{{RestartCount: 2}, {RestartCount: 1}},
+					},
+				},
+			},
+			wantOrder: []string{
+				"unscheduled",
+				"scheduledButPending",
+				"unknownPhase",
+				"runningButNotReady",
+				"runningNoLastTransitionTime",
+				"runningWithLastTransitionTime",
+				"runningLongerTime",
+				"lowerContainerRestartCount",
+				"oldest",
+			},
+		},
 	}
 
-	expected := getOrder(pods)
-	for i := 0; i < 20; i++ {
-		idx := rand.Perm(numPods)
-		randomizedPods := make([]*v1.Pod, numPods)
-		for j := 0; j < numPods; j++ {
-			randomizedPods[j] = pods[idx[j]]
-		}
-		sort.Sort(ActivePods(randomizedPods))
-		actual := getOrder(randomizedPods)
-
-		assert.EqualValues(t, expected, actual, "expected %v, got %v", expected, actual)
-	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			numPods := len(test.pods)
+
+			for i := 0; i < 20; i++ {
+				idx := rand.Perm(numPods)
+				randomizedPods := make([]*v1.Pod, numPods)
+				for j := 0; j < numPods; j++ {
+					randomizedPods[j] = &test.pods[idx[j]]
+				}
+
+				sort.Sort(ActivePods(randomizedPods))
+				gotOrder := make([]string, len(randomizedPods))
+				for i := range randomizedPods {
+					gotOrder[i] = randomizedPods[i].Name
+				}
+
+				if diff := cmp.Diff(test.wantOrder, gotOrder); diff != "" {
+					t.Errorf("Sorted active pod names (-want,+got):\n%s", diff)
+				}
+			}
+		})
+	}
 }
 
@@ -545,20 +641,23 @@ func TestSortingActivePodsWithRanks(t *testing.T) {
 		{p1: unscheduled5Hours, p2: unscheduled8Hours},
 		{p1: ready5Hours, p2: ready10Hours},
 	}
-	for _, tc := range equalityTests {
-		defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LogarithmicScaleDown, !tc.disableLogarithmicScaleDown)()
-		if tc.p2 == nil {
-			tc.p2 = tc.p1
-		}
-		podsWithRanks := ActivePodsWithRanks{
-			Pods: []*v1.Pod{tc.p1, tc.p2},
-			Rank: []int{1, 1},
-			Now:  now,
-		}
-		if podsWithRanks.Less(0, 1) || podsWithRanks.Less(1, 0) {
-			t.Errorf("expected pod %q to be equivalent to %q", tc.p1.Name, tc.p2.Name)
-		}
+	for i, test := range equalityTests {
+		t.Run(fmt.Sprintf("Equality tests %d", i), func(t *testing.T) {
+			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LogarithmicScaleDown, !test.disableLogarithmicScaleDown)()
+			if test.p2 == nil {
+				test.p2 = test.p1
+			}
+			podsWithRanks := ActivePodsWithRanks{
+				Pods: []*v1.Pod{test.p1, test.p2},
+				Rank: []int{1, 1},
+				Now:  now,
+			}
+			if podsWithRanks.Less(0, 1) || podsWithRanks.Less(1, 0) {
+				t.Errorf("expected pod %q to be equivalent to %q", test.p1.Name, test.p2.Name)
+			}
+		})
 	}
 
 	type podWithRank struct {
 		pod  *v1.Pod
 		rank int
@@ -585,8 +684,9 @@ func TestSortingActivePodsWithRanks(t *testing.T) {
 		{lesser: podWithRank{highPodDeletionCost, 2}, greater: podWithRank{lowPodDeletionCost, 1}, disablePodDeletioncost: true},
 		{lesser: podWithRank{ready2Hours, 1}, greater: podWithRank{ready5Hours, 1}},
 	}
 
 	for i, test := range inequalityTests {
-		t.Run(fmt.Sprintf("test%d", i), func(t *testing.T) {
+		t.Run(fmt.Sprintf("Inequality tests %d", i), func(t *testing.T) {
 			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDeletionCost, !test.disablePodDeletioncost)()
 			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LogarithmicScaleDown, !test.disableLogarithmicScaleDown)()
+
@@ -606,24 +706,36 @@ func TestSortingActivePodsWithRanks(t *testing.T) {
 }
 
 func TestActiveReplicaSetsFiltering(t *testing.T) {
-	var replicaSets []*apps.ReplicaSet
-	replicaSets = append(replicaSets, newReplicaSet("zero", 0))
-	replicaSets = append(replicaSets, nil)
-	replicaSets = append(replicaSets, newReplicaSet("foo", 1))
-	replicaSets = append(replicaSets, newReplicaSet("bar", 2))
-	expectedNames := sets.NewString()
-	for _, rs := range replicaSets[2:] {
-		expectedNames.Insert(rs.Name)
+
+	rsUuid := uuid.NewUUID()
+	tests := []struct {
+		name            string
+		replicaSets     []*apps.ReplicaSet
+		wantReplicaSets []*apps.ReplicaSet
+	}{
+		{
+			name: "Filters active replica sets",
+			replicaSets: []*apps.ReplicaSet{
+				newReplicaSet("zero", 0, rsUuid),
+				nil,
+				newReplicaSet("foo", 1, rsUuid),
+				newReplicaSet("bar", 2, rsUuid),
+			},
+			wantReplicaSets: []*apps.ReplicaSet{
+				newReplicaSet("foo", 1, rsUuid),
+				newReplicaSet("bar", 2, rsUuid),
+			},
+		},
 	}
 
-	got := FilterActiveReplicaSets(replicaSets)
-	gotNames := sets.NewString()
-	for _, rs := range got {
-		gotNames.Insert(rs.Name)
-	}
-
-	if diff := cmp.Diff(expectedNames.List(), gotNames.List()); diff != "" {
-		t.Errorf("Active replica set names (-want,+got):\n%s", diff)
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			gotReplicaSets := FilterActiveReplicaSets(test.replicaSets)
+
+			if diff := cmp.Diff(test.wantReplicaSets, gotReplicaSets); diff != "" {
+				t.Errorf("Active replica set names (-want,+got):\n%s", diff)
+			}
+		})
 	}
 }